# -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: March 4, 2003
# Author: Francesc Alted - faltet@pytables.com
#
# $Id$
#
########################################################################
"""Utility functions"""
import os
import sys
import subprocess
from time import time
import numpy
from tables.flavor import array_of_flavor
# The map between byteorders in NumPy and PyTables
byteorders = {'>': 'big',
'<': 'little',
'=': sys.byteorder,
'|': 'irrelevant'}
# The type used for size values: indexes, coordinates, dimension
# lengths, row numbers, shapes, chunk shapes, byte counts...
SizeType = numpy.int64
def correct_byteorder(ptype, byteorder):
"""Fix the byteorder depending on the PyTables types."""
if ptype in ['string', 'bool', 'int8', 'uint8']:
return "irrelevant"
else:
return byteorder
def is_idx(index):
"""Checks if an object can work as an index or not."""
if type(index) in (int, long):
return True
elif hasattr(index, "__index__"): # Only works on Python 2.5 (PEP 357)
        # Exclude arrays like array([idx]) from working as an index.  Fixes #303.
if (hasattr(index, "shape") and index.shape != ()):
return False
try:
index.__index__()
return True
except TypeError:
return False
elif isinstance(index, numpy.integer):
return True
# For Python 2.4 one should test 0-dim and 1-dim, 1-elem arrays as well
elif (isinstance(index, numpy.ndarray) and (index.shape == ()) and
index.dtype.str[1] == 'i'):
return True
return False
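# A few hedged examples of what ``is_idx`` accepts (added for illustration,
# not part of the original module; values are arbitrary):
#
#   >>> is_idx(3), is_idx(numpy.int64(3))
#   (True, True)
#   >>> is_idx(numpy.array([3])), is_idx(3.0)   # 1-elem arrays and floats fail
#   (False, False)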
def idx2long(index):
"""Convert a possible index into a long int"""
try:
return long(index)
except:
raise TypeError("not an integer type.")
# This is used in VLArray and EArray to produce a NumPy object compliant
# with atom from a generic Python type.  If copy is True, it is
# guaranteed that a copy of the object is returned, never the same
# object or a new one sharing the same memory.
def convertToNPAtom(arr, atom, copy=False):
"Convert a generic object into a NumPy object compliant with atom."
# First, convert the object into a NumPy array
nparr = array_of_flavor(arr, 'numpy')
# Copy of data if necessary for getting a contiguous buffer, or if
# dtype is not the correct one.
if atom.shape == ():
# Scalar atom case
nparr = numpy.array(nparr, dtype=atom.dtype, copy=copy)
else:
# Multidimensional atom case. Addresses #133.
# We need to use this strange way to obtain a dtype compliant
# array because NumPy doesn't honor the shape of the dtype when
# it is multidimensional. See:
# http://scipy.org/scipy/numpy/ticket/926
# for details.
        # All of this is done just to take advantage of the NumPy
# broadcasting rules.
newshape = nparr.shape[:-len(atom.dtype.shape)]
nparr2 = numpy.empty(newshape, dtype=[('', atom.dtype)])
nparr2['f0'][:] = nparr
# Return a view (i.e. get rid of the record type)
nparr = nparr2.view(atom.dtype)
return nparr
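# Hedged sketch of the structured-dtype trick above in plain NumPy (the
# dtypes and shapes are illustrative, not taken from any PyTables atom):
#
#     atom_dtype = numpy.dtype((numpy.int32, (2,)))    # multidimensional atom
#     data = numpy.zeros((3, 2), dtype=numpy.int64)    # wrong base dtype
#     wrapped = numpy.empty((3,), dtype=[('', atom_dtype)])
#     wrapped['f0'][:] = data             # cast via broadcasting, as above
#     result = wrapped.view(atom_dtype)   # drop the record type -> (3, 2) int32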
# The next function is used in Array, EArray and VLArray, and it is a bit
# more high-level than convertToNPAtom.
def convertToNPAtom2(object, atom):
"""Convert a generic object into a NumPy object compliant with atom."""
    # Check whether the object needs to be copied to make the
    # in-place conversion safe.
copy = atom.type in ['time64']
nparr = convertToNPAtom(object, atom, copy)
# Finally, check the byteorder and change it if needed
byteorder = byteorders[nparr.dtype.byteorder]
if ( byteorder in ['little', 'big'] and byteorder != sys.byteorder ):
# The byteorder needs to be fixed (a copy is made
# so that the original array is not modified)
nparr = nparr.byteswap()
return nparr
def checkFileAccess(filename, mode='r'):
"""Check for file access in the specified `mode`.
`mode` is one of the modes supported by `File` objects. If the file
indicated by `filename` can be accessed using that `mode`, the
function ends successfully. Else, an ``IOError`` is raised
    explaining the reason for the failure.
    All this paraphernalia is used to avoid the lengthy and scary HDF5
messages produced when there are problems opening a file. No
changes are ever made to the file system.
"""
if mode == 'r':
# The file should be readable.
if not os.access(filename, os.F_OK):
raise IOError("``%s`` does not exist" % (filename,))
if not os.path.isfile(filename):
raise IOError("``%s`` is not a regular file" % (filename,))
if not os.access(filename, os.R_OK):
raise IOError("file ``%s`` exists but it can not be read"
% (filename,))
elif mode == 'w':
if os.access(filename, os.F_OK):
# Since the file is not removed but replaced,
# it must already be accessible to read and write operations.
checkFileAccess(filename, 'r+')
else:
# A new file is going to be created,
# so the directory should be writable.
parentname = os.path.dirname(filename)
if not parentname:
parentname = '.'
if not os.access(parentname, os.F_OK):
raise IOError("``%s`` does not exist" % (parentname,))
if not os.path.isdir(parentname):
raise IOError("``%s`` is not a directory" % (parentname,))
if not os.access(parentname, os.W_OK):
raise IOError("directory ``%s`` exists but it can not be written"
% (parentname,))
elif mode == 'a':
if os.access(filename, os.F_OK):
checkFileAccess(filename, 'r+')
else:
checkFileAccess(filename, 'w')
elif mode == 'r+':
checkFileAccess(filename, 'r')
if not os.access(filename, os.W_OK):
raise IOError("file ``%s`` exists but it can not be written"
% (filename,))
else:
raise ValueError("invalid mode: %r" % (mode,))
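# Hedged example of how ``checkFileAccess`` reports problems (the paths
# below are hypothetical):
#
#   >>> checkFileAccess('/tmp/existing.h5', 'r')    # returns silently on success
#   >>> checkFileAccess('/no/such/dir/new.h5', 'w')
#   Traceback (most recent call last):
#   ...
#   IOError: ``/no/such/dir`` does not exist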
def lazyattr(fget):
"""Create a *lazy attribute* from the result of `fget`.
This function is intended to be used as a *method decorator*. It
returns a *property* which caches the result of calling the `fget`
instance method. The docstring of `fget` is used for the property
itself. For instance:
>>> class MyClass(object):
... @lazyattr
... def attribute(self):
... 'Attribute description.'
... print('creating value')
... return 10
...
>>> type(MyClass.attribute)
<type 'property'>
>>> MyClass.attribute.__doc__
'Attribute description.'
>>> obj = MyClass()
>>> obj.__dict__
{}
>>> obj.attribute
creating value
10
>>> obj.__dict__
{'attribute': 10}
>>> obj.attribute
10
>>> del obj.attribute
Traceback (most recent call last):
...
AttributeError: can't delete attribute
.. warning::
Please note that this decorator *changes the type of the
decorated object* from an instance method into a property.
"""
name = fget.__name__
def newfget(self):
mydict = self.__dict__
if name in mydict:
return mydict[name]
mydict[name] = value = fget(self)
return value
return property(newfget, None, None, fget.__doc__)
def show_stats(explain, tref, encoding=None):
"""Show the used memory (only works for Linux 2.6.x)."""
if encoding is None:
encoding = sys.getdefaultencoding()
# Build the command to obtain memory info
cmd = "cat /proc/%s/status" % os.getpid()
sout = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout
for line in sout:
line = line.decode(encoding)
if line.startswith("VmSize:"):
vmsize = int(line.split()[1])
elif line.startswith("VmRSS:"):
vmrss = int(line.split()[1])
elif line.startswith("VmData:"):
vmdata = int(line.split()[1])
elif line.startswith("VmStk:"):
vmstk = int(line.split()[1])
elif line.startswith("VmExe:"):
vmexe = int(line.split()[1])
elif line.startswith("VmLib:"):
vmlib = int(line.split()[1])
sout.close()
print "Memory usage: ******* %s *******" % explain
print "VmSize: %7s kB\tVmRSS: %7s kB" % (vmsize, vmrss)
print "VmData: %7s kB\tVmStk: %7s kB" % (vmdata, vmstk)
print "VmExe: %7s kB\tVmLib: %7s kB" % (vmexe, vmlib)
tnow = time()
print "WallClock time:", round(tnow - tref, 3)
return tnow
# Utilities to detect leaked instances. See recipe 14.10 of the Python
# Cookbook by Martelli & Ascher.
tracked_classes = {}
import weakref
def logInstanceCreation(instance, name=None):
if name is None:
name = instance.__class__.__name__
if name not in tracked_classes:
tracked_classes[name] = []
tracked_classes[name].append(weakref.ref(instance))
def string_to_classes(s):
if s == '*':
c = sorted(tracked_classes.iterkeys())
return c
else:
return s.split()
def fetchLoggedInstances(classes="*"):
classnames = string_to_classes(classes)
return [(cn, len(tracked_classes[cn])) for cn in classnames]
def countLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write("%s: %d\n" % (classname, len(tracked_classes[classname])))
def listLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write('\n%s:\n' % classname)
for ref in tracked_classes[classname]:
obj = ref()
if obj is not None:
file.write(' %s\n' % repr(obj))
def dumpLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write('\n%s:\n' % classname)
for ref in tracked_classes[classname]:
obj = ref()
if obj is not None:
file.write(' %s:\n' % obj)
for key, value in obj.__dict__.iteritems():
file.write(' %20s : %s\n' % (key, value))
#
# A class useful for cache usage
#
class CacheDict(dict):
"""A dictionary that prevents itself from growing too much."""
def __init__(self, maxentries):
self.maxentries = maxentries
super(CacheDict, self).__init__(self)
def __setitem__(self, key, value):
# Protection against growing the cache too much
if len(self) > self.maxentries:
            # Remove 10% of the (arbitrarily chosen) entries from the cache
entries_to_remove = self.maxentries / 10
for k in self.keys()[:entries_to_remove]:
super(CacheDict, self).__delitem__(k)
super(CacheDict, self).__setitem__(key, value)
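# Hedged usage sketch for ``CacheDict`` (key and value are illustrative):
#
#   >>> cache = CacheDict(maxentries=100)
#   >>> cache['c > 0'] = 'compiled condition'
#   >>> 'c > 0' in cache
#   True
#
# Once the dictionary holds more than ``maxentries`` items, the next
# assignment first evicts roughly 10% of the existing keys.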
class NailedDict(object):
"""A dictionary which ignores its items when it has nails on it."""
def __init__(self, maxentries):
self.maxentries = maxentries
self._cache = {}
self._nailcount = 0
    # Only a restricted set of dictionary methods is supported.  That
    # is why we wrap a plain dict instead of inheriting from it.
# The following are intended to be used by ``Table`` code changing
# the set of usable indexes.
def clear(self):
self._cache.clear()
def nail(self):
self._nailcount += 1
def unnail(self):
self._nailcount -= 1
# The following are intended to be used by ``Table`` code handling
# conditions.
def __contains__(self, key):
if self._nailcount > 0:
return False
return key in self._cache
def __getitem__(self, key):
if self._nailcount > 0:
raise KeyError(key)
return self._cache[key]
def get(self, key, default=None):
if self._nailcount > 0:
return default
return self._cache.get(key, default)
def __setitem__(self, key, value):
if self._nailcount > 0:
return
cache = self._cache
# Protection against growing the cache too much
if len(cache) > self.maxentries:
            # Remove 10% of the (arbitrarily chosen) entries from the cache
entries_to_remove = self.maxentries // 10
for k in cache.keys()[:entries_to_remove]:
del cache[k]
cache[key] = value
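# Hedged usage sketch for ``NailedDict`` (illustrative only).  While the
# dictionary is nailed, lookups miss and stores are silently dropped, which
# lets ``Table`` temporarily disable its condition cache:
#
#   >>> conditions = NailedDict(maxentries=100)
#   >>> conditions['c > 0'] = 'compiled condition'
#   >>> conditions.nail()
#   >>> 'c > 0' in conditions
#   False
#   >>> conditions.unnail()
#   >>> 'c > 0' in conditions
#   True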
def detectNumberOfCores():
"""Detects the number of cores on a system. Cribbed from pp."""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# Windows:
if "NUMBER_OF_PROCESSORS" in os.environ:
        ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
return ncpus
return 1 # Default
# Main part
# =========
def _test():
"""Run ``doctest`` on this module."""
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
|
|
#!/usr/bin/env python
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import optparse
import os
import sys
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir))
sys.path.insert(0, top_dir)
# To get this installed you have to do the following:
#
# $ pip install pydot2
import pydot
from automaton import machines
from taskflow.engines.action_engine import builder
from taskflow.engines.worker_based import protocol
from taskflow import states
# This is just needed to get at the machine object (we will not
# actually be running it...).
class DummyRuntime(object):
def __init__(self):
self.analyzer = mock.MagicMock()
self.completer = mock.MagicMock()
self.scheduler = mock.MagicMock()
self.storage = mock.MagicMock()
def clean_event(name):
name = name.replace("_", " ")
name = name.strip()
return name
def make_machine(start_state, transitions):
    machine = machines.FiniteMachine()
    machine.add_state(start_state)
    # Record the true start state before the loop below reuses the name.
    machine.default_start_state = start_state
for (start_state, end_state) in transitions:
if start_state not in machine:
machine.add_state(start_state)
if end_state not in machine:
machine.add_state(end_state)
# Make a fake event (not used anyway)...
event = "on_%s" % (end_state)
machine.add_transition(start_state, end_state, event.lower())
return machine
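# Hedged example of ``make_machine`` (the states below are made up, they are
# not real taskflow states):
#
#   m = make_machine('BEGIN', [('BEGIN', 'RUNNING'), ('RUNNING', 'DONE')])
#   for (start, event, end) in m:   # same iteration main() uses below
#       print(start, event, end)    # e.g. "BEGIN on_running RUNNING"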
def map_color(internal_states, state):
if state in internal_states:
return 'blue'
if state in (states.FAILURE, states.REVERT_FAILURE):
return 'red'
if state == states.REVERTED:
return 'darkorange'
if state in (states.SUCCESS, states.COMPLETE):
return 'green'
return None
def main():
parser = optparse.OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write svg to FILE", metavar="FILE")
parser.add_option("-t", "--tasks", dest="tasks",
action='store_true',
help="use task state transitions",
default=False)
parser.add_option("-r", "--retries", dest="retries",
action='store_true',
help="use retry state transitions",
default=False)
parser.add_option("-e", "--engines", dest="engines",
action='store_true',
help="use engine state transitions",
default=False)
parser.add_option("-w", "--wbe-requests", dest="wbe_requests",
action='store_true',
help="use wbe request transitions",
default=False)
parser.add_option("-j", "--jobs", dest="jobs",
action='store_true',
help="use job transitions",
default=False)
parser.add_option("-T", "--format", dest="format",
help="output in given format",
default='svg')
(options, args) = parser.parse_args()
if options.filename is None:
options.filename = 'states.%s' % options.format
types = [
options.engines,
options.retries,
options.tasks,
options.wbe_requests,
options.jobs,
]
if sum([int(i) for i in types]) > 1:
parser.error("Only one of task/retry/engines/wbe requests/jobs"
" may be specified.")
internal_states = list()
ordering = 'in'
if options.tasks:
source_type = "Tasks"
source = make_machine(states.PENDING,
list(states._ALLOWED_TASK_TRANSITIONS))
elif options.retries:
source_type = "Retries"
source = make_machine(states.PENDING,
list(states._ALLOWED_RETRY_TRANSITIONS))
elif options.engines:
source_type = "Engines"
b = builder.MachineBuilder(DummyRuntime(), mock.MagicMock())
source, memory = b.build()
internal_states.extend(builder.META_STATES)
ordering = 'out'
elif options.wbe_requests:
source_type = "WBE requests"
source = make_machine(protocol.WAITING,
list(protocol._ALLOWED_TRANSITIONS))
elif options.jobs:
source_type = "Jobs"
source = make_machine(states.UNCLAIMED,
list(states._ALLOWED_JOB_TRANSITIONS))
else:
source_type = "Flow"
source = make_machine(states.PENDING,
list(states._ALLOWED_FLOW_TRANSITIONS))
graph_name = "%s states" % source_type
g = pydot.Dot(graph_name=graph_name, rankdir='LR',
nodesep='0.25', overlap='false',
ranksep="0.5", size="11x8.5",
splines='true', ordering=ordering)
node_attrs = {
'fontsize': '11',
}
nodes = {}
for (start_state, on_event, end_state) in source:
on_event = clean_event(on_event)
if start_state not in nodes:
start_node_attrs = node_attrs.copy()
text_color = map_color(internal_states, start_state)
if text_color:
start_node_attrs['fontcolor'] = text_color
nodes[start_state] = pydot.Node(start_state, **start_node_attrs)
g.add_node(nodes[start_state])
if end_state not in nodes:
end_node_attrs = node_attrs.copy()
text_color = map_color(internal_states, end_state)
if text_color:
end_node_attrs['fontcolor'] = text_color
nodes[end_state] = pydot.Node(end_state, **end_node_attrs)
g.add_node(nodes[end_state])
if options.engines:
edge_attrs = {
'label': on_event,
}
if 'reverted' in on_event:
edge_attrs['fontcolor'] = 'darkorange'
if 'fail' in on_event:
edge_attrs['fontcolor'] = 'red'
if 'success' in on_event:
edge_attrs['fontcolor'] = 'green'
else:
edge_attrs = {}
g.add_edge(pydot.Edge(nodes[start_state], nodes[end_state],
**edge_attrs))
start = pydot.Node("__start__", shape="point", width="0.1",
xlabel='start', fontcolor='green', **node_attrs)
g.add_node(start)
g.add_edge(pydot.Edge(start, nodes[source.default_start_state], style='dotted'))
print("*" * len(graph_name))
print(graph_name)
print("*" * len(graph_name))
print(source.pformat())
print(g.to_string().strip())
g.write(options.filename, format=options.format)
print("Created %s at '%s'" % (options.format, options.filename))
# To make the svg more pretty use the following:
# $ xsltproc ../diagram-tools/notugly.xsl ./states.svg > pretty-states.svg
# Get diagram-tools from https://github.com/vidarh/diagram-tools.git
if __name__ == '__main__':
main()
|
|
"""
============================
``ctypes`` Utility Functions
============================
See Also
---------
load_library : Load a C library.
ndpointer : Array restype/argtype with verification.
as_ctypes : Create a ctypes array from an ndarray.
as_array : Create an ndarray from a ctypes array.
References
----------
.. [1] "SciPy Cookbook: ctypes", http://www.scipy.org/Cookbook/Ctypes
Examples
--------
Load the C library:
>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #DOCTEST: +ignore
Our result type, an ndarray that must be of type double, 1-dimensional,
and C-contiguous in memory:
>>> array_1d_double = np.ctypeslib.ndpointer(
... dtype=np.double,
... ndim=1, flags='CONTIGUOUS') #DOCTEST: +ignore
Our C-function typically takes an array and updates its values
in-place. For example::
void foo_func(double* x, int length)
{
int i;
for (i = 0; i < length; i++) {
x[i] = i*i;
}
}
We wrap it using:
>>> _lib.foo_func.restype = None #DOCTEST: +ignore
>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #DOCTEST: +ignore
Then, we're ready to call ``foo_func``:
>>> out = np.empty(15, dtype=np.double)
>>> _lib.foo_func(out, len(out)) #DOCTEST: +ignore
"""
__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library',
'c_intp', 'as_ctypes', 'as_array']
import sys, os
from numpy import integer, ndarray, dtype as _dtype, deprecate, array
from numpy.core.multiarray import _flagdict, flagsobj
try:
import ctypes
except ImportError:
ctypes = None
if ctypes is None:
def _dummy(*args, **kwds):
raise ImportError, "ctypes is not available."
ctypes_load_library = _dummy
load_library = _dummy
as_ctypes = _dummy
as_array = _dummy
from numpy import intp as c_intp
else:
import numpy.core._internal as nic
c_intp = nic._getintp_ctype()
del nic
# Adapted from Albert Strasheim
def load_library(libname, loader_path):
if ctypes.__version__ < '1.0.1':
import warnings
warnings.warn("All features of ctypes interface may not work " \
"with ctypes < 1.0.1")
ext = os.path.splitext(libname)[1]
if not ext:
# Try to load library with platform-specific name, otherwise
# default to libname.[so|pyd]. Sometimes, these files are built
# erroneously on non-linux platforms.
libname_ext = ['%s.so' % libname, '%s.pyd' % libname]
if sys.platform == 'win32':
libname_ext.insert(0, '%s.dll' % libname)
elif sys.platform == 'darwin':
libname_ext.insert(0, '%s.dylib' % libname)
else:
libname_ext = [libname]
loader_path = os.path.abspath(loader_path)
if not os.path.isdir(loader_path):
libdir = os.path.dirname(loader_path)
else:
libdir = loader_path
for ln in libname_ext:
try:
libpath = os.path.join(libdir, ln)
return ctypes.cdll[libpath]
except OSError, e:
pass
raise e
ctypes_load_library = deprecate(load_library, 'ctypes_load_library',
'load_library')
def _num_fromflags(flaglist):
num = 0
for val in flaglist:
num += _flagdict[val]
return num
_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
'OWNDATA', 'UPDATEIFCOPY']
def _flags_fromnum(num):
res = []
for key in _flagnames:
value = _flagdict[key]
if (num & value):
res.append(key)
return res
class _ndptr(object):
def from_param(cls, obj):
if not isinstance(obj, ndarray):
raise TypeError, "argument must be an ndarray"
if cls._dtype_ is not None \
and obj.dtype != cls._dtype_:
raise TypeError, "array must have data type %s" % cls._dtype_
if cls._ndim_ is not None \
and obj.ndim != cls._ndim_:
raise TypeError, "array must have %d dimension(s)" % cls._ndim_
if cls._shape_ is not None \
and obj.shape != cls._shape_:
raise TypeError, "array must have shape %s" % str(cls._shape_)
if cls._flags_ is not None \
and ((obj.flags.num & cls._flags_) != cls._flags_):
raise TypeError, "array must have flags %s" % \
_flags_fromnum(cls._flags_)
return obj.ctypes
from_param = classmethod(from_param)
# Factory for an array-checking class with from_param defined for
# use with ctypes argtypes mechanism
_pointer_type_cache = {}
def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
"""
Array-checking restype/argtypes.
An ndpointer instance is used to describe an ndarray in restypes
and argtypes specifications. This approach is more flexible than
using, for example, ``POINTER(c_double)``, since several restrictions
can be specified, which are verified upon calling the ctypes function.
These include data type, number of dimensions, shape and flags. If a
given array does not satisfy the specified restrictions,
a ``TypeError`` is raised.
Parameters
----------
dtype : data-type, optional
Array data-type.
ndim : int, optional
Number of array dimensions.
shape : tuple of ints, optional
Array shape.
flags : string or tuple of strings
Array flags; may be one or more of:
- C_CONTIGUOUS / C / CONTIGUOUS
- F_CONTIGUOUS / F / FORTRAN
- OWNDATA / O
- WRITEABLE / W
- ALIGNED / A
- UPDATEIFCOPY / U
Examples
--------
    >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
... ndim=1,
... flags='C_CONTIGUOUS')]
>>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
"""
if dtype is not None:
dtype = _dtype(dtype)
num = None
if flags is not None:
if isinstance(flags, str):
flags = flags.split(',')
elif isinstance(flags, (int, integer)):
num = flags
flags = _flags_fromnum(num)
elif isinstance(flags, flagsobj):
num = flags.num
flags = _flags_fromnum(num)
if num is None:
try:
flags = [x.strip().upper() for x in flags]
except:
raise TypeError, "invalid flags specification"
num = _num_fromflags(flags)
try:
return _pointer_type_cache[(dtype, ndim, shape, num)]
except KeyError:
pass
if dtype is None:
name = 'any'
elif dtype.names:
name = str(id(dtype))
else:
name = dtype.str
if ndim is not None:
name += "_%dd" % ndim
if shape is not None:
try:
strshape = [str(x) for x in shape]
except TypeError:
strshape = [str(shape)]
shape = (shape,)
shape = tuple(shape)
name += "_"+"x".join(strshape)
if flags is not None:
name += "_"+"_".join(flags)
else:
flags = []
klass = type("ndpointer_%s"%name, (_ndptr,),
{"_dtype_": dtype,
"_shape_" : shape,
"_ndim_" : ndim,
"_flags_" : num})
    # Cache under the same key that the lookup above uses.
    _pointer_type_cache[(dtype, ndim, shape, num)] = klass
return klass
if ctypes is not None:
ct = ctypes
################################################################
# simple types
# maps the numpy typecodes like '<f8' to simple ctypes types like
# c_double. Filled in by prep_simple.
_typecodes = {}
def prep_simple(simple_type, typestr):
"""Given a ctypes simple type, construct and attach an
__array_interface__ property to it if it does not yet have one.
"""
try: simple_type.__array_interface__
except AttributeError: pass
else: return
_typecodes[typestr] = simple_type
def __array_interface__(self):
return {'descr': [('', typestr)],
'__ref': self,
'strides': None,
'shape': (),
'version': 3,
'typestr': typestr,
'data': (ct.addressof(self), False),
}
simple_type.__array_interface__ = property(__array_interface__)
if sys.byteorder == "little":
TYPESTR = "<%c%d"
else:
TYPESTR = ">%c%d"
simple_types = [
((ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong), "i"),
((ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong), "u"),
((ct.c_float, ct.c_double), "f"),
]
    # Prep the numerical ctypes types:
for types, code in simple_types:
for tp in types:
prep_simple(tp, TYPESTR % (code, ct.sizeof(tp)))
################################################################
# array types
_ARRAY_TYPE = type(ct.c_int * 1)
def prep_array(array_type):
"""Given a ctypes array type, construct and attach an
__array_interface__ property to it if it does not yet have one.
"""
try: array_type.__array_interface__
except AttributeError: pass
else: return
shape = []
ob = array_type
while type(ob) == _ARRAY_TYPE:
shape.append(ob._length_)
ob = ob._type_
shape = tuple(shape)
ai = ob().__array_interface__
descr = ai['descr']
typestr = ai['typestr']
def __array_interface__(self):
return {'descr': descr,
'__ref': self,
'strides': None,
'shape': shape,
'version': 3,
'typestr': typestr,
'data': (ct.addressof(self), False),
}
array_type.__array_interface__ = property(__array_interface__)
################################################################
# public functions
def as_array(obj):
"""Create a numpy array from a ctypes array. The numpy array
shares the memory with the ctypes object."""
tp = type(obj)
try: tp.__array_interface__
except AttributeError: prep_array(tp)
return array(obj, copy=False)
def as_ctypes(obj):
"""Create and return a ctypes object from a numpy array. Actually
anything that exposes the __array_interface__ is accepted."""
ai = obj.__array_interface__
if ai["strides"]:
raise TypeError("strided arrays not supported")
if ai["version"] != 3:
raise TypeError("only __array_interface__ version 3 supported")
addr, readonly = ai["data"]
if readonly:
raise TypeError("readonly arrays unsupported")
tp = _typecodes[ai["typestr"]]
for dim in ai["shape"][::-1]:
tp = tp * dim
result = tp.from_address(addr)
result.__keep = ai
return result
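    # Hedged round-trip sketch for as_ctypes/as_array (illustrative only;
    # assumes a C-contiguous, writeable NumPy array):
    #
    #   >>> import numpy as np
    #   >>> a = np.arange(4, dtype=np.int32)
    #   >>> c_arr = as_ctypes(a)       # ctypes array sharing a's memory
    #   >>> as_array(c_arr)[0] = 99    # writes through to ``a``
    #   >>> a[0]
    #   99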
def test(level=1, verbosity=1):
from numpy.testing import NumpyTest
return NumpyTest().test(level, verbosity)
|
|
import logging
from allauth.socialaccount.models import SocialToken
from django.conf import settings
from requests_oauthlib import OAuth1Session, OAuth2Session
from .models import GithubProject, GithubOrganization, BitbucketProject, BitbucketTeam
from tastyapi import apiv2
log = logging.getLogger(__name__)
def get_oauth_session(user, provider):
tokens = SocialToken.objects.filter(account__user__username=user.username, app__provider=provider)
if tokens.exists():
token = tokens[0]
else:
return None
if provider == 'github':
session = OAuth2Session(
client_id=token.app.client_id,
token={
'access_token': str(token.token),
'token_type': 'bearer'
}
)
elif provider == 'bitbucket':
session = OAuth1Session(
token.app.client_id,
client_secret=token.app.secret,
resource_owner_key=token.token,
resource_owner_secret=token.token_secret
)
return session or None
def make_github_project(user, org, privacy, repo_json):
log.info('Trying GitHub: %s' % repo_json['full_name'])
if (repo_json['private'] is True and privacy == 'private' or
repo_json['private'] is False and privacy == 'public'):
project, created = GithubProject.objects.get_or_create(
full_name=repo_json['full_name'],
users__pk=user.pk,
)
if project.organization and project.organization != org:
log.debug('Not importing %s because mismatched orgs' % repo_json['name'])
return None
else:
project.organization = org
project.users.add(user)
project.name = repo_json['name']
project.description = repo_json['description']
project.git_url = repo_json['git_url']
project.ssh_url = repo_json['ssh_url']
project.html_url = repo_json['html_url']
project.json = repo_json
project.save()
return project
else:
log.debug('Not importing %s because mismatched type' % repo_json['name'])
def make_github_organization(user, org_json):
org, created = GithubOrganization.objects.get_or_create(
login=org_json.get('login'),
)
org.html_url = org_json.get('html_url')
org.name = org_json.get('name')
org.email = org_json.get('email')
org.json = org_json
org.users.add(user)
org.save()
return org
def get_token_for_project(project, force_local=False):
if not getattr(settings, 'ALLOW_PRIVATE_REPOS', False):
return None
token = None
try:
if getattr(settings, 'DONT_HIT_DB', True) and not force_local:
token = apiv2.project(project.pk).token().get()['token']
else:
for user in project.users.all():
tokens = SocialToken.objects.filter(account__user__username=user.username, app__provider='github')
if tokens.exists():
token = tokens[0].token
except Exception:
log.error('Failed to get token for user', exc_info=True)
return token
def github_paginate(session, url):
"""
    Scans through all GitHub paginated results and returns the
    concatenated list of results.
:param session: requests client instance
:param url: start url to get the data from.
See https://developer.github.com/v3/#pagination
"""
result = []
while url:
r = session.get(url)
result.extend(r.json())
next = r.links.get('next')
if next:
url = next.get('url')
else:
url = None
return result
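# Hedged usage sketch (the URL is the same one import_github uses below):
#
#   session = get_oauth_session(user, provider='github')
#   repos = github_paginate(
#       session, 'https://api.github.com/user/repos?per_page=100')
#   # ``repos`` is a flat list of repository dicts collected across pages.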
def import_github(user, sync):
""" Do the actual github import """
repo_type = getattr(settings, 'GITHUB_PRIVACY', 'public')
session = get_oauth_session(user, provider='github')
if sync and session:
# Get user repos
owner_resp = github_paginate(session, 'https://api.github.com/user/repos?per_page=100')
try:
for repo in owner_resp:
make_github_project(user=user, org=None, privacy=repo_type, repo_json=repo)
except TypeError, e:
print e
# Get org repos
try:
resp = session.get('https://api.github.com/user/orgs')
for org_json in resp.json():
org_resp = session.get('https://api.github.com/orgs/%s' % org_json['login'])
org_obj = make_github_organization(user=user, org_json=org_resp.json())
# Add repos
org_repos_resp = github_paginate(session, 'https://api.github.com/orgs/%s/repos?per_page=100' % org_json['login'])
for repo in org_repos_resp:
make_github_project(user=user, org=org_obj, privacy=repo_type, repo_json=repo)
except TypeError, e:
print e
return session is not None
###
# Bitbucket
###
def bitbucket_paginate(session, url):
"""
    Scans through all Bitbucket paginated results and returns the
    concatenated list of results.
:param session: requests client instance
:param url: start url to get the data from.
"""
result = []
while url:
r = session.get(url)
result.extend([r.json()])
next_url = r.json().get('next')
if next_url:
url = next_url
else:
url = None
return result
def make_bitbucket_project(user, org, privacy, repo_json):
log.info('Trying Bitbucket: %s' % repo_json['full_name'])
if (repo_json['is_private'] is True and privacy == 'private' or
repo_json['is_private'] is False and privacy == 'public'):
project, created = BitbucketProject.objects.get_or_create(
full_name=repo_json['full_name'],
)
if project.organization and project.organization != org:
log.debug('Not importing %s because mismatched orgs' % repo_json['name'])
return None
else:
project.organization = org
project.users.add(user)
project.name = repo_json['name']
project.description = repo_json['description']
project.git_url = repo_json['links']['clone'][0]['href']
project.ssh_url = repo_json['links']['clone'][1]['href']
project.html_url = repo_json['links']['html']['href']
project.vcs = repo_json['scm']
project.json = repo_json
project.save()
return project
else:
log.debug('Not importing %s because mismatched type' % repo_json['name'])
def process_bitbucket_json(user, json, repo_type):
try:
for page in json:
for repo in page['values']:
make_bitbucket_project(user=user, org=None, privacy=repo_type, repo_json=repo)
except TypeError, e:
print e
def import_bitbucket(user, sync):
""" Do the actual github import """
repo_type = getattr(settings, 'GITHUB_PRIVACY', 'public')
session = get_oauth_session(user, provider='bitbucket')
if sync and session:
# Get user repos
try:
owner_resp = bitbucket_paginate(session, 'https://bitbucket.org/api/2.0/repositories/{owner}'.format(owner=user.username))
process_bitbucket_json(user, owner_resp, repo_type)
except TypeError, e:
print e
# Get org repos
# resp = session.get('https://bitbucket.org/api/1.0/user/privileges/')
# for team in resp.json()['teams'].keys():
# org_resp = bitbucket_paginate(session, 'https://bitbucket.org/api/2.0/teams/{teamname}/repositories' % team)
# process_bitbucket_json(user, org_resp, repo_type)
return session is not None
|
|
import os
import time
import numpy as np
import asap3
from asap3.mpi import world
from asap3.io.trajectory import _GetAtoms
try:
from ase.io.bundletrajectory import BundleTrajectory as _BundleTrajectory
from ase.io.bundletrajectory import PickleBundleBackend
except ImportError:
class _BundleTrajectory:
"This version of ASE does not support BundleTrajectory."
def __init__(self, *args, **kwargs):
raise NotImplementedError(self.__doc__)
class PickleBundleBackend:
"This version of ASE does not support BundleTrajectory."
def __init__(self, *args, **kwargs):
raise NotImplementedError(self.__doc__)
class BundleTrajectory(_BundleTrajectory, _GetAtoms):
"""Reads and writes atoms into a .bundle directory.
The BundleTrajectory is an alternative way of storing
trajectories, intended for large-scale molecular dynamics
simulations, where a single flat file becomes unwieldy. Instead,
    the data is stored in a directory, a 'bundle' (the name is inspired
    by bundles in Mac OS, which are really just directories that the
    user is supposed to think of as a single file-like unit).
Parameters:
filename:
The name of the directory. Preferably ending in .bundle.
mode (optional):
The file opening mode. 'r' means open for reading, 'w' for
writing and 'a' for appending. Default: 'r'. If opening in
write mode, and the filename already exists, the old file is
renamed to .bak (any old .bak file is deleted), except if the
existing file is empty.
atoms (optional):
The atoms that will be written. Can only be specified in
write or append mode. If not specified, the atoms must be
given as an argument to the .write() method instead.
backup (optional):
Use backup=False to disable renaming of an existing file.
split (optional):
If set to True or False, determines whether a split file
format is used instead of the normal one. In the split
format, each processor in a parallel simulation writes its own
files inside the BundleTrajectory, instead of leaving all I/O
        to the master.  If not specified, the split format is used when
        there are more than one million atoms.  Ignored in serial simulations.
iolimit (optional):
Limits the number of MPI tasks performing I/O simultaneously,
to prevent overloading the NFS server. Only enforced if the
number of tasks is somewhat larger than the limit.
"""
def __init__(self, filename, mode='r', atoms=None,
backup=True, split=None, iolimit=10):
if split is None:
# Decide if subtype should be split based on number of atoms
split = (atoms is not None) and atoms.get_number_of_atoms() > 1000000
# Never use subtype split for serial simulations
if not getattr(atoms, "parallel", False):
split = False
# Use the collector object to join all data on master if subtype is normal
# and the simulation is parallel.
if not split and atoms is not None and getattr(atoms, "parallel", False):
atoms = asap3.Collector(atoms)
if split:
self.subtype = 'split'
else:
self.subtype = 'normal'
# self.iolimit may be needed if reading a split bundle.
if world.size < 1.5 * iolimit:
self.iolimit = None
else:
self.iolimit = iolimit
_BundleTrajectory.__init__(self, filename, mode, atoms,
backup=backup)
if self.subtype == 'split':
self.set_extra_data('ID') # So the atoms can be sorted when read.
def _set_defaults(self):
subtype = self.subtype # Preserve it
_BundleTrajectory._set_defaults(self)
self.subtype = subtype
self.datatypes['forces'] = False
def _set_backend(self, backend=None):
"""Set the backed doing the actual I/O."""
if backend is not None:
self.backend_name = backend
if self.backend_name == 'pickle':
if self.subtype == 'normal':
# Use the standard ASE backend
self.backend = PickleBundleBackend(self.master)
elif self.subtype == 'split':
self.backend = PickleSplitBundleBackend(self.master,
self.iolimit)
else:
raise NotImplementedError(
"This version of ASE cannot use BundleTrajectory with backend '%s'"
% self.backend_name)
def write(self, atoms=None):
if self.subtype == 'normal' and atoms is not None and getattr(atoms, "parallel", False):
atoms = asap3.Collector(atoms)
_BundleTrajectory.write(self, atoms)
def _make_bundledir(self, filename):
"""Make the main bundle directory.
Since all MPI tasks might write to it, all tasks must wait for
the directory to appear.
For performance reasons, the first frame directory is created immediately.
"""
assert not os.path.isdir(filename)
world.barrier()
if self.master:
self.log("Making directory "+filename)
os.mkdir(filename)
framedir = os.path.join(filename, "F0")
self.log("Making directory "+ framedir)
os.mkdir(framedir)
else:
i = 0
while not os.path.isdir(filename):
time.sleep(1)
i += 1
if i > 10:
self.log("Waiting %d seconds for %s to appear!"
% (i, filename))
def _make_framedir(self, frame):
"""Make subdirectory for the frame.
For a split bundle, all MPI tasks write to the frame
directory. The slaves must therefore wait until it becomes
available. To minimize the waiting time, frames are
pre-created.
"""
if self.subtype == 'split':
numdirs = 10
else:
numdirs = 1
if self.master:
for i in range(frame, frame+numdirs):
framedir = os.path.join(self.filename, "F"+str(i))
if not os.path.exists(framedir):
self.log("Making directory " + framedir)
os.mkdir(framedir)
framedir = os.path.join(self.filename, "F"+str(frame))
# Wait for the directory to appear
i = 0
while not os.path.isdir(framedir):
time.sleep(1)
i += 1
if i > 10:
self.log("Waiting %d seconds for %s to appear!"
% (i, framedir))
return framedir
def close(self):
"""Clean up when closing."""
if self.state == 'write' and self.master:
i = self.nframes
while True:
fname = os.path.join(self.filename, "F" + str(i))
if not os.path.exists(fname):
break
self.log("Closing, removing empty directory "+fname)
os.rmdir(fname)
i += 1
_BundleTrajectory.close(self)
def __del__(self):
self.close()
class PickleSplitBundleBackend(PickleBundleBackend):
"""A special backend for writing split bundles (ASAP only)."""
def __init__(self, master, iolimit):
# Store if this backend will actually write anything
self.writesmall = master
self.writelarge = True
self.writenonarray = master
self.iolimit = iolimit
if iolimit:
self.iostart = np.round(
np.linspace(0, world.size, iolimit+1)).astype(int)
self.iotag = 413151
self.lastwritedir = None
def write_small(self, framedir, smalldata):
"Write small data to be written jointly."
smalldata['fragments'] = world.size
PickleBundleBackend.write_small(self, framedir, smalldata)
def write(self, framedir, name, data):
"Write data to separate file."
if hasattr(data, "shape"):
# We need to store which kind of data was written in this frame
# so NFS synchronization is possible when closing file.
if framedir != self.lastwritedir:
self.lastwritedir = framedir
self.writenames = []
self.writenames.append(name)
# As expected, we are writing a NumPy array
self.iosync_start()
name = "%s_%d" % (name, world.rank)
PickleBundleBackend.write(self, framedir, name, data)
self.iosync_end()
elif self.writenonarray:
# If the data is not a NumPy array, only the master writes.
PickleBundleBackend.write(self, framedir, name, data)
def read(self, framedir, name):
"Read data from separate file."
self.iosync_start()
x = PickleBundleBackend.read(self, framedir, name)
self.iosync_end()
return x
def iosync_start(self):
"Prevents too many simultaneous IO tasks from trashing server."
if self.iolimit and world.rank not in self.iostart:
# I must wait.
token = np.zeros(1, int)
world.receive(token, world.rank-1, self.iotag)
def iosync_end(self):
if self.iolimit and world.rank+1 not in self.iostart:
# Another task is waiting for me.
token = np.zeros(1, int)
world.send(token, world.rank+1, self.iotag)
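    # Worked example of the token passing above (numbers are illustrative):
    # with world.size == 8 and iolimit == 2, ``iostart`` is [0, 4, 8], so
    # ranks 0 and 4 start I/O immediately; every other rank waits for a
    # token from rank-1 and hands one to rank+1 when done.  At most
    # ``iolimit`` tasks therefore touch the file server at a time.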
def close(self, log=None):
"""Make sure that all data is available on disk for all MPI tasks."""
if self.lastwritedir:
for name in self.writenames:
for part in range(world.size):
fname = os.path.join(self.lastwritedir, "%s_%d.pickle" % (name, part))
if not os.path.exists(fname):
if log:
log.write("Task %i is waiting for '%s' to appear.\n" %
(world.rank, fname))
for i in range(20):
time.sleep(5)
if os.path.exists(fname):
break
if not os.path.exists(fname) and log:
log.write("WARNING: Task %i gave up waiting for '%s'.\n" %
(world.rank, fname))
|
|
# Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
from tempfile import mkdtemp
from textwrap import dedent
from shutil import rmtree
from lunr.common.config import LunrConfig
from lunr.storage.helper.utils import ProcessError, ServiceUnavailable
from lunr.storage.helper import cgroup
class MockCgroupFs(object):
def __init__(self, empty=False):
if empty:
self.data = {
'blkio.throttle.write_iops_device': [],
'blkio.throttle.read_iops_device': []
}
else:
self.data = {
'blkio.throttle.write_iops_device': [
['1:0', '2'],
['1:1', '1'],
['2:0', '0'],
],
'blkio.throttle.read_iops_device': [
['1:0', '0'],
['1:1', '1'],
['2:0', '2'],
]
}
def read(self, param):
return self.data[param]
def write(self, param, value):
device, throttle = value.split()
entry = [device, throttle]
for line in self.data[param]:
if line[0] == device:
self.data[param].remove(line)
break
self.data[param].append(entry)
class TestCgroupFs(unittest.TestCase):
def setUp(self):
self.scratch = mkdtemp()
def tearDown(self):
rmtree(self.scratch)
def test_read(self):
value = "something"
with open(os.path.join(self.scratch, value), 'w') as f:
f.write("foo 1\n")
f.write("bar 2\n")
f.write("baz 3\n")
cgroup_fs = cgroup.CgroupFs(self.scratch)
self.assertEquals(list(cgroup_fs.read(value)),
[['foo', '1'],
['bar', '2'],
['baz', '3']])
def test_read_fail(self):
cgroup_fs = cgroup.CgroupFs(self.scratch)
self.assertEquals(list(cgroup_fs.read('nonexistent')), [])
def test_write(self):
value = "something"
cgroup_fs = cgroup.CgroupFs(self.scratch)
cgroup_fs.write(value, "foo")
with open(os.path.join(self.scratch, value), 'r') as f:
contents = f.read()
self.assertEquals(contents, "foo")
def test_write_fail(self):
badscratch = os.path.join(self.scratch, 'badpath')
cgroup_fs = cgroup.CgroupFs(badscratch)
cgroup_fs.write('garbage', "foo")
self.assertFalse(os.path.exists(badscratch))
class TestCgroupHelper(unittest.TestCase):
def setUp(self):
self.scratch = mkdtemp()
self.cgroups_path = os.path.join(self.scratch, 'cgroups')
self.conf = LunrConfig({
'storage': {
'run_dir': self.scratch
}
})
self.helper = cgroup.CgroupHelper(self.conf)
self.helper.cgroup_fs = MockCgroupFs()
def tearDown(self):
rmtree(self.scratch)
def test_all_cgroups(self):
data = self.helper.all_cgroups()
writes = data['blkio.throttle.write_iops_device']
self.assertEquals(writes['1:0'], '2')
self.assertEquals(writes['1:1'], '1')
self.assertEquals(writes['2:0'], '0')
reads = data['blkio.throttle.read_iops_device']
self.assertEquals(reads['1:0'], '0')
self.assertEquals(reads['1:1'], '1')
self.assertEquals(reads['2:0'], '2')
def test_get(self):
v1 = {'id': 'v1', 'device_number': '1:0'}
data = self.helper.get(v1)
self.assertEquals(data, {'blkio.throttle.write_iops_device': '2',
'blkio.throttle.read_iops_device': '0'})
def test_set(self):
v1 = {'id': 'v1', 'device_number': '1:0'}
v2 = {'id': 'v2', 'device_number': '1:1'}
self.helper.set(v1, '10')
self.helper.set(v2, '100', 'blkio.throttle.read_iops_device')
data = self.helper.get(v1)
self.assertEquals(data, {'blkio.throttle.write_iops_device': '10',
'blkio.throttle.read_iops_device': '10'})
updates_file = os.path.join(self.cgroups_path, "updates")
with open(updates_file, 'r') as f:
line = f.readline()
self.assertEquals(line, "v1 blkio.throttle.read_iops_device 10\n")
line = f.readline()
self.assertEquals(line, "v1 blkio.throttle.write_iops_device 10\n")
line = f.readline()
self.assertEquals(line, "v2 blkio.throttle.read_iops_device 100\n")
line = f.readline()
self.assertEquals(line, "")
def test_set_negative(self):
self.assertRaises(ValueError, self.helper.set, '1:1', '-42')
def test_set_nonint(self):
self.assertRaises(ValueError, self.helper.set, '1:1', 'monkey')
def test_set_zero(self):
v1 = {'id': 'v1', 'device_number': '42:42'}
self.helper.set(v1, '42')
data = self.helper.get(v1)
self.assertEquals(data, {'blkio.throttle.write_iops_device': '42',
'blkio.throttle.read_iops_device': '42'})
# 0 gets written to cgroupfs
self.helper.set(v1, '0')
data = self.helper.get(v1)
self.assertEquals(data, {'blkio.throttle.write_iops_device': '0',
'blkio.throttle.read_iops_device': '0'})
def test_load_initial_cgroups(self):
self.helper.cgroup_fs = MockCgroupFs(True)
volumes = [
{'id': 'v1', 'device_number': '1:1'},
{'id': 'v2', 'device_number': '1:2'},
{'id': 'v3', 'device_number': '1:3'},
{'id': 'v4', 'device_number': '1:4'},
]
os.mkdir(self.cgroups_path)
updates_file = os.path.join(self.cgroups_path, "updates")
with open(updates_file, 'w') as f:
f.write('v1 blkio.throttle.read_iops_device huh\n')
f.write('v1 blkio.throttle.read_iops_device 72\n')
f.write('v1 blkio.throttle.read_iops_device 100\n')
f.write('v2 blkio.throttle.read_iops_device 200\n')
f.write('v3 blkio.throttle.read_iops_device 300\n')
f.write('v1 blkio.throttle.write_iops_device 101\n')
f.write('v2 blkio.throttle.write_iops_device 202\n')
f.write('v3 blkio.throttle.write_iops_device 303\n')
f.write('v5 blkio.throttle.write_iops_device boom\n')
self.helper.load_initial_cgroups(volumes)
data = self.helper.all_cgroups()
reads = data['blkio.throttle.read_iops_device']
self.assertEquals(reads['1:1'], '100')
self.assertEquals(reads['1:2'], '200')
self.assertEquals(reads['1:3'], '300')
writes = data['blkio.throttle.write_iops_device']
self.assertEquals(writes['1:1'], '101')
self.assertEquals(writes['1:2'], '202')
self.assertEquals(writes['1:3'], '303')
def test_load_initial_cgroups_missing(self):
self.helper.cgroup_fs = MockCgroupFs(True)
self.helper.load_initial_cgroups([])
data = self.helper.all_cgroups()
self.assertEquals(data, {})
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
#
# Copyright 2015 The AMP HTML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the license.
#
"""A build script which (thus far) works on Ubuntu 14."""
from __future__ import print_function
import argparse
import glob
import logging
import os
import platform
import re
import subprocess
import sys
def Die(msg):
"""Prints error and exits with status 1.
Args:
msg: The error message to emit
"""
print(msg, file=sys.stderr)
sys.exit(1)
def EnsureNodeJsIsInstalled():
"""Ensure Node.js is installed and that 'node' is the command to run."""
logging.info('entering ...')
try:
output = subprocess.check_output(['node', '--eval', 'console.log("42")'])
if b'42' == output.strip():
return
except (subprocess.CalledProcessError, OSError):
pass
Die('Node.js not found. Try "apt-get install nodejs" or follow the install instructions at https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation')
def CheckPrereqs():
"""Checks that various prerequisites for this script are satisfied."""
logging.info('entering ...')
if platform.system() != 'Linux' and platform.system() != 'Darwin':
Die('Sorry, this script assumes Linux or Mac OS X thus far. '
'Please feel free to edit the source and fix it to your needs.')
# Ensure source files are available.
for f in [
'validator-main.protoascii', 'validator.proto', 'validator_gen_js.py',
'package.json', 'engine/validator.js', 'engine/validator_test.js',
'engine/validator-in-browser.js', 'engine/tokenize-css.js',
'engine/definitions.js', 'engine/parse-css.js', 'engine/parse-srcset.js',
'engine/parse-url.js'
]:
if not os.path.exists(f):
Die('%s not found. Must run in amp_validator source directory.' % f)
# Ensure protoc is available.
try:
libprotoc_version = subprocess.check_output(['protoc', '--version'])
except (subprocess.CalledProcessError, OSError):
Die('Protobuf compiler not found. Try "apt-get install protobuf-compiler" or follow the install instructions at https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation.')
# Ensure 'libprotoc 2.5.0' or newer.
m = re.search(b'^(\\w+) (\\d+)\\.(\\d+)\\.(\\d+)', libprotoc_version)
if (m.group(1) != b'libprotoc' or
(int(m.group(2)), int(m.group(3)), int(m.group(4))) < (2, 5, 0)):
Die('Expected libprotoc 2.5.0 or newer, saw: %s' % libprotoc_version)
# Ensure that the Python protobuf package is installed.
for m in ['descriptor', 'text_format', 'json_format']:
module = 'google.protobuf.%s' % m
try:
__import__(module)
except ImportError:
# Python3 needs pip3. Python 2 needs pip.
if sys.version_info < (3, 0):
Die('%s not found. Try "pip install protobuf" or follow the install '
'instructions at https://github.com/ampproject/amphtml/blob/master/'
'validator/README.md#installation' % module)
else:
Die('%s not found. Try "pip3 install protobuf" or follow the install '
'instructions at https://github.com/ampproject/amphtml/blob/master/'
'validator/README.md#installation' % module)
# Ensure that yarn is installed.
try:
subprocess.check_output(['yarn', '--version'])
except (subprocess.CalledProcessError, OSError):
Die('Yarn package manager not found. Run '
'"curl -o- -L https://yarnpkg.com/install.sh | bash" '
'or see https://yarnpkg.com/docs/install.')
# Ensure JVM installed. TODO: Check for version?
try:
subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError, OSError):
Die('Java missing. Try "apt-get install openjdk-7-jre" or follow the install instructions at https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation')
logging.info('... done')
def SetupOutDir(out_dir):
"""Sets up a clean output directory.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
if os.path.exists(out_dir):
subprocess.check_call(['rm', '-rf', out_dir])
os.mkdir(out_dir)
logging.info('... done')
def InstallNodeDependencies():
"""Installs the dependencies using yarn."""
logging.info('entering ...')
# Install the project dependencies specified in package.json into
# node_modules.
logging.info('installing AMP Validator engine dependencies ...')
subprocess.check_call(
['yarn', 'install'],
stdout=(open(os.devnull, 'wb') if os.environ.get('TRAVIS') else sys.stdout))
logging.info('installing AMP Validator nodejs dependencies ...')
subprocess.check_call(
['yarn', 'install'],
cwd='nodejs',
stdout=(open(os.devnull, 'wb') if os.environ.get('TRAVIS') else sys.stdout))
logging.info('... done')
def GenValidatorPb2Py(out_dir):
"""Calls the proto compiler to generate validator_pb2.py.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
subprocess.check_call(
['protoc', 'validator.proto', '--python_out=%s' % out_dir])
open('%s/__init__.py' % out_dir, 'w').close()
logging.info('... done')
def GenValidatorProtoascii(out_dir):
"""Assembles the validator protoascii file from the main and extensions.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
protoascii_segments = [open('validator-main.protoascii').read()]
extensions = glob.glob('extensions/*/validator-*.protoascii')
# In the Github project, the extensions are located in a sibling directory
# to the validator rather than a child directory.
if not extensions:
extensions = glob.glob('../extensions/*/validator-*.protoascii')
extensions.sort()
for extension in extensions:
protoascii_segments.append(open(extension).read())
f = open('%s/validator.protoascii' % out_dir, 'w')
f.write(''.join(protoascii_segments))
f.close()
logging.info('... done')
def GenValidatorProtoGeneratedJs(out_dir):
"""Calls validator_gen_js to generate validator-proto-generated.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
    # These imports happen late, within this method, because they don't
    # necessarily exist when the module starts running, and the ones that
    # probably do exist are checked by CheckPrereqs.
# pylint: disable=g-import-not-at-top
from google.protobuf import text_format
from google.protobuf import descriptor
from dist import validator_pb2
import validator_gen_js
# pylint: enable=g-import-not-at-top
out = []
validator_gen_js.GenerateValidatorGeneratedJs(
specfile=None,
validator_pb2=validator_pb2,
generate_proto_only=True,
generate_spec_only=False,
text_format=text_format,
html_format=None,
descriptor=descriptor,
out=out)
out.append('')
f = open('%s/validator-proto-generated.js' % out_dir, 'w')
f.write('\n'.join(out))
f.close()
logging.info('... done')
def GenValidatorGeneratedJs(out_dir):
"""Calls validator_gen_js to generate validator-generated.js and validator-generated.json.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
    # These imports happen late, within this method, because they don't
    # necessarily exist when the module starts running, and the ones that
    # probably do exist are checked by CheckPrereqs.
# pylint: disable=g-import-not-at-top
from google.protobuf import text_format
from google.protobuf import json_format
from google.protobuf import descriptor
from dist import validator_pb2
import validator_gen_js
# pylint: enable=g-import-not-at-top
out = []
validator_gen_js.GenerateValidatorGeneratedJs(
specfile='%s/validator.protoascii' % out_dir,
validator_pb2=validator_pb2,
generate_proto_only=False,
generate_spec_only=True,
text_format=text_format,
html_format=None,
descriptor=descriptor,
out=out)
out.append('')
f = open('%s/validator-generated.js' % out_dir, 'w')
f.write('\n'.join(out))
f.close()
out = []
validator_gen_js.GenerateValidatorGeneratedJson(
specfile='%s/validator.protoascii' % out_dir,
validator_pb2=validator_pb2,
text_format=text_format,
json_format=json_format,
out=out)
out.append('')
f = open('%s/validator-generated.json' % out_dir, 'w')
f.write('\n'.join(out))
f.close()
logging.info('... done')
def CompileWithClosure(js_files, definitions, entry_points, output_file):
"""Compiles the arguments with the Closure compiler for transpilation to ES5.
Args:
js_files: list of files to compile
definitions: list of definitions flags to closure compiler
entry_points: entry points (these won't be minimized)
output_file: name of the Javascript output file
"""
cmd = [
'java', '-jar', 'node_modules/google-closure-compiler-java/compiler.jar',
'--language_out=ES5_STRICT', '--dependency_mode=STRICT',
'--js_output_file=%s' % output_file
]
cmd += ['--entry_point=%s' % e for e in entry_points]
cmd += ['--output_manifest=%s' % ('%s.manifest' % output_file)]
cmd += [
'node_modules/google-closure-library/closure/**.js',
'!node_modules/google-closure-library/closure/**_test.js',
'node_modules/google-closure-library/third_party/closure/**.js',
'!node_modules/google-closure-library/third_party/closure/**_test.js'
]
cmd += js_files
cmd += definitions
subprocess.check_call(cmd)
def CompileValidatorMinified(out_dir):
"""Generates a minified validator script, which can be imported to validate.
Args:
out_dir: output directory
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/htmlparser.js',
'engine/parse-css.js', 'engine/parse-srcset.js',
'engine/parse-url.js', 'engine/tokenize-css.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir,
'engine/validator-in-browser.js', 'engine/validator.js',
'engine/amp4ads-parse-css.js', 'engine/keyframes-parse-css.js',
'engine/htmlparser-interface.js'
],
definitions=[],
entry_points=[
'amp.validator.validateString',
'amp.validator.renderValidationResult',
'amp.validator.renderErrorMessage'
],
output_file='%s/validator_minified.js' % out_dir)
logging.info('... done')
def RunSmokeTest(out_dir):
"""Runs a smoke test (minimum valid AMP and empty html file).
Args:
out_dir: output directory
"""
logging.info('entering ...')
# Run index.js on the minimum valid amp and observe that it passes.
p = subprocess.Popen(
[
'node', 'nodejs/index.js', '--validator_js',
'%s/validator_minified.js' % out_dir,
'testdata/feature_tests/minimum_valid_amp.html', '--format=text'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if (b'testdata/feature_tests/minimum_valid_amp.html: PASS\n', b'',
p.returncode) != (stdout, stderr, 0):
Die('Smoke test failed. returncode=%d stdout="%s" stderr="%s"' %
(p.returncode, stdout, stderr))
# Run index.js on an empty file and observe that it fails.
p = subprocess.Popen(
[
'node', 'nodejs/index.js', '--validator_js',
'%s/validator_minified.js' % out_dir,
'testdata/feature_tests/empty.html', '--format=text'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 1:
Die('smoke test failed. Expected p.returncode==1, saw: %s' % p.returncode)
if not stderr.startswith(b'testdata/feature_tests/empty.html:1:0 '
b'The mandatory tag \'html'):
Die('smoke test failed; stderr was: "%s"' % stderr)
logging.info('... done')
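# Illustrative sketch of the first command run above (all paths appear
# verbatim in RunSmokeTest):
#   node nodejs/index.js --validator_js dist/validator_minified.js \
#     testdata/feature_tests/minimum_valid_amp.html --format=text
# A passing run prints "testdata/feature_tests/minimum_valid_amp.html: PASS"
# and exits with status 0.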
def RunIndexTest():
"""Runs the index_test.js, which tests the NodeJS API.
"""
logging.info('entering ...')
p = subprocess.Popen(
['node', './index_test.js'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd='nodejs')
(stdout, stderr) = p.communicate()
if p.returncode != 0:
Die('index_test.js failed. returncode=%d stdout="%s" stderr="%s"' %
(p.returncode, stdout, stderr))
logging.info('... done')
def CompileValidatorTestMinified(out_dir):
"""Runs closure compiler for validator_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/htmlparser.js',
'engine/parse-css.js', 'engine/parse-srcset.js',
'engine/parse-url.js', 'engine/tokenize-css.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir,
'engine/validator-in-browser.js', 'engine/validator.js',
'engine/amp4ads-parse-css.js', 'engine/keyframes-parse-css.js',
'engine/htmlparser-interface.js', 'engine/validator_test.js'
],
definitions=[],
entry_points=['amp.validator.ValidatorTest'],
output_file='%s/validator_test_minified.js' % out_dir)
logging.info('... success')
def CompileHtmlparserTestMinified(out_dir):
"""Runs closure compiler for htmlparser_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/htmlparser.js', 'engine/htmlparser-interface.js',
'engine/htmlparser_test.js'
],
definitions=[],
entry_points=['amp.htmlparser.HtmlParserTest'],
output_file='%s/htmlparser_test_minified.js' % out_dir)
logging.info('... success')
def CompileParseCssTestMinified(out_dir):
"""Runs closure compiler for parse-css_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/parse-css.js', 'engine/parse-url.js',
'engine/tokenize-css.js', 'engine/json-testutil.js',
'engine/parse-css_test.js', '%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_css.ParseCssTest'],
output_file='%s/parse-css_test_minified.js' % out_dir)
logging.info('... success')
def CompileParseUrlTestMinified(out_dir):
"""Runs closure compiler for parse-url_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/parse-url.js', 'engine/parse-css.js',
'engine/tokenize-css.js', 'engine/json-testutil.js',
'engine/parse-url_test.js', '%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_url.ParseURLTest'],
output_file='%s/parse-url_test_minified.js' % out_dir)
logging.info('... success')
def CompileAmp4AdsParseCssTestMinified(out_dir):
"""Runs closure compiler for amp4ads-parse-css_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/amp4ads-parse-css_test.js',
'engine/parse-css.js', 'engine/parse-url.js',
'engine/amp4ads-parse-css.js', 'engine/tokenize-css.js',
'engine/json-testutil.js', '%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_css.Amp4AdsParseCssTest'],
output_file='%s/amp4ads-parse-css_test_minified.js' % out_dir)
logging.info('... success')
def CompileKeyframesParseCssTestMinified(out_dir):
"""Runs closure compiler for keyframes-parse-css_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/keyframes-parse-css_test.js',
'engine/parse-css.js', 'engine/parse-url.js',
'engine/keyframes-parse-css.js', 'engine/tokenize-css.js',
'engine/json-testutil.js', '%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_css.KeyframesParseCssTest'],
output_file='%s/keyframes-parse-css_test_minified.js' % out_dir)
logging.info('... success')
def CompileParseSrcsetTestMinified(out_dir):
"""Runs closure compiler for parse-srcset_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/parse-srcset.js',
'engine/json-testutil.js', 'engine/parse-srcset_test.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_srcset.ParseSrcsetTest'],
output_file='%s/parse-srcset_test_minified.js' % out_dir)
logging.info('... success')
def GenerateTestRunner(out_dir):
"""Generates a test runner: a nodejs script that runs our minified tests.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
f = open('%s/test_runner' % out_dir, 'w')
extensions_dir = 'extensions'
# In the Github project, the extensions are located in a sibling directory
# to the validator rather than a child directory.
if not os.path.isdir(extensions_dir):
extensions_dir = '../extensions'
f.write("""#!/usr/bin/env node
global.assert = require('assert');
global.fs = require('fs');
global.path = require('path');
var JasmineRunner = require('jasmine');
var jasmine = new JasmineRunner();
process.env.TESTDATA_ROOTS = 'testdata:%s'
require('./validator_test_minified');
require('./htmlparser_test_minified');
require('./parse-css_test_minified');
require('./parse-url_test_minified');
require('./amp4ads-parse-css_test_minified');
require('./keyframes-parse-css_test_minified');
require('./parse-srcset_test_minified');
jasmine.onComplete(function (passed) {
process.exit(passed ? 0 : 1);
});
jasmine.execute();
""" % extensions_dir)
os.chmod('%s/test_runner' % out_dir, 0o750)
logging.info('... success')
def RunTests(update_tests, out_dir):
"""Runs all the minified tests.
Args:
update_tests: a boolean indicating whether or not to update the test
output files.
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
env = os.environ.copy()
if update_tests:
env['UPDATE_VALIDATOR_TEST'] = '1'
subprocess.check_call(['node', '%s/test_runner' % out_dir], env=env)
logging.info('... success')
def Main(parsed_args):
"""The main method, which executes all build steps and runs the tests."""
logging.basicConfig(
format='[[%(filename)s %(funcName)s]] - %(message)s',
level=(logging.ERROR if os.environ.get('TRAVIS') else logging.INFO))
EnsureNodeJsIsInstalled()
CheckPrereqs()
InstallNodeDependencies()
SetupOutDir(out_dir='dist')
GenValidatorProtoascii(out_dir='dist')
GenValidatorPb2Py(out_dir='dist')
GenValidatorProtoGeneratedJs(out_dir='dist')
GenValidatorGeneratedJs(out_dir='dist')
CompileValidatorMinified(out_dir='dist')
RunSmokeTest(out_dir='dist')
RunIndexTest()
CompileValidatorTestMinified(out_dir='dist')
CompileHtmlparserTestMinified(out_dir='dist')
CompileParseCssTestMinified(out_dir='dist')
CompileParseUrlTestMinified(out_dir='dist')
CompileAmp4AdsParseCssTestMinified(out_dir='dist')
CompileKeyframesParseCssTestMinified(out_dir='dist')
CompileParseSrcsetTestMinified(out_dir='dist')
GenerateTestRunner(out_dir='dist')
RunTests(update_tests=parsed_args.update_tests, out_dir='dist')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Build script for the AMP Validator.')
parser.add_argument(
'--update_tests',
action='store_true',
help=('If set, validator_test will overwrite the .out test files with '
'the encountered test output.'))
Main(parser.parse_args())
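# Illustrative invocation (the script's file name is not shown here and is
# therefore hypothetical):
#   python build.py                  # run every build step and the tests
#   python build.py --update_tests   # additionally rewrite the .out files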
# Copyright 2014, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wrappers for VirtualIOServer and virtual storage mapping elements."""
import abc
import copy
import functools
import re
import six
from oslo_log import log as logging
import pypowervm.const as c
import pypowervm.entities as ent
from pypowervm.i18n import _
import pypowervm.util as u
import pypowervm.wrappers.base_partition as bp
import pypowervm.wrappers.entry_wrapper as ewrap
import pypowervm.wrappers.iocard as card
import pypowervm.wrappers.logical_partition as lpar
import pypowervm.wrappers.managed_system as ms
import pypowervm.wrappers.network as net
import pypowervm.wrappers.storage as stor
LOG = logging.getLogger(__name__)
# VIO Constants
_VIO_API_CAP = 'APICapable'
_VIO_VNIC_CAP = 'IsVNICCapable'
_VIO_VNIC_FAILOVER_CAP = 'VNICFailOverCapable'
_VIO_SVR_INST_CFG = 'ServerInstallConfiguration'
_VIO_LNAGGS = 'LinkAggregations'
_VIO_MGR_PASSTHRU_CAP = 'ManagerPassthroughCapable'
_VIO_MEDIA_REPOS = 'MediaRepositories'
_VIO_MVR_SVC_PARTITION = 'MoverServicePartition'
_VIO_NET_BOOT_DEVS = 'NetworkBootDevices'
_VIO_PAGING_SVC_PARTITION = 'PagingServicePartition'
_VIO_PVS = stor.PVS
_VIO_SEAS = net.NB_SEAS
_VIO_SSP_CAP = 'SharedStoragePoolCapable'
_VIO_SSP_VER = 'SharedStoragePoolVersion'
_VIO_STOR_POOLS = 'StoragePools'
_VIO_TRUNK_ADPTS = net.SEA_TRUNKS
_VIO_LICENSE = 'VirtualIOServerLicense'
_VIO_LICENSE_ACCEPTED = 'VirtualIOServerLicenseAccepted'
_VIO_VFC_MAPPINGS = 'VirtualFibreChannelMappings'
_VIO_VSCSI_MAPPINGS = 'VirtualSCSIMappings'
_VIO_FREE_IO_ADPTS_FOR_LNAGG = 'FreeIOAdaptersForLinkAggregation'
# "FreeEthernetBackingDevicesForSEA" is really misspelled in the schema.
_VIO_FREE_ETH_BACKDEVS_FOR_SEA = 'FreeEthenetBackingDevicesForSEA'
_VIO_VNIC_BACKDEVS = 'VirtualNICBackingDevices'
_VIO_CAPS = 'VirtualIOServerCapabilities'
_VIO_VSCSI_BUS = 'VirtualSCSIBus'
_VOL_UID = 'VolumeUniqueID'
_VOL_NAME = 'VolumeName'
_RESERVE_POLICY = 'ReservePolicy'
_IO_ADPT_CHOICE = 'IOAdapterChoice'
_IO_ADPT = 'IOAdapter'
_IO_LINK_AGG_ADPT_ID = 'AdapterID'
_IO_LINK_AGG_DESC = 'Description'
_IO_LINK_AGG_DEV_NAME = 'DeviceName'
_IO_LINK_AGG_DEV_TYPE = 'DeviceType'
_IO_LINK_AGG_DRC_NAME = 'DynamicReconfigurationConnectorName'
_IO_LINK_AGG_PHYS_LOC = 'PhysicalLocation'
_IO_LINK_AGG_UDID = 'UniqueDeviceID'
_VIRT_MEDIA_REPOSITORY_PATH = u.xpath(_VIO_MEDIA_REPOS,
'VirtualMediaRepository')
_IF_ADDR = u.xpath('IPInterface', 'IPAddress')
_ETHERNET_BACKING_DEVICE = u.xpath(_VIO_FREE_ETH_BACKDEVS_FOR_SEA,
'IOAdapterChoice', net.ETH_BACK_DEV)
_SEA_PATH = u.xpath(_VIO_SEAS, net.SHARED_ETH_ADPT)
# Mapping Constants
_MAP_STORAGE = 'Storage'
_MAP_TARGET_DEV = 'TargetDevice'
_MAP_CLIENT_LPAR = 'AssociatedLogicalPartition'
_MAP_PORT = 'Port'
_MAP_ORDER = (_MAP_CLIENT_LPAR, stor.CLIENT_ADPT, stor.SERVER_ADPT,
_MAP_STORAGE)
_VFC_MAP_ORDER = (_MAP_CLIENT_LPAR, stor.CLIENT_ADPT, _MAP_PORT,
stor.SERVER_ADPT, _MAP_STORAGE)
# VSCSI Bus Constants
_BUS_ASSOC_MAPS = 'AssociatedMappings'
_BUS_EL_ORDER = (_MAP_CLIENT_LPAR, stor.CLIENT_ADPT, stor.SERVER_ADPT,
_BUS_ASSOC_MAPS)
# VSCSI Storage/Target Device Constants
_STDEV_EL_ORDER = (_MAP_STORAGE, _MAP_TARGET_DEV)
_WWPNS_PATH = u.xpath(_VIO_VFC_MAPPINGS, 'VirtualFibreChannelMapping',
stor.CLIENT_ADPT, 'WWPNs')
_PVS_PATH = u.xpath(stor.PVS, stor.PHYS_VOL)
_VIOS_EL_ORDER = bp.BP_EL_ORDER + (
_VIO_API_CAP, _VIO_VNIC_CAP, _VIO_VNIC_FAILOVER_CAP, _VIO_SVR_INST_CFG,
_VIO_LNAGGS, _VIO_MGR_PASSTHRU_CAP, _VIO_MEDIA_REPOS,
_VIO_MVR_SVC_PARTITION, _VIO_NET_BOOT_DEVS, _VIO_PAGING_SVC_PARTITION,
_VIO_PVS, _VIO_SEAS, _VIO_SSP_CAP, _VIO_SSP_VER, _VIO_STOR_POOLS,
_VIO_TRUNK_ADPTS, _VIO_LICENSE, _VIO_LICENSE_ACCEPTED, _VIO_VFC_MAPPINGS,
_VIO_VSCSI_MAPPINGS, _VIO_FREE_IO_ADPTS_FOR_LNAGG,
_VIO_FREE_ETH_BACKDEVS_FOR_SEA, _VIO_VNIC_BACKDEVS, _VIO_CAPS,
_VIO_VSCSI_BUS)
LinkAggrIOAdapterChoice = card.LinkAggrIOAdapterChoice
class _VIOSXAGs(object):
"""Extended attribute groups relevant to Virtual I/O Server.
DEPRECATED. Use pypowervm.const.XAG and pypowervm.util.xag_attrs().
"""
@functools.total_ordering
class _Handler(object):
def __init__(self, name):
self.name = name
self.attrs = u.xag_attrs(name)
def __str__(self):
return self.name
def __eq__(self, other):
if type(other) is str:
return self.name == other
return self.name == other.name
def __lt__(self, other):
if type(other) is str:
return self.name < other
return self.name < other.name
def __hash__(self):
return hash(self.name)
_vals = dict(
NETWORK=_Handler(c.XAG.VIO_NET),
STORAGE=_Handler(c.XAG.VIO_STOR),
SCSI_MAPPING=_Handler(c.XAG.VIO_SMAP),
FC_MAPPING=_Handler(c.XAG.VIO_FMAP))
def __getattr__(self, item):
if item in self._vals:
import warnings
warnings.warn(_("The 'xags' property of the VIOS EntryWrapper "
"class is deprecated! Please use values from "
"pypowervm.const.XAG instead."),
DeprecationWarning)
return self._vals[item]
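# Illustrative sketch of the deprecated access path handled above (assuming a
# VIOS wrapper instance named vios_w): reading vios_w.xags.NETWORK emits a
# DeprecationWarning and returns a _Handler that compares equal to the string
# constant c.XAG.VIO_NET, so new code should use pypowervm.const.XAG directly.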
@ewrap.EntryWrapper.pvm_type('VirtualIOServer', child_order=_VIOS_EL_ORDER)
class VIOS(bp.BasePartition):
# DEPRECATED. Use pypowervm.const.XAG and pypowervm.util.xag_attrs().
xags = _VIOSXAGs()
@classmethod
def bld(cls, adapter, name, mem_cfg, proc_cfg, io_cfg=None):
"""Creates a new VIOS wrapper."""
return super(VIOS, cls)._bld_base(adapter, name, mem_cfg, proc_cfg,
env=bp.LPARType.VIOS, io_cfg=io_cfg)
@ewrap.Wrapper.xag_property(c.XAG.VIO_STOR)
def media_repository(self):
return self.element.find(_VIRT_MEDIA_REPOSITORY_PATH)
def get_vfc_wwpns(self):
"""Returns a list of the virtual FC WWPN pairs for the vios.
The response is a List of Lists.
Ex. (('c05076065a8b005a', 'c05076065a8b005b'),
('c05076065a8b0060', 'c05076065a8b0061'))
Note: ViosFCMapping extended attribute is required.
"""
return set([frozenset(x.split()) for x in
self._get_vals(_WWPNS_PATH)])
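# Illustrative usage sketch (WWPN values hypothetical): membership tests work
# on the returned set because each pair is a frozenset, e.g.
#   pair = frozenset({'c05076065a8b005a', 'c05076065a8b005b'})
#   hosted = pair in vios_w.get_vfc_wwpns()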
def get_pfc_wwpns(self):
"""Returns a set of the Physical FC Adapter WWPNs on this VIOS."""
path = u.xpath(bp.IO_CFG_ROOT, bp.IO_SLOTS_ROOT,
bp.IO_SLOT_ROOT, bp.ASSOC_IO_SLOT_ROOT,
bp.RELATED_IO_ADPT_ROOT, bp.IO_PFC_ADPT_ROOT,
bp.PFC_PORTS_ROOT, bp.PFC_PORT_ROOT,
bp.PFC_PORT_WWPN)
return set(self._get_vals(path))
def get_active_pfc_wwpns(self):
"""Returns a set of Physical FC Adapter WWPNs of 'active' ports."""
# The logic to check for active ports is poor. Right now it only
# checks if the port has NPIV connections available. If there is a
# FC, non-NPIV card...then this logic fails.
#
# This will suffice until the backing API adds more granular logic.
return [pfc.wwpn for pfc in self.pfc_ports if pfc.npiv_total_ports > 0]
@property
def pfc_ports(self):
"""The physical Fibre Channel ports assigned to the VIOS."""
path = u.xpath(bp.IO_CFG_ROOT, bp.IO_SLOTS_ROOT,
bp.IO_SLOT_ROOT, bp.ASSOC_IO_SLOT_ROOT,
bp.RELATED_IO_ADPT_ROOT, bp.IO_PFC_ADPT_ROOT,
bp.PFC_PORTS_ROOT, bp.PFC_PORT_ROOT)
elems = self._find(path, use_find_all=True)
resp = []
for elem in elems:
resp.append(bp.PhysFCPort.wrap(elem))
return resp
@property
def is_license_accepted(self):
return self._get_val_bool(_VIO_LICENSE_ACCEPTED, default=True)
def hdisk_reserve_policy(self, disk_uuid):
"""Get the reserve policy for an hdisk.
:param disk_uuid: The uuid of the hdisk.
:returns: The reserve policy or None if the disk isn't found.
"""
policy = None
# Get all the physical volume elements and look for a diskname match
volumes = self.element.findall(_PVS_PATH)
for volume in volumes:
vol_uuid = volume.findtext(_VOL_UID)
match = re.search(r'^[0-9]{5}([0-9A-F]{32}).+$', vol_uuid)
if match and match.group(1) == disk_uuid:
policy = volume.findtext(_RESERVE_POLICY)
break
return policy
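# Illustrative sketch of the VolumeUniqueID match above (value made up): for a
# UID of the form '01234' + 32 uppercase hex characters + a trailing suffix,
# the regex captures the 32-hex-character group, and that captured group is
# what gets compared against disk_uuid.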
def hdisk_from_uuid(self, disk_uuid):
"""Get the hdisk name from the volume uuid.
:param disk_uuid: The uuid of the hdisk.
:returns: The associated hdisk name.
"""
name = None
# Get all the physical volume elements and look for a diskname match
volumes = self.element.findall(_PVS_PATH)
for volume in volumes:
vol_uuid = volume.findtext(stor.UDID)
if vol_uuid:
LOG.debug('get_hdisk_from_uuid match: %s' % vol_uuid)
LOG.debug('get_hdisk_from_uuid disk_uuid: %s' % disk_uuid)
if vol_uuid == disk_uuid:
name = volume.findtext(_VOL_NAME)
break
return name
@property
def is_mover_service_partition(self):
return self._get_val_bool(_VIO_MVR_SVC_PARTITION, False)
@is_mover_service_partition.setter
def is_mover_service_partition(self, value):
"""Set the Mover Service Partition designation.
:param value: Boolean indicating whether the VIOS should be designated
as a Mover Service Partition.
"""
self.set_parm_value(_VIO_MVR_SVC_PARTITION,
u.sanitize_bool_for_api(value))
@ewrap.Wrapper.xag_property(c.XAG.VIO_NET)
def ip_addresses(self):
"""Returns a list of IP addresses assigned to the VIOS.
Will only return the IP Addresses that can be made known to the system.
This only includes online Shared Ethernet Adapters and Ethernet Backing
Devices. It will not include, for example, a VLAN adapter.
This is a READ-ONLY list.
"""
ip_list = []
# Get all the shared ethernet adapters and free
# ethernet devices and pull the IPs
seas = self.element.findall(_SEA_PATH)
free_eths = self.element.findall(_ETHERNET_BACKING_DEVICE)
for eth in seas + free_eths:
ip = eth.findtext(_IF_ADDR)
if ip and ip not in ip_list:
ip_list.append(ip)
return tuple(ip_list)
@ewrap.Wrapper.xag_property(c.XAG.VIO_FMAP)
def vfc_mappings(self):
"""Returns a WrapperElemList of the VFCMapping objects."""
es = ewrap.WrapperElemList(self._find_or_seed(
_VIO_VFC_MAPPINGS, attrib=u.xag_attrs(c.XAG.VIO_FMAP)), VFCMapping)
return es
@vfc_mappings.setter
def vfc_mappings(self, new_mappings):
self.replace_list(_VIO_VFC_MAPPINGS, new_mappings,
attrib=u.xag_attrs(c.XAG.VIO_FMAP))
@ewrap.Wrapper.xag_property(c.XAG.VIO_SMAP)
def scsi_mappings(self):
"""Returns a WrapperElemList of the VSCSIMapping objects."""
# TODO(efried): remove parent_entry once VIOS has pg83 in Events
es = ewrap.WrapperElemList(
self._find_or_seed(_VIO_VSCSI_MAPPINGS,
attrib=u.xag_attrs(c.XAG.VIO_SMAP)),
VSCSIMapping, parent_entry=self)
return es
@scsi_mappings.setter
def scsi_mappings(self, new_mappings):
self.replace_list(_VIO_VSCSI_MAPPINGS, new_mappings,
attrib=u.xag_attrs(c.XAG.VIO_SMAP))
@ewrap.Wrapper.xag_property(c.XAG.VIO_NET)
def seas(self):
es = ewrap.WrapperElemList(self._find_or_seed(
_VIO_SEAS, attrib=u.xag_attrs(c.XAG.VIO_NET)), net.SEA)
return es
@ewrap.Wrapper.xag_property(c.XAG.VIO_NET)
def trunk_adapters(self):
es = ewrap.WrapperElemList(
self._find_or_seed(_VIO_TRUNK_ADPTS,
attrib=u.xag_attrs(c.XAG.VIO_NET)),
net.TrunkAdapter)
return es
def derive_orphan_trunk_adapters(self):
"""Builds a list of trunk adapters not attached to a SEA."""
sea_trunks = []
for sea in self.seas:
sea_trunks.append(sea.primary_adpt)
sea_trunks.extend(sea.addl_adpts)
# Subtract the list of our adapters from there.
orig_trunks = copy.copy(self.trunk_adapters)
orphan_trunks = copy.copy(self.trunk_adapters)
for sea_trunk in sea_trunks:
# We can't just remove, because the trunk adapters from the SEA
# carry the vswitch ref instead of the id... so we compare on the
# device name instead.
for ta in orig_trunks:
if ta.dev_name == sea_trunk.dev_name:
orphan_trunks.remove(ta)
break
return orphan_trunks
@ewrap.Wrapper.xag_property(c.XAG.VIO_STOR)
def phys_vols(self):
"""Will return a list of physical volumes attached to this VIOS.
This list is READ-ONLY.
"""
# TODO(efried): remove parent_entry once VIOS has pg83 in Events
es = ewrap.WrapperElemList(
self._find_or_seed(stor.PVS, attrib=u.xag_attrs(c.XAG.VIO_STOR)),
stor.PV, parent_entry=self)
es_list = [es_val for es_val in es]
return tuple(es_list)
@ewrap.Wrapper.xag_property(c.XAG.VIO_NET)
def io_adpts_for_link_agg(self):
es = ewrap.WrapperElemList(
self._find_or_seed(_VIO_FREE_IO_ADPTS_FOR_LNAGG,
attrib=u.xag_attrs(c.XAG.VIO_NET)),
LinkAggrIOAdapterChoice)
return es
def can_lpm(self, host_w, migr_data=None):
"""Determines if a partition is ready for Live Partition Migration.
:return capable: False, VIOS types are not LPM capable
:return reason: A message that will indicate why it was not
capable of LPM.
"""
return False, _('Partition of VIOS type is not LPM capable')
@property
def vnic_capable(self):
return self._get_val_bool(_VIO_VNIC_CAP)
@property
def vnic_failover_capable(self):
return self._get_val_bool(_VIO_VNIC_FAILOVER_CAP)
@six.add_metaclass(abc.ABCMeta)
@ewrap.Wrapper.base_pvm_type
class VStorageMapping(ewrap.ElementWrapper):
"""Base class for VSCSIMapping and VFCMapping."""
@staticmethod
def crt_related_href(adapter, host_uuid, client_lpar_uuid):
"""Creates the Element for the 'AssociatedLogicalPartition'.
:param adapter: A pypowervm.adapter.Adapter.
:param host_uuid: The UUID of the ManagedSystem. Specify None to get a
ROOT link.
:param client_lpar_uuid: The UUID of the LPAR to which the mapping is
to be attached.
"""
if host_uuid is None:
return adapter.build_href(lpar.LPAR.schema_type,
root_id=client_lpar_uuid,
xag=[])
else:
return adapter.build_href(ms.System.schema_type,
root_id=host_uuid,
child_type=lpar.LPAR.schema_type,
child_id=client_lpar_uuid,
xag=[])
@property
def client_lpar_href(self):
"""Returns the Client LPAR (if any) URI.
If None - then no client is connected.
"""
return self.get_href(_MAP_CLIENT_LPAR, one_result=True)
def _client_lpar_href(self, href):
self.set_href(_MAP_CLIENT_LPAR, href)
@property
def client_adapter(self):
"""Returns the Client side V*ClientAdapterElement.
If None - then no client is connected.
"""
elem = self.element.find(stor.CLIENT_ADPT)
if elem is not None:
return self._client_adapter_cls.wrap(elem)
return None
def _client_adapter(self, ca):
elem = self._find_or_seed(stor.CLIENT_ADPT)
self.element.replace(elem, ca.element)
@property
def server_adapter(self):
"""Returns the Virtual I/O Server side V*ServerAdapterElement."""
return self._server_adapter_cls.wrap(
self.element.find(stor.SERVER_ADPT))
def _server_adapter(self, sa):
elem = self._find_or_seed(stor.SERVER_ADPT)
self.element.replace(elem, sa.element)
@ewrap.Wrapper.base_pvm_type
class _STDevMethods(ewrap.ElementWrapper):
"""Methods for storage and target common to STDev and VSCSIMapping."""
def _set_stg_and_tgt(self, adapter, stg_ref, lua=None, target_name=None):
self.backing_storage = stg_ref
if lua is not None or target_name is not None:
# Build a *TargetDev of the appropriate type for this stg_ref
self._target_dev(stg_ref.target_dev_type.bld(adapter, lua,
target_name))
@property
def backing_storage(self):
"""The backing storage element (if applicable).
This element may be a PV, LU, VirtualDisk, or VirtualOpticalMedia.
May return None.
"""
elem = self.element.find(_MAP_STORAGE)
if elem is None:
return None
# If backing storage exists, it comprises a single child of elem, but
# its concrete type is not known up front, so list the children and
# wrap the single child below.
stor_elems = list(elem)
if len(stor_elems) != 1:
return None
# TODO(efried): parent_entry not needed once VIOS has pg83 in Events
parent_entry = getattr(self, 'parent_entry', None)
# The storage element may be any one of VDisk, VOptMedia, PV, or LU.
# Allow ElementWrapper to detect (from the registry) and wrap correctly
return ewrap.ElementWrapper.wrap(stor_elems[0],
parent_entry=parent_entry)
@backing_storage.setter
def backing_storage(self, stg):
"""Sets the backing storage of this mapping to a VDisk, VOpt, LU or PV.
:param stg: Either a VDisk, VOpt, LU or PV wrapper representing the
backing storage to assign.
"""
# Always replace. Because while the storage has one element, it can't
# inject properly if the backing type changes (ex. cloning from vOpt to
# vDisk).
stor_elem = ent.Element(_MAP_STORAGE, self.adapter, attrib={},
children=[])
stor_elem.inject(stg.element)
self.inject(stor_elem)
@property
def target_dev(self):
"""The target device associated with the backing storage.
May be any of {storage_type}TargetDev for {storage_type} in VDisk,
VOpt, LU or PV.
"""
elem = self.element.find(_MAP_TARGET_DEV)
if elem is None:
return None
# If the virtual target device exists, it comprises a single child of
# elem. But the exact type is unknown.
vtd_elems = list(elem)
if len(vtd_elems) != 1:
return None
# Let ElementWrapper.wrap figure out (from the registry) the
# appropriate return type.
return ewrap.ElementWrapper.wrap(vtd_elems[0])
def _target_dev(self, vtd):
"""Sets the target device of this mapping.
:param vtd: A {storage_type}TargetDev ElementWrapper representing the
virtual target device to assign.
"""
vtd_elem = ent.Element(_MAP_TARGET_DEV, self.adapter, attrib={},
children=[])
vtd_elem.inject(vtd.element)
self.inject(vtd_elem)
@ewrap.ElementWrapper.pvm_type('VirtualSCSIStorageAndTargetDevice',
has_metadata=True, child_order=_STDEV_EL_ORDER)
class STDev(_STDevMethods):
"""Mapping backing storage and target device.
Used as a mixin for VSCSIMapping, and first-class internal Element for
VSCSIBus.
"""
@classmethod
def bld(cls, adapter, stg_ref, lua=None):
"""Build a new STDev - only to be used with VSCSIBus.
:param adapter: The pypowervm Adapter that will be used to create the
mapping.
:param stg_ref: The backing storage element (PV, LU, VDisk, or
VOptMedia) to use in the new mapping.
:param lua: (Optional. Default: None) Logical Unit Address to set on
the TargetDevice. If None, the LUA will be assigned by the
server. Should be specified for all of the VSCSIMappings
for a particular bus, or none of them.
:return: The newly-created STDev.
"""
stdev = super(STDev, cls)._bld(adapter)
stdev._set_stg_and_tgt(adapter, stg_ref, lua=lua)
return stdev
@ewrap.ElementWrapper.pvm_type('VirtualSCSIMapping', has_metadata=True,
child_order=_MAP_ORDER)
class VSCSIMapping(VStorageMapping, _STDevMethods):
"""The mapping of a VIOS SCSI adapter to the Client LPAR SCSI adapter.
PowerVM provides a mechanism for Server/Client adapters to provide storage
connectivity (for LPARs that do not have dedicated hardware). This mapping
describes the Virtual I/O Server's Server SCSI Adapter and the Client
LPAR's Client SCSI Adapter.
To create a new Client SCSI Adapter, create a new mapping and update the
Virtual I/O Server. This will be an atomic operation that creates the
adapters on the Virtual I/O Server and Client LPAR, and then maps them
properly. There is no need to pre-create the adapters before creating a
new mapping.
"""
_client_adapter_cls = stor.VSCSIClientAdapterElement
_server_adapter_cls = stor.VSCSIServerAdapterElement
@classmethod
def bld(cls, adapter, host_uuid, client_lpar_uuid, stg_ref,
lpar_slot_num=None, lua=None, target_name=None):
"""Creates a new VSCSIMapping
:param adapter: The pypowervm Adapter that will be used to create the
mapping.
:param host_uuid: Not used.
:param client_lpar_uuid: The client LPAR's UUID.
:param stg_ref: The backing storage element (PV, LU, VDisk, or
VOptMedia) to use in the new mapping.
:param lpar_slot_num: (Optional, Default: None) The client slot number
to use in the new mapping. If None then we let
REST choose the slot number.
:param lua: (Optional. Default: None) Logical Unit Address to set on
the TargetDevice. If None, the LUA will be assigned by the
server. Should be specified for all of the VSCSIMappings
for a particular bus, or none of them.
:param target_name: (Optional, Default: None) Name of the TargetDevice
If None, the target_name will be assigned by the
server.
:return: The newly-created VSCSIMapping.
"""
s_map = super(VSCSIMapping, cls)._bld(adapter)
# Create the 'Associated Logical Partition' element of the mapping.
s_map._client_lpar_href(
cls.crt_related_href(adapter, None, client_lpar_uuid))
s_map._client_adapter(stor.VClientStorageAdapterElement.bld(
adapter, slot_num=lpar_slot_num))
s_map._server_adapter(stor.VServerStorageAdapterElement.bld(adapter))
s_map._set_stg_and_tgt(adapter, stg_ref, lua=lua,
target_name=target_name)
return s_map
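# Illustrative usage sketch (adapter, UUIDs and the storage wrapper are
# assumed to exist; not a definitive recipe): build a mapping and push it
# back through the owning VIOS wrapper's scsi_mappings property:
#   new_map = VSCSIMapping.bld(adapter, host_uuid, lpar_uuid, pv_wrapper)
#   vios_w.scsi_mappings = list(vios_w.scsi_mappings) + [new_map]
# followed by an update of vios_w via the adapter.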
@classmethod
def bld_from_existing(cls, existing_map, stg_ref, lpar_slot_num=None,
lua=None, target_name=None):
"""Clones the existing mapping, but swaps in the new storage elem.
:param existing_map: The existing VSCSIMapping to clone.
:param stg_ref: The backing storage element (PV, LU, VDisk, or
VOptMedia) to use in the new mapping. If explicitly
None, the new mapping is created with no storage.
:param lpar_slot_num: (Optional, Default: None) The client slot number
to use in the mapping. If None then the
existing slot number is used.
:param lua: (Optional. Default: None) Logical Unit Address to set on
the TargetDevice. If None, the LUA will be assigned by the
server. Should be specified for all of the VSCSIMappings
for a particular bus, or none of them.
:param target_name: (Optional, Default: None) Name of the TargetDevice
If None, the target_name will be assigned by the
server.
:return: The newly-created VSCSIMapping.
"""
# We do NOT want the source's TargetDevice element, so we explicitly
# copy the pieces we want from the original mapping.
new_map = super(VSCSIMapping, cls)._bld(existing_map.adapter)
if existing_map.client_lpar_href is not None:
new_map._client_lpar_href(existing_map.client_lpar_href)
if existing_map.client_adapter is not None:
new_map._client_adapter(copy.deepcopy(existing_map.client_adapter))
if existing_map.server_adapter is not None:
new_map._server_adapter(copy.deepcopy(existing_map.server_adapter))
if stg_ref is not None:
new_map.backing_storage = copy.deepcopy(stg_ref)
if lpar_slot_num is not None:
# Set the slot number and remove the 'UseNextAvailableSlot' tag.
new_map.client_adapter._lpar_slot_num(lpar_slot_num)
new_map.client_adapter._use_next_slot(False)
if any((lua, target_name)):
if stg_ref is None:
raise ValueError(_("Can't specify target device LUA without a "
"backing storage device!"))
# Build a *TargetDev of the appropriate type for this stg_ref
new_map._target_dev(stg_ref.target_dev_type.bld(
existing_map.adapter, lua, target_name))
return new_map
@ewrap.EntryWrapper.pvm_type('VirtualSCSIBus', child_order=_BUS_EL_ORDER)
class VSCSIBus(ewrap.EntryWrapper, VStorageMapping):
"""Virtual SCSI Bus, first-class CHILD of VirtualIOServer.
PowerVM provides a mechanism for Server/Client adapters to provide storage
connectivity (for LPARs that do not have dedicated hardware). This mapping
describes the Virtual I/O Server's Server SCSI Adapter and the Client
LPAR's Client SCSI Adapter.
To create a new Client SCSI Adapter, create a new mapping and update the
Virtual I/O Server. This will be an atomic operation that creates the
adapters on the Virtual I/O Server and Client LPAR, and then maps them
properly. There is no need to pre-create the adapters before creating a
new mapping.
"""
_client_adapter_cls = stor.VSCSIClientAdapterElement
_server_adapter_cls = stor.VSCSIServerAdapterElement
@classmethod
def bld(cls, adapter, client_lpar_uuid, lpar_slot_num=None):
"""Creates a new VSCSIBus with no storage.
Storage should be added afterwards by modifying the mappings property.
:param adapter: The pypowervm Adapter that will be used to create the
bus.
:param client_lpar_uuid: The client LPAR's UUID.
:param lpar_slot_num: (Optional, Default: None) The client slot number
to use in the new mapping. If None then we let
REST choose the slot number.
:return: The newly-created VSCSIBus.
"""
s_bus = super(VSCSIBus, cls)._bld(adapter)
# Create the 'Associated Logical Partition' element of the mapping.
s_bus._client_lpar_href(adapter.build_href(lpar.LPAR.schema_type,
client_lpar_uuid, xag=[]))
s_bus._client_adapter(stor.VClientStorageAdapterElement.bld(
adapter, slot_num=lpar_slot_num))
s_bus._server_adapter(stor.VServerStorageAdapterElement.bld(adapter))
return s_bus
@classmethod
def bld_from_existing(cls, existing_bus):
"""Clones a bus's LPAR and client/server adapters, but not storage.
:param existing_bus: The existing VSCSIBus to clone.
:return: The newly-created VSCSIBus.
"""
# We do NOT want the source's storage, so we explicitly copy the pieces
# we want from the original bus.
new_bus = super(VSCSIBus, cls)._bld(existing_bus.adapter)
if existing_bus.client_lpar_href is not None:
new_bus._client_lpar_href(existing_bus.client_lpar_href)
if existing_bus.client_adapter is not None:
new_bus._client_adapter(copy.deepcopy(existing_bus.client_adapter))
if existing_bus.server_adapter is not None:
new_bus._server_adapter(copy.deepcopy(existing_bus.server_adapter))
return new_bus
@property
def mappings(self):
return ewrap.WrapperElemList(self._find_or_seed(
_BUS_ASSOC_MAPS), STDev)
@mappings.setter
def mappings(self, stdevs):
self.replace_list(_BUS_ASSOC_MAPS, stdevs)
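# Illustrative usage sketch (hedged; wrapper names assumed): create a bus with
# no storage, then attach backing devices via STDev:
#   bus = VSCSIBus.bld(adapter, client_lpar_uuid)
#   bus.mappings = [STDev.bld(adapter, pv_wrapper)]
# The bus is then created/updated as a CHILD of its owning VirtualIOServer.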
@ewrap.ElementWrapper.pvm_type('VirtualFibreChannelMapping', has_metadata=True,
child_order=_VFC_MAP_ORDER)
class VFCMapping(VStorageMapping):
"""The mapping of a VIOS FC adapter to the Client LPAR FC adapter.
PowerVM provides a mechanism for Server/Client adapters to provide storage
connectivity (for LPARs that do not have dedicated hardware). This mapping
describes the Virtual I/O Server's Server Fibre Channel (FC) Adapter and
the Client LPAR's Client FC Adapter.
To create a new Client FC Adapter, create a new mapping and update the
Virtual I/O Server. This will be an atomic operation that creates the
adapters on the Virtual I/O Server and Client LPAR, and then maps them
properly. There is no need to pre-create the adapters before creating a
new mapping.
"""
_client_adapter_cls = stor.VFCClientAdapterElement
_server_adapter_cls = stor.VFCServerAdapterElement
@classmethod
def bld(cls, adapter, host_uuid, client_lpar_uuid, backing_phy_port,
client_wwpns=None, lpar_slot_num=None):
"""Creates the VFCMapping object to connect to a Physical FC Port.
This is used when creating a new mapping between a Client LPAR and the
VirtualIOServer. This creates a Fibre Channel connection between an
LPAR and a physical Fibre Port.
The response object should be used for creating the mapping via an
adapter.update() to the Virtual I/O Server. The response object
will not have the UUIDs (as those are not assigned until the update is
done). This holds true for certain other elements as well.
:param adapter: The pypowervm Adapter that will be used to create the
mapping.
:param host_uuid: The host system's UUID.
:param client_lpar_uuid: The client LPAR's UUID that the disk should be
connected to.
:param backing_phy_port: The name of the physical FC port that backs
the virtual adapter.
:param client_wwpns: An optional set of two WWPNs that can be set upon
the mapping. These represent the client VM's
WWPNs on the client FC adapter. If not set, the
system will dynamically generate them.
:param lpar_slot_num: An optional integer to be used as the Virtual
slot number on the client adapter
:returns: The new VFCMapping Wrapper.
"""
s_map = super(VFCMapping, cls)._bld(adapter)
# Create the 'Associated Logical Partition' element of the mapping.
s_map._client_lpar_href(
cls.crt_related_href(adapter, host_uuid, client_lpar_uuid))
s_map._client_adapter(stor.VFCClientAdapterElement.bld(
adapter, wwpns=client_wwpns, slot_num=lpar_slot_num))
# Create the backing port with required 'Port' tag.
s_map.backing_port = bp.PhysFCPort.bld_ref(adapter, backing_phy_port,
ref_tag='Port')
s_map._server_adapter(stor.VFCServerAdapterElement.bld(adapter))
return s_map
@property
def backing_port(self):
"""The Virtual I/O Server backing PhysicalFCPort.
If None - then the vfcmap isn't done and no physical port is backing
it.
"""
elem = self.element.find(_MAP_PORT)
if elem is not None:
return bp.PhysFCPort.wrap(elem)
return None
@backing_port.setter
def backing_port(self, value):
"""Sets the backing port."""
elem = self._find_or_seed(_MAP_PORT)
self.element.replace(elem, value.element)
# -*- coding: utf-8 -*-
import httplib as http
import logging
from bs4 import BeautifulSoup
from flask import request
from framework.mongo.utils import to_mongo_key
from framework.exceptions import HTTPError
from framework.auth.utils import privacy_info_handle
from framework.auth.decorators import must_be_logged_in
from framework.flask import redirect
from website.addons.wiki import settings
from website.addons.wiki import utils as wiki_utils
from website.profile.utils import get_gravatar
from website.project.views.node import _view_project
from website.project.model import has_anonymous_link
from website.project.decorators import (
must_be_contributor_or_public,
must_have_addon, must_not_be_registration,
must_be_valid_project,
must_have_permission,
must_have_write_permission_or_public_wiki,
)
from website.exceptions import NodeStateError
from .exceptions import (
NameEmptyError,
NameInvalidError,
NameMaximumLengthError,
PageCannotRenameError,
PageConflictError,
PageNotFoundError,
InvalidVersionError,
)
from .model import NodeWikiPage
logger = logging.getLogger(__name__)
WIKI_NAME_EMPTY_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page name cannot be empty.'
))
WIKI_NAME_MAXIMUM_LENGTH_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page name cannot be more than 100 characters.'
))
WIKI_PAGE_CANNOT_RENAME_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page cannot be renamed.'
))
WIKI_PAGE_CONFLICT_ERROR = HTTPError(http.CONFLICT, data=dict(
message_short='Page conflict',
message_long='A wiki page with that name already exists.'
))
WIKI_PAGE_NOT_FOUND_ERROR = HTTPError(http.NOT_FOUND, data=dict(
message_short='Not found',
message_long='A wiki page could not be found.'
))
WIKI_INVALID_VERSION_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The requested version of this wiki page does not exist.'
))
def _get_wiki_versions(node, name, anonymous=False):
key = to_mongo_key(name)
# Skip if wiki_page doesn't exist; happens on new projects before
# default "home" page is created
if key not in node.wiki_pages_versions:
return []
versions = [
NodeWikiPage.load(version_wiki_id)
for version_wiki_id in node.wiki_pages_versions[key]
]
return [
{
'version': version.version,
'user_fullname': privacy_info_handle(version.user.fullname, anonymous, name=True),
'date': version.date.replace(microsecond=0).isoformat(),
}
for version in reversed(versions)
]
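# Illustrative shape of a single entry returned above (values hypothetical):
#   {'version': 3, 'user_fullname': 'Jane D.', 'date': '2016-01-01T12:44:57'}
# Entries come out most-recent-first via reversed(versions), assuming the
# stored version ids are ordered oldest-first.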
def _get_wiki_pages_current(node):
return [
{
'name': sorted_page.page_name,
'url': node.web_url_for('project_wiki_view', wname=sorted_page.page_name, _guid=True),
'wiki_id': sorted_page._primary_key,
'wiki_content': wiki_page_content(sorted_page.page_name, node=node)
}
for sorted_page in [
node.get_wiki_page(sorted_key)
for sorted_key in sorted(node.wiki_pages_current)
]
# TODO: remove after forward slash migration
if sorted_page is not None
]
def _get_wiki_api_urls(node, name, additional_urls=None):
urls = {
'base': node.api_url_for('project_wiki_home'),
'delete': node.api_url_for('project_wiki_delete', wname=name),
'rename': node.api_url_for('project_wiki_rename', wname=name),
'content': node.api_url_for('wiki_page_content', wname=name),
'settings': node.api_url_for('edit_wiki_settings'),
'grid': node.api_url_for('project_wiki_grid_data', wname=name)
}
if additional_urls:
urls.update(additional_urls)
return urls
def _get_wiki_web_urls(node, key, version=1, additional_urls=None):
urls = {
'base': node.web_url_for('project_wiki_home', _guid=True),
'edit': node.web_url_for('project_wiki_view', wname=key, _guid=True),
'home': node.web_url_for('project_wiki_home', _guid=True),
'page': node.web_url_for('project_wiki_view', wname=key, _guid=True),
}
if additional_urls:
urls.update(additional_urls)
return urls
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def wiki_widget(**kwargs):
node = kwargs['node'] or kwargs['project']
wiki = node.get_addon('wiki')
wiki_page = node.get_wiki_page('home')
more = False
use_python_render = False
if wiki_page and wiki_page.html(node):
wiki_html = wiki_page.html(node)
if len(wiki_html) > 500:
wiki_html = BeautifulSoup(wiki_html[:500] + '...', 'html.parser')
more = True
else:
wiki_html = BeautifulSoup(wiki_html, 'html.parser')
more = False
use_python_render = wiki_page.rendered_before_update
else:
wiki_html = None
ret = {
'complete': True,
'wiki_content': unicode(wiki_html) if wiki_html else None,
'wiki_content_url': node.api_url_for('wiki_page_content', wname='home'),
'use_python_render': use_python_render,
'more': more,
'include': False,
}
ret.update(wiki.config.to_json())
return ret
@must_be_valid_project
@must_have_write_permission_or_public_wiki
@must_have_addon('wiki', 'node')
def wiki_page_draft(wname, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_page = node.get_wiki_page(wname)
return {
'wiki_content': wiki_page.content if wiki_page else None,
'wiki_draft': (wiki_page.get_draft(node) if wiki_page
else wiki_utils.get_sharejs_content(node, wname)),
}
@must_be_valid_project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def wiki_page_content(wname, wver=None, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_page = node.get_wiki_page(wname, version=wver)
use_python_render = wiki_page.rendered_before_update if wiki_page else False
return {
'wiki_content': wiki_page.content if wiki_page else '',
# Only return rendered version if page was saved before wiki change
'wiki_rendered': wiki_page.html(node) if use_python_render else '',
}
@must_be_valid_project # injects project
@must_have_permission('write') # injects user, project
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_delete(auth, wname, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_name = wname.strip()
wiki_page = node.get_wiki_page(wiki_name)
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, wiki_name)
if not wiki_page:
raise HTTPError(http.NOT_FOUND)
node.delete_node_wiki(wiki_name, auth)
wiki_utils.broadcast_to_sharejs('delete', sharejs_uuid, node)
return {}
@must_be_valid_project # returns project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def project_wiki_view(auth, wname, path=None, **kwargs):
node = kwargs['node'] or kwargs['project']
anonymous = has_anonymous_link(node, auth)
wiki_name = (wname or '').strip()
wiki_key = to_mongo_key(wiki_name)
wiki_page = node.get_wiki_page(wiki_name)
wiki_settings = node.get_addon('wiki')
can_edit = (
auth.logged_in
and not node.is_registration
and (
node.has_permission(auth.user, 'write')
or wiki_settings.is_publicly_editable
)
)
versions = _get_wiki_versions(node, wiki_name, anonymous=anonymous)
# Determine panels used in view
panels = {'view', 'edit', 'compare', 'menu'}
if request.args and set(request.args).intersection(panels):
panels_used = [panel for panel in request.args if panel in panels]
num_columns = len(set(panels_used).intersection({'view', 'edit', 'compare'}))
if num_columns == 0:
panels_used.append('view')
num_columns = 1
else:
panels_used = ['view', 'menu']
num_columns = 1
try:
view = wiki_utils.format_wiki_version(
version=request.args.get('view'),
num_versions=len(versions),
allow_preview=True,
)
compare = wiki_utils.format_wiki_version(
version=request.args.get('compare'),
num_versions=len(versions),
allow_preview=False,
)
except InvalidVersionError:
raise WIKI_INVALID_VERSION_ERROR
# Default versions for view and compare
version_settings = {
'view': view or ('preview' if 'edit' in panels_used else 'current'),
'compare': compare or 'previous',
}
# ensure home is always lower case since it cannot be renamed
if wiki_name.lower() == 'home':
wiki_name = 'home'
if wiki_page:
version = wiki_page.version
is_current = wiki_page.is_current
content = wiki_page.html(node)
use_python_render = wiki_page.rendered_before_update
else:
version = 'NA'
is_current = False
content = ''
use_python_render = False
if can_edit:
if wiki_key not in node.wiki_private_uuids:
wiki_utils.generate_private_uuid(node, wiki_name)
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, wiki_name)
else:
if wiki_key not in node.wiki_pages_current and wiki_key != 'home':
raise WIKI_PAGE_NOT_FOUND_ERROR
if 'edit' in request.args:
if wiki_settings.is_publicly_editable:
raise HTTPError(http.UNAUTHORIZED)
raise HTTPError(http.FORBIDDEN)
sharejs_uuid = None
ret = {
'wiki_id': wiki_page._primary_key if wiki_page else None,
'wiki_name': wiki_page.page_name if wiki_page else wiki_name,
'wiki_content': content,
'use_python_render': use_python_render,
'page': wiki_page,
'version': version,
'versions': versions,
'sharejs_uuid': sharejs_uuid or '',
'sharejs_url': settings.SHAREJS_URL,
'is_current': is_current,
'version_settings': version_settings,
'pages_current': _get_wiki_pages_current(node),
'category': node.category,
'panels_used': panels_used,
'num_columns': num_columns,
'urls': {
'api': _get_wiki_api_urls(node, wiki_name, {
'content': node.api_url_for('wiki_page_content', wname=wiki_name),
'draft': node.api_url_for('wiki_page_draft', wname=wiki_name),
}),
'web': _get_wiki_web_urls(node, wiki_name),
'gravatar': get_gravatar(auth.user, 25),
},
}
ret.update(_view_project(node, auth, primary=True))
ret['user']['can_edit_wiki_body'] = can_edit
return ret
@must_be_valid_project # injects node or project
@must_have_write_permission_or_public_wiki # injects user
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_edit_post(auth, wname, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_name = wname.strip()
wiki_page = node.get_wiki_page(wiki_name)
redirect_url = node.web_url_for('project_wiki_view', wname=wiki_name, _guid=True)
form_wiki_content = request.form['content']
# ensure home is always lower case since it cannot be renamed
if wiki_name.lower() == 'home':
wiki_name = 'home'
if wiki_page:
# Only update node wiki if content has changed
if form_wiki_content != wiki_page.content:
node.update_node_wiki(wiki_page.page_name, form_wiki_content, auth)
ret = {'status': 'success'}
else:
ret = {'status': 'unmodified'}
else:
# update_node_wiki will create a new wiki page because a page
# with this name does not exist yet
node.update_node_wiki(wiki_name, form_wiki_content, auth)
ret = {'status': 'success'}
return ret, http.FOUND, None, redirect_url
@must_be_valid_project # injects node or project
@must_have_permission('admin')
@must_not_be_registration
@must_have_addon('wiki', 'node')
def edit_wiki_settings(node, auth, **kwargs):
wiki_settings = node.get_addon('wiki')
permissions = request.get_json().get('permission', None)
if not wiki_settings:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='Cannot change wiki settings without a wiki'
))
if permissions == 'public':
permissions = True
elif permissions == 'private':
permissions = False
else:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='Permissions flag used is incorrect.'
))
try:
wiki_settings.set_editing(permissions, auth, log=True)
except NodeStateError as e:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short="Can't change privacy",
message_long=e.message
))
return {
'status': 'success',
'permissions': permissions,
}
@must_be_logged_in
@must_be_valid_project
def get_node_wiki_permissions(node, auth, **kwargs):
return wiki_utils.serialize_wiki_settings(auth.user, [node._id])
@must_be_valid_project
@must_have_addon('wiki', 'node')
def project_wiki_home(**kwargs):
node = kwargs['node'] or kwargs['project']
return redirect(node.web_url_for('project_wiki_view', wname='home', _guid=True))
@must_be_valid_project # injects project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def project_wiki_id_page(auth, wid, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_page = node.get_wiki_page(id=wid)
if wiki_page:
return redirect(node.web_url_for('project_wiki_view', wname=wiki_page.page_name, _guid=True))
else:
raise WIKI_PAGE_NOT_FOUND_ERROR
@must_be_valid_project
@must_have_write_permission_or_public_wiki
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_edit(wname, **kwargs):
node = kwargs['node'] or kwargs['project']
return redirect(node.web_url_for('project_wiki_view', wname=wname, _guid=True) + '?edit&view&menu')
@must_be_valid_project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def project_wiki_compare(wname, wver, **kwargs):
node = kwargs['node'] or kwargs['project']
return redirect(node.web_url_for('project_wiki_view', wname=wname, _guid=True) + '?view&compare={0}&menu'.format(wver))
@must_not_be_registration
@must_have_permission('write')
@must_have_addon('wiki', 'node')
def project_wiki_rename(auth, wname, **kwargs):
"""View that handles user the X-editable input for wiki page renaming.
:param wname: The target wiki page name.
:param-json value: The new wiki page name.
"""
node = kwargs['node'] or kwargs['project']
wiki_name = wname.strip()
new_wiki_name = request.get_json().get('value', None)
try:
node.rename_node_wiki(wiki_name, new_wiki_name, auth)
except NameEmptyError:
raise WIKI_NAME_EMPTY_ERROR
except NameInvalidError as error:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid name',
message_long=error.args[0]
))
except NameMaximumLengthError:
raise WIKI_NAME_MAXIMUM_LENGTH_ERROR
except PageCannotRenameError:
raise WIKI_PAGE_CANNOT_RENAME_ERROR
except PageConflictError:
raise WIKI_PAGE_CONFLICT_ERROR
except PageNotFoundError:
raise WIKI_PAGE_NOT_FOUND_ERROR
else:
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, new_wiki_name)
wiki_utils.broadcast_to_sharejs('redirect', sharejs_uuid, node, new_wiki_name)
@must_be_valid_project # returns project
@must_have_permission('write') # returns user, project
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_validate_name(wname, auth, node, **kwargs):
wiki_name = wname.strip()
wiki_key = to_mongo_key(wiki_name)
if wiki_key in node.wiki_pages_current or wiki_key == 'home':
raise HTTPError(http.CONFLICT, data=dict(
message_short='Wiki page name conflict.',
message_long='A wiki page with that name already exists.'
))
else:
node.update_node_wiki(wiki_name, '', auth)
return {'message': wiki_name}
@must_be_valid_project
@must_be_contributor_or_public
def project_wiki_grid_data(auth, node, **kwargs):
pages = []
project_wiki_pages = {
'title': 'Project Wiki Pages',
'kind': 'folder',
'type': 'heading',
'children': format_project_wiki_pages(node, auth)
}
pages.append(project_wiki_pages)
component_wiki_pages = {
'title': 'Component Wiki Pages',
'kind': 'folder',
'type': 'heading',
'children': format_component_wiki_pages(node, auth)
}
if len(component_wiki_pages['children']) > 0:
pages.append(component_wiki_pages)
return pages
def format_home_wiki_page(node):
home_wiki = node.get_wiki_page('home')
home_wiki_page = {
'page': {
'url': node.web_url_for('project_wiki_home'),
'name': 'Home',
'id': 'None',
}
}
if home_wiki:
home_wiki_page = {
'page': {
'url': node.web_url_for('project_wiki_view', wname='home', _guid=True),
'name': 'Home',
'id': home_wiki._primary_key,
}
}
return home_wiki_page
def format_project_wiki_pages(node, auth):
pages = []
can_edit = node.has_permission(auth.user, 'write') and not node.is_registration
project_wiki_pages = _get_wiki_pages_current(node)
home_wiki_page = format_home_wiki_page(node)
pages.append(home_wiki_page)
for wiki_page in project_wiki_pages:
if wiki_page['name'] != 'home':
has_content = bool(wiki_page['wiki_content'].get('wiki_content'))
page = {
'page': {
'url': wiki_page['url'],
'name': wiki_page['name'],
'id': wiki_page['wiki_id'],
}
}
if can_edit or has_content:
pages.append(page)
return pages
def format_component_wiki_pages(node, auth):
pages = []
for node in node.nodes:
if any([node.is_deleted,
not node.can_view(auth),
not node.has_addon('wiki')]):
continue
else:
serialized = serialize_component_wiki(node, auth)
if serialized:
pages.append(serialized)
return pages
def serialize_component_wiki(node, auth):
children = []
url = node.web_url_for('project_wiki_view', wname='home', _guid=True)
home_has_content = bool(wiki_page_content('home', node=node).get('wiki_content'))
component_home_wiki = {
'page': {
'url': url,
'name': 'Home',
# Handle pointers
'id': node._primary_key if node.primary else node.node._primary_key,
}
}
can_edit = node.has_permission(auth.user, 'write') and not node.is_registration
if can_edit or home_has_content:
children.append(component_home_wiki)
for page in _get_wiki_pages_current(node):
if page['name'] != 'home':
has_content = bool(page['wiki_content'].get('wiki_content'))
component_page = {
'page': {
'url': page['url'],
'name': page['name'],
'id': page['wiki_id'],
}
}
if can_edit or has_content:
children.append(component_page)
if len(children) > 0:
component = {
'page': {
'name': node.title,
'url': url,
},
'kind': 'component',
'category': node.category,
'pointer': not node.primary,
'children': children,
}
return component
return None
"""The tests for the IMAP email content sensor platform."""
from collections import deque
import email
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import datetime
from threading import Event
import unittest
from homeassistant.helpers.template import Template
from homeassistant.helpers.event import track_state_change
from homeassistant.components.sensor import imap_email_content
from tests.common import get_test_home_assistant
class FakeEMailReader:
"""A test class for sending test emails."""
def __init__(self, messages):
"""Setup the fake email reader."""
self._messages = messages
def connect(self):
"""Stay always Connected."""
return True
def read_next(self):
"""Get the next email."""
if len(self._messages) == 0:
return None
return self._messages.popleft()
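# Illustrative sketch: the reader is primed with a deque of messages and is
# drained one message per call (msg1/msg2 are hypothetical), e.g.
#   reader = FakeEMailReader(deque([msg1, msg2]))
#   reader.read_next()  # -> msg1; a later call returns msg2, then None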
class EmailContentSensor(unittest.TestCase):
"""Test the IMAP email content sensor."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_allowed_sender(self):
"""Test emails from allowed sender."""
test_message = email.message.Message()
test_message['From'] = "sender@test.com"
test_message['Subject'] = "Test"
test_message['Date'] = datetime.datetime(2016, 1, 1, 12, 44, 57)
test_message.set_payload("Test Message")
sensor = imap_email_content.EmailContentSensor(
self.hass,
FakeEMailReader(deque([test_message])),
"test_emails_sensor",
["sender@test.com"],
None)
sensor.entity_id = "sensor.emailtest"
sensor.update()
self.assertEqual("Test Message", sensor.state)
self.assertEqual("sender@test.com", sensor.state_attributes["from"])
self.assertEqual("Test", sensor.state_attributes["subject"])
self.assertEqual(datetime.datetime(2016, 1, 1, 12, 44, 57),
sensor.state_attributes["date"])
def test_multi_part_with_text(self):
"""Test multi part emails."""
msg = MIMEMultipart('alternative')
msg['Subject'] = "Link"
msg['From'] = "sender@test.com"
text = "Test Message"
html = "<html><head></head><body>Test Message</body></html>"
textPart = MIMEText(text, 'plain')
htmlPart = MIMEText(html, 'html')
msg.attach(textPart)
msg.attach(htmlPart)
sensor = imap_email_content.EmailContentSensor(
self.hass,
FakeEMailReader(deque([msg])),
"test_emails_sensor",
["sender@test.com"],
None)
sensor.entity_id = "sensor.emailtest"
sensor.update()
self.assertEqual("Test Message", sensor.state)
def test_multi_part_only_html(self):
"""Test multi part emails with only HTML."""
msg = MIMEMultipart('alternative')
msg['Subject'] = "Link"
msg['From'] = "sender@test.com"
html = "<html><head></head><body>Test Message</body></html>"
htmlPart = MIMEText(html, 'html')
msg.attach(htmlPart)
sensor = imap_email_content.EmailContentSensor(
self.hass,
FakeEMailReader(deque([msg])),
"test_emails_sensor",
["sender@test.com"],
None)
sensor.entity_id = "sensor.emailtest"
sensor.update()
self.assertEqual(
"<html><head></head><body>Test Message</body></html>",
sensor.state)
def test_multi_part_only_other_text(self):
"""Test multi part emails with only other text."""
msg = MIMEMultipart('alternative')
msg['Subject'] = "Link"
msg['From'] = "sender@test.com"
other = "Test Message"
htmlPart = MIMEText(other, 'other')
msg.attach(htmlPart)
sensor = imap_email_content.EmailContentSensor(
self.hass,
FakeEMailReader(deque([msg])),
"test_emails_sensor",
["sender@test.com"],
None)
sensor.entity_id = "sensor.emailtest"
sensor.update()
self.assertEqual("Test Message", sensor.state)
def test_multiple_emails(self):
"""Test multiple emails."""
states = []
test_message1 = email.message.Message()
test_message1['From'] = "sender@test.com"
test_message1['Subject'] = "Test"
test_message1['Date'] = datetime.datetime(2016, 1, 1, 12, 44, 57)
test_message1.set_payload("Test Message")
test_message2 = email.message.Message()
test_message2['From'] = "sender@test.com"
test_message2['Subject'] = "Test 2"
test_message2['Date'] = datetime.datetime(2016, 1, 1, 12, 44, 57)
test_message2.set_payload("Test Message 2")
states_received = Event()
def state_changed_listener(entity_id, from_s, to_s):
states.append(to_s)
if len(states) == 2:
states_received.set()
track_state_change(
self.hass,
["sensor.emailtest"],
state_changed_listener)
sensor = imap_email_content.EmailContentSensor(
self.hass,
FakeEMailReader(deque([test_message1, test_message2])),
"test_emails_sensor",
["sender@test.com"],
None)
sensor.entity_id = "sensor.emailtest"
sensor.update()
self.hass.pool.block_till_done()
states_received.wait(5)
self.assertEqual("Test Message", states[0].state)
self.assertEqual("Test Message 2", states[1].state)
self.assertEqual("Test Message 2", sensor.state)
def test_sender_not_allowed(self):
"""Test not whitelisted emails."""
test_message = email.message.Message()
test_message['From'] = "sender@test.com"
test_message['Subject'] = "Test"
test_message['Date'] = datetime.datetime(2016, 1, 1, 12, 44, 57)
test_message.set_payload("Test Message")
sensor = imap_email_content.EmailContentSensor(
self.hass,
FakeEMailReader(deque([test_message])),
"test_emails_sensor",
["other@test.com"],
None)
sensor.entity_id = "sensor.emailtest"
sensor.update()
self.assertEqual(None, sensor.state)
def test_template(self):
"""Test value template."""
test_message = email.message.Message()
test_message['From'] = "sender@test.com"
test_message['Subject'] = "Test"
test_message['Date'] = datetime.datetime(2016, 1, 1, 12, 44, 57)
test_message.set_payload("Test Message")
sensor = imap_email_content.EmailContentSensor(
self.hass,
FakeEMailReader(deque([test_message])),
"test_emails_sensor",
["sender@test.com"],
Template("{{ subject }} from {{ from }} with message {{ body }}",
self.hass))
sensor.entity_id = "sensor.emailtest"
sensor.update()
self.assertEqual(
"Test from sender@test.com with message Test Message",
sensor.state)
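# Illustrative sketch (not the imap_email_content implementation): the multipart tests
# above assume the sensor prefers a text/plain part and falls back to other text/*
# parts when no plain part exists. A stdlib-only version of that preference could look
# roughly like the helper below; it is not used by the tests.
def example_get_msg_text(message):
    """Return the preferred text payload of an email.message.Message."""
    if not message.is_multipart():
        return message.get_payload()
    plain = html = other = None
    for part in message.walk():
        content_type = part.get_content_type()
        if content_type == 'text/plain' and plain is None:
            plain = part.get_payload()
        elif content_type == 'text/html' and html is None:
            html = part.get_payload()
        elif part.get_content_maintype() == 'text' and other is None:
            other = part.get_payload()
    return plain or other or html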
|
|
#! /usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# XXXstroucki: this apparently originated from a copy of the primitive
# scheduler code sometime in 2010. It aims to keep a small pool of tashi servers
# powered on and available while shutting the remaining servers down. Could this
# be better implemented as a hook function of the scheduler?
from socket import gethostname
import os
import socket
import sys
import threading
import time
import logging.config
import pickle
from tashi.rpycservices.rpyctypes import *
from tashi.util import getConfig, createClient, instantiateImplementation, boolean
import tashi
from zoni.services.rpycservices import *
import zoni
class Primitive(object):
def __init__(self, config, client):
self.config = config
self.client = client
self.hooks = []
self.log = logging.getLogger(__file__)
self.scheduleDelay = float(self.config.get("Primitive", "scheduleDelay"))
self.densePack = boolean(self.config.get("Primitive", "densePack"))
self.hosts = {}
# Zoni
self.minServersOn = 3
self.shutdownDelay = 300
self.pcm = zoni.services.rpycservices.client("zoni", 12345).createConn()
self.zoniStateFile = "/var/tmp/zoniStateFile"
if os.path.exists(self.zoniStateFile):
self.zoniState = self.__loadZoniState(self.zoniStateFile)
else:
self.zoniState = {}
self.__initState()
items = self.config.items("Primitive")
items.sort()
for item in items:
(name, value) = item
name = name.lower()
if (name.startswith("hook")):
try:
self.hooks.append(instantiateImplementation(value, config, client, False))
except:
self.log.exception("Failed to load hook %s" % (value))
def __loadZoniState(self, filename):
pkl_file = open(filename, "rb")
data = pickle.load(pkl_file)
pkl_file.close()
return data
def __saveZoniState(self, array, filename):
f = open(filename, "wb")
pickle.dump(array, f)
f.close()
def __initState(self):
hosts = {}
_instances = self.client.getInstances()
for h in self.client.getHosts():
hosts[h.id] = h
self.hosts = hosts
used_hosts = []
for k,v in hosts.iteritems():
print "k is ", k
if v.state == 1:
self.zoniState[k] = self.zoniState.get(k, {})
self.zoniState[k]["powerState"] = self.zoniState[k].get("powerState", "On")
self.zoniState[k]["state"] = self.zoniState[k].get("state", "Available")
if v.state > 1:
self.zoniState[k] = self.zoniState.get(k, {})
self.zoniState[k]["powerState"] = self.zoniState[k].get("powerState", "On")
self.zoniState[k]["state"] = self.zoniState[k].get("state", "Not Available")
# Look and mark nodes free of VM instances
for i in _instances:
if i.hostId != None and i.hostId not in used_hosts:
used_hosts.append(i.hostId)
self.zoniState[i.hostId]["state"] = "In Use"
self.__saveZoniState(self.zoniState, self.zoniStateFile)
def __updateState(self):
hosts = {}
used_hosts = []
_instances = self.client.getInstances()
for h in self.client.getHosts():
hosts[h.id] = h
self.hosts = hosts
for k,v in hosts.iteritems():
if v.state == 1:
self.zoniState[k]["state"] = "Available"
if v.state > 1:
self.zoniState[k]["state"] = "Not Available"
# Look and mark nodes free of VM instances
for i in _instances:
if i.hostId != None and i.hostId not in used_hosts:
used_hosts.append(i.hostId)
self.zoniState[i.hostId]["state"] = "In Use"
self.__saveZoniState(self.zoniState, self.zoniStateFile)
def __getAvail(self):
availCount = 0
for host, val in self.zoniState.iteritems():
if val['state'] == "Available" and val['powerState'] == "On":
availCount += 1
return availCount
def conservePower(self):
self.__updateState()
key = "Tashi"
try:
# Get a list of available hosts
for host, val in self.zoniState.iteritems():
if val['state'] == "Available" and self.__getAvail() > self.minServersOn:
#print "working on host ", host, val, self.hosts[host].name
self.log.info("VCM SHUTDOWN_REQUEST %s (%s)" % (self.hosts[host].name, str(host)))
self.zoniState[host]["powerState"] = "Off"
self.zoniState[host]["stateTime"]= int(time.time())
self.pcm.root.powerOff(key, self.hosts[host].name)
self.__saveZoniState(self.zoniState, self.zoniStateFile)
except Exception, e:
print "except", e
for host, val in self.zoniState.iteritems():
if self.__getAvail() < self.minServersOn:
if val['powerState'] == "Off":
# Bring up a node
self.log.info("VCM POWERON_REQUEST %s (%s) - Min Servers requirement not met" % (self.hosts[host].name, str(host)))
self.pcm.root.powerOn(key, self.hosts[host].name)
self.zoniState[host]["powerState"] = "On"
self.zoniState[host]["stateTime"]= int(time.time())
self.__saveZoniState(self.zoniState, self.zoniStateFile)
def start(self):
oldInstances = {}
muffle = {}
while True:
try:
# Generate a list of VMs/host
hosts = {}
load = {}
for h in self.client.getHosts():
hosts[h.id] = h
load[h.id] = []
load[None] = []
_instances = self.client.getInstances()
instances = {}
for i in _instances:
instances[i.id] = i
for i in instances.itervalues():
if (i.hostId or i.state == InstanceState.Pending):
load[i.hostId] = load[i.hostId] + [i.id]
# Check for VMs that have exited
for i in oldInstances:
if (i not in instances and oldInstances[i].state != InstanceState.Pending):
for hook in self.hooks:
hook.postDestroy(oldInstances[i])
# Schedule new VMs
oldInstances = instances
if (len(load.get(None, [])) > 0):
load[None].sort()
for i in load[None]:
inst = instances[i]
try:
minMax = None
minMaxHost = None
targetHost = inst.hints.get("targetHost", None)
try:
allowElsewhere = boolean(inst.hints.get("allowElsewhere", "False"))
except Exception, e:
allowElsewhere = False
# TargetHost specified
if (targetHost != None):
for h in hosts.values():
if ((str(h.id) == targetHost or h.name == targetHost)):
# make sure that host is up, in a normal state and is not reserved
if (h.up == True and h.state == HostState.Normal and len(h.reserved) == 0):
memUsage = reduce(lambda x, y: x + instances[y].memory, load[h.id], inst.memory)
coreUsage = reduce(lambda x, y: x + instances[y].cores, load[h.id], inst.cores)
if (memUsage <= h.memory and coreUsage <= h.cores):
minMax = len(load[h.id])
minMaxHost = h
# If a host machine is reserved, only allow if userid is in reserved list
if ((len(h.reserved) > 0) and inst.userId in h.reserved):
memUsage = reduce(lambda x, y: x + instances[y].memory, load[h.id], inst.memory)
coreUsage = reduce(lambda x, y: x + instances[y].cores, load[h.id], inst.cores)
if (memUsage <= h.memory and coreUsage <= h.cores):
minMax = len(load[h.id])
minMaxHost = h
if ((targetHost == None or allowElsewhere) and minMaxHost == None):
for h in hosts.values():
if (h.up == True and h.state == HostState.Normal and len(h.reserved) == 0):
if (minMax is None or (self.densePack and len(load[h.id]) > minMax) or (not self.densePack and len(load[h.id]) < minMax)):
memUsage = reduce(lambda x, y: x + instances[y].memory, load[h.id], inst.memory)
coreUsage = reduce(lambda x, y: x + instances[y].cores, load[h.id], inst.cores)
if (memUsage <= h.memory and coreUsage <= h.cores):
minMax = len(load[h.id])
minMaxHost = h
if (minMaxHost):
if (not inst.hints.get("__resume_source", None)):
for hook in self.hooks:
hook.preCreate(inst)
self.log.info("Scheduling instance %s (%d mem, %d cores, %d uid) on host %s" % (inst.name, inst.memory, inst.cores, inst.userId, minMaxHost.name))
self.client.activateVm(i, minMaxHost)
load[minMaxHost.id] = load[minMaxHost.id] + [i]
muffle.clear()
else:
if (inst.name not in muffle):
self.log.info("Failed to find a suitable place to schedule %s" % (inst.name))
muffle[inst.name] = True
except Exception, e:
if (inst.name not in muffle):
self.log.exception("Failed to schedule or activate %s" % (inst.name))
muffle[inst.name] = True
time.sleep(self.scheduleDelay)
self.conservePower()
except TashiException, e:
self.log.exception("Tashi exception")
time.sleep(self.scheduleDelay)
except Exception, e:
self.log.exception("General exception")
time.sleep(self.scheduleDelay)
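# Illustrative sketch (hypothetical values): the per-host record that __initState()
# and __updateState() maintain and that __saveZoniState() pickles to
# self.zoniStateFile, keyed by the Tashi host id.
EXAMPLE_ZONI_STATE = {
    42: {
        'powerState': 'On',        # 'On' or 'Off'; toggled by conservePower()
        'state': 'Available',      # 'Available', 'Not Available' or 'In Use'
        'stateTime': 1288000000,   # int(time.time()) when the power state last changed
    },
}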
def main():
(config, configFiles) = getConfig(["Agent"])
publisher = instantiateImplementation(config.get("Agent", "publisher"), config)
tashi.publisher = publisher
client = createClient(config)
logging.config.fileConfig(configFiles)
agent = Primitive(config, client)
#agent.conservePower()
agent.start()
if __name__ == "__main__":
main()
|
|
# coding: utf-8
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pylab as plt
import scipy as sp
import pandas as pd
import rost
import os
import pymc
import shutil
import multiprocessing
import datetime
# In[8]:
print os.getcwd()
cell_number_data_file = '../../data/cell_number_data.csv'
# cell_number_data_file = '../../140204_create_test_data/140204_step_data/step_data.pkl'
cell_number_data = pd.read_csv(cell_number_data_file)
# In[9]:
cell_number_data['GF'] = cell_number_data['PCNA'] / cell_number_data['SOX2']
cell_number_data['mi'] = cell_number_data['m'] / cell_number_data['PCNA'] * 13.2 / 50.
# ## pymc model
# In[17]:
outgrowth = pd.Series([0.0, 56.5, 163.28571428571428, 451.75, 1278.5, 2257.25],
index = pd.Index([0.0, 2.0, 3.0, 4.0, 6.0, 8.0], name = 'time'),
name = 'outgrowth')
def make_model(data, mi_mean_min, mi_mean_max, GF_mean_min, GF_mean_max, constant_proliferation = False):
values_SOX2 = {}
values_m = {}
values_nonPCNA = {}
switchpoint = {}
mi_left = {}
GF_left = {}
SOX2_mean_left = {}
mi_right = {}
GF_right = {}
SOX2_mean_right = {}
cells_SOX2_float = {}
cells_nonPCNA = {}
cells_m = {}
ls = 50.0 # length of section
l = pd.read_csv('../../data/cell_length_data.csv')['cell_length'].mean() # length of cell
def step_function(x, switchpoint, left_value, right_value):
        ''' Return an array in the same format as the passed positions array.
        Specifically, it produces an array the same size as the experimental data
        whose contents are the left value up to the switchpoint and the right value past the switchpoint.
        For all purposes, this builds the model against which we want to compare the data.
        '''
return sp.where(x<=switchpoint, left_value, right_value)
def ma(array, fill_value):
return sp.ma.masked_array(array, sp.isnan(array), fill_value = fill_value)
#data = data.dropna(how='all', subset = ['m', 'PCNA', 'SOX2'])
# I'll drop all nan because of the potential bug with the binomials (see my question on stackoverflow)
data = data.dropna(how='all', subset = ['m', 'PCNA', 'SOX2'])
data = data.sort_values(['ID', 'pos'])
# priors for global mean values
# define priors for left side of step function
mi_left_pop= pymc.Uniform('mi_left_pop', lower = mi_mean_min, upper = mi_mean_max, value = 0.02)
GF_left_pop = pymc.Uniform('GF_left_pop', lower = GF_mean_min, upper = GF_mean_max, value = 0.8)
# define priors for right side of step function
if constant_proliferation:
mi_right_pop = mi_left_pop
GF_right_pop = GF_left_pop
else:
mi_right_pop = pymc.Uniform('mi_right_pop', lower = mi_mean_min, upper = mi_mean_max, value = 0.04)
GF_right_pop = pymc.Uniform('GF_right_pop', lower = GF_mean_min, upper = GF_mean_max, value = 0.9)
# stepsizes
@pymc.deterministic(name='step_mi', plot=True)
def step_mi(mi_left = mi_left_pop, mi_right = mi_right_pop):
return mi_right - mi_left
@pymc.deterministic(name='step_GF', plot=True)
def step_GF(GF_left = GF_left_pop, GF_right = GF_right_pop):
return GF_right - GF_left
    # prior distribution for sigma, assumed to be uniformly distributed
GF_sigma_inter = pymc.Uniform('GF_sigma_inter', lower = 0.001, upper = 0.2)
mi_sigma_inter = pymc.Uniform('mi_sigma_inter', lower = 0.001, upper = 0.2)
# switchpoint
if not constant_proliferation:
switchpoint_pop = pymc.Uniform('switchpoint_pop',
lower = -2000,
upper = outgrowth[data['time'].iloc[0]],
value = -500)
switchpoint_sigma_inter = pymc.Uniform('switchpoint_sigma_inter', lower=1.0, upper=400.0, value = 50)
for ID, IDdata in data.groupby('ID'):
values_SOX2[ID] = ma(IDdata['SOX2'], 35.5)
values_nonPCNA[ID] = ma(IDdata['SOX2'] - IDdata['PCNA'], 3.5)
values_m[ID] = ma(IDdata['m'], 1.5)
# Model definition
#priors
# switchpoint[ID]: for all observables
if constant_proliferation:
switchpoint[ID] = 0.0
else:
switchpoint[ID] = pymc.Normal('switchpoint_{0}'.format(ID), mu = switchpoint_pop, tau = 1/switchpoint_sigma_inter**2, value = -500,
plot = False)
# number of SOX2 cells
SOX2_mean = sp.mean(values_SOX2[ID])
SOX2_std = sp.std(values_SOX2[ID])
# define priors for left side of step function
mi_left[ID] = pymc.TruncatedNormal('mi_left_{0}'.format(ID), mu = mi_left_pop, tau = 1.0 / mi_sigma_inter**2,
a = 0.0, b = 1.0,
value = 0.02, plot = False)
GF_left[ID] = pymc.TruncatedNormal('GF_left_{0}'.format(ID), mu = GF_left_pop, tau = 1.0 / GF_sigma_inter**2,
a = 0.0, b = 1.0,
value = 0.5, plot = False)
# define priors for right side of step function
mi_right[ID] = pymc.TruncatedNormal('mi_right_{0}'.format(ID), mu = mi_right_pop, tau = 1.0 / mi_sigma_inter**2,
a = 0.0, b = 1.0,
value = 0.02, plot = False)
GF_right[ID] = pymc.TruncatedNormal('GF_right_{0}'.format(ID), mu = GF_right_pop, tau = 1.0 / GF_sigma_inter**2,
a = 0.0, b = 1.0,
value = 0.5, plot = False)
# step functions
@pymc.deterministic(name='mi_{}'.format(ID))
def mi(positions = sp.array(IDdata['pos']), switchpoint = switchpoint[ID],
left_value = mi_left[ID], right_value = mi_right[ID]):
return step_function(positions, switchpoint, left_value, right_value)
@pymc.deterministic(name='GF_{}'.format(ID))
def GF(positions = sp.array(IDdata['pos']), switchpoint = switchpoint[ID],
left_value = GF_left[ID], right_value = GF_right[ID]):
return step_function(positions, switchpoint, left_value, right_value)
@pymc.deterministic(name='SOX2_mean_{}'.format(ID))
def SOX2_mean(positions = sp.array(IDdata['pos']), switchpoint = switchpoint[ID],
left_value = SOX2_mean , right_value = SOX2_mean):
return step_function(positions, switchpoint, left_value, right_value)
#likelihoods
cells_SOX2_float[ID] = pymc.Normal('cells_SOX2_float_{0}'.format(ID), mu=SOX2_mean, tau = 1/SOX2_std**2, value = values_SOX2[ID], plot = False, observed = True)
@pymc.deterministic(name='cells_SOX2_{}'.format(ID))
def cells_SOX2(csf = cells_SOX2_float[ID]):
return sp.around(csf)
cells_nonPCNA[ID] = pymc.Binomial('cells_nonPCNA_{0}'.format(ID), n = cells_SOX2, p = (1.0 - GF), value = values_nonPCNA[ID], observed = True, plot = False )
@pymc.deterministic(name='cells_PCNA_{}'.format(ID))
def cells_PCNA(cnp = cells_nonPCNA[ID], cs = cells_SOX2):
return cs - cnp
@pymc.deterministic(name='cells_PCNA_section_{}'.format(ID))
def cells_PCNA_section(cp = cells_PCNA, ls = ls, l = l):
return cp * ls / l
cells_m[ID] = pymc.Binomial('cells_m_{0}'.format(ID), n = cells_PCNA_section, p = mi, value = values_m[ID], observed = True, plot = False)
    values_SOX2 = pymc.Container(values_SOX2)
values_m = pymc.Container(values_m)
values_nonPCNA = pymc.Container(values_nonPCNA)
switchpoint = pymc.Container(switchpoint)
mi_left = pymc.Container(mi_left)
GF_left = pymc.Container(GF_left)
SOX2_mean_left = pymc.Container(SOX2_mean_left)
mi_right = pymc.Container(mi_right)
GF_right = pymc.Container(GF_right)
SOX2_mean_right = pymc.Container(SOX2_mean_right)
cells_SOX2_float = pymc.Container(cells_SOX2_float)
cells_nonPCNA = pymc.Container(cells_nonPCNA)
cells_m = pymc.Container(cells_m)
return locals()
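# Worked example (illustrative only): the step function defined inside make_model()
# maps positions at or below the switchpoint to the left value and positions above it
# to the right value; the numbers below are hypothetical.
def example_step_function():
    """Show the behaviour of the step function used in the model above."""
    positions = sp.array([-100.0, 0.0, 50.0, 200.0])
    # switchpoint = 0.0, left value = 0.02, right value = 0.04
    return sp.where(positions <= 0.0, 0.02, 0.04)   # -> [0.02, 0.02, 0.04, 0.04]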
# ## Fit the real data
# In[29]:
GF_mean_min = (cell_number_data['PCNA'] / cell_number_data['SOX2']).min()
GF_mean_max = (cell_number_data['PCNA'] / cell_number_data['SOX2']).max()
mi_mean_min = 0.0
mi_mean_max = 0.1
burn = 1e6
iter_ = 1e7+1e6
thin = 100
dir_ = '{0}_{1}_{2}'.format(cell_number_data_file.split('.')[-2].split('/')[-1], datetime.datetime.now().strftime("%y%m%dT%H%M%S"), int(iter_-burn) )
out_path = os.path.join('results', dir_)
try:
shutil.rmtree(out_path)
print('Removed previous results')
except:
print('No previous results to remove?!')
rost.mkdir_p(out_path)
meta = pd.Series()
meta['datafile'] = cell_number_data_file
meta['burn'] = burn
meta['iter_'] = iter_
meta['thin'] = thin
meta.to_csv(os.path.join(out_path, 'meta.txt'), sep = '\t')
cell_number_data.to_pickle(os.path.join(out_path, 'cell_number_data.pkl'))
def fit_model((ID, data)):
print ID
M = pymc.MCMC(make_model(data, mi_mean_min, mi_mean_max, GF_mean_min, GF_mean_max), db='hdf5', dbname = os.path.join(out_path, '{0}.hdf5'.format(ID)))
M.sample(iter=iter_, burn=burn, thin=thin, progress_bar=False)
print()
pymc.Matplot.plot(M, path = out_path, suffix = '_{0}'.format(ID));
plt.close('all')
M.db.close()
l = [[ID, data] for ID, data in cell_number_data.groupby('time')]
p = multiprocessing.Pool(processes=5)
p.map(fit_model, l)
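# Illustrative sketch (assumes the pymc 2.x hdf5 backend): one way to reload a
# finished trace for inspection after the pool has run; the file name pattern mirrors
# fit_model() above and the chosen variable name is just an example.
def example_load_trace(hdf5_path):
    """Reload a saved trace and return the sampled population switchpoint."""
    db = pymc.database.hdf5.load(hdf5_path)
    return db.trace('switchpoint_pop')[:]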
|
|
"""
Internal tasks are tasks that are started from the teuthology infrastructure.
Note that there is no corresponding task defined for this module. All of
the calls are made from other modules, most notably teuthology/run.py
"""
from cStringIO import StringIO
import contextlib
import logging
import os
import time
import yaml
import subprocess
from teuthology import lockstatus
from teuthology import lock
from teuthology import misc
from teuthology import provision
from teuthology.job_status import get_status, set_status
from teuthology.config import config as teuth_config
from teuthology.parallel import parallel
from teuthology.suite import has_packages_for_distro
from ..orchestra import cluster, remote, run
from .. import report
log = logging.getLogger(__name__)
@contextlib.contextmanager
def base(ctx, config):
"""
Create the test directory that we will be using on the remote system
"""
log.info('Creating test directory...')
testdir = misc.get_testdir(ctx)
run.wait(
ctx.cluster.run(
args=[
'mkdir', '-m0755', '--',
testdir,
],
wait=False,
)
)
try:
yield
finally:
log.info('Tidying up after the test...')
# if this fails, one of the earlier cleanups is flawed; don't
# just cram an rm -rf here
run.wait(
ctx.cluster.run(
args=[
'rmdir',
'--',
testdir,
],
wait=False,
),
)
@contextlib.contextmanager
def lock_machines(ctx, config):
"""
    Lock machines.  Called when the teuthology run finds and locks
    new machines.  This is not called if the user has already teuthology-locked
    machines and placed those keys in the targets section of a yaml file.
"""
# It's OK for os_type and os_version to be None here. If we're trying
# to lock a bare metal machine, we'll take whatever is available. If
# we want a vps, defaults will be provided by misc.get_distro and
# misc.get_distro_version in provision.create_if_vm
os_type = ctx.config.get("os_type")
os_version = ctx.config.get("os_version")
arch = ctx.config.get('arch')
log.info('Locking machines...')
assert isinstance(config[0], int), 'config[0] must be an integer'
machine_type = config[1]
how_many = config[0]
# We want to make sure there are always this many machines available
to_reserve = 5
# change the status during the locking process
report.try_push_job_info(ctx.config, dict(status='waiting'))
while True:
# get a candidate list of machines
machines = lock.list_locks(machine_type=machine_type, up=True,
locked=False, count=how_many + to_reserve)
if machines is None:
if ctx.block:
log.error('Error listing machines, trying again')
time.sleep(20)
continue
else:
raise RuntimeError('Error listing machines')
# make sure there are machines for non-automated jobs to run
if len(machines) < to_reserve + how_many and ctx.owner.startswith('scheduled'):
if ctx.block:
log.info(
'waiting for more machines to be free (need %s + %s, have %s)...',
to_reserve,
how_many,
len(machines),
)
time.sleep(10)
continue
else:
assert 0, ('not enough machines free; need %s + %s, have %s' %
(to_reserve, how_many, len(machines)))
newly_locked = lock.lock_many(ctx, how_many, machine_type, ctx.owner,
ctx.archive, os_type, os_version, arch)
if not newly_locked and not isinstance(newly_locked, list):
raise RuntimeError('Invalid parameters specified')
if len(newly_locked) == how_many:
vmlist = []
for lmach in newly_locked:
if misc.is_vm(lmach):
vmlist.append(lmach)
if vmlist:
log.info('Waiting for virtual machines to come up')
keys_dict = dict()
loopcount = 0
while len(keys_dict) != len(vmlist):
loopcount += 1
time.sleep(10)
keys_dict = lock.ssh_keyscan(vmlist)
log.info('virtual machine is still unavailable')
if loopcount == 40:
loopcount = 0
log.info('virtual machine(s) still not up, ' +
'recreating unresponsive ones.')
for guest in vmlist:
if guest not in keys_dict.keys():
log.info('recreating: ' + guest)
full_name = misc.canonicalize_hostname(guest)
provision.destroy_if_vm(ctx, full_name)
provision.create_if_vm(ctx, full_name)
if lock.do_update_keys(keys_dict):
log.info("Error in virtual machine keys")
newscandict = {}
for dkey in newly_locked.iterkeys():
stats = lockstatus.get_status(dkey)
newscandict[dkey] = stats['ssh_pub_key']
ctx.config['targets'] = newscandict
else:
ctx.config['targets'] = newly_locked
locked_targets = yaml.safe_dump(
ctx.config['targets'],
default_flow_style=False
).splitlines()
log.info('\n '.join(['Locked targets:', ] + locked_targets))
# successfully locked machines, change status back to running
report.try_push_job_info(ctx.config, dict(status='running'))
break
elif not ctx.block:
assert 0, 'not enough machines are available'
log.warn('Could not lock enough machines, waiting...')
time.sleep(10)
try:
yield
finally:
if ctx.config.get('unlock_on_failure', False) or \
get_status(ctx.summary) == 'pass':
log.info('Unlocking machines...')
for machine in ctx.config['targets'].iterkeys():
lock.unlock_one(ctx, machine, ctx.owner, ctx.archive)
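# Illustrative note (hypothetical values): lock_machines receives its config as a
# two-element list, count first and machine type second, e.g. from a job yaml
# fragment like "internal.lock_machines: [3, plana]".
EXAMPLE_LOCK_MACHINES_CONFIG = [3, 'plana']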
def save_config(ctx, config):
"""
Store the config in a yaml file
"""
log.info('Saving configuration')
if ctx.archive is not None:
with file(os.path.join(ctx.archive, 'config.yaml'), 'w') as f:
yaml.safe_dump(ctx.config, f, default_flow_style=False)
def check_lock(ctx, config):
"""
Check lock status of remote machines.
"""
if not teuth_config.lock_server or ctx.config.get('check-locks') is False:
log.info('Lock checking disabled.')
return
log.info('Checking locks...')
for machine in ctx.config['targets'].iterkeys():
status = lockstatus.get_status(machine)
log.debug('machine status is %s', repr(status))
assert status is not None, \
'could not read lock status for {name}'.format(name=machine)
assert status['up'], 'machine {name} is marked down'.format(name=machine)
assert status['locked'], \
'machine {name} is not locked'.format(name=machine)
assert status['locked_by'] == ctx.owner, \
'machine {name} is locked by {user}, not {owner}'.format(
name=machine,
user=status['locked_by'],
owner=ctx.owner,
)
def check_packages(ctx, config):
"""
Checks gitbuilder to determine if there are missing packages for this job.
If there are missing packages, fail the job.
"""
log.info("Checking packages...")
os_type = ctx.config.get("os_type", None)
sha1 = ctx.config.get("sha1", None)
# We can only do this check if there are a defined sha1 and os_type
# in the job config.
if os_type and sha1:
log.info(
"Checking packages for os_type '{os}' and ceph hash '{ver}'".format(
os=os_type,
ver=sha1,
)
)
if not has_packages_for_distro(sha1, os_type):
msg = "Packages for os_type '{os}' and ceph hash '{ver}' not found"
msg = msg.format(
os=os_type,
ver=sha1,
)
log.error(msg)
# set the failure message and update paddles with the status
ctx.summary["failure_reason"] = msg
set_status(ctx.summary, "dead")
report.try_push_job_info(ctx.config, dict(status='dead'))
raise RuntimeError(msg)
else:
log.info(
"Checking packages skipped, missing os_type '{os}' or ceph hash '{ver}'".format(
os=os_type,
ver=sha1,
)
)
@contextlib.contextmanager
def timer(ctx, config):
"""
Start the timer used by teuthology
"""
log.info('Starting timer...')
start = time.time()
try:
yield
finally:
duration = time.time() - start
log.info('Duration was %f seconds', duration)
ctx.summary['duration'] = duration
def connect(ctx, config):
"""
Open a connection to a remote host.
"""
log.info('Opening connections...')
remotes = []
machs = []
for name in ctx.config['targets'].iterkeys():
machs.append(name)
for t, key in ctx.config['targets'].iteritems():
t = misc.canonicalize_hostname(t)
log.debug('connecting to %s', t)
try:
if ctx.config['sshkeys'] == 'ignore':
key = None
except (AttributeError, KeyError):
pass
remotes.append(
remote.Remote(name=t, host_key=key, keep_alive=True, console=None))
ctx.cluster = cluster.Cluster()
if 'roles' in ctx.config:
for rem, roles in zip(remotes, ctx.config['roles']):
assert all(isinstance(role, str) for role in roles), \
"Roles in config must be strings: %r" % roles
ctx.cluster.add(rem, roles)
log.info('roles: %s - %s' % (rem, roles))
else:
for rem in remotes:
ctx.cluster.add(rem, rem.name)
def push_inventory(ctx, config):
if not teuth_config.lock_server:
return
def push():
for rem in ctx.cluster.remotes.keys():
info = rem.inventory_info
lock.update_inventory(info)
try:
push()
except Exception:
log.exception("Error pushing inventory")
def serialize_remote_roles(ctx, config):
"""
    Provides an explicit mapping of which remotes have been assigned which roles,
    so that other software can be loosely coupled to teuthology.
"""
if ctx.archive is not None:
with file(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
info_yaml = yaml.safe_load(info_file)
info_file.seek(0)
info_yaml['cluster'] = dict([(rem.name, {'roles': roles}) for rem, roles in ctx.cluster.remotes.iteritems()])
yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
def check_ceph_data(ctx, config):
"""
Check for old /var/lib/ceph directories and detect staleness.
"""
log.info('Checking for old /var/lib/ceph...')
processes = ctx.cluster.run(
args=[
'test', '!', '-e', '/var/lib/ceph',
],
wait=False,
)
failed = False
for proc in processes:
try:
proc.wait()
except run.CommandFailedError:
log.error('Host %s has stale /var/lib/ceph, check lock and nuke/cleanup.', proc.remote.shortname)
failed = True
if failed:
raise RuntimeError('Stale /var/lib/ceph detected, aborting.')
def check_conflict(ctx, config):
"""
Note directory use conflicts and stale directories.
"""
log.info('Checking for old test directory...')
testdir = misc.get_testdir(ctx)
processes = ctx.cluster.run(
args=[
'test', '!', '-e', testdir,
],
wait=False,
)
failed = False
for proc in processes:
try:
proc.wait()
except run.CommandFailedError:
log.error('Host %s has stale test directory %s, check lock and cleanup.', proc.remote.shortname, testdir)
failed = True
if failed:
raise RuntimeError('Stale jobs detected, aborting.')
def fetch_binaries_for_coredumps(path, remote):
"""
    Pull ELFs (debug and stripped) for each coredump found.
"""
# Check for Coredumps:
coredump_path = os.path.join(path, 'coredump')
if os.path.isdir(coredump_path):
log.info('Transferring binaries for coredumps...')
for dump in os.listdir(coredump_path):
# Pull program from core file
dump_path = os.path.join(coredump_path, dump)
dump_info = subprocess.Popen(['file', dump_path],
stdout=subprocess.PIPE)
            # communicate() returns a (stdout, stderr) tuple; we only need stdout
            dump_out = dump_info.communicate()[0]
            # Parse `file` output to get the program name. Example output:
# 1422917770.7450.core: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, \
# from 'radosgw --rgw-socket-path /home/ubuntu/cephtest/apache/tmp.client.0/fastcgi_soc'
dump_program = dump_out.split("from '")[1].split(' ')[0]
# Find path on remote server:
r = remote.run(args=['which', dump_program], stdout=StringIO())
remote_path = r.stdout.getvalue()
# Pull remote program into coredump folder:
remote._sftp_get_file(remote_path, os.path.join(coredump_path,
dump_program))
# Pull Debug symbols:
debug_path = os.path.join('/usr/lib/debug', remote_path)
            # RPM distros append .debug to their non-stripped ELFs,
            # whereas deb-based distros do not.
if remote.system_type == 'rpm':
debug_path = '{debug_path}.debug'.format(debug_path=debug_path)
remote.get_file(debug_path, coredump_path)
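# Illustrative sketch (hypothetical helper, not called anywhere): how the crashing
# program name is recovered from `file` output in fetch_binaries_for_coredumps() above.
def example_parse_core_program(file_output):
    """Extract the program name from output like the sample shown above."""
    # "... SVR4-style, from 'radosgw --rgw-socket-path /home/ubuntu/...'"
    return file_output.split("from '")[1].split(' ')[0]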
@contextlib.contextmanager
def archive(ctx, config):
"""
Handle the creation and deletion of the archive directory.
"""
log.info('Creating archive directory...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--', archive_dir,
],
wait=False,
)
)
try:
yield
except Exception:
# we need to know this below
set_status(ctx.summary, 'fail')
raise
finally:
passed = get_status(ctx.summary) == 'pass'
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and passed):
log.info('Transferring archived files...')
logdir = os.path.join(ctx.archive, 'remote')
if (not os.path.exists(logdir)):
os.mkdir(logdir)
for rem in ctx.cluster.remotes.iterkeys():
path = os.path.join(logdir, rem.shortname)
misc.pull_directory(rem, archive_dir, path)
# Check for coredumps and pull binaries
fetch_binaries_for_coredumps(path, rem)
log.info('Removing archive directory...')
run.wait(
ctx.cluster.run(
args=[
'rm',
'-rf',
'--',
archive_dir,
],
wait=False,
),
)
@contextlib.contextmanager
def sudo(ctx, config):
"""
Enable use of sudo
"""
log.info('Configuring sudo...')
sudoers_file = '/etc/sudoers'
backup_ext = '.orig.teuthology'
tty_expr = r's/^\([^#]*\) \(requiretty\)/\1 !\2/g'
pw_expr = r's/^\([^#]*\) !\(visiblepw\)/\1 \2/g'
run.wait(
ctx.cluster.run(
args="sudo sed -i{ext} -e '{tty}' -e '{pw}' {path}".format(
ext=backup_ext, tty=tty_expr, pw=pw_expr,
path=sudoers_file
),
wait=False,
)
)
try:
yield
finally:
log.info('Restoring {0}...'.format(sudoers_file))
ctx.cluster.run(
args="sudo mv -f {path}{ext} {path}".format(
path=sudoers_file, ext=backup_ext
)
)
@contextlib.contextmanager
def coredump(ctx, config):
"""
Stash a coredump of this system if an error occurs.
"""
log.info('Enabling coredump saving...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--',
'{adir}/coredump'.format(adir=archive_dir),
run.Raw('&&'),
'sudo', 'sysctl', '-w', 'kernel.core_pattern={adir}/coredump/%t.%p.core'.format(adir=archive_dir),
],
wait=False,
)
)
try:
yield
finally:
run.wait(
ctx.cluster.run(
args=[
'sudo', 'sysctl', '-w', 'kernel.core_pattern=core',
run.Raw('&&'),
# don't litter the archive dir if there were no cores dumped
'rmdir',
'--ignore-fail-on-non-empty',
'--',
'{adir}/coredump'.format(adir=archive_dir),
],
wait=False,
)
)
        # set status = 'fail' if the dir is still there, i.e. coredumps were
        # seen
for rem in ctx.cluster.remotes.iterkeys():
r = rem.run(
args=[
'if', 'test', '!', '-e', '{adir}/coredump'.format(adir=archive_dir), run.Raw(';'), 'then',
'echo', 'OK', run.Raw(';'),
'fi',
],
stdout=StringIO(),
)
if r.stdout.getvalue() != 'OK\n':
log.warning('Found coredumps on %s, flagging run as failed', rem)
set_status(ctx.summary, 'fail')
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = \
'Found coredumps on {rem}'.format(rem=rem)
@contextlib.contextmanager
def syslog(ctx, config):
"""
start syslog / stop syslog on exit.
"""
if ctx.archive is None:
# disable this whole feature if we're not going to archive the data anyway
yield
return
log.info('Starting syslog monitoring...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'mkdir', '-m0755', '--',
'{adir}/syslog'.format(adir=archive_dir),
],
wait=False,
)
)
CONF = '/etc/rsyslog.d/80-cephtest.conf'
conf_fp = StringIO('''
kern.* -{adir}/syslog/kern.log;RSYSLOG_FileFormat
*.*;kern.none -{adir}/syslog/misc.log;RSYSLOG_FileFormat
'''.format(adir=archive_dir))
try:
for rem in ctx.cluster.remotes.iterkeys():
misc.sudo_write_file(
remote=rem,
path=CONF,
data=conf_fp,
)
conf_fp.seek(0)
run.wait(
ctx.cluster.run(
args=[
'sudo',
'service',
# a mere reload (SIGHUP) doesn't seem to make
# rsyslog open the files
'rsyslog',
'restart',
],
wait=False,
),
)
yield
finally:
log.info('Shutting down syslog monitoring...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'rm',
'-f',
'--',
CONF,
run.Raw('&&'),
'sudo',
'service',
'rsyslog',
'restart',
],
wait=False,
),
)
# race condition: nothing actually says rsyslog had time to
# flush the file fully. oh well.
log.info('Checking logs for errors...')
for rem in ctx.cluster.remotes.iterkeys():
log.debug('Checking %s', rem.name)
r = rem.run(
args=[
'egrep', '--binary-files=text',
'\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b',
run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)),
run.Raw('|'),
'grep', '-v', 'task .* blocked for more than .* seconds',
run.Raw('|'),
'grep', '-v', 'lockdep is turned off',
run.Raw('|'),
'grep', '-v', 'trying to register non-static key',
run.Raw('|'),
'grep', '-v', 'DEBUG: fsize', # xfs_fsr
run.Raw('|'),
'grep', '-v', 'CRON', # ignore cron noise
run.Raw('|'),
'grep', '-v', 'BUG: bad unlock balance detected', # #6097
run.Raw('|'),
'grep', '-v', 'inconsistent lock state', # FIXME see #2523
run.Raw('|'),
'grep', '-v', '*** DEADLOCK ***', # part of lockdep output
run.Raw('|'),
'grep', '-v', 'INFO: possible irq lock inversion dependency detected', # FIXME see #2590 and #147
run.Raw('|'),
'grep', '-v', 'INFO: NMI handler (perf_event_nmi_handler) took too long to run',
run.Raw('|'),
'grep', '-v', 'INFO: recovery required on readonly',
run.Raw('|'),
'head', '-n', '1',
],
stdout=StringIO(),
)
stdout = r.stdout.getvalue()
if stdout != '':
log.error('Error in syslog on %s: %s', rem.name, stdout)
set_status(ctx.summary, 'fail')
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = \
"'{error}' in syslog".format(error=stdout)
log.info('Compressing syslogs...')
run.wait(
ctx.cluster.run(
args=[
'find',
'{adir}/syslog'.format(adir=archive_dir),
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'--',
],
wait=False,
),
)
def vm_setup(ctx, config):
"""
Look for virtual machines and handle their initialization
"""
all_tasks = [x.keys()[0] for x in ctx.config['tasks']]
need_chef = False
if 'chef' in all_tasks or 'kernel' in all_tasks:
need_chef = True
with parallel() as p:
editinfo = os.path.join(os.path.dirname(__file__),'edit_sudoers.sh')
for rem in ctx.cluster.remotes.iterkeys():
mname = rem.shortname
if misc.is_vm(mname):
r = rem.run(args=['test', '-e', '/ceph-qa-ready',],
stdout=StringIO(),
check_status=False,)
if r.returncode != 0:
p1 = subprocess.Popen(['cat', editinfo], stdout=subprocess.PIPE)
p2 = subprocess.Popen(
[
'ssh',
'-o', 'StrictHostKeyChecking=no',
'-t', '-t',
str(rem),
'sudo',
'sh'
],
stdin=p1.stdout, stdout=subprocess.PIPE
)
_, err = p2.communicate()
if err:
log.info("Edit of /etc/sudoers failed: %s", err)
if need_chef:
p.spawn(_download_and_run_chef, rem)
def _download_and_run_chef(remote_):
"""
Run ceph_qa_chef.
"""
log.info('Running ceph_qa_chef on %s', remote_)
remote_.run(
args=[
'wget', '-q', '-O-',
'http://ceph.com/git/?p=ceph-qa-chef.git;a=blob_plain;f=solo/solo-from-scratch;hb=HEAD',
run.Raw('|'),
'sh',
],
label="run chef solo-from-scratch"
)
|
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import abc
import cookielib
import datetime
import os
import re
import urllib
import urllib2
import urlparse
from salts_lib import cloudflare
from salts_lib import cf_captcha
import kodi
import log_utils # @UnusedImport
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import Q_ORDER
from salts_lib.constants import SHORT_MONS
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import DEFAULT_TIMEOUT
from salts_lib.db_utils import DB_Connection
from salts_lib.utils2 import i18n, ungz
try:
import urlresolver
except:
kodi.notify(msg=i18n('smu_failed'), duration=5000)
logger = log_utils.Logger.get_logger()
BASE_URL = ''
COOKIEPATH = kodi.translate_path(kodi.get_profile())
MONTHS = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
MAX_RESPONSE = 1024 * 1024 * 5
CF_CAPCHA_ENABLED = kodi.get_setting('cf_captcha') == 'true'
class ScrapeError(Exception):
pass
class NoRedirection(urllib2.HTTPErrorProcessor):
def http_response(self, request, response): # @UnusedVariable
logger.log('Stopping Redirect', log_utils.LOGDEBUG)
return response
https_response = http_response
abstractstaticmethod = abc.abstractmethod
class abstractclassmethod(classmethod):
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super(abstractclassmethod, self).__init__(callable)
class Scraper(object):
__metaclass__ = abc.ABCMeta
base_url = BASE_URL
__db_connection = None
worker_id = None
debrid_resolvers = None
row_pattern = '\s*<a\s+href="(?P<link>[^"]+)">(?P<title>[^<]+)</a>\s+(?P<date>\d+-[a-zA-Z]+-\d+ \d+:\d+)\s+(?P<size>-|\d+)'
def __init__(self, timeout=DEFAULT_TIMEOUT):
        # store the timeout so _http_get()/_cached_http_get() can use it;
        # concrete scrapers typically override __init__ and also set base_url
        self.timeout = timeout
@abstractclassmethod
def provides(cls):
"""
        Must return a list/set/frozenset of the VIDEO_TYPES supported by this scraper. It is a class method so that a scraper
        does not have to be instantiated to determine whether it is useful
        * The set or frozenset datatypes are preferred because membership checks are faster with sets
"""
raise NotImplementedError
@abstractclassmethod
def get_name(cls):
"""
        Must return a string name that will be used throughout the UI and DB to refer to urls from this source
        It should be descriptive enough to be recognized but short enough to be presented in the UI
"""
raise NotImplementedError
def resolve_link(self, link):
"""
Must return a string that is a urlresolver resolvable link given a link that this scraper supports
link: a url fragment associated with this site that can be resolved to a hoster link
        * The purpose is that many streaming sites provide the actual hoster link on a separate page from the link
        on the video page.
        * This method is called on the user-selected source before calling urlresolver on it.
"""
if link.startswith('//'):
return 'http:' + link
elif not link.startswith('http'):
return scraper_utils.urljoin(self.base_url, link)
else:
return link
def format_source_label(self, item):
"""
        Must return the string label to be used for this source in the "Choose Source" dialog
item: one element of the list that is returned from get_sources for this scraper
"""
label = '[%s]' % (item['quality'])
if '3D' in item and item['3D']:
label += ' (3D)'
if 'format' in item:
label += ' (%s)' % (item['format'])
if 'version' in item:
label += ' %s' % (item['version'])
label += ' %s' % (item['host'])
if 'views' in item and item['views'] is not None:
label += ' (%s views)' % (item['views'])
if 'rating' in item and item['rating'] is not None:
label += ' (%s/100)' % (item['rating'])
if 'size' in item:
label += ' (%s)' % (item['size'])
if 'subs' in item and item['subs']:
label += ' (%s)' % (item['subs'])
if 'extra' in item:
label += ' [%s]' % (item['extra'])
return label
@abc.abstractmethod
def get_sources(self, video):
"""
Must return a list of dictionaries that are potential link to hoster sites (or links to links to hoster sites)
Each dictionary must contain elements of at least:
* multi-part: True if this source is one part of a whole
* class: a reference to an instance of the scraper itself
* host: the hostname of the hoster
* url: the url that is a link to a hoster, or a link to a page that this scraper can resolve to a link to a hoster
* quality: one of the QUALITIES values, or None if unknown; users can sort sources by quality
        * views: count of the views from the site for this source, or None if unknown; users can sort sources by views
* rating: a value between 0 and 100; 0 being worst, 100 the best, or None if unknown. Users can sort sources by rating.
* direct: True if url is a direct link to a media file; False if not. If not present; assumption is direct
* other keys are allowed as needed if they would be useful (e.g. for format_source_label)
video is an object of type ScraperVideo:
video_type: one of VIDEO_TYPES for whatever the sources should be for
title: the title of the tv show or movie
year: the year of the tv show or movie
season: only present for tv shows; the season number of the video for which sources are requested
episode: only present for tv shows; the episode number of the video for which sources are requested
ep_title: only present for tv shows; the episode title if available
"""
raise NotImplementedError
def get_url(self, video):
"""
Must return a url for the site this scraper is associated with that is related to this video.
video is an object of type ScraperVideo:
video_type: one of VIDEO_TYPES this url is for (e.g. EPISODE urls might be different than TVSHOW urls)
title: the title of the tv show or movie
year: the year of the tv show or movie
season: only present for season or episode VIDEO_TYPES; the season number for the url being requested
episode: only present for season or episode VIDEO_TYPES; the episode number for the url being requested
ep_title: only present for tv shows; the episode title if available
* Generally speaking, domain should not be included
"""
return self._default_get_url(video)
@abc.abstractmethod
def search(self, video_type, title, year, season=''):
"""
Must return a list of results returned from the site associated with this scraper when doing a search using the input parameters
If it does return results, it must be a list of dictionaries. Each dictionary must contain at least the following:
* title: title of the result
* year: year of the result
* url: a url fragment that is the url on the site associated with this scraper for this season result item
video_type: one of the VIDEO_TYPES being searched for. Only tvshows and movies are expected generally
        title: the title being searched for
        year: the year being searched for
season: the season being searched for (only required if video_type == VIDEO_TYPES.SEASON)
* Method must be provided, but can raise NotImplementedError if search not available on the site
"""
raise NotImplementedError
@classmethod
def get_settings(cls):
"""
        Returns a list of settings to be used for this scraper. Settings are automatically checked for updates every time scrapers are imported.
        The list returned by each scraper is aggregated into one big settings.xml string; if it differs from the current settings xml in the Scrapers category,
        the existing settings.xml fragment is removed and replaced by the new string.
"""
name = cls.get_name()
return [
' <setting id="%s-enable" type="bool" label="%s %s" default="true" visible="true"/>' % (name, name, i18n('enabled')),
' <setting id="%s-base_url" type="text" label=" %s" default="%s" visible="eq(-1,true)"/>' % (name, i18n('base_url'), cls.base_url),
' <setting id="%s-sub_check" type="bool" label=" %s" default="true" visible="eq(-2,true)"/>' % (name, i18n('page_existence')),
]
@classmethod
def has_proxy(cls):
return False
def _default_get_url(self, video):
url = None
temp_video_type = video.video_type
if video.video_type == VIDEO_TYPES.EPISODE:
if VIDEO_TYPES.TVSHOW in self.provides():
temp_video_type = VIDEO_TYPES.TVSHOW
elif VIDEO_TYPES.SEASON in self.provides():
temp_video_type = VIDEO_TYPES.SEASON
season = video.season if temp_video_type == VIDEO_TYPES.SEASON else ''
if temp_video_type != VIDEO_TYPES.EPISODE:
result = self.db_connection().get_related_url(temp_video_type, video.title, video.year, self.get_name(), season)
if result:
url = result[0][0]
logger.log('Got local related url: |%s|%s|%s|%s|%s|%s|' % (temp_video_type, video.title, video.year, season, self.get_name(), url), log_utils.LOGDEBUG)
else:
results = self.search(temp_video_type, video.title, video.year, season)
if results:
url = results[0]['url']
self.db_connection().set_related_url(temp_video_type, video.title, video.year, self.get_name(), url, season)
if isinstance(url, unicode): url = url.encode('utf-8')
if video.video_type == VIDEO_TYPES.EPISODE:
if url == FORCE_NO_MATCH:
url = None
elif url or temp_video_type == VIDEO_TYPES.EPISODE:
result = self.db_connection().get_related_url(VIDEO_TYPES.EPISODE, video.title, video.year, self.get_name(), video.season, video.episode)
if result:
url = result[0][0]
if isinstance(url, unicode): url = url.encode('utf-8')
logger.log('Got local related url: |%s|%s|%s|' % (video, self.get_name(), url), log_utils.LOGDEBUG)
else:
url = self._get_episode_url(url, video)
if url:
self.db_connection().set_related_url(VIDEO_TYPES.EPISODE, video.title, video.year, self.get_name(), url, video.season, video.episode)
return url
def _http_get(self, url, params=None, data=None, multipart_data=None, headers=None, cookies=None, allow_redirect=True, method=None, require_debrid=False, read_error=False, cache_limit=8):
html = self._cached_http_get(url, self.base_url, self.timeout, params=params, data=data, multipart_data=multipart_data,
headers=headers, cookies=cookies, allow_redirect=allow_redirect, method=method, require_debrid=require_debrid,
read_error=read_error, cache_limit=cache_limit)
sucuri_cookie = scraper_utils.get_sucuri_cookie(html)
if sucuri_cookie:
logger.log('Setting sucuri cookie: %s' % (sucuri_cookie), log_utils.LOGDEBUG)
if cookies is not None:
                # dict.update() returns None; update the existing dict in place
                cookies.update(sucuri_cookie)
else:
cookies = sucuri_cookie
html = self._cached_http_get(url, self.base_url, self.timeout, params=params, data=data, multipart_data=multipart_data,
headers=headers, cookies=cookies, allow_redirect=allow_redirect, method=method, require_debrid=require_debrid,
read_error=read_error, cache_limit=0)
return html
def _cached_http_get(self, url, base_url, timeout, params=None, data=None, multipart_data=None, headers=None, cookies=None, allow_redirect=True,
method=None, require_debrid=False, read_error=False, cache_limit=8):
if require_debrid:
if Scraper.debrid_resolvers is None:
Scraper.debrid_resolvers = [resolver for resolver in urlresolver.relevant_resolvers() if resolver.isUniversal()]
if not Scraper.debrid_resolvers:
logger.log('%s requires debrid: %s' % (self.__module__, Scraper.debrid_resolvers), log_utils.LOGDEBUG)
return ''
if cookies is None: cookies = {}
if timeout == 0: timeout = None
if headers is None: headers = {}
if url.startswith('//'): url = 'http:' + url
referer = headers['Referer'] if 'Referer' in headers else base_url
if params:
if url == base_url and not url.endswith('/'):
url += '/'
parts = urlparse.urlparse(url)
if parts.query:
params.update(scraper_utils.parse_query(url))
url = urlparse.urlunparse((parts.scheme, parts.netloc, parts.path, parts.params, '', parts.fragment))
url += '?' + urllib.urlencode(params)
logger.log('Getting Url: %s cookie=|%s| data=|%s| extra headers=|%s|' % (url, cookies, data, headers), log_utils.LOGDEBUG)
if data is not None:
if isinstance(data, basestring):
data = data
else:
data = urllib.urlencode(data, True)
if multipart_data is not None:
headers['Content-Type'] = 'multipart/form-data; boundary=X-X-X'
data = multipart_data
_created, _res_header, html = self.db_connection().get_cached_url(url, data, cache_limit)
if html:
logger.log('Returning cached result for: %s' % (url), log_utils.LOGDEBUG)
return html
try:
self.cj = self._set_cookies(base_url, cookies)
if isinstance(url, unicode): url = url.encode('utf-8')
request = urllib2.Request(url, data=data)
headers = headers.copy()
request.add_header('User-Agent', scraper_utils.get_ua())
request.add_header('Accept', '*/*')
request.add_header('Accept-Encoding', 'gzip')
request.add_unredirected_header('Host', request.get_host())
if referer: request.add_unredirected_header('Referer', referer)
if 'Referer' in headers: del headers['Referer']
if 'Host' in headers: del headers['Host']
for key, value in headers.iteritems(): request.add_header(key, value)
self.cj.add_cookie_header(request)
if not allow_redirect:
opener = urllib2.build_opener(NoRedirection)
urllib2.install_opener(opener)
else:
opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
urllib2.install_opener(opener)
opener2 = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
urllib2.install_opener(opener2)
if method is not None: request.get_method = lambda: method.upper()
response = urllib2.urlopen(request, timeout=timeout)
self.cj.extract_cookies(response, request)
if kodi.get_setting('cookie_debug') == 'true':
logger.log('Response Cookies: %s - %s' % (url, scraper_utils.cookies_as_str(self.cj)), log_utils.LOGDEBUG)
self.cj._cookies = scraper_utils.fix_bad_cookies(self.cj._cookies)
self.cj.save(ignore_discard=True)
if not allow_redirect and (response.getcode() in [301, 302, 303, 307] or response.info().getheader('Refresh')):
if response.info().getheader('Refresh') is not None:
refresh = response.info().getheader('Refresh')
return refresh.split(';')[-1].split('url=')[-1]
else:
redir_url = response.info().getheader('Location')
if redir_url.startswith('='):
redir_url = redir_url[1:]
return redir_url
content_length = response.info().getheader('Content-Length', 0)
if int(content_length) > MAX_RESPONSE:
logger.log('Response exceeded allowed size. %s => %s / %s' % (url, content_length, MAX_RESPONSE), log_utils.LOGWARNING)
if method == 'HEAD':
return ''
else:
if response.info().get('Content-Encoding') == 'gzip':
html = ungz(response.read(MAX_RESPONSE))
else:
html = response.read(MAX_RESPONSE)
except urllib2.HTTPError as e:
if e.info().get('Content-Encoding') == 'gzip':
html = ungz(e.read(MAX_RESPONSE))
else:
html = e.read(MAX_RESPONSE)
if CF_CAPCHA_ENABLED and e.code == 403 and 'cf-captcha-bookmark' in html:
html = cf_captcha.solve(url, self.cj, scraper_utils.get_ua(), self.get_name())
if not html:
return ''
elif e.code == 503 and 'cf-browser-verification' in html:
html = cloudflare.solve(url, self.cj, scraper_utils.get_ua(), extra_headers=headers)
if not html:
return ''
else:
logger.log('Error (%s) during scraper http get: %s' % (str(e), url), log_utils.LOGWARNING)
if not read_error:
return ''
except Exception as e:
logger.log('Error (%s) during scraper http get: %s' % (str(e), url), log_utils.LOGWARNING)
return ''
self.db_connection().cache_url(url, html, data)
return html
def _set_cookies(self, base_url, cookies):
cookie_file = os.path.join(COOKIEPATH, '%s_cookies.lwp' % (self.get_name()))
cj = cookielib.LWPCookieJar(cookie_file)
try: cj.load(ignore_discard=True)
except: pass
if kodi.get_setting('cookie_debug') == 'true':
logger.log('Before Cookies: %s - %s' % (self, scraper_utils.cookies_as_str(cj)), log_utils.LOGDEBUG)
domain = urlparse.urlsplit(base_url).hostname
for key in cookies:
c = cookielib.Cookie(0, key, str(cookies[key]), port=None, port_specified=False, domain=domain, domain_specified=True,
domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=False, comment=None,
comment_url=None, rest={})
cj.set_cookie(c)
cj.save(ignore_discard=True)
if kodi.get_setting('cookie_debug') == 'true':
logger.log('After Cookies: %s - %s' % (self, scraper_utils.cookies_as_str(cj)), log_utils.LOGDEBUG)
return cj
def _default_get_episode_url(self, html, video, episode_pattern, title_pattern='', airdate_pattern=''):
logger.log('Default Episode Url: |%s|%s|' % (self.get_name(), video), log_utils.LOGDEBUG)
if not html: return
try: html = html[0].content
except AttributeError: pass
force_title = scraper_utils.force_title(video)
if not force_title:
if episode_pattern:
match = re.search(episode_pattern, html, re.DOTALL | re.I)
if match:
return scraper_utils.pathify_url(match.group(1))
if kodi.get_setting('airdate-fallback') == 'true' and airdate_pattern and video.ep_airdate:
airdate_pattern = airdate_pattern.replace('{year}', str(video.ep_airdate.year))
airdate_pattern = airdate_pattern.replace('{month}', str(video.ep_airdate.month))
airdate_pattern = airdate_pattern.replace('{p_month}', '%02d' % (video.ep_airdate.month))
airdate_pattern = airdate_pattern.replace('{month_name}', MONTHS[video.ep_airdate.month - 1])
airdate_pattern = airdate_pattern.replace('{short_month}', SHORT_MONS[video.ep_airdate.month - 1])
airdate_pattern = airdate_pattern.replace('{day}', str(video.ep_airdate.day))
airdate_pattern = airdate_pattern.replace('{p_day}', '%02d' % (video.ep_airdate.day))
logger.log('Air Date Pattern: %s' % (airdate_pattern), log_utils.LOGDEBUG)
match = re.search(airdate_pattern, html, re.DOTALL | re.I)
if match:
return scraper_utils.pathify_url(match.group(1))
else:
logger.log('Skipping S&E matching as title search is forced on: %s' % (video.trakt_id), log_utils.LOGDEBUG)
if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title and title_pattern:
norm_title = scraper_utils.normalize_title(video.ep_title)
for match in re.finditer(title_pattern, html, re.DOTALL | re.I):
episode = match.groupdict()
if norm_title == scraper_utils.normalize_title(episode['title']):
return scraper_utils.pathify_url(episode['url'])
def _blog_proc_results(self, html, post_pattern, date_format, video_type, title, year):
results = []
search_date = ''
search_sxe = ''
if video_type == VIDEO_TYPES.EPISODE:
match = re.search('(.*?)\s*(S\d+E\d+)\s*', title)
if match:
show_title, search_sxe = match.groups()
else:
match = re.search('(.*?)\s*(\d{4})[._ -]?(\d{2})[._ -]?(\d{2})\s*', title)
if match:
show_title, search_year, search_month, search_day = match.groups()
search_date = '%s-%s-%s' % (search_year, search_month, search_day)
search_date = scraper_utils.to_datetime(search_date, "%Y-%m-%d").date()
else:
show_title = title
else:
show_title = title
today = datetime.date.today()
for match in re.finditer(post_pattern, html, re.DOTALL):
post_data = match.groupdict()
post_title = post_data['post_title']
post_title = re.sub('<[^>]*>', '', post_title)
if 'quality' in post_data:
post_title += '- [%s]' % (post_data['quality'])
try: filter_days = int(kodi.get_setting('%s-filter' % (self.get_name())))
except ValueError: filter_days = 0
if filter_days and date_format and 'date' in post_data:
post_data['date'] = post_data['date'].strip()
filter_days = datetime.timedelta(days=filter_days)
post_date = scraper_utils.to_datetime(post_data['date'], date_format).date()
if not post_date:
logger.log('Failed date check in %s: |%s|%s|' % (self.get_name(), post_data['date'], date_format), log_utils.LOGWARNING)
post_date = today
if today - post_date > filter_days:
continue
match_year = ''
match_date = ''
match_sxe = ''
match_title = full_title = post_title
if video_type == VIDEO_TYPES.MOVIE:
meta = scraper_utils.parse_movie_link(post_title)
match_year = meta['year']
else:
meta = scraper_utils.parse_episode_link(post_title)
match_sxe = 'S%02dE%02d' % (int(meta['season']), int(meta['episode']))
match_date = meta['airdate']
match_title = meta['title']
full_title = '%s (%sp) [%s]' % (meta['title'], meta['height'], meta['extra'])
norm_title = scraper_utils.normalize_title(show_title)
match_norm_title = scraper_utils.normalize_title(match_title)
title_match = norm_title and (match_norm_title in norm_title or norm_title in match_norm_title)
year_match = not year or not match_year or year == match_year
sxe_match = not search_sxe or (search_sxe == match_sxe)
date_match = not search_date or (search_date == match_date)
logger.log('Blog Results: |%s|%s|%s| - |%s|%s|%s| - |%s|%s|%s| - |%s|%s|%s| (%s)' %
(match_norm_title, norm_title, title_match, year, match_year, year_match,
search_date, match_date, date_match, search_sxe, match_sxe, sxe_match, self.get_name()),
log_utils.LOGDEBUG)
if title_match and year_match and date_match and sxe_match:
quality = scraper_utils.height_get_quality(meta['height'])
result = {'url': scraper_utils.pathify_url(post_data['url']), 'title': scraper_utils.cleanse_title(full_title), 'year': match_year, 'quality': quality}
results.append(result)
return results
def _blog_get_url(self, video, delim='.'):
url = None
result = self.db_connection().get_related_url(video.video_type, video.title, video.year, self.get_name(), video.season, video.episode)
if result:
url = result[0][0]
logger.log('Got local related url: |%s|%s|%s|%s|%s|' % (video.video_type, video.title, video.year, self.get_name(), url), log_utils.LOGDEBUG)
else:
try: select = int(kodi.get_setting('%s-select' % (self.get_name())))
except: select = 0
if video.video_type == VIDEO_TYPES.EPISODE:
temp_title = re.sub('[^A-Za-z0-9 ]', '', video.title)
if not scraper_utils.force_title(video):
search_title = '%s S%02dE%02d' % (temp_title, int(video.season), int(video.episode))
if isinstance(video.ep_airdate, datetime.date):
fallback_search = '%s %s' % (temp_title, video.ep_airdate.strftime('%Y{0}%m{0}%d'.format(delim)))
else:
fallback_search = ''
else:
if not video.ep_title: return None
search_title = '%s %s' % (temp_title, video.ep_title)
fallback_search = ''
else:
search_title = video.title
fallback_search = ''
results = self.search(video.video_type, search_title, video.year)
if not results and fallback_search:
results = self.search(video.video_type, fallback_search, video.year)
if results:
# TODO: First result isn't always the most recent...
best_result = results[0]
if select != 0:
best_qorder = 0
for result in results:
if 'quality' in result:
quality = result['quality']
else:
match = re.search('\((\d+p)\)', result['title'])
if match:
quality = scraper_utils.height_get_quality(match.group(1))
else:
match = re.search('\[(.*)\]$', result['title'])
q_str = match.group(1) if match else ''
quality = scraper_utils.blog_get_quality(video, q_str, '')
logger.log('result: |%s|%s|%s|' % (result, quality, Q_ORDER[quality]), log_utils.LOGDEBUG)
if Q_ORDER[quality] > best_qorder:
logger.log('Setting best as: |%s|%s|%s|' % (result, quality, Q_ORDER[quality]), log_utils.LOGDEBUG)
best_result = result
best_qorder = Q_ORDER[quality]
url = best_result['url']
self.db_connection().set_related_url(video.video_type, video.title, video.year, self.get_name(), url, video.season, video.episode)
return url
def db_connection(self):
if self.__db_connection is None:
self.__db_connection = DB_Connection()
return self.__db_connection
def _get_cookies(self):
cj = self._set_cookies(self.base_url, {})
cookies = dict((cookie.name, cookie.value) for cookie in cj)
return cookies
def _get_stream_cookies(self):
cookies = ['%s=%s' % (key, value) for key, value in self._get_cookies().iteritems()]
return urllib.quote('; '.join(cookies))
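# Illustrative note (not from the original source): episode_pattern and airdate_pattern
# passed to _default_get_episode_url() are regexes whose first capture group is the
# relative episode url. A hypothetical airdate_pattern could look like:
#   'href="([^"]+)"[^>]*>\s*{year}-{p_month}-{p_day}'
# where the {year}/{p_month}/{p_day} tokens are replaced with the episode air date
# before the regex is applied (see the substitutions in _default_get_episode_url above).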
|
|
#!/usr/bin/env python
# TAMP-Project: pep.lib
# Version: 0.02
# Date: 2016-01-29
# Description: This module manages and manipulates the data produced by mwcs, storing it as arrays.
#              Available functions: min, max, std, spatial average, temporal average,
#              unit conversion, subtract, add (a usage sketch is appended at the end of this module).
# Author: Moris Pozzati
#
# ChangeLog: 2016-07-07 Added get_as_tiff()
#
#
import os
import numpy as np
import numpy.ma as ma
import gdal
from gdalconst import *
import subprocess
import random
import string
from ConfigParser import SafeConfigParser
import logging
remapper = os.path.dirname(os.path.realpath(__file__))+'/../bin/remap'
installation_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
class TampData:
def __init__(self,filename):
self.filename = filename
dataset = gdal.Open( filename, GA_ReadOnly )
if not dataset is None:
geotransform = dataset.GetGeoTransform()
if not geotransform is None:
self.pixel_size = geotransform[1]
band = dataset.GetRasterBand(1)
self.no_value = band.GetNoDataValue()
self.dataset = dataset
self.data = None
self.output_geotransform = geotransform
self.output_projection = dataset.GetProjection()
self.output_RasterXSize = dataset.RasterXSize
self.output_RasterYSize = dataset.RasterYSize
self.output_DataType = dataset.GetRasterBand(1).DataType
self.output_numBands = dataset.RasterCount
def __band_in_ds(self):
return self.dataset.RasterCount
def get_no_data_value(self):
return self.no_value
def get_pixel_size(self):
return self.pixel_size
def load_data(self):
band = self.dataset.GetRasterBand(1)
data = band.ReadAsArray()
data = data[:, :, np.newaxis]
for i in range(self.__band_in_ds()-1):
band = self.dataset.GetRasterBand(i+2)
data = np.dstack((data,band.ReadAsArray()))
self.data = ma.masked_equal(data,self.no_value)
data = None
band = None
def get_data(self):
return self.data
def __remap_bands(self,i,pixel_size,ur_lat,ll_lon,ll_lat,ur_lon):
#define tmp filename
filename_sl = os.path.splitext(self.filename)[0]+'_'+ str(i).zfill(2)+'.tif'
remapped_sl = os.path.splitext(self.filename)[0]+'_'+ str(i).zfill(2)+'_remapped.tif'
#extract band
band = self.dataset.GetRasterBand(i).ReadAsArray()
#create new tif
driver = gdal.GetDriverByName( 'GTiff' )
data_ds = driver.Create(filename_sl, self.dataset.RasterXSize , self.dataset.RasterYSize, 1, self.dataset.GetRasterBand(1).DataType, ['COMPRESS=LZW'])
data_ds.SetGeoTransform(self.dataset.GetGeoTransform())
data_ds.SetProjection(self.dataset.GetProjection())
band = self.dataset.GetRasterBand(i).ReadAsArray()
data_ds.GetRasterBand(1).WriteArray(band)
band = None
data_ds = None
#remap new tiff
FNULL = open(os.devnull, 'w')
subprocess.call([remapper,'-i',filename_sl,'-o',remapped_sl,'-l',str(ur_lat),str(ll_lon),'-e',str(ll_lat)+','+str(ur_lon),'-s',str(pixel_size),'-n',str(self.no_value)],stdout=FNULL,stderr=FNULL)
#load data from remapped band
remapped_ds = gdal.Open( remapped_sl, GA_ReadOnly )
band = remapped_ds.GetRasterBand(1)
self.output_geotransform = remapped_ds.GetGeoTransform()
self.output_projection = remapped_ds.GetProjection()
self.output_RasterXSize = remapped_ds.RasterXSize
self.output_RasterYSize = remapped_ds.RasterYSize
self.output_DataType = remapped_ds.GetRasterBand(1).DataType
if os.path.isfile(filename_sl):
os.remove(filename_sl)
if os.path.isfile(remapped_sl):
os.remove(remapped_sl)
return band.ReadAsArray()
def remap_in_grid(self,pixel_size,ur_lat,ll_lon,ll_lat,ur_lon):
data =self.__remap_bands(1,pixel_size,ur_lat,ll_lon,ll_lat,ur_lon)
data = data[:, :, np.newaxis]
for i in range(self.__band_in_ds()-1):
band = self.__remap_bands(i+2,pixel_size,ur_lat,ll_lon,ll_lat,ur_lon)
data = np.dstack((data,band))
self.data = ma.masked_equal(data,self.no_value)
return None
def __randomfilename(self):
#LOAD CONFIGURATION
try:
parser = SafeConfigParser()
parser.read("%s/../etc/pep_lib.ini" % installation_dir)
tmp_dir = parser.get("Fs","tmpdir")
except Exception as e:
logging.error( "I am unable to load configuration")
logging.error(str(e))
logging.error('Using default tmp dir /tmp/')
tmp_dir = '/tmp/'
return tmp_dir+''.join(random.choice(string.lowercase) for i in range(20))+'.tif'
def get_as_tiff(self,data = None):
#TODO: the data shape may now always have length 3
try:
if data is None:
data = self.data
# shape[2] is the number of bands
if len(data.shape) == 3:
nbands = data.shape[2]
else:
nbands = 1
driver = gdal.GetDriverByName( 'GTiff' )
data_ds_filename = self.__randomfilename()
data_ds = driver.Create(data_ds_filename, self.output_RasterXSize , self.output_RasterYSize, nbands, self.output_DataType)
# Use the stored output geotransform/projection so remapped data stays correctly georeferenced.
data_ds.SetGeoTransform(self.output_geotransform)
data_ds.SetProjection(self.output_projection)
if len(data.shape) == 3:
for i in range(nbands):
data_ds.GetRasterBand(i+1).WriteArray(data[:,:,i])
data_ds.GetRasterBand(i+1).SetNoDataValue(-9999)
else:
logging.debug(str(data.shape))
logging.debug(str(self.output_RasterXSize))
logging.debug(str(self.output_RasterYSize))
data_ds.GetRasterBand(1).WriteArray(data)
data_ds.GetRasterBand(1).SetNoDataValue(-9999)
data_ds = None
return data_ds_filename
except Exception as e:
logging.error("Unable to create geotiff")
logging.error(str(e))
raise
def min_data(tamp_data):
return np.ma.min(tamp_data.get_data())
def max_data(tamp_data):
return np.ma.max(tamp_data.get_data())
def std(tamp_data):
return ma.std(tamp_data.get_data())
def spatial_average(tamp_data):
return ma.average(tamp_data.get_data())
def temporal_average(tamp_data):
# Earlier revisions averaged over the band axis, e.g. ma.average(tamp_data.get_data(), axis=2).
# TODO: verify the number of bands.
# mwcs does not return multi-band tiffs, so tamp_data is assumed to already be a
# temporal average and is written out unchanged:
return tamp_data.get_as_tiff()
def unit_conversion(tamp_data,offset,gain):
return tamp_data.get_as_tiff(((tamp_data.get_data()+offset)*gain).filled())
def subtract(tamp_data_1,tamp_data_2,ur_lat,ll_lon,ll_lat,ur_lon):
pixel_size = min(tamp_data_1.get_pixel_size(),tamp_data_2.get_pixel_size())
tamp_data_1.remap_in_grid(pixel_size,ur_lat,ll_lon,ll_lat,ur_lon)
tamp_data_2.remap_in_grid(pixel_size,ur_lat,ll_lon,ll_lat,ur_lon)
return tamp_data_1.get_as_tiff((np.subtract(tamp_data_1.get_data(),tamp_data_2.get_data())).filled())
def add(tamp_data_1,tamp_data_2,ur_lat,ll_lon,ll_lat,ur_lon):
pixel_size = min(tamp_data_1.get_pixel_size(),tamp_data_2.get_pixel_size())
tamp_data_1.remap_in_grid(pixel_size,ur_lat,ll_lon,ll_lat,ur_lon)
tamp_data_2.remap_in_grid(pixel_size,ur_lat,ll_lon,ll_lat,ur_lon)
return tamp_data_1.get_as_tiff((np.add(tamp_data_1.get_data(),tamp_data_2.get_data())).filled())
def correlation(tamp_data_1,tamp_data_2,ur_lat,ll_lon,ll_lat,ur_lon):
logging.debug(tamp_data_1.output_RasterXSize)
logging.debug(tamp_data_2.output_RasterXSize)
pixel_size = min(tamp_data_1.get_pixel_size(),tamp_data_2.get_pixel_size())
logging.debug('Remap tamp_data_1')
tamp_data_1.remap_in_grid(pixel_size,ur_lat,ll_lon,ll_lat,ur_lon)
logging.debug('Remap tamp_data_2')
tamp_data_2.remap_in_grid(pixel_size,ur_lat,ll_lon,ll_lat,ur_lon)
mask = (tamp_data_1.get_data().mask == True) | (tamp_data_2.get_data().mask == True)
data_result = np.subtract(tamp_data_1.get_data()[:,:,0],tamp_data_2.get_data()[:,:,0])
data_result_masked = ma.masked_array(data_result,mask)
return data_result_masked.mean(), data_result_masked.std(), data_result_masked.min(), data_result_masked.max()
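# Usage sketch (illustrative only; 'input.tif' is a placeholder path, not part of
# this module):
#   td = TampData('input.tif')
#   td.load_data()
#   print min_data(td), max_data(td), spatial_average(td), std(td)
#   out_tiff = unit_conversion(td, offset=0, gain=1)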
|
|
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Accounting table. Stores aggregated usage metrics. The keys are prefixed
with the type of aggregation and metric stored:
- Per viewpoint: hash_key='vs:<vp_id>'
Aggregate sizes/counts per viewpoint, keyed by the viewpoint
id. Sort keys fall into three categories:
- owned by: 'ow:<user_id>' only found in default viewpoint.
- shared by: 'sb:<user_id>' in shared viewpoint, sum of all photos
in episodes owned by 'user_id'
- visible to: 'vt' in shared viewpoint, sum of all photos. not keyed
by user. a given user's "shared with" stats will be 'vt - sb:<user_id>',
but we do not want to keep per-user shared-by stats.
- Per user: hash_key='us:<user_id>'
Aggregate sizes/counts per user, keyed by user id. Sort keys are:
- owned by: 'ow' sum of all photos in default viewpoint
- shared by: 'sb' sum of all photos in shared viewpoints and episodes owned by this user
- visible to: 'vt' sum of all photos in shared viewpoint (includes 'sb'). to get the
real count of photos shared with this user but not shared by him, compute 'vt - sb:'
"""
__author__ = 'marc@emailscrubbed.com (Marc Berhault)'
from functools import partial
from tornado import gen
from viewfinder.backend.base import util
from viewfinder.backend.db import vf_schema
from viewfinder.backend.db.base import DBObject
from viewfinder.backend.db.db_client import DBKey
from viewfinder.backend.db.operation import Operation
from viewfinder.backend.db.photo import Photo
from viewfinder.backend.db.user import User
from viewfinder.backend.db.range_base import DBRangeObject
@DBObject.map_table_attributes
class Accounting(DBRangeObject):
"""Accounting object. Stores aggregated information. Currently stores
photo count and sizes per (viewpoint, episode) pair.
"""
# Maximum op ids to keep.
_MAX_APPLIED_OP_IDS = 10
# Types of accounting: each type has its own prefix used to build hash keys.
VIEWPOINT_SIZE = 'vs'
USER_SIZE = 'us'
# Categories for each type of accounting. Prefix is used to build sort keys.
OWNED_BY = 'ow'
SHARED_BY = 'sb'
VISIBLE_TO = 'vt'
_table = DBObject._schema.GetTable(vf_schema.ACCOUNTING)
def __init__(self, hash_key=None, sort_key=None):
"""Initialize a new Accounting object."""
super(Accounting, self).__init__()
self.hash_key = hash_key
self.sort_key = sort_key
self.op_ids = None
self._Reset()
def _Reset(self):
"""Reset counters to 0."""
self.num_photos = 0
self.tn_size = 0
self.med_size = 0
self.full_size = 0
self.orig_size = 0
def IncrementFromPhotoDict(self, photo_dict):
"""Increment counters with the photo stats."""
self.num_photos += 1
self.tn_size += photo_dict.get('tn_size', 0)
self.med_size += photo_dict.get('med_size', 0)
self.full_size += photo_dict.get('full_size', 0)
self.orig_size += photo_dict.get('orig_size', 0)
def IncrementFromPhotoDicts(self, photo_dicts):
"""Increment counters with the photo stats."""
for p in photo_dicts:
self.IncrementFromPhotoDict(p)
def IncrementFromPhoto(self, photo):
"""Increment counters with the photo stats."""
def _GetOrZero(val):
if val is not None:
return val
else:
return 0
self.num_photos += 1
self.tn_size += _GetOrZero(photo.tn_size)
self.med_size += _GetOrZero(photo.med_size)
self.full_size += _GetOrZero(photo.full_size)
self.orig_size += _GetOrZero(photo.orig_size)
def IncrementFromPhotos(self, photos):
"""Increment counters with the photo stats."""
for photo in photos:
self.IncrementFromPhoto(photo)
def DecrementFromPhotoDicts(self, photo_dicts):
"""Decrement counters with the photo stats."""
for p in photo_dicts:
self.num_photos -= 1
self.tn_size -= p.get('tn_size', 0)
self.med_size -= p.get('med_size', 0)
self.full_size -= p.get('full_size', 0)
self.orig_size -= p.get('orig_size', 0)
def DecrementFromPhotos(self, photos):
"""Decrement counters with the photo stats."""
def _GetOrZero(val):
if val is not None:
return val
else:
return 0
for p in photos:
self.num_photos -= 1
self.tn_size -= _GetOrZero(p.tn_size)
self.med_size -= _GetOrZero(p.med_size)
self.full_size -= _GetOrZero(p.full_size)
self.orig_size -= _GetOrZero(p.orig_size)
def CopyStatsFrom(self, accounting):
"""Copy the usage stats from another accounting object."""
self.num_photos = accounting.num_photos
self.tn_size = accounting.tn_size
self.med_size = accounting.med_size
self.full_size = accounting.full_size
self.orig_size = accounting.orig_size
def IncrementStatsFrom(self, accounting):
"""Increment stats by another accounting object."""
self.num_photos += accounting.num_photos
self.tn_size += accounting.tn_size
self.med_size += accounting.med_size
self.full_size += accounting.full_size
self.orig_size += accounting.orig_size
def DecrementStatsFrom(self, accounting):
"""Decrement stats by another accounting object."""
self.num_photos -= accounting.num_photos
self.tn_size -= accounting.tn_size
self.med_size -= accounting.med_size
self.full_size -= accounting.full_size
self.orig_size -= accounting.orig_size
def StatsEqual(self, accounting):
"""Return true if all stats match those in 'accounting'."""
return (self.num_photos == accounting.num_photos and
self.tn_size == accounting.tn_size and
self.med_size == accounting.med_size and
self.full_size == accounting.full_size and
self.orig_size == accounting.orig_size)
def IsZero(self):
return self.StatsEqual(Accounting())
def IsOpDuplicate(self, op_id):
"""Check whether the 'op_id' is in 'op_id_list_string'.
If it is, return true and leave the original list of op ids untouched. Otherwise,
add the op_id to the list, trim it to a max length of _MAX_APPLIED_OP_IDS = 10
and return false."""
ids = self.op_ids.split(',') if self.op_ids is not None else []
if op_id in ids:
return True
ids.append(op_id)
# Generate a comma-separated string of at most _MAX_APPLIED_OP_IDS elements.
self.op_ids = ','.join(ids[-self._MAX_APPLIED_OP_IDS:])
return False
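# Example (illustrative op ids): with self.op_ids == 'o1,o2', IsOpDuplicate('o2')
# returns True and leaves op_ids unchanged, while IsOpDuplicate('o3') returns False
# and sets op_ids to 'o1,o2,o3' (trimmed to the last _MAX_APPLIED_OP_IDS entries).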
@classmethod
def CreateUserOwnedBy(cls, user_id):
"""Create an accounting object (USER_SIZE:<user_id>, OWNED_BY)."""
return Accounting('%s:%d' % (Accounting.USER_SIZE, user_id), Accounting.OWNED_BY)
@classmethod
def CreateUserSharedBy(cls, user_id):
"""Create an accounting object (USER_SIZE:<user_id>, SHARED_BY)."""
return Accounting('%s:%d' % (Accounting.USER_SIZE, user_id), Accounting.SHARED_BY)
@classmethod
def CreateUserVisibleTo(cls, user_id):
"""Create an accounting object (USER_SIZE:<user_id>, VISIBLE_TO)."""
return Accounting('%s:%d' % (Accounting.USER_SIZE, user_id), Accounting.VISIBLE_TO)
@classmethod
def CreateViewpointOwnedBy(cls, viewpoint_id, user_id):
"""Create an accounting object (VIEWPOINT_SIZE:<vp_id>, OWNED_BY:<user_id>)."""
return Accounting('%s:%s' % (Accounting.VIEWPOINT_SIZE, viewpoint_id),
'%s:%d' % (Accounting.OWNED_BY, user_id))
@classmethod
def CreateViewpointSharedBy(cls, viewpoint_id, user_id):
"""Create an accounting object (VIEWPOINT_SIZE:<vp_id>, SHARED_BY:<user_id>)."""
return Accounting('%s:%s' % (Accounting.VIEWPOINT_SIZE, viewpoint_id),
'%s:%d' % (Accounting.SHARED_BY, user_id))
@classmethod
def CreateViewpointVisibleTo(cls, viewpoint_id):
"""Create an accounting object (VIEWPOINT_SIZE:<vp_id>, VISIBLE_TO)."""
return Accounting('%s:%s' % (Accounting.VIEWPOINT_SIZE, viewpoint_id),
Accounting.VISIBLE_TO)
@classmethod
def QueryViewpointSharedBy(cls, client, viewpoint_id, user_id, callback, must_exist=True):
"""Query for an accounting object (VIEWPOINT_SIZE:<vp_id>, SHARED_BY:<user_id>)."""
Accounting.Query(client,
Accounting.VIEWPOINT_SIZE + ':' + viewpoint_id,
Accounting.SHARED_BY + ':%d' % user_id,
None,
callback,
must_exist=must_exist)
@classmethod
def QueryViewpointVisibleTo(cls, client, viewpoint_id, callback, must_exist=True):
"""Query for an accounting object (VIEWPOINT_SIZE:<vp_id>, VISIBLE_TO)."""
Accounting.Query(client,
Accounting.VIEWPOINT_SIZE + ':' + viewpoint_id,
Accounting.VISIBLE_TO,
None,
callback,
must_exist=must_exist)
@classmethod
@gen.coroutine
def QueryUserAccounting(cls, client, user_id):
"""Query a single user's accounting entries. Returns an array of [owned_by, shared_by, visible_to] accounting
entries, any of which may be None (eg: if data was not properly populated).
"""
user_hash = '%s:%d' % (Accounting.USER_SIZE, user_id)
result = yield [gen.Task(Accounting.Query, client, user_hash, Accounting.OWNED_BY, None, must_exist=False),
gen.Task(Accounting.Query, client, user_hash, Accounting.SHARED_BY, None, must_exist=False),
gen.Task(Accounting.Query, client, user_hash, Accounting.VISIBLE_TO, None, must_exist=False)]
raise gen.Return(result)
@classmethod
def ApplyAccounting(cls, client, accounting, callback):
"""Apply an accounting object. This involves a query to fetch stats and applied op ids,
check that this operation has not been applied, increment of values and Update.
"""
op_id = Operation.GetCurrent().operation_id
assert op_id is not None, 'accounting update outside an operation'
def _OnException(accounting, type, value, traceback):
# Entry was modified between Query and Update. Rerun the entire method.
Accounting.ApplyAccounting(client, accounting, callback)
def _OnQueryAccounting(entry):
if entry is None:
# No previous entry. Set op_id and set replace to false.
# We can submit the accounting object directly since it has not been mutated.
accounting.op_ids = op_id
with util.Barrier(callback, on_exception=partial(_OnException, accounting)) as b:
accounting.Update(client, b.Callback(), replace=False)
else:
prev_op_ids = entry.op_ids
# Checks whether the op id has been applied and modifies op_ids accordingly.
found = entry.IsOpDuplicate(op_id)
if found:
# This operation has been applied: skip.
callback()
return
entry.IncrementStatsFrom(accounting)
# Entry exists: modify the object returned by Query and require that the op_ids
# field has not changed since. If the existing entry was created by dbchk, it will
# not have a op_ids field. Setting expected={'op_ids': None} is not equivalent to
# expected={'op_ids': False}.
with util.Barrier(callback, on_exception=partial(_OnException, accounting)) as b:
entry.Update(client, b.Callback(), expected={'op_ids': prev_op_ids or False})
Accounting.Query(client, accounting.hash_key, accounting.sort_key, None, _OnQueryAccounting, must_exist=False)
class AccountingAccumulator(object):
"""Facilitates collection and application of accounting deltas.
Typical usage involves calling into an Accounting method one or more times with one of these
accumulators and finally calling the Apply method to apply all of the accounting deltas.
"""
def __init__(self):
"""Initializes new AccountingAccumulator object."""
self.vp_ow_acc_dict = {}
self.vp_vt_acc_dict = {}
self.vp_sb_acc_dict = {}
self.us_ow_acc_dict = {}
self.us_vt_acc_dict = {}
self.us_sb_acc_dict = {}
def GetViewpointOwnedBy(self, viewpoint_id, user_id):
"""Returns the viewpoint owned_by accounting object for the given viewpoint and user."""
key = (viewpoint_id, user_id)
if key not in self.vp_ow_acc_dict:
self.vp_ow_acc_dict[key] = Accounting.CreateViewpointOwnedBy(viewpoint_id, user_id)
return self.vp_ow_acc_dict[key]
def GetViewpointVisibleTo(self, viewpoint_id):
"""Returns the viewpoint visible_to accounting for the given viewpoint."""
key = viewpoint_id
if key not in self.vp_vt_acc_dict:
self.vp_vt_acc_dict[key] = Accounting.CreateViewpointVisibleTo(viewpoint_id)
return self.vp_vt_acc_dict[key]
def GetViewpointSharedBy(self, viewpoint_id, user_id):
"""Returns the viewpoint shared_by accounting object for the given viewpoint and user."""
key = (viewpoint_id, user_id)
if key not in self.vp_sb_acc_dict:
self.vp_sb_acc_dict[key] = Accounting.CreateViewpointSharedBy(viewpoint_id, user_id)
return self.vp_sb_acc_dict[key]
def GetUserOwnedBy(self, user_id):
"""Returns the user owned_by accounting for the given user."""
key = user_id
if key not in self.us_ow_acc_dict:
self.us_ow_acc_dict[key] = Accounting.CreateUserOwnedBy(user_id)
return self.us_ow_acc_dict[key]
def GetUserVisibleTo(self, user_id):
"""Returns the user visible_to accounting for the given user."""
key = user_id
if key not in self.us_vt_acc_dict:
self.us_vt_acc_dict[key] = Accounting.CreateUserVisibleTo(user_id)
return self.us_vt_acc_dict[key]
def GetUserSharedBy(self, user_id):
"""Returns the user shared_by accounting for the given user."""
key = user_id
if key not in self.us_sb_acc_dict:
self.us_sb_acc_dict[key] = Accounting.CreateUserSharedBy(user_id)
return self.us_sb_acc_dict[key]
@gen.coroutine
def AddFollowers(self, client, viewpoint_id, new_follower_ids):
"""Add accounting changes caused by adding followers to a viewpoint. Each follower
user has VISIBLE_TO incremented by the size of the viewpoint VISIBLE_TO.
"""
if len(new_follower_ids) > 0:
# Query the viewpoint visible_to accounting. New followers' visible_to will be adjusted by this much.
vp_vt_acc = yield gen.Task(Accounting.QueryViewpointVisibleTo, client, viewpoint_id, must_exist=False)
if vp_vt_acc is not None:
# If the viewpoint has data, apply it to the new followers.
for follower_id in new_follower_ids:
self.GetUserVisibleTo(follower_id).CopyStatsFrom(vp_vt_acc)
@gen.coroutine
def MergeAccounts(self, client, viewpoint_id, target_user_id):
"""Add accounting changes caused by adding the given user as a follower of the viewpoint as
part of a merge accounts operation. Increments the target user's VISIBLE_TO by the size of the
viewpoint VISIBLE_TO.
"""
# Query the viewpoint visible_to accounting. The target user's visible_to will be adjusted by this much.
vp_vt_acc = yield gen.Task(Accounting.QueryViewpointVisibleTo, client, viewpoint_id, must_exist=False)
if vp_vt_acc is not None:
self.GetUserVisibleTo(target_user_id).IncrementStatsFrom(vp_vt_acc)
@gen.coroutine
def RemovePhotos(self, client, user_id, viewpoint_id, photo_ids):
"""Add accounting changes caused by removing photos from a user's default viewpoint.
- photo_ids: list of photos that were removed (caller should exclude the ids of any
photos that were already removed).
We need to query all photos for size information. Creates the following entries:
- (vs:<viewpoint>, ow:<user>): stats for user in default viewpoint.
- (us:<user>, ow): overall stats for user.
"""
photo_keys = [DBKey(photo_id, None) for photo_id in photo_ids]
photos = yield gen.Task(Photo.BatchQuery, client, photo_keys, None)
# Decrement owned by stats on both the viewpoint and the user.
self.GetViewpointOwnedBy(viewpoint_id, user_id).DecrementFromPhotos(photos)
# Don't recompute owned-by stats, just copy them from the viewpoint accounting object.
self.GetUserOwnedBy(user_id).CopyStatsFrom(self.GetViewpointOwnedBy(viewpoint_id, user_id))
@gen.coroutine
def RemoveViewpoint(self, client, user_id, viewpoint_id):
"""Generate and update accounting entries for a RemoveViewpoint event.
The user will never be removed from their default viewpoint.
This won't modify the viewpoint stats, but we will query them to determine how much to modify the user stats.
Query:
- (vs:<viewpoint_id>, vt)
- (vs:<viewpoint_id>, sb:<user_id>)
Creates the following entries:
- (us:<user_id>, vt): decrement by (vs:<viewpoint_id>, vt).
- (us:<user_id>, sb): decrement by (vs:<viewpoint_id>, sb:<user_id>).
"""
# Query the current visible_to and shared_by for the given user and viewpoint.
vp_vt, vp_sb = yield [gen.Task(Accounting.QueryViewpointVisibleTo, client, viewpoint_id, must_exist=False),
gen.Task(Accounting.QueryViewpointSharedBy, client, viewpoint_id, user_id, must_exist=False)]
# Decrease the associated user consumption by amounts that the user has associated with the viewpoint.
if vp_vt is not None:
self.GetUserVisibleTo(user_id).DecrementStatsFrom(vp_vt)
if vp_sb is not None:
self.GetUserSharedBy(user_id).DecrementStatsFrom(vp_sb)
@gen.coroutine
def ReviveFollowers(self, client, viewpoint_id, revive_follower_ids):
"""Add accounting changes caused by the revival of the given followers. These followers
had removed the viewpoint (which freed up quota), but now have access to it again. Each
follower has VISIBLE_TO incremented by the size of the viewpoint VISIBLE_TO, and SHARED_BY
incremented by the size of the corresponding viewpoint SHARED_BY.
"""
if len(revive_follower_ids) > 0:
# The VISIBLE_TO adjustment is identical to that done for the AddFollowers operation.
yield self.AddFollowers(client, viewpoint_id, revive_follower_ids)
# Now make the SHARED_BY adjustment.
vp_sb_acc_list = yield [gen.Task(Accounting.QueryViewpointSharedBy,
client,
viewpoint_id,
follower_id,
must_exist=False)
for follower_id in revive_follower_ids]
for follower_id, vp_sb_acc in zip(revive_follower_ids, vp_sb_acc_list):
if vp_sb_acc is not None:
self.GetUserSharedBy(follower_id).IncrementStatsFrom(vp_sb_acc)
@gen.coroutine
def SavePhotos(self, client, user_id, viewpoint_id, photo_ids):
"""Generate and update accounting entries for a SavePhotos event.
- photo_ids: list of *new* photos that were added (caller should exclude the ids of any
photos that already existed).
We need to query all photos for size information. Creates the following entries:
- (vs:<viewpoint>, ow:<user>): stats for user in default viewpoint.
- (us:<user>, ow): overall stats for user.
"""
photo_keys = [DBKey(photo_id, None) for photo_id in photo_ids]
photos = yield gen.Task(Photo.BatchQuery, client, photo_keys, None)
# Increment owned by stats on both the viewpoint and the user.
self.GetViewpointOwnedBy(viewpoint_id, user_id).IncrementFromPhotos(photos)
# Don't recompute owned-by stats, just copy them from the viewpoint accounting object.
self.GetUserOwnedBy(user_id).CopyStatsFrom(self.GetViewpointOwnedBy(viewpoint_id, user_id))
@gen.coroutine
def SharePhotos(self, client, sharer_id, viewpoint_id, photo_ids, follower_ids):
"""Generate and update accounting entries for a ShareNew or ShareExisting event.
- photo_ids: list of *new* photos that were added (caller should exclude the ids of any
photos that already existed).
- follower_ids: list of ids of all followers of the viewpoint, *including* the sharer
if it is not removed from the viewpoint.
We need to query all photos for size information. Creates the following entries:
- (vs:<viewpoint>, sb:<sharer>): sum across all new photos for the sharer
- (vs:<viewpoint>, vt): sum across all new photos
- (us:<sharer>, sb): sum across all new photos for the sharer
"""
photo_keys = [DBKey(photo_id, None) for photo_id in photo_ids]
photos = yield gen.Task(Photo.BatchQuery, client, photo_keys, None)
acc = Accounting()
acc.IncrementFromPhotos(photos)
# Viewpoint visible_to for viewpoint.
self.GetViewpointVisibleTo(viewpoint_id).IncrementStatsFrom(acc)
if sharer_id in follower_ids:
# Viewpoint shared_by for sharer.
self.GetViewpointSharedBy(viewpoint_id, sharer_id).IncrementStatsFrom(acc)
# User shared_by for sharer.
self.GetUserSharedBy(sharer_id).IncrementStatsFrom(acc)
# Viewpoint visible_to for followers.
for follower_id in follower_ids:
self.GetUserVisibleTo(follower_id).IncrementStatsFrom(acc)
@gen.coroutine
def Unshare(self, client, viewpoint, ep_dicts, followers):
"""Generate and update accounting entries for an Unshare event. Multiple episodes may be
impacted and multiple photos per episode.
- viewpoint: viewpoint that contains the episodes and photos in ep_dicts.
- ep_dicts: dict containing episode and photos ids: {ep_id0: [ph_id0, ph_id1], ep_id1: [ph_id2]}.
- followers: list of all followers of the viewpoint, *including* the sharer.
We need to look up all photos to fetch size information. Creates the following entries:
- (vs:<viewpoint>, sb:<user>): stats for episode owners in this viewpoint.
- (vs:<viewpoint>, vt): shared-with stats.
Creates/adjusts entries for user_accountings based on adjustments to this viewpoint:
- (us:<user>, sb): stats for episode owners.
- (us:<followers>, vt): stats for all followers of the viewpoint.
"""
from viewfinder.backend.db.episode import Episode
# Gather db keys for all episodes and photos.
episode_keys = []
photo_keys = []
for episode_id, photo_ids in ep_dicts.iteritems():
episode_keys.append(DBKey(episode_id, None))
for photo_id in photo_ids:
photo_keys.append(DBKey(photo_id, None))
# Query for all episodes and photos in parallel and in batches.
episodes, photos = yield [gen.Task(Episode.BatchQuery,
client,
episode_keys,
None,
must_exist=False),
gen.Task(Photo.BatchQuery,
client,
photo_keys,
None,
must_exist=False)]
viewpoint_id = viewpoint.viewpoint_id
user_id = viewpoint.user_id
ep_iter = iter(episodes)
ph_iter = iter(photos)
for episode_id, photo_ids in ep_dicts.iteritems():
unshare_episode = next(ep_iter)
acc = Accounting()
for photo_id in photo_ids:
acc.IncrementFromPhoto(next(ph_iter))
if viewpoint.IsDefault():
# Decrement owned by stats on both the viewpoint and the user.
self.GetViewpointOwnedBy(viewpoint_id, user_id).DecrementStatsFrom(acc)
# Don't recompute owned-by stats, just copy them from the viewpoint accounting object.
self.GetUserOwnedBy(user_id).CopyStatsFrom(self.GetViewpointOwnedBy(viewpoint_id, user_id))
else:
# Viewpoint shared_by for sharer.
self.GetViewpointSharedBy(viewpoint_id, unshare_episode.user_id).DecrementStatsFrom(acc)
# Viewpoint visible_to for viewpoint.
self.GetViewpointVisibleTo(viewpoint_id).DecrementStatsFrom(acc)
# User shared_by for sharer.
self.GetUserSharedBy(unshare_episode.user_id).DecrementStatsFrom(acc)
# Viewpoint visible_to for followers.
for follower in followers:
if not follower.IsRemoved():
self.GetUserVisibleTo(follower.user_id).DecrementStatsFrom(acc)
@gen.coroutine
def UploadEpisode(self, client, user_id, viewpoint_id, ph_dicts):
"""Generate and update accounting entries for an UploadEpisode event.
- ph_dicts: list of *new* photo dicts that were added (caller should exclude any photos
that already existed).
Creates the following entries:
- (vs:<viewpoint>, ow:<user>): stats for user in default viewpoint.
- (us:<user>, ow): overall stats for user.
"""
# Increment owned by stats on both the viewpoint and the user.
self.GetViewpointOwnedBy(viewpoint_id, user_id).IncrementFromPhotoDicts(ph_dicts)
# Don't recompute owned-by stats, just copy them from the viewpoint accounting object.
self.GetUserOwnedBy(user_id).CopyStatsFrom(self.GetViewpointOwnedBy(viewpoint_id, user_id))
@gen.coroutine
def Apply(self, client):
"""Applies all of the accounting deltas that have been collected in the accumulator."""
# Apply all of the collected user accounting entries.
tasks = []
for us_ow_acc in self.us_ow_acc_dict.values():
tasks.append(gen.Task(Accounting.ApplyAccounting, client, us_ow_acc))
for us_vt_acc in self.us_vt_acc_dict.values():
tasks.append(gen.Task(Accounting.ApplyAccounting, client, us_vt_acc))
for us_sb_acc in self.us_sb_acc_dict.values():
tasks.append(gen.Task(Accounting.ApplyAccounting, client, us_sb_acc))
yield tasks
# NOTE: It's important (for idempotency) to complete all user accounting updates before
# starting the viewpoint accounting updates because the removed follower deltas
# are derived from current values of the viewpoint accounting.
# Apply all of the collected viewpoint accounting entries.
tasks = []
for vp_ow_acc in self.vp_ow_acc_dict.values():
tasks.append(gen.Task(Accounting.ApplyAccounting, client, vp_ow_acc))
for vp_vt_acc in self.vp_vt_acc_dict.values():
tasks.append(gen.Task(Accounting.ApplyAccounting, client, vp_vt_acc))
for vp_sb_acc in self.vp_sb_acc_dict.values():
tasks.append(gen.Task(Accounting.ApplyAccounting, client, vp_sb_acc))
yield tasks
|
|
""" Cisco_IOS_XR_infra_infra_locale_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR infra\-infra\-locale package configuration.
This module contains definitions
for the following management objects\:
locale\: Define the geographical locale
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class LocaleCountryEnum(Enum):
"""
LocaleCountryEnum
Locale country
.. data:: AD = 1
Andorra
.. data:: AE = 2
United Arab Emirates
.. data:: AF = 3
Afghanistan
.. data:: AG = 4
Antigua and Barbuda
.. data:: AI = 5
Anguilla
.. data:: AL = 6
Albania
.. data:: AM = 7
Armenia
.. data:: AN = 8
Netherlands Antilles
.. data:: AO = 9
Angola
.. data:: AQ = 10
Antarctica
.. data:: AR = 11
Argentina
.. data:: AS = 12
American Samoa
.. data:: AT = 13
Austria
.. data:: AU = 14
Australia
.. data:: AW = 15
Aruba
.. data:: AZ = 16
Azerbaijan
.. data:: BA = 17
Bosnia and Herzegovina
.. data:: BB = 18
Barbados
.. data:: BD = 19
Bangladesh
.. data:: BE = 20
Belgium
.. data:: BF = 21
Burkina Faso
.. data:: BG = 22
Bulgaria
.. data:: BH = 23
Bahrain
.. data:: BI = 24
Burundi
.. data:: BJ = 25
Benin
.. data:: BM = 26
Bermuda
.. data:: BN = 27
Brunei Darussalam
.. data:: BO = 28
Bolivia
.. data:: BR = 29
Brazil
.. data:: BS = 30
Bahamas
.. data:: BT = 31
Bhutan
.. data:: BV = 32
Bouvet Island
.. data:: BW = 33
Botswana
.. data:: BY = 34
Belarus
.. data:: BZ = 35
Belize
.. data:: CA = 36
Canada
.. data:: CC = 37
Cocos (Keeling) Islands
.. data:: CD = 38
Congo, The Democratic Republic of the (Zaire)
.. data:: CF = 39
Central African Republic
.. data:: CG = 40
Congo
.. data:: CH = 41
Switzerland
.. data:: CI = 42
Cote D'Ivoire
.. data:: CK = 43
Cook Islands
.. data:: CL = 44
Chile
.. data:: CM = 45
Cameroon
.. data:: CN = 46
China
.. data:: CO = 47
Colombia
.. data:: CR = 48
Costa Rica
.. data:: CU = 49
Cuba
.. data:: CV = 50
Cape Verde
.. data:: CX = 51
Christmas Island
.. data:: CY = 52
Cyprus
.. data:: CZ = 53
Czech Republic
.. data:: DE = 54
Germany
.. data:: DJ = 55
Djibouti
.. data:: DK = 56
Denmark
.. data:: DM = 57
Dominica
.. data:: DO = 58
Dominican Republic
.. data:: DZ = 59
Algeria
.. data:: EC = 60
Ecuador
.. data:: EE = 61
Estonia
.. data:: EG = 62
Egypt
.. data:: EH = 63
Western Sahara
.. data:: ER = 64
Eritrea
.. data:: ES = 65
Spain
.. data:: ET = 66
Ethiopia
.. data:: FI = 67
Finland
.. data:: FJ = 68
Fiji
.. data:: FK = 69
Falkland Islands (Malvinas)
.. data:: FM = 70
Micronesia, Federated States of
.. data:: FO = 71
Faroe Islands
.. data:: FR = 72
France
.. data:: GA = 73
Gabon
.. data:: GB = 74
United Kingdom
.. data:: GD = 75
Grenada
.. data:: GE = 76
Georgia
.. data:: GF = 77
French Guiana
.. data:: GH = 78
Ghana
.. data:: GI = 79
Gibraltar
.. data:: GL = 80
Greenland
.. data:: GM = 81
Gambia
.. data:: GN = 82
Guinea
.. data:: GP = 83
Guadeloupe
.. data:: GQ = 84
Equatorial Guinea
.. data:: GR = 85
Greece
.. data:: GS = 86
South Georgia and the South Sandwich Islands
.. data:: GT = 87
Guatemala
.. data:: GU = 88
Guam
.. data:: GW = 89
Guinea Bissau
.. data:: GY = 90
Guyana
.. data:: HK = 91
Hong Kong
.. data:: HM = 92
Heard Island and McDonald Islands
.. data:: HN = 93
Honduras
.. data:: HR = 94
Croatia
.. data:: HT = 95
Haiti
.. data:: HU = 96
Hungary
.. data:: ID = 97
Indonesia
.. data:: IE = 98
Ireland
.. data:: IL = 99
Israel
.. data:: IN = 100
India
.. data:: IO = 101
British Indian Ocean Territory
.. data:: IQ = 102
Iraq
.. data:: IR = 103
Iran, Islamic Republic of
.. data:: IS = 104
Iceland
.. data:: IT = 105
Italy
.. data:: JM = 106
Jamaica
.. data:: JO = 107
Jordan
.. data:: JP = 108
Japan
.. data:: KE = 109
Kenya
.. data:: KG = 110
Kyrgyzstan
.. data:: KH = 111
Cambodia
.. data:: KI = 112
Kiribati
.. data:: KM = 113
Comoros
.. data:: KN = 114
Saint Kitts and Nevis
.. data:: KP = 115
Korea, Democratic People's Republic of
.. data:: KR = 116
Korea, Republic of
.. data:: KW = 117
Kuwait
.. data:: KY = 118
Cayman Islands
.. data:: KZ = 119
Kazakstan
.. data:: LA = 120
Lao People's Democratic Republic
.. data:: LB = 121
Lebanon
.. data:: LC = 122
Saint Lucia
.. data:: LI = 123
Liechtenstein
.. data:: LK = 124
Sri Lanka
.. data:: LR = 125
Liberia
.. data:: LS = 126
Lesotho
.. data:: LT = 127
Lithuania
.. data:: LU = 128
Luxembourg
.. data:: LV = 129
Latvia
.. data:: LY = 130
Libyan Arab Jamahiriya
.. data:: MA = 131
Morocco
.. data:: MC = 132
Monaco
.. data:: MD = 133
Moldova, Republic of
.. data:: MG = 134
Madagascar
.. data:: MH = 135
Marshall Islands
.. data:: MK = 136
Macedonia, The Former Yugoslav Republic of
.. data:: ML = 137
Mali
.. data:: MM = 138
Myanmar
.. data:: MN = 139
Mongolia
.. data:: MO = 140
Macau
.. data:: MP = 141
Northern Mariana Islands
.. data:: MQ = 142
Martinique
.. data:: MR = 143
Mauritania
.. data:: MS = 144
Montserrat
.. data:: MT = 145
Malta
.. data:: MU = 146
Mauritius
.. data:: MV = 147
Maldives
.. data:: MW = 148
Malawi
.. data:: MX = 149
Mexico
.. data:: MY = 150
Malaysia
.. data:: MZ = 151
Mozambique
.. data:: NA = 152
Namibia
.. data:: NC = 153
New Caledonia
.. data:: NE = 154
Niger
.. data:: NF = 155
Norfolk Island
.. data:: NG = 156
Nigeria
.. data:: NI = 157
Nicaragua
.. data:: NL = 158
Netherlands
.. data:: NO = 159
Norway
.. data:: NP = 160
Nepal
.. data:: NR = 161
Nauru
.. data:: NU = 162
Niue
.. data:: NZ = 163
New Zealand
.. data:: OM = 164
Oman
.. data:: PA = 165
Panama
.. data:: PE = 166
Peru
.. data:: PF = 167
French Polynesia
.. data:: PG = 168
Papua New Guinea
.. data:: PH = 169
Philippines
.. data:: PK = 170
Pakistan
.. data:: PL = 171
Poland
.. data:: PM = 172
Saint Pierre and Miquelon
.. data:: PN = 173
Pitcairn
.. data:: PR = 174
Puerto Rico
.. data:: PT = 175
Portugal
.. data:: PW = 176
Palau
.. data:: PY = 177
Paraguay
.. data:: QA = 178
Qatar
.. data:: RE = 179
Reunion
.. data:: RO = 180
Romania
.. data:: RU = 181
Russian Federation
.. data:: RW = 182
Rwanda
.. data:: SA = 183
Saudi Arabia
.. data:: SB = 184
Solomon Islands
.. data:: SC = 185
Seychelles
.. data:: SD = 186
Sudan
.. data:: SE = 187
Sweden
.. data:: SG = 188
Singapore
.. data:: SH = 189
Saint Helena
.. data:: SI = 190
Slovenia
.. data:: SJ = 191
Svalbard and Jan Mayen
.. data:: SK = 192
Slovakia
.. data:: SL = 193
Sierra Leone
.. data:: SM = 194
San Marino
.. data:: SN = 195
Senegal
.. data:: SO = 196
Somalia
.. data:: SR = 197
Suriname
.. data:: ST = 198
Sao Tome and Principe
.. data:: SV = 199
El Salvador
.. data:: SY = 200
Syrian Arab Republic
.. data:: SZ = 201
Swaziland
.. data:: TC = 202
Turks and Caicos Islands
.. data:: TD = 203
Chad
.. data:: TF = 204
French Southern Territories
.. data:: TG = 205
Togo
.. data:: TH = 206
Thailand
.. data:: TJ = 207
Tajikistan
.. data:: TK = 208
Tokelau
.. data:: TM = 209
Turkmenistan
.. data:: TN = 210
Tunisia
.. data:: TO = 211
Tonga
.. data:: TP = 212
East Timor
.. data:: TR = 213
Turkey
.. data:: TT = 214
Trinidad and Tobago
.. data:: TV = 215
Tuvalu
.. data:: TW = 216
Taiwan, Province of China
.. data:: TZ = 217
Tanzania, United Republic of
.. data:: UA = 218
Ukraine
.. data:: UG = 219
Uganda
.. data:: UM = 220
United States Minor Outlying Islands
.. data:: US = 221
United States
.. data:: UY = 222
Uruguay
.. data:: UZ = 223
Uzbekistan
.. data:: VA = 224
Holy See (Vatican City State)
.. data:: VC = 225
Saint Vincent and The Grenadines
.. data:: VE = 226
Venezuela
.. data:: VG = 227
Virgin Islands, British
.. data:: VI = 228
Virgin Islands, U.S.
.. data:: VN = 229
Vietnam
.. data:: VU = 230
Vanuatu
.. data:: WF = 231
Wallis and Futuna
.. data:: WS = 232
Samoa
.. data:: YE = 233
Yemen
.. data:: YT = 234
Mayotte
.. data:: YU = 235
Yugoslavia
.. data:: ZA = 236
South Africa
.. data:: ZM = 237
Zambia
.. data:: ZW = 238
Zimbabwe
"""
AD = 1
AE = 2
AF = 3
AG = 4
AI = 5
AL = 6
AM = 7
AN = 8
AO = 9
AQ = 10
AR = 11
AS = 12
AT = 13
AU = 14
AW = 15
AZ = 16
BA = 17
BB = 18
BD = 19
BE = 20
BF = 21
BG = 22
BH = 23
BI = 24
BJ = 25
BM = 26
BN = 27
BO = 28
BR = 29
BS = 30
BT = 31
BV = 32
BW = 33
BY = 34
BZ = 35
CA = 36
CC = 37
CD = 38
CF = 39
CG = 40
CH = 41
CI = 42
CK = 43
CL = 44
CM = 45
CN = 46
CO = 47
CR = 48
CU = 49
CV = 50
CX = 51
CY = 52
CZ = 53
DE = 54
DJ = 55
DK = 56
DM = 57
DO = 58
DZ = 59
EC = 60
EE = 61
EG = 62
EH = 63
ER = 64
ES = 65
ET = 66
FI = 67
FJ = 68
FK = 69
FM = 70
FO = 71
FR = 72
GA = 73
GB = 74
GD = 75
GE = 76
GF = 77
GH = 78
GI = 79
GL = 80
GM = 81
GN = 82
GP = 83
GQ = 84
GR = 85
GS = 86
GT = 87
GU = 88
GW = 89
GY = 90
HK = 91
HM = 92
HN = 93
HR = 94
HT = 95
HU = 96
ID = 97
IE = 98
IL = 99
IN = 100
IO = 101
IQ = 102
IR = 103
IS = 104
IT = 105
JM = 106
JO = 107
JP = 108
KE = 109
KG = 110
KH = 111
KI = 112
KM = 113
KN = 114
KP = 115
KR = 116
KW = 117
KY = 118
KZ = 119
LA = 120
LB = 121
LC = 122
LI = 123
LK = 124
LR = 125
LS = 126
LT = 127
LU = 128
LV = 129
LY = 130
MA = 131
MC = 132
MD = 133
MG = 134
MH = 135
MK = 136
ML = 137
MM = 138
MN = 139
MO = 140
MP = 141
MQ = 142
MR = 143
MS = 144
MT = 145
MU = 146
MV = 147
MW = 148
MX = 149
MY = 150
MZ = 151
NA = 152
NC = 153
NE = 154
NF = 155
NG = 156
NI = 157
NL = 158
NO = 159
NP = 160
NR = 161
NU = 162
NZ = 163
OM = 164
PA = 165
PE = 166
PF = 167
PG = 168
PH = 169
PK = 170
PL = 171
PM = 172
PN = 173
PR = 174
PT = 175
PW = 176
PY = 177
QA = 178
RE = 179
RO = 180
RU = 181
RW = 182
SA = 183
SB = 184
SC = 185
SD = 186
SE = 187
SG = 188
SH = 189
SI = 190
SJ = 191
SK = 192
SL = 193
SM = 194
SN = 195
SO = 196
SR = 197
ST = 198
SV = 199
SY = 200
SZ = 201
TC = 202
TD = 203
TF = 204
TG = 205
TH = 206
TJ = 207
TK = 208
TM = 209
TN = 210
TO = 211
TP = 212
TR = 213
TT = 214
TV = 215
TW = 216
TZ = 217
UA = 218
UG = 219
UM = 220
US = 221
UY = 222
UZ = 223
VA = 224
VC = 225
VE = 226
VG = 227
VI = 228
VN = 229
VU = 230
WF = 231
WS = 232
YE = 233
YT = 234
YU = 235
ZA = 236
ZM = 237
ZW = 238
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_infra_locale_cfg as meta
return meta._meta_table['LocaleCountryEnum']
class LocaleLanguageEnum(Enum):
"""
LocaleLanguageEnum
Locale language
.. data:: AA = 1
Afar
.. data:: AB = 2
Abkhazian
.. data:: AF = 3
Afrikaans
.. data:: AM = 4
Amharic
.. data:: AR = 5
Arabic
.. data:: AS = 6
Assamese
.. data:: AY = 7
Aymara
.. data:: AZ = 8
Azerbaijani
.. data:: BA = 9
Bashkir
.. data:: BE = 10
Byelorussian
.. data:: BG = 11
Bulgarian
.. data:: BH = 12
Bihari
.. data:: BI = 13
Bislama
.. data:: BN = 14
Bengali
.. data:: BO = 15
Tibetan
.. data:: BR = 16
Breton
.. data:: CA = 17
Catalan
.. data:: CO = 18
Corsican
.. data:: CS = 19
Czech
.. data:: CY = 20
Welsh
.. data:: DA = 21
Danish
.. data:: DE = 22
German
.. data:: DZ = 23
Bhutani
.. data:: EL = 24
Greek
.. data:: EN = 25
English
.. data:: EO = 26
Esperanto
.. data:: ES = 27
Spanish
.. data:: ET = 28
Estonian
.. data:: EU = 29
Basque
.. data:: FA = 30
Persian
.. data:: FI = 31
Finnish
.. data:: FJ = 32
Fiji
.. data:: FO = 33
Faroese
.. data:: FR = 34
French
.. data:: FY = 35
Frisian
.. data:: GA = 36
Irish
.. data:: GD = 37
Scots Gaelic
.. data:: GL = 38
Galician
.. data:: GN = 39
Guarani
.. data:: GU = 40
Gujarati
.. data:: HA = 41
Hausa
.. data:: HE = 42
Hebrew
.. data:: HI = 43
Hindi
.. data:: HR = 44
Croatian
.. data:: HU = 45
Hungarian
.. data:: HY = 46
Armenian
.. data:: IA = 47
Interlingua
.. data:: ID = 48
Indonesian
.. data:: IE = 49
Interlingue
.. data:: IK = 50
Inupiak
.. data:: IS = 51
Icelandic
.. data:: IT = 52
Italian
.. data:: IU = 53
Inuktitut
.. data:: JA = 54
Japanese
.. data:: JW = 55
Javanese
.. data:: KA = 56
Georgian
.. data:: KK = 57
Kazakh
.. data:: KL = 58
Greenlandic
.. data:: KM = 59
Cambodian
.. data:: KN = 60
Kannada
.. data:: KO = 61
Korean
.. data:: KS = 62
Kashmiri
.. data:: KU = 63
Kurdish
.. data:: KY = 64
Kirghiz
.. data:: LA = 65
Latin
.. data:: LN = 66
Lingala
.. data:: LO = 67
Laothian
.. data:: LT = 68
Lithuanian
.. data:: LV = 69
Latvian, Lettish
.. data:: MG = 70
Malagasy
.. data:: MI = 71
Maori
.. data:: MK = 72
Macedonian
.. data:: ML = 73
Malayalam
.. data:: MN = 74
Mongolian
.. data:: MO = 75
Moldavian
.. data:: MR = 76
Marathi
.. data:: MS = 77
Malay
.. data:: MT = 78
Maltese
.. data:: MY = 79
Burmese
.. data:: NA = 80
Nauru
.. data:: NE = 81
Nepali
.. data:: NL = 82
Dutch
.. data:: NO = 83
Norwegian
.. data:: OC = 84
Occitan
.. data:: OM = 85
(Afan) Oromo
.. data:: OR = 86
Oriya
.. data:: PA = 87
Punjabi
.. data:: PL = 88
Polish
.. data:: PS = 89
Pashto, Pushto
.. data:: PT = 90
Portuguese
.. data:: QU = 91
Quechua
.. data:: RM = 92
Rhaeto Romance
.. data:: RN = 93
Kirundi
.. data:: RO = 94
Romanian
.. data:: RU = 95
Russian
.. data:: RW = 96
Kinyarwanda
.. data:: SA = 97
Sanskrit
.. data:: SD = 98
Sindhi
.. data:: SG = 99
Sangho
.. data:: SH = 100
Serbo Croatian
.. data:: SI = 101
Sinhalese
.. data:: SK = 102
Slovak
.. data:: SL = 103
Slovenian
.. data:: SM = 104
Samoan
.. data:: SN = 105
Shona
.. data:: SO = 106
Somali
.. data:: SQ = 107
Albanian
.. data:: SR = 108
Serbian
.. data:: SS = 109
Siswati
.. data:: ST = 110
Sesotho
.. data:: SU = 111
Sundanese
.. data:: SV = 112
Swedish
.. data:: SW = 113
Swahili
.. data:: TA = 114
Tamil
.. data:: TE = 115
Telugu
.. data:: TG = 116
Tajik
.. data:: TH = 117
Thai
.. data:: TI = 118
Tigrinya
.. data:: TK = 119
Turkmen
.. data:: TL = 120
Tagalog
.. data:: TN = 121
Setswana
.. data:: TO = 122
Tonga
.. data:: TR = 123
Turkish
.. data:: TS = 124
Tsonga
.. data:: TT = 125
Tatar
.. data:: TW = 126
Twi
.. data:: UG = 127
Uighur
.. data:: UK = 128
Ukrainian
.. data:: UR = 129
Urdu
.. data:: UZ = 130
Uzbek
.. data:: VI = 131
Vietnamese
.. data:: VO = 132
Volapuk
.. data:: WO = 133
Wolof
.. data:: XH = 134
Xhosa
.. data:: YI = 135
Yiddish
.. data:: YO = 136
Yoruba
.. data:: ZA = 137
Zhuang
.. data:: ZH = 138
Chinese
.. data:: ZU = 139
Zulu
"""
AA = 1
AB = 2
AF = 3
AM = 4
AR = 5
AS = 6
AY = 7
AZ = 8
BA = 9
BE = 10
BG = 11
BH = 12
BI = 13
BN = 14
BO = 15
BR = 16
CA = 17
CO = 18
CS = 19
CY = 20
DA = 21
DE = 22
DZ = 23
EL = 24
EN = 25
EO = 26
ES = 27
ET = 28
EU = 29
FA = 30
FI = 31
FJ = 32
FO = 33
FR = 34
FY = 35
GA = 36
GD = 37
GL = 38
GN = 39
GU = 40
HA = 41
HE = 42
HI = 43
HR = 44
HU = 45
HY = 46
IA = 47
ID = 48
IE = 49
IK = 50
IS = 51
IT = 52
IU = 53
JA = 54
JW = 55
KA = 56
KK = 57
KL = 58
KM = 59
KN = 60
KO = 61
KS = 62
KU = 63
KY = 64
LA = 65
LN = 66
LO = 67
LT = 68
LV = 69
MG = 70
MI = 71
MK = 72
ML = 73
MN = 74
MO = 75
MR = 76
MS = 77
MT = 78
MY = 79
NA = 80
NE = 81
NL = 82
NO = 83
OC = 84
OM = 85
OR = 86
PA = 87
PL = 88
PS = 89
PT = 90
QU = 91
RM = 92
RN = 93
RO = 94
RU = 95
RW = 96
SA = 97
SD = 98
SG = 99
SH = 100
SI = 101
SK = 102
SL = 103
SM = 104
SN = 105
SO = 106
SQ = 107
SR = 108
SS = 109
ST = 110
SU = 111
SV = 112
SW = 113
TA = 114
TE = 115
TG = 116
TH = 117
TI = 118
TK = 119
TL = 120
TN = 121
TO = 122
TR = 123
TS = 124
TT = 125
TW = 126
UG = 127
UK = 128
UR = 129
UZ = 130
VI = 131
VO = 132
WO = 133
XH = 134
YI = 135
YO = 136
ZA = 137
ZH = 138
ZU = 139
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_infra_locale_cfg as meta
return meta._meta_table['LocaleLanguageEnum']
class Locale(object):
"""
Define the geographical locale
.. attribute:: country
Name of country locale
**type**\: :py:class:`LocaleCountryEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_infra_locale_cfg.LocaleCountryEnum>`
.. attribute:: language
Name of language locale
**type**\: :py:class:`LocaleLanguageEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_infra_locale_cfg.LocaleLanguageEnum>`
"""
_prefix = 'infra-infra-locale-cfg'
_revision = '2015-11-09'
def __init__(self):
self.country = None
self.language = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-infra-locale-cfg:locale'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.country is not None:
return True
if self.language is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_infra_locale_cfg as meta
return meta._meta_table['Locale']['meta_info']
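# Illustrative sketch (not part of the generated bindings): configuring the locale
# object would look roughly like
#   locale = Locale()
#   locale.country = LocaleCountryEnum.US
#   locale.language = LocaleLanguageEnum.EN
# after which the object can be pushed to the device, e.g. via ydk's CRUD service.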
|
|
# pylint: disable-msg=E1101,W0613,W0603
from __future__ import unicode_literals
from __future__ import division
import os
import sys
import requests
import pandas
from pandas.compat import StringIO, bytes_to_str, binary_type
from pandas.util.decorators import Appender
from pyopendata.util import network
_shared_docs = dict()
_base_doc_kwargs = dict(resource_klass='DataResource')
class DataResource(pandas.core.base.StringMixin):
"""
Represents a data resource available at the given URL
"""
# url being used as default / static
_url = None
# kwargs which should be stored as instance properties
_attrs = []
# instance properties used as cache (overwritten by inherited classes)
_cache_attrs = []
_chunk_size = 1024 * 1024
def __init__(self, format=None, id=None, name=None, url=None, proxies=None,
size=None, **kwargs):
if isinstance(format, pandas.compat.string_types):
format = format.strip().upper()
self.format = format
self.id = id
self.name = name
self.url = self._normalize_url(url)
self.proxies = proxies
self.size = size
for attr in self._attrs:
value = kwargs.pop(attr, None)
setattr(self, attr, value)
# cache for raw content
self._raw_content = None
self._initialize_attrs(self)
self.kwargs = kwargs
def __unicode__(self):
return '{0} ({1})'.format(self.__class__.__name__, self.url)
@classmethod
def _initialize_attrs(cls, obj):
"""
Initialize object with class attributes
"""
# initialize cache attrs
for attr in cls._cache_attrs:
setattr(obj, attr, None)
return obj
@classmethod
def _normalize_url(cls, url):
if url is None:
return cls._url
elif url.endswith('/'):
# remove final slash to handle sitename and filename commonly
return url[:-1]
else:
return url
def _requests_get(self, action='', params=None, url=None):
"""
Internal requests.get to handle proxy
"""
if url is None:
url = self.url
response = requests.get(url + action, params=params, proxies=self.proxies)
return response
_shared_docs['read'] = (
"""Read data from resource
Parameters
----------
raw : bool, default False
If False, return pandas.DataFrame. If True, return raw data
kwargs:
Keywords passed to pandas.read_xxx function
Returns
-------
data : pandas.DataFrame or requests.raw.data
Notes
-----
- Depending on the target format, parsing to ``pandas.DataFrame`` may fail.
Use ``raw=True`` to get raw data in such cases.
""")
@Appender(_shared_docs['read'])
def read(self, raw=False, **kwargs):
if raw:
content = self._read_raw(**kwargs)
return content.getvalue()
else:
return self._read(**kwargs)
def _read(self):
raise NotImplementedError
def _read_raw(self, **kwargs):
if self._raw_content is None:
response = self._requests_get()
content_length = response.headers.get('content-length')
out = StringIO()
try:
content_length = int(content_length)
pb = network.ProgressBar(total=content_length)
for chunk in response.iter_content(self._chunk_size):
if chunk:
out.write(chunk)
pb.update(self._chunk_size)
self._raw_content = out
except Exception as e:
# Fall back to reading the whole response when content-length is missing
# or the chunked download fails.
if isinstance(response.content, binary_type):
out.write(bytes_to_str(response.content))
else:
out.write(response.content)
self._raw_content = out
return self._raw_content
class DataStore(DataResource):
_connection_errors = (requests.exceptions.ConnectionError, ValueError)
_cache_attrs = ['_datasets']
def __new__(cls, kind_or_url=None, proxies=None):
from pyopendata.ckan import CKANStore
from pyopendata.eurostat import EurostatStore
from pyopendata.oecd import OECDStore
from pyopendata.undata import UNdataStore
from pyopendata.worldbank import WorldBankStore
if kind_or_url == 'oecd' or cls is OECDStore:
return OECDStore._initialize(proxies=proxies)
elif kind_or_url == 'eurostat' or cls is EurostatStore:
return EurostatStore._initialize(proxies=proxies)
elif kind_or_url == 'undata' or cls is UNdataStore:
return UNdataStore._initialize(proxies=proxies)
elif kind_or_url == 'worldbank' or cls is WorldBankStore:
return WorldBankStore._initialize(proxies=proxies)
elif cls is CKANStore:
# skip validation if initialized with CKANStore directly
store = CKANStore._initialize(url=kind_or_url, proxies=proxies)
return store
else:
store = CKANStore._initialize(url=kind_or_url, proxies=proxies)
if store.is_valid():
return store
raise ValueError('Unable to initialize DataStore with {0}'.format(kind_or_url))
def __init__(self, kind_or_url=None, proxies=None):
# handle with __new__
pass
@classmethod
def _initialize(cls, url=None, proxies=None):
obj = object.__new__(cls)
obj.url = cls._normalize_url(url)
obj.proxies = proxies
obj = cls._initialize_attrs(obj)
return obj
_shared_docs['is_valid'] = (
"""Check whether the site has valid API.
Returns
-------
is_valid : bool
""")
@Appender(_shared_docs['is_valid'])
def is_valid(self):
return True
_shared_docs['get'] = (
"""Get resource by resource_id.
Parameters
----------
resource_id : str
id to specify resource
Returns
-------
result : %(resource_klass)s
""")
@Appender(_shared_docs['get'] % _base_doc_kwargs)
def get(self, name):
raise NotImplementedError
_shared_docs['search'] = (
"""Search resources by search_string.
Parameters
----------
search storing : str
keyword to search
Returns
-------
result : list of %(resource_klass)s
""")
@Appender(_shared_docs['search'] % _base_doc_kwargs)
def search(self, search_string):
raise NotImplementedError
@property
def datasets(self):
raise NotImplementedError
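# A minimal usage sketch (not part of the original module; the CKAN URL below
# is illustrative). DataStore.__new__ dispatches on its first argument: a known
# kind string ('oecd', 'eurostat', 'undata', 'worldbank') returns the matching
# store, while any other value is treated as a CKAN site URL and validated
# over the network via is_valid().
def _example_datastore_usage():
    oecd = DataStore('oecd')
    ckan = DataStore('http://catalog.data.gov')
    return oecd, ckan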
|
|
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import unittest
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from testlib import random_string
from systestlib import DutSystemTest
class TestApiSystem(DutSystemTest):
def test_get(self):
for dut in self.duts:
dut.config('default hostname')
resp = dut.api('system').get()
keys = ['hostname', 'iprouting', 'banner_motd', 'banner_login']
self.assertEqual(sorted(keys), sorted(resp.keys()))
def test_get_with_period(self):
for dut in self.duts:
dut.config('hostname host.domain.net')
response = dut.api('system').get()
self.assertEqual(response['hostname'], 'host.domain.net')
def test_get_check_hostname(self):
for dut in self.duts:
dut.config('hostname teststring')
response = dut.api('system').get()
self.assertEqual(response['hostname'], 'teststring')
def test_get_check_banners(self):
for dut in self.duts:
motd_banner_value = random_string() + "\n"
login_banner_value = random_string() + "\n"
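# banners are multiline commands, so they are sent as dict(cmd=..., input=...)
# with the banner text supplied through the command's input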
dut.config([dict(cmd="banner motd", input=motd_banner_value)])
dut.config([dict(cmd="banner login", input=login_banner_value)])
resp = dut.api('system').get()
self.assertEqual(resp['banner_login'], login_banner_value.rstrip())
self.assertEqual(resp['banner_motd'], motd_banner_value.rstrip())
def test_get_banner_with_EOF(self):
for dut in self.duts:
motd_banner_value = '!!!newlinebaner\nSecondLIneEOF!!!newlinebanner\n'
dut.config([dict(cmd="banner motd", input=motd_banner_value)])
resp = dut.api('system').get()
self.assertEqual(resp['banner_motd'], motd_banner_value.rstrip())
def test_set_hostname_with_value(self):
for dut in self.duts:
dut.config('default hostname')
value = random_string()
response = dut.api('system').set_hostname(value)
self.assertTrue(response, 'dut=%s' % dut)
value = 'hostname %s' % value
self.assertIn(value, dut.running_config)
def test_set_hostname_with_no_value(self):
for dut in self.duts:
dut.config('hostname test')
response = dut.api('system').set_hostname(disable=True)
self.assertTrue(response, 'dut=%s' % dut)
value = 'no hostname'
self.assertIn(value, dut.running_config)
def test_set_hostname_with_default(self):
for dut in self.duts:
dut.config('hostname test')
response = dut.api('system').set_hostname(default=True)
self.assertTrue(response, 'dut=%s' % dut)
value = 'no hostname'
self.assertIn(value, dut.running_config)
def test_set_hostname_default_over_value(self):
for dut in self.duts:
dut.config('hostname test')
response = dut.api('system').set_hostname(value='foo', default=True)
self.assertTrue(response, 'dut=%s' % dut)
value = 'no hostname'
self.assertIn(value, dut.running_config)
def test_set_iprouting_to_true(self):
for dut in self.duts:
dut.config('no ip routing')
resp = dut.api('system').set_iprouting(True)
self.assertTrue(resp, 'dut=%s' % dut)
self.assertNotIn('no ip routing', dut.running_config)
def test_set_iprouting_to_false(self):
for dut in self.duts:
dut.config('ip routing')
resp = dut.api('system').set_iprouting(False)
self.assertTrue(resp, 'dut=%s' % dut)
self.assertIn('no ip routing', dut.running_config)
def test_set_iprouting_to_no(self):
for dut in self.duts:
dut.config('ip routing')
resp = dut.api('system').set_iprouting(disable=True)
self.assertTrue(resp, 'dut=%s' % dut)
self.assertIn('no ip routing', dut.running_config)
def test_set_iprouting_to_default(self):
for dut in self.duts:
dut.config('ip routing')
resp = dut.api('system').set_iprouting(default=True)
self.assertTrue(resp, 'dut=%s' % dut)
self.assertIn('no ip routing', dut.running_config)
def test_set_hostname_with_period(self):
for dut in self.duts:
dut.config('hostname localhost')
response = dut.api('system').set_hostname(value='host.domain.net')
self.assertTrue(response, 'dut=%s' % dut)
value = 'hostname host.domain.net'
self.assertIn(value, dut.running_config)
def test_set_banner_motd(self):
for dut in self.duts:
banner_value = random_string()
dut.config([dict(cmd="banner motd",
input=banner_value)])
self.assertIn(banner_value, dut.running_config)
banner_api_value = random_string()
resp = dut.api('system').set_banner("motd", banner_api_value)
self.assertTrue(resp, 'dut=%s' % dut)
self.assertIn(banner_api_value, dut.running_config)
def test_set_banner_motd_donkey(self):
for dut in self.duts:
donkey_chicken = r"""
/\ /\
( \\ // )
\ \\ // /
\_\\||||//_/
\/ _ _ \
\/|(o)(O)|
\/ | |
___________________\/ \ /
// // |____| Cluck cluck cluck!
// || / \
//| \| \ 0 0 /
// \ ) V / \____/
// \ / ( /
"" \ /_________| |_/
/ /\ / | ||
/ / / / \ ||
| | | | | ||
| | | | | ||
|_| |_| |_||
\_\ \_\ \_\\
"""
resp = dut.api('system').set_banner("motd", donkey_chicken)
self.assertTrue(resp, 'dut=%s' % dut)
self.assertIn(donkey_chicken, dut.running_config)
def test_set_banner_motd_default(self):
for dut in self.duts:
dut.config([dict(cmd="banner motd",
input="!!!!REMOVE BANNER TEST!!!!")])
dut.api('system').set_banner('motd', None, True)
self.assertIn('no banner motd', dut.running_config)
def test_set_banner_login(self):
for dut in self.duts:
banner_value = random_string()
dut.config([dict(cmd="banner login",
input=banner_value)])
self.assertIn(banner_value, dut.running_config)
banner_api_value = random_string()
resp = dut.api('system').set_banner("login", banner_api_value)
self.assertTrue(resp, 'dut=%s' % dut)
self.assertIn(banner_api_value, dut.running_config)
config_login_banner = dut.api('system').get()['banner_login']
self.assertEqual(config_login_banner, banner_api_value.strip())
def test_set_banner_login_default(self):
for dut in self.duts:
dut.config([dict(cmd="banner login",
input="!!!!REMOVE LOGIN BANNER TEST!!!!")])
dut.api('system').set_banner('login', None, True)
self.assertIn('no banner login', dut.running_config)
def test_set_banner_login_negate(self):
for dut in self.duts:
dut.config([dict(cmd="banner login",
input="!!!!REMOVE LOGIN BANNER TEST!!!!")])
dut.api('system').set_banner('login', None, False, True)
self.assertIn('no banner login', dut.running_config)
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
import md5
import re
from StringIO import StringIO
import mimetypes
import dateutil.parser
import time
import os
import tempfile
from oauth2client.client import AccessTokenRefreshError
import client as client_module
from utils import *
from apiclient.errors import HttpError
from configuration import Configuration
class User(object):
def __init__(self, login, client, need_folders=True):
# check that we have only the login, not the full email address
if '@' in login:
login = login.partition('@')[0]
self._login = login
self._client = client
self._documents = None
self._folders = UserFolders(self) if need_folders else None
self._black_listed_ids = []
def __repr__(self):
return self._login
@property
def login(self):
return self._login
@property
def folders(self):
return self._folders
@property
def document_generator(self):
# let's filter folders out
return client_module.UserDocumentsGenerator(
self, Document.get_folder_query()
)
# just the doc ids
@property
def document_ids(self):
return [doc.id for doc in self.document_generator]
@property
def drive_service(self):
client = self._client
client.authorize(self)
return client.drive_service
def save_documents(self, backend, owned_only):
Log.verbose(u'Processing docs for {}'.format(self.login))
doc_generator = self.document_generator
for document in doc_generator:
if not backend.need_to_fetch_contents(self, document) \
or (owned_only and not document.is_owned):
# mark as done, and get to the next one
Log.verbose(
u'Not necessary to fetch doc id {}'.format(document.id)
)
doc_generator.add_processed_id(document.id)
continue
Log.verbose(u'Processing {}\'s doc "{}" (id: {})'.format(
self.login, document.title, document.id
))
try:
document.fetch_contents(self._client)
self._save_single_document(backend, document)
except client_module.ExpiredTokenException as ex:
if document.id in self._black_listed_ids:
# we already got a 403 on that one!
explanation = 'Two 403 errors on a row on document id {}'\
.format(document.id)
ex.brive_explanation = explanation
raise
# otherwise try again
Log.verbose(
'403 response, sleeping one minute and re-trying...'
)
time.sleep(60)
# the re-auth is handled by the list request
doc_generator.reset_to_current_page()
self._black_listed_ids.append(document.id)
continue
except Exception as ex:
explanation = \
'Unexpected error when processing ' \
+ '{}\'s documents '.format(self.login) \
+ u'(doc id: {})'.format(document.id)
ex.brive_explanation = explanation
raise
# mark as done
doc_generator.add_processed_id(document.id)
# let's save some memory
self._cleanup()
backend.close_user(self)
def retrieve_single_document(self, backend, doc_id):
try:
document = self.retrieve_single_document_meta(doc_id)
except Exception as e:
explanation = 'Error while retrieving single document id ' \
+ u'{} for user {}, '.format(doc_id, self.login) \
+ 'it\'s likely this user isn\'t allowed to see that doc'
e.brive_explanation = explanation
raise
document.fetch_contents(self._client)
self._save_single_document(backend, document)
# NOTE: Google's API doesn't tolerate many of these calls in quick
# succession, so use sparingly...
@Utils.multiple_tries_decorator()
def retrieve_single_document_meta(self, doc_id, is_folder=False):
try:
meta = self.drive_service.files().get(fileId=doc_id).execute()
klass = Folder if is_folder else Document
return klass(meta, self.folders)
except AccessTokenRefreshError:
# most likely 403
raise client_module.ExpiredTokenException
def _save_single_document(self, backend, document):
try:
Log.verbose(u'Saving {}\'s doc "{}" (id: {})'.format(
self.login, document.title, document.id
))
backend.save(self, document)
except Exception as ex:
explanation = \
'Unexpected error when saving ' \
+ '{}\'s documents '.format(self.login) \
+ u'(doc id: {})'.format(document.id)
ex.brive_explanation = explanation
raise
# no need to keep the potentially big document's contents in memory
document.del_contents()
def _cleanup(self):
del self._documents
del self._folders
del self._black_listed_ids
# keeps tracks of the user's folders, and caches the paths to them
class UserFolders(object):
def __init__(self, user):
self._user = user
self._initialized = False
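# get_path walks the parent chain recursively: for a folder "C" under "B"
# under "A" at the drive root it returns "A<sep>B<sep>C" (illustrative names,
# joined with os.sep)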
def get_path(self, folder_id):
if folder_id is None:
# the root has ID None by convention
return ''
self._do_init()
folder = self._get_folder_from_id(folder_id)
parent_path = self.get_path(folder.parent_id)
parent_path += os.sep if parent_path else ''
return parent_path + folder.title
def _do_init(self):
if self._initialized:
return
self._initialized = True
Log.debug(u'Initializing folders for user {}'.format(self._user.login))
# dict that maps a folder id to its object
self._folders = self._build_folders()
def _build_folders(self):
folder_generator = client_module.UserDocumentsGenerator(
self._user, Document.get_folder_query(False), Folder
)
return {folder.id: folder for folder in folder_generator}
# sometimes a folder could have been created during the current run,
# in which case we don't have it in the cache
def _get_folder_from_id(self, folder_id):
try:
return self._folders[folder_id]
except KeyError:
Log.debug(u'Could not find folder {} in cache, trying to fetch it'
.format(folder_id))
folder = self._user.retrieve_single_document_meta(folder_id, True)
self._folders[folder_id] = folder
return folder
class DocumentContent(object):
def __init__(self, client, url, document):
self._client = client
self._url = url
self._document = document
headers, self._content = self._make_request()
self.file_name = self._get_file_name(headers)
self.size = None
_CHUNK_SIZE = 1048576 # 1 Mb
# returns a file-like object
# if size_requested is set to True, then the self.size attribute
# will be accurate after this returns
def get_file_object(self, size_requested=False):
if self._client.streaming:
_, self._content = self._make_request()
if not size_requested or self.size is not None:
return self._content
# we need to copy the whole thing to the disk, and then return it
Log.debug(u'Copying to temp file {}'.format(self.file_name))
result = tempfile.TemporaryFile()
self.write_to_file(result, True)
self.size = os.fstat(result.fileno()).st_size
# let's rewind the file before returning it
result.seek(0)
else:
result = StringIO(self._content)
if size_requested:
self.size = result.len
return result
def write_to_file(self, f, content_up_to_date=False):
if self._client.streaming:
if not content_up_to_date:
_, self._content = self._make_request()
for blck in iter(lambda: self._content.read(self._CHUNK_SIZE), ''):
f.write(blck)
else:
f.write(self._content)
f.flush()
@Utils.multiple_tries_decorator(client_module.ExpiredTokenException)
def _make_request(self):
return self._client.request(
self._url, brive_expected_error_status=403,
brive_streaming=True
)
_split_extension_regex = re.compile(r'\.([^.]+)$')
_name_from_header_regex = re.compile(
r'^attachment;\s*filename(?:="|\*=[A-Za-z0-9-]+\'\')([^"]+)(?:"|$)'
)
def _get_file_name(self, headers):
# get from the headers
content_disposition = headers['content-disposition']
name_matches = self._name_from_header_regex.findall(
content_disposition
)
if not name_matches:
raise Exception(
u'Unexpected "content_disposition" header: {}'.format(
content_disposition
)
)
raw_name = name_matches[0]
# insert the doc id in the name (just before the extension)
# to make sure it's unique
result = u'{}_{}'.format(self._document.title, self._document.id)
extension_matches = self._split_extension_regex.findall(raw_name)
if extension_matches:
extension = extension_matches[0]
result += u'.{}'.format(extension)
return result
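# A minimal sketch (not part of the original module; the header value is
# illustrative) of what _name_from_header_regex extracts from a
# Content-Disposition header before the doc id is appended to keep names unique.
def _example_content_disposition_name():
    header = 'attachment; filename="report.pdf"'
    matches = DocumentContent._name_from_header_regex.findall(header)
    return matches[0] if matches else None  # -> 'report.pdf'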
class Document(object):
_extension_from_url_regex = re.compile(r'exportFormat=([^&]+)$')
_folder_mime_type = r'application/vnd.google-apps.folder'
_exclusive_formats = dict()
def __init__(self, meta, user_folders):
self._meta = meta
self._user_folders = user_folders
self._contents = None
def __repr__(self):
return u'Meta: {}'.format(self._meta)
@property
def id(self):
return self.get_meta('id')
@property
def contents(self):
return self._contents
@property
def title(self):
# forbid os.sep in the name, and replace it with '_',
# to prevent bugs when saving
return self.get_meta('title').replace(os.sep, '_')
@property
def is_owned(self):
try:
return self.get_meta('userPermission', {})['role'] == 'owner'
except KeyError:
return False
@property
def is_folder(self):
return self.get_meta('mimeType') == Document._folder_mime_type
@property
def path(self):
result = self._user_folders.get_path(self.parent_id)
result += os.sep if result else ''
return result
@property
def parent_id(self):
try:
parent = self.get_meta('parents')[0]
if parent['isRoot']:
return None
return parent['id']
except IndexError:
return None
@property
def modified_timestamp(self):
try:
datetime_object = dateutil.parser.parse(
self.get_meta('modifiedDate')
)
return int(time.mktime(datetime_object.timetuple()))
except Exception:
# not a big deal if that fails from time to time
return 0
# fetches the document's contents as a list of DocumentContent objects
# force_refresh=True forces a re-fetch of the contents even if we have
# already done so
def fetch_contents(self, client, force_refresh=False):
if self._contents is None or force_refresh:
self._contents = []
self._do_fetch_contents(client)
# returns the query string to use to call Google's API
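# e.g. "not mimeType = 'application/vnd.google-apps.folder'" when excluding
# folders (the default), or the same string without the leading "not " when
# selecting only folders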
@staticmethod
def get_folder_query(exclude=True):
return "%smimeType = '%s'" % (
'not ' if exclude else '', Document._folder_mime_type
)
def _do_fetch_contents(self, client, second_try=False, banned_urls=None):
# use None instead of a mutable default so banned URLs don't leak between calls
if banned_urls is None:
banned_urls = []
debug_msg = u'Fetching contents for doc id {}'.format(self.id)
if second_try:
debug_msg += ', this time ignoring extension preferences'
Log.debug(debug_msg)
urls = self._get_download_urls(second_try, banned_urls)
for url in urls:
try:
Log.verbose(u'Starting download from {}'.format(url))
self._contents.append(self._download_from_url(client, url))
except client_module.FailedRequestException:
Log.error(u'Download from {} for document {} failed'
.format(url, self.id))
banned_urls.append(url)
if not self._contents:
if second_try:
Log.error('Couldn\'t retrieve any version of document id ' +
u'{} (title: {})'.format(self.id, self.title))
else:
# we've failed to retrieve any contents, we try again,
# this time ignoring format preferences
self._do_fetch_contents(client, True, banned_urls)
def del_contents(self):
del self._contents
self._contents = None
def get_meta(self, key, default=None):
if key in self._meta:
return self._meta[key]
return default
def _get_download_urls(self, ignore_preferred=False, banned_urls=list()):
result = []
if 'downloadUrl' in self._meta:
# filter if exclusive formats are set
if Document._is_an_exclusive_format(self.get_meta('mimeType')):
result = [self._meta['downloadUrl']]
elif 'exportLinks' in self._meta:
# no direct download link
urls = self._meta['exportLinks'].values()
url_to_ext = dict()
# filter exclusive and preferred formats
exclusive = Configuration.get('formats_exclusive')
preferred = Configuration.get('formats_preferred')
one_preferred_found = False
for url in urls:
# get the extension from the url
ext_matches = Document._extension_from_url_regex.findall(url)
if not ext_matches:
# shouldn't happen as far as I can tell
Log.error(u'No extension found in url: {} '.format(url) +
u'for document id {}'.format(self.id))
continue
extension = '.' + ext_matches[0]
Log.debug(
u'Found extension {} for document id {}'.format(
extension, self.id
)
)
if exclusive and extension not in exclusive:
Log.debug(u'Ignoring extension {} '.format(extension) +
u'not in exclusive: {}'.format(exclusive))
continue
if not ignore_preferred and extension in preferred:
one_preferred_found = True
url_to_ext[url] = extension
if one_preferred_found:
result = [url for url in url_to_ext.keys()
if url_to_ext[url] in preferred]
else:
result = url_to_ext.keys()
# filter banned URLs
if banned_urls:
result = [url for url in result if url not in banned_urls]
# and finally, return if anything is left!
if not result:
Log.verbose(
u'No suitable download URL for document id {}'.format(self.id)
)
return result
@staticmethod
def _is_an_exclusive_format(mimeType):
exclusive = Configuration.get('formats_exclusive')
if not exclusive:
return True
if mimeType not in Document._exclusive_formats:
possible_exts = set(
mimetypes.guess_all_extensions(mimeType, strict=False)
)
result = bool(possible_exts.intersection(exclusive))
Document._exclusive_formats[mimeType] = result
return Document._exclusive_formats[mimeType]
def _download_from_url(self, client, url):
try:
return DocumentContent(
client, url, self
)
except (KeyError, client_module.ExpectedFailedRequestException):
# token expired, or a "User Rate Limit Exceeded" error
raise client_module.ExpiredTokenException
# it's only a folder, no need to keep all the meta data
# (just parent_id and title)
class Folder(Document):
def __init__(self, meta, user_folders):
super(Folder, self).__init__(meta, user_folders)
new_meta = {key: meta[key] for key in ('id', 'parents', 'title')}
del self._meta
self._meta = new_meta
|
|
class ChangeParser():
"""
Provides many different notification change parsers and utility methods.
"""
@staticmethod
def get_changed_field_handler(top_level_type, field):
"""
Returns a change handler for the field and top level object type. If
not found then None is returned.
:param top_level_type: The top level object type.
:type top_level_type: str
:param field: The field name.
:type field: str
:returns: function: Returns a change field handler, None if not found
"""
specific_mapped_type = __specific_field_to_change_handler__.get(top_level_type)
# Check for a specific mapped field first, if there isn't one
# then just try to use the general mapped fields.
if specific_mapped_type is not None:
specific_mapped_handler = specific_mapped_type.get(field)
if specific_mapped_handler is not None:
return specific_mapped_handler
return __general_field_to_change_handler__.get(field)
############################################################################
# Generic change parsers
############################################################################
@staticmethod
def flatten_objects_to_list(objects, key):
"""
Flattens a list of objects into a list of the values stored under the input key.
:param objects: The objects to extract the values from.
:type objects: list(object)
:param key: The name of the field/key to extract for the flattened list.
:type key: str
:returns: list: Returns a list of the extracted values.
"""
return_list = []
for object in objects:
return_list.append(object[key])
return return_list
@staticmethod
def generic_child_fields_change_handler(old_value, new_value, fields, base_fqn=None):
"""
Handles processing of changed fields where the input is a dictionary of
values. This will only process the immediate children.
:type old_value: The old mongo object to compare values against.
:param old_value: A Mongo Document
:type new_value: The updated/new mongo object to compare values against.
:param new_value: A Mongo Document
:type fields: list of field names to compare against
:param fields: list(str).
:type base_fqn: The base descriptor of the object
:param base_fqn: str
:returns: str: Returns a message summarizing the changes.
"""
message = ""
for field in fields:
old_field_value = ""
new_field_value = ""
if old_value is not None:
old_field_value = old_value[field]
if new_value is not None:
new_field_value = new_value[field]
if old_field_value != new_field_value:
change_message = ChangeParser.generic_single_field_change_handler(
old_field_value, new_field_value, field, base_fqn)
message += change_message[:1].capitalize() + change_message[1:]
return message
@staticmethod
def generic_list_change_handler(old_value, new_value, changed_field):
"""
Handles the processing of changed fields where the changed field
is a list of items. Displays the changed value in unicode format.
:type old_value: The old mongo object to compare values against.
:param old_value: A Mongo Document
:type new_value: The updated/new mongo object to compare values against.
:param new_value: A Mongo Document
:type changed_field: The field name that the comparisons will be against.
:param changed_field: str
:returns: str: Returns a message summarizing the changes.
"""
removed_names = [x for x in old_value if x not in new_value and x != '']
added_names = [x for x in new_value if x not in old_value and x != '']
message = ""
if len(added_names) > 0:
message += "Added to %s: %s. " % (changed_field, unicode(', '.join(added_names)))
if len(removed_names) > 0:
message += "Removed from %s: %s. " % (changed_field, unicode(', '.join(removed_names)))
return message
@staticmethod
def generic_list_json_change_handler(old_value, new_value, changed_field):
"""
Handles the processing of changed fields where the changed field
is a list of items. Displays the changed value in json format via to_json().
:type old_value: The old mongo object to compare values against.
:param old_value: A Mongo Document
:type new_value: The updated/new mongo object to compare values against.
:param new_value: A Mongo Document
:type changed_field: The field name that the comparisons will be against.
:param changed_field: str
:returns: str: Returns a message summarizing the changes.
"""
removed_names = [x.to_json() for x in old_value if x not in new_value and x != '']
added_names = [x.to_json() for x in new_value if x not in old_value and x != '']
message = ""
if len(added_names) > 0:
message += "Added to %s: %s. " % (changed_field, unicode(', '.join(added_names)))
if len(removed_names) > 0:
message += "Removed from %s: %s. " % (changed_field, unicode(', '.join(removed_names)))
return message
@staticmethod
def generic_single_field_change_handler(old_value, new_value, changed_field, base_fqn=None):
"""
Handles the processing of a changed field where the changed field
is displayable in string format.
:type old_value: The old mongo object to compare values against.
:param old_value: A value that can be stringified
:type new_value: The updated/new mongo object to compare values against.
:param new_value: A value that can be stringified
:type changed_field: The field name that the comparisons will be against.
:param changed_field: str
:type base_fqn: The base descriptor of the object
:param base_fqn: str
:returns: str: Returns a message summarizing the changes.
"""
if base_fqn is None:
return "%s changed from \"%s\" to \"%s\"\n" % (changed_field, old_value, new_value)
else:
return "%s.%s changed from \"%s\" to \"%s\"\n" % (base_fqn, changed_field, old_value, new_value)
@staticmethod
def generic_single_field_json_change_handler(old_value, new_value, changed_field, base_fqn=None):
"""
Handles the processing of a changed field where the changed field
is displayable in json format via to_json().
:type old_value: The old mongo object to compare values against.
:param old_value: A Mongo Document
:type new_value: The updated/new mongo object to compare values against.
:param new_value: A Mongo Document
:type changed_field: The field name that the comparisons will be against.
:param changed_field: str
:type base_fqn: The base descriptor of the object
:param base_fqn: str
:returns: str: Returns a message summarizing the changes.
"""
if base_fqn is None:
return "%s changed from \"%s\" to \"%s\"\n" % (changed_field, old_value.to_json(), new_value.to_json())
else:
return "%s.%s changed from \"%s\" to \"%s\"\n" % (base_fqn, changed_field, old_value.to_json(), new_value.to_json())
@staticmethod
def get_changed_object_list(old_objects, new_objects, object_key):
"""
Detects which objects have changed by comparing the 'object_key'
from both the input old_objects and new_objects parameters.
:type old_objects: A list of the old values.
:param old_objects: An iterable segment of a Mongo Document
:type new_objects: A list of the new values
:param new_objects: An iterable segment of a Mongo Document
:type object_key: The field name that will be the key in the returned dict.
:param object_key: str
:returns: dict(key, dict): Returns a dictionary of changed objects,
in the format of {key: {old, new}}
"""
changed_objects = {}
# Try and detect which objects have changed
for old_object in old_objects:
if old_object not in new_objects and object_key in old_object:
if old_object[object_key] not in changed_objects:
changed_objects[old_object[object_key]] = {'old': old_object}
else:
changed_objects[old_object[object_key]]['old'] = old_object
for new_object in new_objects:
if new_object not in old_objects and object_key in new_object:
if new_object[object_key] not in changed_objects:
changed_objects[new_object[object_key]] = {'new': new_object}
else:
changed_objects[new_object[object_key]]['new'] = new_object
return changed_objects
@staticmethod
def get_changed_primitive_list(old_objects, new_objects):
"""
Detects which objects have changed by comparing the value of
both the input old_objects and new_objects parameters.
:type old_objects: A list of the old values.
:param old_objects: An iterable segment of a Mongo Document
:type new_objects: A list of the new values
:param new_objects: An iterable segment of a Mongo Document
:returns: dict(key, dict): Returns a dictionary of changed objects,
in the format of {key: {old, new}}
"""
changed_objects = {}
# Try and detect which items have changed
for old_object in old_objects:
if old_object not in new_objects:
if old_object not in changed_objects:
changed_objects[old_object] = {'old': old_object}
else:
changed_objects[old_object]['old'] = old_object
for new_object in new_objects:
if new_object not in old_objects:
if new_object not in changed_objects:
changed_objects[new_object] = {'new': new_object}
else:
changed_objects[new_object]['new'] = new_object
return changed_objects
@staticmethod
def get_short_name(obj, summary_handler, default):
"""
Generates and returns a human readable short name of the input object
by using the input summary_handler parameter. Returns the default
parameter if the summary_handler is None.
:param obj: The object.
:type obj: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:type summary_handler: A summary handler function that will be used
to generate the short name, if not None.
:param summary_handler: function
:type default: The default value to use if the summary handler is not
able to generate a short name value.
:param default: str
:returns: str: Returns a short name description.
"""
short_name = default
if summary_handler is not None:
short_name = summary_handler(obj)
return short_name
@staticmethod
def parse_generic_change_object_list(change_dictionary, field_name, object_key,
change_parser_handler=None, summary_handler=None):
"""
Parses a list of complex objects and tries to determine if the object
was modified, added, or deleted. Returns a string of the summary of
changes.
:param change_dictionary: A dict of changes in the format {key: {old, new}}.
:type change_dictionary: dict(key, dict)
:param field_name: A description of the field that changed, e.g. its name.
:type field_name: str
:param object_key: A label for the items within the field, used in the
change messages (e.g. 'instance', 'name', 'item').
:type object_key: str
:param change_parser_handler: A handler function that determines the
fields that were changed for the object.
This is used if the object was modified and
if the handler function is not None.
:type change_parser_handler: function
:param summary_handler: A handler function that, if not None, generates
a short description of the compared object.
:type summary_handler: function
:returns: str: Returns a message summarizing the changes.
"""
message = ""
for changed_key_name in change_dictionary:
old_value = change_dictionary[changed_key_name].get('old')
new_value = change_dictionary[changed_key_name].get('new')
if old_value is not None and new_value is not None:
short_name = ChangeParser.get_short_name(old_value, summary_handler, changed_key_name)
message += "%s %s modified: %s\n" % (field_name, object_key, short_name)
if change_parser_handler is not None:
message += change_parser_handler(old_value, new_value, field_name)
elif old_value is not None and new_value is None:
short_name = ChangeParser.get_short_name(old_value, summary_handler, changed_key_name)
message += "%s %s removed: %s\n" % (field_name, object_key, short_name)
elif old_value is None and new_value is not None:
short_name = ChangeParser.get_short_name(new_value, summary_handler, changed_key_name)
message += "%s %s added: %s\n" % (field_name, object_key, short_name)
else:
message += "Unknown operation on %s %s: %s\n" % (field_name, object_key, changed_key_name)
return message
############################################################################
# Summary generation handlers
#
# These methods generate and return a human readable short name of
# the input object
############################################################################
@staticmethod
def actions_summary_handler(object):
return "%s - %s" % (object.action_type, unicode(object.date))
@staticmethod
def indicator_activity_summary_handler(object):
return object.description
@staticmethod
def objects_summary_handler(object):
return "%s - %s" % (object.name, object.value)
@staticmethod
def raw_data_highlights_summary_handler(object):
return "line %s: %s" % (object.line, unicode(object.line_data))
@staticmethod
def raw_data_inlines_summary_handler(object):
return "line %s: %s" % (object.line, object.comment)
@staticmethod
def relationships_summary_handler(object):
#target_of_relationship = class_from_id(object.type, object.value)
# TODO: Print out a meaningful relationship summary, should consolidate
# relationships code to generically get the "key" that best describes
# a generic mongo object.
return "%s - %s" % (object.rel_type, object.object_id)
############################################################################
# Specific Change Handlers/Parsers
#
# These methods parse the modified field and determine the specific change
# that was made.
############################################################################
@staticmethod
def actions_change_handler(old_value, new_value, changed_field):
changed_data = ChangeParser.get_changed_object_list(old_value, new_value, 'date')
message = ChangeParser.parse_generic_change_object_list(
changed_data,
changed_field,
'instance',
ChangeParser.actions_parse_handler,
ChangeParser.actions_summary_handler)
return message
@staticmethod
def actions_parse_handler(old_value, new_value, base_fqn):
fields = ['action_type', 'active', 'reason', 'begin_date', 'end_date', 'performed_date']
message = ChangeParser.generic_child_fields_change_handler(old_value, new_value, fields, base_fqn)
return message
@staticmethod
def bucket_list_change_handler(old_value, new_value, changed_field):
return ChangeParser.generic_list_change_handler(old_value, new_value, changed_field)
@staticmethod
def campaign_change_handler(old_value, new_value, changed_field):
changed_data = ChangeParser.get_changed_object_list(old_value, new_value, 'name')
message = ChangeParser.parse_generic_change_object_list(
changed_data, changed_field, 'name',
ChangeParser.campaign_parse_handler)
return message
@staticmethod
def campaign_parse_handler(old_value, new_value, base_fqn):
fields = ['name', 'confidence', 'description']
message = ChangeParser.generic_child_fields_change_handler(old_value, new_value, fields, base_fqn)
return message
@staticmethod
def indicator_activity_change_handler(old_value, new_value, changed_field):
changed_data = ChangeParser.get_changed_object_list(old_value, new_value, 'date')
message = ChangeParser.parse_generic_change_object_list(changed_data, changed_field, 'instance',
ChangeParser.indicator_activity_parse_handler,
ChangeParser.indicator_activity_summary_handler)
return message
@staticmethod
def indicator_activity_parse_handler(old_value, new_value, base_fqn):
fields = ['description', 'end_date', 'start_date']
message = ChangeParser.generic_child_fields_change_handler(old_value, new_value, fields, base_fqn)
return message
@staticmethod
def indicator_confidence_change_handler(old_value, new_value, changed_field):
fields = ['rating']
message = ChangeParser.generic_child_fields_change_handler(old_value, new_value, fields, changed_field)
return message
@staticmethod
def indicator_impact_change_handler(old_value, new_value, changed_field):
fields = ['rating']
message = ChangeParser.generic_child_fields_change_handler(old_value, new_value, fields, changed_field)
return message
@staticmethod
def objects_change_handler(old_value, new_value, changed_field):
changed_objects = ChangeParser.get_changed_object_list(old_value, new_value, 'name')
message = ChangeParser.parse_generic_change_object_list(changed_objects, 'Objects', 'item',
ChangeParser.objects_parse_handler,
ChangeParser.objects_summary_handler)
return message
@staticmethod
def objects_parse_handler(old_value, new_value, base_fqn):
fields = ['name', 'value']
message = ChangeParser.generic_child_fields_change_handler(old_value, new_value, fields, base_fqn)
return message
@staticmethod
def relationships_parse_handler(old_value, new_value, base_fqn):
fields = ['relationship', 'rel_type', 'rel_reason', 'rel_confidence']
message = ChangeParser.generic_child_fields_change_handler(old_value, new_value, fields, base_fqn)
return message
@staticmethod
def raw_data_highlights_change_handler(old_value, new_value, changed_field):
changed_data = ChangeParser.get_changed_object_list(old_value, new_value, 'date')
message = ChangeParser.parse_generic_change_object_list(changed_data, changed_field, 'instance',
ChangeParser.raw_data_highlights_parse_handler,
ChangeParser.raw_data_highlights_summary_handler)
return message
@staticmethod
def raw_data_highlights_parse_handler(old_value, new_value, base_fqn):
fields = ['line', 'line_data']
message = ChangeParser.generic_child_fields_change_handler(old_value, new_value, fields, base_fqn)
return message
@staticmethod
def raw_data_inlines_change_handler(old_value, new_value, changed_field):
changed_data = ChangeParser.get_changed_object_list(old_value, new_value, 'date')
message = ChangeParser.parse_generic_change_object_list(changed_data, changed_field, 'instance',
ChangeParser.raw_data_inlines_parse_handler,
ChangeParser.raw_data_inlines_summary_handler)
return message
@staticmethod
def raw_data_inlines_parse_handler(old_value, new_value, base_fqn):
fields = ['line', 'comment']
message = ChangeParser.generic_child_fields_change_handler(old_value, new_value, fields, base_fqn)
return message
@staticmethod
def relationships_change_handler(old_value, new_value, changed_field):
changed_data = ChangeParser.get_changed_object_list(old_value, new_value, 'date')
message = ChangeParser.parse_generic_change_object_list(changed_data, changed_field, 'instance',
ChangeParser.relationships_parse_handler,
ChangeParser.relationships_summary_handler)
return message
@staticmethod
def screenshots_change_handler(old_value, new_value, changed_field):
changed_screenshots = ChangeParser.get_changed_primitive_list(old_value, new_value)
message = ChangeParser.parse_generic_change_object_list(changed_screenshots, changed_field, 'id')
return message
@staticmethod
def skip_change_handler(old_value, new_value, changed_field):
return None
@staticmethod
def source_change_handler(old_value, new_value, changed_field):
changed_sources = ChangeParser.get_changed_object_list(old_value, new_value, 'name')
message = ChangeParser.parse_generic_change_object_list(changed_sources, changed_field, 'name',
ChangeParser.source_parse_handler)
return {'message': message, 'source_filter': changed_sources.keys()}
@staticmethod
def source_instances_parse_handler(old_value, new_value, base_fqn):
fields = ['method', 'reference']
message = ChangeParser.generic_child_fields_change_handler(old_value, new_value, fields, base_fqn)
return message
@staticmethod
def source_parse_handler(old_value, new_value, base_fqn):
changed_source_instances = ChangeParser.get_changed_object_list(
old_value['instances'], new_value['instances'], 'date')
message = ChangeParser.parse_generic_change_object_list(changed_source_instances, 'source', 'instances',
ChangeParser.source_instances_parse_handler)
return message
@staticmethod
def tickets_change_handler(old_value, new_value, changed_field):
old_tickets_list = ChangeParser.flatten_objects_to_list(old_value, 'ticket_number')
new_tickets_list = ChangeParser.flatten_objects_to_list(new_value, 'ticket_number')
return ChangeParser.generic_list_change_handler(old_tickets_list, new_tickets_list, changed_field)
class MappedMongoFields():
@staticmethod
def get_mapped_mongo_field(top_level_type, field):
specific_mapped_type = __specific_mongo_to_doc_field__.get(top_level_type)
# Check for a specific mapped field first, if there isn't one
# then just try to use the general mapped fields.
if specific_mapped_type is not None:
specific_mapped_value = specific_mapped_type.get(field)
if specific_mapped_value is not None:
return specific_mapped_value
return __general_mongo_to_doc_field__.get(field, field)
class NotificationHeaderManager():
"""
The following generate_*_header() functions generate a meaningful description
for that specific object type.
"""
@staticmethod
def get_header_handler(obj_type):
return __notification_header_handler__.get(obj_type)
@staticmethod
def generate_actor_header(obj):
return "Actor: %s" % (obj.name)
@staticmethod
def generate_backdoor_header(obj):
return "Backdoor: %s" % (obj.name)
@staticmethod
def generate_campaign_header(obj):
return "Campaign: %s" % (obj.name)
@staticmethod
def generate_certificate_header(obj):
return "Certificate: %s" % (obj.filename)
@staticmethod
def generate_domain_header(obj):
return "Domain: %s" % (obj.domain)
@staticmethod
def generate_email_header(obj):
return "Email: %s" % (obj.subject)
@staticmethod
def generate_event_header(obj):
return "Event: %s" % (obj.title)
@staticmethod
def generate_exploit_header(obj):
return "Exploit: %s" % (obj.name)
@staticmethod
def generate_indicator_header(obj):
return "Indicator: %s - %s" % (obj.ind_type, obj.value)
@staticmethod
def generate_ip_header(obj):
return "IP: %s" % (obj.ip)
@staticmethod
def generate_pcap_header(obj):
return "PCAP: %s" % (obj.filename)
@staticmethod
def generate_raw_data_header(obj):
return "RawData: %s (version %s)" % (obj.title, obj.version)
@staticmethod
def generate_sample_header(obj):
return "Sample: %s" % (obj.filename)
@staticmethod
def generate_screenshot_header(obj):
return "Screenshot: %s" % (obj.filename)
@staticmethod
def generate_target_header(obj):
return "Target: %s" % (obj.email_address)
# Use dictionaries to hold the handler mappings because dictionary
# lookup time is O(1) whereas a list or a long 'if/else' block is worst
# case O(n). The trade-off is that this consumes slightly more memory
# on startup since the dicts need to be constructed.
__general_field_to_change_handler__ = {
"actions": ChangeParser.actions_change_handler,
"analysis": ChangeParser.skip_change_handler,
"bucket_list": ChangeParser.bucket_list_change_handler,
"campaign": ChangeParser.campaign_change_handler,
"obj": ChangeParser.objects_change_handler,
"relationships": ChangeParser.relationships_change_handler,
"screenshots": ChangeParser.screenshots_change_handler,
"source": ChangeParser.source_change_handler,
"tickets": ChangeParser.tickets_change_handler,
}
__specific_field_to_change_handler__ = {
"Indicator": {
"activity": ChangeParser.indicator_activity_change_handler,
"confidence": ChangeParser.indicator_confidence_change_handler,
"impact": ChangeParser.indicator_impact_change_handler,
},
"RawData": {
"tool": ChangeParser.generic_single_field_json_change_handler,
"highlights": ChangeParser.raw_data_highlights_change_handler,
"inlines": ChangeParser.raw_data_inlines_change_handler,
}
}
__notification_header_handler__ = {
"Actor": NotificationHeaderManager.generate_actor_header,
"Backdoor": NotificationHeaderManager.generate_backdoor_header,
"Campaign": NotificationHeaderManager.generate_campaign_header,
"Certificate": NotificationHeaderManager.generate_certificate_header,
"Domain": NotificationHeaderManager.generate_domain_header,
"Email": NotificationHeaderManager.generate_email_header,
"Event": NotificationHeaderManager.generate_event_header,
"Exploit": NotificationHeaderManager.generate_exploit_header,
"Indicator": NotificationHeaderManager.generate_indicator_header,
"IP": NotificationHeaderManager.generate_ip_header,
"PCAP": NotificationHeaderManager.generate_pcap_header,
"RawData": NotificationHeaderManager.generate_raw_data_header,
"Sample": NotificationHeaderManager.generate_sample_header,
"Screenshot": NotificationHeaderManager.generate_screenshot_header,
"Target": NotificationHeaderManager.generate_target_header,
}
__general_mongo_to_doc_field__ = {
"objects": "obj"
}
__specific_mongo_to_doc_field__ = {
"Email": {
"from": "from_address",
"raw_headers": "raw_header",
},
"Indicator": {
"type": "ind_type"
}
}
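# A minimal sketch (not part of the original module) of how the dispatch tables
# above are consulted: the type-specific table is checked first and the general
# table is the fallback.
def _example_handler_lookup():
    specific = ChangeParser.get_changed_field_handler('Indicator', 'confidence')
    # -> ChangeParser.indicator_confidence_change_handler (specific table)
    fallback = ChangeParser.get_changed_field_handler('Indicator', 'tickets')
    # -> ChangeParser.tickets_change_handler (general table)
    return specific, fallback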
|
|
import asyncio
import datetime
import importlib
import logging
import os
import platform
import signal
import sys
import time
from typing import Any, Dict, List, Optional, Set, Union, cast
import tomodachi.__version__
import tomodachi.container
import tomodachi.importer
import tomodachi.invoker
from tomodachi.container import ServiceContainer
from tomodachi.helpers.execution_context import clear_execution_context, clear_services, set_execution_context
from tomodachi.helpers.safe_modules import SAFE_MODULES
from tomodachi.importer import ServiceImporter
CancelledError = asyncio.CancelledError
try:
asyncioexceptions = getattr(asyncio, "exceptions")
if asyncioexceptions:
_CancelledError = asyncioexceptions.CancelledError
except (Exception, ModuleNotFoundError, ImportError):
_CancelledError = asyncio.CancelledError
class ServiceLauncher(object):
_close_waiter: Optional[asyncio.Future] = None
_stopped_waiter: Optional[asyncio.Future] = None
restart_services = False
services: Set = set()
@classmethod
def run_until_complete(
cls,
service_files: Union[List, set],
configuration: Optional[Dict] = None,
watcher: Any = None,
) -> None:
def stop_services() -> None:
asyncio.ensure_future(_stop_services())
async def _stop_services() -> None:
if cls._close_waiter and not cls._close_waiter.done():
cls._close_waiter.set_result(None)
for service in cls.services:
try:
service.stop_service()
except Exception:
pass
if cls._stopped_waiter:
cls._stopped_waiter.set_result(None)
if cls._stopped_waiter:
await cls._stopped_waiter
def sigintHandler(*args: Any) -> None:
sys.stdout.write("\b\b\r")
sys.stdout.flush()
logging.getLogger("system").warning("Received <ctrl+c> interrupt [SIGINT]")
cls.restart_services = False
def sigtermHandler(*args: Any) -> None:
logging.getLogger("system").warning("Received termination signal [SIGTERM]")
cls.restart_services = False
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
if loop and loop.is_closed():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
for signame in ("SIGINT", "SIGTERM"):
loop.add_signal_handler(getattr(signal, signame), stop_services)
signal.siginterrupt(signal.SIGTERM, False)
signal.siginterrupt(signal.SIGUSR1, False)
signal.signal(signal.SIGINT, sigintHandler)
signal.signal(signal.SIGTERM, sigtermHandler)
watcher_future = None
if watcher:
async def _watcher_restart(updated_files: Union[List, set]) -> None:
cls.restart_services = True
for file in service_files:
try:
ServiceImporter.import_service_file(file)
except (SyntaxError, IndentationError) as e:
logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
logging.getLogger("watcher.restart").warning("Service cannot restart due to errors")
cls.restart_services = False
return
pre_import_current_modules = [m for m in sys.modules.keys()]
cwd = os.getcwd()
for file in updated_files:
if file.lower().endswith(".py"):
module_name = file[:-3].replace("/", ".")
module_name_full_path = "{}/{}".format(os.path.realpath(cwd), file)[:-3].replace("/", ".")
try:
for m in pre_import_current_modules:
if m == module_name or (len(m) > len(file) and module_name_full_path.endswith(m)):
ServiceImporter.import_module(file)
except (SyntaxError, IndentationError) as e:
logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
logging.getLogger("watcher.restart").warning("Service cannot restart due to errors")
cls.restart_services = False
return
logging.getLogger("watcher.restart").warning("Restarting services")
stop_services()
watcher_future = loop.run_until_complete(watcher.watch(loop=loop, callback_func=_watcher_restart))
cls.restart_services = True
init_modules = [m for m in sys.modules.keys()]
restarting = False
while cls.restart_services:
init_timestamp = time.time()
init_timestamp_str = datetime.datetime.utcfromtimestamp(init_timestamp).isoformat() + "Z"
process_id = os.getpid()
event_loop_alias = ""
event_loop_version = ""
try:
if "uvloop." in str(loop.__class__):
event_loop_alias = "uvloop"
import uvloop # noqa # isort:skip
event_loop_version = str(uvloop.__version__)
elif "asyncio." in str(loop.__class__):
event_loop_alias = "asyncio"
else:
event_loop_alias = "{}.{}".format(loop.__class__.__module__, loop.__class__.__name__)
except Exception:
event_loop_alias = str(loop)
clear_services()
clear_execution_context()
set_execution_context(
{
"tomodachi_version": tomodachi.__version__,
"python_version": platform.python_version(),
"system_platform": platform.system(),
"process_id": process_id,
"init_timestamp": init_timestamp_str,
"event_loop": event_loop_alias,
}
)
if event_loop_alias == "uvloop" and event_loop_version:
set_execution_context(
{
"uvloop_version": event_loop_version,
}
)
if watcher:
tz: Any = None
utc_tz: Any = None
try:
import pytz # noqa # isort:skip
import tzlocal # noqa # isort:skip
utc_tz = pytz.UTC
try:
tz = tzlocal.get_localzone()
if not tz:
tz = pytz.UTC
except Exception:
tz = pytz.UTC
except Exception:
pass
init_local_datetime = (
datetime.datetime.fromtimestamp(init_timestamp)
if tz and tz is not utc_tz and str(tz) != "UTC"
else datetime.datetime.utcfromtimestamp(init_timestamp)
)
print("---")
print("Starting tomodachi services (pid: {}) ...".format(process_id))
for file in service_files:
print("* {}".format(file))
print()
print(
"Current version: tomodachi {} on Python {}".format(
tomodachi.__version__, platform.python_version()
)
)
print(
"Event loop implementation: {}{}".format(
event_loop_alias, " {}".format(event_loop_version) if event_loop_version else ""
)
)
if tz:
print("Local time: {} {}".format(init_local_datetime.strftime("%B %d, %Y - %H:%M:%S,%f"), str(tz)))
print("Timestamp in UTC: {}".format(init_timestamp_str))
print()
print("File watcher is active - code changes will automatically restart services")
print("Quit running services with <ctrl+c>")
print()
cls._close_waiter = asyncio.Future()
cls._stopped_waiter = asyncio.Future()
cls.restart_services = False
try:
cls.services = set(
[
ServiceContainer(ServiceImporter.import_service_file(file), configuration)
for file in service_files
]
)
result = loop.run_until_complete(
asyncio.wait([asyncio.ensure_future(service.run_until_complete()) for service in cls.services])
)
exception = [v.exception() for v in [value for value in result if value][0] if v.exception()]
if exception:
raise cast(Exception, exception[0])
except tomodachi.importer.ServicePackageError:
pass
except Exception as e:
logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
if isinstance(e, ModuleNotFoundError): # pragma: no cover
missing_module_name = str(getattr(e, "name", None) or "")
if missing_module_name:
color = ""
color_reset = ""
try:
import colorama # noqa # isort:skip
color = colorama.Fore.WHITE + colorama.Back.RED
color_reset = colorama.Style.RESET_ALL
except Exception:
pass
print("")
print(
"{}[fatal error] The '{}' package is missing or cannot be imported.{}".format(
color, missing_module_name, color_reset
)
)
print("")
if restarting:
logging.getLogger("watcher.restart").warning("Service cannot restart due to errors")
logging.getLogger("watcher.restart").warning("Trying again in 1.5 seconds")
loop.run_until_complete(asyncio.wait([asyncio.sleep(1.5)]))
if cls._close_waiter and not cls._close_waiter.done():
cls.restart_services = True
else:
for signame in ("SIGINT", "SIGTERM"):
loop.remove_signal_handler(getattr(signal, signame))
else:
for signame in ("SIGINT", "SIGTERM"):
loop.remove_signal_handler(getattr(signal, signame))
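# purge modules imported while the services were running (keeping SAFE_MODULES
# and the modules loaded at startup) so that a restart re-imports fresh code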
current_modules = [m for m in sys.modules.keys()]
for m in current_modules:
if m not in init_modules and m not in SAFE_MODULES:
del sys.modules[m]
importlib.reload(tomodachi.container)
importlib.reload(tomodachi.invoker)
importlib.reload(tomodachi.invoker.base)
importlib.reload(tomodachi.importer)
restarting = True
if watcher:
if watcher_future and not watcher_future.done():
try:
watcher_future.set_result(None)
except RuntimeError: # pragma: no cover
watcher_future.cancel()
if not watcher_future.done(): # pragma: no cover
try:
loop.run_until_complete(watcher_future)
except (Exception, CancelledError, _CancelledError):
pass
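# A minimal sketch (not part of the original module; the service file path is
# hypothetical) of driving the launcher: the call blocks until all services
# stop, and passing a watcher enables the restart loop above.
def _example_launch():
    ServiceLauncher.run_until_complete({"services/app.py"}, configuration=None, watcher=None)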
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
import re
import shutil
import sys
import time
from datetime import datetime
import requests
import types
import ebstall.errors as errors
import ebstall.osutil as osutil
import ebstall.util as util
import letsencrypt
from ebstall.audit import AuditManager
from ebstall.consts import LE_VERIFY_DNS, PROVISIONING_SERVERS
from softhsm import SoftHsmV1Config
__author__ = 'dusanklinec'
logger = logging.getLogger(__name__)
# noinspection PyMethodMayBeStatic
class Ejbca(object):
"""
EJBCA configuration & builder
https://www.ejbca.org/docs/installation.html#Install
"""
PORT = 8443
PORT_PUBLIC = 8442
# Default home dirs
EJBCA_VERSION = 'ejbca_ce_6_3_1_1'
EJBCA_HOME = '/opt/ejbca_ce_6_3_1_1'
USER_HOME = '/home/ec2-user'
SSH_USER = 'ec2-user'
# EJBCA paths
INSTALL_PROPERTIES_FILE = 'conf/install.properties'
WEB_PROPERTIES_FILE = 'conf/web.properties'
DATABASE_PROPERTIES_FILE = 'conf/database.properties'
EJBCA_PROPERTIES_FILE = 'conf/ejbca.properties'
MAIL_PROPERTIES_FILE = 'conf/mail.properties'
P12_FILE = 'p12/superadmin.p12'
EXCLUDE_REINSTALL = ['vpn', 'vpn_templates', 'p12']
# Storage paths
PASSWORDS_FILE = '/root/ejbca.passwords'
PASSWORDS_BACKUP_DIR = '/root/ejbca.passwords.old'
DB_BACKUPS = '/root/ejbcadb.old'
# MySQL connection
MYSQL_HOST = 'localhost'
MYSQL_PORT = '3306'
MYSQL_DB = 'ejbca'
MYSQL_USER = 'ejbca'
# Default installation settings
INSTALL_PROPERTIES = {
'ca.name': 'SystemCA',
'ca.dn': 'CN=SystemCA,O=Enigma Bridge Ltd,C=GB',
'ca.tokentype': 'soft',
'ca.keytype': 'RSA',
'ca.keyspec': '2048',
'ca.signaturealgorithm': 'SHA256WithRSA',
'ca.validity': '7650',
'ca.policy': 'null'
}
# web.properties file - misc settings.
WEB_PROPERTIES = {
'cryptotoken.p11.lib.255.name': 'EnigmaBridge',
'cryptotoken.p11.lib.255.file': SoftHsmV1Config.SOFTHSM_SO_PATH,
'httpsserver.hostname': 'localhost',
'httpsserver.dn': 'CN=localhost,O=Enigma Bridge Ltd,C=GB',
'superadmin.cn': 'SuperAdmin',
'superadmin.dn': 'CN=SuperAdmin',
'superadmin.batch': 'true',
'vpn.email.from': 'root@localhost',
'vpn.admin.sameemail': 'True'
}
# MySQL database properties
DATABASE_PROPERTIES = {
# 'database.name': 'mysql',
# 'database.url': 'jdbc:mysql://localhost:3306/ejbca?characterEncoding=UTF-8',
# 'database.driver': 'com.mysql.jdbc.Driver',
'database.username': 'ejbca',
'database.password': 'sa'
}
# mail.properties file
MAIL_PROPERTIES = {
'mail.from': 'ejbca@localhost'
}
def __init__(self, install_props=None, web_props=None, print_output=False, eb_config=None, jks_pass=None,
config=None, staging=False, do_vpn=False, db_pass=None, master_p12_pass=None,
sysconfig=None, audit=None, app=None, openvpn=None, jboss=None, mysql=None,
*args, **kwargs):
self.install_props = util.defval(install_props, {})
self.web_props = util.defval(web_props, {})
self.database_props = {}
self.mail_props = {}
self.http_pass = util.defval(jks_pass, util.random_password(16))
self.java_pass = 'changeit' # EJBCA & JBoss bug here
self.superadmin_pass = util.random_password(16)
# MySQL EJBCA user password.
self.db_pass = util.defval(db_pass, util.random_password(16))
# P12 encryption password for VPN user enc.
self.master_p12_pass = util.defval(master_p12_pass, util.random_password(16))
self.do_vpn = do_vpn
self.print_output = print_output
self.hostname = None
self.domains = None
self.cert_dir = None # Certificate directory, filled in by LetsEncrypt
self.staging = staging
self.lets_encrypt = None
self.lets_encrypt_jks = None
self.no_ejbca_update = False
self.eb_config = eb_config
self.config = config
self.reg_svc = None
self.sysconfig = sysconfig
self.audit = audit
self.jboss = jboss
self.openvpn = openvpn
self.mysql = mysql
if self.audit is None:
self.audit = AuditManager(disabled=True)
self.skip_installation = False
self.doing_reinstall = False
# Remove secrets from audit logging
self.audit.add_secrets([self.http_pass, self.superadmin_pass, self.db_pass, self.master_p12_pass])
self.ejbca_install_result = 1
# Initialize settings
self._setup_database_properties()
def get_db_type(self):
"""
Returns DB type to use in the installation
:return: None for default (H2) or database type string, e.g., mysql
"""
return self.config.ejbca_db_type if self.config is not None else None
def get_database_root_password(self):
"""
Returns database root password for database setup. Used for external DBs (e.g., mysql)
:return:
"""
return self.config.mysql_root_password if self.config is not None else None
def get_ejbca_home(self):
"""
Returns the EJBCA home directory - looks at the EJBCA_HOME env var first, then the eb_config, then the default value
:return:
"""
if 'EJBCA_HOME' in os.environ and len(os.environ['EJBCA_HOME']) > 0:
return os.path.abspath(os.environ['EJBCA_HOME'])
if self.eb_config is not None:
config_home = self.eb_config.ejbca_home
if config_home is not None:
return config_home
return os.path.abspath(self.EJBCA_HOME)
def get_ejbca_version(self):
"""
Returns EJBCA version
:return:
"""
if 'EJBCA_VERSION' in os.environ and len(os.environ['EJBCA_VERSION']) > 0:
return os.environ['EJBCA_VERSION']  # version string, not a filesystem path
return self.EJBCA_VERSION
def get_ejbca_sh(self):
"""
Returns EJBCA sh script
:return:
"""
return os.path.join(self.get_ejbca_home(), 'bin', 'ejbca.sh')
def get_install_prop_file(self):
return os.path.abspath(os.path.join(self.get_ejbca_home(), self.INSTALL_PROPERTIES_FILE))
def get_web_prop_file(self):
return os.path.abspath(os.path.join(self.get_ejbca_home(), self.WEB_PROPERTIES_FILE))
def get_database_prop_file(self):
return os.path.abspath(os.path.join(self.get_ejbca_home(), self.DATABASE_PROPERTIES_FILE))
def get_email_prop_file(self):
return os.path.abspath(os.path.join(self.get_ejbca_home(), self.MAIL_PROPERTIES_FILE))
def properties_to_string(self, prop):
"""
Converts dict-based properties to a string of sorted key=value lines
:return:
"""
result = []
for k in prop:
result.append("%s=%s" % (k, prop[k]))
result = sorted(result)
return '\n'.join(result)
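# Illustrative sketch (not part of the original module): properties_to_string
# renders a dict as sorted "key=value" lines, so a call such as
#   self.properties_to_string({'ca.keyspec': '2048', 'ca.keytype': 'RSA'})
# is expected to yield the string
#   'ca.keyspec=2048\nca.keytype=RSA'
# The example values are hypothetical and only show the output format.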
def set_config(self, config):
self.config = config
def load_from_config(self):
"""
Restores passwords and other configuration variables from the config
:return:
"""
self.http_pass = self.config.ejbca_jks_password
self.db_pass = self.config.ejbca_db_password
self.master_p12_pass = self.config.ejbca_p12master_password
self.do_vpn = self.config.vpn_installed
def _setup_database_properties(self):
"""
Sets up database properties from the internal state,
e.g., database password and DB type.
:return:
"""
self.database_props['database.password'] = self.db_pass
db_type = self.get_db_type()
if db_type == 'mysql':
# 'database.name': 'mysql',
# 'database.url': 'jdbc:mysql://localhost:3306/ejbca?characterEncoding=UTF-8',
# 'database.driver': 'com.mysql.jdbc.Driver',
self.database_props['database.name'] = 'mysql'
self.database_props['database.driver'] = 'com.mysql.jdbc.Driver'
self.database_props['database.url'] = 'jdbc:mysql://%s:%s/%s?characterEncoding=UTF-8' \
% (self.MYSQL_HOST, self.MYSQL_PORT, self.MYSQL_DB)
else:
# Fallback - default H2 database
return
def set_domains(self, domains, primary=None, set_hostname=True):
"""
Sets the domains EJBCA is reachable on
:param domains:
:param primary:
:param set_hostname:
:return:
"""
if domains is None or len(domains) == 0:
domains = ['localhost']
if not isinstance(domains, types.ListType):
domains = [domains]
# sort by (length, lexicographic)
domains.sort()
domains.sort(key=len, reverse=True)
# if primary domain was not set use the longest one (convention).
if primary is not None:
if primary not in domains:
domains.insert(0, primary)
elif primary != domains[0]:
raise ValueError('Primary domain has to be listed first in the domain list')
else:
primary = domains[0]
self.domains = domains
if set_hostname:
self.set_hostname(primary)
def check_hostname_domains_consistency(self):
"""
Checks if hostname is the first domain in the domain list.
:return:
"""
return self.domains is not None \
and isinstance(self.domains, types.ListType) \
and self.hostname == self.domains[0]
def set_hostname(self, hostname):
"""
Sets the hostname EJBCA will use - updates the property files in memory.
Should not be called directly (outside the module); use set_domains() instead.
:return:
"""
if hostname is None:
hostname = 'localhost'
self.hostname = hostname
if not self.check_hostname_domains_consistency():
raise ValueError('Hostname is not consistent with domains, please, rather use set_domains()')
self.web_props['httpsserver.hostname'] = hostname
self.web_props['httpsserver.dn'] = 'CN=%s,O=Enigma Bridge Ltd,C=GB' % hostname
leftmost_domain = util.get_leftmost_domain(hostname)
self.install_props['ca.name'] = 'SystemCA-%s' % leftmost_domain
self.install_props['ca.dn'] = 'CN=SystemCA-%s,O=Enigma Bridge Ltd,C=GB' % leftmost_domain
# Update other hostname-related properties
if self.do_vpn:
self.web_props['vpn.email.from'] = 'private-space@%s' % hostname
self.mail_props['mail.from'] = 'private-space@%s' % hostname
return self.web_props
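# Example (assumption-labelled sketch): calling set_domains(['pki.example.com'])
# is expected to derive roughly the following in-memory properties:
#   web_props['httpsserver.hostname'] = 'pki.example.com'
#   web_props['httpsserver.dn']       = 'CN=pki.example.com,O=Enigma Bridge Ltd,C=GB'
#   install_props['ca.name']          = 'SystemCA-pki'
# The domain value is illustrative only.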
def _update_property_file(self, filepath, properties):
"""
Updates EJBCA property file with backup
:param filepath:
:param properties:
:return:
"""
prop_hdr = '#\n'
prop_hdr += '# Config file generated: %s\n' % (datetime.now().strftime("%Y-%m-%d %H:%M"))
prop_hdr += '#\n'
file_hnd = None
try:
file_hnd, file_backup = util.safe_create_with_backup(filepath, 'w', 0o644)
file_hnd.write(prop_hdr + self.properties_to_string(properties) + "\n\n")
finally:
if file_hnd is not None:
file_hnd.close()
def update_properties(self):
"""
Updates the EJBCA property files
:return:
"""
self._setup_database_properties()
self.web_props['vpn.ejbca.home'] = self.get_ejbca_home()
if self.do_vpn and self.openvpn is not None:
self.web_props['vpn.vpn.subnet.address'] = self.openvpn.get_ip_net()
self.web_props['vpn.vpn.subnet.size'] = self.openvpn.get_ip_net_size()
self.web_props['vpn.vpn.server'] = self.openvpn.get_ip_vpn_server()
file_web = self.get_web_prop_file()
file_ins = self.get_install_prop_file()
file_db = self.get_database_prop_file()
prop_web = util.merge(self.WEB_PROPERTIES, self.web_props)
prop_ins = util.merge(self.INSTALL_PROPERTIES, self.install_props)
prop_db = util.merge(self.DATABASE_PROPERTIES, self.database_props)
self._update_property_file(file_web, prop_web)
self._update_property_file(file_ins, prop_ins)
self._update_property_file(file_db, prop_db)
if self.do_vpn:
file_mail = self.get_email_prop_file()
prop_mail = util.merge(self.MAIL_PROPERTIES, self.mail_props)
self._update_property_file(file_mail, prop_mail)
def cli_cmd(self, cmd, log_obj=None, write_dots=False, on_out=None, on_err=None, ant_answer=True, cwd=None):
"""
Runs a command line task.
Used mainly for ant invocations.
:return:
"""
default_cwd = self.get_ejbca_home()
if on_out is None and ant_answer:  # use the ant auto-answer callback only when requested
on_out = self.ant_answer
cwd = cwd if cwd is not None else default_cwd
return self.sysconfig.cli_cmd_sync(cmd, log_obj=log_obj, write_dots=write_dots,
on_out=on_out, on_err=on_err, cwd=cwd)
#
# ANT CLI, calls
#
def ant_cmd(self, cmd, log_obj=None, write_dots=False, on_out=None, on_err=None):
ret, out, err = self.cli_cmd('sudo -E -H -u %s ant %s' % (self.jboss.get_user(), cmd),
log_obj=log_obj, write_dots=write_dots,
on_out=on_out, on_err=on_err, ant_answer=True)
if ret != 0:
sys.stderr.write('\nError, process returned with invalid result code: %s\n' % ret)
if isinstance(log_obj, types.StringTypes):
sys.stderr.write('For more details please refer to %s \n' % log_obj)
if write_dots:
sys.stderr.write('\n')
return ret, out, err
def ant_deploy(self):
"""
ant deploy command
Basic build & deployment to JBoss
:return:
"""
return self.ant_cmd('deploy', log_obj='/tmp/ant-deploy.log', write_dots=self.print_output)
def ant_deployear(self):
return self.ant_cmd('deployear', log_obj='/tmp/ant-deployear.log', write_dots=self.print_output)
# noinspection PyUnusedLocal
def ant_answer(self, out, feeder, p=None, *args, **kwargs):
out = out.strip()
if out.startswith('Please enter'): # default - use default value, no starving
feeder.feed('\n')
elif out.startswith('[input] Please enter'): # default - use default value, no starving
feeder.feed('\n')
# noinspection PyUnusedLocal
def ant_install_answer(self, out, feeder, p=None, *args, **kwargs):
out = out.strip()
if 'truststore with the CA certificate for https' in out:
feeder.feed(self.java_pass + '\n')
elif 'keystore with the TLS key for https' in out:
feeder.feed(self.http_pass + '\n')
elif 'the superadmin password' in out:
feeder.feed(self.superadmin_pass + '\n')
elif 'password CA token password' in out:
feeder.feed('\n')
elif out.startswith('Please enter'): # default - use default value, no starving
feeder.feed('\n')
elif out.startswith('[input] Please enter'): # default - use default value, no starving
feeder.feed('\n')
def ant_install(self):
"""
Installation - env setup, data source setup
:return:
"""
return self.ant_cmd('install', log_obj='/tmp/ant-install.log', write_dots=self.print_output,
on_out=self.ant_install_answer)
def ant_client_tools(self):
"""
Builds toolbox utilities
:return:
"""
return self.ant_cmd('clientToolBox', log_obj='/tmp/ant-clientToolBox.log', write_dots=self.print_output)
#
# JBoss CLI
#
def jboss_reload(self):
"""
Reloads JBoss server via CLI
:return:
"""
return self.jboss.reload()
def jboss_undeploy_fs(self):
"""
Undeploys by removing from the FS
:return:
"""
return self.jboss.undeploy_fs('ejbca.ear')
def jboss_undeploy(self):
"""
Undeploys EJBCA from JBoss via CLI command
:return:
"""
return self.jboss.cli_cmd('undeploy ejbca.ear')
def jboss_remove_datasource(self):
"""
Removes EJBCA Data source
:return:
"""
return self.jboss.cli_cmd('data-source remove --name=ejbcads')
def jboss_add_mysql_jdbc(self):
"""
Adds MySQL JDBC to the JBoss.
Performed only once after JBoss installation.
:return:
"""
return self.jboss.add_mysql_jdbc()
def jboss_rollback_ejbca(self):
cmds = ['/core-service=management/security-realm=SSLRealm/authentication=truststore:remove',
'/core-service=management/security-realm=SSLRealm/server-identity=ssl:remove',
'/core-service=management/security-realm=SSLRealm:remove',
'/socket-binding-group=standard-sockets/socket-binding=httpspub:remove',
'/subsystem=undertow/server=default-server/https-listener=httpspub:remove',
'/subsystem=web/connector=httpspub:remove',
'/socket-binding-group=standard-sockets/socket-binding=httpspriv:remove',
'/subsystem=undertow/server=default-server/https-listener=httpspriv:remove',
'/subsystem=web/connector=httpspriv:remove',
'/socket-binding-group=standard-sockets/socket-binding=http:remove',
'/subsystem=undertow/server=default-server/http-listener=http:remove',
'/subsystem=web/connector=http:remove',
'/subsystem=undertow/server=default-server/http-listener=default:remove',
'/system-property=org.apache.catalina.connector.URI_ENCODING:remove',
'/system-property=org.apache.catalina.connector.USE_BODY_ENCODING_FOR_QUERY_STRING:remove',
'/interface=http:remove',
'/interface=httpspub:remove',
'/interface=httpspriv:remove']
for cmd in cmds:
self.jboss.cli_cmd(cmd)
self.jboss_reload()
def jboss_add_rewrite_ejbca(self):
"""
Adds EJBCA default rewrite rules
:return:
"""
self.jboss.add_rewrite_rule('rule01', '^/$', '/ejbca/adminweb', 'L,QSA,R')
self.jboss.add_rewrite_rule('rule02', '^/pki/?$', '/ejbca/adminweb', 'L,QSA,R')
def jboss_add_rewrite_vpn(self):
"""
Adds default rewrites for VPN configuration
:return:
"""
self.jboss.add_rewrite_rule('rule01', '^/$', '/ejbca/vpn/index.jsf', 'L,QSA,R')
self.jboss.add_rewrite_rule('rule02', '^/admin$', '/ejbca/adminweb/vpn/vpnusers.jsf', 'L,QSA,R')
self.jboss.add_rewrite_rule('rule03', '^/key/?$', '/ejbca/vpn/key.jsf', 'L,QSA,R')
self.jboss.add_rewrite_rule('rule04', '^/pki/?$', '/ejbca/adminweb', 'L,QSA,R')
self.jboss.add_rewrite_rule('rule05', '^/p12/?$', '/ejbca/vpn/p12.jsf', 'L,QSA,R')
self.jboss.add_rewrite_rule('rule06', '^/direct/?$', '/ejbca/vpn/getvpn', 'L,QSA,R')
def jboss_configure_rewrite_ejbca(self):
"""
Configures EJBCA rewrite rules
:return:
"""
self.jboss.enable_default_root()
self.jboss.remove_all_rewrite_rules()
self.jboss_add_rewrite_ejbca()
def jboss_configure_rewrite_vpn(self):
"""
Configures VPN rewrite rules
:return:
"""
self.jboss.enable_default_root()
self.jboss.remove_all_rewrite_rules()
self.jboss_add_rewrite_vpn()
#
# Backup / env reset
#
def backup_mysql_database(self):
"""
Backs up the EJBCA database to the standard location.
Internally uses the mysqldump command to create an SQL dump.
:return:
"""
return self.mysql.backup_database(database_name=self.MYSQL_DB, backup_dir=self.DB_BACKUPS)
def reset_mysql_database(self):
"""
Performs backup of the original MySQL database - if any.
Resets the database to the original state - drop database, drop users, create from scratch.
:return:
"""
self.backup_mysql_database()
self.audit.add_secrets(self.db_pass)
try:
engine = self.mysql.build_engine()
self.mysql.drop_database(self.MYSQL_DB, engine=engine)
self.mysql.create_database(self.MYSQL_DB, engine=engine)
self.mysql.create_user(self.MYSQL_USER, self.db_pass, self.MYSQL_DB, engine=engine)
except Exception as e:
logger.info('Exception in database regeneration %s' % e)
raise
def jboss_backup_database(self):
"""
Removes original database, moving it to a backup location.
:return:
"""
jboss_dir = self.jboss.get_jboss_home()
if not os.path.exists(jboss_dir):
return None, None, None
db1 = os.path.join(jboss_dir, 'ejbcadb.h2.db')
db2 = os.path.join(jboss_dir, 'ejbcadb.trace.db')
db3 = os.path.join(jboss_dir, 'ejbcadb.lock.db')
util.make_or_verify_dir(self.DB_BACKUPS)
backup1 = util.delete_file_backup(db1, backup_dir=self.DB_BACKUPS)
backup2 = util.delete_file_backup(db2, backup_dir=self.DB_BACKUPS)
backup3 = util.delete_file_backup(db3, backup_dir=self.DB_BACKUPS)
if self.get_db_type() == 'mysql':
self.reset_mysql_database()
return backup1, backup2, backup3
def jboss_fix_privileges(self):
"""
Fixes file ownership and privileges for the JBoss user
#TODO: use JBoss object
:return:
"""
usr = self.jboss.get_user()
self.sysconfig.exec_shell('sudo chown -R %s:%s %s' % (usr, usr, self.get_ejbca_home()))
self.jboss.fix_privileges()
def jboss_wait_after_deploy(self):
"""
Waits for JBoss to finish initial deployment.
:return:
"""
jboss_works = False
max_attempts = 30
for i in range(0, max_attempts):
if i > 0:
if self.print_output:
sys.stderr.write('.')
time.sleep(3)
# noinspection PyBroadException
try:
ret, out, err = self.jboss.cli_cmd('deploy -l')
if out is None or len(out) == 0:
continue
out_total = '\n'.join(out)
if re.search(r'ejbca.ear.+?\sOK', out_total):
jboss_works = True
break
except Exception as ex:
continue
return jboss_works
def jboss_restart(self):
"""
Restarts JBoss daemon
:return:
"""
return self.jboss.jboss_restart()
def backup_passwords(self):
"""
Backs up the generated passwords to /root/ejbca.passwords
:return:
"""
util.make_or_verify_dir(self.PASSWORDS_BACKUP_DIR, mode=0o600)
util.delete_file_backup(self.PASSWORDS_FILE, chmod=0o600, backup_dir=self.PASSWORDS_BACKUP_DIR)
with util.safe_open(self.PASSWORDS_FILE, chmod=0o600) as f:
f.write('httpsserver.password=%s\n' % self.http_pass)
f.write('java.trustpassword=%s\n' % self.java_pass)
f.write('superadmin.password=%s\n' % self.superadmin_pass)
f.write('database.password=%s\n' % self.db_pass)
f.write('masterp12.password=%s\n' % self.master_p12_pass)
f.flush()
self.audit.audit_file_write(self.PASSWORDS_FILE)
def get_p12_file(self):
return os.path.abspath(os.path.join(self.get_ejbca_home(), self.P12_FILE))
def copy_p12_file(self):
"""
Copies the p12 file to the home directory & chowns it so the user can download it via scp
:return:
"""
p12 = self.get_p12_file()
new_p12 = os.path.abspath(os.path.join(self.USER_HOME, 'ejbca-admin.p12'))
if os.path.exists(new_p12):
os.remove(new_p12)
self.audit.audit_delete(new_p12)
# copy in a safe mode - create file non readable by others, copy
with open(p12, 'r') as src_p12:
with util.safe_open(new_p12, mode='w', chmod=0o600) as dst_p12:
shutil.copyfileobj(src_p12, dst_p12)
self.audit.audit_copy(src=p12, dst=new_p12)
self.sysconfig.exec_shell('sudo chown %s:%s %s' % (self.SSH_USER, self.SSH_USER, new_p12))
return new_p12
#
# EJBCA CLI
#
def ejbca_get_cwd(self):
return os.path.join(self.get_ejbca_home(), 'bin')
def ejbca_get_command(self, cmd):
return 'sudo -E -H -u %s %s %s' % (self.jboss.get_user(), self.get_ejbca_sh(), cmd)
def ejbca_cmd(self, cmd, retry_attempts=3, write_dots=False, on_out=None, on_err=None):
"""
Executes cd $EJBCA_HOME/bin
./ejbca.sh $*
:param cmd:
:param retry_attempts:
:param write_dots:
:param on_out:
:param on_err:
:return: return code, stdout, stderr
"""
cwd = self.ejbca_get_cwd()
ret, out, err = -1, None, None
cmd_exec = self.ejbca_get_command(cmd)
for i in range(0, retry_attempts):
ret, out, err = self.cli_cmd(
cmd_exec,
log_obj=None, write_dots=write_dots,
on_out=on_out, on_err=on_err,
ant_answer=False, cwd=cwd)
if ret == 0:
return ret, out, err
return ret, out, err
#
# PKCS 11 token operations
#
def ejbca_add_softhsm_token(self, softhsm=None, name='EnigmaBridge', slot_id=0):
"""
Adds a new crypto token to the EJBCA using CLI
https://www.ejbca.org/docs/userguide.html#New Crypto Tokens
:param softhsm: SoftHSM object
:param name: name of the HW crypto token used in EJBCA
:param slot_id: slot index in the token to associate with the new EJBCA crypto token
:return:
"""
so_path = softhsm.get_so_path() if softhsm is not None else SoftHsmV1Config.SOFTHSM_SO_PATH
cmd = 'cryptotoken create ' \
'--token "%s" ' \
'--pin 0000 ' \
'--autoactivate TRUE ' \
'--type "PKCS11CryptoToken" ' \
'--lib "%s" ' \
'--slotlabeltype SLOT_INDEX ' \
'--slotlabel %d' % (name, so_path, slot_id)
return self.ejbca_cmd(cmd, retry_attempts=1, write_dots=self.print_output)
def pkcs11_get_cwd(self):
return os.path.join(self.get_ejbca_home(), 'bin')
def pkcs11_get_command(self, cmd):
return 'sudo -E -H -u %s %s/pkcs11HSM.sh %s' % (self.jboss.get_user(), self.pkcs11_get_cwd(), cmd)
def pkcs11_cmd(self, cmd, retry_attempts=3, write_dots=False, on_out=None, on_err=None):
"""
Executes cd $EJBCA_HOME/bin
./pkcs11HSM.sh $*
:param cmd:
:param retry_attempts:
:param write_dots:
:param on_out:
:param on_err:
:return: return code, stdout, stderr
"""
cwd = self.pkcs11_get_cwd()
ret, out, err = -1, None, None
cmd_exec = self.pkcs11_get_command(cmd)
for i in range(0, retry_attempts):
ret, out, err = self.cli_cmd(
cmd_exec,
log_obj=None, write_dots=write_dots,
on_out=on_out, on_err=on_err,
ant_answer=False, cwd=cwd)
if ret == 0:
return ret, out, err
return ret, out, err
# noinspection PyUnusedLocal
def pkcs11_answer(self, out, feeder, p=None, *args, **kwargs):
out = util.strip(out)
if 'Password:' in out:
feeder.feed('0000')
feeder.feed('\n')
def pkcs11_get_generate_key_cmd(self, softhsm=None, bit_size=2048, alias=None, slot_id=0):
so_path = softhsm.get_so_path() if softhsm is not None else SoftHsmV1Config.SOFTHSM_SO_PATH
return 'generate %s %s %s %s' % (so_path, bit_size, alias, slot_id)
def pkcs11_get_test_key_cmd(self, softhsm=None, slot_id=0):
so_path = softhsm.get_so_path() if softhsm is not None else SoftHsmV1Config.SOFTHSM_SO_PATH
return 'test %s %s' % (so_path, slot_id)
def pkcs11_generate_key(self, softhsm=None, bit_size=2048, alias=None, slot_id=0, retry_attempts=3):
"""
Generates keys in the PKCS#11 token.
Can be used with the EJBCA.
cd $EJBCA_HOME/bin
./pkcs11HSM.sh generate /usr/lib64/softhsm/libsofthsm.so 4096 signKey 0
:return:
"""
cmd = self.pkcs11_get_generate_key_cmd(softhsm=softhsm, bit_size=bit_size, alias=alias, slot_id=slot_id)
return self.pkcs11_cmd(cmd=cmd, retry_attempts=retry_attempts, write_dots=self.print_output,
on_out=self.pkcs11_answer, on_err=self.pkcs11_answer)
def pkcs11_generate_default_key_set(self, softhsm=None, slot_id=0, retry_attempts=5,
sign_key_alias='signKey',
default_key_alias='defaultKey',
test_key_alias='testKey'):
"""
Generates a default key set to be used with EJBCA
:param softhsm:
:param slot_id:
:param retry_attempts:
:param sign_key_alias:
:param default_key_alias:
:param test_key_alias:
:return: return code, stdout, stderr
"""
aliases = [sign_key_alias, default_key_alias, test_key_alias]
key_sizes = [2048, 2048, 2048]
for idx, alias in enumerate(aliases):
key_size = key_sizes[idx]
ret, out, cmd = self.pkcs11_generate_key(softhsm=softhsm, bit_size=key_size, alias=alias,
slot_id=slot_id, retry_attempts=retry_attempts)
if ret != 0:
return ret, out, cmd
if self.print_output:
sys.stderr.write('.')
return 0, None, None
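# Provisioning sketch (illustrative, not invoked by this module itself): the usual
# order is to generate the default key set in the SoftHSM slot first and only then
# register the slot as an EJBCA crypto token. The `ejbca` instance and `softhsm`
# object are assumed to be created by the surrounding installer.
#
#   ret, out, err = ejbca.pkcs11_generate_default_key_set(softhsm=softhsm, slot_id=0)
#   if ret == 0:
#       ejbca.ejbca_add_softhsm_token(softhsm=softhsm, name='EnigmaBridge', slot_id=0)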
#
# VPN ops
#
def vpn_get_ca_properties(self):
"""
Returns contents of a property file for the VPN CA. Used when creating the VPN CA via command line
:return: string - property file
"""
props = 'sharedLibrary %s\n' % SoftHsmV1Config.SOFTHSM_SO_PATH
props += 'slotLabelType=SLOT_INDEX\n'
props += 'slotLabelValue=0\n\n'
props += '# auto-activation\n'
props += 'pin=0000\n\n'
props += '# CA key configuration\n'
props += 'defaultKey defaultKey\n'
props += 'certSignKey signKey\n'
props += 'crlSignKey signKey\n'
props += 'testKey testKey\n'
return props
def vpn_create_tmp_ca_prop_file(self):
"""
Creates temporary property file for VPN CA CLI.
:return: fname string
"""
fpath = os.path.join('/tmp', 'vpn.ca.properties')
fobj, fname = util.unique_file(fpath, mode=0o644)
with fobj:
fobj.write(self.vpn_get_ca_properties())
return fname
def vpn_create_ca_cmd(self, prop_file_path):
"""
Returns EJBCA cmd to create VPN CA. CA Validity = 25 years
:param prop_file_path: file path to the property file with CA properties
:return: command string
"""
cmd = "ca init --caname VPN "
cmd += "--dn 'CN=%s'" % self.hostname
cmd += " --tokenType 'org.cesecore.keys.token.PKCS11CryptoToken' "
cmd += "--keyspec 2048 "
cmd += "--keytype RSA "
cmd += "-v 9150 "
cmd += "-s SHA256WithRSA "
cmd += "--tokenPass 0000 "
cmd += "--policy null "
cmd += "--tokenprop '%s'" % prop_file_path
return cmd
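# For reference, with hostname 'vpn.example.com' and a temporary property file
# '/tmp/vpn.ca.properties.xyz' the assembled command looks roughly like:
#   ca init --caname VPN --dn 'CN=vpn.example.com'
#     --tokenType 'org.cesecore.keys.token.PKCS11CryptoToken'
#     --keyspec 2048 --keytype RSA -v 9150 -s SHA256WithRSA
#     --tokenPass 0000 --policy null --tokenprop '/tmp/vpn.ca.properties.xyz'
# (hostname and temp file name are illustrative).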
def vpn_create_ca(self):
"""
Creates VPN CA using EJBCA CLI.
Corresponding SoftHSM token has to be already prepared with keys generated in it.
:return: 0 on success
"""
fpath_prop = self.vpn_create_tmp_ca_prop_file()
try:
cmd = self.vpn_create_ca_cmd(fpath_prop)
return self.ejbca_cmd(cmd, retry_attempts=1, write_dots=self.print_output)[0]
finally:
util.safely_remove(fpath_prop)
def vpn_create_profiles(self):
"""
Creates the required VPN certificate and end entity profiles.
The VPN CA has to be created already.
:return: 0 on success
"""
cmd = 'vpn initprofiles'
return self.ejbca_cmd(cmd, retry_attempts=1, write_dots=self.print_output)[0]
def vpn_create_server_certs(self, directory=None):
"""
Creates the VPN server credentials.
The VPN CA and profiles have to be created already.
:param directory: if none, default directories are used.
:return: 0 on success
"""
cmd = 'vpn genserver --create --regenerate --pem --password \'%s\'' \
% (util.escape_shell(self.master_p12_pass))
if directory is not None:
cmd += ' --directory \'%s\'' % util.escape_shell(directory)
return self.ejbca_cmd(cmd, retry_attempts=1, write_dots=self.print_output)[0]
def vpn_create_crl(self, force=True):
"""
Creates a new CRL forcefully. Used to generate the first CRL so OpenVPN can start,
or to regenerate the CRL. The force parameter is currently unused.
:return: 0 on success
"""
cmd = 'vpn crl'
return self.ejbca_cmd(cmd, retry_attempts=1, write_dots=self.print_output)[0]
def vpn_create_user(self, email, device='default'):
"""
Creates a new VPN user via EJBCA CLI.
Credentials are sent to the user email
:param email:
:param device:
:return: 0 on success
"""
client_password = util.random_password(16)
self.audit.add_secrets(client_password)
cmd = "vpn genclient --email '%s' --device '%s' --password '%s' --regenerate --superadmin" \
% (util.escape_shell(email), util.escape_shell(device), util.escape_shell(client_password))
return self.ejbca_cmd(cmd, retry_attempts=1, write_dots=self.print_output)[0]
def vpn_create_p12_otp(self, user='superadmin', p12_path=None):
"""
Generates p12 OTP, returns the OTP code
:param user:
:param p12_path:
:return:
"""
if p12_path is None:
p12_path = os.path.join(self.get_ejbca_home(), 'p12', 'superadmin.p12')
cmd = "vpn p12 --id '%s' --p12 '%s'" % (user, p12_path)
ret, out, err = self.ejbca_cmd(cmd, retry_attempts=1, write_dots=self.print_output)
if ret != 0:
raise errors.SetupError('Could not create P12 OTP download link')
for line in [x.strip() for x in out]:
if line.startswith('OTP_DOWNLOAD_TOKEN='):
token = line.split('=', 1)[1]
return token
raise errors.SetupError('Could not extract OTP token from the EJBCA CLI response')
def vpn_get_crl_cron_file(self):
"""
Returns contents of the cron.d file for generating a CRL
:return: crl cron file string
"""
crl = '# Check each half an hour if regeneration is needed\n'
crl += '*/30 * * * * %s %s vpn crl\n' % (self.jboss.get_user(), self.get_ejbca_sh())
return crl
def vpn_install_cron(self):
"""
Installs all cron.d files required by the VPN
:return: 0 on success, can throw exception
"""
crl_cron = self.vpn_get_crl_cron_file()
if self.sysconfig is None:
raise ValueError('Sysconfig is None, required for cron installation')
return self.sysconfig.install_crond_file(file_name='ejbca-vpn', file_contents=crl_cron)
def vpn_get_crl_path(self):
"""
Returns the path of the generated CRL file
:return: string CRL path
"""
return os.path.join(self.get_ejbca_home(), 'vpn', '%s.crl' % self.hostname)
def vpn_get_vpn_client_config_path(self):
"""
Returns the path of the client VPN configuration file template.
The template is used when sending new configuration files to clients or providing them for download.
:return: string VPN client config template path
"""
return os.path.join(self.get_ejbca_home(), 'vpn_templates/vpnconfig.ovpn')
def vpn_get_server_cert_paths(self):
"""
Returns VPN server paths
:return: (ca, cert, key) paths
"""
vpn_base = os.path.join(self.get_ejbca_home(), 'vpn')
ca = os.path.join(vpn_base, 'VPN_Server-CA.pem')
crt = os.path.join(vpn_base, 'VPN_Server.pem')
key = os.path.join(vpn_base, 'VPN_Server-key.pem')
return ca, crt, key
#
# LetsEncrypt & Cert
#
def get_keystore_path(self):
"""
Returns path to the JBoss key store (for https)
:return:
"""
return self.jboss.get_keystore_path()
def le_enroll(self, le_method=None):
"""
Enrolls to LetsEncrypt with specified domains
:param le_method:
:return:
"""
# Password needs to be stored anyway for future renewal / regeneration
self.config.ejbca_jks_password = self.http_pass
# LetsEncrypt certificate is OK. Create JKS.
# Backup previous JKS, delete the old one
jks_path = self.get_keystore_path()
util.make_or_verify_dir(self.DB_BACKUPS)
util.delete_file_backup(jks_path, chmod=0o600, backup_dir=self.DB_BACKUPS)
# Create new JKS
self.audit.add_secrets(self.http_pass)
self.lets_encrypt_jks = letsencrypt.LetsEncryptToJks(
cert_dir=self.cert_dir,
jks_path=jks_path,
jks_alias=self.hostname,
password=self.http_pass,
print_output=self.print_output,
audit=self.audit,
sysconfig=self.sysconfig)
ret = self.lets_encrypt_jks.convert()
if ret != 0:
return 3
self.config.ejbca_domains = self.domains
self.config.ejbca_hostname = self.hostname
return 0
def le_renew(self, cert_dir=None, le_method=None):
"""
Renews LetsEncrypt certificate, updates JKS containing the certificate.
:return: 0 if certificate was renewed and JKS recreated, 1 if OK but no renewal was needed, error otherwise
"""
if cert_dir is not None:
self.cert_dir = cert_dir
jks_path = self.get_keystore_path()
util.delete_file_backup(jks_path, chmod=0o600, backup_dir=self.DB_BACKUPS)
# Create new JKS
self.audit.add_secrets(self.http_pass)
self.lets_encrypt_jks = letsencrypt.LetsEncryptToJks(
cert_dir=self.cert_dir,
jks_path=jks_path,
jks_alias=self.hostname,
password=self.http_pass,
print_output=self.print_output,
audit=self.audit,
sysconfig=self.sysconfig)
ret = self.lets_encrypt_jks.convert()
if ret != 0:
logger.debug('Problem with certificate conversion to JKS, code: %s' % ret)
return 4
self.config.ejbca_hostname = self.hostname
return 0
#
# Updating via provisioning server
#
def download_file(self, url, filename):
"""
Downloads a binary file and saves it under the given filename
:param url:
:param filename:
:return:
"""
r = requests.get(url, stream=True, timeout=15)
with open(filename, 'wb') as f:
shutil.copyfileobj(r.raw, f)
return filename
def update_ejbca_from_file(self, archive_path, basedir):
"""
Updates current EJBCA installation using the downloaded archive file.
:param archive_path:
:param basedir:
:return:
"""
cmd = 'sudo tar -xzf %s' % archive_path
ret, out, err = self.sysconfig.cli_cmd_sync(cmd, write_dots=True, cwd=basedir)
if ret != 0:
raise errors.SetupError('Could not extract update archive')
folders = [f for f in os.listdir(basedir) if not os.path.isfile(os.path.join(basedir, f))
and f != '.' and f != '..']
if len(folders) != 1:
raise errors.SetupError('Invalid folder structure after update extraction')
archive_dir = os.path.join(basedir, folders[0])
if not os.path.exists(archive_dir):
raise errors.SetupError('Directory with ejbca not found in the update archive: %s' % archive_dir)
if not os.path.exists(os.path.join(archive_dir, 'build.xml')):
raise errors.SetupError('Invalid update archive, build.xml not found in %s' % archive_dir)
archive_slash = util.add_ending_slash(archive_dir)
dest_slash = util.add_ending_slash(self.get_ejbca_home())
excludes = ''
if self.doing_reinstall:
excludes = ' '.join(['--exclude %s' % util.escape_shell(util.add_ending_slash(x))
for x in self.EXCLUDE_REINSTALL])
cmd = 'sudo rsync -av --delete %s %s %s' \
% (excludes, util.escape_shell(archive_slash), util.escape_shell(dest_slash))
ret, out, err = self.sysconfig.cli_cmd_sync(cmd, write_dots=True, cwd=basedir)
if ret != 0:
raise errors.SetupError('EJBCA sync failed')
self.jboss_fix_privileges()
def update_installation(self, attempts=3):
"""
Downloads a new revision of the EJBCA from the provisioning server, if possible
:return:
"""
if self.no_ejbca_update:
logger.debug('EJBCA update is disabled')
return
try:
logger.debug('Going to download specs from the provisioning servers')
for provserver in PROVISIONING_SERVERS:
url = 'https://%s/ejbca/index.json' % provserver
tmpdir = util.safe_new_dir('/tmp/ejbca-update')
for attempt in range(attempts):
try:
self.audit.audit_evt('prov-ejbca', url=url)
res = requests.get(url=url, timeout=15)
res.raise_for_status()
js = res.json()
self.audit.audit_evt('prov-ejbca', url=url, response=js)
revs = js['versions']['6.3.1.1']['revisions']
top_rev = None
for rev in revs:
if top_rev is None or top_rev['rev'] < rev['rev']:
top_rev = rev
archive_url = top_rev['url']
logger.debug('Revision: %s, url: %s' % (top_rev['rev'], archive_url))
# Download archive.
archive_path = os.path.join(tmpdir, 'ejbca_6_3_1_1.tgz')
self.download_file(archive_url, archive_path)
logger.debug('File downloaded, updating...')
# Update
self.update_ejbca_from_file(archive_path, tmpdir)
return 0
except errors.SetupError as e:
logger.debug('SetupException in updating EJBCA from the provisioning server: %s' % e)
self.audit.audit_exception(e, process='prov-ejbca')
except Exception as e:
logger.debug('Exception in updating EJBCA from the provisioning server: %s' % e)
self.audit.audit_exception(e, process='prov-ejbca')
finally:
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
return 0
except Exception as e:
logger.debug('Exception when updating EJBCA')
self.audit.audit_exception(e)
#
# Actions
#
def undeploy(self):
"""
Undeploys EJBCA installation
:return:
"""
self.jboss_undeploy()
self.jboss_undeploy_fs()
self.jboss_remove_datasource()
self.jboss_rollback_ejbca()
self.jboss_reload()
def undeploy_fast(self):
"""
Undeploys EJBCA installation - faster, keeps datasource & results of "ant install"
:return:
"""
self.jboss_undeploy_fs()
self.jboss_restart()
def configure(self):
"""
Configures EJBCA for installation deployment
:return:
"""
# 1. update properties file
if self.print_output:
print(" - Updating settings")
self.update_properties()
self.backup_passwords()
if self.config is not None:
self.config.ejbca_jks_password = self.http_pass
self.config.ejbca_db_password = self.db_pass
self.config.ejbca_p12master_password = self.master_p12_pass
self.config.vpn_installed = self.do_vpn
# Restart jboss - to make sure it is running
if self.print_output:
print("\n - Restarting application server, please wait")
jboss_works = self.jboss_restart()
if not jboss_works:
print("\n Application server (JBoss) could not be restarted. Please, resolve the problem and start again")
return 100
# 2. Undeploy original EJBCA, make JBoss clean
if self.print_output:
print("\n - Preparing environment for application server")
self.undeploy()
# Restart jboss - so we can delete database after removal
if self.print_output:
print("\n - Restarting application server, please wait")
jboss_works = self.jboss_restart()
if not jboss_works:
print("\n Application server could not be restarted. Please, resolve the problem and start again")
return 100
# Delete & backup database, fix privileges, reload.
self.jboss_backup_database()
self.jboss_fix_privileges()
self.jboss_reload()
# Updating from the provisioning server
print("\n - Updating to the latest revision")
self.update_installation()
self.update_properties()
self.jboss_fix_privileges()
# 3. deploy, 5 attempts
for i in range(0, 5):
if self.print_output:
print("\n - Deploying the PKI system" if i == 0 else
"\n - Deploying the PKI system, attempt %d" % (i+1))
res, out, err = self.ant_deploy()
self.ejbca_install_result = res
if res == 0:
break
if self.ejbca_install_result != 0:
return 2
# 4. install, 3 attempts
for i in range(0, 3):
if self.print_output:
print(" - Installing the PKI system" if i == 0 else
" - Installing the PKI system, attempt %d" % (i+1))
self.jboss_fix_privileges()
self.jboss_wait_after_deploy()
res, out, err = self.ant_install()
self.ejbca_install_result = res
if res == 0:
break
self.ant_client_tools()
self.jboss_fix_privileges()
if self.do_vpn:
self.jboss_configure_rewrite_vpn()
else:
self.jboss_configure_rewrite_ejbca()
self.jboss_reload()
return self.ejbca_install_result
def reinstall(self):
"""
Soft re-installation, preserving user data.
:return:
"""
self.doing_reinstall = True
if self.print_output:
print("\n - Undeploying, please wait")
self.jboss_undeploy_fs()
jboss_works = self.jboss_restart()
if not jboss_works:
print("\n Application server (JBoss) could not be restarted. Please, resolve the problem and start again")
raise errors.SetupError('JBoss restart failed')
# Updating from the provisioning server
print("\n - Updating to the latest revision")
self.update_installation()
self.update_properties()
self.jboss_fix_privileges()
# 3. deploy, 5 attempts
for i in range(0, 5):
if self.print_output:
print("\n - Deploying the PKI system" if i == 0 else
"\n - Deploying the PKI system, attempt %d" % (i + 1))
res, out, err = self.ant_deploy()
self.ejbca_install_result = res
if res == 0:
break
self.ant_client_tools()
self.jboss_fix_privileges()
self.jboss_wait_after_deploy()
self.jboss_reload()
return self.ejbca_install_result
def test_port_open(self, host, timeout=5, attempts=3, port=None):
"""
Tests if port is open to the public
:return:
"""
if port is None:
port = self.PORT
return util.test_port_open(host=host, port=port, timeout=timeout, attempts=attempts,
test_upper_read_write=False)
def test_environment(self):
"""
Tests whether the host we run on has the necessary assets (e.g., jboss dir, ejbca dir).
Very light check, but it prevents running and failing on hosts without our JBoss installation.
:return: true if env is OK (installation could finish successfully)
"""
return os.path.exists(self.get_ejbca_home()) and self.jboss.test_environment()
def setup_os(self):
"""
Configures the OS:
allows the EJBCA ports (admin & public) on the firewall
:return:
"""
ret = self.sysconfig.allow_port(port=self.PORT, tcp=True)
if ret != 0:
return ret
ret = self.sysconfig.allow_port(port=self.PORT_PUBLIC, tcp=True)
if ret != 0:
return ret
return 0
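# ------------------------------------------------------------------------------
# Usage sketch (illustrative only, nothing below is executed by this module).
# The collaborating objects (config, sysconfig, audit, jboss, mysql, openvpn)
# are created by the surrounding ebstall installer and are only assumed here.
#
#   ejbca = Ejbca(print_output=True, config=config, sysconfig=sysconfig,
#                 audit=audit, jboss=jboss, mysql=mysql, do_vpn=True)
#   ejbca.set_domains(['pki.example.com'])
#   if ejbca.test_environment():
#       ejbca.setup_os()
#       ret = ejbca.configure()   # deploy + ant install, returns 0 on success
# ------------------------------------------------------------------------------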
|
|
#!/usr/bin/env python
'''
The MIT License (MIT)
Copyright (c) <2016> <Mathias Lesche>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
contact: mat.lesche(at)gmail.com
'''
''' python modules '''
import logging
from argparse import ArgumentParser as ArgumentParser
from argparse import RawDescriptionHelpFormatter
from collections import defaultdict
from functools import partial
from os import chmod
from os.path import join as pathjoin
from re import compile
from types import NoneType
''' own modules '''
from report.tex import Tex
from support.io_module import check_Directorylist
from support.io_module import check_Fileslist
from support.io_module import create_Directory
from support.io_module import readFile_getList
from support.io_module import write_list
from support.information import Information
from support.main_logger import MainLogger
class Parser(object):
def __init__(self):
self.__parser = ArgumentParser(description="""
Script parses the rnaseqc html and extracts the tables
for the user report. Needs the report.html (-f),
bfx id (-b) and report directory (-r) as input.
Tex table layouts and tab separated data files
are written into the report directory.
""", formatter_class=RawDescriptionHelpFormatter)
self.initialiseParser()
self.__rnaseqc = ''
self.__tex = False
self.__output = ''
self.__bfxid = ''
self.__logger = logging.getLogger('dsp.report.extract_rnaseqc')
self.parse()
def initialiseParser(self):
self.__parser.add_argument('-f', '--file', type=str, metavar='FILE', dest='file', required = True , help="report or index html from the rnaseqc run")
self.__parser.add_argument('-b', '--bfx', type=str, metavar='STRING', dest='bfx', required = True , help="bfx id of the project")
self.__parser.add_argument('-r', '--report', type=str, metavar='DIRECTORY', dest='output', required=True, help='report directory')
def parse(self, inputstring = None):
if isinstance(inputstring, NoneType):
self.__options = self.__parser.parse_args()
else:
self.__options = self.__parser.parse_args(inputstring)
def show_log(self, level, message):
if level == 'debug':
self.__logger.debug(message)
elif level == 'info':
self.__logger.info(message)
elif level == 'warning':
self.__logger.warning(message)
elif level == 'error':
self.__logger.error(message)
elif level == 'critical':
self.__logger.critical(message)
'''
method checks if the output directory exists
'''
def check_output(self):
output = self.__options.output
good = check_Directorylist((output, ))[0]
if len(good) != 1:
self.show_log('error', "check output directory: {0}".format(output))
exit(2)
self.__output = good[0]
'''
method checks the input list of files, filters for the rnaseqc html report (report.html or index.html) and stores the valid file
@param filelist: list of string
@return: list
'''
def check_files(self, filelist):
if isinstance(filelist, NoneType):
return []
good = check_Fileslist(filelist)[0]
good = [i for i in good if i.endswith(('report.html', 'index.html'))]
if len(good) != 1:
self.show_log('error', "check input file: {0}".format(filelist))
exit(2)
self.__rnaseqc = good[0]
def main(self):
self.check_files((self.__options.file, ))
self.check_output()
self.__bfxid = self.__options.bfx
def get_rnaseqc(self):
return self.__rnaseqc
def get_bfx(self):
return self.__bfxid
def get_output(self):
return self.__output
bfx = property(get_bfx, None, None, None)
rnaseqc = property(get_rnaseqc, None, None, None)
output = property(get_output, None, None, None)
class RNAseqQC(object):
def __init__(self, rnaseqc, output, bfx):
self.__rnaseqc = readFile_getList(rnaseqc, 'r')
self.__output = output
self.__outputdata = pathjoin(self.__output, 'data')
create_Directory(self.__outputdata)
self.__dictus = defaultdict(partial(dict))
self.__number = compile('[0-9,]+[.0-9]*')
self.__bfx = bfx
self.__orderlist = []
self.__tabone = []
self.__tabtwo = []
self.__outputtabone = []
self.__outputtabtwo = []
self.__outputtabonedata = []
self.__outputtabtwodata = []
self.__alignmentlabel = 'rnaseqc_alignment'
self.__profilelabel = 'rnaseqc_libprofile'
self.__counter = 1
self.__fileoutone = pathjoin(self.__output, '{0}_{1}_rnaseqc_{2}.tex'.format(self.__bfx, self.__alignmentlabel, self.__counter))
self.__fileouttwo = pathjoin(self.__output, '{0}_{1}_rnaseqc_{2}.tex'.format(self.__bfx, self.__profilelabel, self.__counter))
self.__fileoutonedata = pathjoin(self.__outputdata, '{0}_{1}_rnaseqc_{2}.csv'.format(self.__bfx, self.__alignmentlabel, self.__counter))
self.__fileouttwodata = pathjoin(self.__outputdata, '{0}_{1}_rnaseqc_{2}.csv'.format(self.__bfx, self.__profilelabel, self.__counter))
self.__logger = logging.getLogger('dsp.report.extract_rnaseqc')
def show_log(self, level, message):
if level == 'debug':
self.__logger.debug(message)
elif level == 'info':
self.__logger.info(message)
elif level == 'warning':
self.__logger.warning(message)
elif level == 'error':
self.__logger.error(message)
elif level == 'critical':
self.__logger.critical(message)
'''
takes the inputstring and extracts the first number. If that is not possible, '0' is returned as a string
@param inputstring: string
@return: string
'''
def return_number(self, inputstring):
try:
return self.__number.findall(inputstring)[0]
except IndexError:
return '0'
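# Quick sanity sketch of the regex behaviour (input string is illustrative):
#   compile('[0-9,]+[.0-9]*').findall('12,345,678 (95.3%)')  ->  ['12,345,678', '95.3']
# return_number() keeps only the first match, so '12,345,678' would be returned here.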
'''
method splits the table rows by </td><td> and extracts the numbers
with a regular expression
'''
def parse_totalreads(self, templist):
for line in templist:
if line.startswith('<tr><td>'):
line = line.replace('<tr><td>', '')
linus = line.split('</td><td')
libid = linus[0]
self.__orderlist.append(libid)
self.__dictus[libid]['totalreads'] = self.return_number(linus[2])
'''
method splits the table rows by </td><td> and extracts the numbers
with a regular expression
'''
def parse_rrna(self, templist):
for line in templist:
if line.startswith('<tr><td>'):
line = line.replace('<tr><td>', '')
linus = line.split('</td><td')
libid = linus[0]
inputlist = ((2,'mappedreads'), (3,'mappingrate'), (9,'rrna'), (10,'rrnarate'))
for i in inputlist:
self.__dictus[libid][i[1]] = self.return_number(linus[i[0]])
'''
method splits the table rows by </td><td> and extracts the numbers
with a regular expression
'''
def parse_profiling(self, templist):
for line in templist:
if line.startswith('<tr><td>'):
line = line.replace('<tr><td>', '')
linus = line.split('</td><td')
libid = linus[0]
inputlist = ((3,'exonic'),(4,'intronic'),(5,'intergenic'),(7,'exprofile'),(8,'transcripts'),(9,'genes'))
for i in inputlist:
self.__dictus[libid][i[1]] = self.return_number(linus[i[0]])
'''
method parses the file and recognizes the tables.
each collected list gets sent to the corresponding parse method
'''
def parse_file(self):
temp = []
status = 'parsing'
for line in self.__rnaseqc:
if '>Total Reads<' in line:
status, temp = 'totalreads', []
elif '>Mapped Reads<' in line:
status, temp = 'rrna', []
elif '>Transcript-associated Reads<' in line:
status, temp = 'profiling', []
if '</table>' in line and status == 'totalreads':
self.parse_totalreads(temp)
status = 'parsing'
elif '</table>' in line and status == 'rrna':
self.parse_rrna(temp)
status = 'parsing'
elif '</table>' in line and status == 'profiling':
self.parse_profiling(temp)
status = 'parsing'
if status not in ('parsing', ):
temp.append(line)
def build_list(self):
for lib in sorted(self.__orderlist):
self.__tabone.append([lib, self.__dictus[lib]['totalreads'], self.__dictus[lib]['mappedreads'], self.__dictus[lib]['mappingrate'], self.__dictus[lib]['rrna'], self.__dictus[lib]['rrnarate']])
self.__tabtwo.append([lib, self.__dictus[lib]['exonic'], self.__dictus[lib]['intronic'], self.__dictus[lib]['intergenic'], self.__dictus[lib]['exprofile'], self.__dictus[lib]['transcripts'], self.__dictus[lib]['genes']])
'''
method builds the tables for the report.
it builds both the tex table layout and the tab separated data version
'''
def build_output(self):
self.__outputtabone = [Tex.TABLESTART, Tex.get_replaced_caption(self.__bfx, 'Alignment Stats', Information.CAPTION_RNASEQCRNA), Tex.BEGINCENTER, Tex.get_replaced_tabularstart('l|r|r|r|r|r'), Tex.HLINE]
self.__outputtabone.append(Tex.RNASEQ_ALIGN_HEAD)
self.__outputtabtwo = [Tex.TABLESTART, Tex.get_replaced_caption(self.__bfx, 'Sample Profile', Information.CAPTION_RNASEQCPROFILE), Tex.BEGINCENTER, Tex.get_replaced_tabularstart('l|r|r|r|r|r|r'), Tex.HLINE]
self.__outputtabtwo.extend(Tex.RNASEQ_COMPL_HEAD)
for entry in self.__tabone:
self.__outputtabone.append(Tex.get_tablerowstring([entry[0].replace('_','\_')] + entry[1:]))
self.__outputtabonedata.append('\t'.join(entry).replace(',', '') + '\n')
for entry in self.__tabtwo:
self.__outputtabtwo.append(Tex.get_tablerowstring([entry[0].replace('_','\_')] + entry[1:]))
self.__outputtabtwodata.append('\t'.join(entry).replace(',', '') + '\n')
self.__outputtabone.extend([Tex.TABULAREND, Tex.ENDCENTER, Tex.get_replaced_label(self.__bfx, '{0}_{1}'.format(self.__alignmentlabel, self.__counter), 'tab'), Tex.TABLEEND])
self.__outputtabtwo.extend([Tex.TABULAREND, Tex.ENDCENTER, Tex.get_replaced_label(self.__bfx, '{0}_{1}'.format(self.__profilelabel, self.__counter), 'tab'), Tex.TABLEEND])
self.__outputtabonedata.insert(0, '\t'.join(('Library', 'TotalReads', 'Aligned', 'Rate', 'rRNA', 'rRNArate')) + '\n')
self.__outputtabtwodata.insert(0, '\t'.join(('Library', 'Exonic', 'Intronic', 'Intergenic', 'ExpressionEfficiency', 'Transcript', 'Genes')) + '\n')
'''
method writes files to the output folder
'''
def write_files(self):
self.show_log('info', 'RNASeQC report has been parsed')
self.show_log('info', 'Alignment stat table is: {0}'.format(self.__fileoutone))
write_list(self.__outputtabone, self.__fileoutone)
chmod(self.__fileoutone, 0664)
self.show_log('info', 'Alignment stat table is: {0}'.format(self.__fileoutonedata))
write_list(self.__outputtabonedata, self.__fileoutonedata)
chmod(self.__fileoutonedata, 0664)
self.show_log('info', 'Library profile table is: {0}'.format(self.__fileouttwo))
write_list(self.__outputtabtwo, self.__fileouttwo)
chmod(self.__fileouttwo, 0664)
self.show_log('info', 'Library profile table is: {0}'.format(self.__fileouttwodata))
write_list(self.__outputtabtwodata, self.__fileouttwodata)
chmod(self.__fileouttwodata, 0664)
temp = ['{0}\trnaseqc\ttab:{1}_{0}_1\t1\t\trnaseqc\n'.format(self.__alignmentlabel, self.__bfx), '{0}\trnaseqc\ttab:{1}_{0}_1\t1\t\trnaseqc\n'.format(self.__profilelabel, self.__bfx)]
filename = pathjoin(self.__output, Information.LABELFILE.replace('BFXID', self.__bfx))
self.show_log('info', 'Labels are in {0}'.format(filename))
write_list(temp, filename, 'a')
chmod(filename, 0664)
if __name__ == '__main__':
mainlog = MainLogger()
parser = Parser()
parser.main()
inst = RNAseqQC(parser.rnaseqc, parser.output, parser.bfx)
inst.parse_file()
inst.build_list()
inst.build_output()
inst.write_files()
mainlog.close()
logging.shutdown()
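# Example invocation (paths and bfx id are illustrative):
#   python extract_rnaseqc.py -f /path/to/rnaseqc/report.html -b BFX0001 -r /path/to/report
# This would write the alignment and library-profile tex tables into the report
# directory and the matching tab separated .csv files into its data/ subfolder.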
|
|
"""Test the Plugwise config flow."""
from unittest.mock import AsyncMock, MagicMock, patch
from plugwise.exceptions import (
ConnectionFailedError,
InvalidAuthentication,
PlugwiseException,
)
import pytest
from homeassistant.components.plugwise.const import (
API,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
FLOW_NET,
FLOW_TYPE,
PW_TYPE,
)
from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SOURCE,
CONF_USERNAME,
)
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM
from tests.common import MockConfigEntry
TEST_HOST = "1.1.1.1"
TEST_HOSTNAME = "smileabcdef"
TEST_HOSTNAME2 = "stretchabc"
TEST_PASSWORD = "test_password"
TEST_PORT = 81
TEST_USERNAME = "smile"
TEST_USERNAME2 = "stretch"
TEST_DISCOVERY = {
"host": TEST_HOST,
"port": DEFAULT_PORT,
"hostname": f"{TEST_HOSTNAME}.local.",
"server": f"{TEST_HOSTNAME}.local.",
"properties": {
"product": "smile",
"version": "1.2.3",
"hostname": f"{TEST_HOSTNAME}.local.",
},
}
TEST_DISCOVERY2 = {
"host": TEST_HOST,
"port": DEFAULT_PORT,
"hostname": f"{TEST_HOSTNAME2}.local.",
"server": f"{TEST_HOSTNAME2}.local.",
"properties": {
"product": "stretch",
"version": "1.2.3",
"hostname": f"{TEST_HOSTNAME2}.local.",
},
}
@pytest.fixture(name="mock_smile")
def mock_smile():
"""Create a Mock Smile for testing exceptions."""
with patch(
"homeassistant.components.plugwise.config_flow.Smile",
) as smile_mock:
smile_mock.PlugwiseException = PlugwiseException
smile_mock.InvalidAuthentication = InvalidAuthentication
smile_mock.ConnectionFailedError = ConnectionFailedError
smile_mock.return_value.connect.return_value = True
yield smile_mock.return_value
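# The error-path tests below take this fixture and override `connect.side_effect`
# per case to simulate failures, e.g. (illustrative):
#   mock_smile.connect.side_effect = InvalidAuthentication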
async def test_form_flow_gateway(hass):
"""Test we get the form for Plugwise Gateway product type."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={FLOW_TYPE: FLOW_NET}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
assert result["step_id"] == "user_gateway"
async def test_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={FLOW_TYPE: FLOW_NET}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile.connect",
return_value=True,
), patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: TEST_USERNAME,
PW_TYPE: API,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_ZEROCONF},
data=TEST_DISCOVERY,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile.connect",
return_value=True,
), patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: TEST_PASSWORD},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: TEST_USERNAME,
PW_TYPE: API,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_stretch_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_ZEROCONF},
data=TEST_DISCOVERY2,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile.connect",
return_value=True,
), patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: TEST_PASSWORD},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: TEST_USERNAME2,
PW_TYPE: API,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_username(hass):
"""Test we get the username data back."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={FLOW_TYPE: FLOW_NET}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile",
) as smile_mock, patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
smile_mock.return_value.connect.side_effect = AsyncMock(return_value=True)
smile_mock.return_value.gateway_id = "abcdefgh12345678"
smile_mock.return_value.smile_hostname = TEST_HOST
smile_mock.return_value.smile_name = "Adam"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_USERNAME: TEST_USERNAME2,
},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: TEST_USERNAME2,
PW_TYPE: API,
}
assert len(mock_setup_entry.mock_calls) == 1
result3 = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_ZEROCONF},
data=TEST_DISCOVERY,
)
assert result3["type"] == RESULT_TYPE_FORM
with patch(
"homeassistant.components.plugwise.config_flow.Smile",
) as smile_mock, patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
smile_mock.return_value.side_effect = AsyncMock(return_value=True)
smile_mock.return_value.connect.side_effect = AsyncMock(return_value=True)
smile_mock.return_value.gateway_id = "abcdefgh12345678"
smile_mock.return_value.smile_hostname = TEST_HOST
smile_mock.return_value.smile_name = "Adam"
result4 = await hass.config_entries.flow.async_configure(
result3["flow_id"],
user_input={CONF_PASSWORD: TEST_PASSWORD},
)
await hass.async_block_till_done()
assert result4["type"] == "abort"
assert result4["reason"] == "already_configured"
async def test_form_invalid_auth(hass, mock_smile):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={FLOW_TYPE: FLOW_NET}
)
mock_smile.connect.side_effect = InvalidAuthentication
mock_smile.gateway_id = "0a636a4fc1704ab4a24e4f7e37fb187a"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass, mock_smile):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={FLOW_TYPE: FLOW_NET}
)
mock_smile.connect.side_effect = ConnectionFailedError
mock_smile.gateway_id = "0a636a4fc1704ab4a24e4f7e37fb187a"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_cannot_connect_port(hass, mock_smile):
"""Test we handle cannot connect to port error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={FLOW_TYPE: FLOW_NET}
)
mock_smile.connect.side_effect = ConnectionFailedError
mock_smile.gateway_id = "0a636a4fc1704ab4a24e4f7e37fb187a"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_PORT: TEST_PORT,
},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_other_problem(hass, mock_smile):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={FLOW_TYPE: FLOW_NET}
)
mock_smile.connect.side_effect = TimeoutError
mock_smile.gateway_id = "0a636a4fc1704ab4a24e4f7e37fb187a"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_options_flow_power(hass, mock_smile) -> None:
"""Test config flow options DSMR environments."""
entry = MockConfigEntry(
domain=DOMAIN,
title=CONF_NAME,
data={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
options={CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
hass.data[DOMAIN] = {entry.entry_id: {"api": MagicMock(smile_type="power")}}
entry.add_to_hass(hass)
with patch(
"homeassistant.components.plugwise.async_setup_entry", return_value=True
):
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_SCAN_INTERVAL: 10}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_SCAN_INTERVAL: 10,
}
async def test_options_flow_thermo(hass, mock_smile) -> None:
"""Test config flow options for thermostatic environments."""
entry = MockConfigEntry(
domain=DOMAIN,
title=CONF_NAME,
data={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
options={CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
hass.data[DOMAIN] = {entry.entry_id: {"api": MagicMock(smile_type="thermostat")}}
entry.add_to_hass(hass)
with patch(
"homeassistant.components.plugwise.async_setup_entry", return_value=True
):
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_SCAN_INTERVAL: 60}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_SCAN_INTERVAL: 60,
}
|
|
'''
Created by:
Juan Sarria
March 15, 2016
'''
import pandas as pd, numpy as np, fiona, timeit
from geopy.distance import vincenty
from shapely import geometry
from utilities import utm_to_latlong, latlong_to_utm
PROJECT_ROOT = '../'
def main():
#test values
lat = 49.2668355595
lon = -123.070244095
year = 2010
month = 5
'''
prop_df = pd.read_csv(PROJECT_ROOT + 'data/property_tax_06_15/latlong_property_tax_' + str(2006) + '.csv')
print avg_closest_properties(lat,lon,prop_df=prop_df)
sky_df = pd.read_csv(PROJECT_ROOT + 'data/skytrain_stations/rapid_transit_stations.csv')
print closest_skytrain(lat,lon)
crime_df = pd.read_csv(PROJECT_ROOT+'/data/crime_03_15/crime_latlong.csv')
neighbourhoods = crime_df['NEIGHBOURHOOD'].unique().tolist()
print len(neighbourhoods)
print one_hot_encoding(neighbourhoods[2],neighbourhoods)
a = number_graffiti(lat,lon)
print type(a[0])
'''
data = pd.read_csv(PROJECT_ROOT+'/data/crime_03_15/crime_latlong.csv')
data = data[data['YEAR'] >= 2006].sample(1000)
data = data[['LATITUDE','LONGITUDE', 'NEIGHBOURHOOD']]
data2 = data.apply(lambda row: pd.Series(locate_neighbourhood(row['LATITUDE'], row['LONGITUDE']),
index=['NEIGHBOURHOOD_2']),axis=1)
data = pd.concat([data,data2],axis=1)[['NEIGHBOURHOOD','NEIGHBOURHOOD_2']]
data = data[data['NEIGHBOURHOOD'] != data['NEIGHBOURHOOD_2']][pd.notnull(data['NEIGHBOURHOOD'])]
print data
print data.count()
def avg_closest_properties(lat, lon,year = None, prop_df = None, range_val = 0.0001):
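    # Returns [mean VALUE of the 5 closest properties, mean VALUE of the 10
    # closest], widening the lat/long search window tenfold until at least
    # 10 candidate rows remain.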
try:
if year is not None:
property_file = PROJECT_ROOT + 'data/property_tax_06_15/latlong_property_tax_' + str(year) + '.csv'
if prop_df is None: prop_df = pd.read_csv(property_file)
# Keep a copy of original df
temp_df = prop_df
# Narrow down options to minimize unnecessary calculations
prop_df = prop_df[prop_df['LATITUDE']< lat+range_val]
prop_df = prop_df[prop_df['LATITUDE']> lat-range_val]
prop_df = prop_df[prop_df['LONGITUDE']< lon+range_val]
prop_df = prop_df[prop_df['LONGITUDE']> lon-range_val]
# If not enough values, start again with a bigger range
if prop_df.count()['VALUE'] < 10:
return avg_closest_properties(lat,lon,prop_df=temp_df,range_val=range_val*10)
# Apply vincenty in the remaining rows
prop_df['DIST_DIF'] = prop_df.apply(lambda row: vincenty((lat,lon),(row['LATITUDE'],row['LONGITUDE'])).m,axis=1)
# Find the top 10 and top 5 closest properties
ten_min_df = prop_df[['VALUE','DIST_DIF']].nsmallest(10,'DIST_DIF')
five_min_df = ten_min_df.nsmallest(5,'DIST_DIF')
        # Return average property value for the top 5 and 10
return [five_min_df['VALUE'].mean(),ten_min_df['VALUE'].mean()]
except:
print "Error in avg_closest_properties"
def closest_skytrain(lat,lon, sky_df = None):
skytrain_file = PROJECT_ROOT + 'data/skytrain_stations/rapid_transit_stations.csv'
if sky_df is None: sky_df = pd.read_csv(skytrain_file)
vector = [0]*(sky_df.count()['STATION']+1)
# Find closest skytrain station
sky_df['DIST_DIF'] = sky_df.apply(lambda row: vincenty((lat,lon),(row['LAT'],row['LONG'])).m,axis=1)
min_df = sky_df.nsmallest(1,'DIST_DIF')
vector[list(min_df.index)[0]] = 1
vector[-1] = min_df.iloc[0]['DIST_DIF']
    # returns one-hot encoded vector with distance at the end
return vector
'''
def get_weather(year, month, weatherdf = None):
weather_file = PROJECT_ROOT + 'data/weather/VANCOUVER SEA ISLAND CCG/summarydata.csv'
if weatherdf is None:
weatherdf = pd.read_csv(weather_file)
# basic checking to see if we have reasonable data passed in.
if month > 12:
return False
if year >= 2006 and year <= 2015:
filter_year = weatherdf[(weatherdf.YEAR == year)]
line = filter_year[(filter_year.MONTH == month)].drop('YEAR',axis=1).drop('MONTH',axis=1)
return line
else:
filter_month = weatherdf[(weatherdf.MONTH == month)].drop('YEAR',axis=1).drop('MONTH',axis=1).mean(axis=0).to_frame().transpose()
return filter_month
'''
def one_hot_encoding(label, list_of_labels):
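    # e.g. one_hot_encoding('b', ['a', 'b', 'c']) -> [0, 1, 0]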
vector = [0]*len(list_of_labels)
vector[list_of_labels.index(label)] = 1
return vector
def number_graffiti(lat,lon, graf_df = None, radius1 = 50, radius2 = 100):
graffiti_file = PROJECT_ROOT + 'data/graffiti/graffiti.csv'
if graf_df is None: graf_df = pd.read_csv(graffiti_file)
# Narrow down options
graf_df = graf_df[graf_df['LAT'] < lat+.001]
graf_df = graf_df[graf_df['LAT'] > lat-.001]
graf_df = graf_df[graf_df['LONG'] < lon+.001]
    graf_df = graf_df[graf_df['LONG'] > lon-.001]
if graf_df['LAT'].count() == 0: return [0,0]
# Apply vincenty for remaining rows
graf_df['DIST_DIF'] = graf_df.apply(lambda row: vincenty((lat,lon),(row['LAT'],row['LONG'])).m,axis=1)
count_2 = graf_df[graf_df['DIST_DIF'] <= radius2]
count_1 = count_2[count_2['DIST_DIF'] <= radius1]
return [count_1['COUNT'].sum(), count_2['COUNT'].sum()]
def number_street_lights(lat,lon,light_df = None, radius = 50):
light_file = PROJECT_ROOT + 'data/street_lightings/street_lighting_poles.csv'
if light_df is None: light_df = pd.read_csv(light_file)
# Narrow down options
light_df = light_df[light_df['LAT'] < lat+.001]
light_df = light_df[light_df['LAT'] > lat-.001]
light_df = light_df[light_df['LONG'] < lon+.001]
    light_df = light_df[light_df['LONG'] > lon-.001]
if light_df['LAT'].count() == 0 : return 0
# Apply vincenty and find number of lights within radius
light_df['DIST_DIF'] = light_df.apply(lambda row: vincenty((lat,lon),(row['LAT'],row['LONG'])).m,axis=1)
min_lights = light_df[light_df['DIST_DIF'] < radius]
return min_lights['DIST_DIF'].count()
def locate_neighbourhood(lat, lon):
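    # Point-in-polygon lookup against the local area boundary shapefile;
    # returns the neighbourhood NAME, or -1 if no polygon contains the point.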
with fiona.open(PROJECT_ROOT+'data/neighbourhood_borders/local_area_boundary.shp') as neighbourhoods:
point = geometry.Point(lat,lon)
for n in neighbourhoods:
if n['properties']['NAME'] == 'Arbutus-Ridge': n['properties']['NAME'] = 'Arbutus Ridge'
if n['properties']['NAME'] == 'Downtown': n['properties']['NAME'] = 'Central Business District'
n['geometry']['coordinates'][0] = [utm_to_latlong(x[0],x[1]) for x in n['geometry']['coordinates'][0]]
shape = geometry.asShape(n['geometry'])
if shape.contains(point): return n['properties']['NAME']
return -1
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple tool for generating a client library.
Relevant links:
https://developers.google.com/discovery/v1/reference/apis#resource
"""
import datetime
from six.moves import urllib_parse
from apitools.gen import command_registry
from apitools.gen import message_registry
from apitools.gen import service_registry
from apitools.gen import util
def _ApitoolsVersion():
"""Returns version of the currently installed google-apitools package."""
import pkg_resources
return pkg_resources.get_distribution('google-apitools').version
def _StandardQueryParametersSchema(discovery_doc):
"""Sets up dict of standard query parameters."""
standard_query_schema = {
'id': 'StandardQueryParameters',
'type': 'object',
'description': 'Query parameters accepted by all methods.',
'properties': discovery_doc.get('parameters', {}),
}
# We add an entry for the trace, since Discovery doesn't.
standard_query_schema['properties']['trace'] = {
'type': 'string',
'description': ('A tracing token of the form "token:<tokenid>" '
'to include in api requests.'),
'location': 'query',
}
return standard_query_schema
def _ComputePaths(package, version, discovery_doc):
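    """Split the discovery base URL into (base URL, base path).

    Illustrative example: a discovery doc with rootUrl
    'https://www.googleapis.com/' and servicePath 'storage/v1/' for
    package 'storage', version 'v1' yields
    ('https://www.googleapis.com/storage/v1/', '').
    """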
full_path = urllib_parse.urljoin(
discovery_doc['rootUrl'], discovery_doc['servicePath'])
api_path_component = '/'.join((package, version, ''))
if api_path_component not in full_path:
return full_path, ''
prefix, _, suffix = full_path.rpartition(api_path_component)
return prefix + api_path_component, suffix
class DescriptorGenerator(object):
"""Code generator for a given discovery document."""
def __init__(self, discovery_doc, client_info, names, root_package, outdir,
base_package, protorpc_package, generate_cli=False,
use_proto2=False, unelidable_request_methods=None,
apitools_version=''):
self.__discovery_doc = discovery_doc
self.__client_info = client_info
self.__outdir = outdir
self.__use_proto2 = use_proto2
self.__description = util.CleanDescription(
self.__discovery_doc.get('description', ''))
self.__package = self.__client_info.package
self.__version = self.__client_info.version
self.__revision = discovery_doc.get('revision', '1')
self.__generate_cli = generate_cli
self.__root_package = root_package
self.__base_files_package = base_package
self.__protorpc_package = protorpc_package
self.__names = names
self.__base_url, self.__base_path = _ComputePaths(
self.__package, self.__client_info.url_version,
self.__discovery_doc)
# Order is important here: we need the schemas before we can
# define the services.
self.__message_registry = message_registry.MessageRegistry(
self.__client_info, self.__names, self.__description,
self.__root_package, self.__base_files_package,
self.__protorpc_package)
schemas = self.__discovery_doc.get('schemas', {})
for schema_name, schema in schemas.items():
self.__message_registry.AddDescriptorFromSchema(
schema_name, schema)
# We need to add one more message type for the global parameters.
standard_query_schema = _StandardQueryParametersSchema(
self.__discovery_doc)
self.__message_registry.AddDescriptorFromSchema(
standard_query_schema['id'], standard_query_schema)
# Now that we know all the messages, we need to correct some
# fields from MessageFields to EnumFields.
self.__message_registry.FixupMessageFields()
self.__command_registry = command_registry.CommandRegistry(
self.__package, self.__version, self.__client_info,
self.__message_registry, self.__root_package,
self.__base_files_package, self.__protorpc_package,
self.__base_url, self.__names)
self.__command_registry.AddGlobalParameters(
self.__message_registry.LookupDescriptorOrDie(
'StandardQueryParameters'))
self.__services_registry = service_registry.ServiceRegistry(
self.__client_info,
self.__message_registry,
self.__command_registry,
self.__base_url,
self.__base_path,
self.__names,
self.__root_package,
self.__base_files_package,
unelidable_request_methods or [])
services = self.__discovery_doc.get('resources', {})
for service_name, methods in sorted(services.items()):
self.__services_registry.AddServiceFromResource(
service_name, methods)
# We might also have top-level methods.
api_methods = self.__discovery_doc.get('methods', [])
if api_methods:
self.__services_registry.AddServiceFromResource(
'api', {'methods': api_methods})
# pylint: disable=protected-access
self.__client_info = self.__client_info._replace(
scopes=self.__services_registry.scopes)
# The apitools version that will be used in prerequisites for the
# generated packages.
self.__apitools_version = (
apitools_version if apitools_version else _ApitoolsVersion())
@property
def client_info(self):
return self.__client_info
@property
def discovery_doc(self):
return self.__discovery_doc
@property
def names(self):
return self.__names
@property
def outdir(self):
return self.__outdir
@property
def package(self):
return self.__package
@property
def use_proto2(self):
return self.__use_proto2
@property
def apitools_version(self):
return self.__apitools_version
def _GetPrinter(self, out):
printer = util.SimplePrettyPrinter(out)
return printer
def WriteInit(self, out):
"""Write a simple __init__.py for the generated client."""
printer = self._GetPrinter(out)
printer('"""Common imports for generated %s client library."""',
self.__client_info.package)
printer('# pylint:disable=wildcard-import')
printer()
printer('import pkgutil')
printer()
printer('from %s import *', self.__base_files_package)
if self.__root_package == '.':
import_prefix = ''
else:
import_prefix = '%s.' % self.__root_package
if self.__generate_cli:
printer('from %s%s import *',
import_prefix, self.__client_info.cli_rule_name)
printer('from %s%s import *',
import_prefix, self.__client_info.client_rule_name)
printer('from %s%s import *',
import_prefix, self.__client_info.messages_rule_name)
printer()
printer('__path__ = pkgutil.extend_path(__path__, __name__)')
def WriteIntermediateInit(self, out):
"""Write a simple __init__.py for an intermediate directory."""
printer = self._GetPrinter(out)
printer('#!/usr/bin/env python')
printer('"""Shared __init__.py for apitools."""')
printer()
printer('from pkgutil import extend_path')
printer('__path__ = extend_path(__path__, __name__)')
def WriteSetupPy(self, out):
"""Write a setup.py for upload to PyPI."""
printer = self._GetPrinter(out)
year = datetime.datetime.now().year
printer('# Copyright %s Google Inc. All Rights Reserved.' % year)
printer('#')
printer('# Licensed under the Apache License, Version 2.0 (the'
'"License");')
printer('# you may not use this file except in compliance with '
'the License.')
printer('# You may obtain a copy of the License at')
printer('#')
printer('# http://www.apache.org/licenses/LICENSE-2.0')
printer('#')
printer('# Unless required by applicable law or agreed to in writing, '
'software')
printer('# distributed under the License is distributed on an "AS IS" '
'BASIS,')
printer('# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either '
'express or implied.')
printer('# See the License for the specific language governing '
'permissions and')
printer('# limitations under the License.')
printer()
printer('import setuptools')
printer('REQUIREMENTS = [')
with printer.Indent(indent=' '):
if self.apitools_version.startswith('0.4.'):
printer('"google-apitools>=0.4.8,<0.5",')
else:
printer('"google-apitools==%s",', self.apitools_version)
printer('"httplib2>=0.9",')
printer('"oauth2client>=1.4.12",')
printer(']')
printer('_PACKAGE = "apitools.clients.%s"' % self.__package)
printer()
printer('setuptools.setup(')
# TODO(craigcitro): Allow customization of these options.
with printer.Indent(indent=' '):
printer('name="google-apitools-%s-%s",',
self.__package, self.__version)
if self.apitools_version.startswith('0.4.'):
printer('version="0.4.%s",', self.__revision)
else:
printer('version="%s.%s",',
self.apitools_version, self.__revision)
printer('description="Autogenerated apitools library for %s",' % (
self.__package,))
printer('url="https://github.com/google/apitools",')
printer('author="Craig Citro",')
printer('author_email="craigcitro@google.com",')
printer('packages=setuptools.find_packages(),')
printer('install_requires=REQUIREMENTS,')
printer('classifiers=[')
with printer.Indent(indent=' '):
printer('"Programming Language :: Python :: 2.7",')
printer('"License :: OSI Approved :: Apache Software '
'License",')
printer('],')
printer('license="Apache 2.0",')
printer('keywords="apitools apitools-%s %s",' % (
self.__package, self.__package))
printer(')')
def WriteMessagesFile(self, out):
self.__message_registry.WriteFile(self._GetPrinter(out))
def WriteMessagesProtoFile(self, out):
self.__message_registry.WriteProtoFile(self._GetPrinter(out))
def WriteServicesProtoFile(self, out):
self.__services_registry.WriteProtoFile(self._GetPrinter(out))
def WriteClientLibrary(self, out):
self.__services_registry.WriteFile(self._GetPrinter(out))
def WriteCli(self, out):
self.__command_registry.WriteFile(self._GetPrinter(out))
|
|
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.cloudsearch2.optionstatus import IndexFieldStatus
from boto.cloudsearch2.optionstatus import ServicePoliciesStatus
from boto.cloudsearch2.optionstatus import ExpressionStatus
from boto.cloudsearch2.optionstatus import AvailabilityOptionsStatus
from boto.cloudsearch2.optionstatus import ScalingParametersStatus
from boto.cloudsearch2.document import DocumentServiceConnection
from boto.cloudsearch2.search import SearchConnection
def handle_bool(value):
if value in [True, 'true', 'True', 'TRUE', 1]:
return True
return False
class Domain(object):
"""
A Cloudsearch domain.
:ivar name: The name of the domain.
:ivar id: The internally generated unique identifier for the domain.
:ivar created: A boolean which is True if the domain is
created. It can take several minutes to initialize a domain
when CreateDomain is called. Newly created search domains are
returned with a False value for Created until domain creation
is complete
:ivar deleted: A boolean which is True if the search domain has
been deleted. The system must clean up resources dedicated to
the search domain when delete is called. Newly deleted
search domains are returned from list_domains with a True
value for deleted for several minutes until resource cleanup
is complete.
:ivar processing: True if processing is being done to activate the
current domain configuration.
:ivar num_searchable_docs: The number of documents that have been
        submitted to the domain and indexed.
:ivar requires_index_document: True if index_documents needs to be
called to activate the current domain configuration.
:ivar search_instance_count: The number of search instances that are
available to process search requests.
:ivar search_instance_type: The instance type that is being used to
process search requests.
:ivar search_partition_count: The number of partitions across which
the search index is spread.
"""
def __init__(self, layer1, data):
"""
Constructor - Create a domain object from a layer1 and data params
:type layer1: :class:`boto.cloudsearch2.layer1.Layer1` object
:param layer1: A :class:`boto.cloudsearch2.layer1.Layer1` object
which is used to perform operations on the domain.
"""
self.layer1 = layer1
self.update_from_data(data)
def update_from_data(self, data):
self.created = data['Created']
self.deleted = data['Deleted']
self.processing = data['Processing']
self.requires_index_documents = data['RequiresIndexDocuments']
self.domain_id = data['DomainId']
self.domain_name = data['DomainName']
self.search_instance_count = data['SearchInstanceCount']
self.search_instance_type = data.get('SearchInstanceType', None)
self.search_partition_count = data['SearchPartitionCount']
self._doc_service = data['DocService']
self._service_arn = data['ARN']
self._search_service = data['SearchService']
@property
def service_arn(self):
return self._service_arn
@property
def doc_service_endpoint(self):
return self._doc_service['Endpoint']
@property
def search_service_endpoint(self):
return self._search_service['Endpoint']
@property
def created(self):
return self._created
@created.setter
def created(self, value):
self._created = handle_bool(value)
@property
def deleted(self):
return self._deleted
@deleted.setter
def deleted(self, value):
self._deleted = handle_bool(value)
@property
def processing(self):
return self._processing
@processing.setter
def processing(self, value):
self._processing = handle_bool(value)
@property
def requires_index_documents(self):
return self._requires_index_documents
@requires_index_documents.setter
def requires_index_documents(self, value):
self._requires_index_documents = handle_bool(value)
@property
def search_partition_count(self):
return self._search_partition_count
@search_partition_count.setter
def search_partition_count(self, value):
self._search_partition_count = int(value)
@property
def search_instance_count(self):
return self._search_instance_count
@search_instance_count.setter
def search_instance_count(self, value):
self._search_instance_count = int(value)
@property
def name(self):
return self.domain_name
@property
def id(self):
return self.domain_id
def delete(self):
"""
Delete this domain and all index data associated with it.
"""
return self.layer1.delete_domain(self.name)
def get_analysis_schemes(self):
"""
Return a list of Analysis Scheme objects.
"""
return self.layer1.describe_analysis_schemes(self.name)
def get_availability_options(self):
"""
Return a :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus`
object representing the currently defined availability options for
the domain.
:return: OptionsStatus object
:rtype: :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus`
object
"""
return AvailabilityOptionsStatus(
self, refresh_fn=self.layer1.describe_availability_options,
refresh_key=['DescribeAvailabilityOptionsResponse',
'DescribeAvailabilityOptionsResult',
'AvailabilityOptions'],
save_fn=self.layer1.update_availability_options)
def get_scaling_options(self):
"""
Return a :class:`boto.cloudsearch2.option.ScalingParametersStatus`
object representing the currently defined scaling options for the
domain.
:return: ScalingParametersStatus object
:rtype: :class:`boto.cloudsearch2.option.ScalingParametersStatus`
object
"""
return ScalingParametersStatus(
self, refresh_fn=self.layer1.describe_scaling_parameters,
refresh_key=['DescribeScalingParametersResponse',
'DescribeScalingParametersResult',
'ScalingParameters'],
save_fn=self.layer1.update_scaling_parameters)
def get_access_policies(self):
"""
Return a :class:`boto.cloudsearch2.option.ServicePoliciesStatus`
object representing the currently defined access policies for the
domain.
:return: ServicePoliciesStatus object
:rtype: :class:`boto.cloudsearch2.option.ServicePoliciesStatus` object
"""
return ServicePoliciesStatus(
self, refresh_fn=self.layer1.describe_service_access_policies,
refresh_key=['DescribeServiceAccessPoliciesResponse',
'DescribeServiceAccessPoliciesResult',
'AccessPolicies'],
save_fn=self.layer1.update_service_access_policies)
def index_documents(self):
"""
Tells the search domain to start indexing its documents using
the latest text processing options and IndexFields. This
operation must be invoked to make options whose OptionStatus
has OptionState of RequiresIndexDocuments visible in search
results.
"""
self.layer1.index_documents(self.name)
def get_index_fields(self, field_names=None):
"""
Return a list of index fields defined for this domain.
:return: list of IndexFieldStatus objects
:rtype: list of :class:`boto.cloudsearch2.option.IndexFieldStatus`
object
"""
data = self.layer1.describe_index_fields(self.name, field_names)
data = (data['DescribeIndexFieldsResponse']
['DescribeIndexFieldsResult']
['IndexFields'])
return [IndexFieldStatus(self, d) for d in data]
def create_index_field(self, field_name, field_type,
default='', facet=False, returnable=False,
searchable=False, sortable=False,
highlight=False, source_field=None,
analysis_scheme=None):
"""
Defines an ``IndexField``, either replacing an existing
definition or creating a new one.
:type field_name: string
:param field_name: The name of a field in the search index.
:type field_type: string
:param field_type: The type of field. Valid values are
int | double | literal | text | date | latlon |
int-array | double-array | literal-array | text-array | date-array
:type default: string or int
:param default: The default value for the field. If the
field is of type ``int`` this should be an integer value.
Otherwise, it's a string.
:type facet: bool
:param facet: A boolean to indicate whether facets
are enabled for this field or not. Does not apply to
fields of type ``int, int-array, text, text-array``.
:type returnable: bool
:param returnable: A boolean to indicate whether values
of this field can be returned in search results or
used in ranking.
:type searchable: bool
:param searchable: A boolean to indicate whether search
is enabled for this field or not.
:type sortable: bool
:param sortable: A boolean to indicate whether sorting
is enabled for this field or not. Does not apply to
fields of array types.
:type highlight: bool
:param highlight: A boolean to indicate whether highlighting
is enabled for this field or not. Does not apply to
fields of type ``double, int, date, latlon``
:type source_field: list of strings or string
:param source_field: For array types, this is the list of fields
to treat as the source. For singular types, pass a string only.
:type analysis_scheme: string
:param analysis_scheme: The analysis scheme to use for this field.
Only applies to ``text | text-array`` field types
:return: IndexFieldStatus objects
:rtype: :class:`boto.cloudsearch2.option.IndexFieldStatus` object
:raises: BaseException, InternalException, LimitExceededException,
InvalidTypeException, ResourceNotFoundException
"""
index = {
'IndexFieldName': field_name,
'IndexFieldType': field_type
}
if field_type == 'literal':
index['LiteralOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable,
'SortEnabled': sortable
}
if default:
index['LiteralOptions']['DefaultValue'] = default
if source_field:
index['LiteralOptions']['SourceField'] = source_field
elif field_type == 'literal-array':
index['LiteralArrayOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable
}
if default:
index['LiteralArrayOptions']['DefaultValue'] = default
if source_field:
index['LiteralArrayOptions']['SourceFields'] = \
','.join(source_field)
elif field_type == 'int':
            index['IntOptions'] = {
                'FacetEnabled': facet,
                'ReturnEnabled': returnable,
                'SearchEnabled': searchable,
                'SortEnabled': sortable
            }
if default:
index['IntOptions']['DefaultValue'] = default
if source_field:
index['IntOptions']['SourceField'] = source_field
elif field_type == 'int-array':
index['IntArrayOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable
}
if default:
index['IntArrayOptions']['DefaultValue'] = default
if source_field:
index['IntArrayOptions']['SourceFields'] = \
','.join(source_field)
elif field_type == 'date':
index['DateOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable,
'SortEnabled': sortable
}
if default:
index['DateOptions']['DefaultValue'] = default
if source_field:
index['DateOptions']['SourceField'] = source_field
elif field_type == 'date-array':
index['DateArrayOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable
}
if default:
index['DateArrayOptions']['DefaultValue'] = default
if source_field:
index['DateArrayOptions']['SourceFields'] = \
','.join(source_field)
elif field_type == 'double':
index['DoubleOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable,
'SortEnabled': sortable
}
if default:
index['DoubleOptions']['DefaultValue'] = default
if source_field:
index['DoubleOptions']['SourceField'] = source_field
elif field_type == 'double-array':
index['DoubleArrayOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable
}
if default:
index['DoubleArrayOptions']['DefaultValue'] = default
if source_field:
index['DoubleArrayOptions']['SourceFields'] = \
','.join(source_field)
elif field_type == 'text':
index['TextOptions'] = {
'ReturnEnabled': returnable,
'HighlightEnabled': highlight,
'SortEnabled': sortable
}
if default:
index['TextOptions']['DefaultValue'] = default
if source_field:
index['TextOptions']['SourceField'] = source_field
if analysis_scheme:
index['TextOptions']['AnalysisScheme'] = analysis_scheme
elif field_type == 'text-array':
index['TextArrayOptions'] = {
'ReturnEnabled': returnable,
'HighlightEnabled': highlight
}
if default:
index['TextArrayOptions']['DefaultValue'] = default
if source_field:
index['TextArrayOptions']['SourceFields'] = \
','.join(source_field)
if analysis_scheme:
index['TextArrayOptions']['AnalysisScheme'] = analysis_scheme
elif field_type == 'latlon':
index['LatLonOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable,
'SortEnabled': sortable
}
if default:
index['LatLonOptions']['DefaultValue'] = default
if source_field:
index['LatLonOptions']['SourceField'] = source_field
data = self.layer1.define_index_field(self.name, index)
data = (data['DefineIndexFieldResponse']
['DefineIndexFieldResult']
['IndexField'])
return IndexFieldStatus(self, data,
self.layer1.describe_index_fields)
def get_expressions(self, names=None):
"""
Return a list of rank expressions defined for this domain.
:return: list of ExpressionStatus objects
:rtype: list of :class:`boto.cloudsearch2.option.ExpressionStatus`
object
"""
fn = self.layer1.describe_expressions
data = fn(self.name, names)
data = (data['DescribeExpressionsResponse']
['DescribeExpressionsResult']
['Expressions'])
return [ExpressionStatus(self, d, fn) for d in data]
def create_expression(self, name, value):
"""
Create a new expression.
:type name: string
:param name: The name of an expression for processing
during a search request.
:type value: string
:param value: The expression to evaluate for ranking
or thresholding while processing a search request. The
Expression syntax is based on JavaScript expressions
and supports:
* Single value, sort enabled numeric fields (int, double, date)
* Other expressions
* The _score variable, which references a document's relevance
score
* The _time variable, which references the current epoch time
* Integer, floating point, hex, and octal literals
* Arithmetic operators: + - * / %
* Bitwise operators: | & ^ ~ << >> >>>
* Boolean operators (including the ternary operator): && || ! ?:
* Comparison operators: < <= == >= >
* Mathematical functions: abs ceil exp floor ln log2 log10 logn
max min pow sqrt pow
* Trigonometric functions: acos acosh asin asinh atan atan2 atanh
cos cosh sin sinh tanh tan
* The haversin distance function
Expressions always return an integer value from 0 to the maximum
64-bit signed integer value (2^63 - 1). Intermediate results are
calculated as double-precision floating point values and the return
value is rounded to the nearest integer. If the expression is
invalid or evaluates to a negative value, it returns 0. If the
expression evaluates to a value greater than the maximum, it
returns the maximum value.
The source data for an Expression can be the name of an
IndexField of type int or double, another Expression or the
reserved name _score. The _score source is
defined to return as a double from 0 to 10.0 (inclusive) to
indicate how relevant a document is to the search request,
taking into account repetition of search terms in the
document and proximity of search terms to each other in
each matching IndexField in the document.
For more information about using rank expressions to
customize ranking, see the Amazon CloudSearch Developer
Guide.
:return: ExpressionStatus object
:rtype: :class:`boto.cloudsearch2.option.ExpressionStatus` object
:raises: BaseException, InternalException, LimitExceededException,
InvalidTypeException, ResourceNotFoundException
"""
data = self.layer1.define_expression(self.name, name, value)
data = (data['DefineExpressionResponse']
['DefineExpressionResult']
['Expression'])
return ExpressionStatus(self, data,
self.layer1.describe_expressions)
def get_document_service(self):
return DocumentServiceConnection(domain=self)
def get_search_service(self):
return SearchConnection(domain=self)
def __repr__(self):
return '<Domain: %s>' % self.domain_name
|
|
# -*- coding: UTF-8 -*-
# extended_widgets.py
# Moved albow related stuff from mceutils.
import resource_packs
from controls import ValueDisplay
from dialogs import alert, ask, Dialog
from controls import Button, Label, ValueButton, CheckBox, AttrRef
from widget import Widget
import root
from layout import Column, Row
from translate import _
from menu import Menu
from fields import FloatField, IntField, TextFieldWrapped
from datetime import timedelta, datetime
class HotkeyColumn(Widget):
is_gl_container = True
#-# Translation live update preparation
def __init__(self, items, keysColumn=None, buttonsColumn=None, item_spacing=None):
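        # items: sequence of (hotkey, title, action) or
        # (hotkey, title, action, tooltipText) tuples, unpacked in
        # buildWidgets below.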
self.items = items
self.item_spacing = item_spacing
self.keysColumn = keysColumn
self.buttonsColumn = buttonsColumn
Widget.__init__(self)
self.buildWidgets()
def set_update_translation(self, v):
if v:
self.buildWidgets()
def buildWidgets(self):
keysColumn = self.keysColumn
buttonsColumn = self.buttonsColumn
items = self.items
item_spacing = self.item_spacing
if keysColumn is None or True:
keysColumn = []
if buttonsColumn is None or True:
buttonsColumn = []
labels = []
for w in self.subwidgets:
for _w in w.subwidgets:
w.remove(_w)
self.remove(w)
for t in items:
if len(t) == 3:
(hotkey, title, action) = t
tooltipText = None
else:
(hotkey, title, action, tooltipText) = t
if isinstance(title, (str, unicode)):
button = Button(title, action=action)
else:
button = ValueButton(ref=title, action=action, width=200)
button.anchor = self.anchor
label = Label(hotkey, width=100, margin=button.margin)
label.anchor = "wh"
label.height = button.height
labels.append(label)
if tooltipText:
button.tooltipText = tooltipText
keysColumn.append(label)
buttonsColumn.append(button)
self.buttons = list(buttonsColumn)
#.#
        if item_spacing is None:
buttonsColumn = Column(buttonsColumn)
else:
buttonsColumn = Column(buttonsColumn, spacing=item_spacing)
#.#
buttonsColumn.anchor = self.anchor
#.#
        if item_spacing is None:
keysColumn = Column(keysColumn)
else:
keysColumn = Column(keysColumn, spacing=item_spacing)
commandRow = Row((keysColumn, buttonsColumn))
self.labels = labels
self.add(commandRow)
self.shrink_wrap()
self.invalidate()
#-#
class MenuButton(Button):
def __init__(self, title, choices, **kw):
Button.__init__(self, title, **kw)
self.choices = choices
self.menu = Menu(title, ((c, c) for c in choices))
def action(self):
index = self.menu.present(self, (0, 0))
if index == -1:
return
self.menu_picked(index)
def menu_picked(self, index):
pass
class ChoiceButton(ValueButton):
align = "c"
choose = None
def __init__(self, choices, scrolling=True, scroll_items=30, **kw):
# passing an empty list of choices is ill-advised
if 'choose' in kw:
self.choose = kw.pop('choose')
#-# Translation live update preparation
self.scrolling = scrolling
self.scroll_items = scroll_items
self.choices = choices or ["[UNDEFINED]"]
ValueButton.__init__(self, action=self.showMenu, **kw)
self.calc_width()
#-#
self.choiceIndex = 0
#-# Translation live update preparation
def set_update_translation(self, v):
ValueButton.set_update_translation(self, v)
self.menu.set_update_translation(v)
def calc_width(self):
widths = [self.font.size(_(c))[0] for c in self.choices] + [self.width]
if len(widths):
self.width = max(widths) + self.margin * 2
def calc_size(self):
ValueButton.calc_size(self)
self.calc_width()
#-#
def showMenu(self):
choiceIndex = self.menu.present(self, (0, 0))
if choiceIndex != -1:
self.choiceIndex = choiceIndex
if self.choose:
self.choose()
def get_value(self):
return self.selectedChoice
@property
def selectedChoice(self):
if self.choiceIndex >= len(self.choices) or self.choiceIndex < 0:
return ""
return self.choices[self.choiceIndex]
@selectedChoice.setter
def selectedChoice(self, val):
idx = self.choices.index(val)
if idx != -1:
self.choiceIndex = idx
@property
def choices(self):
return self._choices
@choices.setter
def choices(self, ch):
self._choices = ch
self.menu = Menu("", [(name, "pickMenu") for name in self._choices],
self.scrolling, self.scroll_items)
def CheckBoxLabel(title, *args, **kw):
tooltipText = kw.pop('tooltipText', None)
cb = CheckBox(*args, **kw)
lab = Label(title, fg_color=cb.fg_color)
lab.mouse_down = cb.mouse_down
if tooltipText:
cb.tooltipText = tooltipText
lab.tooltipText = tooltipText
class CBRow(Row):
margin = 0
@property
def value(self):
return self.checkbox.value
@value.setter
def value(self, val):
self.checkbox.value = val
row = CBRow((lab, cb))
row.checkbox = cb
return row
def FloatInputRow(title, *args, **kw):
return Row((Label(title, tooltipText=kw.get('tooltipText')), FloatField(*args, **kw)))
def IntInputRow(title, *args, **kw):
return Row((Label(title, tooltipText=kw.get('tooltipText')), IntField(*args, **kw)))
def TextInputRow(title, *args, **kw):
return Row((Label(title, tooltipText=kw.get('tooltipText')), TextFieldWrapped(*args, **kw)))
def showProgress(progressText, progressIterator, cancel=False):
"""Show the progress for a long-running synchronous operation.
progressIterator should be a generator-like object that can return
either None, for an indeterminate indicator,
    A (current, maximum) pair of numbers for a determinate indicator,
    A string, to update the progress info label
    or a tuple of (current, maximum, string) to set the progress and update the label"""
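    # Example iterator (hypothetical; chunks/processChunk are illustrative
    # names for the caller's own data and work function):
    #
    #   def copyChunks(chunks):
    #       for i, chunk in enumerate(chunks):
    #           processChunk(chunk)
    #           yield i + 1, len(chunks), "chunk %d" % (i + 1)
    #
    #   showProgress("Copying chunks...", copyChunks(chunks), cancel=True)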
class ProgressWidget(Dialog):
progressFraction = 0.0
firstDraw = False
root = None
def draw(self, surface):
if self.root is None:
self.root = self.get_root()
Widget.draw(self, surface)
frameStart = datetime.now()
frameInterval = timedelta(0, 1, 0) / 2
amount = None
try:
while datetime.now() < frameStart + frameInterval:
amount = progressIterator.next()
if self.firstDraw is False:
self.firstDraw = True
break
except StopIteration:
self.dismiss()
infoText = ""
if amount is not None:
if isinstance(amount, tuple):
if len(amount) > 2:
infoText = ": " + amount[2]
amount, max = amount[:2]
else:
max = amount
maxwidth = (self.width - self.margin * 2)
if amount is None:
self.progressBar.width = maxwidth
self.progressBar.bg_color = (255, 255, 25, 255)
elif isinstance(amount, basestring):
self.statusText = amount
else:
self.progressAmount = amount
if isinstance(amount, (int, float)):
self.progressFraction = float(amount) / (float(max) or 1)
self.progressBar.width = maxwidth * self.progressFraction
self.statusText = str("{0} / {1}".format(amount, max))
else:
self.statusText = str(amount)
if infoText:
self.statusText += infoText
@property
def estimateText(self):
delta = (datetime.now() - self.startTime)
progressPercent = (int(self.progressFraction * 10000))
left = delta * (10000 - progressPercent) / (progressPercent or 1)
return _("Time left: {0}").format(left)
def cancel(self):
if cancel:
self.dismiss(False)
def idleevent(self, evt):
self.invalidate()
def key_down(self, event):
pass
def key_up(self, event):
pass
def mouse_up(self, event):
try:
if "SelectionTool" in str(self.root.editor.currentTool):
if self.root.get_nudge_block().count > 0:
self.root.get_nudge_block().mouse_up(event)
except:
pass
widget = ProgressWidget()
widget.progressText = _(progressText)
widget.statusText = ""
widget.progressAmount = 0.0
progressLabel = ValueDisplay(ref=AttrRef(widget, 'progressText'), width=550)
statusLabel = ValueDisplay(ref=AttrRef(widget, 'statusText'), width=550)
estimateLabel = ValueDisplay(ref=AttrRef(widget, 'estimateText'), width=550)
progressBar = Widget(size=(550, 20), bg_color=(150, 150, 150, 255))
widget.progressBar = progressBar
col = (progressLabel, statusLabel, estimateLabel, progressBar)
if cancel:
cancelButton = Button("Cancel", action=widget.cancel, fg_color=(255, 0, 0, 255))
col += (Column((cancelButton,), align="r"),)
widget.add(Column(col))
widget.shrink_wrap()
widget.startTime = datetime.now()
if widget.present():
return widget.progressAmount
else:
return "Canceled"
|
|
# Copyright (c) OpenMMLab. All rights reserved.
import bisect
import collections
import copy
import math
from collections import defaultdict
import numpy as np
from mmcv.utils import build_from_cfg, print_log
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .builder import DATASETS, PIPELINES
from .coco import CocoDataset
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
"""A wrapper of concatenated dataset.
Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
concat the group flag for image aspect ratio.
Args:
datasets (list[:obj:`Dataset`]): A list of datasets.
separate_eval (bool): Whether to evaluate the results
separately if it is used as validation dataset.
Defaults to True.
"""
def __init__(self, datasets, separate_eval=True):
super(ConcatDataset, self).__init__(datasets)
self.CLASSES = datasets[0].CLASSES
self.PALETTE = getattr(datasets[0], 'PALETTE', None)
self.separate_eval = separate_eval
if not separate_eval:
if any([isinstance(ds, CocoDataset) for ds in datasets]):
raise NotImplementedError(
'Evaluating concatenated CocoDataset as a whole is not'
' supported! Please set "separate_eval=True"')
elif len(set([type(ds) for ds in datasets])) != 1:
raise NotImplementedError(
'All the datasets should have same types')
if hasattr(datasets[0], 'flag'):
flags = []
for i in range(0, len(datasets)):
flags.append(datasets[i].flag)
self.flag = np.concatenate(flags)
def get_cat_ids(self, idx):
"""Get category ids of concatenated dataset by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
if idx < 0:
if -idx > len(self):
raise ValueError(
'absolute value of index should not exceed dataset length')
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.datasets[dataset_idx].get_cat_ids(sample_idx)
def get_ann_info(self, idx):
"""Get annotation of concatenated dataset by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
if idx < 0:
if -idx > len(self):
raise ValueError(
'absolute value of index should not exceed dataset length')
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.datasets[dataset_idx].get_ann_info(sample_idx)
def evaluate(self, results, logger=None, **kwargs):
"""Evaluate the results.
Args:
results (list[list | tuple]): Testing results of the dataset.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
Returns:
dict[str: float]: AP results of the total dataset or each separate
dataset if `self.separate_eval=True`.
"""
assert len(results) == self.cumulative_sizes[-1], \
('Dataset and results have different sizes: '
f'{self.cumulative_sizes[-1]} v.s. {len(results)}')
# Check whether all the datasets support evaluation
for dataset in self.datasets:
assert hasattr(dataset, 'evaluate'), \
f'{type(dataset)} does not implement evaluate function'
if self.separate_eval:
dataset_idx = -1
total_eval_results = dict()
for size, dataset in zip(self.cumulative_sizes, self.datasets):
start_idx = 0 if dataset_idx == -1 else \
self.cumulative_sizes[dataset_idx]
end_idx = self.cumulative_sizes[dataset_idx + 1]
results_per_dataset = results[start_idx:end_idx]
print_log(
                    f'\nEvaluating {dataset.ann_file} with '
f'{len(results_per_dataset)} images now',
logger=logger)
eval_results_per_dataset = dataset.evaluate(
results_per_dataset, logger=logger, **kwargs)
dataset_idx += 1
for k, v in eval_results_per_dataset.items():
total_eval_results.update({f'{dataset_idx}_{k}': v})
return total_eval_results
elif any([isinstance(ds, CocoDataset) for ds in self.datasets]):
raise NotImplementedError(
'Evaluating concatenated CocoDataset as a whole is not'
' supported! Please set "separate_eval=True"')
elif len(set([type(ds) for ds in self.datasets])) != 1:
raise NotImplementedError(
'All the datasets should have same types')
else:
original_data_infos = self.datasets[0].data_infos
self.datasets[0].data_infos = sum(
[dataset.data_infos for dataset in self.datasets], [])
eval_results = self.datasets[0].evaluate(
results, logger=logger, **kwargs)
self.datasets[0].data_infos = original_data_infos
return eval_results
@DATASETS.register_module()
class RepeatDataset:
"""A wrapper of repeated dataset.
The length of repeated dataset will be `times` larger than the original
dataset. This is useful when the data loading time is long but the dataset
is small. Using RepeatDataset can reduce the data loading time between
epochs.
Args:
dataset (:obj:`Dataset`): The dataset to be repeated.
times (int): Repeat times.
"""
def __init__(self, dataset, times):
self.dataset = dataset
self.times = times
self.CLASSES = dataset.CLASSES
self.PALETTE = getattr(dataset, 'PALETTE', None)
if hasattr(self.dataset, 'flag'):
self.flag = np.tile(self.dataset.flag, times)
self._ori_len = len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx % self._ori_len]
def get_cat_ids(self, idx):
"""Get category ids of repeat dataset by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
return self.dataset.get_cat_ids(idx % self._ori_len)
def get_ann_info(self, idx):
"""Get annotation of repeat dataset by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
return self.dataset.get_ann_info(idx % self._ori_len)
def __len__(self):
"""Length after repetition."""
return self.times * self._ori_len
# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa
@DATASETS.register_module()
class ClassBalancedDataset:
"""A wrapper of repeated dataset with repeat factor.
Suitable for training on class imbalanced datasets like LVIS. Following
the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_,
in each epoch, an image may appear multiple times based on its
"repeat factor".
    The repeat factor for an image is a function of the frequency of the rarest
category labeled in that image. The "frequency of category c" in [0, 1]
is defined by the fraction of images in the training set (without repeats)
in which category c appears.
The dataset needs to instantiate :func:`self.get_cat_ids` to support
ClassBalancedDataset.
    The repeat factor is computed as follows.
1. For each category c, compute the fraction # of images
that contain it: :math:`f(c)`
2. For each category c, compute the category-level repeat factor:
:math:`r(c) = max(1, sqrt(t/f(c)))`
3. For each image I, compute the image-level repeat factor:
:math:`r(I) = max_{c in I} r(c)`
Args:
dataset (:obj:`CustomDataset`): The dataset to be repeated.
oversample_thr (float): frequency threshold below which data is
repeated. For categories with ``f_c >= oversample_thr``, there is
no oversampling. For categories with ``f_c < oversample_thr``, the
            degree of oversampling follows the square-root inverse frequency
heuristic above.
filter_empty_gt (bool, optional): If set true, images without bounding
boxes will not be oversampled. Otherwise, they will be categorized
as the pure background class and involved into the oversampling.
Default: True.
"""
def __init__(self, dataset, oversample_thr, filter_empty_gt=True):
self.dataset = dataset
self.oversample_thr = oversample_thr
self.filter_empty_gt = filter_empty_gt
self.CLASSES = dataset.CLASSES
self.PALETTE = getattr(dataset, 'PALETTE', None)
repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
repeat_indices = []
for dataset_idx, repeat_factor in enumerate(repeat_factors):
repeat_indices.extend([dataset_idx] * math.ceil(repeat_factor))
self.repeat_indices = repeat_indices
flags = []
if hasattr(self.dataset, 'flag'):
for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
flags.extend([flag] * int(math.ceil(repeat_factor)))
assert len(flags) == len(repeat_indices)
self.flag = np.asarray(flags, dtype=np.uint8)
def _get_repeat_factors(self, dataset, repeat_thr):
"""Get repeat factor for each images in the dataset.
Args:
dataset (:obj:`CustomDataset`): The dataset
repeat_thr (float): The threshold of frequency. If an image
                contains the categories whose frequency is below the threshold,
it would be repeated.
Returns:
            list[float]: The repeat factors for each image in the dataset.
"""
# 1. For each category c, compute the fraction # of images
# that contain it: f(c)
category_freq = defaultdict(int)
num_images = len(dataset)
for idx in range(num_images):
cat_ids = set(self.dataset.get_cat_ids(idx))
if len(cat_ids) == 0 and not self.filter_empty_gt:
cat_ids = set([len(self.CLASSES)])
for cat_id in cat_ids:
category_freq[cat_id] += 1
for k, v in category_freq.items():
category_freq[k] = v / num_images
# 2. For each category c, compute the category-level repeat factor:
# r(c) = max(1, sqrt(t/f(c)))
category_repeat = {
cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
for cat_id, cat_freq in category_freq.items()
}
# 3. For each image I, compute the image-level repeat factor:
# r(I) = max_{c in I} r(c)
repeat_factors = []
for idx in range(num_images):
cat_ids = set(self.dataset.get_cat_ids(idx))
if len(cat_ids) == 0 and not self.filter_empty_gt:
cat_ids = set([len(self.CLASSES)])
repeat_factor = 1
if len(cat_ids) > 0:
repeat_factor = max(
{category_repeat[cat_id]
for cat_id in cat_ids})
repeat_factors.append(repeat_factor)
return repeat_factors
def __getitem__(self, idx):
ori_index = self.repeat_indices[idx]
return self.dataset[ori_index]
def get_ann_info(self, idx):
"""Get annotation of dataset by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
ori_index = self.repeat_indices[idx]
return self.dataset.get_ann_info(ori_index)
def __len__(self):
"""Length after repetition."""
return len(self.repeat_indices)
@DATASETS.register_module()
class MultiImageMixDataset:
"""A wrapper of multiple images mixed dataset.
Suitable for training on multiple images mixed data augmentation like
mosaic and mixup. For the augmentation pipeline of mixed image data,
the `get_indexes` method needs to be provided to obtain the image
    indexes, and you can set `skip_type_keys` to change the pipeline running
process. At the same time, we provide the `dynamic_scale` parameter
to dynamically change the output image size.
Args:
dataset (:obj:`CustomDataset`): The dataset to be mixed.
pipeline (Sequence[dict]): Sequence of transform object or
config dict to be composed.
dynamic_scale (tuple[int], optional): The image scale can be changed
dynamically. Default to None. It is deprecated.
skip_type_keys (list[str], optional): Sequence of type string to
be skip pipeline. Default to None.
"""
def __init__(self,
dataset,
pipeline,
dynamic_scale=None,
skip_type_keys=None):
if dynamic_scale is not None:
raise RuntimeError(
'dynamic_scale is deprecated. Please use Resize pipeline '
'to achieve similar functions')
assert isinstance(pipeline, collections.abc.Sequence)
if skip_type_keys is not None:
assert all([
isinstance(skip_type_key, str)
for skip_type_key in skip_type_keys
])
self._skip_type_keys = skip_type_keys
self.pipeline = []
self.pipeline_types = []
for transform in pipeline:
if isinstance(transform, dict):
self.pipeline_types.append(transform['type'])
transform = build_from_cfg(transform, PIPELINES)
self.pipeline.append(transform)
else:
                raise TypeError('each transform in pipeline must be a dict')
self.dataset = dataset
self.CLASSES = dataset.CLASSES
self.PALETTE = getattr(dataset, 'PALETTE', None)
if hasattr(self.dataset, 'flag'):
self.flag = dataset.flag
self.num_samples = len(dataset)
def __len__(self):
return self.num_samples
def __getitem__(self, idx):
results = copy.deepcopy(self.dataset[idx])
for (transform, transform_type) in zip(self.pipeline,
self.pipeline_types):
if self._skip_type_keys is not None and \
transform_type in self._skip_type_keys:
continue
if hasattr(transform, 'get_indexes'):
indexes = transform.get_indexes(self.dataset)
if not isinstance(indexes, collections.abc.Sequence):
indexes = [indexes]
mix_results = [
copy.deepcopy(self.dataset[index]) for index in indexes
]
results['mix_results'] = mix_results
results = transform(results)
if 'mix_results' in results:
results.pop('mix_results')
return results
def update_skip_type_keys(self, skip_type_keys):
"""Update skip_type_keys. It is called by an external hook.
Args:
skip_type_keys (list[str], optional): Sequence of type
string to be skip pipeline.
"""
assert all([
isinstance(skip_type_key, str) for skip_type_key in skip_type_keys
])
self._skip_type_keys = skip_type_keys
|
|
import logging
import re
from collections import defaultdict
from . import Analysis, register_analysis
from ..knowledge_base import KnowledgeBase
from .. import SIM_PROCEDURES
from ..codenode import HookNode
from ..sim_variable import SimConstantVariable, SimRegisterVariable, SimMemoryVariable, SimStackVariable
l = logging.getLogger("angr.analyses.binary_optimizer")
class ConstantPropagation(object):
def __init__(self, constant, constant_assignment_loc, constant_consuming_loc):
self.constant = constant
self.constant_assignment_loc = constant_assignment_loc
self.constant_consuming_loc = constant_consuming_loc
def __repr__(self):
s = "<Constant %#x propagates from %#x to %#x>" % (
self.constant,
self.constant_assignment_loc.ins_addr,
self.constant_consuming_loc.ins_addr
)
return s
class RedundantStackVariable(object):
def __init__(self, argument, stack_variable, stack_variable_consuming_locs):
self.argument = argument
self.stack_variable = stack_variable
self.stack_variable_consuming_locs = stack_variable_consuming_locs
self.argument_register_as_retval = False
def __repr__(self):
s = "<StackVar %s for %s at %d locations%s>" % (
self.stack_variable,
self.argument,
len(self.stack_variable_consuming_locs),
" - retval" if self.argument_register_as_retval else "",
)
return s
class RegisterReallocation(object):
def __init__(self, stack_variable, register_variable, stack_variable_sources, stack_variable_consumers,
prologue_addr, prologue_size, epilogue_addr, epilogue_size):
"""
Constructor.
:param SimStackVariable stack_variable:
:param SimRegisterVariable register_variable:
:param list stack_variable_sources:
:param list stack_variable_consumers:
:param int prologue_addr:
:param int prologue_size:
:param int epilogue_addr:
:param int epilogue_size:
"""
self.stack_variable = stack_variable
self.register_variable = register_variable
self.stack_variable_sources = stack_variable_sources
self.stack_variable_consumers = stack_variable_consumers
self.prologue_addr = prologue_addr
self.prologue_size = prologue_size
self.epilogue_addr = epilogue_addr
self.epilogue_size = epilogue_size
def __repr__(self):
s = "<RegisterReallocation %s for %s with %d sources and %d consumers>" % (
self.register_variable,
self.stack_variable,
len(self.stack_variable_sources),
len(self.stack_variable_consumers),
)
return s
class DeadAssignment(object):
def __init__(self, pv):
"""
Constructor.
:param angr.analyses.ddg.ProgramVariable pv: The assignment to remove.
"""
self.pv = pv
def __repr__(self):
s = "<DeadAssignmentElimination %s>" % self.pv
return s
class BinaryOptimizer(Analysis):
"""
    This is a collection of binary optimization techniques we used in Mechanical Phish during the finals of the Cyber
    Grand Challenge. It focuses on dealing with some serious speed-impacting code constructs, and *sort of* worked on
    *some* CGC binaries compiled with O0. Use this analysis as a reference for how to use the data dependency graph and
    related analyses.
    There is no guarantee that BinaryOptimizer will ever work on non-CGC binaries. Feel free to send us a PR or MR, but
    please *do not* ask for support of non-CGC binaries.
"""
BLOCKS_THRESHOLD = 500 # do not optimize a function if it has more than this number of blocks
def __init__(self, cfg, techniques):
self.cfg = cfg
if techniques is None:
raise Exception('At least one optimization technique must be specified.')
supported_techniques = {
'constant_propagation',
'redundant_stack_variable_removal',
'register_reallocation',
'dead_assignment_elimination',
}
if techniques - supported_techniques:
raise Exception('At least one optimization technique specified is not supported.')
self._techniques = techniques.copy()
self.constant_propagations = [ ]
self.redundant_stack_variables = [ ]
self.register_reallocations = [ ]
self.dead_assignments = [ ]
self.optimize()
def optimize(self):
for f in self.kb.functions.itervalues(): # type: angr.knowledge.Function
# if there are unresolved targets in this function, we do not try to optimize it
if any([ n.sim_procedure is SIM_PROCEDURES['stubs']['UnresolvableTarget'] for n in f.graph.nodes_iter()
if isinstance(n, HookNode) ]):
continue
if len(f.block_addrs_set) > self.BLOCKS_THRESHOLD:
continue
self._optimize_function(f)
def _optimize_function(self, function):
"""
:param angr.knowledge.Function function:
:return:
"""
#if function.addr != 0x8048250:
# return
func_kb = KnowledgeBase(self.project, self.project.loader.main_object)
cfg = self.project.analyses.CFGAccurate(kb=func_kb,
call_depth=1,
base_graph=function.graph,
keep_state=True,
starts=(function.addr,),
iropt_level=0,
)
ddg = self.project.analyses.DDG(kb=func_kb,
cfg=cfg
)
if 'constant_propagation' in self._techniques:
self._constant_propagation(function, ddg.simplified_data_graph)
if 'redundant_stack_variable_removal' in self._techniques:
self._redundant_stack_variable_removal(function, ddg.simplified_data_graph)
if 'register_reallocation' in self._techniques:
self._register_reallocation(function, ddg.simplified_data_graph)
if 'dead_assignment_elimination' in self._techniques:
self._dead_assignment_elimination(function, ddg.simplified_data_graph)
def _constant_propagation(self, function, data_graph): #pylint:disable=unused-argument
"""
:param function:
:param networkx.MultiDiGraph data_graph:
:return:
"""
        # find all edge sequences that look like const->reg->memory
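        # Illustrative sketch (hedged) of the pattern being matched, e.g. on x86:
        #     mov eax, 0x601050          ; constant -> register   (n0 -> n1)
        #     mov dword [ebp-0xc], eax   ; register -> memory     (n1 -> n2)
        # If eax has no other definition or consumer in between, a rewriter can
        # store the constant to memory directly and drop the intermediate
        # register assignment.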
for n0 in data_graph.nodes_iter():
if not isinstance(n0.variable, SimConstantVariable):
continue
n1s = data_graph.successors(n0)
if len(n1s) != 1:
continue
n1 = n1s[0]
if not isinstance(n1.variable, SimRegisterVariable):
continue
if len(data_graph.predecessors(n1)) != 1:
continue
n2s = data_graph.successors(n1)
if len(n2s) != 1:
continue
n2 = n2s[0]
if not isinstance(n2.variable, SimMemoryVariable):
continue
n2_inedges = data_graph.in_edges(n2, data=True)
if len([ 0 for _, _, data in n2_inedges if 'type' in data and data['type'] == 'mem_data' ]) != 1:
continue
cp = ConstantPropagation(n0.variable.value, n0.location, n2.location)
self.constant_propagations.append(cp)
# print n0, n1, n2
def _redundant_stack_variable_removal(self, function, data_graph):
"""
        If an argument passed on the stack (e.g. dword ptr [ebp+4h]) is saved to a local variable on the stack at the
        beginning of the function, and this local variable is never modified anywhere in this function, and no pointer
        of any stack variable is saved in any register, then we can replace all references to this local variable with
        references to that argument instead.
:param function:
:param networkx.MultiDiGraph data_graph:
:return:
"""
# check if there is any stack pointer being stored into any register other than esp
# basically check all consumers of stack pointers
stack_ptrs = [ ]
sp_offset = self.project.arch.registers['esp'][0]
bp_offset = self.project.arch.registers['ebp'][0]
for n in data_graph.nodes_iter():
if isinstance(n.variable, SimRegisterVariable) and n.variable.reg in (sp_offset, bp_offset):
stack_ptrs.append(n)
# for each stack pointer variable, make sure none of its consumers is a general purpose register
for stack_ptr in stack_ptrs:
out_edges = data_graph.out_edges(stack_ptr, data=True)
for _, dst, data in out_edges:
if 'type' in data and data['type'] == 'kill':
# we don't care about killing edges
continue
if isinstance(dst.variable, SimRegisterVariable) and dst.variable.reg < 40 and \
dst.variable.reg not in (sp_offset, bp_offset):
# oops
l.debug('Function %s does not satisfy requirements of redundant stack variable removal.',
repr(function)
)
return
argument_variables = [ ]
for n in data_graph.nodes_iter():
if isinstance(n.variable, SimStackVariable) and n.variable.base == 'bp' and n.variable.offset >= 0:
argument_variables.append(n)
if not argument_variables:
return
#print function
#print argument_variables
argument_to_local = { }
argument_register_as_retval = set()
# for each argument, find its correspondence on the local stack frame
for argument_variable in argument_variables:
# is it copied to the stack?
successors0 = data_graph.successors(argument_variable)
if not successors0:
continue
if len(successors0) != 1:
continue
if isinstance(successors0[0].variable, SimRegisterVariable):
# argument -> register -> stack
out_edges = data_graph.out_edges(successors0[0], data=True)
successors1 = [ s for _, s, data in out_edges if 'type' not in data or data['type'] != 'kill' ]
if len(successors1) == 1:
successor1 = successors1[0]
if isinstance(successor1.variable, SimStackVariable):
if (successor1.variable.base == 'sp' and successor1.variable.offset > 0) or \
(successor1.variable.base == 'bp' and successor1.variable.offset < 0):
# yes it's copied onto the stack!
argument_to_local[argument_variable] = successor1
# if the register is eax, and it's not killed later, it might be the return value of this function
# in that case, we cannot eliminate the instruction that moves stack argument to that register
if successors0[0].variable.reg == self.project.arch.registers['eax'][0]:
killers = [ s for _, s, data in out_edges if 'type' in data and data['type'] == 'kill']
if not killers:
# it might be the return value
argument_register_as_retval.add(argument_variable)
else:
# TODO:
import ipdb; ipdb.set_trace()
#import pprint
#pprint.pprint(argument_to_local, width=160)
# find local correspondence that are not modified throughout this function
redundant_stack_variables = [ ]
for argument, local_var in argument_to_local.iteritems():
# local_var cannot be killed anywhere
out_edges = data_graph.out_edges(local_var, data=True)
consuming_locs = [ ]
for _, consumer, data in out_edges:
consuming_locs.append(consumer.location)
if 'type' in data and data['type'] == 'kill':
break
else:
# no killing edges. the value is not changed!
rsv = RedundantStackVariable(argument, local_var, consuming_locs)
if argument in argument_register_as_retval:
rsv.argument_register_as_retval = True
redundant_stack_variables.append(rsv)
self.redundant_stack_variables.extend(redundant_stack_variables)
def _register_reallocation(self, function, data_graph):
"""
Find unused registers throughout the function, and use those registers to replace stack variables.
Only functions that satisfy the following criteria can be optimized in this way:
- The function does not call any other function.
- The function does not use esp to index any stack variable.
- Prologue and epilogue of the function is identifiable.
- At least one register is not used in the entire function.
:param angr.knowledge.Function function:
:param networkx.MultiDiGraph data_graph:
:return: None
"""
# make sure this function does not call other functions
if function.callout_sites:
return
if len(function.endpoints) != 1:
return
# identify function prologue and epilogue
startpoint_block = self.project.factory.block(function.startpoint.addr).capstone
startpoint_insns = startpoint_block.insns
# supported function prologues:
#
# push ebp
# mov ebp, esp
# sub esp, [0-9a-f]+h
#
# push ebp
# mov ebp, esp
# push eax
if len(startpoint_insns) < 3:
return
insn0, insn1, insn2 = startpoint_insns[:3]
if not (insn0.mnemonic == 'push' and insn0.op_str == 'ebp'):
return
if not (insn1.mnemonic == 'mov' and insn1.op_str == 'ebp, esp'):
return
if not (insn2.mnemonic == 'sub' and re.match(r"esp, [0-9a-fx]+", insn2.op_str)) and \
not (insn2.mnemonic == 'push' and insn2.op_str == 'eax'):
return
endpoint_block = self.project.factory.block(function.endpoints[0].addr).capstone
endpoint_insns = endpoint_block.insns
# supported function epilogues:
#
# add esp, [0-9a-f]+h
# pop ebp
# ret
if len(endpoint_insns) < 3:
return
insn3, insn4, insn5 = endpoint_insns[-3:]
if not (insn3.mnemonic == 'add' and re.match(r"esp, [0-9a-fx]+", insn3.op_str)):
return
if not (insn4.mnemonic == 'pop' and insn4.op_str == 'ebp'):
return
if not insn5.mnemonic == 'ret':
return
# make sure esp is not used anywhere else - all stack variables must be indexed using ebp
esp_offset = self.project.arch.registers['esp'][0]
ebp_offset = self.project.arch.registers['ebp'][0]
esp_variables = [ ]
for n in data_graph.nodes_iter():
if isinstance(n.variable, SimRegisterVariable) and n.variable.reg == esp_offset:
esp_variables.append(n)
# find out all call instructions
call_insns = set()
for src, dst, data in function.transition_graph.edges_iter(data=True):
if 'type' in data and data['type'] == 'call':
src_block = function._get_block(src.addr)
call_insns.add(src_block.instruction_addrs[-1])
# there should be six esp variables + all call sites
# push ebp (insn0 - read, insn0 - write) ; sub esp, 0xXX (insn2) ;
# add esp, 0xXX (insn3) ; pop ebp (insn4) ; ret (insn5)
esp_insns = set( n.location.ins_addr for n in esp_variables )
if esp_insns != { insn0.address, insn2.address, insn3.address, insn4.address, insn5.address } | call_insns:
return
prologue_addr = insn0.address
prologue_size = insn0.size + insn1.size + insn2.size
epilogue_addr = insn3.address
epilogue_size = insn3.size + insn4.size + insn5.size
# look at consumer of those esp variables. no other instruction should be consuming them
# esp_consumer_insns = { insn0.address, insn1.address, insn2.address, insn3.address, insn4.address,
# insn5.address} | esp_insns
# for esp_variable in esp_variables: # type: angr.analyses.ddg.ProgramVariable
# consumers = data_graph.successors(esp_variable)
# if any([ consumer.location.ins_addr not in esp_consumer_insns for consumer in consumers ]):
# return
        # make sure we never get the address of those stack variables into any register
# say, lea edx, [ebp-0x4] is forbidden
# check all edges in data graph
for src, dst, data in data_graph.edges_iter(data=True):
if isinstance(dst.variable, SimRegisterVariable) and \
dst.variable.reg != ebp_offset and \
dst.variable.reg < 40:
#to a register other than ebp
if isinstance(src.variable, SimRegisterVariable) and \
src.variable.reg == ebp_offset:
# from ebp
l.debug("Found a lea operation from ebp at %#x. Function %s cannot be optimized.",
dst.location.ins_addr,
repr(function),
)
return
# we definitely don't want to mess with fp or sse operations
for node in data_graph.nodes_iter():
if isinstance(node.variable, SimRegisterVariable) and \
72 <= node.variable.reg < 288: # offset(mm0) <= node.variable.reg < offset(cs)
                l.debug('Found a floating-point/SSE register access at %#x. Function %s cannot be optimized.',
node.location.ins_addr,
repr(function)
)
return
l.debug("RegisterReallocation: function %s satisfies the criteria.", repr(function))
# nice. let's see if we can optimize this function
# do we have free registers?
used_general_registers = set()
for n in data_graph.nodes_iter():
if isinstance(n.variable, SimRegisterVariable):
if n.variable.reg < 40: # this is a hardcoded limit - we only care about general registers
used_general_registers.add(n.variable.reg)
registers = self.project.arch.registers
all_general_registers = { #registers['eax'][0], registers['ecx'][0], registers['edx'][0],
registers['ebx'][0], registers['edi'][0], registers['esi'][0],
registers['esp'][0], registers['ebp'][0]
}
unused_general_registers = all_general_registers - used_general_registers
if not unused_general_registers:
l.debug("RegisterReallocation: function %s does not have any free register.", repr(function))
return
l.debug("RegisterReallocation: function %s has %d free register(s): %s",
repr(function),
len(unused_general_registers),
", ".join([self.project.arch.register_names[u] for u in unused_general_registers ])
)
# find local stack variables of size 4
stack_variables = set()
for n in data_graph.nodes_iter():
if isinstance(n.variable, SimStackVariable) and \
n.variable.base == 'bp' and \
n.variable.size == 4 and \
n.variable.offset < 0:
stack_variables.add(n)
# alright, now we need to make sure that stack variables are never accessed by indexes
# in other words, they must be accessed directly in forms of 'dword ptr [ebp+x]'
# it's easy to do this: we get mem_addr predecessors of each stack variable, and make sure there are only two of
# them: one is ebp, the other one is a constant
#
# ah, also, since we do not want to mess with crazy fp registers, we further require none of the stack variable
# sources and consumers is a FP register.
filtered_stack_variables = set()
for stack_variable in stack_variables:
failed = False
# check how they are accessed
in_edges = data_graph.in_edges(stack_variable, data=True)
for src, _, data in in_edges:
if 'type' in data and data['type'] == 'mem_addr':
if isinstance(src.variable, SimRegisterVariable) and src.variable.reg == ebp_offset:
# ebp
pass
elif isinstance(src.variable, SimConstantVariable):
# the constant
pass
else:
# ouch
failed = True
break
if isinstance(src.variable, SimRegisterVariable) and src.variable.reg >= 72:
# it comes from a FP register
failed = True
break
if failed:
continue
# check consumers
out_edges = data_graph.out_edges(stack_variable, data=True)
for _, dst, data in out_edges:
if 'type' in data and data['type'] == 'kill':
continue
if isinstance(dst.variable, SimRegisterVariable) and dst.variable.reg >= 72:
# an FP register is the consumer
failed = True
break
if failed:
continue
filtered_stack_variables.add(stack_variable)
# order the stack variables by the sum of their in and out degrees.
stack_variable_to_degree = defaultdict(int)
stack_variable_sources = defaultdict(list)
for sv in filtered_stack_variables:
stack_variable_to_degree[sv.variable] += data_graph.in_degree(sv)
stack_variable_to_degree[sv.variable] += data_graph.out_degree(sv)
stack_variable_sources[sv.variable].append(sv)
sorted_stack_variables = sorted(stack_variable_to_degree.keys(),
key=lambda sv: stack_variable_to_degree[sv],
reverse=True
)
# aha these are the ones that we can replace!
for reg, sv in zip(unused_general_registers, sorted_stack_variables):
non_initial_sources = [src for src in stack_variable_sources[sv] if not src.initial]
if not non_initial_sources:
# we failed to find any source for it, which indicates a failure in our dependence analysis
# skip
continue
# get consumers
consumers = set()
for src in stack_variable_sources[sv]:
out_edges = data_graph.out_edges(src, data=True)
for _, dst, data in out_edges:
if 'type' not in data or data['type'] != 'kill':
consumers.add(dst)
rr = RegisterReallocation(sv, SimRegisterVariable(reg, 4), non_initial_sources,
list(consumers), prologue_addr, prologue_size, epilogue_addr, epilogue_size
)
self.register_reallocations.append(rr)
l.debug("RegisterReallocation: %s will replace %s in function %s.",
rr.register_variable,
rr.stack_variable,
repr(function)
)
def _dead_assignment_elimination(self, function, data_graph): #pylint:disable=unused-argument
"""
        Remove assignments to registers that have no consumers and are immediately killed.
BROKEN - DO NOT USE IT
:param angr.knowledge.Function function:
:param networkx.MultiDiGraph data_graph:
:return: None
"""
register_pvs = set()
for node in data_graph.nodes_iter():
if isinstance(node.variable, SimRegisterVariable) and \
node.variable.reg is not None and \
node.variable.reg < 40:
register_pvs.add(node)
for reg in register_pvs:
# does it have a consumer?
out_edges = data_graph.out_edges(reg, data=True)
consumers = [ ]
killers = [ ]
for _, _, data in out_edges:
if 'type' in data and data['type'] == 'kill':
killers.append(data)
else:
consumers.append(data)
if not consumers and killers:
# we can remove the assignment!
da = DeadAssignment(reg)
self.dead_assignments.append(da)
register_analysis(BinaryOptimizer, 'BinaryOptimizer')
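# A minimal usage sketch (hedged): exact CFG options depend on the angr version
# in use; the technique names are the ones validated in BinaryOptimizer.__init__.
#
#     p = angr.Project("./some_cgc_binary", auto_load_libs=False)   # path assumed
#     cfg = p.analyses.CFGAccurate(keep_state=True)
#     opt = p.analyses.BinaryOptimizer(cfg, {"constant_propagation",
#                                            "redundant_stack_variable_removal"})
#     print(opt.constant_propagations, opt.redundant_stack_variables)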
|
|
import numpy as np
from menpo.base import Targetable, Vectorizable
from menpo.model.pdm import PDM, GlobalPDM, OrthoPDM
from .base import Transform, VComposable, VInvertible
class ModelDrivenTransform(Transform, Targetable, Vectorizable,
VComposable, VInvertible):
r"""
A transform that couples a traditional landmark-based transform to a
statistical model such that source points of the alignment transform
are the points of the model. The weights of the transform are just
    the weights of the statistical model.
If no source is provided, the mean of the model is defined as the
source landmarks of the transform.
Parameters
----------
model : :class:`menpo.model.base.StatisticalModel`
A linear statistical shape model.
transform_cls : :class:`menpo.transform.AlignableTransform`
A class of :class:`menpo.transform.base.AlignableTransform`
The align constructor will be called on this with the source
and target landmarks. The target is
set to the points generated from the model using the
        provided weights - the source is either given or set to the
model's mean.
source : :class:`menpo.shape.base.PointCloud`
The source landmarks of the transform. If None, the mean of the model
is used.
Default: None
"""
def __init__(self, model, transform_cls, source=None):
self.pdm = PDM(model)
self._cached_points = None
self.transform = transform_cls(source, self.target)
@property
def n_dims(self):
r"""
The number of dimensions that the transform supports.
:type: int
"""
return self.pdm.n_dims
def _apply(self, x, **kwargs):
r"""
Apply this transform to the given object. Uses the internal transform.
Parameters
----------
x : (N, D) ndarray or a transformable object
The object to be transformed.
kwargs : dict
            Passed through to the transform's ``apply_inplace`` method.
Returns
--------
transformed : (N, D) ndarray or object
The transformed object
"""
return self.transform._apply(x, **kwargs)
@property
def target(self):
return self.pdm.target
def _target_setter(self, new_target):
r"""
On a new target being set, we need to:
Parameters
----------
new_target: :class:`PointCloud`
The new_target that we want to set.
"""
self.pdm.set_target(new_target)
def _new_target_from_state(self):
# We delegate to PDM to handle all our Targetable duties. As a
# result, *we* never need to call _sync_target_for_state, so we have
# no need for an implementation of this method. Of course the
# interface demands it, so the stub is here. Contrast with
# _target_setter, which is required, because we will have to handle
# external calls to set_target().
pass
def _sync_state_from_target(self):
# Let the pdm update its state
self.pdm._sync_state_from_target()
# and update our transform to the new state
self.transform.set_target(self.target)
@property
def n_parameters(self):
r"""
The total number of parameters.
Simply ``n_weights``.
:type: int
"""
return self.pdm.n_parameters
def as_vector(self):
r"""
        Return the current weights of this transform - these are just
        the linear model's weights.
Returns
-------
params : (``n_parameters``,) ndarray
The vector of weights
"""
return self.pdm.as_vector()
def from_vector_inplace(self, vector):
r"""
        Updates the ModelDrivenTransform's state from its
vectorized form.
"""
self.pdm.from_vector_inplace(vector)
# By here the pdm has updated our target state, we just need to
# update the transform
self.transform.set_target(self.target)
def compose_after_from_vector_inplace(self, delta):
r"""
Composes two ModelDrivenTransforms together based on the
first order approximation proposed by Papandreou and Maragos in [1].
Parameters
----------
delta : (N,) ndarray
Vectorized :class:`ModelDrivenTransform` to be applied **before**
self
Returns
--------
transform : self
self, updated to the result of the composition
References
----------
.. [1] G. Papandreou and P. Maragos, "Adaptive and Constrained
Algorithms for Inverse Compositional Active Appearance Model
Fitting", CVPR08
"""
model_jacobian = self.pdm.model.jacobian
points = self.pdm.model.mean.points
n_points = self.pdm.model.mean.n_points
# compute:
# -> dW/dp when p=0
# -> dW/dp when p!=0
# -> dW/dx when p!=0 evaluated at the source landmarks
# dW/dp when p=0 and when p!=0 are the same and simply given by
# the Jacobian of the model
dW_dp_0 = model_jacobian
dW_dp = dW_dp_0
# dW_dp_0: n_points x n_params x n_dims
# dW_dp: n_points x n_params x n_dims
dW_dx = self.transform.jacobian_points(points)
# dW_dx: n_points x n_dims x n_dims
#TODO: Can we do this without splitting across the two dimensions?
dW_dx_x = dW_dx[:, 0, :].flatten()[..., None]
dW_dx_y = dW_dx[:, 1, :].flatten()[..., None]
dW_dp_0_mat = np.reshape(dW_dp_0, (n_points * self.n_dims,
self.n_parameters))
dW_dx_dW_dp_0 = dW_dp_0_mat * dW_dx_x + dW_dp_0_mat * dW_dx_y
dW_dx_dW_dp_0 = np.reshape(dW_dx_dW_dp_0,
(n_points, self.n_parameters, self.n_dims))
# dW_dx: n_points x n_dims x n_dims
# dW_dp_0: n_points x n_params x n_dims
# dW_dx_dW_dp_0: n_points x n_params x n_dims
J = np.einsum('ijk, ilk -> jl', dW_dp, dW_dx_dW_dp_0)
H = np.einsum('ijk, ilk -> jl', dW_dp, dW_dp)
Jp = np.linalg.solve(H, J)
# Jp: n_params x n_params
self.from_vector_inplace(self.as_vector() + np.dot(Jp, delta))
return self
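    # In the notation of the comments above, the update applied by
    # compose_after_from_vector_inplace is the first-order approximation
    #     p_new = p + H^-1 J delta
    # where J = sum over points/dims of (dW/dp)^T (dW/dx dW/dp_0) and
    # H = sum over points/dims of (dW/dp)^T (dW/dp) - the two einsum calls -
    # so Jp = H^-1 J maps the incremental weights `delta` into the current
    # parameterisation.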
@property
def has_true_inverse(self):
return False
def _build_pseudoinverse(self):
return self.from_vector(-self.as_vector())
def pseudoinverse_vector(self, vector):
r"""
The vectorized pseudoinverse of a provided vector instance.
Syntactic sugar for
self.from_vector(vector).pseudoinverse.as_vector()
On ModelDrivenTransform this is especially fast - we just negate the
vector provided.
Parameters
----------
vector : (P,) ndarray
A vectorized version of self
Returns
-------
pseudoinverse_vector : (N,) ndarray
The pseudoinverse of the vector provided
"""
return -vector
def jacobian(self, points):
"""
        Calculates the Jacobian of the ModelDrivenTransform wrt
        its weights. This is done by chaining the relative
weight of each point wrt the source landmarks, i.e. the Jacobian of
the warp wrt the source landmarks when the target is assumed to be
equal to the source (dW/dx), together with the Jacobian of the
linear model wrt its weights (dX/dp).
Parameters
-----------
points: (N, D) ndarray
The points at which the Jacobian will be evaluated.
Returns
-------
dW/dp : (N, P, D) ndarray
The Jacobian of the ModelDrivenTransform evaluated at the
previous points.
"""
# check if re-computation of dW/dx can be avoided
if not np.array_equal(self._cached_points, points):
# recompute dW/dx, i.e. the relative weight of each point wrt
# the source landmarks
self.dW_dX = self.transform.weight_points(points)
# cache points
self._cached_points = points
# dX/dp is simply the Jacobian of the model
dX_dp = self.pdm.model.jacobian
# dW_dX: n_points x n_points x n_dims
# dX_dp: n_points x n_params x n_dims
dW_dp = np.einsum('ild, lpd -> ipd', self.dW_dX, dX_dp)
# dW_dp: n_points x n_params x n_dims
return dW_dp
# noinspection PyMissingConstructor
class GlobalMDTransform(ModelDrivenTransform):
r"""
A transform that couples an alignment transform to a
statistical model together with a global similarity transform,
such that the weights of the transform are fully specified by
both the weights of statistical model and the weights of the
similarity transform. The model is assumed to
generate an instance which is then transformed by the similarity
transform; the result defines the target landmarks of the transform.
If no source is provided, the mean of the model is defined as the
source landmarks of the transform.
Parameters
----------
model : :class:`menpo.model.base.StatisticalModel`
A linear statistical shape model.
transform_cls : :class:`menpo.transform.AlignableTransform`
A class of :class:`menpo.transform.base.AlignableTransform`
The align constructor will be called on this with the source
and target landmarks. The target is
set to the points generated from the model using the
        provided weights - the source is either given or set to the
model's mean.
global_transform : :class:`menpo.transform.AlignableTransform`
A class of :class:`menpo.transform.base.AlignableTransform`
The global transform that should be applied to the model output.
Doesn't have to have been constructed from the .align() constructor.
Note that the GlobalMDTransform isn't guaranteed to hold on to the
exact object passed in here - so don't expect external changes to
the global_transform to be reflected in the behavior of this object.
source : :class:`menpo.shape.base.PointCloud`, optional
The source landmarks of the transform. If no ``source`` is provided the
mean of the model is used.
weights : (P,) ndarray, optional
The reconstruction weights that will be fed to the model in order to
generate an instance of the target landmarks.
composition: 'both', 'warp' or 'model', optional
The composition approximation employed by this
ModelDrivenTransform.
Default: `both`
"""
def __init__(self, model, transform_cls, global_transform, source=None):
self.pdm = GlobalPDM(model, global_transform)
self._cached_points = None
self.transform = transform_cls(source, self.target)
def compose_after_from_vector_inplace(self, delta):
r"""
Composes two ModelDrivenTransforms together based on the
first order approximation proposed by Papandreou and Maragos in [1].
Parameters
----------
delta : (N,) ndarray
Vectorized :class:`ModelDrivenTransform` to be applied **before**
self
Returns
--------
transform : self
self, updated to the result of the composition
References
----------
.. [1] G. Papandreou and P. Maragos, "Adaptive and Constrained
Algorithms for Inverse Compositional Active Appearance Model
Fitting", CVPR08
"""
model_jacobian = self.pdm.model.jacobian
points = self.pdm.model.mean.points
n_points = self.pdm.model.mean.n_points
# compute:
# -> dW/dp when p=0
# -> dW/dp when p!=0
# -> dW/dx when p!=0 evaluated at the source landmarks
# dW/dq when p=0 and when p!=0 are the same and given by the
# Jacobian of the global transform evaluated at the mean of the
# model
dW_dq = self._global_transform_jacobian(points)
# dW_dq: n_points x n_global_params x n_dims
# dW/db when p=0, is the Jacobian of the model
dW_db_0 = model_jacobian
# dW_db_0: n_points x n_weights x n_dims
# dW/dp when p=0, is simply the concatenation of the previous
# two terms
dW_dp_0 = np.hstack((dW_dq, dW_db_0))
# dW_dp_0: n_points x n_params x n_dims
# by application of the chain rule dW_db when p!=0,
# is the Jacobian of the global transform wrt the points times
# the Jacobian of the model: dX(S)/db = dX/dS * dS/db
dW_dS = self.pdm.global_transform.jacobian_points(points)
dW_db = np.einsum('ilj, idj -> idj', dW_dS, dW_db_0)
# dW_dS: n_points x n_dims x n_dims
# dW_db: n_points x n_weights x n_dims
# dW/dp is simply the concatenation of dX_dq with dX_db
dW_dp = np.hstack((dW_dq, dW_db))
# dW_dp: n_points x n_params x n_dims
dW_dx = self.transform.jacobian_points(points)
#dW_dx = np.dot(dW_dx, self.global_transform.linear_component.T)
# dW_dx: n_points x n_dims x n_dims
#TODO: Can we do this without splitting across the two dimensions?
dW_dx_x = dW_dx[:, 0, :].flatten()[..., None]
dW_dx_y = dW_dx[:, 1, :].flatten()[..., None]
dW_dp_0_mat = np.reshape(dW_dp_0, (n_points * self.n_dims,
self.n_parameters))
dW_dx_dW_dp_0 = dW_dp_0_mat * dW_dx_x + dW_dp_0_mat * dW_dx_y
dW_dx_dW_dp_0 = np.reshape(dW_dx_dW_dp_0,
(n_points, self.n_parameters, self.n_dims))
# dW_dx: n_points x n_dims x n_dims
# dW_dp_0: n_points x n_params x n_dims
# dW_dx_dW_dp_0: n_points x n_params x n_dims
J = np.einsum('ijk, ilk -> jl', dW_dp, dW_dx_dW_dp_0)
H = np.einsum('ijk, ilk -> jl', dW_dp, dW_dp)
Jp = np.linalg.solve(H, J)
# Jp: n_params x n_params
self.from_vector_inplace(self.as_vector() + np.dot(Jp, delta))
def _global_transform_jacobian(self, points):
return self.pdm.global_transform.jacobian(points)
def jacobian(self, points):
"""
        Calculates the Jacobian of the ModelDrivenTransform wrt
        its weights. This is done by chaining the relative
weight of each point wrt the source landmarks, i.e. the Jacobian of
the warp wrt the source landmarks when the target is assumed to be
equal to the source (dW/dx), together with the Jacobian of the
linear model (and of the global transform if present) wrt its
weights (dX/dp).
Parameters
-----------
points: (N, D) ndarray
The points at which the Jacobian will be evaluated.
Returns
-------
dW/dp : (N, P, D) ndarray
The Jacobian of the ModelDrivenTransform evaluated at the
previous points.
"""
# check if re-computation of dW/dx can be avoided
if not np.array_equal(self._cached_points, points):
# recompute dW/dx, i.e. the relative weight of each point wrt
# the source landmarks
self.dW_dX = self.transform.weight_points(points)
# cache points
self._cached_points = points
model_jacobian = self.pdm.model.jacobian
points = self.pdm.model.mean.points
# compute dX/dp
# dX/dq is the Jacobian of the global transform evaluated at the
# mean of the model.
dX_dq = self._global_transform_jacobian(points)
# dX_dq: n_points x n_global_params x n_dims
# by application of the chain rule dX_db is the Jacobian of the
# model transformed by the linear component of the global transform
dS_db = model_jacobian
dX_dS = self.pdm.global_transform.jacobian_points(points)
dX_db = np.einsum('ilj, idj -> idj', dX_dS, dS_db)
# dS_db: n_points x n_weights x n_dims
# dX_dS: n_points x n_dims x n_dims
# dX_db: n_points x n_weights x n_dims
# dX/dp is simply the concatenation of the previous two terms
dX_dp = np.hstack((dX_dq, dX_db))
# dW_dX: n_points x n_points x n_dims
# dX_dp: n_points x n_params x n_dims
dW_dp = np.einsum('ild, lpd -> ipd', self.dW_dX, dX_dp)
# dW_dp: n_points x n_params x n_dims
return dW_dp
# noinspection PyMissingConstructor
class OrthoMDTransform(GlobalMDTransform):
r"""
A transform that couples an alignment transform to a
statistical model together with a global similarity transform,
such that the weights of the transform are fully specified by
both the weights of statistical model and the weights of the
similarity transform. The model is assumed to
generate an instance which is then transformed by the similarity
transform; the result defines the target landmarks of the transform.
If no source is provided, the mean of the model is defined as the
source landmarks of the transform.
This transform (in contrast to the :class:`GlobalMDTransform`)
additionally orthonormalizes both the global and the model basis against
    each other, ensuring that orthogonality and normalization are enforced
across the unified bases.
Parameters
----------
model : :class:`menpo.model.base.StatisticalModel`
A linear statistical shape model.
transform_cls : :class:`menpo.transform.AlignableTransform`
A class of :class:`menpo.transform.base.AlignableTransform`
The align constructor will be called on this with the source
and target landmarks. The target is
set to the points generated from the model using the
        provided weights - the source is either given or set to the
model's mean.
global_transform : :class:`menpo.transform.AlignableTransform`
A class of :class:`menpo.transform.base.AlignableTransform`
The global transform that should be applied to the model output.
Doesn't have to have been constructed from the .align() constructor.
Note that the GlobalMDTransform isn't guaranteed to hold on to the
exact object passed in here - so don't expect external changes to
the global_transform to be reflected in the behavior of this object.
source : :class:`menpo.shape.base.PointCloud`, optional
The source landmarks of the transform. If no ``source`` is provided the
mean of the model is used.
"""
def __init__(self, model, transform_cls, global_transform, source=None):
self.pdm = OrthoPDM(model, global_transform)
self._cached_points = None
self.transform = transform_cls(source, self.target)
def _global_transform_jacobian(self, points):
return self.pdm.similarity_model.jacobian
|
|
# -*- coding: utf-8 -*-
###############################################################################
from collections import defaultdict
from functools import partial
from multiprocessing import cpu_count
from threading import RLock
from six import iteritems, iterkeys, itervalues
from .executors import ParallelExecutor, ProcessPoolExecutor
from .executors.mixins import PoolOfPoolsMixin
###############################################################################
class ExecutorPool(object):
"""
Executor pool for :py:class:`PoolOfPools` which does accurate and
intelligent management for the pools of predefined classes.
Basically it tries to reuse existing executors if possible. If it is not
possible it creates new ones.
    Just an example: you have done a big mapping of the data in 10 threads. As a
    rule you would now shut down and clean up this pool. But a bit later you see
    that you need a pool of 4 threads. Why not reuse the existing pool? This
    class allows you to do that, and it tracks that 6 threads are idle. So if
    you have a task where you need <= 6 threads it will reuse that pool as
    well. The task with 4 threads may continue to work in parallel, but you
    still have 6 threads you can occupy. That is the main idea.
    It also tries to squash several idle pools into a single instance by
    expanding the amount of workers in one instance and throwing out the
    other ones.
"""
def __init__(self, worker_class):
"""
Constructor of the class. worker_class has to be a class which
supports required interface and behaviour, it has to be an instance
of :py:class:`streams.executors.mixins.PoolOfPoolsMixin`.
:param PoolOfPoolsMixin worker_class: The class of executors this pool
has to maintain.
"""
assert issubclass(worker_class, PoolOfPoolsMixin)
self.worker_class = worker_class
self.workers = defaultdict(lambda: [])
self.lock = RLock()
def get_any(self):
"""
Returns any map function, it is undetermined how many workers does it
have. As a rule, you get a minimal amount of workers within a pool of
executors.
"""
with self.lock:
return self.get(min(iterkeys(self.workers)))
def get(self, required_workers):
"""
Returns a mapper which guarantees that you can utilize given number of
workers.
:param int required_workers: The number of workers you need to utilize
for your task.
"""
assert required_workers > 0
with self.lock:
self.squash()
worker, availability = self.get_suitable_worker(required_workers)
if worker is None:
worker = self.worker_class(required_workers)
availability = required_workers
availability -= required_workers
if availability > 0:
self.workers[availability].append(worker)
return partial(worker.map,
required_workers=required_workers,
callback=self.worker_finished)
def squash(self):
"""
Squashes pools and tries to minimize the amount of pools available to
avoid unnecessary fragmentation and complexity.
"""
if not self.workers:
return
with self.lock:
for avail in list(iterkeys(self.workers)):
if not self.workers[avail]:
self.workers.pop(avail)
self.squash_workers(self.name_to_worker_mapping(),
self.real_worker_availability())
def get_suitable_worker(self, required_workers):
"""
        Returns a suitable executor which has the required amount of workers.
        Returns ``None`` if nothing is available.
        Actually it returns a tuple of the worker and the count of workers
        available for utilization within the given pool. That count may be more
        than ``required_workers`` but it cannot be less.
:param int required_workers: The amount of workers user requires.
"""
with self.lock:
min_available, suspected_workers = None, None
for availability, workers in iteritems(self.workers):
if availability >= required_workers:
                    if min_available is None or availability < min_available:
min_available = availability
suspected_workers = workers
if min_available is not None:
return suspected_workers.pop(), min_available
return None, 0
def worker_finished(self, worker, required_workers):
"""
The callback used by
:py:class:`streams.executors.mixins.PoolOfPoolsMixin`.
"""
with self.lock:
self.workers[required_workers].append(worker)
def name_to_worker_mapping(self):
"""
Maps worker names (the result of applying :py:func:`id` to the
executor) to executor instances.
"""
with self.lock:
name_to_workers = {}
for workers in itervalues(self.workers):
name_to_workers.update(
(id(worker), worker) for worker in workers
)
return name_to_workers
def real_worker_availability(self):
"""
        Returns a mapping from the executor's name to its real availability.
        Since :py:meth:`worker_finished` does not do any defragmentation of
        availability, the internal structure may contain conflicting
        information about worker availability. This method is intended to
        restore the truth.
"""
with self.lock:
real_availability = defaultdict(lambda: [])
for avail, workers in iteritems(self.workers):
for wrk in workers:
real_availability[id(wrk)].append(avail)
for name in iterkeys(real_availability):
real_availability[name] = max(real_availability[name])
availability_to_workers = defaultdict(lambda: [])
for worker_name, avail in iteritems(real_availability):
availability_to_workers[avail].append(worker_name)
return availability_to_workers
def squash_workers(self, names, avails):
"""
Does actual squashing/defragmentation of internal structure.
"""
self.workers = defaultdict(lambda: [])
avails_to_traverse = set(iterkeys(avails))
while avails_to_traverse:
minimal_avail = min(avails_to_traverse)
avails_to_traverse.discard(minimal_avail)
workers = avails[minimal_avail]
selected_worker = names[workers[0]]
if len(workers) == 1:
self.workers[minimal_avail] = [selected_worker]
else:
selected_worker.expand(minimal_avail * (len(workers) - 1))
extended_avail = minimal_avail * len(workers)
avails_to_traverse.add(extended_avail)
avails.pop(minimal_avail)
class PoolOfPools(object):
"""
Just a convenient interface to the set of multiple
:py:class:`ExecutorPool` instances, nothing more.
"""
@staticmethod
def get_from_pool(pool, required_workers):
"""
Fetches mapper from the pool.
:param ExecutorPool pool: The pool you want to fetch mapper from.
:param int required_workers: The amount of workers you are requiring.
                                      If it is ``None``,
                                      :py:meth:`ExecutorPool.get_any` is
                                      executed instead.
"""
if required_workers is None:
return pool.get_any()
return pool.get(required_workers)
def __init__(self):
self.parallels = ExecutorPool(ParallelExecutor)
self.processes = ExecutorPool(ProcessPoolExecutor)
self.default_count = cpu_count()
def parallel(self, required_workers):
"""
Fetches parallel executor mapper from the underlying
:py:class:`ExecutorPool`.
:param int required_workers: The amount of workers you are requiring.
                                      If it is ``None``,
                                      :py:meth:`ExecutorPool.get_any` is
                                      executed instead.
"""
return self.get_from_pool(self.parallels, required_workers)
def process(self, required_workers):
"""
Fetches process executor mapper from the underlying
:py:class:`ExecutorPool`.
:param int required_workers: The amount of workers you are requiring.
                                      If it is ``None``,
                                      :py:meth:`ExecutorPool.get_any` is
                                      executed instead.
"""
return self.get_from_pool(self.processes, required_workers)
def get(self, kwargs):
"""
Returns the mapper.
        :param dict kwargs: Keyword arguments for the mapper. Please check the
                            :py:meth:`streams.Stream.map` documentation
                            to understand what this dict has to contain.
"""
if "parallel" in kwargs:
parallel = kwargs["parallel"]
if parallel in (1, True):
return self.parallel(self.default_count)
if parallel is not None:
return self.parallel(parallel)
if "process" in kwargs:
process = kwargs["process"]
if process in (1, True):
return self.process(self.default_count)
if process is not None:
return self.process(process)
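# A minimal usage sketch (hedged): the call signature of the returned mapper
# comes from PoolOfPoolsMixin.map and is assumed here to mirror the builtin
# map(fn, iterable); check streams.executors.mixins for the exact interface.
#
#     pools = PoolOfPools()
#     mapper = pools.get({"parallel": 4})          # thread-backed mapper, 4 workers
#     results = list(mapper(str.upper, ["a", "b", "c"]))
#     proc_mapper = pools.get({"process": True})   # process pool sized to cpu_count()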
|
|
import numpy
import theano
import theano.tensor as tt
import Mariana.settings as MSET
import Mariana.abstraction as MABS
import Mariana.initializations as MI
import Mariana.useful as MUSE
import Mariana.custom_types as MTYPES
__all__= ["Decorator_ABC", "Mask", "RandomMask", "BinomialDropout", "Center", "Normalize", "Clip", "AddGaussianNoise", "MultGaussianNoise", "Scale", "Shift"]
class Decorator_ABC(MABS.TrainableAbstraction_ABC, MABS.Apply_ABC) :
"""A decorator is a modifier that is applied on a layer's output. They are always the last the abstraction to be applied."""
def __init__(self, streams, **kwargs):
super(Decorator_ABC, self).__init__(**kwargs)
        self.streams = set(streams)
self.setHP("streams", streams)
def apply(self, layer, stream) :
"""Apply to a layer and update networks's log"""
if stream in self.streams :
return self.run(layer, stream=stream)
def run(self, layer, stream) :
"""The function that all decorator_ABCs must implement"""
        raise NotImplementedError("This one should be implemented in the child class")
class Mask(Decorator_ABC):
"""Applies a fixed mask to the outputs of the layer. This layers does this:
.. math::
outputs = outputs * mask
If you want to remove some parts of the output use 0s, if you want to keep them as they are use 1s.
Anything else will lower or increase the values by the given factors.
:param array/list mask: It should have the same dimensions as the layer's outputs.
"""
def __init__(self, mask, streams=["train"], **kwargs):
super(Mask, self).__init__(streams, **kwargs)
self.mask = tt.cast(mask, theano.config.floatX)
self.setHP("mask", mask)
def run(self, layer, stream) :
layer.outputs[stream] = layer.outputs[stream] * self.mask
class RandomMask(Decorator_ABC):
"""
    This decorator takes a list of masks and will randomly apply one of them to the outputs of the layer it runs on.
Could be used as a fast approximation for dropout.
"""
def __init__(self, masks, streams=["train"], **kwargs):
super(RandomMask, self).__init__(streams, **kwargs)
        self.masks = tt.cast(masks, theano.config.floatX)
        self.nbMasks = len(masks)
self.setHP("masks", masks)
def run(self, layer, stream) :
rnd = tt.shared_randomstreams.RandomStreams(seed=MSET.RANDOM_SEED)
maskId = rnd.random_integers(low=0, high=self.nbMasks-1, ndim=1)
mask = self.masks[maskId]
layer.outputs[stream] = layer.outputs[stream] * mask
class BinomialDropout(Decorator_ABC):
"""Stochastically mask some parts of the output of the layer. Use it to make things such as denoising autoencoders and dropout layers"""
def __init__(self, dropoutRatio, streams=["train"], **kwargs):
super(BinomialDropout, self).__init__(streams, **kwargs)
assert (dropoutRatio >= 0 and dropoutRatio <= 1)
# self.dropoutRatio = dropoutRatio
# self.seed = MSET.RANDOM_SEED
self.setHP("dropoutRatio", dropoutRatio)
def run(self, layer, stream) :
if self.getHP("dropoutRatio") > 0 :
rnd = tt.shared_randomstreams.RandomStreams(seed=MSET.RANDOM_SEED)
mask = rnd.binomial(n = 1, p = (1-self.getHP("dropoutRatio")), size = layer.outputs[stream].shape, dtype=theano.config.floatX)
# cast to stay in GPU float limit
mask = MUSE.iCast_theano(mask)
layer.outputs[stream] = layer.outputs[stream] * mask
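# A hedged usage note: with dropoutRatio=0.5 roughly half of the units of the
# "train" stream output are zeroed on every pass. Attaching the decorator to a
# layer is sketched below with an assumed layer constructor - check the layer
# API of your Mariana version for the exact keyword:
#
#     hidden = ML.Hidden(128, activation=MA.ReLU(),
#                        decorators=[BinomialDropout(0.5)], name="hid1")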
class Center(Decorator_ABC) :
"""Centers the outputs by substracting the mean"""
def __init__(self, streams=["train"], **kwargs):
super(Center, self).__init__(streams, **kwargs)
def run(self, layer, stream) :
layer.outputs[stream] = layer.outputs[stream]-tt.mean(layer.outputs[stream])
class Normalize(Decorator_ABC) :
"""
    Normalizes the outputs by subtracting the mean and dividing by the standard deviation
:param float epsilon: Actually it is not the std that is used but the approximation: sqrt(Variance + epsilon). Use this parameter to set the epsilon value
"""
    def __init__(self, epsilon=1e-6, streams=["train"], **kwargs) :
super(Normalize, self).__init__(streams, **kwargs)
self.setHP("epsilon", epsilon)
def run(self, layer, stream) :
        std = tt.sqrt( tt.var(layer.outputs[stream]) + self.getHP("epsilon") )
        layer.outputs[stream] = ( layer.outputs[stream] - tt.mean(layer.outputs[stream]) ) / std
# class BatchNormalization(Decorator_ABC):
# """Applies Batch Normalization to the outputs of the layer.
# Implementation according to Sergey Ioffe and Christian Szegedy (http://arxiv.org/abs/1502.03167)
# .. math::
# \\gamma * \\frac{x - \\mu}{\\sqrt{\\sigma^2 + \\epsilon}} + \\beta
# Where \\gamma and \\beta are learned and std stands for the standard deviation. The mean and the std are computed accross the whole minibatch.
# :param float epsilon: Actually it is not the std that is used but the approximation: sqrt(Variance + epsilon). Use this parameter to set the epsilon value
# """
# def __init__(self, testMu, testSigma, initializations=[MI.SingleValue('gamma', 1), MI.SingleValue('beta', 0)], epsilon=1e-6, streams=["train", "test"], **kwargs) :
# super(BatchNormalization, self).__init__(initializations=initializations, streams=streams, **kwargs)
# self.setHP("testMu", testMu)
# self.setHP("testSigma", testSigma)
# self.setHP("epsilon", epsilon)
# self.addParameters({
# "gamma": MTYPES.Parameter("gamma"),
# "beta": MTYPES.Parameter("beta")
# })
# def getParameterShape_abs(self, param, **kwargs) :
# return self.parent.getShape_abs()
# def run(self, layer, stream) :
# if stream == "train" :
# mu = tt.mean(layer.outputs[stream])
# sigma = tt.sqrt( tt.var(layer.outputs[stream]) + self.getHP("epsilon") )
# elif stream == "test" :
# mu = self.getHP("testMu")
# sigma = self.getHP("testSigma")
# layer.outputs[stream] = self.getP("gamma")() * ( (layer.outputs[stream] - mu) / sigma ) + self.getP("beta")()
class Clip(Decorator_ABC):
"""Clips the neurone activations, preventing them to go beyond the specified range"""
def __init__(self, lower, upper, streams=["train"], **kwargs) :
super(Clip, self).__init__(streams, **kwargs)
assert lower < upper
self.setHP("lower", lower)
self.setHP("upper", upper)
def run(self, layer, stream) :
        layer.outputs[stream] = layer.outputs[stream].clip(self.getHP("lower"), self.getHP("upper"))
# class ForceUnitRange(Decorator_ABC):
# """Force the output to be between [0, 1]"""
# def __init__(self, streams=["train"], **kwargs):
# super(ForceUnitRange, self).__init__(streams, **kwargs)
# def run(self, layer, stream) :
# layer.outputs[stream] = (layer.outputs[stream] - min(layer.outputs[stream]))
# layer.outputs[stream] /= max(layer.outputs[stream])
class AddGaussianNoise(Decorator_ABC):
"""Add gaussian noise to the output of the layer"""
def __init__(self, std, avg=0, streams=["train"], **kwargs):
assert std > 0
super(AddGaussianNoise, self).__init__(streams, **kwargs)
self.setHP("std", std)
self.setHP("avg", avg)
def run(self, layer, stream) :
rnd = tt.shared_randomstreams.RandomStreams(seed=MSET.RANDOM_SEED)
randomVals = rnd.normal(size = layer.getIntrinsicShape(), avg=self.getHP("avg"), std=self.getHP("std") )
layer.outputs[stream] = layer.outputs[stream] + randomVals
class MultGaussianNoise(Decorator_ABC):
"""Multiply gaussian noise to the output of the layer"""
def __init__(self, std, avg=0, streams=["train"], **kwargs):
assert std > 0
super(MultGaussianNoise, self).__init__(streams, **kwargs)
self.setHP("std", std)
self.setHP("avg", avg)
def run(self, layer, stream) :
rnd = tt.shared_randomstreams.RandomStreams(seed=MSET.RANDOM_SEED)
randomVals = rnd.normal(size = layer.getIntrinsicShape(), avg=self.getHP("avg"), std=self.getHP("std") )
layer.outputs[stream] = layer.outputs[stream] * randomVals
class Scale(Decorator_ABC):
"""Multiplies the output by scale"""
def __init__(self, scale, streams=["train", "test"], **kwargs):
super(Scale, self).__init__(streams, **kwargs)
self.setHP("scale", scale)
def run(self, layer, stream) :
layer.outputs[stream] = layer.outputs[stream] * self.getHP("scale")
class Shift(Decorator_ABC):
"""Shifts (addiction) the output by scale"""
def __init__(self, shift, streams=["train", "test"], **kwargs):
super(Shift, self).__init__(streams, **kwargs)
self.setHP("shift", shift)
def run(self, layer, stream) :
layer.outputs[stream] = layer.outputs[stream] + self.getHP("shift")
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from ..describe import Description, autoDescribeRoute
from ..rest import Resource, filtermodel, setResponseHeader, setContentDisposition
from girder.api import access
from girder.constants import AccessType, TokenScope
from girder.exceptions import RestException
from girder.models.folder import Folder as FolderModel
from girder.utility import ziputil
from girder.utility.progress import ProgressContext
class Folder(Resource):
"""API Endpoint for folders."""
def __init__(self):
super(Folder, self).__init__()
self.resourceName = 'folder'
self._model = FolderModel()
self.route('DELETE', (':id',), self.deleteFolder)
self.route('DELETE', (':id', 'contents'), self.deleteContents)
self.route('GET', (), self.find)
self.route('GET', (':id',), self.getFolder)
self.route('GET', (':id', 'details'), self.getFolderDetails)
self.route('GET', (':id', 'access'), self.getFolderAccess)
self.route('GET', (':id', 'download'), self.downloadFolder)
self.route('GET', (':id', 'rootpath'), self.rootpath)
self.route('POST', (), self.createFolder)
self.route('PUT', (':id',), self.updateFolder)
self.route('PUT', (':id', 'access'), self.updateFolderAccess)
self.route('POST', (':id', 'copy'), self.copyFolder)
self.route('PUT', (':id', 'metadata'), self.setMetadata)
self.route('DELETE', (':id', 'metadata'), self.deleteMetadata)
@access.public(scope=TokenScope.DATA_READ)
@filtermodel(model=FolderModel)
@autoDescribeRoute(
Description('Search for folders by certain properties.')
        .notes('You must pass either a "parentId" or "text" field '
               'to specify how you are searching for folders. '
               'If you omit both of these parameters the request will fail and respond: '
               '"Invalid search mode."')
.responseClass('Folder', array=True)
.param('parentType', "Type of the folder's parent", required=False,
enum=['folder', 'user', 'collection'])
.param('parentId', "The ID of the folder's parent.", required=False)
.param('text', 'Pass to perform a text search.', required=False)
.param('name', 'Pass to lookup a folder by exact name match. Must '
'pass parentType and parentId as well when using this.', required=False)
.pagingParams(defaultSort='lowerName')
.errorResponse()
.errorResponse('Read access was denied on the parent resource.', 403)
)
def find(self, parentType, parentId, text, name, limit, offset, sort):
"""
Get a list of folders with given search parameters. Currently accepted
search modes are:
1. Searching by parentId and parentType, with optional additional
filtering by the name field (exact match) or using full text search
within a single parent folder. Pass a "name" parameter or "text"
parameter to invoke these additional filters.
2. Searching with full text search across all folders in the system.
Simply pass a "text" parameter for this mode.
"""
user = self.getCurrentUser()
if parentType and parentId:
parent = self.model(parentType).load(
parentId, user=user, level=AccessType.READ, exc=True)
filters = {}
if text:
filters['$text'] = {
'$search': text
}
if name:
filters['name'] = name
return list(self._model.childFolders(
parentType=parentType, parent=parent, user=user,
offset=offset, limit=limit, sort=sort, filters=filters))
elif text:
return list(self._model.textSearch(
text, user=user, limit=limit, offset=offset, sort=sort))
else:
raise RestException('Invalid search mode.')
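    # A hedged request sketch for the two search modes above (the /api/v1 prefix
    # is Girder's default API mount point; adjust for your deployment):
    #
    #     GET /api/v1/folder?parentType=collection&parentId=<id>&name=data
    #     GET /api/v1/folder?text=annual+report&limit=10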
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get detailed information about a folder.')
.modelParam('id', model=FolderModel, level=AccessType.READ)
.errorResponse()
.errorResponse('Read access was denied on the folder.', 403)
)
def getFolderDetails(self, folder):
return {
'nItems': self._model.countItems(folder),
'nFolders': self._model.countFolders(
folder, user=self.getCurrentUser(), level=AccessType.READ)
}
@access.cookie
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Download an entire folder as a zip archive.')
.modelParam('id', model=FolderModel, level=AccessType.READ)
.jsonParam('mimeFilter', 'JSON list of MIME types to include.', required=False,
requireArray=True)
.produces('application/zip')
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the folder.', 403)
)
def downloadFolder(self, folder, mimeFilter):
"""
Returns a generator function that will be used to stream out a zip
file containing this folder's contents, filtered by permissions.
"""
setResponseHeader('Content-Type', 'application/zip')
setContentDisposition(folder['name'] + '.zip')
user = self.getCurrentUser()
def stream():
zip = ziputil.ZipGenerator(folder['name'])
for (path, file) in self._model.fileList(
folder, user=user, subpath=False, mimeFilter=mimeFilter):
for data in zip.addFile(file, path):
yield data
yield zip.footer()
return stream
@access.user(scope=TokenScope.DATA_WRITE)
@filtermodel(model=FolderModel)
@autoDescribeRoute(
Description('Update a folder or move it into a new parent.')
.responseClass('Folder')
.modelParam('id', model=FolderModel, level=AccessType.WRITE)
.param('name', 'Name of the folder.', required=False, strip=True)
.param('description', 'Description for the folder.', required=False, strip=True)
.param('parentType', "Type of the folder's parent", required=False,
enum=['folder', 'user', 'collection'], strip=True)
.param('parentId', 'Parent ID for the new parent of this folder.', required=False)
.jsonParam('metadata', 'A JSON object containing the metadata keys to add',
paramType='form', requireObject=True, required=False)
.errorResponse('ID was invalid.')
.errorResponse('Write access was denied for the folder or its new parent object.', 403)
)
def updateFolder(self, folder, name, description, parentType, parentId, metadata):
user = self.getCurrentUser()
if name is not None:
folder['name'] = name
if description is not None:
folder['description'] = description
folder = self._model.updateFolder(folder)
if metadata:
folder = self._model.setMetadata(folder, metadata)
if parentType and parentId:
parent = self.model(parentType).load(
parentId, level=AccessType.WRITE, user=user, exc=True)
if (parentType, parent['_id']) != (folder['parentCollection'], folder['parentId']):
folder = self._model.move(folder, parent, parentType)
return folder
@access.user(scope=TokenScope.DATA_OWN)
@filtermodel(model=FolderModel, addFields={'access'})
@autoDescribeRoute(
Description('Update the access control list for a folder.')
.modelParam('id', model=FolderModel, level=AccessType.ADMIN)
.jsonParam('access', 'The JSON-encoded access control list.', requireObject=True)
.jsonParam('publicFlags', 'JSON list of public access flags.', requireArray=True,
required=False)
.param('public', 'Whether the folder should be publicly visible.',
dataType='boolean', required=False)
.param('recurse', 'Whether the policies should be applied to all '
'subfolders under this folder as well.', dataType='boolean',
default=False, required=False)
.param('progress', 'If recurse is set to True, this controls whether '
'progress notifications will be sent.', dataType='boolean',
default=False, required=False)
.errorResponse('ID was invalid.')
.errorResponse('Admin access was denied for the folder.', 403)
)
def updateFolderAccess(self, folder, access, publicFlags, public, recurse, progress):
user = self.getCurrentUser()
progress = progress and recurse # Only enable progress in recursive case
with ProgressContext(progress, user=user, title='Updating permissions',
message='Calculating progress...') as ctx:
if progress:
ctx.update(total=self._model.subtreeCount(
folder, includeItems=False, user=user, level=AccessType.ADMIN))
return self._model.setAccessList(
folder, access, save=True, recurse=recurse, user=user,
progress=ctx, setPublic=public, publicFlags=publicFlags)
@access.user(scope=TokenScope.DATA_WRITE)
@filtermodel(model=FolderModel)
@autoDescribeRoute(
Description('Create a new folder.')
.responseClass('Folder')
.param('parentType', "Type of the folder's parent", required=False,
enum=['folder', 'user', 'collection'], default='folder')
.param('parentId', "The ID of the folder's parent.")
.param('name', 'Name of the folder.', strip=True)
.param('description', 'Description for the folder.', required=False,
default='', strip=True)
.param('reuseExisting', 'Return existing folder if it exists rather than '
'creating a new one.', required=False,
dataType='boolean', default=False)
.param('public', 'Whether the folder should be publicly visible. By '
'default, inherits the value from parent folder, or in the '
'case of user or collection parentType, defaults to False.',
required=False, dataType='boolean')
.jsonParam('metadata', 'A JSON object containing the metadata keys to add',
paramType='form', requireObject=True, required=False)
.errorResponse()
.errorResponse('Write access was denied on the parent', 403)
)
def createFolder(self, public, parentType, parentId, name, description,
reuseExisting, metadata):
user = self.getCurrentUser()
parent = self.model(parentType).load(
id=parentId, user=user, level=AccessType.WRITE, exc=True)
newFolder = self._model.createFolder(
parent=parent, name=name, parentType=parentType, creator=user,
description=description, public=public, reuseExisting=reuseExisting)
if metadata:
newFolder = self._model.setMetadata(newFolder, metadata)
return newFolder
@access.public(scope=TokenScope.DATA_READ)
@filtermodel(model=FolderModel)
@autoDescribeRoute(
Description('Get a folder by ID.')
.responseClass('Folder')
.modelParam('id', model=FolderModel, level=AccessType.READ)
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the folder.', 403)
)
def getFolder(self, folder):
return folder
@access.user(scope=TokenScope.DATA_OWN)
@autoDescribeRoute(
Description('Get the access control list for a folder.')
.responseClass('Folder')
.modelParam('id', model=FolderModel, level=AccessType.ADMIN)
.errorResponse('ID was invalid.')
.errorResponse('Admin access was denied for the folder.', 403)
)
def getFolderAccess(self, folder):
return self._model.getFullAccessList(folder)
@access.user(scope=TokenScope.DATA_OWN)
@autoDescribeRoute(
Description('Delete a folder by ID.')
.modelParam('id', model=FolderModel, level=AccessType.ADMIN)
.param('progress', 'Whether to record progress on this task.',
required=False, dataType='boolean', default=False)
.errorResponse('ID was invalid.')
.errorResponse('Admin access was denied for the folder.', 403)
)
def deleteFolder(self, folder, progress):
with ProgressContext(progress, user=self.getCurrentUser(),
title='Deleting folder %s' % folder['name'],
message='Calculating folder size...') as ctx:
# Don't do the subtree count if we weren't asked for progress
if progress:
ctx.update(total=self._model.subtreeCount(folder))
self._model.remove(folder, progress=ctx)
return {'message': 'Deleted folder %s.' % folder['name']}
@access.user(scope=TokenScope.DATA_WRITE)
@filtermodel(model=FolderModel)
@autoDescribeRoute(
Description('Set metadata fields on a folder.')
.responseClass('Folder')
.notes('Set metadata fields to null in order to delete them.')
.modelParam('id', model=FolderModel, level=AccessType.WRITE)
.jsonParam('metadata', 'A JSON object containing the metadata keys to add',
paramType='body', requireObject=True)
.param('allowNull', 'Whether "null" is allowed as a metadata value.', required=False,
dataType='boolean', default=False)
.errorResponse(('ID was invalid.',
'Invalid JSON passed in request body.',
'Metadata key name was invalid.'))
.errorResponse('Write access was denied for the folder.', 403)
)
def setMetadata(self, folder, metadata, allowNull):
return self._model.setMetadata(folder, metadata, allowNull=allowNull)
@access.user(scope=TokenScope.DATA_WRITE)
@filtermodel(model=FolderModel)
@autoDescribeRoute(
Description('Copy a folder.')
.responseClass('Folder')
.modelParam('id', 'The ID of the original folder.', model=FolderModel,
level=AccessType.READ)
.param('parentType', "Type of the new folder's parent", required=False,
enum=['folder', 'user', 'collection'])
.param('parentId', 'The ID of the parent document.', required=False)
.param('name', 'Name for the new folder.', required=False)
.param('description', "Description for the new folder.", required=False)
.param('public', "Whether the folder should be publicly visible. By "
"default, inherits the value from parent folder, or in the case "
"of user or collection parentType, defaults to False. If "
"'original', use the value of the original folder.",
required=False, enum=['true', 'false', 'original'])
.param('progress', 'Whether to record progress on this task.',
required=False, dataType='boolean', default=False)
.errorResponse(('A parameter was invalid.',
'ID was invalid.'))
.errorResponse('Read access was denied on the original folder.\n\n'
'Write access was denied on the parent.', 403)
)
def copyFolder(self, folder, parentType, parentId, name, description, public, progress):
user = self.getCurrentUser()
parentType = parentType or folder['parentCollection']
if parentId:
parent = self.model(parentType).load(
id=parentId, user=user, level=AccessType.WRITE, exc=True)
else:
parent = None
with ProgressContext(progress, user=self.getCurrentUser(),
title='Copying folder %s' % folder['name'],
message='Calculating folder size...') as ctx:
# Don't do the subtree count if we weren't asked for progress
if progress:
ctx.update(total=self._model.subtreeCount(folder))
return self._model.copyFolder(
folder, creator=user, name=name, parentType=parentType,
parent=parent, description=description, public=public, progress=ctx)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Remove all contents from a folder.')
.notes('Cleans out all the items and subfolders from under a folder, '
'but does not remove the folder itself.')
.modelParam('id', 'The ID of the folder to clean.', model=FolderModel,
level=AccessType.WRITE)
.param('progress', 'Whether to record progress on this task.',
required=False, dataType='boolean', default=False)
.errorResponse('ID was invalid.')
.errorResponse('Write access was denied on the folder.', 403)
)
def deleteContents(self, folder, progress):
with ProgressContext(progress, user=self.getCurrentUser(),
title='Clearing folder %s' % folder['name'],
message='Calculating folder size...') as ctx:
# Don't do the subtree count if we weren't asked for progress
if progress:
ctx.update(total=self._model.subtreeCount(folder) - 1)
self._model.clean(folder, progress=ctx)
return {'message': 'Cleaned folder %s.' % folder['name']}
@access.user(scope=TokenScope.DATA_WRITE)
@filtermodel(FolderModel)
@autoDescribeRoute(
Description('Delete metadata fields on a folder.')
.responseClass('Folder')
.modelParam('id', model=FolderModel, level=AccessType.WRITE)
.jsonParam(
'fields', 'A JSON list containing the metadata fields to delete',
paramType='body', schema={
'type': 'array',
'items': {
'type': 'string'
}
}
)
.errorResponse(('ID was invalid.',
'Invalid JSON passed in request body.',
'Metadata key name was invalid.'))
.errorResponse('Write access was denied for the folder.', 403)
)
def deleteMetadata(self, folder, fields):
return self._model.deleteMetadata(folder, fields)
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get the path to the root of the folder\'s hierarchy.')
.modelParam('id', model=FolderModel, level=AccessType.READ)
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the folder.', 403)
)
def rootpath(self, folder, params):
return self._model.parentsToRoot(folder, user=self.getCurrentUser())
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manage hosts in the current zone.
"""
import collections
import functools
import time
try:
from collections import UserDict as IterableUserDict # Python 3
except ImportError:
from UserDict import IterableUserDict # Python 2
import iso8601
from oslo_log import log as logging
from oslo_utils import timeutils
import six
import nova.conf
from nova import context as context_module
from nova import exception
from nova import objects
from nova.pci import stats as pci_stats
from nova.scheduler import filters
from nova.scheduler import weights
from nova import utils
from nova.virt import hardware
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
HOST_INSTANCE_SEMAPHORE = "host_instance"
class ReadOnlyDict(IterableUserDict):
"""A read-only dict."""
def __init__(self, source=None):
self.data = {}
if source:
self.data.update(source)
def __setitem__(self, key, item):
raise TypeError()
def __delitem__(self, key):
raise TypeError()
def clear(self):
raise TypeError()
def pop(self, key, *args):
raise TypeError()
def popitem(self):
raise TypeError()
def update(self):
raise TypeError()
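# Hedged usage sketch (illustration only; the names below are not part of this module):
#   ro = ReadOnlyDict({'topic': 'compute', 'disabled': False})
#   ro['topic']            # reads behave like a normal dict -> 'compute'
#   ro['topic'] = 'other'  # any mutation (set/del/clear/pop/popitem/update) raises TypeError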
@utils.expects_func_args('self', 'spec_obj')
def set_update_time_on_success(function):
"""Set updated time of HostState when consuming succeed."""
@functools.wraps(function)
def decorated_function(self, spec_obj):
return_value = None
try:
return_value = function(self, spec_obj)
except Exception as e:
# Ignore the exception raised from consume_from_request() so that the
# instance boot will instead fail in the resource claim on the compute
# node, and another suitable node may be chosen during a scheduling retry.
LOG.warning("Selected host: %(host)s failed to consume from "
"instance. Error: %(error)s",
{'host': self.host, 'error': e})
else:
now = timeutils.utcnow()
# NOTE(sbauza): Objects are UTC tz-aware by default
self.updated = now.replace(tzinfo=iso8601.UTC)
return return_value
return decorated_function
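# Hedged illustration of how set_update_time_on_success is meant to be applied
# (the class and names below are made up for demonstration):
#   class _FakeHostState(object):
#       host = 'node1'
#       updated = None
#       @set_update_time_on_success
#       def consume(self, spec_obj):
#           return spec_obj
#   _FakeHostState().consume(object())
# On success, ``updated`` is stamped with a tz-aware UTC timestamp; on failure the
# exception is logged and swallowed so a scheduling retry can pick another host.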
class HostState(object):
"""Mutable and immutable information tracked for a host.
This is an attempt to remove the ad-hoc data structures
previously used and lock down access.
"""
def __init__(self, host, node, cell_uuid):
self.host = host
self.nodename = node
self.uuid = None
self._lock_name = (host, node)
# Mutable available resources.
# These will change as resources are virtually "consumed".
self.total_usable_ram_mb = 0
self.total_usable_disk_gb = 0
self.disk_mb_used = 0
self.free_ram_mb = 0
self.free_disk_mb = 0
self.vcpus_total = 0
self.vcpus_used = 0
self.pci_stats = None
self.numa_topology = None
# Additional host information from the compute node stats:
self.num_instances = 0
self.num_io_ops = 0
self.failed_builds = 0
# Other information
self.host_ip = None
self.hypervisor_type = None
self.hypervisor_version = None
self.hypervisor_hostname = None
self.cpu_info = None
self.supported_instances = None
# Resource oversubscription values for the compute host:
self.limits = {}
# Generic metrics from compute nodes
self.metrics = None
# List of aggregates the host belongs to
self.aggregates = []
# Instances on this host
self.instances = {}
# Allocation ratios for this host
self.ram_allocation_ratio = None
self.cpu_allocation_ratio = None
self.disk_allocation_ratio = None
# Host cell (v2) membership
self.cell_uuid = cell_uuid
self.updated = None
def update(self, compute=None, service=None, aggregates=None,
inst_dict=None):
"""Update all information about a host."""
@utils.synchronized(self._lock_name)
def _locked_update(self, compute, service, aggregates, inst_dict):
# Scheduler API is inherently multi-threaded as every incoming RPC
# message will be dispatched in its own green thread. So the
# shared host state should be updated in a consistent way to make
# sure its data is valid under concurrent write operations.
if compute is not None:
LOG.debug("Update host state from compute node: %s", compute)
self._update_from_compute_node(compute)
if aggregates is not None:
LOG.debug("Update host state with aggregates: %s", aggregates)
self.aggregates = aggregates
if service is not None:
LOG.debug("Update host state with service dict: %s", service)
self.service = ReadOnlyDict(service)
if inst_dict is not None:
LOG.debug("Update host state with instances: %s",
list(inst_dict))
self.instances = inst_dict
return _locked_update(self, compute, service, aggregates, inst_dict)
def _update_from_compute_node(self, compute):
"""Update information about a host from a ComputeNode object."""
# NOTE(jichenjc): if the compute record has just been created but not yet
# updated, some fields such as free_disk_gb can be None
if 'free_disk_gb' not in compute or compute.free_disk_gb is None:
LOG.debug('Ignoring compute node %s as its usage has not been '
'updated yet.', compute.uuid)
return
if (self.updated and compute.updated_at and
self.updated > compute.updated_at):
return
all_ram_mb = compute.memory_mb
self.uuid = compute.uuid
# Assume the virtual size is all consumed by instances if qcow2 disks are used.
free_gb = compute.free_disk_gb
least_gb = compute.disk_available_least
if least_gb is not None:
if least_gb > free_gb:
# can occur when an instance in the database is not on the host
LOG.warning(
"Host %(hostname)s has more disk space than database "
"expected (%(physical)s GB > %(database)s GB)",
{'physical': least_gb, 'database': free_gb,
'hostname': compute.hypervisor_hostname})
free_gb = min(least_gb, free_gb)
free_disk_mb = free_gb * 1024
self.disk_mb_used = compute.local_gb_used * 1024
# NOTE(jogo) free_ram_mb can be negative
self.free_ram_mb = compute.free_ram_mb
self.total_usable_ram_mb = all_ram_mb
self.total_usable_disk_gb = compute.local_gb
self.free_disk_mb = free_disk_mb
self.vcpus_total = compute.vcpus
self.vcpus_used = compute.vcpus_used
self.updated = compute.updated_at
# the ComputeNode.numa_topology field is a StringField so deserialize
self.numa_topology = objects.NUMATopology.obj_from_db_obj(
compute.numa_topology) if compute.numa_topology else None
self.pci_stats = pci_stats.PciDeviceStats(
stats=compute.pci_device_pools)
# All virt drivers report host_ip
self.host_ip = compute.host_ip
self.hypervisor_type = compute.hypervisor_type
self.hypervisor_version = compute.hypervisor_version
self.hypervisor_hostname = compute.hypervisor_hostname
self.cpu_info = compute.cpu_info
if compute.supported_hv_specs:
self.supported_instances = [spec.to_list() for spec
in compute.supported_hv_specs]
else:
self.supported_instances = []
# Don't store stats directly in host_state to make sure these don't
# overwrite any values, or get overwritten themselves. Store in self so
# filters can schedule with them.
self.stats = compute.stats or {}
# Track number of instances on host
self.num_instances = int(self.stats.get('num_instances', 0))
self.num_io_ops = int(self.stats.get('io_workload', 0))
# update metrics
self.metrics = objects.MonitorMetricList.from_json(compute.metrics)
# update allocation ratios given by the ComputeNode object
self.cpu_allocation_ratio = compute.cpu_allocation_ratio
self.ram_allocation_ratio = compute.ram_allocation_ratio
self.disk_allocation_ratio = compute.disk_allocation_ratio
# update failed_builds counter reported by the compute
self.failed_builds = int(self.stats.get('failed_builds', 0))
def consume_from_request(self, spec_obj):
"""Incrementally update host state from a RequestSpec object."""
@utils.synchronized(self._lock_name)
@set_update_time_on_success
def _locked(self, spec_obj):
# Scheduler API is inherently multi-threaded as every incoming RPC
# message will be dispatched in its own green thread. So the
# shared host state should be consumed in a consistent way to make
# sure its data is valid under concurrent write operations.
self._locked_consume_from_request(spec_obj)
return _locked(self, spec_obj)
def _locked_consume_from_request(self, spec_obj):
disk_mb = (spec_obj.root_gb +
spec_obj.ephemeral_gb) * 1024
ram_mb = spec_obj.memory_mb
vcpus = spec_obj.vcpus
self.free_ram_mb -= ram_mb
self.free_disk_mb -= disk_mb
self.vcpus_used += vcpus
# Track number of instances on host
self.num_instances += 1
pci_requests = spec_obj.pci_requests
if pci_requests and self.pci_stats:
pci_requests = pci_requests.requests
else:
pci_requests = None
# Calculate the NUMA usage...
if self.numa_topology and spec_obj.numa_topology:
spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
self.numa_topology, spec_obj.numa_topology,
limits=self.limits.get('numa_topology'),
pci_requests=pci_requests, pci_stats=self.pci_stats)
self.numa_topology = hardware.numa_usage_from_instance_numa(
self.numa_topology, spec_obj.numa_topology)
# ...and the PCI usage
if pci_requests:
instance_cells = None
if spec_obj.numa_topology:
instance_cells = spec_obj.numa_topology.cells
self.pci_stats.apply_requests(pci_requests, instance_cells)
# NOTE(sbauza): By considering all cases when the scheduler is called
# and when consume_from_request() is run, we can safely say that there
# is always an IO operation because we want to move the instance
self.num_io_ops += 1
def __repr__(self):
return ("(%(host)s, %(node)s) ram: %(free_ram)sMB "
"disk: %(free_disk)sMB io_ops: %(num_io_ops)s "
"instances: %(num_instances)s" %
{'host': self.host, 'node': self.nodename,
'free_ram': self.free_ram_mb, 'free_disk': self.free_disk_mb,
'num_io_ops': self.num_io_ops,
'num_instances': self.num_instances})
class HostManager(object):
"""Base HostManager class."""
# Can be overridden in a subclass
def host_state_cls(self, host, node, cell, **kwargs):
return HostState(host, node, cell)
def __init__(self):
self.refresh_cells_caches()
self.filter_handler = filters.HostFilterHandler()
filter_classes = self.filter_handler.get_matching_classes(
CONF.filter_scheduler.available_filters)
self.filter_cls_map = {cls.__name__: cls for cls in filter_classes}
self.filter_obj_map = {}
self.enabled_filters = self._choose_host_filters(self._load_filters())
self.weight_handler = weights.HostWeightHandler()
weigher_classes = self.weight_handler.get_matching_classes(
CONF.filter_scheduler.weight_classes)
self.weighers = [cls() for cls in weigher_classes]
# Dict of aggregates keyed by their ID
self.aggs_by_id = {}
# Dict of set of aggregate IDs keyed by the name of the host belonging
# to those aggregates
self.host_aggregates_map = collections.defaultdict(set)
self._init_aggregates()
self.track_instance_changes = (
CONF.filter_scheduler.track_instance_changes)
# Dict of instances and status, keyed by host
self._instance_info = {}
if self.track_instance_changes:
self._init_instance_info()
def _load_filters(self):
return CONF.filter_scheduler.enabled_filters
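# Hedged config sketch (filter names are illustrative, not prescribed by this
# module): the list above typically comes from nova.conf, e.g.
#   [filter_scheduler]
#   enabled_filters = ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter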
def _init_aggregates(self):
elevated = context_module.get_admin_context()
aggs = objects.AggregateList.get_all(elevated)
for agg in aggs:
self.aggs_by_id[agg.id] = agg
for host in agg.hosts:
self.host_aggregates_map[host].add(agg.id)
def update_aggregates(self, aggregates):
"""Updates internal HostManager information about aggregates."""
if isinstance(aggregates, (list, objects.AggregateList)):
for agg in aggregates:
self._update_aggregate(agg)
else:
self._update_aggregate(aggregates)
def _update_aggregate(self, aggregate):
self.aggs_by_id[aggregate.id] = aggregate
for host in aggregate.hosts:
self.host_aggregates_map[host].add(aggregate.id)
# Refreshing the mapping dict to remove all hosts that are no longer
# part of the aggregate
for host in self.host_aggregates_map:
if (aggregate.id in self.host_aggregates_map[host] and
host not in aggregate.hosts):
self.host_aggregates_map[host].remove(aggregate.id)
def delete_aggregate(self, aggregate):
"""Deletes internal HostManager information about a specific aggregate.
"""
if aggregate.id in self.aggs_by_id:
del self.aggs_by_id[aggregate.id]
for host in self.host_aggregates_map:
if aggregate.id in self.host_aggregates_map[host]:
self.host_aggregates_map[host].remove(aggregate.id)
def _init_instance_info(self, computes_by_cell=None):
"""Creates the initial view of instances for all hosts.
As this initial population of instance information may take some time,
we don't wish to block the scheduler's startup while this completes.
The async method allows us to simply mock out the _init_instance_info()
method in tests.
:param computes_by_cell: a dict, keyed by cell, of lists of compute
nodes to populate instance info for; if None, compute nodes will be
looked up in the database
"""
def _async_init_instance_info(computes_by_cell):
context = context_module.RequestContext()
LOG.debug("START:_async_init_instance_info")
self._instance_info = {}
count = 0
if not computes_by_cell:
computes_by_cell = {}
for cell in self.cells.values():
with context_module.target_cell(context, cell) as cctxt:
cell_cns = objects.ComputeNodeList.get_all(
cctxt).objects
computes_by_cell[cell] = cell_cns
count += len(cell_cns)
LOG.debug("Total number of compute nodes: %s", count)
for cell, compute_nodes in computes_by_cell.items():
# Break the queries into batches of 10 to reduce the total
# number of calls to the DB.
batch_size = 10
start_node = 0
end_node = batch_size
while start_node <= len(compute_nodes):
curr_nodes = compute_nodes[start_node:end_node]
start_node += batch_size
end_node += batch_size
filters = {"host": [curr_node.host
for curr_node in curr_nodes],
"deleted": False}
with context_module.target_cell(context, cell) as cctxt:
result = objects.InstanceList.get_by_filters(
cctxt.elevated(), filters)
instances = result.objects
LOG.debug("Adding %s instances for hosts %s-%s",
len(instances), start_node, end_node)
for instance in instances:
host = instance.host
if host not in self._instance_info:
self._instance_info[host] = {"instances": {},
"updated": False}
inst_dict = self._instance_info[host]
inst_dict["instances"][instance.uuid] = instance
# Call sleep() to cooperatively yield
time.sleep(0)
LOG.debug("END:_async_init_instance_info")
# Run this async so that we don't block the scheduler start-up
utils.spawn_n(_async_init_instance_info, computes_by_cell)
def _choose_host_filters(self, filter_cls_names):
"""Since the caller may specify which filters to use we need
to have an authoritative list of what is permissible. This
function checks the filter names against a predefined set
of acceptable filters.
"""
if not isinstance(filter_cls_names, (list, tuple)):
filter_cls_names = [filter_cls_names]
good_filters = []
bad_filters = []
for filter_name in filter_cls_names:
if filter_name not in self.filter_obj_map:
if filter_name not in self.filter_cls_map:
bad_filters.append(filter_name)
continue
filter_cls = self.filter_cls_map[filter_name]
self.filter_obj_map[filter_name] = filter_cls()
good_filters.append(self.filter_obj_map[filter_name])
if bad_filters:
msg = ", ".join(bad_filters)
raise exception.SchedulerHostFilterNotFound(filter_name=msg)
return good_filters
def get_filtered_hosts(self, hosts, spec_obj, index=0):
"""Filter hosts and return only ones passing all filters."""
def _strip_ignore_hosts(host_map, hosts_to_ignore):
ignored_hosts = []
for host in hosts_to_ignore:
for (hostname, nodename) in list(host_map.keys()):
if host.lower() == hostname.lower():
del host_map[(hostname, nodename)]
ignored_hosts.append(host)
ignored_hosts_str = ', '.join(ignored_hosts)
LOG.info('Host filter ignoring hosts: %s', ignored_hosts_str)
def _match_forced_hosts(host_map, hosts_to_force):
forced_hosts = []
lowered_hosts_to_force = [host.lower() for host in hosts_to_force]
for (hostname, nodename) in list(host_map.keys()):
if hostname.lower() not in lowered_hosts_to_force:
del host_map[(hostname, nodename)]
else:
forced_hosts.append(hostname)
if host_map:
forced_hosts_str = ', '.join(forced_hosts)
LOG.info('Host filter forcing available hosts to %s',
forced_hosts_str)
else:
forced_hosts_str = ', '.join(hosts_to_force)
LOG.info("No hosts matched due to not matching "
"'force_hosts' value of '%s'", forced_hosts_str)
def _match_forced_nodes(host_map, nodes_to_force):
forced_nodes = []
for (hostname, nodename) in list(host_map.keys()):
if nodename not in nodes_to_force:
del host_map[(hostname, nodename)]
else:
forced_nodes.append(nodename)
if host_map:
forced_nodes_str = ', '.join(forced_nodes)
LOG.info('Host filter forcing available nodes to %s',
forced_nodes_str)
else:
forced_nodes_str = ', '.join(nodes_to_force)
LOG.info("No nodes matched due to not matching "
"'force_nodes' value of '%s'", forced_nodes_str)
def _get_hosts_matching_request(hosts, requested_destination):
"""Get hosts through matching the requested destination.
The host is always set on the requested destination object, while the
node may be None in some cases.
Starting with the 2.74 API microversion, the host and/or node can also
be specified to select where a server is launched:
- If only the host (or only the node, or both) is supplied and
get_compute_nodes_by_host_or_node (called from the
resources_from_request_spec function) returns a single node, the
destination will have both host and node set.
- If only the host is supplied and get_compute_nodes_by_host_or_node
returns more than one node, the destination will only include the
host.
"""
(host, node) = (requested_destination.host,
requested_destination.node)
if node:
requested_nodes = [x for x in hosts
if x.host == host and x.nodename == node]
else:
requested_nodes = [x for x in hosts
if x.host == host]
if requested_nodes:
LOG.info('Host filter only checking host %(host)s and '
'node %(node)s', {'host': host, 'node': node})
else:
# NOTE(sbauza): The API level should prevent the user from
# providing a wrong destination but let's make sure a wrong
# destination doesn't trample the scheduler still.
LOG.info('No hosts matched due to not matching requested '
'destination (%(host)s, %(node)s)',
{'host': host, 'node': node})
return iter(requested_nodes)
ignore_hosts = spec_obj.ignore_hosts or []
force_hosts = spec_obj.force_hosts or []
force_nodes = spec_obj.force_nodes or []
requested_node = spec_obj.requested_destination
if requested_node is not None and 'host' in requested_node:
# NOTE(sbauza): Reduce a potentially long set of hosts as much as
# possible to any requested destination nodes before passing the
# list to the filters
hosts = _get_hosts_matching_request(hosts, requested_node)
if ignore_hosts or force_hosts or force_nodes:
# NOTE(deva): we can't assume "host" is unique because
# one host may have many nodes.
name_to_cls_map = {(x.host, x.nodename): x for x in hosts}
if ignore_hosts:
_strip_ignore_hosts(name_to_cls_map, ignore_hosts)
if not name_to_cls_map:
return []
# NOTE(deva): allow force_hosts and force_nodes independently
if force_hosts:
_match_forced_hosts(name_to_cls_map, force_hosts)
if force_nodes:
_match_forced_nodes(name_to_cls_map, force_nodes)
check_type = ('scheduler_hints' in spec_obj and
spec_obj.scheduler_hints.get('_nova_check_type'))
if not check_type and (force_hosts or force_nodes):
# NOTE(deva,dansmith): Skip filters when forcing host or node
# unless we've declared the internal check type flag, in which
# case we're asking for a specific host and for filtering to
# be done.
if name_to_cls_map:
return name_to_cls_map.values()
else:
return []
hosts = six.itervalues(name_to_cls_map)
return self.filter_handler.get_filtered_objects(self.enabled_filters,
hosts, spec_obj, index)
def get_weighed_hosts(self, hosts, spec_obj):
"""Weigh the hosts."""
return self.weight_handler.get_weighed_objects(self.weighers,
hosts, spec_obj)
def _get_computes_for_cells(self, context, cells, compute_uuids=None):
"""Get a tuple of compute node and service information.
:param context: request context
:param cells: list of CellMapping objects
:param compute_uuids: list of ComputeNode UUIDs. If this is None, all
compute nodes from each specified cell will be returned, otherwise
only the ComputeNode objects with a UUID in the list of UUIDs in
any given cell are returned. If this is an empty list, the returned
compute_nodes tuple item will be an empty dict.
Returns a tuple (compute_nodes, services) where:
- compute_nodes is cell-uuid keyed dict of compute node lists
- services is a dict of services indexed by hostname
"""
def targeted_operation(cctxt):
services = objects.ServiceList.get_by_binary(
cctxt, 'nova-compute', include_disabled=True)
if compute_uuids is None:
return services, objects.ComputeNodeList.get_all(cctxt)
else:
return services, objects.ComputeNodeList.get_all_by_uuids(
cctxt, compute_uuids)
timeout = context_module.CELL_TIMEOUT
results = context_module.scatter_gather_cells(context, cells, timeout,
targeted_operation)
compute_nodes = collections.defaultdict(list)
services = {}
for cell_uuid, result in results.items():
if isinstance(result, Exception):
LOG.warning('Failed to get computes for cell %s', cell_uuid)
elif result is context_module.did_not_respond_sentinel:
LOG.warning('Timeout getting computes for cell %s', cell_uuid)
else:
_services, _compute_nodes = result
compute_nodes[cell_uuid].extend(_compute_nodes)
services.update({service.host: service
for service in _services})
return compute_nodes, services
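# Hedged illustration of the return shape (values below are made up):
#   compute_nodes = {'<cell-uuid>': [<ComputeNode host=compute1>, ...]}
#   services      = {'compute1': <Service binary=nova-compute>, ...}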
def _get_cell_by_host(self, ctxt, host):
'''Get CellMapping object of a cell the given host belongs to.'''
try:
host_mapping = objects.HostMapping.get_by_host(ctxt, host)
return host_mapping.cell_mapping
except exception.HostMappingNotFound:
LOG.warning('No host-to-cell mapping found for selected '
'host %(host)s.', {'host': host})
return
def get_compute_nodes_by_host_or_node(self, ctxt, host, node, cell=None):
'''Get compute nodes from given host or node'''
def return_empty_list_for_not_found(func):
def wrapper(*args, **kwargs):
try:
ret = func(*args, **kwargs)
except exception.NotFound:
ret = objects.ComputeNodeList()
return ret
return wrapper
@return_empty_list_for_not_found
def _get_by_host_and_node(ctxt):
compute_node = objects.ComputeNode.get_by_host_and_nodename(
ctxt, host, node)
return objects.ComputeNodeList(objects=[compute_node])
@return_empty_list_for_not_found
def _get_by_host(ctxt):
return objects.ComputeNodeList.get_all_by_host(ctxt, host)
@return_empty_list_for_not_found
def _get_by_node(ctxt):
compute_node = objects.ComputeNode.get_by_nodename(ctxt, node)
return objects.ComputeNodeList(objects=[compute_node])
if host and node:
target_fnc = _get_by_host_and_node
elif host:
target_fnc = _get_by_host
else:
target_fnc = _get_by_node
if host and not cell:
# optimization not to issue queries to every cell DB
cell = self._get_cell_by_host(ctxt, host)
cells = [cell] if cell else self.enabled_cells
timeout = context_module.CELL_TIMEOUT
nodes_by_cell = context_module.scatter_gather_cells(
ctxt, cells, timeout, target_fnc)
# Only one cell should have values for the compute nodes
# so we get them here, or return an empty list if no cell
# has a value; be sure to filter out cell failures.
nodes = next(
(nodes for nodes in nodes_by_cell.values()
if nodes and not context_module.is_cell_failure_sentinel(nodes)),
objects.ComputeNodeList())
return nodes
def refresh_cells_caches(self):
# NOTE(tssurya): This function is called from the scheduler manager's
# reset signal handler and also upon startup of the scheduler.
context = context_module.RequestContext()
temp_cells = objects.CellMappingList.get_all(context)
# NOTE(tssurya): filtering cell0 from the list since it need
# not be considered for scheduling.
for c in temp_cells:
if c.is_cell0():
temp_cells.objects.remove(c)
# once it's done, break for optimization
break
# NOTE(danms, tssurya): global dict, keyed by cell uuid, of cells
# cached which will be refreshed every time a SIGHUP is sent to the
# scheduler.
self.cells = {cell.uuid: cell for cell in temp_cells}
LOG.debug('Found %(count)i cells: %(cells)s',
{'count': len(self.cells),
'cells': ', '.join(self.cells)})
# NOTE(tssurya): Global cache of only the enabled cells. This way
# scheduling is limited only to the enabled cells. However this
# cache will be refreshed every time a cell is disabled or enabled
# or when a new cell is created as long as a SIGHUP signal is sent
# to the scheduler.
self.enabled_cells = [c for c in temp_cells if not c.disabled]
# Filtering the disabled cells only for logging purposes.
if LOG.isEnabledFor(logging.DEBUG):
disabled_cells = [c for c in temp_cells if c.disabled]
LOG.debug('Found %(count)i disabled cells: %(cells)s',
{'count': len(disabled_cells),
'cells': ', '.join(
[c.identity for c in disabled_cells])})
# Dict, keyed by host name, to cell UUID to be used to look up the
# cell a particular host is in (used with self.cells).
self.host_to_cell_uuid = {}
def get_host_states_by_uuids(self, context, compute_uuids, spec_obj):
if not self.cells:
LOG.warning("No cells were found")
# Restrict to a single cell if and only if the request spec has a
# requested cell and allow_cross_cell_move=False.
if (spec_obj and 'requested_destination' in spec_obj and
spec_obj.requested_destination and
'cell' in spec_obj.requested_destination and
not spec_obj.requested_destination.allow_cross_cell_move):
only_cell = spec_obj.requested_destination.cell
else:
only_cell = None
if only_cell:
cells = [only_cell]
else:
cells = self.enabled_cells
compute_nodes, services = self._get_computes_for_cells(
context, cells, compute_uuids=compute_uuids)
return self._get_host_states(context, compute_nodes, services)
def _get_host_states(self, context, compute_nodes, services):
"""Returns a generator over HostStates given a list of computes.
Also updates the HostStates internal mapping for the HostManager.
"""
# Get resource usage across the available compute nodes:
host_state_map = {}
seen_nodes = set()
for cell_uuid, computes in compute_nodes.items():
for compute in computes:
service = services.get(compute.host)
if not service:
LOG.warning(
"No compute service record found for host %(host)s",
{'host': compute.host})
continue
host = compute.host
node = compute.hypervisor_hostname
state_key = (host, node)
host_state = host_state_map.get(state_key)
if not host_state:
host_state = self.host_state_cls(host, node,
cell_uuid,
compute=compute)
host_state_map[state_key] = host_state
# Force an update of the aggregates info each time a new
# request comes in, because the aggregates could have
# changed after this field was first set
host_state.update(compute,
dict(service),
self._get_aggregates_info(host),
self._get_instance_info(context, compute))
seen_nodes.add(state_key)
return (host_state_map[host] for host in seen_nodes)
def _get_aggregates_info(self, host):
return [self.aggs_by_id[agg_id] for agg_id in
self.host_aggregates_map[host]]
def _get_cell_mapping_for_host(self, context, host_name):
"""Finds the CellMapping for a particular host name
Relies on a cache to quickly fetch the CellMapping if we have looked
up this host before, otherwise gets the CellMapping via the
HostMapping record for the given host name.
:param context: nova auth request context
:param host_name: compute service host name
:returns: CellMapping object
:raises: HostMappingNotFound if the host is not mapped to a cell
"""
# Check to see if we have the host in our cache.
if host_name in self.host_to_cell_uuid:
cell_uuid = self.host_to_cell_uuid[host_name]
if cell_uuid in self.cells:
return self.cells[cell_uuid]
# Something is wrong so log a warning and just fall through to
# look up the HostMapping.
LOG.warning('Host %s is expected to be in cell %s but that cell '
'uuid was not found in our cache. The service may '
'need to be restarted to refresh the cache.',
host_name, cell_uuid)
# We have not cached this host yet so get the HostMapping, cache the
# result and return the CellMapping.
hm = objects.HostMapping.get_by_host(context, host_name)
cell_mapping = hm.cell_mapping
self.host_to_cell_uuid[host_name] = cell_mapping.uuid
return cell_mapping
def _get_instances_by_host(self, context, host_name):
try:
cm = self._get_cell_mapping_for_host(context, host_name)
except exception.HostMappingNotFound:
# It's possible to hit this when the compute service first starts
# up and casts to update_instance_info with an empty list but
# before the host is mapped in the API database.
LOG.info('Host mapping not found for host %s. Not tracking '
'instance info for this host.', host_name)
return {}
with context_module.target_cell(context, cm) as cctxt:
uuids = objects.InstanceList.get_uuids_by_host(cctxt, host_name)
# Putting the context in the otherwise fake Instance object at
# least allows out of tree filters to lazy-load fields.
return {uuid: objects.Instance(cctxt, uuid=uuid) for uuid in uuids}
def _get_instance_info(self, context, compute):
"""Gets the host instance info from the compute host.
Some sites may disable ``track_instance_changes`` for performance or
isolation reasons. When it is disabled, there will either be no
information for the host, or the 'updated' value for that host dict
will be False. In either case, we need to grab the current InstanceList
instead of relying on the version in _instance_info.
"""
host_name = compute.host
host_info = self._instance_info.get(host_name)
if host_info and host_info.get("updated"):
inst_dict = host_info["instances"]
else:
# Updates aren't flowing from nova-compute.
inst_dict = self._get_instances_by_host(context, host_name)
return inst_dict
def _recreate_instance_info(self, context, host_name):
"""Get the InstanceList for the specified host, and store it in the
_instance_info dict.
"""
inst_dict = self._get_instances_by_host(context, host_name)
host_info = self._instance_info[host_name] = {}
host_info["instances"] = inst_dict
host_info["updated"] = False
@utils.synchronized(HOST_INSTANCE_SEMAPHORE)
def update_instance_info(self, context, host_name, instance_info):
"""Receives an InstanceList object from a compute node.
This method receives information from a compute node when it starts up,
or when its instances have changed, and updates its view of hosts and
instances with it.
"""
host_info = self._instance_info.get(host_name)
if host_info:
inst_dict = host_info.get("instances")
for instance in instance_info.objects:
# Overwrite the entry (if any) with the new info.
inst_dict[instance.uuid] = instance
host_info["updated"] = True
else:
instances = instance_info.objects
if len(instances) > 1:
# This is a host sending its full instance list, so use it.
host_info = self._instance_info[host_name] = {}
host_info["instances"] = {instance.uuid: instance
for instance in instances}
host_info["updated"] = True
else:
self._recreate_instance_info(context, host_name)
LOG.info("Received an update from an unknown host '%s'. "
"Re-created its InstanceList.", host_name)
@utils.synchronized(HOST_INSTANCE_SEMAPHORE)
def delete_instance_info(self, context, host_name, instance_uuid):
"""Receives the UUID from a compute node when one of its instances is
terminated.
The instance in the local view of the host's instances is removed.
"""
host_info = self._instance_info.get(host_name)
if host_info:
inst_dict = host_info["instances"]
# Remove the existing Instance object, if any
inst_dict.pop(instance_uuid, None)
host_info["updated"] = True
else:
self._recreate_instance_info(context, host_name)
LOG.info("Received a delete update from an unknown host '%s'. "
"Re-created its InstanceList.", host_name)
@utils.synchronized(HOST_INSTANCE_SEMAPHORE)
def sync_instance_info(self, context, host_name, instance_uuids):
"""Receives the uuids of the instances on a host.
This method is periodically called by the compute nodes, which send a
list of all the UUID values for the instances on that node. This is
used by the scheduler's HostManager to detect when its view of the
compute node's instances is out of sync.
"""
host_info = self._instance_info.get(host_name)
if host_info:
local_set = set(host_info["instances"].keys())
compute_set = set(instance_uuids)
if not local_set == compute_set:
self._recreate_instance_info(context, host_name)
LOG.info("The instance sync for host '%s' did not match. "
"Re-created its InstanceList.", host_name)
return
host_info["updated"] = True
LOG.debug("Successfully synced instances from host '%s'.",
host_name)
else:
self._recreate_instance_info(context, host_name)
LOG.info("Received a sync request from an unknown host '%s'. "
"Re-created its InstanceList.", host_name)
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2021 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
""" CSW 2.0.2 request and response processor """
import inspect
import warnings
from io import BytesIO
import random
from urllib.parse import urlencode
from owslib.etree import etree
from owslib import fes
from owslib import util
from owslib import ows
from owslib.iso import MD_Metadata, FC_FeatureCatalogue
from owslib.fgdc import Metadata
from owslib.dif import DIF
from owslib.gm03 import GM03
from owslib.namespaces import Namespaces
from owslib.util import cleanup_namespaces, bind_url, add_namespaces, OrderedDict, Authentication, openURL, http_post
# default variables
outputformat = 'application/xml'
def get_namespaces():
n = Namespaces()
return n.get_namespaces()
namespaces = get_namespaces()
schema = 'http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd'
schema_location = '%s %s' % (namespaces['csw'], schema)
class CatalogueServiceWeb(object):
""" csw request class """
def __init__(self, url, lang='en-US', version='2.0.2', timeout=10, skip_caps=False,
username=None, password=None, auth=None, headers=None):
"""
Construct and process a GetCapabilities request
Parameters
----------
- url: the URL of the CSW
- lang: the language (default is 'en-US')
- version: version (default is '2.0.2')
- timeout: timeout in seconds
- skip_caps: whether to skip GetCapabilities processing on init (default is False)
- username: username for HTTP basic authentication
- password: password for HTTP basic authentication
- auth: instance of owslib.util.Authentication
- headers: HTTP headers to send with requests
"""
if auth:
if username:
auth.username = username
if password:
auth.password = password
self.url = util.clean_ows_url(url)
self.lang = lang
self.version = version
self.timeout = timeout
self.auth = auth or Authentication(username, password)
self.headers = headers
self.service = 'CSW'
self.exceptionreport = None
self.owscommon = ows.OwsCommon('1.0.0')
if not skip_caps: # process GetCapabilities
# construct request
data = {'service': self.service, 'version': self.version, 'request': 'GetCapabilities'}
self.request = urlencode(data)
self._invoke()
if self.exceptionreport is None:
self.updateSequence = self._exml.getroot().attrib.get('updateSequence')
# ServiceIdentification
val = self._exml.find(util.nspath_eval('ows:ServiceIdentification', namespaces))
if val is not None:
self.identification = ows.ServiceIdentification(val, self.owscommon.namespace)
else:
self.identification = None
# ServiceProvider
val = self._exml.find(util.nspath_eval('ows:ServiceProvider', namespaces))
if val is not None:
self.provider = ows.ServiceProvider(val, self.owscommon.namespace)
else:
self.provider = None
# ServiceOperations metadata
self.operations = []
for elem in self._exml.findall(util.nspath_eval('ows:OperationsMetadata/ows:Operation', namespaces)):
self.operations.append(ows.OperationsMetadata(elem, self.owscommon.namespace))
self.constraints = {}
for elem in self._exml.findall(util.nspath_eval('ows:OperationsMetadata/ows:Constraint', namespaces)):
self.constraints[elem.attrib['name']] = ows.Constraint(elem, self.owscommon.namespace)
self.parameters = {}
for elem in self._exml.findall(util.nspath_eval('ows:OperationsMetadata/ows:Parameter', namespaces)):
self.parameters[elem.attrib['name']] = ows.Parameter(elem, self.owscommon.namespace)
# FilterCapabilities
val = self._exml.find(util.nspath_eval('ogc:Filter_Capabilities', namespaces))
self.filters = fes.FilterCapabilities(val)
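# Hedged usage sketch (the endpoint URL is a placeholder and the import path may
# vary between OWSLib versions; nothing here is taken from this source file):
#   from owslib.csw import CatalogueServiceWeb
#   csw = CatalogueServiceWeb('https://example.org/csw', timeout=30)
#   print(csw.identification.title)   # populated from GetCapabilities unless skip_caps=True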
def describerecord(self, typename='csw:Record', format=outputformat):
"""
Construct and process DescribeRecord request
Parameters
----------
- typename: the typename to describe (default is 'csw:Record')
- format: the outputFormat (default is 'application/xml')
"""
# construct request
node0 = self._setrootelement('csw:DescribeRecord')
node0.set('service', self.service)
node0.set('version', self.version)
node0.set('outputFormat', format)
node0.set('schemaLanguage', namespaces['xs2'])
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
etree.SubElement(node0, util.nspath_eval('csw:TypeName', namespaces)).text = typename
self.request = node0
self._invoke()
# parse result
# TODO: process the XML Schema (you're on your own for now with self.response)
def getdomain(self, dname, dtype='parameter'):
"""
Construct and process a GetDomain request
Parameters
----------
- dname: the value of the Parameter or Property to query
- dtype: whether to query a parameter (parameter) or property (property)
"""
# construct request
dtypename = 'ParameterName'
node0 = self._setrootelement('csw:GetDomain')
node0.set('service', self.service)
node0.set('version', self.version)
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
if dtype == 'property':
dtypename = 'PropertyName'
etree.SubElement(node0, util.nspath_eval('csw:%s' % dtypename, namespaces)).text = dname
self.request = node0
self._invoke()
if self.exceptionreport is None:
self.results = {}
val = self._exml.find(util.nspath_eval('csw:DomainValues', namespaces)).attrib.get('type')
self.results['type'] = util.testXMLValue(val, True)
val = self._exml.find(util.nspath_eval('csw:DomainValues/csw:%s' % dtypename, namespaces))
self.results[dtype] = util.testXMLValue(val)
# get the list of values associated with the Domain
self.results['values'] = []
for f in self._exml.findall(util.nspath_eval('csw:DomainValues/csw:ListOfValues/csw:Value', namespaces)):
self.results['values'].append(util.testXMLValue(f))
def getrecords(self, qtype=None, keywords=[], typenames='csw:Record', propertyname='csw:AnyText', bbox=None,
esn='summary', sortby=None, outputschema=namespaces['csw'], format=outputformat, startposition=0,
maxrecords=10, cql=None, xml=None, resulttype='results'):
"""
Construct and process a GetRecords request
Parameters
----------
- qtype: type of resource to query (i.e. service, dataset)
- keywords: list of keywords
- typenames: the typeNames to query against (default is csw:Record)
- propertyname: the PropertyName to Filter against
- bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]
- esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'summary')
- sortby: property to sort results on
- outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')
- format: the outputFormat (default is 'application/xml')
- startposition: requests a slice of the result set, starting at this position (default is 0)
- maxrecords: the maximum number of records to return. No records are returned if 0 (default is 10)
- cql: common query language text. Note this overrides bbox, qtype, keywords
- xml: raw XML request. Note this overrides all other options
- resulttype: the resultType 'hits', 'results', 'validate' (default is 'results')
"""
warnings.warn("""Please use the updated 'getrecords2' method instead of 'getrecords'.
The 'getrecords' method will be upgraded to use the 'getrecords2' parameters
in a future version of OWSLib.""")
if xml is not None:
self.request = etree.fromstring(xml)
val = self.request.find(util.nspath_eval('csw:Query/csw:ElementSetName', namespaces))
if val is not None:
esn = util.testXMLValue(val)
else:
# construct request
node0 = self._setrootelement('csw:GetRecords')
if etree.__name__ != 'lxml.etree': # apply nsmap manually
node0.set('xmlns:ows', namespaces['ows'])
node0.set('xmlns:gmd', namespaces['gmd'])
node0.set('xmlns:dif', namespaces['dif'])
node0.set('xmlns:fgdc', namespaces['fgdc'])
node0.set('outputSchema', outputschema)
node0.set('outputFormat', format)
node0.set('version', self.version)
node0.set('resultType', resulttype)
node0.set('service', self.service)
if startposition > 0:
node0.set('startPosition', str(startposition))
node0.set('maxRecords', str(maxrecords))
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
node1 = etree.SubElement(node0, util.nspath_eval('csw:Query', namespaces))
node1.set('typeNames', typenames)
etree.SubElement(node1, util.nspath_eval('csw:ElementSetName', namespaces)).text = esn
self._setconstraint(node1, qtype, propertyname, keywords, bbox, cql, None)
if sortby is not None:
fes.setsortby(node1, sortby)
self.request = node0
self._invoke()
if self.exceptionreport is None:
self.results = {}
# process search results attributes
val = self._exml.find(
util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsMatched')
self.results['matches'] = int(util.testXMLValue(val, True))
val = self._exml.find(
util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsReturned')
self.results['returned'] = int(util.testXMLValue(val, True))
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('nextRecord')
self.results['nextrecord'] = int(util.testXMLValue(val, True))
# process list of matching records
self.records = OrderedDict()
self._parserecords(outputschema, esn)
def getrecordbyid(self, id=[], esn='full', outputschema=namespaces['csw'], format=outputformat):
"""
Construct and process a GetRecordById request
Parameters
----------
- id: the list of Ids
- esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'full')
- outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')
- format: the outputFormat (default is 'application/xml')
"""
# construct request
data = {
'service': self.service,
'version': self.version,
'request': 'GetRecordById',
'outputFormat': format,
'outputSchema': outputschema,
'elementsetname': esn,
'id': ','.join(id),
}
self.request = urlencode(data)
self._invoke()
if self.exceptionreport is None:
self.results = {}
self.records = OrderedDict()
self._parserecords(outputschema, esn)
def getrecords2(self, constraints=[], sortby=None, typenames='csw:Record', esn='summary',
outputschema=namespaces['csw'], format=outputformat, startposition=0,
maxrecords=10, cql=None, xml=None, resulttype='results',
distributedsearch=False, hopcount=1):
"""
Construct and process a GetRecords request
Parameters
----------
- constraints: the list of constraints (OgcExpression from owslib.fes module)
- sortby: an OGC SortBy object (SortBy from owslib.fes module)
- typenames: the typeNames to query against (default is csw:Record)
- esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'summary')
- outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')
- format: the outputFormat (default is 'application/xml')
- startposition: requests a slice of the result set, starting at this position (default is 0)
- maxrecords: the maximum number of records to return. No records are returned if 0 (default is 10)
- cql: common query language text. Note this overrides bbox, qtype, keywords
- xml: raw XML request. Note this overrides all other options
- resulttype: the resultType 'hits', 'results', 'validate' (default is 'results')
- distributedsearch: `bool` of whether to trigger distributed search
- hopcount: number of message hops before search is terminated (default is 1)
"""
if xml is not None:
if xml.startswith(b'<'):
self.request = etree.fromstring(xml)
val = self.request.find(util.nspath_eval('csw:Query/csw:ElementSetName', namespaces))
if val is not None:
esn = util.testXMLValue(val)
val = self.request.attrib.get('outputSchema')
if val is not None:
outputschema = util.testXMLValue(val, True)
else:
self.request = xml
else:
# construct request
node0 = self._setrootelement('csw:GetRecords')
if etree.__name__ != 'lxml.etree': # apply nsmap manually
node0.set('xmlns:ows', namespaces['ows'])
node0.set('xmlns:gmd', namespaces['gmd'])
node0.set('xmlns:dif', namespaces['dif'])
node0.set('xmlns:fgdc', namespaces['fgdc'])
node0.set('outputSchema', outputschema)
node0.set('outputFormat', format)
node0.set('version', self.version)
node0.set('service', self.service)
node0.set('resultType', resulttype)
if startposition > 0:
node0.set('startPosition', str(startposition))
node0.set('maxRecords', str(maxrecords))
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
if distributedsearch:
etree.SubElement(node0, util.nspath_eval('csw:DistributedSearch', namespaces), hopCount=str(hopcount))
node1 = etree.SubElement(node0, util.nspath_eval('csw:Query', namespaces))
node1.set('typeNames', typenames)
etree.SubElement(node1, util.nspath_eval('csw:ElementSetName', namespaces)).text = esn
if any([len(constraints) > 0, cql is not None]):
node2 = etree.SubElement(node1, util.nspath_eval('csw:Constraint', namespaces))
node2.set('version', '1.1.0')
flt = fes.FilterRequest()
if len(constraints) > 0:
node2.append(flt.setConstraintList(constraints))
# Now add a CQL filter if passed in
elif cql is not None:
etree.SubElement(node2, util.nspath_eval('csw:CqlText', namespaces)).text = cql
if sortby is not None and isinstance(sortby, fes.SortBy):
node1.append(sortby.toXML())
self.request = node0
self._invoke()
if self.exceptionreport is None:
self.results = {}
# process search results attributes
val = self._exml.find(
util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsMatched')
self.results['matches'] = int(util.testXMLValue(val, True))
val = self._exml.find(
util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsReturned')
self.results['returned'] = int(util.testXMLValue(val, True))
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('nextRecord')
if val is not None:
self.results['nextrecord'] = int(util.testXMLValue(val, True))
else:
warnings.warn("""CSW Server did not supply a nextRecord value (it is optional), so the client
should page through the results in another way.""")
# For more info, see:
# https://github.com/geopython/OWSLib/issues/100
self.results['nextrecord'] = None
# process list of matching records
self.records = OrderedDict()
self._parserecords(outputschema, esn)
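# Hedged usage sketch for getrecords2 (query text and result numbers are illustrative):
#   from owslib.fes import PropertyIsLike
#   query = PropertyIsLike('csw:AnyText', '%ocean%')
#   csw.getrecords2(constraints=[query], maxrecords=20, esn='full')
#   csw.results    # e.g. {'matches': 123, 'returned': 20, 'nextrecord': 21}
#   csw.records    # OrderedDict of parsed records keyed by identifier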
def transaction(self, ttype=None, typename='csw:Record', record=None, propertyname=None, propertyvalue=None,
bbox=None, keywords=[], cql=None, identifier=None):
"""
Construct and process a Transaction request
Parameters
----------
- ttype: the type of transaction 'insert, 'update', 'delete'
- typename: the typename to describe (default is 'csw:Record')
- record: the XML record to insert
- propertyname: the RecordProperty/PropertyName to Filter against
- propertyvalue: the RecordProperty Value to Filter against (for updates)
- bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]
- keywords: list of keywords
- cql: common query language text. Note this overrides bbox, qtype, keywords
- identifier: record identifier. Note this overrides bbox, qtype, keywords, cql
"""
# construct request
node0 = self._setrootelement('csw:Transaction')
node0.set('version', self.version)
node0.set('service', self.service)
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
validtransactions = ['insert', 'update', 'delete']
if ttype not in validtransactions: # invalid transaction
raise RuntimeError('Invalid transaction \'%s\'.' % ttype)
node1 = etree.SubElement(node0, util.nspath_eval('csw:%s' % ttype.capitalize(), namespaces))
if ttype != 'update':
node1.set('typeName', typename)
if ttype == 'insert':
if record is None:
raise RuntimeError('Nothing to insert.')
node1.append(etree.fromstring(record))
if ttype == 'update':
if record is not None:
node1.append(etree.fromstring(record))
else:
if propertyname is not None and propertyvalue is not None:
node2 = etree.SubElement(node1, util.nspath_eval('csw:RecordProperty', namespaces))
etree.SubElement(node2, util.nspath_eval('csw:Name', namespaces)).text = propertyname
etree.SubElement(node2, util.nspath_eval('csw:Value', namespaces)).text = propertyvalue
self._setconstraint(node1, None, propertyname, keywords, bbox, cql, identifier)
if ttype == 'delete':
self._setconstraint(node1, None, propertyname, keywords, bbox, cql, identifier)
self.request = node0
self._invoke()
self.results = {}
if self.exceptionreport is None:
self._parsetransactionsummary()
self._parseinsertresult()
def harvest(self, source, resourcetype, resourceformat=None, harvestinterval=None, responsehandler=None):
"""
Construct and process a Harvest request
Parameters
----------
- source: a URI to harvest
- resourcetype: namespace identifying the type of resource
- resourceformat: MIME type of the resource
- harvestinterval: frequency of harvesting, in ISO8601
- responsehandler: endpoint to which the CSW should send its response
"""
# construct request
node0 = self._setrootelement('csw:Harvest')
node0.set('version', self.version)
node0.set('service', self.service)
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
etree.SubElement(node0, util.nspath_eval('csw:Source', namespaces)).text = source
etree.SubElement(node0, util.nspath_eval('csw:ResourceType', namespaces)).text = resourcetype
if resourceformat is not None:
etree.SubElement(node0, util.nspath_eval('csw:ResourceFormat', namespaces)).text = resourceformat
if harvestinterval is not None:
etree.SubElement(node0, util.nspath_eval('csw:HarvestInterval', namespaces)).text = harvestinterval
if responsehandler is not None:
etree.SubElement(node0, util.nspath_eval('csw:ResponseHandler', namespaces)).text = responsehandler
self.request = node0
self._invoke()
self.results = {}
if self.exceptionreport is None:
val = self._exml.find(util.nspath_eval('csw:Acknowledgement', namespaces))
if util.testXMLValue(val) is not None:
ts = val.attrib.get('timeStamp')
self.timestamp = util.testXMLValue(ts, True)
id = val.find(util.nspath_eval('csw:RequestId', namespaces))
self.id = util.testXMLValue(id)
else:
self._parsetransactionsummary()
self._parseinsertresult()
def get_operation_by_name(self, name):
"""Return a named operation"""
for item in self.operations:
if item.name.lower() == name.lower():
return item
raise KeyError("No operation named %s" % name)
def getService_urls(self, service_string=None):
"""
Return easily identifiable URLs for all service types
Parameters
----------
- service_string: a URI to lookup
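Example (the scheme string is an assumption about the catalogue's metadata)::
    wms_urls = csw.getService_urls('OGC:WMS')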
"""
urls = []
for key, rec in list(self.records.items()):
# create a generator and iterate through it until a reference with a matching scheme is found;
# if none matches, fall back to the default value (None)
url = next((d['url'] for d in rec.references if d['scheme'] == service_string), None)
if url is not None:
urls.append(url)
return urls
def _parseinsertresult(self):
self.results['insertresults'] = []
for i in self._exml.findall('.//' + util.nspath_eval('csw:InsertResult', namespaces)):
for j in i.findall(util.nspath_eval('csw:BriefRecord/dc:identifier', namespaces)):
self.results['insertresults'].append(util.testXMLValue(j))
def _parserecords(self, outputschema, esn):
if outputschema == namespaces['gmd']: # iso 19139
for i in self._exml.findall('.//' + util.nspath_eval('gmd:MD_Metadata', namespaces)) or \
self._exml.findall('.//' + util.nspath_eval('gmi:MI_Metadata', namespaces)):
val = i.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))
identifier = self._setidentifierkey(util.testXMLValue(val))
self.records[identifier] = MD_Metadata(i)
for i in self._exml.findall('.//' + util.nspath_eval('gfc:FC_FeatureCatalogue', namespaces)):
identifier = self._setidentifierkey(util.testXMLValue(i.attrib['uuid'], attrib=True))
self.records[identifier] = FC_FeatureCatalogue(i)
elif outputschema == namespaces['fgdc']: # fgdc csdgm
for i in self._exml.findall('.//metadata'):
val = i.find('idinfo/datasetid')
identifier = self._setidentifierkey(util.testXMLValue(val))
self.records[identifier] = Metadata(i)
elif outputschema == namespaces['dif']: # nasa dif
for i in self._exml.findall('.//' + util.nspath_eval('dif:DIF', namespaces)):
val = i.find(util.nspath_eval('dif:Entry_ID', namespaces))
identifier = self._setidentifierkey(util.testXMLValue(val))
self.records[identifier] = DIF(i)
elif outputschema == namespaces['gm03']: # GM03
for i in self._exml.findall('.//' + util.nspath_eval('gm03:TRANSFER', namespaces)):
val = i.find(util.nspath_eval('gm03:fileIdentifier', namespaces))
identifier = self._setidentifierkey(util.testXMLValue(val))
self.records[identifier] = GM03(i)
else: # process default
for i in self._exml.findall('.//' + util.nspath_eval('csw:%s' % self._setesnel(esn), namespaces)):
val = i.find(util.nspath_eval('dc:identifier', namespaces))
identifier = self._setidentifierkey(util.testXMLValue(val))
self.records[identifier] = CswRecord(i)
def _parsetransactionsummary(self):
val = self._exml.find(util.nspath_eval('csw:TransactionResponse/csw:TransactionSummary', namespaces))
if val is not None:
rid = val.attrib.get('requestId')
self.results['requestid'] = util.testXMLValue(rid, True)
ts = val.find(util.nspath_eval('csw:totalInserted', namespaces))
self.results['inserted'] = int(util.testXMLValue(ts))
ts = val.find(util.nspath_eval('csw:totalUpdated', namespaces))
self.results['updated'] = int(util.testXMLValue(ts))
ts = val.find(util.nspath_eval('csw:totalDeleted', namespaces))
self.results['deleted'] = int(util.testXMLValue(ts))
def _setesnel(self, esn):
""" Set the element name to parse depending on the ElementSetName requested """
el = 'Record'
if esn == 'brief':
el = 'BriefRecord'
if esn == 'summary':
el = 'SummaryRecord'
return el
def _setidentifierkey(self, el):
if el is None:
return 'owslib_random_%i' % random.randint(1, 65536)
else:
return el
def _setrootelement(self, el):
if etree.__name__ == 'lxml.etree': # apply nsmap
return etree.Element(util.nspath_eval(el, namespaces), nsmap=namespaces)
else:
return etree.Element(util.nspath_eval(el, namespaces))
def _setconstraint(self, parent, qtype=None, propertyname='csw:AnyText', keywords=[], bbox=None, cql=None,
identifier=None):
if keywords or bbox is not None or qtype is not None or cql is not None or identifier is not None:
node0 = etree.SubElement(parent, util.nspath_eval('csw:Constraint', namespaces))
node0.set('version', '1.1.0')
if identifier is not None: # set identifier filter, overrides all other parameters
flt = fes.FilterRequest()
node0.append(flt.set(identifier=identifier))
elif cql is not None: # send raw CQL query
# CQL passed, overrides all other parameters
node1 = etree.SubElement(node0, util.nspath_eval('csw:CqlText', namespaces))
node1.text = cql
else: # construct a Filter request
flt = fes.FilterRequest()
node0.append(flt.set(qtype=qtype, keywords=keywords, propertyname=propertyname, bbox=bbox))
def _invoke(self):
# do HTTP request
request_url = self.url
# Get correct URL based on Operation list.
# If skip_caps=True, then self.operations has not been set, so use
# default URL.
if hasattr(self, 'operations'):
caller = inspect.stack()[1][3]
if caller == 'getrecords2':
caller = 'getrecords'
try:
op = self.get_operation_by_name(caller)
if isinstance(self.request, str): # GET KVP
get_verbs = [x for x in op.methods if x.get('type').lower() == 'get']
request_url = get_verbs[0].get('url')
else:
post_verbs = [x for x in op.methods if x.get('type').lower() == 'post']
if len(post_verbs) > 1:
# Filter by constraints. We must match a PostEncoding of "XML"
for pv in post_verbs:
for const in pv.get('constraints'):
if const.name.lower() == 'postencoding':
values = [v.lower() for v in const.values]
if 'xml' in values:
request_url = pv.get('url')
break
else:
# Well, just use the first one.
request_url = post_verbs[0].get('url')
elif len(post_verbs) == 1:
request_url = post_verbs[0].get('url')
except Exception: # no such luck, just go with request_url
pass
if isinstance(self.request, str): # GET KVP
self.request = '%s%s' % (bind_url(request_url), self.request)
self.response = openURL(
self.request, None, 'Get', timeout=self.timeout, auth=self.auth,
headers=self.headers).read()
else:
self.request = cleanup_namespaces(self.request)
# Add any namespaces used in the "typeNames" attribute of the
# csw:Query element to the query's xml namespaces.
for query in self.request.findall(util.nspath_eval('csw:Query', namespaces)):
ns = query.get("typeNames", None)
if ns is not None:
# Pull out "gmd" from something like "gmd:MD_Metadata" from the list
# of typenames
ns_keys = [x.split(':')[0] for x in ns.split(' ')]
self.request = add_namespaces(self.request, ns_keys)
self.request = add_namespaces(self.request, 'ows')
self.request = util.element_to_string(self.request, encoding='utf-8')
self.response = http_post(request_url, self.request, self.lang, self.timeout, auth=self.auth).content
# parse result to see if it's XML
self._exml = etree.parse(BytesIO(self.response))
# it's XML. Attempt to decipher whether the XML response is CSW-ish
valid_xpaths = [
util.nspath_eval('ows:ExceptionReport', namespaces),
util.nspath_eval('csw:Capabilities', namespaces),
util.nspath_eval('csw:DescribeRecordResponse', namespaces),
util.nspath_eval('csw:GetDomainResponse', namespaces),
util.nspath_eval('csw:GetRecordsResponse', namespaces),
util.nspath_eval('csw:GetRecordByIdResponse', namespaces),
util.nspath_eval('csw:HarvestResponse', namespaces),
util.nspath_eval('csw:TransactionResponse', namespaces)
]
if self._exml.getroot().tag not in valid_xpaths:
raise RuntimeError('Document is XML, but not CSW-ish')
# check if it's an OGC Exception
val = self._exml.find(util.nspath_eval('ows:Exception', namespaces))
if val is not None:
raise ows.ExceptionReport(self._exml, self.owscommon.namespace)
else:
self.exceptionreport = None
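# Typical read-only access pattern for the CswRecord objects below, assuming
# 'csw.records' was populated by a GetRecords call (which attributes are
# non-None depends on the record served):
#   for identifier, rec in csw.records.items():
#       print(identifier, rec.title, rec.type, rec.bbox)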
class CswRecord(object):
""" Process csw:Record, csw:BriefRecord, csw:SummaryRecord """
def __init__(self, record):
if hasattr(record, 'getroot'): # standalone document
self.xml = etree.tostring(record.getroot())
else: # part of a larger document
self.xml = etree.tostring(record)
# check to see if Dublin Core record comes from
# rdf:RDF/rdf:Description container
# (child content model is identical)
self.rdf = False
rdf = record.find(util.nspath_eval('rdf:Description', namespaces))
if rdf is not None:
self.rdf = True
record = rdf
# some CSWs return records with multiple identifiers based on
# different schemes. Use the first dc:identifier value to set
# self.identifier, and set self.identifiers as a list of dicts
val = record.find(util.nspath_eval('dc:identifier', namespaces))
self.identifier = util.testXMLValue(val)
self.identifiers = []
for i in record.findall(util.nspath_eval('dc:identifier', namespaces)):
d = {}
d['scheme'] = i.attrib.get('scheme')
d['identifier'] = i.text
self.identifiers.append(d)
val = record.find(util.nspath_eval('dc:type', namespaces))
self.type = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:title', namespaces))
self.title = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:alternative', namespaces))
self.alternative = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:isPartOf', namespaces))
self.ispartof = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:abstract', namespaces))
self.abstract = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:date', namespaces))
self.date = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:created', namespaces))
self.created = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:issued', namespaces))
self.issued = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:relation', namespaces))
self.relation = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:temporal', namespaces))
self.temporal = util.testXMLValue(val)
self.uris = [] # list of dicts
for i in record.findall(util.nspath_eval('dc:URI', namespaces)):
uri = {}
uri['protocol'] = util.testXMLValue(i.attrib.get('protocol'), True)
uri['name'] = util.testXMLValue(i.attrib.get('name'), True)
uri['description'] = util.testXMLValue(i.attrib.get('description'), True)
uri['url'] = util.testXMLValue(i)
self.uris.append(uri)
self.references = [] # list of dicts
for i in record.findall(util.nspath_eval('dct:references', namespaces)):
ref = {}
ref['scheme'] = util.testXMLValue(i.attrib.get('scheme'), True)
ref['url'] = util.testXMLValue(i)
self.references.append(ref)
val = record.find(util.nspath_eval('dct:modified', namespaces))
self.modified = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:creator', namespaces))
self.creator = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:publisher', namespaces))
self.publisher = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:coverage', namespaces))
self.coverage = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:contributor', namespaces))
self.contributor = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:language', namespaces))
self.language = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:source', namespaces))
self.source = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:rightsHolder', namespaces))
self.rightsholder = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:accessRights', namespaces))
self.accessrights = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:license', namespaces))
self.license = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:format', namespaces))
self.format = util.testXMLValue(val)
self.subjects = []
for i in record.findall(util.nspath_eval('dc:subject', namespaces)):
self.subjects.append(util.testXMLValue(i))
self.rights = []
for i in record.findall(util.nspath_eval('dc:rights', namespaces)):
self.rights.append(util.testXMLValue(i))
val = record.find(util.nspath_eval('dct:spatial', namespaces))
self.spatial = util.testXMLValue(val)
val = record.find(util.nspath_eval('ows:BoundingBox', namespaces))
if val is not None:
self.bbox = ows.BoundingBox(val, namespaces['ows'])
else:
self.bbox = None
val = record.find(util.nspath_eval('ows:WGS84BoundingBox', namespaces))
if val is not None:
self.bbox_wgs84 = ows.WGS84BoundingBox(val, namespaces['ows'])
else:
self.bbox_wgs84 = None
|
|
#!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2006 (dv)
# Tamas Pal, 2007 (folti)
# Nicolas Mercier, 2009
# Microsoft Visual C++/Intel C++ compiler support - beta, needs more testing
# usage:
#
# conf.env['MSVC_VERSIONS'] = ['msvc 9.0', 'msvc 8.0', 'wsdk 7.0', 'intel 11', 'PocketPC 9.0', 'Smartphone 8.0']
# conf.env['MSVC_TARGETS'] = ['x64']
# conf.check_tool('msvc')
# OR conf.check_tool('msvc', funs='no_autodetect')
# conf.check_lib_msvc('gdi32')
# conf.check_libs_msvc('kernel32 user32', mandatory=True)
# ...
# obj.uselib = 'KERNEL32 USER32 GDI32'
#
# platforms and targets will be tested in the order they appear;
# the first good configuration will be used
# supported platforms :
# ia64, x64, x86, x86_amd64, x86_ia64
# compilers supported :
# msvc => Visual Studio, versions 7.1 (2003), 8.0 (2005), 9.0 (2008)
# wsdk => Windows SDK, versions 6.0, 6.1, 7.0
# icl => Intel compiler, versions 9,10,11
# Smartphone => Compiler/SDK for Smartphone devices (armv4/v4i)
# PocketPC => Compiler/SDK for PocketPC devices (armv4/v4i)
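#
# a minimal, hedged wscript sketch using the options above (version/target values are illustrative):
#
# def configure(conf):
#     conf.env['MSVC_VERSIONS'] = ['msvc 9.0', 'wsdk 7.0']
#     conf.env['MSVC_TARGETS'] = ['x86']
#     conf.check_tool('msvc')
#     conf.check_lib_msvc('gdi32')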
import os, sys, re, string, optparse
import Utils, TaskGen, Runner, Configure, Task, Options
from Logs import debug, info, warn, error
from TaskGen import after, before, feature
from Configure import conftest, conf
import ccroot, cc, cxx, ar, winres
from libtool import read_la_file
import _winreg
pproc = Utils.pproc
# importlibs provided by MSVC/Platform SDK. Do NOT search them....
g_msvc_systemlibs = """
aclui activeds ad1 adptif adsiid advapi32 asycfilt authz bhsupp bits bufferoverflowu cabinet
cap certadm certidl ciuuid clusapi comctl32 comdlg32 comsupp comsuppd comsuppw comsuppwd comsvcs
credui crypt32 cryptnet cryptui d3d8thk daouuid dbgeng dbghelp dciman32 ddao35 ddao35d
ddao35u ddao35ud delayimp dhcpcsvc dhcpsapi dlcapi dnsapi dsprop dsuiext dtchelp
faultrep fcachdll fci fdi framedyd framedyn gdi32 gdiplus glauxglu32 gpedit gpmuuid
gtrts32w gtrtst32hlink htmlhelp httpapi icm32 icmui imagehlp imm32 iphlpapi iprop
kernel32 ksguid ksproxy ksuser libcmt libcmtd libcpmt libcpmtd loadperf lz32 mapi
mapi32 mgmtapi minidump mmc mobsync mpr mprapi mqoa mqrt msacm32 mscms mscoree
msdasc msimg32 msrating mstask msvcmrt msvcurt msvcurtd mswsock msxml2 mtx mtxdm
netapi32 nmapinmsupp npptools ntdsapi ntdsbcli ntmsapi ntquery odbc32 odbcbcp
odbccp32 oldnames ole32 oleacc oleaut32 oledb oledlgolepro32 opends60 opengl32
osptk parser pdh penter pgobootrun pgort powrprof psapi ptrustm ptrustmd ptrustu
ptrustud qosname rasapi32 rasdlg rassapi resutils riched20 rpcndr rpcns4 rpcrt4 rtm
rtutils runtmchk scarddlg scrnsave scrnsavw secur32 sensapi setupapi sfc shell32
shfolder shlwapi sisbkup snmpapi sporder srclient sti strsafe svcguid tapi32 thunk32
traffic unicows url urlmon user32 userenv usp10 uuid uxtheme vcomp vcompd vdmdbg
version vfw32 wbemuuid webpost wiaguid wininet winmm winscard winspool winstrm
wintrust wldap32 wmiutils wow32 ws2_32 wsnmp32 wsock32 wst wtsapi32 xaswitch xolehlp
""".split()
all_msvc_platforms = [ ('x64', 'amd64'), ('x86', 'x86'), ('ia64', 'ia64'), ('x86_amd64', 'amd64'), ('x86_ia64', 'ia64') ]
all_wince_platforms = [ ('armv4', 'arm'), ('armv4i', 'arm'), ('mipsii', 'mips'), ('mipsii_fp', 'mips'), ('mipsiv', 'mips'), ('mipsiv_fp', 'mips'), ('sh4', 'sh'), ('x86', 'cex86') ]
all_icl_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')]
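# Shape of the 'versions' list consumed by setup_msvc, as built by the gather_* functions below
# (structure inferred from gather_msvc_versions/gather_wsdk_versions/gather_icl_versions):
#   [('msvc 9.0', [('x86', ('x86', (bindirs, incdirs, libdirs))), ...]), ...]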
def setup_msvc(conf, versions):
platforms = Utils.to_list(conf.env['MSVC_TARGETS']) or [i for i,j in all_msvc_platforms+all_icl_platforms+all_wince_platforms]
desired_versions = conf.env['MSVC_VERSIONS'] or [v for v,_ in versions][::-1]
versiondict = dict(versions)
for version in desired_versions:
try:
targets = dict(versiondict [version])
for target in platforms:
try:
arch,(p1,p2,p3) = targets[target]
compiler,version = version.split()
return compiler,p1,p2,p3
except KeyError: continue
except KeyError: continue
conf.fatal('msvc: Impossible to find a valid architecture for building (in setup_msvc)')
@conf
def get_msvc_version(conf, compiler, version, target, vcvars):
debug('msvc: get_msvc_version: ' + compiler + ' ' + version + ' ' + target + ' ...')
batfile = os.path.join(conf.blddir, 'waf-print-msvc.bat')
f = open(batfile, 'w')
f.write("""@echo off
set INCLUDE=
set LIB=
call "%s" %s
echo PATH=%%PATH%%
echo INCLUDE=%%INCLUDE%%
echo LIB=%%LIB%%
""" % (vcvars,target))
f.close()
sout = Utils.cmd_output(['cmd', '/E:on', '/V:on', '/C', batfile])
lines = sout.splitlines()
for x in ('Setting environment', 'Setting SDK environment', 'Intel(R) C++ Compiler'):
if lines[0].find(x) != -1:
break
else:
debug('msvc: get_msvc_version: %r %r %r -> not found' % (compiler, version, target))
conf.fatal('msvc: Impossible to find a valid architecture for building (in get_msvc_version)')
for line in lines[1:]:
if line.startswith('PATH='):
path = line[5:]
MSVC_PATH = path.split(';')
elif line.startswith('INCLUDE='):
MSVC_INCDIR = [i for i in line[8:].split(';') if i]
elif line.startswith('LIB='):
MSVC_LIBDIR = [i for i in line[4:].split(';') if i]
# Check if the compiler is usable at all.
# The detection may return 64-bit versions even on 32-bit systems, and these would fail to run.
env = {}
env.update(os.environ)
env.update(PATH = path)
compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
cxx = conf.find_program(compiler_name, path_list=MSVC_PATH)
# delete CL if it exists, because it could contain parameters which can change cl's behaviour rather catastrophically.
if env.has_key('CL'):
del(env['CL'])
try:
p = pproc.Popen([cxx, '/help'], env=env, stdout=pproc.PIPE, stderr=pproc.PIPE)
out, err = p.communicate()
if p.returncode != 0:
raise Exception('return code: %r: %r' % (p.returncode, err))
except Exception, e:
debug('msvc: get_msvc_version: %r %r %r -> failure' % (compiler, version, target))
debug(str(e))
conf.fatal('msvc: cannot run the compiler (in get_msvc_version)')
else:
debug('msvc: get_msvc_version: %r %r %r -> OK' % (compiler, version, target))
return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR)
@conf
def gather_wsdk_versions(conf, versions):
version_pattern = re.compile('^v..?.?\...?.?')
try:
all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Microsoft SDKs\\Windows')
except WindowsError:
try:
all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows')
except WindowsError:
return
index = 0
while 1:
try:
version = _winreg.EnumKey(all_versions, index)
except WindowsError:
break
index = index + 1
if not version_pattern.match(version):
continue
try:
msvc_version = _winreg.OpenKey(all_versions, version)
path,type = _winreg.QueryValueEx(msvc_version,'InstallationFolder')
except WindowsError:
continue
if os.path.isfile(os.path.join(path, 'bin', 'SetEnv.cmd')):
targets = []
for target,arch in all_msvc_platforms:
try:
targets.append((target, (arch, conf.get_msvc_version('wsdk', version, '/'+target, os.path.join(path, 'bin', 'SetEnv.cmd')))))
except Configure.ConfigurationError:
pass
versions.append(('wsdk ' + version[1:], targets))
@conf
def gather_msvc_versions(conf, versions):
# check for SmartPhone SDKs
try:
ce_sdk = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Windows CE Tools\\SDKs')
except WindowsError:
try:
ce_sdk = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows CE Tools\\SDKs')
except WindowsError:
ce_sdk = ''
if ce_sdk:
supported_wince_platforms = []
ce_index = 0
while 1:
try:
sdk_device = _winreg.EnumKey(ce_sdk, ce_index)
except WindowsError:
break
ce_index = ce_index + 1
sdk = _winreg.OpenKey(ce_sdk, sdk_device)
path,type = _winreg.QueryValueEx(sdk, 'SDKRootDir')
path=str(path)
path,device = os.path.split(path)
if not device:
path,device = os.path.split(path)
for arch,compiler in all_wince_platforms:
platforms = []
if os.path.isdir(os.path.join(path, device, 'Lib', arch)):
platforms.append((arch, compiler, os.path.join(path, device, 'Include', arch), os.path.join(path, device, 'Lib', arch)))
if platforms:
supported_wince_platforms.append((device, platforms))
# checks MSVC
version_pattern = re.compile('^..?\...?')
for vcver,vcvar in [('VCExpress','exp'), ('VisualStudio','')]:
try:
all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\'+vcver)
except WindowsError:
try:
all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\'+vcver)
except WindowsError:
continue
index = 0
while 1:
try:
version = _winreg.EnumKey(all_versions, index)
except WindowsError:
break
index = index + 1
if not version_pattern.match(version):
continue
try:
msvc_version = _winreg.OpenKey(all_versions, version + "\\Setup\\VS")
path,type = _winreg.QueryValueEx(msvc_version, 'ProductDir')
path=str(path)
targets = []
if ce_sdk:
for device,platforms in supported_wince_platforms:
cetargets = []
for platform,compiler,include,lib in platforms:
winCEpath = os.path.join(path, 'VC', 'ce')
if os.path.isdir(winCEpath):
common_bindirs,_1,_2 = conf.get_msvc_version('msvc', version, 'x86', os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat'))
if os.path.isdir(os.path.join(winCEpath, 'lib', platform)):
bindirs = [os.path.join(winCEpath, 'bin', compiler), os.path.join(winCEpath, 'bin', 'x86_'+compiler)] + common_bindirs
incdirs = [include, os.path.join(winCEpath, 'include'), os.path.join(winCEpath, 'atlmfc', 'include')]
libdirs = [lib, os.path.join(winCEpath, 'lib', platform), os.path.join(winCEpath, 'atlmfc', 'lib', platform)]
cetargets.append((platform, (platform, (bindirs,incdirs,libdirs))))
versions.append((device+' '+version, cetargets))
if os.path.isfile(os.path.join(path, 'VC', 'vcvarsall.bat')):
for target,realtarget in all_msvc_platforms[::-1]:
try:
targets.append((target, (realtarget, conf.get_msvc_version('msvc', version, target, os.path.join(path, 'VC', 'vcvarsall.bat')))))
except:
pass
elif os.path.isfile(os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat')):
try:
targets.append(('x86', ('x86', conf.get_msvc_version('msvc', version, 'x86', os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat')))))
except Configure.ConfigurationError:
pass
versions.append(('msvc '+version, targets))
except WindowsError:
continue
@conf
def gather_icl_versions(conf, versions):
version_pattern = re.compile('^...?.?\....?.?')
try:
all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\C++')
except WindowsError:
try:
all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\C++')
except WindowsError:
return
index = 0
while 1:
try:
version = _winreg.EnumKey(all_versions, index)
except WindowsError:
break
index = index + 1
if not version_pattern.match(version):
continue
targets = []
for target,arch in all_icl_platforms:
try:
icl_version = _winreg.OpenKey(all_versions, version+'\\'+target)
path,type = _winreg.QueryValueEx(icl_version,'ProductDir')
if os.path.isfile(os.path.join(path, 'bin', 'iclvars.bat')):
try:
targets.append((target, (arch, conf.get_msvc_version('intel', version, target, os.path.join(path, 'bin', 'iclvars.bat')))))
except Configure.ConfigurationError:
pass
except WindowsError:
continue
major = version[0:2]
versions.append(('intel ' + major, targets))
@conf
def get_msvc_versions(conf):
if not conf.env['MSVC_INSTALLED_VERSIONS']:
conf.env['MSVC_INSTALLED_VERSIONS'] = []
conf.gather_msvc_versions(conf.env['MSVC_INSTALLED_VERSIONS'])
conf.gather_wsdk_versions(conf.env['MSVC_INSTALLED_VERSIONS'])
conf.gather_icl_versions(conf.env['MSVC_INSTALLED_VERSIONS'])
return conf.env['MSVC_INSTALLED_VERSIONS']
@conf
def print_all_msvc_detected(conf):
for version,targets in conf.env['MSVC_INSTALLED_VERSIONS']:
info(version)
for target,l in targets:
info("\t"+target)
def detect_msvc(conf):
versions = get_msvc_versions(conf)
return setup_msvc(conf, versions)
@conf
def find_lt_names_msvc(self, libname, is_static=False):
"""
Win32/MSVC specific code to glean out information from libtool la files.
this function is not attached to the task_gen class
"""
lt_names=[
'lib%s.la' % libname,
'%s.la' % libname,
]
for path in self.env['LIBPATH']:
for la in lt_names:
laf=os.path.join(path,la)
dll=None
if os.path.exists(laf):
ltdict=read_la_file(laf)
lt_libdir=None
if ltdict.get('libdir', ''):
lt_libdir = ltdict['libdir']
if not is_static and ltdict.get('library_names', ''):
dllnames=ltdict['library_names'].split()
dll=dllnames[0].lower()
dll=re.sub('\.dll$', '', dll)
return (lt_libdir, dll, False)
elif ltdict.get('old_library', ''):
olib=ltdict['old_library']
if os.path.exists(os.path.join(path,olib)):
return (path, olib, True)
elif lt_libdir != '' and os.path.exists(os.path.join(lt_libdir,olib)):
return (lt_libdir, olib, True)
else:
return (None, olib, True)
else:
raise Utils.WafError('invalid libtool object file: %s' % laf)
return (None, None, None)
@conf
def libname_msvc(self, libname, is_static=False, mandatory=False):
lib = libname.lower()
lib = re.sub('\.lib$','',lib)
if lib in g_msvc_systemlibs:
return lib
lib=re.sub('^lib','',lib)
if lib == 'm':
return None
(lt_path, lt_libname, lt_static) = self.find_lt_names_msvc(lib, is_static)
if lt_path != None and lt_libname != None:
if lt_static == True:
# file existence check has been made by find_lt_names
return os.path.join(lt_path,lt_libname)
if lt_path != None:
_libpaths=[lt_path] + self.env['LIBPATH']
else:
_libpaths=self.env['LIBPATH']
static_libs=[
'lib%ss.lib' % lib,
'lib%s.lib' % lib,
'%ss.lib' % lib,
'%s.lib' %lib,
]
dynamic_libs=[
'lib%s.dll.lib' % lib,
'lib%s.dll.a' % lib,
'%s.dll.lib' % lib,
'%s.dll.a' % lib,
'lib%s_d.lib' % lib,
'%s_d.lib' % lib,
'%s.lib' %lib,
]
libnames=static_libs
if not is_static:
libnames=dynamic_libs + static_libs
for path in _libpaths:
for libn in libnames:
if os.path.exists(os.path.join(path, libn)):
debug('msvc: lib found: %s' % os.path.join(path,libn))
return re.sub('\.lib$', '',libn)
#if no lib can be found, just return the libname as msvc expects it
if mandatory:
self.fatal("The library %r could not be found" % libname)
return re.sub('\.lib$', '', libname)
@conf
def check_lib_msvc(self, libname, is_static=False, uselib_store=None, mandatory=False):
"This is the api to use"
libn = self.libname_msvc(libname, is_static, mandatory)
if not uselib_store:
uselib_store = libname.upper()
# Note: ideally we should be able to place the lib in the right env var, either STATICLIB or LIB,
# but we don't distinguish static libs from shared libs.
# This is ok since msvc doesn't have any special linker flag to select static libs (no env['STATICLIB_MARKER'])
if False and is_static: # disabled
self.env['STATICLIB_' + uselib_store] = [libn]
else:
self.env['LIB_' + uselib_store] = [libn]
@conf
def check_libs_msvc(self, libnames, is_static=False, mandatory=False):
for libname in Utils.to_list(libnames):
self.check_lib_msvc(libname, is_static, mandatory=mandatory)
@conftest
def no_autodetect(conf):
conf.eval_rules(detect.replace('autodetect', ''))
detect = '''
autodetect
find_msvc
msvc_common_flags
cc_load_tools
cxx_load_tools
cc_add_flags
cxx_add_flags
link_add_flags
'''
@conftest
def autodetect(conf):
v = conf.env
compiler, path, includes, libdirs = detect_msvc(conf)
v['PATH'] = path
v['CPPPATH'] = includes
v['LIBPATH'] = libdirs
v['MSVC_COMPILER'] = compiler
def _get_prog_names(conf, compiler):
if compiler=='intel':
compiler_name = 'ICL'
linker_name = 'XILINK'
lib_name = 'XILIB'
else:
# assumes CL.exe
compiler_name = 'CL'
linker_name = 'LINK'
lib_name = 'LIB'
return compiler_name, linker_name, lib_name
@conftest
def find_msvc(conf):
# due to path format limitations, limit operation only to native Win32. Yeah it sucks.
if sys.platform != 'win32':
conf.fatal('MSVC module only works under native Win32 Python! cygwin is not supported yet')
v = conf.env
compiler, path, includes, libdirs = detect_msvc(conf)
v['PATH'] = path
v['CPPPATH'] = includes
v['LIBPATH'] = libdirs
compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
# compiler
cxx = None
if v['CXX']: cxx = v['CXX']
elif 'CXX' in conf.environ: cxx = conf.environ['CXX']
if not cxx: cxx = conf.find_program(compiler_name, var='CXX', path_list=path)
if not cxx: conf.fatal('%s was not found (compiler)' % compiler_name)
cxx = conf.cmd_to_list(cxx)
# before setting anything, check if the compiler is really msvc
env = dict(conf.environ)
env.update(PATH = ';'.join(path))
if not Utils.cmd_output([cxx, '/nologo', '/?'], silent=True, env=env):
conf.fatal('the msvc compiler could not be identified')
# c/c++ compiler
v['CC'] = v['CXX'] = cxx
v['CC_NAME'] = v['CXX_NAME'] = 'msvc'
# environment flags
try: v.prepend_value('CPPPATH', conf.environ['INCLUDE'])
except KeyError: pass
try: v.prepend_value('LIBPATH', conf.environ['LIB'])
except KeyError: pass
# linker
if not v['LINK_CXX']:
link = conf.find_program(linker_name, path_list=path)
if link: v['LINK_CXX'] = link
else: conf.fatal('%s was not found (linker)' % linker_name)
v['LINK'] = link
if not v['LINK_CC']: v['LINK_CC'] = v['LINK_CXX']
# staticlib linker
if not v['AR']:
stliblink = conf.find_program(lib_name, path_list=path)
if not stliblink: return
v['AR'] = stliblink
v['ARFLAGS'] = ['/NOLOGO']
# manifest tool. Not required for VS 2003 and below. Must have for VS 2005 and later
manifesttool = conf.find_program('MT', path_list=path)
if manifesttool:
v['MT'] = manifesttool
v['MTFLAGS'] = ['/NOLOGO']
conf.check_tool('winres')
if not conf.env['WINRC']:
warn('Resource compiler not found. Compiling resource file is disabled')
@conftest
def msvc_common_flags(conf):
v = conf.env
v['CPPFLAGS'] = ['/W3', '/nologo']
v['CCDEFINES_ST'] = '/D%s'
v['CXXDEFINES_ST'] = '/D%s'
# TODO just use _WIN32, which is defined by the compiler itself!
v['CCDEFINES'] = ['WIN32'] # avoid using this, any compiler predefines the _WIN32 macro anyway
v['CXXDEFINES'] = ['WIN32'] # avoid using this, any compiler predefines the _WIN32 macro anyway
v['_CCINCFLAGS'] = []
v['_CCDEFFLAGS'] = []
v['_CXXINCFLAGS'] = []
v['_CXXDEFFLAGS'] = []
v['CC_SRC_F'] = ''
v['CC_TGT_F'] = ['/c', '/Fo']
v['CXX_SRC_F'] = ''
v['CXX_TGT_F'] = ['/c', '/Fo']
v['CPPPATH_ST'] = '/I%s' # template for adding include paths
v['AR_TGT_F'] = v['CCLNK_TGT_F'] = v['CXXLNK_TGT_F'] = '/OUT:'
# Subsystem specific flags
v['CPPFLAGS_CONSOLE'] = ['/SUBSYSTEM:CONSOLE']
v['CPPFLAGS_NATIVE'] = ['/SUBSYSTEM:NATIVE']
v['CPPFLAGS_POSIX'] = ['/SUBSYSTEM:POSIX']
v['CPPFLAGS_WINDOWS'] = ['/SUBSYSTEM:WINDOWS']
v['CPPFLAGS_WINDOWSCE'] = ['/SUBSYSTEM:WINDOWSCE']
# CRT specific flags
v['CPPFLAGS_CRT_MULTITHREADED'] = ['/MT']
v['CPPFLAGS_CRT_MULTITHREADED_DLL'] = ['/MD']
# TODO these are defined by the compiler itself!
v['CPPDEFINES_CRT_MULTITHREADED'] = ['_MT'] # this is defined by the compiler itself!
v['CPPDEFINES_CRT_MULTITHREADED_DLL'] = ['_MT', '_DLL'] # these are defined by the compiler itself!
v['CPPFLAGS_CRT_MULTITHREADED_DBG'] = ['/MTd']
v['CPPFLAGS_CRT_MULTITHREADED_DLL_DBG'] = ['/MDd']
# TODO these are defined by the compiler itself!
v['CPPDEFINES_CRT_MULTITHREADED_DBG'] = ['_DEBUG', '_MT'] # these are defined by the compiler itself!
v['CPPDEFINES_CRT_MULTITHREADED_DLL_DBG'] = ['_DEBUG', '_MT', '_DLL'] # these are defined by the compiler itself!
# compiler debug levels
v['CCFLAGS'] = ['/TC']
v['CCFLAGS_OPTIMIZED'] = ['/O2', '/DNDEBUG']
v['CCFLAGS_RELEASE'] = ['/O2', '/DNDEBUG']
v['CCFLAGS_DEBUG'] = ['/Od', '/RTC1', '/ZI']
v['CCFLAGS_ULTRADEBUG'] = ['/Od', '/RTC1', '/ZI']
v['CXXFLAGS'] = ['/TP', '/EHsc']
v['CXXFLAGS_OPTIMIZED'] = ['/O2', '/DNDEBUG']
v['CXXFLAGS_RELEASE'] = ['/O2', '/DNDEBUG']
v['CXXFLAGS_DEBUG'] = ['/Od', '/RTC1', '/ZI']
v['CXXFLAGS_ULTRADEBUG'] = ['/Od', '/RTC1', '/ZI']
# linker
v['LIB'] = []
v['LIB_ST'] = '%s.lib' # template for adding libs
v['LIBPATH_ST'] = '/LIBPATH:%s' # template for adding libpaths
v['STATICLIB_ST'] = 'lib%s.lib' # Note: to be able to distinguish between a static lib and a dll import lib, it's a good practice to name the static lib 'lib%s.lib' and the dll import lib '%s.lib'
v['STATICLIBPATH_ST'] = '/LIBPATH:%s'
v['LINKFLAGS'] = ['/NOLOGO', '/MANIFEST']
v['LINKFLAGS_DEBUG'] = ['/DEBUG']
v['LINKFLAGS_ULTRADEBUG'] = ['/DEBUG']
# shared library
v['shlib_CCFLAGS'] = ['']
v['shlib_CXXFLAGS'] = ['']
v['shlib_LINKFLAGS']= ['/DLL']
v['shlib_PATTERN'] = '%s.dll'
v['implib_PATTERN'] = '%s.lib'
v['IMPLIB_ST'] = '/IMPLIB:%s'
# static library
v['staticlib_LINKFLAGS'] = ['']
v['staticlib_PATTERN'] = 'lib%s.lib' # Note: to be able to distinguish between a static lib and a dll import lib, it's a good practice to name the static lib 'lib%s.lib' and the dll import lib '%s.lib'
# program
v['program_PATTERN'] = '%s.exe'
#######################################################################################################
##### conf above, build below
@after('apply_link')
@feature('cc', 'cxx')
def apply_flags_msvc(self):
if self.env.CC_NAME != 'msvc' or not self.link_task:
return
subsystem = getattr(self, 'subsystem', '')
if subsystem:
subsystem = '/subsystem:%s' % subsystem
flags = 'cstaticlib' in self.features and 'ARFLAGS' or 'LINKFLAGS'
self.env.append_value(flags, subsystem)
if getattr(self, 'link_task', None) and not 'cstaticlib' in self.features:
for f in self.env.LINKFLAGS:
d = f.lower()
if d[1:] == 'debug':
pdbnode = self.link_task.outputs[0].change_ext('.pdb')
pdbfile = pdbnode.bldpath(self.env)
self.link_task.outputs.append(pdbnode)
self.bld.install_files(self.install_path, [pdbnode], env=self.env)
break
@feature('cprogram', 'cshlib', 'cstaticlib')
@after('apply_lib_vars')
@before('apply_obj_vars')
def apply_obj_vars_msvc(self):
if self.env['CC_NAME'] != 'msvc':
return
try:
self.meths.remove('apply_obj_vars')
except ValueError:
pass
libpaths = getattr(self, 'libpaths', [])
if not libpaths: self.libpaths = libpaths
env = self.env
app = env.append_unique
cpppath_st = env['CPPPATH_ST']
lib_st = env['LIB_ST']
staticlib_st = env['STATICLIB_ST']
libpath_st = env['LIBPATH_ST']
staticlibpath_st = env['STATICLIBPATH_ST']
for i in env['LIBPATH']:
app('LINKFLAGS', libpath_st % i)
if not libpaths.count(i):
libpaths.append(i)
for i in env['LIBPATH']:
app('LINKFLAGS', staticlibpath_st % i)
if not libpaths.count(i):
libpaths.append(i)
# i doubt that anyone will make a fully static binary anyway
if not env['FULLSTATIC']:
if env['STATICLIB'] or env['LIB']:
app('LINKFLAGS', env['SHLIB_MARKER']) # TODO does SHLIB_MARKER work?
for i in env['STATICLIB']:
app('LINKFLAGS', staticlib_st % i)
for i in env['LIB']:
app('LINKFLAGS', lib_st % i)
# split the manifest file processing from the link task, like for the rc processing
@feature('cprogram', 'cshlib')
@after('apply_link')
def apply_manifest(self):
"""Special linker for MSVC with support for embedding manifests into DLL's
and executables compiled by Visual Studio 2005 or probably later. Without
the manifest file, the binaries are unusable.
See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx
Problems with this tool: it is always called whether MSVC creates manifests or not."""
if self.env.CC_NAME != 'msvc':
return
tsk = self.create_task('msvc_manifest')
tsk.set_inputs(self.link_task.outputs[0])
def exec_mf(self):
env = self.env
outfile = self.inputs[0].bldpath(env)
manifest = outfile + '.manifest'
if os.path.exists(manifest):
debug('msvc: manifesttool')
mtool = env['MT']
if not mtool:
return 0
mode = ''
# embedding mode. Different for EXE's and DLL's.
# see: http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx
if 'cprogram' in self.generator.features:
mode = '1'
elif 'cshlib' in self.generator.features:
mode = '2'
debug('msvc: embedding manifest')
#flags = ' '.join(env['MTFLAGS'] or [])
lst = []
lst.extend(Utils.to_list(env['MT']))
lst.extend(Utils.to_list(env['MTFLAGS']))
lst.extend(Utils.to_list("-manifest"))
lst.extend(Utils.to_list(manifest))
lst.extend(Utils.to_list("-outputresource:%s;%s" % (outfile, mode)))
#cmd='%s %s -manifest "%s" -outputresource:"%s";#%s' % (mtool, flags,
# manifest, outfile, mode)
lst = [lst]
return self.exec_command(*lst)
return None
cls = Task.task_type_from_func('msvc_manifest', vars=['MT', 'MTFLAGS'], color='BLUE', func=exec_mf, ext_in='.bin')
cls.quiet = 1
########## stupid evil command modification: concatenate the tokens /Fx, /doc, and /x: with the next token
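# illustrative transform (paths are made up):
#   ['cl', '/Fo', 'build\\foo.obj', '/c', 'foo.c']  ->  ['cl', '/Fobuild\\foo.obj', '/c', 'foo.c']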
def exec_command_msvc(self, *k, **kw):
"instead of quoting all the paths and keep using the shell, we can just join the options msvc is interested in"
if self.env['CC_NAME'] == 'msvc':
if isinstance(k[0], list):
lst = []
carry = ''
for a in k[0]:
if len(a) == 3 and a.startswith('/F') or a == '/doc' or a[-1] == ':':
carry = a
else:
lst.append(carry + a)
carry = ''
k = [lst]
env = dict(os.environ)
env.update(PATH = ';'.join(self.env['PATH']))
kw['env'] = env
return self.generator.bld.exec_command(*k, **kw)
for k in 'cc cxx winrc cc_link cxx_link static_link qxx'.split():
cls = Task.TaskBase.classes.get(k, None)
if cls:
cls.exec_command = exec_command_msvc
|
|
# Space Rocks! (asteroids)
# KidsCanCode 2016, 2017
import pygame as pg
import sys
from os import path
from random import choice, randrange
from itertools import repeat
from sprites import *
from settings import *
img_dir = path.join(path.dirname(__file__), 'img')
snd_dir = path.join(path.dirname(__file__), 'snd')
class Game:
def __init__(self):
pg.init()
pg.mixer.init()
pg.mixer.set_num_channels(16)
self.screen = pg.display.set_mode((WIDTH, HEIGHT))
# all drawing is done to game_surface
# then game surface blitted to screen
self.game_surface = pg.Surface((WIDTH, HEIGHT))
self.game_rect = self.game_surface.get_rect()
pg.display.set_caption(TITLE)
self.clock = pg.time.Clock()
# rotation cache to store rotated images of sprites
# rather than repeated transforms every frame
self.rot_cache = {}
self.rot_cache['player'] = {}
self.rot_cache['rock'] = {}
self.load_data()
def draw_text(self, text, size, color, x, y, align='midtop'):
# helper method to render/place text - use in game loop draw section
font = pg.font.Font(path.join(img_dir, FONT_NAME), size)
text_surface = font.render(text, True, color)
text_rect = text_surface.get_rect(**{align: (x, y)})
self.game_surface.blit(text_surface, text_rect)
def draw_hyper(self, x, y):
# draw charge indicator for hyperspace function
box_width = 100
box_height = 12
if self.player.hyper_charge:
pct = 1
else:
pct = (pg.time.get_ticks() - self.player.last_hyper) / HYPER_CHARGE_TIME
fill = box_width * pct
if pct < .50:
col = RED
elif pct < .95:
col = YELLOW
else:
col = GREEN
outline_rect = pg.Rect(x, y, box_width, box_height)
fill_rect = pg.Rect(x, y + 3, fill, box_height - 4)
pg.draw.rect(self.game_surface, col, fill_rect)
pg.draw.rect(self.game_surface, WHITE, outline_rect, 2)
def draw_shield_level(self, x, y):
# draw shield indicator
# TODO: add "warning" flash when shield empty
box_width = 12
box_height = 15
spacer = 3
offset = 25
# icon
img = self.shield_logo
img_rect = img.get_rect()
img_rect.topleft = (x, y)
# outline
outline_rect = pg.Rect(x + offset, y, box_width * 3 + spacer * 4, box_height + spacer * 2)
pg.draw.rect(self.game_surface, GREY, outline_rect, 2)
# fill
fill_colors = [RED, YELLOW, GREEN]
if self.player.shield:
for i in range(self.player.shield.level + 1):
r = pg.Rect(x + offset + spacer + (box_width + spacer) * i, y + spacer, box_width, box_height)
pg.draw.rect(self.game_surface, fill_colors[self.player.shield.level], r)
else:
pass
self.game_surface.blit(img, img_rect)
def draw_lives(self, img, x, y, count):
# draw lives indicator using given icon
for i in range(count):
img_rect = img.get_rect()
img_rect.x = x + 40 * i
img_rect.y = y
self.game_surface.blit(img, img_rect)
def draw_score(self, x, y):
# draw score using image font
digit_rect = self.numbers[0].get_rect()
width = len(str(self.score)) * digit_rect.width
score_surf = pg.Surface([width, digit_rect.height])
score_rect = score_surf.get_rect()
score_rect.midtop = (x, y)
for pos, char in enumerate(str(self.score)):
digit_img = self.numbers[int(char)]
digit_rect.topleft = (pos * digit_rect.width, 0)
score_surf.blit(digit_img, digit_rect)
self.game_surface.blit(score_surf, score_rect)
def new(self):
# initialize all variables and do all the setup for a new game
# various groups for collisions
self.all_sprites = pg.sprite.LayeredUpdates()
self.rocks = pg.sprite.Group()
self.bullets = pg.sprite.Group()
self.bomb_explosions = pg.sprite.Group()
self.powerups = pg.sprite.Group()
self.aliens = pg.sprite.Group()
self.mobs = pg.sprite.Group()
self.player = Player(self, PLAYER_IMG)
for i in range(START_ROCKS):
Rock(self, 3, None)
self.score = 0
self.level = 1
# Experimental lighting
# TODO: improve lighting functionality
self.light = False
# offset for screen shake function
self.offset = repeat((0, 0))
self.last_alien = pg.time.get_ticks()
pg.mixer.music.load(path.join(snd_dir, 'SimpleBeat.ogg'))
pg.mixer.music.play(loops=-1)
def load_data(self):
# load all game assets
# TODO: loading bar (needed?)
# TODO: improve lighting frame rate
self.fog = pg.Surface((WIDTH, HEIGHT)) # , pg.SRCALPHA)
self.fog.fill((255, 255, 255))
self.player_light_img = pg.image.load(path.join(img_dir, 'circle_1000.png')).convert_alpha()
self.player_light_rect = self.player_light_img.get_rect()
self.bullet_light_img = pg.transform.scale(self.player_light_img, (300, 300))
self.bullet_light_rect = self.bullet_light_img.get_rect()
# spritesheets
self.spritesheet = SpritesheetWithXML(path.join(img_dir, 'sheet'))
self.beam_sheet = SpritesheetWithXML(path.join(img_dir, 'beams'))
self.expl_sheet = SpritesheetWithXML(path.join(img_dir, 'spritesheet_regularExplosion'))
self.expl_player_sheet = SpritesheetWithXML(path.join(img_dir, 'spritesheet_sonicExplosion'))
self.ship_particle_img = pg.image.load(path.join(img_dir, PLAYER_THRUST_IMG)).convert_alpha()
# rock images - 4 sizes
for size in ROCK_IMAGES.keys():
for img in ROCK_IMAGES[size]:
self.rot_cache['rock'][img] = {}
# explosions - 3 kinds
self.expl_frames = {}
self.expl_frames['lg'] = []
self.expl_frames['sm'] = []
self.expl_frames['sonic'] = []
for i in range(9):
img_name = 'sonicExplosion0{}.png'.format(i)
img = self.expl_player_sheet.get_image_by_name(img_name)
img.set_colorkey(BLACK)
self.expl_frames['sonic'].append(img)
img_name = 'regularExplosion0{}.png'.format(i)
img = self.expl_sheet.get_image_by_name(img_name)
img.set_colorkey(BLACK)
img_lg = pg.transform.rotozoom(img, 0, 0.6)
self.expl_frames['lg'].append(img_lg)
img_sm = pg.transform.rotozoom(img, 0, 0.3)
self.expl_frames['sm'].append(img_sm)
# image font - numerals for 0-9
self.numbers = []
for i in range(10):
self.numbers.append(self.spritesheet.get_image_by_name('numeral{}.png'.format(i)))
self.background = pg.image.load(path.join(img_dir, 'starfield.png')).convert_alpha()
self.background_rect = self.background.get_rect()
# shield images
self.shield_images = []
for img in SHIELD_IMAGES:
img = pg.transform.rotozoom(self.spritesheet.get_image_by_name(img), 0, PLAYER_SCALE - 0.1)
self.shield_images.append(img)
self.shield_logo = pg.transform.rotozoom(self.spritesheet.get_image_by_name(POW_IMAGES['shield']), 0, 0.6)
# sounds
self.shield_down_sound = pg.mixer.Sound(path.join(snd_dir, SHIELD_DOWN_SOUND))
self.shield_down_sound.set_volume(1.0)
self.alien_fire_sound = pg.mixer.Sound(path.join(snd_dir, ALIEN_BULLET_SOUND))
self.alien_fire_sound.set_volume(1.0)
self.hyper_sound = pg.mixer.Sound(path.join(snd_dir, HYPER_SOUND))
self.bomb_tick_sound = pg.mixer.Sound(path.join(snd_dir, BOMB_TICK_SOUND))
self.bomb_tick_sound.set_volume(0.5)
self.bullet_sounds = []
for sound in BULLET_SOUNDS:
snd = pg.mixer.Sound(path.join(snd_dir, sound))
snd.set_volume(0.5)
self.bullet_sounds.append(snd)
self.bomb_launch_sound = pg.mixer.Sound(path.join(snd_dir, BOMB_LAUNCH_SOUND))
self.rock_exp_sounds = []
for sound in ROCK_EXPL_SOUNDS:
self.rock_exp_sounds.append(pg.mixer.Sound(path.join(snd_dir, sound)))
self.bomb_exp_sounds = []
for sound in BOMB_EXPL_SOUNDS:
self.bomb_exp_sounds.append(pg.mixer.Sound(path.join(snd_dir, sound)))
self.pow_sounds = {}
for pow_type in POW_SOUNDS.keys():
self.pow_sounds[pow_type] = pg.mixer.Sound(path.join(snd_dir, POW_SOUNDS[pow_type]))
def run(self):
# game loop - set self.playing = False to end the game
self.playing = True
while self.playing:
self.dt = self.clock.tick(FPS) / 1000
self.events()
self.update()
self.draw()
def quit(self):
pg.quit()
sys.exit()
def update(self):
# update portion of the game loop
# TODO: clean up - move collisions to sep. functions?
self.all_sprites.update()
# spawn alien?
# TODO: improve alien spawning
now = pg.time.get_ticks()
if now - self.last_alien > ALIEN_SPAWN_TIME + randint(1000, 5000):
self.last_alien = now
Alien(self)
# bomb explosions take out rocks (player too?)
hits = pg.sprite.groupcollide(self.mobs, self.bomb_explosions, False, False)
for hit in hits:
if isinstance(hit, Pow) or isinstance(hit, ABullet):
pass
else:
hit.kill()
self.score += 4 - hit.size
if hit.size > 1:
Explosion(self, hit.rect.center, 'lg')
else:
Explosion(self, hit.rect.center, 'sm')
if hit.size > 0:
Rock(self, hit.size - 1, hit.rect.center)
Rock(self, hit.size - 1, hit.rect.center)
# check for bullet hits
# 1) with rocks 2) with aliens 3) with alien bullets
hits = pg.sprite.groupcollide(self.mobs, self.bullets, False, False, pg.sprite.collide_mask)
for hit in hits.keys():
for bullet in hits[hit]:
if isinstance(bullet, Bomb):
bullet.explode()
if isinstance(hit, Alien):
hit.hit()
bullet.kill()
if hit.health <= 0:
Explosion(self, hit.rect.center, 'sonic')
Pow(self, hit.pos)
hit.kill()
self.score += 10
else:
Explosion(self, bullet.rect.center, 'sm')
if isinstance(hit, Rock):
if randrange(100) < POW_SPAWN_PCT and len(self.powerups) <= 2:
Pow(self, hit.pos)
self.score += 4 - hit.size
if hit.size > 1:
Explosion(self, hit.rect.center, 'lg')
else:
Explosion(self, hit.rect.center, 'sm')
if hit.size > 0:
Rock(self, hit.size - 1, hit.rect.center)
Rock(self, hit.size - 1, hit.rect.center)
hit.kill()
if isinstance(bullet, Bullet):
bullet.kill()
if isinstance(hit, ABullet):
# TODO: decide whether player shots should hit alien shots
# hit.kill()
pass
# check for collisions with player
# 1) Rocks 2) alien shots 3) powerups 4) aliens
hits = pg.sprite.spritecollide(self.player, self.mobs, True, pg.sprite.collide_mask)
for hit in hits:
# type of object
if isinstance(hit, Rock):
# decrease shield / lives
if self.player.shield:
if self.player.shield.level > 0:
self.player.shield.level -= 1
else:
self.shield_down_sound.play()
self.player.shield.kill()
self.player.shield = None
Explosion(self, self.player.rect.center, 'sonic')
else:
self.player.die()
elif isinstance(hit, ABullet):
# decrease shield / lives
if self.player.shield:
if self.player.shield.level > 0:
self.player.shield.level -= 1
else:
self.shield_down_sound.play()
self.player.shield.kill()
self.player.shield = None
Explosion(self, hit.rect.center, 'sm')
else:
self.player.die()
elif isinstance(hit, Pow):
if hit.type == 'shield':
if not self.player.shield:
Shield(self, self.player)
else:
self.player.shield.level = 2
self.pow_sounds[hit.type].play()
elif hit.type == 'gun':
if self.player.gun_level < 4:
self.player.gun_level += 1
self.pow_sounds[hit.type].play()
elif isinstance(hit, Alien):
pass
# destroyed all rocks? next level
# TODO: level change indication
if len(self.rocks) == 0:
self.level += 1
for i in range(self.level + 2):
Rock(self, choice([3, 2]), None)
# game over
if self.player.lives <= 0:
self.playing = False
def shake(self, amount=20, times=2):
# implement screen shake
d = -1
for _ in range(0, times):
for x in range(0, amount, 4):
yield(x * d, x * d)
for x in range(amount, 0, -4):
yield(x * d, x * d)
d *= -1
while True:
yield (0, 0)
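# assumed call site (not shown in this excerpt), e.g. when the player is hit:
#   self.offset = self.shake(amount=20, times=2)
# draw() then consumes next(self.offset) each frame to displace the blit,
# settling back at (0, 0) once the shake sequence is exhausted.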
def render_fog(self):
self.fog.fill((180, 180, 180))
self.player_light_rect.center = self.player.pos
self.fog.blit(self.player_light_img, self.player_light_rect)
for sprite in self.all_sprites:
if isinstance(sprite, Bullet) or isinstance(sprite, ABullet):
self.bullet_light_rect.center = sprite.pos
self.fog.blit(self.bullet_light_img, self.bullet_light_rect)
if isinstance(sprite, Alien) or isinstance(sprite, Explosion):
self.player_light_rect.center = sprite.pos
self.fog.blit(self.player_light_img, self.player_light_rect)
self.game_surface.blit(self.fog, (0, 0), special_flags=pg.BLEND_RGBA_SUB)
# def render_fog(self):
# self.fog.fill((0, 0, 0, 255))
# steps = 250
# center = (int(self.player.pos.x), int(self.player.pos.y))
# for i in range(steps, 1, -5):
# pg.draw.circle(self.fog, (0, 0, 0, i * 255 / steps), center, i)
# self.game_surface.blit(self.fog, (0, 0))
def draw(self):
# draw everything to the screen
# TODO: disable FPS counter
pg.display.set_caption("{:.2f}".format(self.clock.get_fps()))
self.game_surface.blit(self.background, self.background_rect)
#self.game_surface.fill((40, 40, 40))
self.all_sprites.draw(self.game_surface)
self.player.engine_emitter.draw()
if self.light:
self.render_fog()
self.draw_text(str(self.score), 28, WHITE, WIDTH / 2, 15, align='midtop')
self.draw_text("Level: " + str(self.level), 22, WHITE, 5, 15, align='topleft')
self.draw_lives(self.player.life_image, WIDTH - 150, 15, self.player.lives)
self.draw_shield_level(WIDTH - 150, 55)
self.draw_hyper(WIDTH - 150, 105)
# self.draw_score(WIDTH / 2, 15)
self.screen.blit(self.game_surface, next(self.offset))
pg.display.update()
def events(self):
# catch all events here
for event in pg.event.get():
if event.type == pg.QUIT:
self.quit()
if event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE:
self.quit()
# TODO: remove this
if event.type == pg.KEYDOWN and event.key == pg.K_l:
self.light = not self.light
def show_start_screen(self):
# show the start screen
# TODO: add animation
# TODO: combine with go_screen?
self.game_surface.fill(BGCOLOR)
self.draw_text(TITLE, 48, WHITE, WIDTH / 2, HEIGHT / 4)
self.draw_text("Arrows to move, Space to fire", 22, WHITE, WIDTH / 2, HEIGHT / 2)
self.draw_text("Press a key to play", 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)
self.screen.blit(self.game_surface, self.game_rect)
pg.display.flip()
self.wait_for_key(0)
def show_go_screen(self):
# show the game over screen
self.game_surface.fill(BGCOLOR)
self.draw_text("GAME OVER", 48, WHITE, WIDTH / 2, HEIGHT / 4)
self.draw_text("Score: " + str(self.score), 22, WHITE, WIDTH / 2, HEIGHT / 2)
self.draw_text("Press a key to play again", 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)
self.screen.blit(self.game_surface, self.game_rect)
pg.display.flip()
self.wait_for_key(2000)
def wait_for_key(self, delay):
# simple loop to wait for a keyup event
start = pg.time.get_ticks()
pg.event.get()
waiting = True
while waiting:
self.clock.tick(FPS)
for event in pg.event.get():
if event.type == pg.QUIT:
waiting = False
self.quit()
if event.type == pg.KEYUP and pg.time.get_ticks() - start > delay:
if event.key == pg.K_ESCAPE:
self.quit()
else:
waiting = False
# create the game object
g = Game()
g.show_start_screen()
while True:
g.new()
g.run()
g.show_go_screen()
|
|
from __future__ import print_function, division
from sympy.core import S, sympify, Dummy
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.logic import fuzzy_and
from sympy.core.numbers import Integer
from sympy.ntheory import sieve
from math import sqrt as _sqrt
from sympy.core.compatibility import reduce, range
from sympy.core.cache import cacheit
class CombinatorialFunction(Function):
"""Base class for combinatorial functions. """
def _eval_simplify(self, ratio, measure):
from sympy.simplify.simplify import combsimp
expr = combsimp(self)
if measure(expr) <= ratio*measure(self):
return expr
return self
###############################################################################
######################## FACTORIAL and MULTI-FACTORIAL ########################
###############################################################################
class factorial(CombinatorialFunction):
"""Implementation of factorial function over nonnegative integers.
By convention (consistent with the gamma function and the binomial
coefficients), factorial of a negative integer is complex infinity.
The factorial is very important in combinatorics where it gives
the number of ways in which `n` objects can be permuted. It also
arises in calculus, probability, number theory, etc.
The factorial is closely related to the gamma function; in fact
n! = gamma(n + 1) for nonnegative integers. Rewriting factorials
in terms of gamma is very useful for combinatorial simplification.
Computation of the factorial uses two algorithms. For small
arguments a naive product is evaluated. For larger input the
Prime-Swing algorithm is used; it is the fastest known algorithm
and computes n! via the prime factorization of a special class
of numbers, called here the 'Swing Numbers'.
Examples
========
>>> from sympy import Symbol, factorial, S
>>> n = Symbol('n', integer=True)
>>> factorial(0)
1
>>> factorial(7)
5040
>>> factorial(-2)
zoo
>>> factorial(n)
factorial(n)
>>> factorial(2*n)
factorial(2*n)
>>> factorial(S(1)/2)
factorial(1/2)
See Also
========
factorial2, RisingFactorial, FallingFactorial
"""
def fdiff(self, argindex=1):
from sympy import gamma, polygamma
if argindex == 1:
return gamma(self.args[0] + 1)*polygamma(0, self.args[0] + 1)
else:
raise ArgumentIndexError(self, argindex)
_small_swing = [
1, 1, 1, 3, 3, 15, 5, 35, 35, 315, 63, 693, 231, 3003, 429, 6435, 6435, 109395,
12155, 230945, 46189, 969969, 88179, 2028117, 676039, 16900975, 1300075,
35102025, 5014575, 145422675, 9694845, 300540195, 300540195
]
@classmethod
def _swing(cls, n):
if n < 33:
return cls._small_swing[n]
else:
N, primes = int(_sqrt(n)), []
for prime in sieve.primerange(3, N + 1):
p, q = 1, n
while True:
q //= prime
if q > 0:
if q & 1 == 1:
p *= prime
else:
break
if p > 1:
primes.append(p)
for prime in sieve.primerange(N + 1, n//3 + 1):
if (n // prime) & 1 == 1:
primes.append(prime)
L_product = R_product = 1
for prime in sieve.primerange(n//2 + 1, n + 1):
L_product *= prime
for prime in primes:
R_product *= prime
return L_product*R_product
@classmethod
def _recursive(cls, n):
if n < 2:
return 1
else:
return (cls._recursive(n//2)**2)*cls._swing(n)
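# Note: eval() below combines these helpers as
#   n! = _recursive(n) * 2**(n - popcount(n))
# where _recursive(n) supplies the odd part of n! and the power of two is
# recovered from the number of set bits in n (the 'bits' loop in eval).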
@classmethod
def eval(cls, n):
n = sympify(n)
if n.is_Number:
if n is S.Zero:
return S.One
elif n is S.Infinity:
return S.Infinity
elif n.is_Integer:
if n.is_negative:
return S.ComplexInfinity
else:
n, result = n.p, 1
if n < 20:
for i in range(2, n + 1):
result *= i
else:
N, bits = n, 0
while N != 0:
if N & 1 == 1:
bits += 1
N = N >> 1
result = cls._recursive(n)*2**(n - bits)
return Integer(result)
def _eval_rewrite_as_gamma(self, n):
from sympy import gamma
return gamma(n + 1)
def _eval_rewrite_as_Product(self, n):
from sympy import Product
if n.is_nonnegative and n.is_integer:
i = Dummy('i', integer=True)
return Product(i, (i, 1, n))
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_positive(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_composite(self):
x = self.args[0]
if x.is_integer:
return (x - 3).is_nonnegative
def _eval_is_real(self):
x = self.args[0]
if x.is_nonnegative or x.is_noninteger:
return True
class MultiFactorial(CombinatorialFunction):
pass
class subfactorial(CombinatorialFunction):
"""The subfactorial counts the derangements of n items and is
defined for non-negative integers as::
,
| 1 for n = 0
!n = { 0 for n = 1
| (n - 1)*(!(n - 1) + !(n - 2)) for n > 1
`
It can also be written as int(round(n!/exp(1))) but the recursive
definition with caching is implemented for this function.
This function is generalized to noninteger arguments [2]_ as
.. math:: !x = \Gamma(x + 1, -1)/e
References
==========
.. [1] http://en.wikipedia.org/wiki/Subfactorial
.. [2] http://mathworld.wolfram.com/Subfactorial.html
Examples
========
>>> from sympy import subfactorial
>>> from sympy.abc import n
>>> subfactorial(n + 1)
subfactorial(n + 1)
>>> subfactorial(5)
44
See Also
========
sympy.functions.combinatorial.factorials.factorial,
sympy.utilities.iterables.generate_derangements,
sympy.functions.special.gamma_functions.uppergamma
"""
@classmethod
@cacheit
def _eval(self, n):
if not n:
return S.One
elif n == 1:
return S.Zero
return (n - 1)*(self._eval(n - 1) + self._eval(n - 2))
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg.is_Integer and arg.is_nonnegative:
return cls._eval(arg)
elif arg is S.Infinity:
return arg
def _eval_is_even(self):
if self.args[0].is_odd and self.args[0].is_nonnegative:
return True
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_rewrite_as_uppergamma(self, arg):
from sympy import uppergamma
return uppergamma(arg + 1, -1)/S.Exp1
def _eval_is_nonnegative(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_odd(self):
if self.args[0].is_even and self.args[0].is_nonnegative:
return True
class factorial2(CombinatorialFunction):
"""The double factorial n!!, not to be confused with (n!)!
The double factorial is defined for nonnegative integers and for odd
negative integers as::
,
| n*(n - 2)*(n - 4)* ... * 1 for n positive odd
n!! = { n*(n - 2)*(n - 4)* ... * 2 for n positive even
| 1 for n = 0
| (n+2)!! / (n+2) for n negative odd
`
References
==========
.. [1] https://en.wikipedia.org/wiki/Double_factorial
Examples
========
>>> from sympy import factorial2, var
>>> var('n')
n
>>> factorial2(n + 1)
factorial2(n + 1)
>>> factorial2(5)
15
>>> factorial2(-1)
1
>>> factorial2(-5)
1/3
See Also
========
factorial, RisingFactorial, FallingFactorial
"""
@classmethod
def eval(cls, arg):
# TODO: extend this to complex numbers?
if arg.is_Number:
if arg.is_infinite:
return
# This implementation is faster than the recursive one
# It also avoids "maximum recursion depth exceeded" runtime error
if arg.is_nonnegative:
if arg.is_even:
k = arg / 2
return 2 ** k * factorial(k)
return factorial(arg) / factorial2(arg - 1)
if arg.is_even:
raise ValueError("argument must be nonnegative or odd")
return arg * (S.NegativeOne) ** ((1 - arg) / 2) / factorial2(-arg)
def _eval_is_even(self):
# Double factorial is even for every positive even input
n = self.args[0]
if n.is_integer:
if n.is_odd:
return False
if n.is_even:
if n.is_positive:
return True
if n.is_zero:
return False
def _eval_is_integer(self):
# Double factorial is an integer for every nonnegative input, and for
# -1 and -3
n = self.args[0]
if n.is_integer:
if (n + 1).is_nonnegative:
return True
if n.is_odd:
return (n + 3).is_nonnegative
def _eval_is_odd(self):
# Double factorial is odd for every odd input not smaller than -3, and
# for 0
n = self.args[0]
if n.is_odd:
return (n + 3).is_nonnegative
if n.is_even:
if n.is_positive:
return False
if n.is_zero:
return True
def _eval_is_positive(self):
# Double factorial is positive for every nonnegative input, and for
        # every odd negative input which is of the form -1-4k for a
        # nonnegative integer k
n = self.args[0]
if n.is_integer:
if (n + 1).is_nonnegative:
return True
if n.is_odd:
return ((n + 1) / 2).is_even
###############################################################################
######################## RISING and FALLING FACTORIALS ########################
###############################################################################
class RisingFactorial(CombinatorialFunction):
"""Rising factorial (also called Pochhammer symbol) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by:
rf(x, k) = x * (x+1) * ... * (x + k-1)
where 'x' can be arbitrary expression and 'k' is an integer. For
more information check "Concrete mathematics" by Graham, pp. 66
or visit http://mathworld.wolfram.com/RisingFactorial.html page.
Examples
========
>>> from sympy import rf
>>> from sympy.abc import x
>>> rf(x, 0)
1
>>> rf(1, 5)
120
>>> rf(x, 5) == x*(1 + x)*(2 + x)*(3 + x)*(4 + x)
True
See Also
========
factorial, factorial2, FallingFactorial
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN:
return S.NaN
elif x is S.One:
return factorial(k)
elif k.is_Integer:
if k is S.NaN:
return S.NaN
elif k is S.Zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
return reduce(lambda r, i: r*(x + i), range(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
return 1/reduce(lambda r, i: r*(x - i), range(1, abs(int(k)) + 1), 1)
def _eval_rewrite_as_gamma(self, x, k):
from sympy import gamma
return gamma(x + k) / gamma(x)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
def _sage_(self):
import sage.all as sage
return sage.rising_factorial(self.args[0]._sage_(), self.args[1]._sage_())
class FallingFactorial(CombinatorialFunction):
"""Falling factorial (related to rising factorial) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by
ff(x, k) = x * (x-1) * ... * (x - k+1)
where 'x' can be arbitrary expression and 'k' is an integer. For
more information check "Concrete mathematics" by Graham, pp. 66
or visit http://mathworld.wolfram.com/FallingFactorial.html page.
>>> from sympy import ff
>>> from sympy.abc import x
>>> ff(x, 0)
1
>>> ff(5, 5)
120
>>> ff(x, 5) == x*(x-1)*(x-2)*(x-3)*(x-4)
True
See Also
========
factorial, factorial2, RisingFactorial
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN:
return S.NaN
elif k.is_Integer:
if k is S.NaN:
return S.NaN
elif k is S.Zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
return reduce(lambda r, i: r*(x - i), range(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
return 1/reduce(lambda r, i: r*(x + i), range(1, abs(int(k)) + 1), 1)
def _eval_rewrite_as_gamma(self, x, k):
from sympy import gamma
return (-1)**k * gamma(-x + k) / gamma(-x)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
def _sage_(self):
import sage.all as sage
return sage.falling_factorial(self.args[0]._sage_(), self.args[1]._sage_())
rf = RisingFactorial
ff = FallingFactorial
###############################################################################
########################### BINOMIAL COEFFICIENTS #############################
###############################################################################
class binomial(CombinatorialFunction):
"""Implementation of the binomial coefficient. It can be defined
in two ways depending on its desired interpretation:
C(n,k) = n!/(k!(n-k)!) or C(n, k) = ff(n, k)/k!
First, in a strict combinatorial sense it defines the
number of ways we can choose 'k' elements from a set of
'n' elements. In this case both arguments are nonnegative
integers and binomial is computed using an efficient
algorithm based on prime factorization.
The other definition is a generalization for arbitrary 'n',
however 'k' must also be nonnegative. This case is very
useful when evaluating summations.
For the sake of convenience, for negative 'k' this function
will return zero regardless of the value of the other argument.
To expand the binomial when n is a symbol, use either
expand_func() or expand(func=True). The former will keep the
polynomial in factored form while the latter will expand the
polynomial itself. See examples for details.
Examples
========
>>> from sympy import Symbol, Rational, binomial, expand_func
>>> n = Symbol('n', integer=True, positive=True)
>>> binomial(15, 8)
6435
>>> binomial(n, -1)
0
Rows of Pascal's triangle can be generated with the binomial function:
>>> for N in range(8):
... print([ binomial(N, i) for i in range(N + 1)])
...
[1]
[1, 1]
[1, 2, 1]
[1, 3, 3, 1]
[1, 4, 6, 4, 1]
[1, 5, 10, 10, 5, 1]
[1, 6, 15, 20, 15, 6, 1]
[1, 7, 21, 35, 35, 21, 7, 1]
As can a given diagonal, e.g. the 4th diagonal:
>>> N = -4
>>> [ binomial(N, i) for i in range(1 - N)]
[1, -4, 10, -20, 35]
>>> binomial(Rational(5, 4), 3)
-5/128
>>> binomial(Rational(-5, 4), 3)
-195/128
>>> binomial(n, 3)
binomial(n, 3)
>>> binomial(n, 3).expand(func=True)
n**3/6 - n**2/2 + n/3
>>> expand_func(binomial(n, 3))
n*(n - 2)*(n - 1)/6
"""
def fdiff(self, argindex=1):
from sympy import polygamma
if argindex == 1:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/01/
n, k = self.args
return binomial(n, k)*(polygamma(0, n + 1) - \
polygamma(0, n - k + 1))
elif argindex == 2:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/02/
n, k = self.args
return binomial(n, k)*(polygamma(0, n - k + 1) - \
polygamma(0, k + 1))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def _eval(self, n, k):
# n.is_Number and k.is_Integer and k != 1 and n != k
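        # Added explanatory note: the prime-based branch below relies on
        # Kummer's theorem - the exponent of a prime p in C(n, k) equals
        # the number of carries when adding k and n - k in base p. For
        # primes > sqrt(n) that exponent is at most 1, so a single digit
        # comparison (n % prime < k % prime) suffices; smaller primes are
        # handled by counting the carries digit by digit in the while loop.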
if k.is_Integer:
if n.is_Integer and n >= 0:
n, k = int(n), int(k)
if k > n:
return S.Zero
elif k > n // 2:
k = n - k
M, result = int(_sqrt(n)), 1
for prime in sieve.primerange(2, n + 1):
if prime > n - k:
result *= prime
elif prime > n // 2:
continue
elif prime > M:
if n % prime < k % prime:
result *= prime
else:
N, K = n, k
exp = a = 0
while N > 0:
a = int((N % prime) < (K % prime + a))
N, K = N // prime, K // prime
exp = a + exp
if exp > 0:
result *= prime**exp
return Integer(result)
else:
d = result = n - k + 1
for i in range(2, k + 1):
d += 1
result *= d
result /= i
return result
@classmethod
def eval(cls, n, k):
n, k = map(sympify, (n, k))
d = n - k
if d.is_zero:
return S.One
elif d.is_zero is False:
if (k - 1).is_zero:
return n
elif k.is_negative:
return S.Zero
elif k.is_zero:
return S.One
elif n.is_integer and n.is_nonnegative and d.is_negative:
return S.Zero
if k.is_Integer and k > 0 and n.is_Number:
return cls._eval(n, k)
def _eval_expand_func(self, **hints):
"""
Function to expand binomial(n, k) when k is a positive integer.
Here n is self.args[0] and k is self.args[1] of binomial(n, k).
"""
n = self.args[0]
if n.is_Number:
return binomial(*self.args)
k = self.args[1]
if k.is_Add and n in k.args:
k = n - k
if k.is_Integer:
if k == S.Zero:
return S.One
elif k < 0:
return S.Zero
else:
n = self.args[0]
result = n - k + 1
for i in range(2, k + 1):
result *= n - k + i
result /= i
return result
else:
return binomial(*self.args)
def _eval_rewrite_as_factorial(self, n, k):
return factorial(n)/(factorial(k)*factorial(n - k))
def _eval_rewrite_as_gamma(self, n, k):
from sympy import gamma
return gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1))
def _eval_is_integer(self):
return self.args[0].is_integer and self.args[1].is_integer
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the system configuration methods work properly."""
from absl.testing import parameterized
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
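# Note (added comment): most tf.config options can only be set before the
# eager context is initialized, so each test below runs under this decorator,
# which tears the context down afterwards and re-enables eager execution.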
def reset_eager(fn):
def wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
finally:
# Reset the context.
context._reset_jit_compiler_flags()
context._reset_context()
ops.enable_eager_execution_internal()
assert context._context is not None
return wrapper
@test_util.with_eager_op_as_function
class ConfigTest(test.TestCase, parameterized.TestCase):
@test_util.disable_eager_op_as_function('b/204320409')
@test_util.run_gpu_only
@reset_eager
def testDevicePolicy(self):
self.assertEqual(context.DEVICE_PLACEMENT_SILENT,
context.context().device_policy)
# If no op has been executed we should be able to set the device policy as
# well as any init-time configs.
config.set_intra_op_parallelism_threads(1)
config.set_device_policy('silent')
config.set_intra_op_parallelism_threads(2)
context.ensure_initialized()
def copy_tensor(dtype=dtypes.int32):
with ops.device('CPU:0'):
cpu_tensor = constant_op.constant(1, dtype=dtype)
gpu_tensor = cpu_tensor.gpu()
self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)
config.set_device_policy('silent')
self.assertEqual(config.get_device_policy(), 'silent')
self.assertEqual(context.DEVICE_PLACEMENT_SILENT,
context.context().device_policy)
copy_tensor()
config.set_device_policy('silent_for_int32')
self.assertEqual(config.get_device_policy(), 'silent_for_int32')
self.assertEqual(context.DEVICE_PLACEMENT_SILENT_FOR_INT32,
context.context().device_policy)
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Tensors on conflicting devices'):
copy_tensor(dtypes.float32)
copy_tensor()
config.set_device_policy('warn')
self.assertEqual(config.get_device_policy(), 'warn')
self.assertEqual(context.DEVICE_PLACEMENT_WARN,
context.context().device_policy)
copy_tensor()
config.set_device_policy('explicit')
self.assertEqual(config.get_device_policy(), 'explicit')
self.assertEqual(context.DEVICE_PLACEMENT_EXPLICIT,
context.context().device_policy)
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Tensors on conflicting devices'):
copy_tensor()
config.set_device_policy(None)
self.assertEqual(config.get_device_policy(), 'silent')
@reset_eager
def testExecutionMode(self):
self.assertTrue(config.get_synchronous_execution())
self.assertEqual(context.SYNC, context.context().execution_mode)
# If no op has been executed we should be able to set the execution mode as
# well as any init-time configs.
config.set_intra_op_parallelism_threads(1)
config.set_synchronous_execution(False)
config.set_intra_op_parallelism_threads(2)
config.set_synchronous_execution(True)
self.assertTrue(config.get_synchronous_execution())
self.assertEqual(context.SYNC, context.context().execution_mode)
config.set_synchronous_execution(False)
self.assertFalse(config.get_synchronous_execution())
self.assertEqual(context.ASYNC, context.context().execution_mode)
@reset_eager
def testIntraOpParallelismThreads(self):
config.set_intra_op_parallelism_threads(10)
self.assertEqual(config.get_intra_op_parallelism_threads(),
context.context().intra_op_parallelism_threads)
context.ensure_initialized()
with self.assertRaises(RuntimeError):
config.set_intra_op_parallelism_threads(1)
config.set_intra_op_parallelism_threads(10)
@reset_eager
def testInterOpParallelismThreads(self):
config.set_inter_op_parallelism_threads(10)
self.assertEqual(config.get_inter_op_parallelism_threads(),
context.context().inter_op_parallelism_threads)
context.ensure_initialized()
with self.assertRaises(RuntimeError):
config.set_inter_op_parallelism_threads(1)
config.set_inter_op_parallelism_threads(10)
@test_util.run_gpu_only
@reset_eager
def testSoftPlacement(self):
if context.executing_eagerly():
self.assertTrue(config.get_soft_device_placement())
else:
self.assertFalse(config.get_soft_device_placement())
def test_attr():
with ops.device('/device:GPU:0'):
return test_ops.test_attr(T=dtypes.float32, name='test_attr')
config.set_soft_device_placement(True)
self.assertEqual(config.get_soft_device_placement(), True)
self.assertEqual(config.get_soft_device_placement(),
context.context().soft_device_placement)
# Since soft placement is enabled, the test_attr operation should fallback
# to CPU with pure eager execution as well as functions
test_attr()
def_function.function(test_attr)()
config.set_soft_device_placement(False)
self.assertEqual(config.get_soft_device_placement(), False)
self.assertEqual(config.get_soft_device_placement(),
context.context().soft_device_placement)
# Since soft placement is disabled, the test_attr operation should fail on
# GPU with pure eager execution as well as functions
with self.assertRaises(errors.InvalidArgumentError):
test_attr()
with self.assertRaises(errors.InvalidArgumentError):
def_function.function(test_attr)()
@reset_eager
def testLogDevicePlacement(self):
self.assertFalse(context.get_log_device_placement())
context.set_log_device_placement(True)
self.assertEqual(context.get_log_device_placement(), True)
self.assertEqual(context.get_log_device_placement(),
context.context().log_device_placement)
context.set_log_device_placement(False)
self.assertEqual(context.get_log_device_placement(), False)
self.assertEqual(context.get_log_device_placement(),
context.context().log_device_placement)
context.ensure_initialized()
# Changing the device placement should not throw an exception
context.set_log_device_placement(True)
@reset_eager
def testEnableMlirBridge(self):
# Default value of enable_mlir_bridge is false.
self.assertFalse(context.context().config.experimental.enable_mlir_bridge)
self.assertEqual(
context.context().config.experimental.mlir_bridge_rollout,
config_pb2.ConfigProto.Experimental.MLIR_BRIDGE_ROLLOUT_UNSPECIFIED)
# Tests enabling mlir bridge.
config.enable_mlir_bridge()
self.assertTrue(context.context().config.experimental.enable_mlir_bridge)
self.assertEqual(
context.context().config.experimental.mlir_bridge_rollout,
config_pb2.ConfigProto.Experimental.MLIR_BRIDGE_ROLLOUT_ENABLED)
# Tests disabling mlir bridge.
config.disable_mlir_bridge()
self.assertFalse(context.context().config.experimental.enable_mlir_bridge)
self.assertEqual(
context.context().config.experimental.mlir_bridge_rollout,
config_pb2.ConfigProto.Experimental.MLIR_BRIDGE_ROLLOUT_DISABLED)
@reset_eager
def testResetMlirFlags(self):
# Default value of enable_mlir_bridge is false.
self.assertFalse(context.context().config.experimental.enable_mlir_bridge)
self.assertEqual(
context.context().config.experimental.mlir_bridge_rollout,
config_pb2.ConfigProto.Experimental.MLIR_BRIDGE_ROLLOUT_UNSPECIFIED)
@reset_eager
def testEnableMlirGraphOptimization(self):
# Default value of enable_mlir_graph_optimization is false.
self.assertFalse(
context.context().config.experimental.enable_mlir_graph_optimization)
# Tests enabling mlir graph optimization.
config.enable_mlir_graph_optimization()
self.assertTrue(
context.context().config.experimental.enable_mlir_graph_optimization)
# Tests disabling mlir graph optimization.
config.disable_mlir_graph_optimization()
self.assertFalse(
context.context().config.experimental.enable_mlir_graph_optimization)
@test_util.run_gpu_only
@reset_eager
def testJit(self):
self.assertEqual(config.get_optimizer_jit(), '')
# the following function should cause Op fusion to occur. However, there is
# unfortunately no straightforward way to ensure this. We will just have to
# settle for creating a test that can trigger JIT.
@def_function.function
def fun(a, b):
c = a * b
d = c + a
return d
a = constant_op.constant([2., 2.])
b = constant_op.constant([2., 2.])
self.evaluate(fun(a, b))
config.set_optimizer_jit('autoclustering')
self.assertEqual(config.get_optimizer_jit(), 'autoclustering')
self.evaluate(fun(a, b))
config.set_optimizer_jit('')
self.assertEqual(config.get_optimizer_jit(), '')
self.evaluate(fun(a, b))
@parameterized.named_parameters(
('LayoutOptimizer', 'layout_optimizer'),
('ConstantFolding', 'constant_folding'),
('ShapeOptimization', 'shape_optimization'), ('Remapping', 'remapping'),
('ArithmeticOptimization', 'arithmetic_optimization'),
('DependencyOptimization', 'dependency_optimization'),
('LoopOptimization', 'loop_optimization'),
('FunctionOptimization', 'function_optimization'),
('DebugStripper', 'debug_stripper'),
('ScopedAllocatorOptimization', 'scoped_allocator_optimization'),
('ImplementationSelector', 'implementation_selector'),
('AutoMixedPrecision', 'auto_mixed_precision'))
@reset_eager
def testOptimizerToggleOption(self, field):
# TODO(b/128531235): Improve testing of option
options = config.get_optimizer_experimental_options()
self.assertIsNone(options.get(field))
config.set_optimizer_experimental_options({field: True})
options[field] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(context.context().get_optimizer_experimental_options(),
options)
config.set_optimizer_experimental_options({field: False})
options[field] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(context.context().get_optimizer_experimental_options(),
options)
@parameterized.named_parameters(
('DisableModelPruning', 'disable_model_pruning'),
('DisableMetaOptimizer', 'disable_meta_optimizer'))
@reset_eager
def testOptimizerBoolOption(self, field):
# TODO(b/128531235): Improve testing of option
options = config.get_optimizer_experimental_options()
self.assertFalse(options.get(field))
config.set_optimizer_experimental_options({field: True})
options[field] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(context.context().get_optimizer_experimental_options(),
options)
config.set_optimizer_experimental_options({field: False})
options[field] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(context.context().get_optimizer_experimental_options(),
options)
@test_util.run_gpu_only
@reset_eager
def testOptimizerToggleOptionPinToHost(self):
options = config.get_optimizer_experimental_options()
self.assertIsNone(options.get('pin_to_host_optimization'))
@def_function.function
def fun():
op = test_ops.device_placement_op()
return op
# Force optimizer to run for all graphs
config.set_optimizer_experimental_options({'min_graph_nodes': -1})
options['min_graph_nodes'] = -1
# Since pin to host is disabled, the operation should go on GPU
gpu = self.evaluate(fun())
self.assertIn(compat.as_bytes('GPU'), gpu)
config.set_optimizer_experimental_options(
{'pin_to_host_optimization': True})
options['pin_to_host_optimization'] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(context.context().get_optimizer_experimental_options(),
options)
# Since pin to host is enabled, the operation should go on CPU
cpu = self.evaluate(fun())
self.assertIn(compat.as_bytes('CPU'), cpu)
config.set_optimizer_experimental_options(
{'pin_to_host_optimization': False})
options['pin_to_host_optimization'] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(context.context().get_optimizer_experimental_options(),
options)
# Since pin to host is disabled again, the operation should go on GPU
gpu2 = self.evaluate(fun())
self.assertIn(compat.as_bytes('GPU'), gpu2)
class DeviceTest(test.TestCase):
@reset_eager
def testPhysicalDevices(self):
cpus = config.list_physical_devices('CPU')
self.assertGreater(len(cpus), 0)
if test_util.is_gpu_available():
gpus = config.list_physical_devices('GPU')
self.assertGreater(len(gpus), 0)
@reset_eager
def testCpuMultiple(self):
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
context.ensure_initialized()
vcpus = config.list_logical_devices('CPU')
self.assertEqual(len(vcpus), 2)
with ops.device('/device:CPU:0'):
a = constant_op.constant(1.0)
self.evaluate(a)
with ops.device('/device:CPU:1'):
b = constant_op.constant(1.0)
self.evaluate(b)
with ops.device('/device:CPU:2'):
c = constant_op.constant(1.0)
self.evaluate(c)
if test_util.is_gpu_available():
self.assertIn('GPU:0', c.device)
else:
self.assertIn('CPU:0', c.device)
# Ensure we can place ops on each of the device names
for vcpu in vcpus:
with ops.device(vcpu.name):
d = constant_op.constant(1.0)
self.evaluate(d)
# Modifying the CPU configuration is not supported
with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
# Setting the same CPU configuration is fine
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
@test_util.run_gpu_only
@reset_eager
def testGpuNone(self):
config.set_soft_device_placement(False)
gpus = config.list_physical_devices('GPU')
self.assertGreater(len(gpus), 0)
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
self.assertEqual(len(config.get_visible_devices('CPU')), 1)
self.assertGreater(len(config.get_visible_devices('GPU')), 0)
self.assertEqual(len(config.get_visible_devices('XLA_GPU')), 0)
config.set_visible_devices(cpus[0])
self.assertEqual(len(config.get_visible_devices('CPU')), 1)
self.assertEqual(len(config.get_visible_devices('GPU')), 0)
self.assertEqual(len(config.list_logical_devices('XLA_GPU')), 0)
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Could not satisfy'):
with ops.device('/device:GPU:0'):
a = array_ops.identity(1.0)
self.evaluate(a)
# Modifying the visible devices is not supported
with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
config.set_visible_devices(gpus)
# Setting the same visible devices is fine
config.set_visible_devices(cpus[0])
@reset_eager
def testGpuMultiple(self):
gpus = config.list_physical_devices('GPU')
if len(gpus) < 2:
self.skipTest('Need at least 2 GPUs')
context.ensure_initialized()
for i in range(0, len(gpus)):
with ops.device('/device:GPU:' + str(i)):
a = constant_op.constant(1.0)
self.evaluate(a)
with self.assertRaisesRegex(RuntimeError, 'unknown device'):
with ops.device('/device:GPU:' + str(len(gpus))):
a = constant_op.constant(1.0)
self.evaluate(a)
@reset_eager
def testDeviceDetails(self):
(cpu,) = config.list_physical_devices('CPU')
details = config.get_device_details(cpu)
self.assertEqual(details, {})
if not test_util.is_gpu_available():
return
gpus = config.list_physical_devices('GPU')
details = config.get_device_details(gpus[0])
self.assertIsInstance(details['device_name'], str)
self.assertNotEmpty(details['device_name'])
if test.is_built_with_rocm():
# AMD GPUs do not have a compute capability
self.assertNotIn('compute_capability', details)
else:
cc = details['compute_capability']
self.assertIsInstance(cc, tuple)
major, minor = cc
self.assertGreater(major, 0)
self.assertGreaterEqual(minor, 0)
# Test GPU returned from get_visible_devices
if len(gpus) > 2:
config.set_visible_devices(gpus[1], 'GPU')
(visible_gpu,) = config.get_visible_devices('GPU')
details = config.get_device_details(visible_gpu)
self.assertIsInstance(details['device_name'], str)
@reset_eager
def testDeviceDetailsErrors(self):
logical_devices = config.list_logical_devices()
with self.assertRaisesRegex(ValueError,
'must be a tf.config.PhysicalDevice'):
config.get_device_details(logical_devices[0])
phys_dev = context.PhysicalDevice('/physical_device:CPU:100', 'CPU')
with self.assertRaisesRegex(
ValueError, 'The PhysicalDevice must be one obtained from '
'calling `tf.config.list_physical_devices`'):
config.get_device_details(phys_dev)
@test_util.run_gpu_only
@reset_eager
def testVirtualGpu(self):
config.set_soft_device_placement(False)
gpus = config.list_physical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
self.assertIsNone(config.get_logical_device_configuration(gpus[-1]))
config.set_logical_device_configuration(gpus[-1], [
context.LogicalDeviceConfiguration(memory_limit=10),
context.LogicalDeviceConfiguration(memory_limit=10)
])
self.assertEqual(len(config.get_logical_device_configuration(gpus[-1])), 2)
logical_gpus = config.list_logical_devices('GPU')
    self.assertEqual(len(logical_gpus), len(gpus) + 1)
for i in range(0, len(logical_gpus)):
with ops.device('/device:GPU:' + str(i)):
a = array_ops.identity(1.0)
self.evaluate(a)
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Could not satisfy'):
with ops.device('/device:GPU:' + str(len(logical_gpus))):
a = array_ops.identity(1.0)
self.evaluate(a)
# Modifying the GPU configuration is not supported
with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
config.set_logical_device_configuration(gpus[-1], [
context.LogicalDeviceConfiguration(memory_limit=20),
context.LogicalDeviceConfiguration(memory_limit=20)
])
with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
config.set_logical_device_configuration(gpus[-1], [
context.LogicalDeviceConfiguration(memory_limit=10),
context.LogicalDeviceConfiguration(memory_limit=10),
context.LogicalDeviceConfiguration(memory_limit=10)
])
# Setting the same GPU configuration is fine
config.set_logical_device_configuration(gpus[-1], [
context.LogicalDeviceConfiguration(memory_limit=10),
context.LogicalDeviceConfiguration(memory_limit=10)
])
@test_util.run_gpu_only
@reset_eager
def testGpuGrowth(self):
gpus = config.list_physical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
self.assertIsNone(config.get_memory_growth(gpus[-1]))
for gpu in gpus:
config.set_memory_growth(gpu, True)
c = context.context().config
self.assertTrue(c.gpu_options.allow_growth)
logical_gpus = config.list_logical_devices('GPU')
    self.assertEqual(len(logical_gpus), len(gpus))
# Modifying the GPU configuration is not supported
with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
for gpu in gpus:
config.set_memory_growth(gpu, False)
# Setting the same GPU configuration is fine
for gpu in gpus:
config.set_memory_growth(gpu, True)
@test_util.run_gpu_or_tpu
@reset_eager
def testGetMemoryInfoBasic(self, device_type):
with ops.device(f'{device_type}:0'):
device = array_ops.zeros([]).backing_device
info = config.get_memory_info(device)
self.assertGreater(info['current'], 0)
self.assertGreater(info['peak'], 0)
self.assertEqual(info.keys(), {'current', 'peak'})
self.assertEqual(config.get_memory_usage(device), info['current'])
@test_util.run_gpu_or_tpu
@reset_eager
def testGetMemoryUsageSubstring(self, device_type):
info = config.get_memory_info(f'{device_type}:0')
self.assertGreater(info['current'], 0)
@reset_eager
def testGetMemoryInfoCPU(self):
if test_util.IsMklEnabled():
# TODO(gzmkl) work with Google team to address design issue in allocator.h
self.skipTest('MklCPUAllocator does not throw exception. So skip test.')
with self.assertRaisesRegex(ValueError, 'Allocator stats not available'):
config.get_memory_info('CPU:0')
with self.assertRaisesRegex(ValueError, 'Allocator stats not available'):
config.get_memory_usage('CPU:0')
@reset_eager
def testGetMemoryInfoUnknownDevice(self):
with self.assertRaisesRegex(ValueError, 'No matching devices found'):
config.get_memory_info('unknown_device:0')
with self.assertRaisesRegex(ValueError, 'No matching devices found'):
config.get_memory_usage('unknown_device:0')
@reset_eager
def testGetMemoryInfoInvalidDeviceString(self):
with self.assertRaisesRegex(ValueError, 'Failed parsing device name'):
context.context().get_memory_info('GPU')
with self.assertRaisesRegex(ValueError, 'Failed parsing device name'):
context.context().get_memory_info('GPU:')
with self.assertRaisesRegex(ValueError, 'Failed parsing device name'):
context.context().get_memory_info('GPU:CPU')
@test_util.run_gpu_or_tpu
@reset_eager
def testPeakMemoryUsage(self, device_type):
device = f'{device_type}:0'
with ops.device(device):
x1 = array_ops.zeros((1000, 1000))
peak1 = config.get_memory_info(device)['peak']
self.assertGreaterEqual(peak1, 4 * 1000 * 1000)
with ops.device(device):
x2 = array_ops.ones((1000, 1000))
peak2 = config.get_memory_info(device)['peak']
self.assertGreaterEqual(peak2, peak1 + 4 * 1000 * 1000)
del x1, x2 # With CPython, causes tensor memory to be immediately freed
peak3 = config.get_memory_info(device)['peak']
self.assertGreaterEqual(peak3, peak2)
self.assertGreaterEqual(peak3, config.get_memory_info(device)['current'])
@test_util.run_gpu_or_tpu
@reset_eager
def testResetMemoryStats(self, device_type):
device = f'{device_type}:0'
with ops.device(device):
x = array_ops.zeros((1000, 1000), dtype=dtypes.float32)
config.reset_memory_stats(device)
info1 = config.get_memory_info(device)
self.assertGreaterEqual(info1['peak'], 4 * 1000 * 1000)
self.assertGreaterEqual(info1['peak'], info1['current'])
self.assertGreater(info1['current'], 0)
del x # With CPython, causes tensor memory to be immediately freed
config.reset_memory_stats(device)
info2 = config.get_memory_info(device)
self.assertLess(info2['peak'], info1['peak'])
@reset_eager
def testResetMemoryStatsCPU(self):
if test_util.IsMklEnabled():
# TODO(gzmkl) work with Google team to address design issue in allocator.h
self.skipTest('MklCPUAllocator does not throw exception. So skip test.')
with self.assertRaisesRegex(ValueError, 'Cannot reset memory stats'):
config.reset_memory_stats('CPU:0')
@reset_eager
def testResetMemoryStatsUnknownDevice(self):
with self.assertRaisesRegex(ValueError, 'No matching devices found'):
config.reset_memory_stats('unknown_device:0')
@reset_eager
def testResetMemoryStatsInvalidDeviceString(self):
with self.assertRaisesRegex(ValueError, 'Failed parsing device name'):
context.context().reset_memory_stats('GPU')
with self.assertRaisesRegex(ValueError, 'Failed parsing device name'):
context.context().reset_memory_stats('GPU:')
with self.assertRaisesRegex(ValueError, 'Failed parsing device name'):
context.context().reset_memory_stats('GPU:CPU')
@test_util.run_gpu_only
@reset_eager
def testGpuInvalidConfig(self):
gpus = config.list_physical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
if len(gpus) > 1:
      # Reading the config should fail if the other GPUs are not configured
      # the same way
config.set_memory_growth(gpus[0], True)
with self.assertRaisesRegex(ValueError, 'cannot differ'):
c = context.context().config
# If we limit visibility to GPU 0, growth is fine
config.set_visible_devices(gpus[0], 'GPU')
c = context.context().config
self.assertTrue(c.gpu_options.allow_growth)
# Default setting for second GPU is False and works if we set visibility
config.set_visible_devices(gpus[1], 'GPU')
c = context.context().config
self.assertFalse(c.gpu_options.allow_growth)
# Growth now fails because all the GPUs are visible and not the same
config.set_visible_devices(gpus, 'GPU')
with self.assertRaisesRegex(ValueError, 'cannot differ'):
c = context.context().config
for gpu in gpus:
config.set_memory_growth(gpu, True)
c = context.context().config
self.assertTrue(c.gpu_options.allow_growth)
with self.assertRaisesRegex(ValueError, 'memory limit'):
config.set_logical_device_configuration(gpus[-1], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
self.assertIsNone(config.get_logical_device_configuration(gpus[-1]))
config.set_logical_device_configuration(gpus[-1], [
context.LogicalDeviceConfiguration(memory_limit=10),
context.LogicalDeviceConfiguration(memory_limit=10)
])
c = context.context().config
self.assertFalse(c.gpu_options.allow_growth)
with self.assertRaisesRegex(ValueError, 'virtual devices'):
config.set_memory_growth(gpus[-1], False)
@test_util.run_gpu_only
@reset_eager
def testRemote(self):
gpus = config.list_logical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
context.ensure_initialized()
gpus = config.list_logical_devices('GPU')
self.assertNotEqual(len(gpus), 0)
for gpu in gpus:
self.assertIsNotNone(gpu.name)
context.ensure_initialized()
job_name = 'test'
cluster_def = cluster_pb2.ClusterDef()
job_def = cluster_def.job.add()
job_def.name = job_name
job_def.tasks[0] = 'localhost:0'
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def, job_name=job_name, task_index=0, protocol='grpc')
context.set_server_def(server_def)
gpus = config.list_logical_devices('GPU')
for gpu in gpus:
self.assertIsNotNone(gpu.name)
@reset_eager
def testV1CompatibilityDummyInvisibleDeviceList(self):
gpus = config.list_physical_devices('GPU')
if gpus:
self.skipTest('Test requires no GPUs')
# Ensure GPU options left untouched on CPU only environments
context.context()._physical_devices = None
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list='0'))
new_config = context.context().config
self.assertEqual(new_config.gpu_options.visible_device_list, '0')
@test_util.run_gpu_only
@reset_eager
def testV1Compatibility(self):
# Ensure we set 1 CPU by default
context.context()._config = config_pb2.ConfigProto()
new_config = context.context().config
self.assertEqual(new_config.device_count['CPU'], 1)
context.context()._physical_devices = None
# Ensure CPU is split
context.context()._config = config_pb2.ConfigProto(device_count={'CPU': 2})
new_config = context.context().config
self.assertEqual(new_config.device_count['CPU'], 2)
context.context()._physical_devices = None
# Handle empty visible device list
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list=''))
gpus = config.list_physical_devices('GPU')
gpu_count = len(gpus)
new_config = context.context().config
self.assertEqual(new_config.gpu_options.visible_device_list,
','.join(str(i) for i in range(len(gpus))))
context.context()._physical_devices = None
# Handle invalid visible device list
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(visible_device_list=str(gpu_count)))
with self.assertRaisesRegex(ValueError, 'Invalid visible device index'):
gpus = config.list_physical_devices('GPU')
new_config = context.context().config
context.context()._physical_devices = None
# Handle single visible device list
context.context()._config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(
visible_device_list=str(gpu_count - 1)))
gpus = config.list_physical_devices('GPU')
new_config = context.context().config
self.assertEqual(new_config.gpu_options.visible_device_list,
str(gpu_count - 1))
context.context()._physical_devices = None
def testConfigureCollectiveOps(self):
context.context().configure_collective_ops(
collective_leader='/job:worker/replica:0/task:0',
scoped_allocator_enabled_ops=('CollectiveReduce',),
use_nccl_communication=False,
device_filters=['/job:worker/task:1'])
new_config = context.context().config
# Verify group leader
self.assertEqual('/job:worker/replica:0/task:0',
new_config.experimental.collective_group_leader)
# Verify device filters.
self.assertEqual(['/job:worker/task:1'], new_config.device_filters)
# Verify rewrite options.
new_rewrite_options = new_config.graph_options.rewrite_options
self.assertEqual(rewriter_config_pb2.RewriterConfig.ON,
new_rewrite_options.scoped_allocator_optimization)
self.assertEqual(['CollectiveReduce'],
new_rewrite_options.scoped_allocator_opts.enable_op)
def testDeterminism(self):
# This does not test any ops are deterministic, because that is tested by
# many kernel tests.
try:
config.disable_op_determinism()
self.assertFalse(config.is_op_determinism_enabled())
config.enable_op_determinism()
self.assertTrue(config.is_op_determinism_enabled())
finally:
config.disable_op_determinism()
class TensorFloat32Test(test.TestCase):
def tearDown(self):
super(TensorFloat32Test, self).tearDown()
config.enable_tensor_float_32_execution(True)
def test_tensor_float_32_global_variable(self):
self.assertTrue(config.tensor_float_32_execution_enabled())
self.assertTrue(test_ops.is_tensor_float32_enabled())
config.enable_tensor_float_32_execution(False)
self.assertFalse(config.tensor_float_32_execution_enabled())
self.assertFalse(test_ops.is_tensor_float32_enabled())
config.enable_tensor_float_32_execution(True)
self.assertTrue(config.tensor_float_32_execution_enabled())
self.assertTrue(test_ops.is_tensor_float32_enabled())
def _skip_if_tensor_float_32_unsupported(self):
if not test_util.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(8, 0)):
self.skipTest('TensorFloat-32 requires an NVIDIA GPU with compute '
'capability of at least 8.0')
def test_tensor_float_32_enabled(self):
self._skip_if_tensor_float_32_unsupported()
self.assertTrue(config.tensor_float_32_execution_enabled())
x = array_ops.fill((8, 8), 1 + 2**-20)
y = array_ops.ones((8, 8))
out = math_ops.matmul(x, y)
# In TensorFloat-32, each element of x is rounded to 1, so the output will
# be 8s.
expected = array_ops.fill((8, 8), 8)
self.assertAllEqual(out, expected)
def test_tensor_float_32_disabled(self):
self._skip_if_tensor_float_32_unsupported()
self.assertTrue(config.tensor_float_32_execution_enabled())
config.enable_tensor_float_32_execution(False)
self.assertFalse(config.tensor_float_32_execution_enabled())
x = array_ops.fill((8, 8), 1 + 2**-20)
y = array_ops.ones((8, 8))
out = math_ops.matmul(x, y)
expected = array_ops.fill((8, 8), 8 * (1 + 2**-20))
self.assertAllEqual(out, expected)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
|
#!/usr/bin/env python3
#--------------------------------------------------------------------------
# SBXScan - Sequenced Box container Scanner
#
# Created: 06/03/2017
#
# Copyright (C) 2017 Marco Pontello - http://mark0.net/
#
# Licence:
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#--------------------------------------------------------------------------
import os
import sys
import argparse
import binascii
from time import sleep, time
import sqlite3
import seqbox
PROGRAM_VER = "1.0.1"
def get_cmdline():
"""Evaluate command line parameters, usage & help."""
parser = argparse.ArgumentParser(
description=("scan files/devices for SBx blocks and create a "+
"detailed report plus an index to be used with "+
"SBXScan"),
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prefix_chars='-', fromfile_prefix_chars='@')
parser.add_argument("-v", "--version", action='version',
version='SeqBox - Sequenced Box container - ' +
'Scanner v%s - (C) 2017 by M.Pontello' % PROGRAM_VER)
parser.add_argument("filename", action="store", nargs="+",
help="file(s) to scan")
parser.add_argument("-d", "--database", action="store", dest="dbfilename",
metavar="filename",
help="where to save recovery info",
default="sbxscan.db3")
parser.add_argument("-o", "--offset", type=int, default=0,
help=("offset from the start"), metavar="n")
parser.add_argument("-st", "--step", type=int, default=0,
help=("scan step"), metavar="n")
parser.add_argument("-b", "--buffer", type=int, default=1024,
help=("read buffer in KB"), metavar="n")
parser.add_argument("-sv", "--sbxver", type=int, default=1,
help="SBX blocks version to search for", metavar="n")
parser.add_argument("-p", "--password", type=str, default="",
help="encrypt with password", metavar="pass")
res = parser.parse_args()
return res
def errexit(errlev=1, mess=""):
"""Display an error and exit."""
if mess != "":
sys.stderr.write("%s: error: %s\n" %
(os.path.split(sys.argv[0])[1], mess))
sys.exit(errlev)
def getFileSize(filename):
"""Calc file size - works on devices too"""
ftemp = os.open(filename, os.O_RDONLY)
try:
return os.lseek(ftemp, 0, os.SEEK_END)
finally:
os.close(ftemp)
def main():
cmdline = get_cmdline()
filenames = []
for filename in cmdline.filename:
if os.path.exists(filename):
filenames.append(filename)
else:
errexit(1, "file '%s' not found!" % (filename))
filenames = sorted(set(filenames), key=os.path.getsize)
dbfilename = cmdline.dbfilename
if os.path.isdir(dbfilename):
dbfilename = os.path.join(dbfilename, "sbxscan.db3")
#create database tables
print("creating '%s' database..." % (dbfilename))
if os.path.exists(dbfilename):
os.remove(dbfilename)
conn = sqlite3.connect(dbfilename)
c = conn.cursor()
c.execute("CREATE TABLE sbx_source (id INTEGER, name TEXT)")
c.execute("CREATE TABLE sbx_meta (uid INTEGER, size INTEGER, name TEXT, sbxname TEXT, datetime INTEGER, sbxdatetime INTEGER, fileid INTEGER)")
c.execute("CREATE TABLE sbx_uids (uid INTEGER, ver INTEGER)")
c.execute("CREATE TABLE sbx_blocks (uid INTEGER, num INTEGER, fileid INTEGER, pos INTEGER )")
c.execute("CREATE INDEX blocks ON sbx_blocks (uid, num, pos)")
#scan all the files/devices
sbx = seqbox.SbxBlock(ver=cmdline.sbxver,pswd=cmdline.password)
offset = cmdline.offset
filenum = 0
uids = {}
magic = b'SBx' + bytes([cmdline.sbxver])
if cmdline.password:
magic = seqbox.EncDec(cmdline.password, len(magic)).xor(magic)
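    # Added note: for password-protected containers the block header is
    # XOR-obfuscated as well, so the same transform is applied to the search
    # pattern and the raw stream can be scanned without decoding it first.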
scanstep = cmdline.step
if scanstep == 0:
scanstep = sbx.blocksize
for filename in filenames:
filenum += 1
print("scanning file/device '%s' (%i/%i)..." %
(filename, filenum, len(filenames)))
filesize = getFileSize(filename)
c.execute("INSERT INTO sbx_source (id, name) VALUES (?, ?)",
(filenum, filename))
conn.commit()
fin = open(filename, "rb", buffering=cmdline.buffer*1024)
blocksfound = 0
blocksmetafound = 0
updatetime = time() - 1
starttime = time()
docommit = False
for pos in range(offset, filesize, scanstep):
fin.seek(pos, 0)
buffer = fin.read(sbx.blocksize)
#check for magic
if buffer[:4] == magic:
#check for valid block
try:
sbx.decode(buffer)
#update uids table & list
                    if sbx.uid not in uids:
uids[sbx.uid] = True
c.execute(
"INSERT INTO sbx_uids (uid, ver) VALUES (?, ?)",
(int.from_bytes(sbx.uid, byteorder='big'),
sbx.ver))
docommit = True
#update blocks table
blocksfound+=1
c.execute(
"INSERT INTO sbx_blocks (uid, num, fileid, pos) VALUES (?, ?, ?, ?)",
(int.from_bytes(sbx.uid, byteorder='big'),
sbx.blocknum, filenum, pos))
docommit = True
#update meta table
if sbx.blocknum == 0:
blocksmetafound += 1
if not "filedatetime" in sbx.metadata:
sbx.metadata["filedatetime"] = -1
sbx.metadata["sbxdatetime"] = -1
c.execute(
"INSERT INTO sbx_meta (uid , size, name, sbxname, datetime, sbxdatetime, fileid) VALUES (?, ?, ?, ?, ?, ?, ?)",
(int.from_bytes(sbx.uid, byteorder='big'),
sbx.metadata["filesize"],
sbx.metadata["filename"], sbx.metadata["sbxname"],
sbx.metadata["filedatetime"], sbx.metadata["sbxdatetime"],
filenum))
docommit = True
except seqbox.SbxDecodeError:
pass
#status update
if (time() > updatetime) or (pos >= filesize - scanstep):
etime = (time()-starttime)
if etime == 0:
etime = 1
print("%5.1f%% blocks: %i - meta: %i - files: %i - %.2fMB/s" %
(pos*100.0/(filesize-scanstep), blocksfound,
blocksmetafound, len(uids), pos/(1024*1024)/etime),
end = "\r", flush=True)
if docommit:
conn.commit()
docommit = False
updatetime = time() + .5
fin.close()
print()
c.close()
conn.close()
print("scan completed!")
if __name__ == '__main__':
main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class Site(Resource):
"""A web app, a mobile app backend, or an API app.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:param name: Resource Name.
:type name: str
:param kind: Kind of resource.
:type kind: str
:param location: Resource Location.
:type location: str
:param type: Resource type.
:type type: str
:param tags: Resource tags.
:type tags: dict
:ivar state: Current state of the app.
:vartype state: str
:ivar host_names: Hostnames associated with the app.
:vartype host_names: list of str
:ivar repository_site_name: Name of the repository site.
:vartype repository_site_name: str
:ivar usage_state: State indicating whether the app has exceeded its quota
usage. Read-only. Possible values include: 'Normal', 'Exceeded'
:vartype usage_state: str or :class:`UsageState
<azure.mgmt.web.models.UsageState>`
:param enabled: <code>true</code> if the app is enabled; otherwise,
<code>false</code>. Setting this value to false disables the app (takes
the app offline).
:type enabled: bool
    :ivar enabled_host_names: Enabled hostnames for the app. Hostnames need to
be assigned (see HostNames) AND enabled. Otherwise,
the app is not served on those hostnames.
:vartype enabled_host_names: list of str
:ivar availability_state: Management information availability state for
the app. Possible values include: 'Normal', 'Limited',
'DisasterRecoveryMode'
:vartype availability_state: str or :class:`SiteAvailabilityState
<azure.mgmt.web.models.SiteAvailabilityState>`
:param host_name_ssl_states: Hostname SSL states are used to manage the
SSL bindings for app's hostnames.
:type host_name_ssl_states: list of :class:`HostNameSslState
<azure.mgmt.web.models.HostNameSslState>`
:param server_farm_id: Resource ID of the associated App Service plan,
formatted as:
"/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
:type server_farm_id: str
:param reserved: <code>true</code> if reserved; otherwise,
<code>false</code>. Default value: False .
:type reserved: bool
:ivar last_modified_time_utc: Last time the app was modified, in UTC.
Read-only.
:vartype last_modified_time_utc: datetime
:param site_config: Configuration of the app.
:type site_config: :class:`SiteConfig <azure.mgmt.web.models.SiteConfig>`
:ivar traffic_manager_host_names: Azure Traffic Manager hostnames
associated with the app. Read-only.
:vartype traffic_manager_host_names: list of str
:ivar premium_app_deployed: Indicates whether app is deployed as a premium
app.
:vartype premium_app_deployed: bool
:param scm_site_also_stopped: <code>true</code> to stop SCM (KUDU) site
when the app is stopped; otherwise, <code>false</code>. The default is
<code>false</code>. Default value: False .
:type scm_site_also_stopped: bool
:ivar target_swap_slot: Specifies which deployment slot this app will swap
into. Read-only.
:vartype target_swap_slot: str
:param hosting_environment_profile: App Service Environment to use for the
app.
:type hosting_environment_profile: :class:`HostingEnvironmentProfile
<azure.mgmt.web.models.HostingEnvironmentProfile>`
:param micro_service: Micro services like apps, logic apps. Default value:
"WebSites" .
:type micro_service: str
:param gateway_site_name: Name of gateway app associated with the app.
:type gateway_site_name: str
:param client_affinity_enabled: <code>true</code> to enable client
affinity; <code>false</code> to stop sending session affinity cookies,
which route client requests in the same session to the same instance.
Default is <code>true</code>.
:type client_affinity_enabled: bool
:param client_cert_enabled: <code>true</code> to enable client certificate
authentication (TLS mutual authentication); otherwise, <code>false</code>.
Default is <code>false</code>.
:type client_cert_enabled: bool
:param host_names_disabled: <code>true</code> to disable the public
hostnames of the app; otherwise, <code>false</code>.
If <code>true</code>, the app is only accessible via API management
process.
:type host_names_disabled: bool
:ivar outbound_ip_addresses: List of IP addresses that the app uses for
outbound connections (e.g. database access). Read-only.
:vartype outbound_ip_addresses: str
:param container_size: Size of the function container.
:type container_size: int
:param daily_memory_time_quota: Maximum allowed daily memory-time quota
(applicable on dynamic apps only).
:type daily_memory_time_quota: int
    :ivar suspended_till: Time until which the app is suspended if the
     memory-time quota is exceeded.
:vartype suspended_till: datetime
:ivar max_number_of_workers: Maximum number of workers.
This only applies to Functions container.
:vartype max_number_of_workers: int
:param cloning_info: If specified during app creation, the app is cloned
from a source app.
:type cloning_info: :class:`CloningInfo
<azure.mgmt.web.models.CloningInfo>`
:ivar resource_group: Name of the resource group the app belongs to.
Read-only.
:vartype resource_group: str
:ivar is_default_container: <code>true</code> if the app is a default
container; otherwise, <code>false</code>.
:vartype is_default_container: bool
:ivar default_host_name: Default hostname of the app. Read-only.
:vartype default_host_name: str
:ivar slot_swap_status: Status of the last deployment slot swap operation.
:vartype slot_swap_status: :class:`SlotSwapStatus
<azure.mgmt.web.models.SlotSwapStatus>`
"""
_validation = {
'id': {'readonly': True},
'location': {'required': True},
'state': {'readonly': True},
'host_names': {'readonly': True},
'repository_site_name': {'readonly': True},
'usage_state': {'readonly': True},
'enabled_host_names': {'readonly': True},
'availability_state': {'readonly': True},
'last_modified_time_utc': {'readonly': True},
'traffic_manager_host_names': {'readonly': True},
'premium_app_deployed': {'readonly': True},
'target_swap_slot': {'readonly': True},
'outbound_ip_addresses': {'readonly': True},
'suspended_till': {'readonly': True},
'max_number_of_workers': {'readonly': True},
'resource_group': {'readonly': True},
'is_default_container': {'readonly': True},
'default_host_name': {'readonly': True},
'slot_swap_status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'state': {'key': 'properties.state', 'type': 'str'},
'host_names': {'key': 'properties.hostNames', 'type': '[str]'},
'repository_site_name': {'key': 'properties.repositorySiteName', 'type': 'str'},
'usage_state': {'key': 'properties.usageState', 'type': 'UsageState'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'enabled_host_names': {'key': 'properties.enabledHostNames', 'type': '[str]'},
'availability_state': {'key': 'properties.availabilityState', 'type': 'SiteAvailabilityState'},
'host_name_ssl_states': {'key': 'properties.hostNameSslStates', 'type': '[HostNameSslState]'},
'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
'reserved': {'key': 'properties.reserved', 'type': 'bool'},
'last_modified_time_utc': {'key': 'properties.lastModifiedTimeUtc', 'type': 'iso-8601'},
'site_config': {'key': 'properties.siteConfig', 'type': 'SiteConfig'},
'traffic_manager_host_names': {'key': 'properties.trafficManagerHostNames', 'type': '[str]'},
'premium_app_deployed': {'key': 'properties.premiumAppDeployed', 'type': 'bool'},
'scm_site_also_stopped': {'key': 'properties.scmSiteAlsoStopped', 'type': 'bool'},
'target_swap_slot': {'key': 'properties.targetSwapSlot', 'type': 'str'},
'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
'micro_service': {'key': 'properties.microService', 'type': 'str'},
'gateway_site_name': {'key': 'properties.gatewaySiteName', 'type': 'str'},
'client_affinity_enabled': {'key': 'properties.clientAffinityEnabled', 'type': 'bool'},
'client_cert_enabled': {'key': 'properties.clientCertEnabled', 'type': 'bool'},
'host_names_disabled': {'key': 'properties.hostNamesDisabled', 'type': 'bool'},
'outbound_ip_addresses': {'key': 'properties.outboundIpAddresses', 'type': 'str'},
'container_size': {'key': 'properties.containerSize', 'type': 'int'},
'daily_memory_time_quota': {'key': 'properties.dailyMemoryTimeQuota', 'type': 'int'},
'suspended_till': {'key': 'properties.suspendedTill', 'type': 'iso-8601'},
'max_number_of_workers': {'key': 'properties.maxNumberOfWorkers', 'type': 'int'},
'cloning_info': {'key': 'properties.cloningInfo', 'type': 'CloningInfo'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'is_default_container': {'key': 'properties.isDefaultContainer', 'type': 'bool'},
'default_host_name': {'key': 'properties.defaultHostName', 'type': 'str'},
'slot_swap_status': {'key': 'properties.slotSwapStatus', 'type': 'SlotSwapStatus'},
}
def __init__(self, location, name=None, kind=None, type=None, tags=None, enabled=None, host_name_ssl_states=None, server_farm_id=None, reserved=False, site_config=None, scm_site_also_stopped=False, hosting_environment_profile=None, micro_service="WebSites", gateway_site_name=None, client_affinity_enabled=None, client_cert_enabled=None, host_names_disabled=None, container_size=None, daily_memory_time_quota=None, cloning_info=None):
super(Site, self).__init__(name=name, kind=kind, location=location, type=type, tags=tags)
self.state = None
self.host_names = None
self.repository_site_name = None
self.usage_state = None
self.enabled = enabled
self.enabled_host_names = None
self.availability_state = None
self.host_name_ssl_states = host_name_ssl_states
self.server_farm_id = server_farm_id
self.reserved = reserved
self.last_modified_time_utc = None
self.site_config = site_config
self.traffic_manager_host_names = None
self.premium_app_deployed = None
self.scm_site_also_stopped = scm_site_also_stopped
self.target_swap_slot = None
self.hosting_environment_profile = hosting_environment_profile
self.micro_service = micro_service
self.gateway_site_name = gateway_site_name
self.client_affinity_enabled = client_affinity_enabled
self.client_cert_enabled = client_cert_enabled
self.host_names_disabled = host_names_disabled
self.outbound_ip_addresses = None
self.container_size = container_size
self.daily_memory_time_quota = daily_memory_time_quota
self.suspended_till = None
self.max_number_of_workers = None
self.cloning_info = cloning_info
self.resource_group = None
self.is_default_container = None
self.default_host_name = None
self.slot_swap_status = None
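# Illustrative sketch (assumption, not part of the generated model): a minimal Site
# payload as it might be built for a create-or-update call. The App Service plan
# resource ID below is a hypothetical placeholder.
def _example_site_payload():
    return Site(
        location='westeurope',
        server_farm_id='<app-service-plan-resource-id>',
        client_affinity_enabled=True,
    )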
|
|
# -*- coding: utf-8 -*-
import os
from babelfish import Language
import pytest
from vcr import VCR
from subliminal.exceptions import ConfigurationError
from subliminal.providers.opensubtitles import OpenSubtitlesProvider, OpenSubtitlesSubtitle, Unauthorized
vcr = VCR(path_transformer=lambda path: path + '.yaml',
record_mode=os.environ.get('VCR_RECORD_MODE', 'once'),
match_on=['method', 'scheme', 'host', 'port', 'path', 'query', 'body'],
cassette_library_dir=os.path.join('tests', 'cassettes', 'opensubtitles'))
def test_get_matches_movie_hash(movies):
subtitle = OpenSubtitlesSubtitle(Language('deu'), False, None, '1953771409', 'moviehash', 'movie',
'5b8f8f4e41ccb21e', 'Man of Steel',
'Man.of.Steel.German.720p.BluRay.x264-EXQUiSiTE', 2013, 770828, 0, 0, None)
matches = subtitle.get_matches(movies['man_of_steel'])
assert matches == {'title', 'year', 'video_codec', 'imdb_id', 'hash', 'resolution', 'format', 'hearing_impaired'}
def test_get_matches_episode(episodes):
subtitle = OpenSubtitlesSubtitle(Language('ell'), False, None, '1953579014', 'fulltext', 'episode',
'0', '"Game of Thrones" Mhysa',
' Game.of.Thrones.S03E10.HDTV.XviD-AFG', 2013, 2178796, 3, 10, None)
matches = subtitle.get_matches(episodes['got_s03e10'])
assert matches == {'imdb_id', 'series', 'year', 'episode', 'season', 'title', 'hearing_impaired'}
def test_get_matches_imdb_id(movies):
subtitle = OpenSubtitlesSubtitle(Language('fra'), True, None, '1953767650', 'imdbid', 'movie', 0, 'Man of Steel',
'man.of.steel.2013.720p.bluray.x264-felony', 2013, 770828, 0, 0, None)
matches = subtitle.get_matches(movies['man_of_steel'], hearing_impaired=True)
assert matches == {'title', 'year', 'video_codec', 'imdb_id', 'resolution', 'format', 'release_group',
'hearing_impaired'}
def test_get_matches_no_match(episodes):
subtitle = OpenSubtitlesSubtitle(Language('fra'), False, None, '1953767650', 'imdbid', 'movie', 0, 'Man of Steel',
'man.of.steel.2013.720p.bluray.x264-felony', 2013, 770828, 0, 0, None)
matches = subtitle.get_matches(episodes['got_s03e10'], hearing_impaired=True)
assert matches == set()
def test_configuration_error_no_username():
with pytest.raises(ConfigurationError):
OpenSubtitlesProvider(password='subliminal')
def test_configuration_error_no_password():
with pytest.raises(ConfigurationError):
OpenSubtitlesProvider(username='subliminal')
@pytest.mark.integration
@vcr.use_cassette
def test_login():
provider = OpenSubtitlesProvider('python-subliminal', 'subliminal')
assert provider.token is None
provider.initialize()
assert provider.token is not None
@pytest.mark.integration
@vcr.use_cassette
def test_login_bad_password():
provider = OpenSubtitlesProvider('python-subliminal', 'lanimilbus')
with pytest.raises(Unauthorized):
provider.initialize()
@pytest.mark.integration
@vcr.use_cassette
def test_logout():
provider = OpenSubtitlesProvider('python-subliminal', 'subliminal')
provider.initialize()
provider.terminate()
assert provider.token is None
@pytest.mark.integration
@vcr.use_cassette
def test_no_operation():
with OpenSubtitlesProvider() as provider:
provider.no_operation()
@pytest.mark.integration
@vcr.use_cassette
def test_query_not_enough_information():
languages = {Language('eng')}
with OpenSubtitlesProvider() as provider:
with pytest.raises(ValueError) as excinfo:
provider.query(languages)
assert str(excinfo.value) == 'Not enough information'
@pytest.mark.integration
@vcr.use_cassette
def test_query_query_movie(movies):
video = movies['man_of_steel']
languages = {Language('fra')}
expected_subtitles = {'1953767244', '1953770526', '1953150292', '1953647841', '1953767650'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.query(languages, query=video.title)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_query_query_episode(episodes):
video = episodes['dallas_2012_s01e03']
languages = {Language('fra')}
expected_subtitles = {'1953147577'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.query(languages, query=video.series, season=video.season, episode=video.episode)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_query_imdb_id(movies):
video = movies['man_of_steel']
languages = {Language('deu')}
expected_subtitles = {'1953771409', '1953768982'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.query(languages, imdb_id=video.imdb_id)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_query_hash_size(movies):
video = movies['man_of_steel']
languages = {Language('eng')}
expected_subtitles = {'1953767678', '1953800590', '1953766751', '1953621994', '1953766883', '1953766882',
'1953767330', '1953766488', '1953766413', '1953766280', '1953767141', '1953766279',
'1953785668', '1953767218'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.query(languages, hash=video.hashes['opensubtitles'], size=video.size)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_query_wrong_hash_wrong_size():
languages = {Language('eng')}
with OpenSubtitlesProvider() as provider:
subtitles = provider.query(languages, hash='123456787654321', size=99999)
assert len(subtitles) == 0
@pytest.mark.integration
@vcr.use_cassette
def test_query_query_season_episode(episodes):
video = episodes['bbt_s07e05']
languages = {Language('deu')}
expected_subtitles = {'1953771908'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.query(languages, query=video.series, season=video.season, episode=video.episode)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_movie(movies):
video = movies['man_of_steel']
languages = {Language('deu'), Language('fra')}
expected_subtitles = {'1953767244', '1953647841', '1953767650', '1953771409', '1953768982', '1953770526'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_movie_no_hash(movies):
video = movies['enders_game']
languages = {Language('deu')}
expected_subtitles = {'1954157398', '1954156756', '1954443141'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_episode(episodes):
video = episodes['marvels_agents_of_shield_s02e06']
languages = {Language('hun')}
expected_subtitles = {'1954464403', '1954454544'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_download_subtitle(movies):
video = movies['man_of_steel']
languages = {Language('deu'), Language('fra')}
with OpenSubtitlesProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
provider.download_subtitle(subtitles[0])
assert subtitles[0].content is not None
assert subtitles[0].is_valid() is True
assert subtitles[0].encoding == 'cp1252'
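# Illustrative sketch (assumption, mirroring the tests above): the typical provider
# workflow of listing and downloading subtitles for a video. The credentials and the
# video object are hypothetical placeholders.
def _example_opensubtitles_workflow(video):
    languages = {Language('eng')}
    with OpenSubtitlesProvider('my-username', 'my-password') as provider:
        subtitles = provider.list_subtitles(video, languages)
        if subtitles:
            provider.download_subtitle(subtitles[0])
        return subtitles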
|
|
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises
import boto3
import boto
from boto.exception import EC2ResponseError
import sure # noqa
from moto import mock_ec2, mock_ec2_deprecated
SAMPLE_DOMAIN_NAME = u'example.com'
SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7']
@mock_ec2_deprecated
def test_dhcp_options_associate():
""" associate dhcp option """
conn = boto.connect_vpc('the_key', 'the_secret')
dhcp_options = conn.create_dhcp_options(
SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS)
vpc = conn.create_vpc("10.0.0.0/16")
rval = conn.associate_dhcp_options(dhcp_options.id, vpc.id)
rval.should.be.equal(True)
@mock_ec2_deprecated
def test_dhcp_options_associate_invalid_dhcp_id():
""" associate dhcp option bad dhcp options id """
conn = boto.connect_vpc('the_key', 'the_secret')
vpc = conn.create_vpc("10.0.0.0/16")
with assert_raises(EC2ResponseError) as cm:
conn.associate_dhcp_options("foo", vpc.id)
cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_dhcp_options_associate_invalid_vpc_id():
""" associate dhcp option invalid vpc id """
conn = boto.connect_vpc('the_key', 'the_secret')
dhcp_options = conn.create_dhcp_options(
SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS)
with assert_raises(EC2ResponseError) as cm:
conn.associate_dhcp_options(dhcp_options.id, "foo")
cm.exception.code.should.equal('InvalidVpcID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_dhcp_options_delete_with_vpc():
"""Test deletion of dhcp options with vpc"""
conn = boto.connect_vpc('the_key', 'the_secret')
dhcp_options = conn.create_dhcp_options(
SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS)
dhcp_options_id = dhcp_options.id
vpc = conn.create_vpc("10.0.0.0/16")
rval = conn.associate_dhcp_options(dhcp_options_id, vpc.id)
rval.should.be.equal(True)
with assert_raises(EC2ResponseError) as cm:
conn.delete_dhcp_options(dhcp_options_id)
cm.exception.code.should.equal('DependencyViolation')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
vpc.delete()
with assert_raises(EC2ResponseError) as cm:
conn.get_all_dhcp_options([dhcp_options_id])
cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_create_dhcp_options():
"""Create most basic dhcp option"""
conn = boto.connect_vpc('the_key', 'the_secret')
dhcp_option = conn.create_dhcp_options(
SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS)
dhcp_option.options[u'domain-name'][0].should.be.equal(SAMPLE_DOMAIN_NAME)
dhcp_option.options[
u'domain-name-servers'][0].should.be.equal(SAMPLE_NAME_SERVERS[0])
dhcp_option.options[
u'domain-name-servers'][1].should.be.equal(SAMPLE_NAME_SERVERS[1])
@mock_ec2_deprecated
def test_create_dhcp_options_invalid_options():
"""Create invalid dhcp options"""
conn = boto.connect_vpc('the_key', 'the_secret')
servers = ["f", "f", "f", "f", "f"]
with assert_raises(EC2ResponseError) as cm:
conn.create_dhcp_options(ntp_servers=servers)
cm.exception.code.should.equal('InvalidParameterValue')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
with assert_raises(EC2ResponseError) as cm:
conn.create_dhcp_options(netbios_node_type="0")
cm.exception.code.should.equal('InvalidParameterValue')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_describe_dhcp_options():
"""Test dhcp options lookup by id"""
conn = boto.connect_vpc('the_key', 'the_secret')
dhcp_option = conn.create_dhcp_options()
dhcp_options = conn.get_all_dhcp_options([dhcp_option.id])
dhcp_options.should.be.length_of(1)
dhcp_options = conn.get_all_dhcp_options()
dhcp_options.should.be.length_of(1)
@mock_ec2_deprecated
def test_describe_dhcp_options_invalid_id():
"""get error on invalid dhcp_option_id lookup"""
conn = boto.connect_vpc('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as cm:
conn.get_all_dhcp_options(["1"])
cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_delete_dhcp_options():
"""delete dhcp option"""
conn = boto.connect_vpc('the_key', 'the_secret')
dhcp_option = conn.create_dhcp_options()
dhcp_options = conn.get_all_dhcp_options([dhcp_option.id])
dhcp_options.should.be.length_of(1)
conn.delete_dhcp_options(dhcp_option.id) # .should.be.equal(True)
with assert_raises(EC2ResponseError) as cm:
conn.get_all_dhcp_options([dhcp_option.id])
cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_delete_dhcp_options_invalid_id():
conn = boto.connect_vpc('the_key', 'the_secret')
conn.create_dhcp_options()
with assert_raises(EC2ResponseError) as cm:
conn.delete_dhcp_options("dopt-abcd1234")
cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_delete_dhcp_options_malformed_id():
conn = boto.connect_vpc('the_key', 'the_secret')
conn.create_dhcp_options()
with assert_raises(EC2ResponseError) as cm:
conn.delete_dhcp_options("foo-abcd1234")
cm.exception.code.should.equal('InvalidDhcpOptionsId.Malformed')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_dhcp_tagging():
conn = boto.connect_vpc('the_key', 'the_secret')
dhcp_option = conn.create_dhcp_options()
dhcp_option.add_tag("a key", "some value")
tag = conn.get_all_tags()[0]
tag.name.should.equal("a key")
tag.value.should.equal("some value")
# Refresh the DHCP options
dhcp_option = conn.get_all_dhcp_options()[0]
dhcp_option.tags.should.have.length_of(1)
dhcp_option.tags["a key"].should.equal("some value")
@mock_ec2_deprecated
def test_dhcp_options_get_by_tag():
conn = boto.connect_vpc('the_key', 'the_secret')
dhcp1 = conn.create_dhcp_options('example.com', ['10.0.10.2'])
dhcp1.add_tag('Name', 'TestDhcpOptions1')
dhcp1.add_tag('test-tag', 'test-value')
dhcp2 = conn.create_dhcp_options('example.com', ['10.0.20.2'])
dhcp2.add_tag('Name', 'TestDhcpOptions2')
dhcp2.add_tag('test-tag', 'test-value')
filters = {'tag:Name': 'TestDhcpOptions1', 'tag:test-tag': 'test-value'}
dhcp_options_sets = conn.get_all_dhcp_options(filters=filters)
dhcp_options_sets.should.have.length_of(1)
dhcp_options_sets[0].options[
'domain-name'][0].should.be.equal('example.com')
dhcp_options_sets[0].options[
'domain-name-servers'][0].should.be.equal('10.0.10.2')
dhcp_options_sets[0].tags['Name'].should.equal('TestDhcpOptions1')
dhcp_options_sets[0].tags['test-tag'].should.equal('test-value')
filters = {'tag:Name': 'TestDhcpOptions2', 'tag:test-tag': 'test-value'}
dhcp_options_sets = conn.get_all_dhcp_options(filters=filters)
dhcp_options_sets.should.have.length_of(1)
dhcp_options_sets[0].options[
'domain-name'][0].should.be.equal('example.com')
dhcp_options_sets[0].options[
'domain-name-servers'][0].should.be.equal('10.0.20.2')
dhcp_options_sets[0].tags['Name'].should.equal('TestDhcpOptions2')
dhcp_options_sets[0].tags['test-tag'].should.equal('test-value')
filters = {'tag:test-tag': 'test-value'}
dhcp_options_sets = conn.get_all_dhcp_options(filters=filters)
dhcp_options_sets.should.have.length_of(2)
@mock_ec2_deprecated
def test_dhcp_options_get_by_id():
conn = boto.connect_vpc('the_key', 'the_secret')
dhcp1 = conn.create_dhcp_options('test1.com', ['10.0.10.2'])
dhcp1.add_tag('Name', 'TestDhcpOptions1')
dhcp1.add_tag('test-tag', 'test-value')
dhcp1_id = dhcp1.id
dhcp2 = conn.create_dhcp_options('test2.com', ['10.0.20.2'])
dhcp2.add_tag('Name', 'TestDhcpOptions2')
dhcp2.add_tag('test-tag', 'test-value')
dhcp2_id = dhcp2.id
dhcp_options_sets = conn.get_all_dhcp_options()
dhcp_options_sets.should.have.length_of(2)
dhcp_options_sets = conn.get_all_dhcp_options(
filters={'dhcp-options-id': dhcp1_id})
dhcp_options_sets.should.have.length_of(1)
dhcp_options_sets[0].options['domain-name'][0].should.be.equal('test1.com')
dhcp_options_sets[0].options[
'domain-name-servers'][0].should.be.equal('10.0.10.2')
dhcp_options_sets = conn.get_all_dhcp_options(
filters={'dhcp-options-id': dhcp2_id})
dhcp_options_sets.should.have.length_of(1)
dhcp_options_sets[0].options['domain-name'][0].should.be.equal('test2.com')
dhcp_options_sets[0].options[
'domain-name-servers'][0].should.be.equal('10.0.20.2')
@mock_ec2
def test_dhcp_options_get_by_value_filter():
ec2 = boto3.resource('ec2', region_name='us-west-1')
ec2.create_dhcp_options(DhcpConfigurations=[
{'Key': 'domain-name', 'Values': ['example.com']},
{'Key': 'domain-name-servers', 'Values': ['10.0.10.2']}
])
ec2.create_dhcp_options(DhcpConfigurations=[
{'Key': 'domain-name', 'Values': ['example.com']},
{'Key': 'domain-name-servers', 'Values': ['10.0.20.2']}
])
ec2.create_dhcp_options(DhcpConfigurations=[
{'Key': 'domain-name', 'Values': ['example.com']},
{'Key': 'domain-name-servers', 'Values': ['10.0.30.2']}
])
filters = [{'Name': 'value', 'Values': ['10.0.10.2']}]
dhcp_options_sets = list(ec2.dhcp_options_sets.filter(Filters=filters))
dhcp_options_sets.should.have.length_of(1)
@mock_ec2
def test_dhcp_options_get_by_key_filter():
ec2 = boto3.resource('ec2', region_name='us-west-1')
ec2.create_dhcp_options(DhcpConfigurations=[
{'Key': 'domain-name', 'Values': ['example.com']},
{'Key': 'domain-name-servers', 'Values': ['10.0.10.2']}
])
ec2.create_dhcp_options(DhcpConfigurations=[
{'Key': 'domain-name', 'Values': ['example.com']},
{'Key': 'domain-name-servers', 'Values': ['10.0.20.2']}
])
ec2.create_dhcp_options(DhcpConfigurations=[
{'Key': 'domain-name', 'Values': ['example.com']},
{'Key': 'domain-name-servers', 'Values': ['10.0.30.2']}
])
filters = [{'Name': 'key', 'Values': ['domain-name']}]
dhcp_options_sets = list(ec2.dhcp_options_sets.filter(Filters=filters))
dhcp_options_sets.should.have.length_of(3)
@mock_ec2_deprecated
def test_dhcp_options_get_by_invalid_filter():
conn = boto.connect_vpc('the_key', 'the_secret')
conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS)
filters = {'invalid-filter': 'invalid-value'}
conn.get_all_dhcp_options.when.called_with(
filters=filters).should.throw(NotImplementedError)
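# Illustrative sketch (assumption, mirroring the boto3 tests above): create a DHCP
# options set inside the mock and look it up again with a value filter.
@mock_ec2
def _example_dhcp_options_roundtrip():
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    ec2.create_dhcp_options(DhcpConfigurations=[
        {'Key': 'domain-name', 'Values': ['example.com']},
        {'Key': 'domain-name-servers', 'Values': ['10.0.10.2']}
    ])
    filters = [{'Name': 'value', 'Values': ['10.0.10.2']}]
    return list(ec2.dhcp_options_sets.filter(Filters=filters))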
|
|
#!/usr/bin/env python
# coding: utf-8
from datetime import datetime
from distutils import spawn
import argparse
import json
import os
import platform
import shutil
import socket
import sys
import urllib
import main
###############################################################################
# Options
###############################################################################
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
'-d', '--dependencies', dest='install_dependencies', action='store_true',
help='install virtualenv and python dependencies',
)
PARSER.add_argument(
'-s', '--start', dest='start', action='store_true',
help='starts the dev_appserver.py with storage_path pointing to temp',
)
PARSER.add_argument(
'-o', '--host', dest='host', action='store', default='127.0.0.1',
help='the host to start the dev_appserver.py',
)
PARSER.add_argument(
'-p', '--port', dest='port', action='store', default='8080',
help='the port to start the dev_appserver.py',
)
PARSER.add_argument(
'--appserver-args', dest='args', nargs=argparse.REMAINDER, default=[],
help='all following args are passed to dev_appserver.py',
)
PARSER.add_argument(
'-v', '--version', dest='show_version', action='store_true',
help='Show gae-init version',
)
ARGS = PARSER.parse_args()
###############################################################################
# Globals
###############################################################################
BAD_ENDINGS = ['pyc', 'pyo', '~']
GAE_PATH = ''
IS_WINDOWS = platform.system() == 'Windows'
###############################################################################
# Directories
###############################################################################
DIR_MAIN = 'main'
DIR_TEMP = 'temp'
DIR_VENV = os.path.join(DIR_TEMP, 'venv')
DIR_LIB = os.path.join(DIR_MAIN, 'lib')
DIR_LIBX = os.path.join(DIR_MAIN, 'libx')
FILE_LIB = '%s.zip' % DIR_LIB
FILE_REQUIREMENTS = 'requirements.txt'
FILE_PIP_GUARD = os.path.join(DIR_TEMP, 'pip.guard')
FILE_VENV = os.path.join(DIR_VENV, 'Scripts', 'activate.bat') \
if IS_WINDOWS \
else os.path.join(DIR_VENV, 'bin', 'activate')
DIR_STORAGE = os.path.join(DIR_TEMP, 'storage')
FILE_UPDATE = os.path.join(DIR_TEMP, 'update.json')
###############################################################################
# Other global variables
###############################################################################
CORE_VERSION_URL = 'https://gae-init.appspot.com/_s/version/'
INTERNET_TEST_URL = 'https://www.google.com'
REQUIREMENTS_URL = 'http://docs.gae-init.appspot.com/requirement/'
###############################################################################
# Helpers
###############################################################################
def print_out(script, filename=''):
timestamp = datetime.now().strftime('%H:%M:%S')
if not filename:
filename = '-' * 46
script = script.rjust(12, '-')
print ("[%s] %s %s" % (timestamp, script, filename))
def make_dirs(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def listdir(directory, split_ext=False):
try:
if split_ext:
return [os.path.splitext(dir_)[0] for dir_ in os.listdir(directory)]
else:
return os.listdir(directory)
except OSError:
return []
def site_packages_path():
if IS_WINDOWS:
return os.path.join(DIR_VENV, 'Lib', 'site-packages')
py_version = 'python%s.%s' % sys.version_info[:2]
return os.path.join(DIR_VENV, 'lib', py_version, 'site-packages')
def create_virtualenv():
if not os.path.exists(FILE_VENV):
os.system('virtualenv --no-site-packages %s' % DIR_VENV)
os.system('echo %s >> %s' % (
'set PYTHONPATH=' if IS_WINDOWS else 'unset PYTHONPATH', FILE_VENV
))
pth_file = os.path.join(site_packages_path(), 'gae.pth')
echo_to = 'echo %s >> {pth}'.format(pth=pth_file)
os.system(echo_to % find_gae_path())
os.system(echo_to % os.path.abspath(DIR_LIBX))
fix_path_cmd = 'import dev_appserver; dev_appserver.fix_sys_path()'
os.system(echo_to % (
fix_path_cmd if IS_WINDOWS else '"%s"' % fix_path_cmd
))
return True
def exec_pip_commands(command):
script = []
if create_virtualenv():
activate_cmd = 'call %s' if IS_WINDOWS else 'source %s'
activate_cmd %= FILE_VENV
script.append(activate_cmd)
script.append('echo %s' % command)
script.append('%s SKIP_GOOGLEAPICLIENT_COMPAT_CHECK=1' %
('set' if IS_WINDOWS else 'export'))
script.append(command)
script = '&'.join(script) if IS_WINDOWS else \
'/bin/bash -c "%s"' % ';'.join(script)
os.system(script)
def make_guard(fname, cmd, spec):
with open(fname, 'w') as guard:
guard.write('Prevents %s execution if newer than %s' % (cmd, spec))
def guard_is_newer(guard, watched):
if os.path.exists(guard):
return os.path.getmtime(guard) > os.path.getmtime(watched)
return False
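# The guard file acts as a timestamp marker: pip is re-run only when requirements.txt
# has been modified after the last successful install, i.e. when the guard file is no
# longer newer than requirements.txt.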
def check_if_pip_should_run():
return not guard_is_newer(FILE_PIP_GUARD, FILE_REQUIREMENTS)
def install_py_libs():
if not check_if_pip_should_run() and os.path.exists(DIR_LIB):
return
exec_pip_commands('pip install -q -r %s' % FILE_REQUIREMENTS)
exclude_ext = ['.pth', '.pyc', '.egg-info', '.dist-info', '.so']
exclude_prefix = ['setuptools-', 'pip-', 'Pillow-']
exclude = [
'test', 'tests', 'pip', 'setuptools', '_markerlib', 'PIL',
'easy_install.py', 'pkg_resources.py'
]
def _exclude_prefix(pkg):
for prefix in exclude_prefix:
if pkg.startswith(prefix):
return True
return False
def _exclude_ext(pkg):
for ext in exclude_ext:
if pkg.endswith(ext):
return True
return False
def _get_dest(pkg):
make_dirs(DIR_LIB)
return os.path.join(DIR_LIB, pkg)
site_packages = site_packages_path()
dir_libs = listdir(DIR_LIB)
dir_libs.extend(listdir(DIR_LIBX))
for dir_ in listdir(site_packages):
if dir_ in dir_libs or dir_ in exclude:
continue
if _exclude_prefix(dir_) or _exclude_ext(dir_):
continue
src_path = os.path.join(site_packages, dir_)
copy = shutil.copy if os.path.isfile(src_path) else shutil.copytree
copy(src_path, _get_dest(dir_))
make_guard(FILE_PIP_GUARD, 'pip', FILE_REQUIREMENTS)
def install_dependencies():
make_dirs(DIR_TEMP)
install_py_libs()
def print_out_update(force_show=False):
try:
import pip
SemVer = pip.util.version.SemanticVersion
except AttributeError:
import pip._vendor.distlib.version
SemVer = pip._vendor.distlib.version.SemanticVersion
try:
with open(FILE_UPDATE, 'r') as update_json:
data = json.load(update_json)
if SemVer(main.__version__) < SemVer(data['version']) or force_show:
print_out('UPDATE')
print_out(data['version'], 'Latest version of gae-init')
print_out(main.__version__, 'Your version is a bit behind')
print_out('CHANGESET', data['changeset'])
except (ValueError, KeyError):
os.remove(FILE_UPDATE)
except IOError:
pass
###############################################################################
# Doctor
###############################################################################
def check_requirement(check_func):
result, name, help_url_id = check_func()
if not result:
print_out('NOT FOUND', name)
if help_url_id:
print ('Please see %s%s' % (REQUIREMENTS_URL, help_url_id))
return False
return True
def find_gae_path():
global GAE_PATH
if GAE_PATH:
return GAE_PATH
if IS_WINDOWS:
gae_path = None
for path in os.environ['PATH'].split(os.pathsep):
if os.path.isfile(os.path.join(path, 'dev_appserver.py')):
gae_path = path
else:
gae_path = spawn.find_executable('dev_appserver.py')
if gae_path:
gae_path = os.path.dirname(os.path.realpath(gae_path))
if not gae_path:
return ''
gcloud_exec = 'gcloud.cmd' if IS_WINDOWS else 'gcloud'
if not os.path.isfile(os.path.join(gae_path, gcloud_exec)):
GAE_PATH = gae_path
else:
gae_path = os.path.join(gae_path, '..', 'platform', 'google_appengine')
if os.path.exists(gae_path):
GAE_PATH = os.path.realpath(gae_path)
return GAE_PATH
def check_gae():
return bool(find_gae_path()), 'Google App Engine SDK', '#gae'
def check_git():
return bool(spawn.find_executable('git')), 'Git', '#git'
def check_nodejs():
return bool(spawn.find_executable('node')), 'Node.js', '#nodejs'
def check_pip():
return bool(spawn.find_executable('pip')), 'pip', '#pip'
def check_virtualenv():
return bool(spawn.find_executable('virtualenv')), 'virtualenv', '#virtualenv'
def doctor_says_ok():
checkers = [check_gae, check_git, check_nodejs, check_pip, check_virtualenv]
if False in [check_requirement(check) for check in checkers]:
sys.exit(1)
return True
###############################################################################
# Main
###############################################################################
def run_start():
make_dirs(DIR_STORAGE)
port = int(ARGS.port)
run_command = ' '.join(map(str, [
'python2 $GAE_PYTHON_HOME/dev_appserver.py',
DIR_MAIN,
'--host %s' % ARGS.host,
'--port %s' % port,
'--admin_port %s' % (port + 1),
'--storage_path=%s' % DIR_STORAGE,
'--skip_sdk_update_check',
] + ARGS.args))
os.system(run_command)
def run():
if len(sys.argv) == 1 or (ARGS.args and not ARGS.start):
PARSER.print_help()
sys.exit(1)
os.chdir(os.path.dirname(os.path.realpath(__file__)))
if doctor_says_ok():
install_dependencies()
if ARGS.show_version:
print_out_update(force_show=True)
else:
print_out_update()
if ARGS.start:
run_start()
if ARGS.install_dependencies:
install_dependencies()
if __name__ == '__main__':
run()
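# Typical invocations (illustrative, based on the options declared above):
#   python run.py -d                     install virtualenv and python dependencies
#   python run.py -s -p 8080             start dev_appserver.py on port 8080 (admin on 8081)
#   python run.py -s --appserver-args    pass all remaining args through to dev_appserver.py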
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkSecurityGroupsOperations(object):
"""NetworkSecurityGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
         Pass in False to disable polling for this operation, or pass in your own initialized polling object to use a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
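    # Illustrative call site (assumption): a caller would typically block on the poller,
    # e.g. client.network_security_groups.begin_delete(resource_group, nsg_name).wait()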
def get(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkSecurityGroup"
"""Gets the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkSecurityGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.NetworkSecurityGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
parameters, # type: "_models.NetworkSecurityGroup"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkSecurityGroup"
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkSecurityGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
parameters, # type: "_models.NetworkSecurityGroup"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.NetworkSecurityGroup"]
"""Creates or updates a network security group in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param parameters: Parameters supplied to the create or update network security group
operation.
:type parameters: ~azure.mgmt.network.v2018_08_01.models.NetworkSecurityGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
         Pass in False to disable polling for this operation, or pass in your own initialized polling object to use a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NetworkSecurityGroup or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_08_01.models.NetworkSecurityGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkSecurityGroup"
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.NetworkSecurityGroup"]
"""Updates a network security group tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param parameters: Parameters supplied to update network security group tags.
:type parameters: ~azure.mgmt.network.v2018_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
         Pass in False to disable polling for this operation, or pass in your own initialized polling object to use a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NetworkSecurityGroup or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_08_01.models.NetworkSecurityGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkSecurityGroupListResult"]
"""Gets all network security groups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_08_01.models.NetworkSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkSecurityGroupListResult"]
"""Gets all network security groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_08_01.models.NetworkSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups'} # type: ignore
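# Illustrative sketch (assumption, not generated code): how a caller might drive this
# operations group through a NetworkManagementClient created for this API version.
# The resource group and NSG names are hypothetical placeholders.
def _example_nsg_lifecycle(client, resource_group_name):
    nsg_params = _models.NetworkSecurityGroup(location='westeurope')
    poller = client.network_security_groups.begin_create_or_update(
        resource_group_name, 'example-nsg', nsg_params)
    created = poller.result()  # block until the long-running operation completes
    fetched = client.network_security_groups.get(resource_group_name, 'example-nsg')
    names = [item.name for item in client.network_security_groups.list(resource_group_name)]
    client.network_security_groups.begin_delete(resource_group_name, 'example-nsg').wait()
    return created, fetched, names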
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''python %prog [options] platform chromium_os_flag template
platform specifies which platform source is being generated for
and can be one of (win, mac, linux)
chromium_os_flag should be 1 if this is a Chromium OS build
template is the path to a .json policy template file.'''
from __future__ import with_statement
from functools import partial
import json
from optparse import OptionParser
import re
import sys
import textwrap
import types
CHROME_POLICY_KEY = 'SOFTWARE\\\\Policies\\\\Google\\\\Chrome'
CHROMIUM_POLICY_KEY = 'SOFTWARE\\\\Policies\\\\Chromium'
class PolicyDetails:
"""Parses a policy template and caches all its details."""
# Maps policy types to a tuple with 3 other types:
# - the equivalent base::Value::Type or 'TYPE_EXTERNAL' if the policy
# references external data
# - the equivalent Protobuf field type
# - the name of one of the protobufs for shared policy types
# TODO(joaodasilva): refactor the 'dict' type into a more generic 'json' type
# that can also be used to represent lists of other JSON objects.
TYPE_MAP = {
'dict': ('TYPE_DICTIONARY', 'string', 'String'),
'external': ('TYPE_EXTERNAL', 'string', 'String'),
'int': ('TYPE_INTEGER', 'int64', 'Integer'),
'int-enum': ('TYPE_INTEGER', 'int64', 'Integer'),
'list': ('TYPE_LIST', 'StringList', 'StringList'),
'main': ('TYPE_BOOLEAN', 'bool', 'Boolean'),
'string': ('TYPE_STRING', 'string', 'String'),
'string-enum': ('TYPE_STRING', 'string', 'String'),
'string-enum-list': ('TYPE_LIST', 'StringList', 'StringList'),
}
class EnumItem:
def __init__(self, item):
self.caption = PolicyDetails._RemovePlaceholders(item['caption'])
self.value = item['value']
def __init__(self, policy, os, is_chromium_os):
self.id = policy['id']
self.name = policy['name']
self.is_deprecated = policy.get('deprecated', False)
self.is_device_only = policy.get('device_only', False)
self.schema = policy.get('schema', {})
expected_platform = 'chrome_os' if is_chromium_os else os.lower()
self.platforms = []
for platform, version in [ p.split(':') for p in policy['supported_on'] ]:
if not version.endswith('-'):
continue
if platform.startswith('chrome.'):
platform_sub = platform[7:]
if platform_sub == '*':
self.platforms.extend(['win', 'mac', 'linux'])
else:
self.platforms.append(platform_sub)
else:
self.platforms.append(platform)
self.platforms.sort()
self.is_supported = expected_platform in self.platforms
if not PolicyDetails.TYPE_MAP.has_key(policy['type']):
raise NotImplementedError('Unknown policy type for %s: %s' %
(policy['name'], policy['type']))
self.policy_type, self.protobuf_type, self.policy_protobuf_type = \
PolicyDetails.TYPE_MAP[policy['type']]
self.schema = policy['schema']
self.desc = '\n'.join(
map(str.strip,
PolicyDetails._RemovePlaceholders(policy['desc']).splitlines()))
self.caption = PolicyDetails._RemovePlaceholders(policy['caption'])
self.max_size = policy.get('max_size', 0)
items = policy.get('items')
if items is None:
self.items = None
else:
self.items = [ PolicyDetails.EnumItem(entry) for entry in items ]
PH_PATTERN = re.compile('<ph[^>]*>([^<]*|[^<]*<ex>([^<]*)</ex>[^<]*)</ph>')
# Simplistic grit placeholder stripper.
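  # For illustration only (hypothetical caption, not taken from a real
  # template): an input such as
  #   'Sign in to <ph name="PRODUCT_NAME"><ex>Chrome</ex>$1</ph> first'
  # is stripped to
  #   'Sign in to Chrome first'
  # i.e. the <ex> example text is preferred when present, otherwise the raw
  # placeholder content is kept.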
@staticmethod
def _RemovePlaceholders(text):
result = ''
pos = 0
for m in PolicyDetails.PH_PATTERN.finditer(text):
result += text[pos:m.start(0)]
result += m.group(2) or m.group(1)
pos = m.end(0)
result += text[pos:]
return result
def main():
parser = OptionParser(usage=__doc__)
parser.add_option('--pch', '--policy-constants-header', dest='header_path',
help='generate header file of policy constants',
metavar='FILE')
parser.add_option('--pcc', '--policy-constants-source', dest='source_path',
help='generate source file of policy constants',
metavar='FILE')
parser.add_option('--cpp', '--cloud-policy-protobuf',
dest='cloud_policy_proto_path',
help='generate cloud policy protobuf file',
metavar='FILE')
parser.add_option('--csp', '--chrome-settings-protobuf',
dest='chrome_settings_proto_path',
help='generate chrome settings protobuf file',
metavar='FILE')
parser.add_option('--cpd', '--cloud-policy-decoder',
dest='cloud_policy_decoder_path',
help='generate C++ code decoding the cloud policy protobuf',
metavar='FILE')
(opts, args) = parser.parse_args()
if len(args) != 3:
print 'exactly platform, chromium_os flag and input file must be specified.'
parser.print_help()
return 2
os = args[0]
is_chromium_os = args[1] == '1'
template_file_name = args[2]
template_file_contents = _LoadJSONFile(template_file_name)
policy_details = [ PolicyDetails(policy, os, is_chromium_os)
for policy in _Flatten(template_file_contents) ]
sorted_policy_details = sorted(policy_details, key=lambda policy: policy.name)
def GenerateFile(path, writer, sorted=False):
if path:
with open(path, 'w') as f:
_OutputGeneratedWarningHeader(f, template_file_name)
writer(sorted and sorted_policy_details or policy_details, os, f)
GenerateFile(opts.header_path, _WritePolicyConstantHeader, sorted=True)
GenerateFile(opts.source_path, _WritePolicyConstantSource, sorted=True)
GenerateFile(opts.cloud_policy_proto_path, _WriteCloudPolicyProtobuf)
GenerateFile(opts.chrome_settings_proto_path, _WriteChromeSettingsProtobuf)
GenerateFile(opts.cloud_policy_decoder_path, _WriteCloudPolicyDecoder)
return 0
#------------------ shared helpers ---------------------------------#
def _OutputGeneratedWarningHeader(f, template_file_path):
f.write('//\n'
'// DO NOT MODIFY THIS FILE DIRECTLY!\n'
'// IT IS GENERATED BY generate_policy_source.py\n'
'// FROM ' + template_file_path + '\n'
'//\n\n')
COMMENT_WRAPPER = textwrap.TextWrapper()
COMMENT_WRAPPER.width = 80
COMMENT_WRAPPER.initial_indent = '// '
COMMENT_WRAPPER.subsequent_indent = '// '
COMMENT_WRAPPER.replace_whitespace = False
# Writes a comment, each line prefixed by // and wrapped to 80 characters.
def _OutputComment(f, comment):
for line in comment.splitlines():
if len(line) == 0:
f.write('//')
else:
f.write(COMMENT_WRAPPER.fill(line))
f.write('\n')
# Returns an iterator over all the policies in |template_file_contents|.
def _Flatten(template_file_contents):
for policy in template_file_contents['policy_definitions']:
if policy['type'] == 'group':
for sub_policy in policy['policies']:
yield sub_policy
else:
yield policy
def _LoadJSONFile(json_file):
with open(json_file, 'r') as f:
text = f.read()
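  # The policy template is presumably not strict JSON (it may contain
  # comments), so it is evaluated as a Python literal rather than parsed
  # with json.loads(); this assumes the template comes from a trusted source.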
return eval(text)
#------------------ policy constants header ------------------------#
def _WritePolicyConstantHeader(policies, os, f):
f.write('#ifndef CHROME_COMMON_POLICY_CONSTANTS_H_\n'
'#define CHROME_COMMON_POLICY_CONSTANTS_H_\n'
'\n'
'#include <string>\n'
'\n'
'#include "base/basictypes.h"\n'
'#include "base/values.h"\n'
'#include "components/policy/core/common/policy_details.h"\n'
'\n'
'namespace policy {\n'
'\n'
'namespace internal {\n'
'struct SchemaData;\n'
'}\n\n')
if os == 'win':
f.write('// The windows registry path where Chrome policy '
'configuration resides.\n'
'extern const wchar_t kRegistryChromePolicyKey[];\n')
f.write('// Returns the PolicyDetails for |policy| if |policy| is a known\n'
'// Chrome policy, otherwise returns NULL.\n'
'const PolicyDetails* GetChromePolicyDetails('
'const std::string& policy);\n'
'\n'
'// Returns the schema data of the Chrome policy schema.\n'
'const internal::SchemaData* GetChromeSchemaData();\n'
'\n')
f.write('// Key names for the policy settings.\n'
'namespace key {\n\n')
for policy in policies:
# TODO(joaodasilva): Include only supported policies in
# configuration_policy_handler.cc and configuration_policy_handler_list.cc
# so that these names can be conditional on 'policy.is_supported'.
# http://crbug.com/223616
f.write('extern const char k' + policy.name + '[];\n')
f.write('\n} // namespace key\n\n'
'} // namespace policy\n\n'
'#endif // CHROME_COMMON_POLICY_CONSTANTS_H_\n')
#------------------ policy constants source ------------------------#
# A mapping of the simple schema types to base::Value::Types.
SIMPLE_SCHEMA_NAME_MAP = {
'boolean': 'TYPE_BOOLEAN',
'integer': 'TYPE_INTEGER',
'null' : 'TYPE_NULL',
'number' : 'TYPE_DOUBLE',
'string' : 'TYPE_STRING',
}
class SchemaNodesGenerator:
"""Builds the internal structs to represent a JSON schema."""
def __init__(self, shared_strings):
"""Creates a new generator.
|shared_strings| is a map of strings to a C expression that evaluates to
that string at runtime. This mapping can be used to reuse existing string
constants."""
self.shared_strings = shared_strings
self.schema_nodes = []
self.property_nodes = []
self.properties_nodes = []
self.restriction_nodes = []
self.int_enums = []
self.string_enums = []
self.simple_types = {
'boolean': None,
'integer': None,
'null': None,
'number': None,
'string': None,
}
self.stringlist_type = None
self.ranges = {}
self.id_map = {}
def GetString(self, s):
if s in self.shared_strings:
return self.shared_strings[s]
# Generate JSON escaped string, which is slightly different from desired
    # C/C++ escaped string. Known differences include the unicode escaping format.
return json.dumps(s)
def AppendSchema(self, type, extra, comment=''):
index = len(self.schema_nodes)
self.schema_nodes.append((type, extra, comment))
return index
def AppendRestriction(self, first, second):
r = (str(first), str(second))
if not r in self.ranges:
self.ranges[r] = len(self.restriction_nodes)
self.restriction_nodes.append(r)
return self.ranges[r]
def GetSimpleType(self, name):
if self.simple_types[name] == None:
self.simple_types[name] = self.AppendSchema(
SIMPLE_SCHEMA_NAME_MAP[name],
-1,
'simple type: ' + name)
return self.simple_types[name]
def GetStringList(self):
if self.stringlist_type == None:
self.stringlist_type = self.AppendSchema(
'TYPE_LIST',
self.GetSimpleType('string'),
'simple type: stringlist')
return self.stringlist_type
def SchemaHaveRestriction(self, schema):
return any(keyword in schema for keyword in
['minimum', 'maximum', 'enum', 'pattern'])
def IsConsecutiveInterval(self, seq):
sortedSeq = sorted(seq)
return all(sortedSeq[i] + 1 == sortedSeq[i + 1]
for i in xrange(len(sortedSeq) - 1))
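  # For example, [3, 1, 2] forms a consecutive interval while [1, 3] does not;
  # GetEnumIntegerType() below uses this to emit a range restriction instead
  # of an explicit enumeration when possible.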
def GetEnumIntegerType(self, schema, name):
assert all(type(x) == int for x in schema['enum'])
possible_values = schema['enum']
if self.IsConsecutiveInterval(possible_values):
index = self.AppendRestriction(max(possible_values), min(possible_values))
return self.AppendSchema('TYPE_INTEGER', index,
'integer with enumeration restriction (use range instead): %s' % name)
offset_begin = len(self.int_enums)
self.int_enums += possible_values
offset_end = len(self.int_enums)
return self.AppendSchema('TYPE_INTEGER',
self.AppendRestriction(offset_begin, offset_end),
'integer with enumeration restriction: %s' % name)
def GetEnumStringType(self, schema, name):
assert all(type(x) == str for x in schema['enum'])
offset_begin = len(self.string_enums)
self.string_enums += schema['enum']
offset_end = len(self.string_enums)
return self.AppendSchema('TYPE_STRING',
self.AppendRestriction(offset_begin, offset_end),
'string with enumeration restriction: %s' % name)
def GetEnumType(self, schema, name):
if len(schema['enum']) == 0:
raise RuntimeError('Empty enumeration in %s' % name)
elif schema['type'] == 'integer':
return self.GetEnumIntegerType(schema, name)
elif schema['type'] == 'string':
return self.GetEnumStringType(schema, name)
else:
raise RuntimeError('Unknown enumeration type in %s' % name)
def GetPatternType(self, schema, name):
if schema['type'] != 'string':
raise RuntimeError('Unknown pattern type in %s' % name)
pattern = schema['pattern']
# Try to compile the pattern to validate it, note that the syntax used
# here might be slightly different from re2.
# TODO(binjin): Add a python wrapper of re2 and use it here.
re.compile(pattern)
    index = len(self.string_enums)
    self.string_enums.append(pattern)
    return self.AppendSchema('TYPE_STRING',
        self.AppendRestriction(index, index),
        'string with pattern restriction: %s' % name)
def GetRangedType(self, schema, name):
if schema['type'] != 'integer':
raise RuntimeError('Unknown ranged type in %s' % name)
min_value_set, max_value_set = False, False
if 'minimum' in schema:
min_value = int(schema['minimum'])
min_value_set = True
if 'maximum' in schema:
      max_value = int(schema['maximum'])
max_value_set = True
if min_value_set and max_value_set and min_value > max_value:
raise RuntimeError('Invalid ranged type in %s' % name)
index = self.AppendRestriction(
str(max_value) if max_value_set else 'INT_MAX',
str(min_value) if min_value_set else 'INT_MIN')
return self.AppendSchema('TYPE_INTEGER',
index,
'integer with ranged restriction: %s' % name)
def Generate(self, schema, name):
"""Generates the structs for the given schema.
|schema|: a valid JSON schema in a dictionary.
|name|: the name of the current node, for the generated comments."""
if schema.has_key('$ref'):
if schema.has_key('id'):
raise RuntimeError("Schemas with a $ref can't have an id")
if not isinstance(schema['$ref'], types.StringTypes):
raise RuntimeError("$ref attribute must be a string")
return schema['$ref']
if schema['type'] in self.simple_types:
if not self.SchemaHaveRestriction(schema):
# Simple types use shared nodes.
return self.GetSimpleType(schema['type'])
elif 'enum' in schema:
return self.GetEnumType(schema, name)
elif 'pattern' in schema:
return self.GetPatternType(schema, name)
else:
return self.GetRangedType(schema, name)
if schema['type'] == 'array':
# Special case for lists of strings, which is a common policy type.
# The 'type' may be missing if the schema has a '$ref' attribute.
if schema['items'].get('type', '') == 'string':
return self.GetStringList()
return self.AppendSchema('TYPE_LIST',
self.GenerateAndCollectID(schema['items'], 'items of ' + name))
elif schema['type'] == 'object':
# Reserve an index first, so that dictionaries come before their
# properties. This makes sure that the root node is the first in the
# SchemaNodes array.
index = self.AppendSchema('TYPE_DICTIONARY', -1)
if 'additionalProperties' in schema:
additionalProperties = self.GenerateAndCollectID(
schema['additionalProperties'],
'additionalProperties of ' + name)
else:
additionalProperties = -1
# Properties must be sorted by name, for the binary search lookup.
# Note that |properties| must be evaluated immediately, so that all the
# recursive calls to Generate() append the necessary child nodes; if
# |properties| were a generator then this wouldn't work.
sorted_properties = sorted(schema.get('properties', {}).items())
properties = [
(self.GetString(key), self.GenerateAndCollectID(subschema, key))
for key, subschema in sorted_properties ]
pattern_properties = []
for pattern, subschema in schema.get('patternProperties', {}).items():
pattern_properties.append((self.GetString(pattern),
            self.GenerateAndCollectID(subschema, pattern)))
begin = len(self.property_nodes)
self.property_nodes += properties
end = len(self.property_nodes)
self.property_nodes += pattern_properties
pattern_end = len(self.property_nodes)
if index == 0:
self.root_properties_begin = begin
self.root_properties_end = end
extra = len(self.properties_nodes)
self.properties_nodes.append((begin, end, pattern_end,
additionalProperties, name))
# Set the right data at |index| now.
self.schema_nodes[index] = ('TYPE_DICTIONARY', extra, name)
return index
else:
assert False
def GenerateAndCollectID(self, schema, name):
"""A wrapper of Generate(), will take the return value, check and add 'id'
attribute to self.id_map. The wrapper needs to be used for every call to
Generate().
"""
index = self.Generate(schema, name)
if not schema.has_key('id'):
return index
id_str = schema['id']
if self.id_map.has_key(id_str):
raise RuntimeError('Duplicated id: ' + id_str)
self.id_map[id_str] = index
return index
def Write(self, f):
"""Writes the generated structs to the given file.
|f| an open file to write to."""
f.write('const internal::SchemaNode kSchemas[] = {\n'
'// Type Extra\n')
for type, extra, comment in self.schema_nodes:
type += ','
f.write(' { base::Value::%-18s %3d }, // %s\n' % (type, extra, comment))
f.write('};\n\n')
if self.property_nodes:
f.write('const internal::PropertyNode kPropertyNodes[] = {\n'
'// Property #Schema\n')
for key, schema in self.property_nodes:
key += ','
f.write(' { %-50s %6d },\n' % (key, schema))
f.write('};\n\n')
if self.properties_nodes:
f.write('const internal::PropertiesNode kProperties[] = {\n'
'// Begin End PatternEnd Additional Properties\n')
for node in self.properties_nodes:
f.write(' { %5d, %5d, %10d, %5d }, // %s\n' % node)
f.write('};\n\n')
if self.restriction_nodes:
f.write('const internal::RestrictionNode kRestrictionNodes[] = {\n')
f.write('// FIRST, SECOND\n')
for first, second in self.restriction_nodes:
f.write(' {{ %-8s %4s}},\n' % (first + ',', second))
f.write('};\n\n')
if self.int_enums:
f.write('const int kIntegerEnumerations[] = {\n')
for possible_values in self.int_enums:
f.write(' %d,\n' % possible_values)
f.write('};\n\n')
if self.string_enums:
f.write('const char* kStringEnumerations[] = {\n')
for possible_values in self.string_enums:
f.write(' %s,\n' % self.GetString(possible_values))
f.write('};\n\n')
f.write('const internal::SchemaData kChromeSchemaData = {\n'
' kSchemas,\n')
f.write(' kPropertyNodes,\n' if self.property_nodes else ' NULL,\n')
f.write(' kProperties,\n' if self.properties_nodes else ' NULL,\n')
f.write(' kRestrictionNodes,\n' if self.restriction_nodes else ' NULL,\n')
f.write(' kIntegerEnumerations,\n' if self.int_enums else ' NULL,\n')
f.write(' kStringEnumerations,\n' if self.string_enums else ' NULL,\n')
f.write('};\n\n')
def GetByID(self, id_str):
if not isinstance(id_str, types.StringTypes):
return id_str
if not self.id_map.has_key(id_str):
raise RuntimeError('Invalid $ref: ' + id_str)
return self.id_map[id_str]
def ResolveID(self, index, params):
return params[:index] + (self.GetByID(params[index]),) + params[index+1:]
def ResolveReferences(self):
"""Resolve reference mapping, required to be called after Generate()
After calling Generate(), the type of indices used in schema structures
might be either int or string. An int type suggests that it's a resolved
index, but for string type it's unresolved. Resolving a reference is as
simple as looking up for corresponding ID in self.id_map, and replace the
old index with the mapped index.
"""
self.schema_nodes = map(partial(self.ResolveID, 1), self.schema_nodes)
self.property_nodes = map(partial(self.ResolveID, 1), self.property_nodes)
self.properties_nodes = map(partial(self.ResolveID, 3),
self.properties_nodes)
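  # Illustrative example (hypothetical id): a node generated as
  #   ('TYPE_LIST', 'ListOfStringsID', 'items of Foo')
  # with self.id_map == {'ListOfStringsID': 7} is rewritten to
  #   ('TYPE_LIST', 7, 'items of Foo')
  # by ResolveReferences().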
def _WritePolicyConstantSource(policies, os, f):
f.write('#include "policy/policy_constants.h"\n'
'\n'
'#include <algorithm>\n'
'#include <climits>\n'
'\n'
'#include "base/logging.h"\n'
'#include "components/policy/core/common/schema_internal.h"\n'
'\n'
'namespace policy {\n'
'\n'
'namespace {\n'
'\n')
# Generate the Chrome schema.
chrome_schema = {
'type': 'object',
'properties': {},
}
shared_strings = {}
for policy in policies:
shared_strings[policy.name] = "key::k%s" % policy.name
if policy.is_supported:
chrome_schema['properties'][policy.name] = policy.schema
# Note: this list must be kept in sync with the known property list of the
  # Chrome schema, so that binary searching in the PropertyNode array gets the
# right index on this array as well. See the implementation of
# GetChromePolicyDetails() below.
f.write('const PolicyDetails kChromePolicyDetails[] = {\n'
'// is_deprecated is_device_policy id max_external_data_size\n')
for policy in policies:
if policy.is_supported:
f.write(' { %-14s %-16s %3s, %24s },\n' % (
'true,' if policy.is_deprecated else 'false,',
'true,' if policy.is_device_only else 'false,',
policy.id,
policy.max_size))
f.write('};\n\n')
schema_generator = SchemaNodesGenerator(shared_strings)
schema_generator.GenerateAndCollectID(chrome_schema, 'root node')
schema_generator.ResolveReferences()
schema_generator.Write(f)
f.write('bool CompareKeys(const internal::PropertyNode& node,\n'
' const std::string& key) {\n'
' return node.key < key;\n'
'}\n\n')
f.write('} // namespace\n\n')
if os == 'win':
f.write('#if defined(GOOGLE_CHROME_BUILD)\n'
'const wchar_t kRegistryChromePolicyKey[] = '
'L"' + CHROME_POLICY_KEY + '";\n'
'#else\n'
'const wchar_t kRegistryChromePolicyKey[] = '
'L"' + CHROMIUM_POLICY_KEY + '";\n'
'#endif\n\n')
f.write('const internal::SchemaData* GetChromeSchemaData() {\n'
' return &kChromeSchemaData;\n'
'}\n\n')
f.write('const PolicyDetails* GetChromePolicyDetails('
'const std::string& policy) {\n'
' // First index in kPropertyNodes of the Chrome policies.\n'
' static const int begin_index = %s;\n'
' // One-past-the-end of the Chrome policies in kPropertyNodes.\n'
' static const int end_index = %s;\n' %
(schema_generator.root_properties_begin,
schema_generator.root_properties_end))
f.write(' const internal::PropertyNode* begin =\n'
' kPropertyNodes + begin_index;\n'
' const internal::PropertyNode* end = kPropertyNodes + end_index;\n'
' const internal::PropertyNode* it =\n'
' std::lower_bound(begin, end, policy, CompareKeys);\n'
' if (it == end || it->key != policy)\n'
' return NULL;\n'
' // This relies on kPropertyNodes from begin_index to end_index\n'
' // having exactly the same policies (and in the same order) as\n'
' // kChromePolicyDetails, so that binary searching on the first\n'
' // gets the same results as a binary search on the second would.\n'
' // However, kPropertyNodes has the policy names and\n'
' // kChromePolicyDetails doesn\'t, so we obtain the index into\n'
' // the second array by searching the first to avoid duplicating\n'
' // the policy name pointers.\n'
' // Offsetting |it| from |begin| here obtains the index we\'re\n'
' // looking for.\n'
' size_t index = it - begin;\n'
' CHECK_LT(index, arraysize(kChromePolicyDetails));\n'
' return kChromePolicyDetails + index;\n'
'}\n\n')
f.write('namespace key {\n\n')
for policy in policies:
# TODO(joaodasilva): Include only supported policies in
# configuration_policy_handler.cc and configuration_policy_handler_list.cc
# so that these names can be conditional on 'policy.is_supported'.
# http://crbug.com/223616
f.write('const char k{name}[] = "{name}";\n'.format(name=policy.name))
f.write('\n} // namespace key\n\n'
'} // namespace policy\n')
#------------------ policy protobufs --------------------------------#
CHROME_SETTINGS_PROTO_HEAD = '''
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package enterprise_management;
// For StringList and PolicyOptions.
import "cloud_policy.proto";
'''
CLOUD_POLICY_PROTO_HEAD = '''
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package enterprise_management;
message StringList {
repeated string entries = 1;
}
message PolicyOptions {
enum PolicyMode {
// The given settings are applied regardless of user choice.
MANDATORY = 0;
// The user may choose to override the given settings.
RECOMMENDED = 1;
// No policy value is present and the policy should be ignored.
UNSET = 2;
}
optional PolicyMode mode = 1 [default = MANDATORY];
}
message BooleanPolicyProto {
optional PolicyOptions policy_options = 1;
optional bool value = 2;
}
message IntegerPolicyProto {
optional PolicyOptions policy_options = 1;
optional int64 value = 2;
}
message StringPolicyProto {
optional PolicyOptions policy_options = 1;
optional string value = 2;
}
message StringListPolicyProto {
optional PolicyOptions policy_options = 1;
optional StringList value = 2;
}
'''
# Field IDs [1..RESERVED_IDS] will not be used in the wrapping protobuf.
RESERVED_IDS = 2
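# For example, a policy whose 'id' is 3 is assigned proto field number
# 3 + RESERVED_IDS == 5 in the wrapping messages generated below.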
def _WritePolicyProto(f, policy, fields):
_OutputComment(f, policy.caption + '\n\n' + policy.desc)
if policy.items is not None:
_OutputComment(f, '\nValid values:')
for item in policy.items:
_OutputComment(f, ' %s: %s' % (str(item.value), item.caption))
if policy.policy_type == 'TYPE_DICTIONARY':
_OutputComment(f, '\nValue schema:\n%s' %
json.dumps(policy.schema, sort_keys=True, indent=4,
separators=(',', ': ')))
_OutputComment(f, '\nSupported on: %s' % ', '.join(policy.platforms))
f.write('message %sProto {\n' % policy.name)
f.write(' optional PolicyOptions policy_options = 1;\n')
f.write(' optional %s %s = 2;\n' % (policy.protobuf_type, policy.name))
f.write('}\n\n')
fields += [ ' optional %sProto %s = %s;\n' %
(policy.name, policy.name, policy.id + RESERVED_IDS) ]
def _WriteChromeSettingsProtobuf(policies, os, f):
f.write(CHROME_SETTINGS_PROTO_HEAD)
fields = []
f.write('// PBs for individual settings.\n\n')
for policy in policies:
# Note: this protobuf also gets the unsupported policies, since it's an
    # exhaustive list of all the supported user policies on any platform.
if not policy.is_device_only:
_WritePolicyProto(f, policy, fields)
f.write('// --------------------------------------------------\n'
'// Big wrapper PB containing the above groups.\n\n'
'message ChromeSettingsProto {\n')
f.write(''.join(fields))
f.write('}\n\n')
def _WriteCloudPolicyProtobuf(policies, os, f):
f.write(CLOUD_POLICY_PROTO_HEAD)
f.write('message CloudPolicySettings {\n')
for policy in policies:
if policy.is_supported and not policy.is_device_only:
f.write(' optional %sPolicyProto %s = %s;\n' %
(policy.policy_protobuf_type, policy.name,
policy.id + RESERVED_IDS))
f.write('}\n\n')
#------------------ protobuf decoder -------------------------------#
CPP_HEAD = '''
#include <limits>
#include <string>
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/json/json_reader.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/values.h"
#include "components/policy/core/common/cloud/cloud_external_data_manager.h"
#include "components/policy/core/common/external_data_fetcher.h"
#include "components/policy/core/common/policy_map.h"
#include "policy/policy_constants.h"
#include "policy/proto/cloud_policy.pb.h"
using google::protobuf::RepeatedPtrField;
namespace policy {
namespace em = enterprise_management;
base::Value* DecodeIntegerValue(google::protobuf::int64 value) {
if (value < std::numeric_limits<int>::min() ||
value > std::numeric_limits<int>::max()) {
LOG(WARNING) << "Integer value " << value
<< " out of numeric limits, ignoring.";
return NULL;
}
return base::Value::CreateIntegerValue(static_cast<int>(value));
}
base::ListValue* DecodeStringList(const em::StringList& string_list) {
base::ListValue* list_value = new base::ListValue;
RepeatedPtrField<std::string>::const_iterator entry;
for (entry = string_list.entries().begin();
entry != string_list.entries().end(); ++entry) {
list_value->Append(base::Value::CreateStringValue(*entry));
}
return list_value;
}
base::Value* DecodeJson(const std::string& json) {
scoped_ptr<base::Value> root(
base::JSONReader::Read(json, base::JSON_ALLOW_TRAILING_COMMAS));
if (!root)
LOG(WARNING) << "Invalid JSON string, ignoring: " << json;
// Accept any Value type that parsed as JSON, and leave it to the handler to
// convert and check the concrete type.
return root.release();
}
void DecodePolicy(const em::CloudPolicySettings& policy,
base::WeakPtr<CloudExternalDataManager> external_data_manager,
PolicyMap* map) {
'''
CPP_FOOT = '''}
} // namespace policy
'''
def _CreateValue(type, arg):
if type == 'TYPE_BOOLEAN':
return 'base::Value::CreateBooleanValue(%s)' % arg
elif type == 'TYPE_INTEGER':
return 'DecodeIntegerValue(%s)' % arg
elif type == 'TYPE_STRING':
return 'base::Value::CreateStringValue(%s)' % arg
elif type == 'TYPE_LIST':
return 'DecodeStringList(%s)' % arg
elif type == 'TYPE_DICTIONARY' or type == 'TYPE_EXTERNAL':
return 'DecodeJson(%s)' % arg
else:
raise NotImplementedError('Unknown type %s' % type)
def _CreateExternalDataFetcher(type, name):
if type == 'TYPE_EXTERNAL':
return 'new ExternalDataFetcher(external_data_manager, key::k%s)' % name
return 'NULL'
def _WritePolicyCode(f, policy):
membername = policy.name.lower()
proto_type = '%sPolicyProto' % policy.policy_protobuf_type
f.write(' if (policy.has_%s()) {\n' % membername)
f.write(' const em::%s& policy_proto = policy.%s();\n' %
(proto_type, membername))
f.write(' if (policy_proto.has_value()) {\n')
f.write(' PolicyLevel level = POLICY_LEVEL_MANDATORY;\n'
' bool do_set = true;\n'
' if (policy_proto.has_policy_options()) {\n'
' do_set = false;\n'
' switch(policy_proto.policy_options().mode()) {\n'
' case em::PolicyOptions::MANDATORY:\n'
' do_set = true;\n'
' level = POLICY_LEVEL_MANDATORY;\n'
' break;\n'
' case em::PolicyOptions::RECOMMENDED:\n'
' do_set = true;\n'
' level = POLICY_LEVEL_RECOMMENDED;\n'
' break;\n'
' case em::PolicyOptions::UNSET:\n'
' break;\n'
' }\n'
' }\n'
' if (do_set) {\n')
f.write(' base::Value* value = %s;\n' %
(_CreateValue(policy.policy_type, 'policy_proto.value()')))
# TODO(bartfab): |value| == NULL indicates that the policy value could not be
# parsed successfully. Surface such errors in the UI.
f.write(' if (value) {\n')
f.write(' ExternalDataFetcher* external_data_fetcher = %s;\n' %
_CreateExternalDataFetcher(policy.policy_type, policy.name))
f.write(' map->Set(key::k%s, level, POLICY_SCOPE_USER,\n' %
policy.name)
f.write(' value, external_data_fetcher);\n'
' }\n'
' }\n'
' }\n'
' }\n')
def _WriteCloudPolicyDecoder(policies, os, f):
f.write(CPP_HEAD)
for policy in policies:
if policy.is_supported and not policy.is_device_only:
_WritePolicyCode(f, policy)
f.write(CPP_FOOT)
if __name__ == '__main__':
sys.exit(main())
|
|
import os
import pytz
import tempfile
from mock import patch
from datetime import datetime
from django.test import TestCase
from django.core import mail
from django.conf import settings
from django.utils.six import StringIO
from dbbackup import utils, settings as dbbackup_settings
from dbbackup.tests.utils import (ENCRYPTED_FILE, clean_gpg_keys,
add_private_gpg, COMPRESSED_FILE,
callable_for_filename_template,
DEV_NULL, add_public_gpg)
class Bytes_To_StrTest(TestCase):
def test_get_gb(self):
value = utils.bytes_to_str(byteVal=2**31)
self.assertEqual(value, "2.0 GiB")
def test_0_decimal(self):
value = utils.bytes_to_str(byteVal=1.01, decimals=0)
self.assertEqual(value, "1 B")
def test_2_decimal(self):
value = utils.bytes_to_str(byteVal=1.01, decimals=2)
self.assertEqual(value, "1.01 B")
class Handle_SizeTest(TestCase):
def test_func(self):
filehandle = StringIO('Test string')
value = utils.handle_size(filehandle=filehandle)
self.assertEqual(value, '11.0 B')
class Email_Uncaught_ExceptionTest(TestCase):
def test_success(self):
def func():
pass
utils.email_uncaught_exception(func)
@patch('dbbackup.settings.SEND_EMAIL', False)
def test_raise(self):
def func():
raise Exception('Foo')
with self.assertRaises(Exception):
utils.email_uncaught_exception(func)()
self.assertEqual(len(mail.outbox), 0)
@patch('dbbackup.settings.SEND_EMAIL', True)
@patch('dbbackup.settings.FAILURE_RECIPIENTS', ['foo@bar'])
def test_raise_with_mail(self):
def func():
raise Exception('Foo')
with self.assertRaises(Exception):
utils.email_uncaught_exception(func)()
self.assertEqual(len(mail.outbox), 1)
class Encrypt_FileTest(TestCase):
def setUp(self):
self.path = tempfile.mktemp()
with open(self.path, 'a') as fd:
fd.write('foo')
add_public_gpg()
def tearDown(self):
os.remove(self.path)
clean_gpg_keys()
def test_func(self, *args):
with open(self.path) as fd:
encrypted_file, filename = utils.encrypt_file(inputfile=fd,
filename='foo.txt')
encrypted_file.seek(0)
self.assertTrue(encrypted_file.read())
class Unencrypt_FileTest(TestCase):
def setUp(self):
add_private_gpg()
def tearDown(self):
clean_gpg_keys()
@patch('dbbackup.utils.input', return_value=None)
@patch('dbbackup.utils.getpass', return_value=None)
def test_unencrypt(self, *args):
inputfile = open(ENCRYPTED_FILE, 'r+b')
uncryptfile, filename = utils.unencrypt_file(inputfile, 'foofile.gpg')
uncryptfile.seek(0)
self.assertEqual(b'foo\n', uncryptfile.read())
class Compress_FileTest(TestCase):
def setUp(self):
self.path = tempfile.mktemp()
with open(self.path, 'a+b') as fd:
fd.write(b'foo')
def tearDown(self):
os.remove(self.path)
def test_func(self, *args):
with open(self.path) as fd:
            compressed_file, filename = utils.compress_file(inputfile=fd,
                                                            filename='foo.txt')
class Uncompress_FileTest(TestCase):
def test_func(self):
inputfile = open(COMPRESSED_FILE, 'rb')
fd, filename = utils.uncompress_file(inputfile, 'foo.gz')
fd.seek(0)
self.assertEqual(fd.read(), b'foo\n')
class Create_Spooled_Temporary_FileTest(TestCase):
def setUp(self):
self.path = tempfile.mktemp()
with open(self.path, 'a') as fd:
fd.write('foo')
def tearDown(self):
os.remove(self.path)
def test_func(self, *args):
utils.create_spooled_temporary_file(filepath=self.path)
class TimestampTest(TestCase):
def test_naive_value(self):
with self.settings(USE_TZ=False):
timestamp = utils.timestamp(datetime(2015, 8, 15, 8, 15, 12, 0))
self.assertEqual(timestamp, '2015-08-15-081512')
def test_aware_value(self):
        with self.settings(USE_TZ=True, TIME_ZONE='Europe/Rome'):
timestamp = utils.timestamp(datetime(2015, 8, 15, 8, 15, 12, 0, tzinfo=pytz.utc))
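            # Europe/Rome observes CEST (UTC+2) in August, so 08:15 UTC is
            # expected to render as 10:15 local time.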
self.assertEqual(timestamp, '2015-08-15-101512')
class Datefmt_To_Regex(TestCase):
def test_patterns(self):
now = datetime.now()
for datefmt, regex in utils.PATTERN_MATCHNG:
date_string = datetime.strftime(now, datefmt)
regex = utils.datefmt_to_regex(datefmt)
match = regex.match(date_string)
self.assertTrue(match)
self.assertEqual(match.groups()[0], date_string)
def test_complex_pattern(self):
now = datetime.now()
datefmt = 'Foo%a_%A-%w-%d-%b-%B_%m_%y_%Y-%H-%I-%M_%S_%f_%j-%U-%W-Bar'
date_string = datetime.strftime(now, datefmt)
regex = utils.datefmt_to_regex(datefmt)
self.assertTrue(regex.pattern.startswith('(Foo'))
self.assertTrue(regex.pattern.endswith('Bar)'))
match = regex.match(date_string)
self.assertTrue(match)
self.assertEqual(match.groups()[0], date_string)
class Filename_To_DatestringTest(TestCase):
def test_func(self):
now = datetime.now()
datefmt = dbbackup_settings.DATE_FORMAT
filename = '%s-foo.gz.gpg' % datetime.strftime(now, datefmt)
datestring = utils.filename_to_datestring(filename, datefmt)
self.assertIn(datestring, filename)
def test_generated_filename(self):
filename = utils.filename_generate('bak', 'default')
datestring = utils.filename_to_datestring(filename)
self.assertIn(datestring, filename)
class Filename_To_DateTest(TestCase):
def test_func(self):
now = datetime.now()
datefmt = dbbackup_settings.DATE_FORMAT
filename = '%s-foo.gz.gpg' % datetime.strftime(now, datefmt)
date = utils.filename_to_date(filename, datefmt)
self.assertEqual(date.timetuple()[:5], now.timetuple()[:5])
def test_generated_filename(self):
filename = utils.filename_generate('bak', 'default')
datestring = utils.filename_to_date(filename)
@patch('dbbackup.settings.HOSTNAME', 'test')
class Filename_GenerateTest(TestCase):
@patch('dbbackup.settings.FILENAME_TEMPLATE', '---{databasename}--{servername}-{datetime}.{extension}')
def test_func(self, *args):
extension = 'foo'
generated_name = utils.filename_generate(extension)
self.assertTrue('--' not in generated_name)
self.assertFalse(generated_name.startswith('-'))
def test_db(self, *args):
extension = 'foo'
generated_name = utils.filename_generate(extension)
self.assertTrue(generated_name.startswith(dbbackup_settings.HOSTNAME))
self.assertTrue(generated_name.endswith(extension))
def test_media(self, *args):
extension = 'foo'
generated_name = utils.filename_generate(extension, content_type='media')
self.assertTrue(generated_name.startswith(dbbackup_settings.HOSTNAME))
self.assertTrue(generated_name.endswith(extension))
@patch('django.utils.timezone.settings.USE_TZ', True)
def test_tz_true(self):
filename = utils.filename_generate('bak', 'default')
datestring = utils.filename_to_datestring(filename)
self.assertIn(datestring, filename)
@patch('dbbackup.settings.FILENAME_TEMPLATE', callable_for_filename_template)
def test_template_is_callable(self, *args):
extension = 'foo'
generated_name = utils.filename_generate(extension)
self.assertTrue(generated_name.endswith('foo'))
|
|
from __future__ import absolute_import
import contextlib
import locale
import logging
import re
import os
import posixpath
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from pip.exceptions import InstallationError, BadCommand
from pip.compat import console_to_str, stdlib_pkgs
from pip.locations import (
site_packages, user_site, running_under_virtualenv, virtualenv_no_global,
write_delete_marker_file,
)
from pip._vendor import pkg_resources, six
from pip._vendor.six.moves import input
from pip._vendor.six.moves import cStringIO
from pip._vendor.six import PY2
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
__all__ = ['rmtree', 'display_path', 'backup_dir',
'find_command', 'ask', 'Inf',
'normalize_name', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'make_path_relative', 'normalize_path',
'renames', 'get_terminal_size', 'get_prog',
'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
'captured_stdout', 'remove_tracebacks']
logger = logging.getLogger(__name__)
def get_prog():
try:
if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
def rmtree(dir, ignore_errors=False):
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
# if file type currently read only
if os.stat(path).st_mode & stat.S_IREAD:
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
return
else:
raise
def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if sys.version_info[0] == 2:
path = path.decode(sys.getfilesystemencoding(), 'replace')
path = path.encode(sys.getdefaultencoding(), 'replace')
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
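# For example (hypothetical path), backup_dir('/tmp/build') returns
# '/tmp/build.bak', or '/tmp/build.bak2' if that name is already taken.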
def find_command(cmd, paths=None, pathext=None):
"""Searches the PATH for the given command and returns its path"""
if paths is None:
paths = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(paths, six.string_types):
paths = [paths]
# check if there are funny path extensions for executables, e.g. Windows
if pathext is None:
pathext = get_pathext()
pathext = [ext for ext in pathext.lower().split(os.pathsep) if len(ext)]
# don't use extensions if the command ends with one of them
if os.path.splitext(cmd)[1].lower() in pathext:
pathext = ['']
# check if we find the command on PATH
for path in paths:
# try without extension first
cmd_path = os.path.join(path, cmd)
for ext in pathext:
# then including the extension
cmd_path_ext = cmd_path + ext
if os.path.isfile(cmd_path_ext):
return cmd_path_ext
if os.path.isfile(cmd_path):
return cmd_path
raise BadCommand('Cannot find command %r' % cmd)
def get_pathext(default_pathext=None):
"""Returns the path extensions from environment or a default"""
if default_pathext is None:
default_pathext = os.pathsep.join(['.COM', '.EXE', '.BAT', '.CMD'])
pathext = os.environ.get('PATHEXT', default_pathext)
return pathext
def ask_path_exists(message, options):
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
"""Ask the message interactively, with the given possible responses"""
while 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception(
'No input was expected ($PIP_NO_INPUT set); question: %s' %
message
)
response = input(message)
response = response.strip().lower()
if response not in options:
print(
'Your response (%r) was not one of the expected responses: '
'%s' % (response, ', '.join(options))
)
else:
return response
class _Inf(object):
"""I am bigger than everything!"""
def __eq__(self, other):
if self is other:
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __repr__(self):
return 'Inf'
Inf = _Inf() # this object is not currently used as a sortable in our code
del _Inf
_normalize_re = re.compile(r'[^a-z]', re.I)
def normalize_name(name):
return _normalize_re.sub('-', name.lower())
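# For example, normalize_name('Flask_SQLAlchemy') == 'flask-sqlalchemy': the
# name is lower-cased and every character outside a-z is replaced with '-'.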
def format_size(bytes):
if bytes > 1000 * 1000:
return '%.1fMB' % (bytes / 1000.0 / 1000)
elif bytes > 10 * 1000:
return '%ikB' % (bytes / 1000)
elif bytes > 1000:
return '%.1fkB' % (bytes / 1000.0)
else:
return '%ibytes' % bytes
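# For example, format_size(1234567) == '1.2MB' and format_size(2048) == '2.0kB'.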
def is_installable_dir(path):
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
return False
def is_svn_page(html):
"""
Returns true if the page appears to be the index page of an svn repository
"""
return (re.search(r'<title>[^<]*Revision \d+:', html)
and re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
with open(filename, 'rb') as fp:
return fp.read().decode('utf-8')
def split_leading_dir(path):
path = str(path)
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\'))
or '\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
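# For example, split_leading_dir('pkg/subdir/mod.py') == ('pkg', 'subdir/mod.py')
# and split_leading_dir('mod.py') == ('mod.py', '').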
def has_leading_dir(paths):
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
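# For example, ['pkg/a.py', 'pkg/sub/b.py'] has a leading dir ('pkg'), while
# ['pkg/a.py', 'other/b.py'] does not.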
def make_path_relative(path, rel_to):
"""
    Return the path to the file given by `path`, rewritten so that it is
    relative to the directory `rel_to`.
>>> make_path_relative('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../../../something/a-file.pth'
>>> make_path_relative('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../usr/share/something/a-file.pth'
>>> make_path_relative('/usr/share/a-file.pth', '/usr/share/')
'a-file.pth'
"""
path_filename = os.path.basename(path)
path = os.path.dirname(path)
path = os.path.normpath(os.path.abspath(path))
rel_to = os.path.normpath(os.path.abspath(rel_to))
path_parts = path.strip(os.path.sep).split(os.path.sep)
rel_to_parts = rel_to.strip(os.path.sep).split(os.path.sep)
while path_parts and rel_to_parts and path_parts[0] == rel_to_parts[0]:
path_parts.pop(0)
rel_to_parts.pop(0)
full_parts = ['..'] * len(rel_to_parts) + path_parts + [path_filename]
if full_parts == ['']:
return '.' + os.path.sep
return os.path.sep.join(full_parts)
def normalize_path(path):
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
return os.path.normcase(os.path.realpath(os.path.expanduser(path)))
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
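# For example, splitext('archive.tar.gz') == ('archive', '.tar.gz'), whereas
# os.path.splitext() alone would give ('archive.tar', '.gz').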
def renames(old, new):
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist))
def dist_in_usersite(dist):
"""
Return True if given Distribution is installed in user site.
"""
norm_path = normalize_path(dist_location(dist))
return norm_path.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
"""
Return True if given Distribution is installed in
distutils.sysconfig.get_python_lib().
"""
return normalize_path(
dist_location(dist)
).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
"""Is distribution an editable install?"""
# TODO: factor out determining editableness out of FrozenRequirement
from pip import FrozenRequirement
req = FrozenRequirement.from_dist(dist, [])
return req.editable
def get_installed_distributions(local_only=True,
skip=stdlib_pkgs,
include_editables=True,
editables_only=False,
user_only=False):
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to stdlib_pkgs
    If ``include_editables`` is False, don't report editables.
    If ``editables_only`` is True, only report editables.
    If ``user_only`` is True, only report installations in the user
site directory.
"""
if local_only:
local_test = dist_is_local
else:
local_test = lambda d: True
if include_editables:
editable_test = lambda d: True
else:
editable_test = lambda d: not dist_is_editable(d)
if editables_only:
editables_only_test = lambda d: dist_is_editable(d)
else:
editables_only_test = lambda d: True
if user_only:
user_test = dist_in_usersite
else:
user_test = lambda d: True
return [d for d in pkg_resources.working_set
if local_test(d)
and d.key not in skip
and editable_test(d)
and editables_only_test(d)
and user_test(d)
]
def egg_link_path(dist):
"""
Return the path for the .egg-link file if it exists, otherwise, None.
    There are 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
def dist_location(dist):
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
"""
egg_link = egg_link_path(dist)
if egg_link:
return egg_link
return dist.location
def get_terminal_size():
"""Returns a tuple (x, y) representing the width(x) and the height(x)
in characters of the terminal window."""
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
import struct
cr = struct.unpack(
'hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
)
except:
return None
if cr == (0, 0):
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
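# For example, with a process umask of 0o022 this returns 0o022; the umask is
# set back immediately, so the process umask is effectively unchanged.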
def unzip_file(filename, location, flatten=True):
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
if not os.path.exists(location):
os.makedirs(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
data = zip.read(name)
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if not os.path.exists(dir):
os.makedirs(dir)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
if not os.path.exists(fn):
os.makedirs(fn)
else:
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
mode = info.external_attr >> 16
                # if the mode is known, this member is a regular file, and it
                # has any execute permission (user/group/world):
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world
# (chmod +x) no-op on windows per python docs
os.chmod(fn, (0o777 - current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
if not os.path.exists(location):
os.makedirs(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif (filename.lower().endswith('.bz2')
or filename.lower().endswith('.tbz')):
mode = 'r:bz2'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warning(
'Cannot determine compression type for file %s', filename,
)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
# note: python<=2.5 doesn't seem to know about pax headers, filter them
leading = has_leading_dir([
member.name for member in tar.getmembers()
if member.name != 'pax_global_header'
])
for member in tar.getmembers():
fn = member.name
if fn == 'pax_global_header':
continue
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if member.isdir():
if not os.path.exists(path):
os.makedirs(path)
elif member.issym():
try:
tar._extract_member(member, path)
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
destfp = open(path, 'wb')
try:
shutil.copyfileobj(fp, destfp)
finally:
destfp.close()
fp.close()
                # does the member have any execute permission (user/group/world)?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777 - current_umask() | 0o111))
finally:
tar.close()
def unpack_file(filename, location, content_type, link):
filename = os.path.realpath(filename)
if (content_type == 'application/zip'
or filename.endswith('.zip')
or filename.endswith('.whl')
or zipfile.is_zipfile(filename)):
unzip_file(
filename,
location,
flatten=not filename.endswith('.whl')
)
elif (content_type == 'application/x-gzip'
or tarfile.is_tarfile(filename)
or splitext(filename)[1].lower() in (
'.tar', '.tar.gz', '.tar.bz2', '.tgz', '.tbz')):
untar_file(filename, location)
elif (content_type and content_type.startswith('text/html')
and is_svn_page(file_contents(filename))):
# We don't really care about this
from pip.vcs.subversion import Subversion
Subversion('svn+' + link.url).unpack(location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
'Cannot unpack file %s (downloaded from %s, content-type: %s); '
'cannot detect archive format',
filename, location, content_type,
)
raise InstallationError(
'Cannot determine archive format of %s' % location
)
def remove_tracebacks(output):
pattern = (r'(?:\W+File "(?:.*)", line (?:.*)\W+(?:.*)\W+\^\W+)?'
r'Syntax(?:Error|Warning): (?:.*)')
output = re.sub(pattern, '', output)
if PY2:
return output
# compileall.compile_dir() prints different messages to stdout
# in Python 3
return re.sub(r"\*\*\* Error compiling (?:.*)", '', output)
def call_subprocess(cmd, show_stdout=True,
filter_stdout=None, cwd=None,
raise_on_returncode=True,
command_level=logging.DEBUG, command_desc=None,
extra_environ=None):
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.log(command_level, "Running command %s", command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception as exc:
logger.critical(
"Error %s while executing command %s", exc, command_desc,
)
raise
all_output = []
if stdout is not None:
stdout = remove_tracebacks(console_to_str(proc.stdout.read()))
stdout = cStringIO(stdout)
while 1:
line = stdout.readline()
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
if filter_stdout:
level = filter_stdout(line)
if isinstance(level, tuple):
level, line = level
logger.log(level, line)
# if not logger.stdout_level_matches(level) and False:
# # TODO(dstufft): Handle progress bar.
# logger.show_progress()
else:
logger.debug(line)
else:
returned_stdout, returned_stderr = proc.communicate()
all_output = [returned_stdout or '']
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.info(
'Complete output from command %s:', command_desc,
)
logger.info(
'\n'.join(all_output) +
'\n----------------------------------------'
)
raise InstallationError(
'Command "%s" failed with error code %s in %s'
% (command_desc, proc.returncode, cwd))
else:
logger.warning(
'Command "%s" had error code %s in %s',
command_desc, proc.returncode, cwd,
)
if stdout is not None:
return remove_tracebacks(''.join(all_output))
def read_text_file(filename):
"""Return the contents of *filename*.
Try to decode the file contents with utf-8, the preferred system encoding
(e.g., cp1252 on some Windows machines), and latin1, in that order.
Decoding a byte string with latin1 will never raise an error. In the worst
case, the returned string will contain some garbage characters.
"""
with open(filename, 'rb') as fp:
data = fp.read()
encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1']
for enc in encodings:
try:
data = data.decode(enc)
except UnicodeDecodeError:
continue
break
assert type(data) != bytes # Latin1 should have worked.
return data
def _make_build_dir(build_dir):
os.makedirs(build_dir)
write_delete_marker_file(build_dir)
class FakeFile(object):
"""Wrap a list of lines in an object with readline() to make
ConfigParser happy."""
def __init__(self, lines):
self._gen = (l for l in lines)
def readline(self):
try:
try:
return next(self._gen)
except NameError:
return self._gen.next()
except StopIteration:
return ''
def __iter__(self):
return self._gen
class StreamWrapper(StringIO):
@classmethod
def from_stream(cls, orig_stream):
cls.orig_stream = orig_stream
return cls()
# compileall.compile_dir() needs stdout.encoding to print to stdout
@property
def encoding(self):
return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Taken from Lib/support/__init__.py in the CPython repo.
"""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print('hello')
self.assertEqual(stdout.getvalue(), 'hello\n')
Taken from Lib/support/__init__.py in the CPython repo.
"""
return captured_output('stdout')
class cached_property(object):
"""A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
# We're being accessed from the class itself, not from an object
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
|
|
"""Support for MQTT message handling."""
import asyncio
from functools import partial, wraps
import inspect
from itertools import groupby
import json
import logging
from operator import attrgetter
import os
import ssl
from typing import Any, Callable, List, Optional, Union
import attr
import certifi
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import websocket_api
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_DEVICE,
CONF_NAME,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_PORT,
CONF_PROTOCOL,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import Event, ServiceCall, callback
from homeassistant.exceptions import HomeAssistantError, Unauthorized
from homeassistant.helpers import config_validation as cv, event, template
from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceDataType
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.logging import catch_log_exception
# Loading the config flow file will register the flow
from . import config_flow # noqa: F401 pylint: disable=unused-import
from . import debug_info, discovery
from .const import (
ATTR_DISCOVERY_HASH,
ATTR_DISCOVERY_TOPIC,
ATTR_PAYLOAD,
ATTR_QOS,
ATTR_RETAIN,
ATTR_TOPIC,
CONF_BIRTH_MESSAGE,
CONF_BROKER,
CONF_DISCOVERY,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
CONF_WILL_MESSAGE,
DEFAULT_BIRTH,
DEFAULT_DISCOVERY,
DEFAULT_PAYLOAD_AVAILABLE,
DEFAULT_PAYLOAD_NOT_AVAILABLE,
DEFAULT_PREFIX,
DEFAULT_QOS,
DEFAULT_RETAIN,
DEFAULT_WILL,
MQTT_CONNECTED,
MQTT_DISCONNECTED,
PROTOCOL_311,
)
from .debug_info import log_messages
from .discovery import MQTT_DISCOVERY_UPDATED, clear_discovery_hash, set_discovery_hash
from .models import Message, MessageCallbackType, PublishPayloadType
from .subscription import async_subscribe_topics, async_unsubscribe_topics
from .util import _VALID_QOS_SCHEMA, valid_publish_topic, valid_subscribe_topic
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mqtt"
DATA_MQTT = "mqtt"
DATA_MQTT_CONFIG = "mqtt_config"
SERVICE_PUBLISH = "publish"
SERVICE_DUMP = "dump"
CONF_DISCOVERY_PREFIX = "discovery_prefix"
CONF_KEEPALIVE = "keepalive"
CONF_CERTIFICATE = "certificate"
CONF_CLIENT_KEY = "client_key"
CONF_CLIENT_CERT = "client_cert"
CONF_TLS_INSECURE = "tls_insecure"
CONF_TLS_VERSION = "tls_version"
CONF_COMMAND_TOPIC = "command_topic"
CONF_TOPIC = "topic"
CONF_AVAILABILITY = "availability"
CONF_AVAILABILITY_TOPIC = "availability_topic"
CONF_PAYLOAD_AVAILABLE = "payload_available"
CONF_PAYLOAD_NOT_AVAILABLE = "payload_not_available"
CONF_JSON_ATTRS_TOPIC = "json_attributes_topic"
CONF_JSON_ATTRS_TEMPLATE = "json_attributes_template"
CONF_UNIQUE_ID = "unique_id"
CONF_IDENTIFIERS = "identifiers"
CONF_CONNECTIONS = "connections"
CONF_MANUFACTURER = "manufacturer"
CONF_MODEL = "model"
CONF_SW_VERSION = "sw_version"
CONF_VIA_DEVICE = "via_device"
CONF_DEPRECATED_VIA_HUB = "via_hub"
PROTOCOL_31 = "3.1"
DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
DEFAULT_PROTOCOL = PROTOCOL_311
DEFAULT_TLS_PROTOCOL = "auto"
ATTR_PAYLOAD_TEMPLATE = "payload_template"
MAX_RECONNECT_WAIT = 300 # seconds
CONNECTION_SUCCESS = "connection_success"
CONNECTION_FAILED = "connection_failed"
CONNECTION_FAILED_RECOVERABLE = "connection_failed_recoverable"
def validate_device_has_at_least_one_identifier(value: ConfigType) -> ConfigType:
"""Validate that a device info entry has at least one identifying value."""
if not value.get(CONF_IDENTIFIERS) and not value.get(CONF_CONNECTIONS):
raise vol.Invalid(
"Device must have at least one identifying value in "
"'identifiers' and/or 'connections'"
)
return value
CLIENT_KEY_AUTH_MSG = (
"client_key and client_cert must both be present in "
"the MQTT broker configuration"
)
MQTT_WILL_BIRTH_SCHEMA = vol.Schema(
{
vol.Inclusive(ATTR_TOPIC, "topic_payload"): valid_publish_topic,
vol.Inclusive(ATTR_PAYLOAD, "topic_payload"): cv.string,
vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
},
required=True,
)
def embedded_broker_deprecated(value):
"""Warn user that embedded MQTT broker is deprecated."""
_LOGGER.warning(
"The embedded MQTT broker has been deprecated and will stop working"
"after June 5th, 2019. Use an external broker instead. For"
"instructions, see https://www.home-assistant.io/docs/mqtt/broker"
)
return value
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.deprecated(CONF_TLS_VERSION, invalidation_version="0.115"),
vol.Schema(
{
vol.Optional(CONF_CLIENT_ID): cv.string,
vol.Optional(CONF_KEEPALIVE, default=DEFAULT_KEEPALIVE): vol.All(
vol.Coerce(int), vol.Range(min=15)
),
vol.Optional(CONF_BROKER): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_CERTIFICATE): vol.Any("auto", cv.isfile),
vol.Inclusive(
CONF_CLIENT_KEY, "client_key_auth", msg=CLIENT_KEY_AUTH_MSG
): cv.isfile,
vol.Inclusive(
CONF_CLIENT_CERT, "client_key_auth", msg=CLIENT_KEY_AUTH_MSG
): cv.isfile,
vol.Optional(CONF_TLS_INSECURE): cv.boolean,
vol.Optional(
CONF_TLS_VERSION, default=DEFAULT_TLS_PROTOCOL
): vol.Any("auto", "1.0", "1.1", "1.2"),
vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL): vol.All(
cv.string, vol.In([PROTOCOL_31, PROTOCOL_311])
),
vol.Optional(
CONF_WILL_MESSAGE, default=DEFAULT_WILL
): MQTT_WILL_BIRTH_SCHEMA,
vol.Optional(
CONF_BIRTH_MESSAGE, default=DEFAULT_BIRTH
): MQTT_WILL_BIRTH_SCHEMA,
vol.Optional(CONF_DISCOVERY, default=DEFAULT_DISCOVERY): cv.boolean,
# discovery_prefix must be a valid publish topic because if no
# state topic is specified, it will be created with the given prefix.
vol.Optional(
CONF_DISCOVERY_PREFIX, default=DEFAULT_PREFIX
): valid_publish_topic,
}
),
)
},
extra=vol.ALLOW_EXTRA,
)
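# Example (illustrative only, not part of the original module): validating a
# hypothetical user configuration against CONFIG_SCHEMA; every value below is
# a placeholder, and omitted options receive the defaults declared above.
#
#     conf = CONFIG_SCHEMA(
#         {
#             DOMAIN: {
#                 CONF_BROKER: "192.168.1.10",
#                 CONF_PORT: 1883,
#                 CONF_USERNAME: "ha_user",
#                 CONF_PASSWORD: "hunter2",
#                 CONF_DISCOVERY: True,
#             }
#         }
#     )[DOMAIN]
#     # conf now also carries keepalive, protocol, discovery_prefix, etc.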
SCHEMA_BASE = {vol.Optional(CONF_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA}
MQTT_AVAILABILITY_SINGLE_SCHEMA = vol.Schema(
{
vol.Exclusive(CONF_AVAILABILITY_TOPIC, "availability"): valid_subscribe_topic,
vol.Optional(
CONF_PAYLOAD_AVAILABLE, default=DEFAULT_PAYLOAD_AVAILABLE
): cv.string,
vol.Optional(
CONF_PAYLOAD_NOT_AVAILABLE, default=DEFAULT_PAYLOAD_NOT_AVAILABLE
): cv.string,
}
)
MQTT_AVAILABILITY_LIST_SCHEMA = vol.Schema(
{
vol.Exclusive(CONF_AVAILABILITY, "availability"): vol.All(
cv.ensure_list,
[
{
vol.Optional(CONF_TOPIC): valid_subscribe_topic,
vol.Optional(
CONF_PAYLOAD_AVAILABLE, default=DEFAULT_PAYLOAD_AVAILABLE
): cv.string,
vol.Optional(
CONF_PAYLOAD_NOT_AVAILABLE,
default=DEFAULT_PAYLOAD_NOT_AVAILABLE,
): cv.string,
}
],
),
}
)
MQTT_AVAILABILITY_SCHEMA = MQTT_AVAILABILITY_SINGLE_SCHEMA.extend(
MQTT_AVAILABILITY_LIST_SCHEMA.schema
)
MQTT_ENTITY_DEVICE_INFO_SCHEMA = vol.All(
cv.deprecated(CONF_DEPRECATED_VIA_HUB, CONF_VIA_DEVICE),
vol.Schema(
{
vol.Optional(CONF_IDENTIFIERS, default=list): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_CONNECTIONS, default=list): vol.All(
cv.ensure_list, [vol.All(vol.Length(2), [cv.string])]
),
vol.Optional(CONF_MANUFACTURER): cv.string,
vol.Optional(CONF_MODEL): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_SW_VERSION): cv.string,
vol.Optional(CONF_VIA_DEVICE): cv.string,
}
),
validate_device_has_at_least_one_identifier,
)
MQTT_JSON_ATTRS_SCHEMA = vol.Schema(
{
vol.Optional(CONF_JSON_ATTRS_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_JSON_ATTRS_TEMPLATE): cv.template,
}
)
MQTT_BASE_PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(SCHEMA_BASE)
# Sensor type platforms subscribe to MQTT events
MQTT_RO_PLATFORM_SCHEMA = MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
)
# Switch type platforms publish to MQTT and may subscribe
MQTT_RW_PLATFORM_SCHEMA = MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_COMMAND_TOPIC): valid_publish_topic,
vol.Optional(CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
vol.Optional(CONF_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
)
# Service call validation schema
MQTT_PUBLISH_SCHEMA = vol.Schema(
{
vol.Required(ATTR_TOPIC): valid_publish_topic,
vol.Exclusive(ATTR_PAYLOAD, CONF_PAYLOAD): object,
vol.Exclusive(ATTR_PAYLOAD_TEMPLATE, CONF_PAYLOAD): cv.string,
vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
},
required=True,
)
SubscribePayloadType = Union[str, bytes] # Only bytes if encoding is None
def _build_publish_data(topic: Any, qos: int, retain: bool) -> ServiceDataType:
"""Build the arguments for the publish service without the payload."""
data = {ATTR_TOPIC: topic}
if qos is not None:
data[ATTR_QOS] = qos
if retain is not None:
data[ATTR_RETAIN] = retain
return data
@bind_hass
def publish(hass: HomeAssistantType, topic, payload, qos=None, retain=None) -> None:
"""Publish message to an MQTT topic."""
hass.add_job(async_publish, hass, topic, payload, qos, retain)
@callback
@bind_hass
def async_publish(
hass: HomeAssistantType, topic: Any, payload, qos=None, retain=None
) -> None:
"""Publish message to an MQTT topic."""
data = _build_publish_data(topic, qos, retain)
data[ATTR_PAYLOAD] = payload
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_PUBLISH, data))
@bind_hass
def publish_template(
hass: HomeAssistantType, topic, payload_template, qos=None, retain=None
) -> None:
"""Publish message to an MQTT topic."""
hass.add_job(async_publish_template, hass, topic, payload_template, qos, retain)
@bind_hass
def async_publish_template(
hass: HomeAssistantType, topic, payload_template, qos=None, retain=None
) -> None:
"""Publish message to an MQTT topic using a template payload."""
data = _build_publish_data(topic, qos, retain)
data[ATTR_PAYLOAD_TEMPLATE] = payload_template
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_PUBLISH, data))
def wrap_msg_callback(msg_callback: MessageCallbackType) -> MessageCallbackType:
"""Wrap an MQTT message callback to support deprecated signature."""
# Check for partials to properly determine if coroutine function
check_func = msg_callback
while isinstance(check_func, partial):
check_func = check_func.func
wrapper_func = None
if asyncio.iscoroutinefunction(check_func):
@wraps(msg_callback)
async def async_wrapper(msg: Any) -> None:
"""Call with deprecated signature."""
await msg_callback(msg.topic, msg.payload, msg.qos)
wrapper_func = async_wrapper
else:
@wraps(msg_callback)
def wrapper(msg: Any) -> None:
"""Call with deprecated signature."""
msg_callback(msg.topic, msg.payload, msg.qos)
wrapper_func = wrapper
return wrapper_func
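# Example (illustrative only): adapting a legacy callback that still uses the
# deprecated (topic, payload, qos) signature; the handler name is hypothetical.
#
#     def legacy_handler(topic, payload, qos):
#         """Handle a message with the old-style arguments."""
#
#     wrapped = wrap_msg_callback(legacy_handler)
#     # wrapped(msg) now calls legacy_handler(msg.topic, msg.payload, msg.qos)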
@bind_hass
async def async_subscribe(
hass: HomeAssistantType,
topic: str,
msg_callback: MessageCallbackType,
qos: int = DEFAULT_QOS,
encoding: Optional[str] = "utf-8",
):
"""Subscribe to an MQTT topic.
Call the return value to unsubscribe.
"""
# Count callback parameters which don't have a default value
non_default = 0
if msg_callback:
non_default = sum(
p.default == inspect.Parameter.empty
for _, p in inspect.signature(msg_callback).parameters.items()
)
wrapped_msg_callback = msg_callback
# If we have 3 parameters with no default value, wrap the callback
if non_default == 3:
_LOGGER.warning(
"Signature of MQTT msg_callback '%s.%s' is deprecated",
inspect.getmodule(msg_callback).__name__,
msg_callback.__name__,
)
wrapped_msg_callback = wrap_msg_callback(msg_callback)
async_remove = await hass.data[DATA_MQTT].async_subscribe(
topic,
catch_log_exception(
wrapped_msg_callback,
lambda msg: (
f"Exception in {msg_callback.__name__} when handling msg on "
f"'{msg.topic}': '{msg.payload}'"
),
),
qos,
encoding,
)
return async_remove
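# Example (illustrative only): subscribing with the current single-argument
# callback signature from within a coroutine; the topic and handler below are
# hypothetical.
#
#     async def message_received(msg):
#         """Handle new MQTT messages."""
#         _LOGGER.debug("%s -> %s", msg.topic, msg.payload)
#
#     unsubscribe = await async_subscribe(hass, "home/+/state", message_received)
#     ...
#     unsubscribe()  # the returned callable removes the subscription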
@bind_hass
def subscribe(
hass: HomeAssistantType,
topic: str,
msg_callback: MessageCallbackType,
qos: int = DEFAULT_QOS,
encoding: str = "utf-8",
) -> Callable[[], None]:
"""Subscribe to an MQTT topic."""
async_remove = asyncio.run_coroutine_threadsafe(
async_subscribe(hass, topic, msg_callback, qos, encoding), hass.loop
).result()
def remove():
"""Remove listener convert."""
run_callback_threadsafe(hass.loop, async_remove).result()
return remove
async def _async_setup_discovery(
hass: HomeAssistantType, conf: ConfigType, config_entry
) -> bool:
"""Try to start the discovery of MQTT devices.
This method is a coroutine.
"""
success: bool = await discovery.async_start(
hass, conf[CONF_DISCOVERY_PREFIX], config_entry
)
return success
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Start the MQTT protocol service."""
conf: Optional[ConfigType] = config.get(DOMAIN)
websocket_api.async_register_command(hass, websocket_subscribe)
websocket_api.async_register_command(hass, websocket_remove_device)
websocket_api.async_register_command(hass, websocket_mqtt_info)
if conf is None:
# If we have a config entry, setup is done by that config entry.
# If there is no config entry, this should fail.
return bool(hass.config_entries.async_entries(DOMAIN))
conf = dict(conf)
hass.data[DATA_MQTT_CONFIG] = conf
# Only import if we haven't before.
if not hass.config_entries.async_entries(DOMAIN):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data={}
)
)
return True
def _merge_config(entry, conf):
"""Merge configuration.yaml config with config entry."""
return {**conf, **entry.data}
async def async_setup_entry(hass, entry):
"""Load a config entry."""
conf = hass.data.get(DATA_MQTT_CONFIG)
# Config entry was created because user had configuration.yaml entry
# They removed that, so remove entry.
if conf is None and entry.source == config_entries.SOURCE_IMPORT:
hass.async_create_task(hass.config_entries.async_remove(entry.entry_id))
return False
# If user didn't have configuration.yaml config, generate defaults
if conf is None:
conf = CONFIG_SCHEMA({DOMAIN: dict(entry.data)})[DOMAIN]
elif any(key in conf for key in entry.data):
shared_keys = conf.keys() & entry.data.keys()
override = {k: entry.data[k] for k in shared_keys}
if CONF_PASSWORD in override:
override[CONF_PASSWORD] = "********"
_LOGGER.info(
"Data in your configuration entry is going to override your "
"configuration.yaml: %s",
override,
)
conf = _merge_config(entry, conf)
hass.data[DATA_MQTT] = MQTT(hass, entry, conf,)
await hass.data[DATA_MQTT].async_connect()
async def async_stop_mqtt(_event: Event):
"""Stop MQTT component."""
await hass.data[DATA_MQTT].async_disconnect()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_mqtt)
async def async_publish_service(call: ServiceCall):
"""Handle MQTT publish service calls."""
msg_topic: str = call.data[ATTR_TOPIC]
payload = call.data.get(ATTR_PAYLOAD)
payload_template = call.data.get(ATTR_PAYLOAD_TEMPLATE)
qos: int = call.data[ATTR_QOS]
retain: bool = call.data[ATTR_RETAIN]
if payload_template is not None:
try:
payload = template.Template(payload_template, hass).async_render()
except template.jinja2.TemplateError as exc:
_LOGGER.error(
"Unable to publish to %s: rendering payload template of "
"%s failed because %s",
msg_topic,
payload_template,
exc,
)
return
await hass.data[DATA_MQTT].async_publish(msg_topic, payload, qos, retain)
hass.services.async_register(
DOMAIN, SERVICE_PUBLISH, async_publish_service, schema=MQTT_PUBLISH_SCHEMA
)
async def async_dump_service(call: ServiceCall):
"""Handle MQTT dump service calls."""
messages = []
@callback
def collect_msg(msg):
messages.append((msg.topic, msg.payload.replace("\n", "")))
unsub = await async_subscribe(hass, call.data["topic"], collect_msg)
def write_dump():
with open(hass.config.path("mqtt_dump.txt"), "wt") as fp:
for msg in messages:
fp.write(",".join(msg) + "\n")
async def finish_dump(_):
"""Write dump to file."""
unsub()
await hass.async_add_executor_job(write_dump)
event.async_call_later(hass, call.data["duration"], finish_dump)
hass.services.async_register(
DOMAIN,
SERVICE_DUMP,
async_dump_service,
schema=vol.Schema(
{
vol.Required("topic"): valid_subscribe_topic,
vol.Optional("duration", default=5): int,
}
),
)
if conf.get(CONF_DISCOVERY):
await _async_setup_discovery(hass, conf, entry)
return True
@attr.s(slots=True, frozen=True)
class Subscription:
"""Class to hold data about an active subscription."""
topic: str = attr.ib()
callback: MessageCallbackType = attr.ib()
qos: int = attr.ib(default=0)
encoding: str = attr.ib(default="utf-8")
class MQTT:
"""Home Assistant MQTT client."""
def __init__(self, hass: HomeAssistantType, config_entry, conf,) -> None:
"""Initialize Home Assistant MQTT client."""
# We don't import on the top because some integrations
# should be able to optionally rely on MQTT.
import paho.mqtt.client as mqtt # pylint: disable=import-outside-toplevel
self.hass = hass
self.config_entry = config_entry
self.conf = conf
self.subscriptions: List[Subscription] = []
self.connected = False
self._mqttc: mqtt.Client = None
self._paho_lock = asyncio.Lock()
self.init_client()
self.config_entry.add_update_listener(self.async_config_entry_updated)
@staticmethod
async def async_config_entry_updated(hass, entry) -> None:
"""Handle signals of config entry being updated.
        This is a static method because a class method (bound method) cannot be
        used with weak references. It is called, for example, when config entry
        options change.
"""
self = hass.data[DATA_MQTT]
conf = hass.data.get(DATA_MQTT_CONFIG)
if conf is None:
conf = CONFIG_SCHEMA({DOMAIN: dict(entry.data)})[DOMAIN]
self.conf = _merge_config(entry, conf)
await self.async_disconnect()
self.init_client()
await self.async_connect()
await discovery.async_stop(hass)
if self.conf.get(CONF_DISCOVERY):
await _async_setup_discovery(hass, self.conf, entry)
def init_client(self):
"""Initialize paho client."""
# We don't import on the top because some integrations
# should be able to optionally rely on MQTT.
import paho.mqtt.client as mqtt # pylint: disable=import-outside-toplevel
if self.conf[CONF_PROTOCOL] == PROTOCOL_31:
proto: int = mqtt.MQTTv31
else:
proto = mqtt.MQTTv311
client_id = self.conf.get(CONF_CLIENT_ID)
if client_id is None:
self._mqttc = mqtt.Client(protocol=proto)
else:
self._mqttc = mqtt.Client(client_id, protocol=proto)
username = self.conf.get(CONF_USERNAME)
password = self.conf.get(CONF_PASSWORD)
if username is not None:
self._mqttc.username_pw_set(username, password)
certificate = self.conf.get(CONF_CERTIFICATE)
# For cloudmqtt.com, secured connection, auto fill in certificate
if (
certificate is None
and 19999 < self.conf[CONF_PORT] < 30000
and self.conf[CONF_BROKER].endswith(".cloudmqtt.com")
):
certificate = os.path.join(
os.path.dirname(__file__), "addtrustexternalcaroot.crt"
)
# When the certificate is set to auto, use bundled certs from certifi
elif certificate == "auto":
certificate = certifi.where()
client_key = self.conf.get(CONF_CLIENT_KEY)
client_cert = self.conf.get(CONF_CLIENT_CERT)
tls_insecure = self.conf.get(CONF_TLS_INSECURE)
if certificate is not None:
self._mqttc.tls_set(
certificate,
certfile=client_cert,
keyfile=client_key,
tls_version=ssl.PROTOCOL_TLS,
)
if tls_insecure is not None:
self._mqttc.tls_insecure_set(tls_insecure)
self._mqttc.on_connect = self._mqtt_on_connect
self._mqttc.on_disconnect = self._mqtt_on_disconnect
self._mqttc.on_message = self._mqtt_on_message
if (
CONF_WILL_MESSAGE in self.conf
and ATTR_TOPIC in self.conf[CONF_WILL_MESSAGE]
):
will_message = Message(**self.conf[CONF_WILL_MESSAGE])
else:
will_message = None
if will_message is not None:
self._mqttc.will_set( # pylint: disable=no-value-for-parameter
topic=will_message.topic,
payload=will_message.payload,
qos=will_message.qos,
retain=will_message.retain,
)
async def async_publish(
self, topic: str, payload: PublishPayloadType, qos: int, retain: bool
) -> None:
"""Publish a MQTT message."""
async with self._paho_lock:
_LOGGER.debug("Transmitting message on %s: %s", topic, payload)
await self.hass.async_add_executor_job(
self._mqttc.publish, topic, payload, qos, retain
)
async def async_connect(self) -> str:
"""Connect to the host. Does not process messages yet."""
# pylint: disable=import-outside-toplevel
import paho.mqtt.client as mqtt
result: int = None
try:
result = await self.hass.async_add_executor_job(
self._mqttc.connect,
self.conf[CONF_BROKER],
self.conf[CONF_PORT],
self.conf[CONF_KEEPALIVE],
)
except OSError as err:
_LOGGER.error("Failed to connect to MQTT server due to exception: %s", err)
if result is not None and result != 0:
_LOGGER.error(
"Failed to connect to MQTT server: %s", mqtt.error_string(result)
)
self._mqttc.loop_start()
async def async_disconnect(self):
"""Stop the MQTT client."""
def stop():
"""Stop the MQTT client."""
# Do not disconnect, we want the broker to always publish will
self._mqttc.loop_stop()
await self.hass.async_add_executor_job(stop)
async def async_subscribe(
self,
topic: str,
msg_callback: MessageCallbackType,
qos: int,
encoding: Optional[str] = None,
) -> Callable[[], None]:
"""Set up a subscription to a topic with the provided qos.
This method is a coroutine.
"""
if not isinstance(topic, str):
raise HomeAssistantError("Topic needs to be a string!")
subscription = Subscription(topic, msg_callback, qos, encoding)
self.subscriptions.append(subscription)
# Only subscribe if currently connected.
if self.connected:
await self._async_perform_subscription(topic, qos)
@callback
def async_remove() -> None:
"""Remove subscription."""
if subscription not in self.subscriptions:
raise HomeAssistantError("Can't remove subscription twice")
self.subscriptions.remove(subscription)
if any(other.topic == topic for other in self.subscriptions):
# Other subscriptions on topic remaining - don't unsubscribe.
return
# Only unsubscribe if currently connected.
if self.connected:
self.hass.async_create_task(self._async_unsubscribe(topic))
return async_remove
async def _async_unsubscribe(self, topic: str) -> None:
"""Unsubscribe from a topic.
This method is a coroutine.
"""
_LOGGER.debug("Unsubscribing from %s", topic)
async with self._paho_lock:
result: int = None
result, _ = await self.hass.async_add_executor_job(
self._mqttc.unsubscribe, topic
)
_raise_on_error(result)
async def _async_perform_subscription(self, topic: str, qos: int) -> None:
"""Perform a paho-mqtt subscription."""
_LOGGER.debug("Subscribing to %s", topic)
async with self._paho_lock:
result: int = None
result, _ = await self.hass.async_add_executor_job(
self._mqttc.subscribe, topic, qos
)
_raise_on_error(result)
def _mqtt_on_connect(self, _mqttc, _userdata, _flags, result_code: int) -> None:
"""On connect callback.
Resubscribe to all topics we were subscribed to and publish birth
message.
"""
# pylint: disable=import-outside-toplevel
import paho.mqtt.client as mqtt
if result_code != mqtt.CONNACK_ACCEPTED:
_LOGGER.error(
"Unable to connect to the MQTT broker: %s",
mqtt.connack_string(result_code),
)
return
self.connected = True
dispatcher_send(self.hass, MQTT_CONNECTED)
_LOGGER.info(
"Connected to MQTT server %s:%s (%s)",
self.conf[CONF_BROKER],
self.conf[CONF_PORT],
result_code,
)
# Group subscriptions to only re-subscribe once for each topic.
keyfunc = attrgetter("topic")
for topic, subs in groupby(sorted(self.subscriptions, key=keyfunc), keyfunc):
# Re-subscribe with the highest requested qos
max_qos = max(subscription.qos for subscription in subs)
self.hass.add_job(self._async_perform_subscription, topic, max_qos)
if (
CONF_BIRTH_MESSAGE in self.conf
and ATTR_TOPIC in self.conf[CONF_BIRTH_MESSAGE]
):
birth_message = Message(**self.conf[CONF_BIRTH_MESSAGE])
self.hass.add_job(
self.async_publish( # pylint: disable=no-value-for-parameter
topic=birth_message.topic,
payload=birth_message.payload,
qos=birth_message.qos,
retain=birth_message.retain,
)
)
def _mqtt_on_message(self, _mqttc, _userdata, msg) -> None:
"""Message received callback."""
self.hass.add_job(self._mqtt_handle_message, msg)
@callback
def _mqtt_handle_message(self, msg) -> None:
_LOGGER.debug(
"Received message on %s%s: %s",
msg.topic,
" (retained)" if msg.retain else "",
msg.payload,
)
timestamp = dt_util.utcnow()
for subscription in self.subscriptions:
if not _match_topic(subscription.topic, msg.topic):
continue
payload: SubscribePayloadType = msg.payload
if subscription.encoding is not None:
try:
payload = msg.payload.decode(subscription.encoding)
except (AttributeError, UnicodeDecodeError):
_LOGGER.warning(
"Can't decode payload %s on %s with encoding %s (for %s)",
msg.payload,
msg.topic,
subscription.encoding,
subscription.callback,
)
continue
self.hass.async_run_job(
subscription.callback,
Message(
msg.topic,
payload,
msg.qos,
msg.retain,
subscription.topic,
timestamp,
),
)
def _mqtt_on_disconnect(self, _mqttc, _userdata, result_code: int) -> None:
"""Disconnected callback."""
self.connected = False
dispatcher_send(self.hass, MQTT_DISCONNECTED)
_LOGGER.warning(
"Disconnected from MQTT server %s:%s (%s)",
self.conf[CONF_BROKER],
self.conf[CONF_PORT],
result_code,
)
def _raise_on_error(result_code: int) -> None:
"""Raise error if error result."""
# pylint: disable=import-outside-toplevel
import paho.mqtt.client as mqtt
if result_code != 0:
raise HomeAssistantError(
f"Error talking to MQTT: {mqtt.error_string(result_code)}"
)
def _match_topic(subscription: str, topic: str) -> bool:
"""Test if topic matches subscription."""
# pylint: disable=import-outside-toplevel
from paho.mqtt.matcher import MQTTMatcher
matcher = MQTTMatcher()
matcher[subscription] = True
try:
next(matcher.iter_match(topic))
return True
except StopIteration:
return False
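# Example (illustrative only): MQTTMatcher implements the standard MQTT
# wildcard rules, so for instance:
#
#     _match_topic("home/+/state", "home/kitchen/state")  # True  ('+' = one level)
#     _match_topic("home/#", "home/kitchen/light/state")  # True  ('#' = any depth)
#     _match_topic("home/+/state", "home/kitchen/light")  # False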
class MqttAttributes(Entity):
"""Mixin used for platforms that support JSON attributes."""
def __init__(self, config: dict) -> None:
"""Initialize the JSON attributes mixin."""
self._attributes = None
self._attributes_sub_state = None
self._attributes_config = config
async def async_added_to_hass(self) -> None:
"""Subscribe MQTT events."""
await super().async_added_to_hass()
await self._attributes_subscribe_topics()
async def attributes_discovery_update(self, config: dict):
"""Handle updated discovery message."""
self._attributes_config = config
await self._attributes_subscribe_topics()
async def _attributes_subscribe_topics(self):
"""(Re)Subscribe to topics."""
attr_tpl = self._attributes_config.get(CONF_JSON_ATTRS_TEMPLATE)
if attr_tpl is not None:
attr_tpl.hass = self.hass
@callback
@log_messages(self.hass, self.entity_id)
def attributes_message_received(msg: Message) -> None:
try:
payload = msg.payload
if attr_tpl is not None:
payload = attr_tpl.async_render_with_possible_json_value(payload)
json_dict = json.loads(payload)
if isinstance(json_dict, dict):
self._attributes = json_dict
self.async_write_ha_state()
else:
_LOGGER.warning("JSON result was not a dictionary")
self._attributes = None
except ValueError:
_LOGGER.warning("Erroneous JSON: %s", payload)
self._attributes = None
self._attributes_sub_state = await async_subscribe_topics(
self.hass,
self._attributes_sub_state,
{
CONF_JSON_ATTRS_TOPIC: {
"topic": self._attributes_config.get(CONF_JSON_ATTRS_TOPIC),
"msg_callback": attributes_message_received,
"qos": self._attributes_config.get(CONF_QOS),
}
},
)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._attributes_sub_state = await async_unsubscribe_topics(
self.hass, self._attributes_sub_state
)
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
class MqttAvailability(Entity):
"""Mixin used for platforms that report availability."""
def __init__(self, config: dict) -> None:
"""Initialize the availability mixin."""
self._availability_sub_state = None
self._available = False
self._availability_setup_from_config(config)
async def async_added_to_hass(self) -> None:
"""Subscribe MQTT events."""
await super().async_added_to_hass()
await self._availability_subscribe_topics()
self.async_on_remove(
async_dispatcher_connect(self.hass, MQTT_CONNECTED, self.async_mqtt_connect)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, MQTT_DISCONNECTED, self.async_mqtt_connect
)
)
async def availability_discovery_update(self, config: dict):
"""Handle updated discovery message."""
self._availability_setup_from_config(config)
await self._availability_subscribe_topics()
def _availability_setup_from_config(self, config):
"""(Re)Setup."""
self._avail_topics = {}
if CONF_AVAILABILITY_TOPIC in config:
self._avail_topics[config[CONF_AVAILABILITY_TOPIC]] = {
CONF_PAYLOAD_AVAILABLE: config[CONF_PAYLOAD_AVAILABLE],
CONF_PAYLOAD_NOT_AVAILABLE: config[CONF_PAYLOAD_NOT_AVAILABLE],
}
if CONF_AVAILABILITY in config:
for avail in config[CONF_AVAILABILITY]:
self._avail_topics[avail[CONF_TOPIC]] = {
CONF_PAYLOAD_AVAILABLE: avail[CONF_PAYLOAD_AVAILABLE],
CONF_PAYLOAD_NOT_AVAILABLE: avail[CONF_PAYLOAD_NOT_AVAILABLE],
}
self._avail_config = config
async def _availability_subscribe_topics(self):
"""(Re)Subscribe to topics."""
@callback
@log_messages(self.hass, self.entity_id)
def availability_message_received(msg: Message) -> None:
"""Handle a new received MQTT availability message."""
topic = msg.topic
if msg.payload == self._avail_topics[topic][CONF_PAYLOAD_AVAILABLE]:
self._available = True
elif msg.payload == self._avail_topics[topic][CONF_PAYLOAD_NOT_AVAILABLE]:
self._available = False
self.async_write_ha_state()
topics = {}
for topic in self._avail_topics:
topics[f"availability_{topic}"] = {
"topic": topic,
"msg_callback": availability_message_received,
"qos": self._avail_config[CONF_QOS],
}
self._availability_sub_state = await async_subscribe_topics(
self.hass, self._availability_sub_state, topics,
)
@callback
def async_mqtt_connect(self):
"""Update state on connection/disconnection to MQTT broker."""
if not self.hass.is_stopping:
self.async_write_ha_state()
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._availability_sub_state = await async_unsubscribe_topics(
self.hass, self._availability_sub_state
)
@property
def available(self) -> bool:
"""Return if the device is available."""
if not self.hass.data[DATA_MQTT].connected and not self.hass.is_stopping:
return False
return not self._avail_topics or self._available
async def cleanup_device_registry(hass, device_id):
"""Remove device registry entry if there are no remaining entities or triggers."""
# Local import to avoid circular dependencies
# pylint: disable=import-outside-toplevel
from . import device_trigger
device_registry = await hass.helpers.device_registry.async_get_registry()
entity_registry = await hass.helpers.entity_registry.async_get_registry()
if (
device_id
and not hass.helpers.entity_registry.async_entries_for_device(
entity_registry, device_id
)
and not await device_trigger.async_get_triggers(hass, device_id)
):
device_registry.async_remove_device(device_id)
class MqttDiscoveryUpdate(Entity):
"""Mixin used to handle updated discovery message."""
def __init__(self, discovery_data, discovery_update=None) -> None:
"""Initialize the discovery update mixin."""
self._discovery_data = discovery_data
self._discovery_update = discovery_update
self._remove_signal = None
self._removed_from_hass = False
async def async_added_to_hass(self) -> None:
"""Subscribe to discovery updates."""
await super().async_added_to_hass()
self._removed_from_hass = False
discovery_hash = (
self._discovery_data[ATTR_DISCOVERY_HASH] if self._discovery_data else None
)
async def _async_remove_state_and_registry_entry(self) -> None:
"""Remove entity's state and entity registry entry.
            Remove entity from entity registry if it is registered; this also removes the state.
If the entity is not in the entity registry, just remove the state.
"""
entity_registry = (
await self.hass.helpers.entity_registry.async_get_registry()
)
if entity_registry.async_is_registered(self.entity_id):
entity_entry = entity_registry.async_get(self.entity_id)
entity_registry.async_remove(self.entity_id)
await cleanup_device_registry(self.hass, entity_entry.device_id)
else:
await self.async_remove()
@callback
async def discovery_callback(payload):
"""Handle discovery update."""
_LOGGER.info(
"Got update for entity with hash: %s '%s'", discovery_hash, payload,
)
debug_info.update_entity_discovery_data(self.hass, payload, self.entity_id)
if not payload:
# Empty payload: Remove component
_LOGGER.info("Removing component: %s", self.entity_id)
self._cleanup_discovery_on_remove()
await _async_remove_state_and_registry_entry(self)
elif self._discovery_update:
# Non-empty payload: Notify component
_LOGGER.info("Updating component: %s", self.entity_id)
await self._discovery_update(payload)
if discovery_hash:
debug_info.add_entity_discovery_data(
self.hass, self._discovery_data, self.entity_id
)
# Set in case the entity has been removed and is re-added
set_discovery_hash(self.hass, discovery_hash)
self._remove_signal = async_dispatcher_connect(
self.hass,
MQTT_DISCOVERY_UPDATED.format(discovery_hash),
discovery_callback,
)
async def async_removed_from_registry(self) -> None:
"""Clear retained discovery topic in broker."""
if not self._removed_from_hass:
discovery_topic = self._discovery_data[ATTR_DISCOVERY_TOPIC]
publish(
self.hass, discovery_topic, "", retain=True,
)
async def async_will_remove_from_hass(self) -> None:
"""Stop listening to signal and cleanup discovery data.."""
self._cleanup_discovery_on_remove()
def _cleanup_discovery_on_remove(self) -> None:
"""Stop listening to signal and cleanup discovery data."""
if self._discovery_data and not self._removed_from_hass:
debug_info.remove_entity_data(self.hass, self.entity_id)
clear_discovery_hash(self.hass, self._discovery_data[ATTR_DISCOVERY_HASH])
self._removed_from_hass = True
if self._remove_signal:
self._remove_signal()
self._remove_signal = None
def device_info_from_config(config):
"""Return a device description for device registry."""
if not config:
return None
info = {
"identifiers": {(DOMAIN, id_) for id_ in config[CONF_IDENTIFIERS]},
"connections": {tuple(x) for x in config[CONF_CONNECTIONS]},
}
if CONF_MANUFACTURER in config:
info["manufacturer"] = config[CONF_MANUFACTURER]
if CONF_MODEL in config:
info["model"] = config[CONF_MODEL]
if CONF_NAME in config:
info["name"] = config[CONF_NAME]
if CONF_SW_VERSION in config:
info["sw_version"] = config[CONF_SW_VERSION]
if CONF_VIA_DEVICE in config:
info["via_device"] = (DOMAIN, config[CONF_VIA_DEVICE])
return info
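# Example (illustrative only): after validation by MQTT_ENTITY_DEVICE_INFO_SCHEMA
# (which fills in empty identifier/connection lists), a hypothetical discovery
# "device" block such as
#
#     {"identifiers": ["abc123"], "connections": [], "manufacturer": "Acme",
#      "name": "Garden Sensor"}
#
# is turned into
#
#     {"identifiers": {(DOMAIN, "abc123")}, "connections": set(),
#      "manufacturer": "Acme", "name": "Garden Sensor"}
#
# which MqttEntityDeviceInfo passes (plus config_entry_id) to
# device_registry.async_get_or_create().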
class MqttEntityDeviceInfo(Entity):
"""Mixin used for mqtt platforms that support the device registry."""
def __init__(self, device_config: Optional[ConfigType], config_entry=None) -> None:
"""Initialize the device mixin."""
self._device_config = device_config
self._config_entry = config_entry
async def device_info_discovery_update(self, config: dict):
"""Handle updated discovery message."""
self._device_config = config.get(CONF_DEVICE)
device_registry = await self.hass.helpers.device_registry.async_get_registry()
config_entry_id = self._config_entry.entry_id
device_info = self.device_info
if config_entry_id is not None and device_info is not None:
device_info["config_entry_id"] = config_entry_id
device_registry.async_get_or_create(**device_info)
@property
def device_info(self):
"""Return a device description for device registry."""
return device_info_from_config(self._device_config)
@websocket_api.websocket_command(
{vol.Required("type"): "mqtt/device/debug_info", vol.Required("device_id"): str}
)
@websocket_api.async_response
async def websocket_mqtt_info(hass, connection, msg):
"""Get MQTT debug info for device."""
device_id = msg["device_id"]
mqtt_info = await debug_info.info_for_device(hass, device_id)
connection.send_result(msg["id"], mqtt_info)
@websocket_api.websocket_command(
{vol.Required("type"): "mqtt/device/remove", vol.Required("device_id"): str}
)
@websocket_api.async_response
async def websocket_remove_device(hass, connection, msg):
"""Delete device."""
device_id = msg["device_id"]
dev_registry = await hass.helpers.device_registry.async_get_registry()
device = dev_registry.async_get(device_id)
if not device:
connection.send_error(
msg["id"], websocket_api.const.ERR_NOT_FOUND, "Device not found"
)
return
for config_entry in device.config_entries:
config_entry = hass.config_entries.async_get_entry(config_entry)
# Only delete the device if it belongs to an MQTT device entry
if config_entry.domain == DOMAIN:
dev_registry.async_remove_device(device_id)
connection.send_message(websocket_api.result_message(msg["id"]))
return
connection.send_error(
msg["id"], websocket_api.const.ERR_NOT_FOUND, "Non MQTT device"
)
@websocket_api.async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "mqtt/subscribe",
vol.Required("topic"): valid_subscribe_topic,
}
)
async def websocket_subscribe(hass, connection, msg):
"""Subscribe to a MQTT topic."""
if not connection.user.is_admin:
raise Unauthorized
async def forward_messages(mqttmsg: Message):
"""Forward events to websocket."""
connection.send_message(
websocket_api.event_message(
msg["id"],
{
"topic": mqttmsg.topic,
"payload": mqttmsg.payload,
"qos": mqttmsg.qos,
"retain": mqttmsg.retain,
},
)
)
connection.subscriptions[msg["id"]] = await async_subscribe(
hass, msg["topic"], forward_messages
)
connection.send_message(websocket_api.result_message(msg["id"]))
|
|
"""Certbot client crypto utility functions.
.. todo:: Make the transition to use PSS rather than PKCS1_v1_5 when the server
is capable of handling the signatures.
"""
import hashlib
import logging
import re
from typing import List
from typing import Set
import warnings
from cryptography import x509
from cryptography.exceptions import InvalidSignature
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.ec import ECDSA
from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.hazmat.primitives.serialization import NoEncryption
from cryptography.hazmat.primitives.serialization import PrivateFormat
from OpenSSL import crypto
from OpenSSL import SSL
import pyrfc3339
import zope.component
from acme import crypto_util as acme_crypto_util
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot.compat import os
logger = logging.getLogger(__name__)
# High level functions
def generate_key(key_size: int, key_dir: str, key_type: str = "rsa",
elliptic_curve: str = "secp256r1", keyname: str = "key-certbot.pem",
strict_permissions: bool = True) -> util.Key:
"""Initializes and saves a privkey.
Inits key and saves it in PEM format on the filesystem.
.. note:: keyname is the attempted filename, it may be different if a file
already exists at the path.
    :param int key_size: key size in bits if key type is rsa.
:param str key_dir: Key save directory.
:param str key_type: Key Type [rsa, ecdsa]
:param str elliptic_curve: Name of the elliptic curve if key type is ecdsa.
:param str keyname: Filename of key
:param bool strict_permissions: If true and key_dir exists, an exception is raised if
the directory doesn't have 0700 permissions or isn't owned by the current user.
:returns: Key
:rtype: :class:`certbot.util.Key`
:raises ValueError: If unable to generate the key given key_size.
"""
try:
key_pem = make_key(
bits=key_size, elliptic_curve=elliptic_curve or "secp256r1", key_type=key_type,
)
except ValueError as err:
logger.debug("", exc_info=True)
logger.error("Encountered error while making key: %s", str(err))
raise err
# Save file
util.make_or_verify_dir(key_dir, 0o700, strict_permissions)
key_f, key_path = util.unique_file(
os.path.join(key_dir, keyname), 0o600, "wb")
with key_f:
key_f.write(key_pem)
if key_type == 'rsa':
logger.debug("Generating RSA key (%d bits): %s", key_size, key_path)
else:
logger.debug("Generating ECDSA key (%d bits): %s", key_size, key_path)
return util.Key(key_path, key_pem)
# TODO: Remove this call once zope dependencies are removed from Certbot.
def init_save_key(key_size, key_dir, key_type="rsa", elliptic_curve="secp256r1",
keyname="key-certbot.pem"):
"""Initializes and saves a privkey.
Inits key and saves it in PEM format on the filesystem.
.. note:: keyname is the attempted filename, it may be different if a file
already exists at the path.
.. deprecated:: 1.16.0
Use :func:`generate_key` instead.
    :param int key_size: key size in bits if key type is rsa.
:param str key_dir: Key save directory.
:param str key_type: Key Type [rsa, ecdsa]
:param str elliptic_curve: Name of the elliptic curve if key type is ecdsa.
:param str keyname: Filename of key
:returns: Key
:rtype: :class:`certbot.util.Key`
:raises ValueError: If unable to generate the key given key_size.
"""
warnings.warn("certbot.crypto_util.init_save_key is deprecated, please use "
"certbot.crypto_util.generate_key instead.", DeprecationWarning)
config = zope.component.getUtility(interfaces.IConfig)
return generate_key(key_size, key_dir, key_type=key_type, elliptic_curve=elliptic_curve,
keyname=keyname, strict_permissions=config.strict_permissions)
def generate_csr(privkey: util.Key, names: Set[str], path: str,
must_staple: bool = False, strict_permissions: bool = True) -> util.CSR:
"""Initialize a CSR with the given private key.
:param privkey: Key to include in the CSR
:type privkey: :class:`certbot.util.Key`
:param set names: `str` names to include in the CSR
:param str path: Certificate save directory.
:param bool must_staple: If true, include the TLS Feature extension "OCSP Must Staple"
:param bool strict_permissions: If true and path exists, an exception is raised if
the directory doesn't have 0755 permissions or isn't owned by the current user.
:returns: CSR
:rtype: :class:`certbot.util.CSR`
"""
csr_pem = acme_crypto_util.make_csr(
privkey.pem, names, must_staple=must_staple)
# Save CSR
util.make_or_verify_dir(path, 0o755, strict_permissions)
csr_f, csr_filename = util.unique_file(
os.path.join(path, "csr-certbot.pem"), 0o644, "wb")
with csr_f:
csr_f.write(csr_pem)
logger.debug("Creating CSR: %s", csr_filename)
return util.CSR(csr_filename, csr_pem, "pem")
# TODO: Remove this call once zope dependencies are removed from Certbot.
def init_save_csr(privkey, names, path):
"""Initialize a CSR with the given private key.
.. deprecated:: 1.16.0
Use :func:`generate_csr` instead.
:param privkey: Key to include in the CSR
:type privkey: :class:`certbot.util.Key`
:param set names: `str` names to include in the CSR
:param str path: Certificate save directory.
:returns: CSR
:rtype: :class:`certbot.util.CSR`
"""
warnings.warn("certbot.crypto_util.init_save_csr is deprecated, please use "
"certbot.crypto_util.generate_csr instead.", DeprecationWarning)
config = zope.component.getUtility(interfaces.IConfig)
return generate_csr(privkey, names, path, must_staple=config.must_staple,
strict_permissions=config.strict_permissions)
# WARNING: the csr and private key file are possible attack vectors for TOCTOU
# We should either...
# A. Do more checks to verify that the CSR is trusted/valid
# B. Audit the parsing code for vulnerabilities
def valid_csr(csr):
"""Validate CSR.
    Check if `csr` is a valid CSR (it parses and its self-signature verifies).
:param str csr: CSR in PEM.
:returns: Validity of CSR.
:rtype: bool
"""
try:
req = crypto.load_certificate_request(
crypto.FILETYPE_PEM, csr)
return req.verify(req.get_pubkey())
except crypto.Error:
logger.debug("", exc_info=True)
return False
def csr_matches_pubkey(csr, privkey):
"""Does private key correspond to the subject public key in the CSR?
:param str csr: CSR in PEM.
:param str privkey: Private key file contents (PEM)
:returns: Correspondence of private key to CSR subject public key.
:rtype: bool
"""
req = crypto.load_certificate_request(
crypto.FILETYPE_PEM, csr)
pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, privkey)
try:
return req.verify(pkey)
except crypto.Error:
logger.debug("", exc_info=True)
return False
def import_csr_file(csrfile, data):
"""Import a CSR file, which can be either PEM or DER.
:param str csrfile: CSR filename
:param str data: contents of the CSR file
:returns: (`crypto.FILETYPE_PEM`,
util.CSR object representing the CSR,
list of domains requested in the CSR)
:rtype: tuple
"""
PEM = crypto.FILETYPE_PEM
load = crypto.load_certificate_request
try:
# Try to parse as DER first, then fall back to PEM.
csr = load(crypto.FILETYPE_ASN1, data)
except crypto.Error:
try:
csr = load(PEM, data)
except crypto.Error:
raise errors.Error("Failed to parse CSR file: {0}".format(csrfile))
domains = _get_names_from_loaded_cert_or_req(csr)
# Internally we always use PEM, so re-encode as PEM before returning.
data_pem = crypto.dump_certificate_request(PEM, csr)
return PEM, util.CSR(file=csrfile, data=data_pem, form="pem"), domains
def make_key(bits=1024, key_type="rsa", elliptic_curve=None):
"""Generate PEM encoded RSA|EC key.
:param int bits: Number of bits if key_type=rsa. At least 1024 for RSA.
:param str ec_curve: The elliptic curve to use.
:returns: new RSA or ECDSA key in PEM form with specified number of bits
or of type ec_curve when key_type ecdsa is used.
:rtype: str
"""
if key_type == 'rsa':
if bits < 1024:
raise errors.Error("Unsupported RSA key length: {}".format(bits))
key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, bits)
elif key_type == 'ecdsa':
try:
name = elliptic_curve.upper()
if name in ('SECP256R1', 'SECP384R1', 'SECP521R1'):
_key = ec.generate_private_key(
curve=getattr(ec, elliptic_curve.upper(), None)(),
backend=default_backend()
)
else:
raise errors.Error("Unsupported elliptic curve: {}".format(elliptic_curve))
except TypeError:
raise errors.Error("Unsupported elliptic curve: {}".format(elliptic_curve))
except UnsupportedAlgorithm as e:
            raise errors.Error(str(e)) from e
# This type ignore directive is required due to an outdated version of types-cryptography.
# It can be removed once package types-pyOpenSSL depends on cryptography instead of
# types-cryptography and so types-cryptography is not installed anymore.
# See https://github.com/python/typeshed/issues/5618
_key_pem = _key.private_bytes( # type: ignore
encoding=Encoding.PEM,
format=PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=NoEncryption()
)
key = crypto.load_privatekey(crypto.FILETYPE_PEM, _key_pem)
else:
raise errors.Error("Invalid key_type specified: {}. Use [rsa|ecdsa]".format(key_type))
return crypto.dump_privatekey(crypto.FILETYPE_PEM, key)
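# Example (illustrative only): the values below are placeholders, not
# recommendations.
#
#     rsa_pem = make_key(bits=2048)                                    # RSA
#     ec_pem = make_key(key_type="ecdsa", elliptic_curve="secp256r1")  # ECDSA
#
# Both calls return a PEM-encoded private key accepted by
# crypto.load_privatekey(crypto.FILETYPE_PEM, ...).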
def valid_privkey(privkey):
"""Is valid RSA private key?
:param str privkey: Private key file contents in PEM
:returns: Validity of private key.
:rtype: bool
"""
try:
return crypto.load_privatekey(
crypto.FILETYPE_PEM, privkey).check()
except (TypeError, crypto.Error):
return False
def verify_renewable_cert(renewable_cert):
"""For checking that your certs were not corrupted on disk.
Several things are checked:
1. Signature verification for the cert.
2. That fullchain matches cert and chain when concatenated.
3. Check that the private key matches the certificate.
:param renewable_cert: cert to verify
:type renewable_cert: certbot.interfaces.RenewableCert
:raises errors.Error: If verification fails.
"""
verify_renewable_cert_sig(renewable_cert)
verify_fullchain(renewable_cert)
verify_cert_matches_priv_key(renewable_cert.cert_path, renewable_cert.key_path)
def verify_renewable_cert_sig(renewable_cert):
"""Verifies the signature of a RenewableCert object.
:param renewable_cert: cert to verify
:type renewable_cert: certbot.interfaces.RenewableCert
:raises errors.Error: If signature verification fails.
"""
try:
with open(renewable_cert.chain_path, 'rb') as chain_file:
chain = x509.load_pem_x509_certificate(chain_file.read(), default_backend())
with open(renewable_cert.cert_path, 'rb') as cert_file:
cert = x509.load_pem_x509_certificate(cert_file.read(), default_backend())
pk = chain.public_key()
with warnings.catch_warnings():
verify_signed_payload(pk, cert.signature, cert.tbs_certificate_bytes,
cert.signature_hash_algorithm)
except (IOError, ValueError, InvalidSignature) as e:
error_str = "verifying the signature of the certificate located at {0} has failed. \
Details: {1}".format(renewable_cert.cert_path, e)
logger.exception(error_str)
raise errors.Error(error_str)
def verify_signed_payload(public_key, signature, payload, signature_hash_algorithm):
"""Check the signature of a payload.
:param RSAPublicKey/EllipticCurvePublicKey public_key: the public_key to check signature
:param bytes signature: the signature bytes
:param bytes payload: the payload bytes
:param cryptography.hazmat.primitives.hashes.HashAlgorithm \
signature_hash_algorithm: algorithm used to hash the payload
:raises InvalidSignature: If signature verification fails.
:raises errors.Error: If public key type is not supported
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if isinstance(public_key, RSAPublicKey):
verifier = public_key.verifier(
signature, PKCS1v15(), signature_hash_algorithm
)
verifier.update(payload)
verifier.verify()
elif isinstance(public_key, EllipticCurvePublicKey):
verifier = public_key.verifier(
signature, ECDSA(signature_hash_algorithm)
)
verifier.update(payload)
verifier.verify()
else:
raise errors.Error("Unsupported public key type")
def verify_cert_matches_priv_key(cert_path, key_path):
""" Verifies that the private key and cert match.
:param str cert_path: path to a cert in PEM format
:param str key_path: path to a private key file
:raises errors.Error: If they don't match.
"""
try:
context = SSL.Context(SSL.SSLv23_METHOD)
context.use_certificate_file(cert_path)
context.use_privatekey_file(key_path)
context.check_privatekey()
except (IOError, SSL.Error) as e:
error_str = "verifying the certificate located at {0} matches the \
private key located at {1} has failed. \
Details: {2}".format(cert_path,
key_path, e)
logger.exception(error_str)
raise errors.Error(error_str)
def verify_fullchain(renewable_cert):
""" Verifies that fullchain is indeed cert concatenated with chain.
:param renewable_cert: cert to verify
:type renewable_cert: certbot.interfaces.RenewableCert
:raises errors.Error: If cert and chain do not combine to fullchain.
"""
try:
with open(renewable_cert.chain_path) as chain_file:
chain = chain_file.read()
with open(renewable_cert.cert_path) as cert_file:
cert = cert_file.read()
with open(renewable_cert.fullchain_path) as fullchain_file:
fullchain = fullchain_file.read()
if (cert + chain) != fullchain:
error_str = "fullchain does not match cert + chain for {0}!"
error_str = error_str.format(renewable_cert.lineagename)
raise errors.Error(error_str)
except IOError as e:
error_str = "reading one of cert, chain, or fullchain has failed: {0}".format(e)
logger.exception(error_str)
raise errors.Error(error_str)
except errors.Error as e:
raise e
def pyopenssl_load_certificate(data):
"""Load PEM/DER certificate.
:raises errors.Error:
"""
openssl_errors = []
for file_type in (crypto.FILETYPE_PEM, crypto.FILETYPE_ASN1):
try:
return crypto.load_certificate(file_type, data), file_type
except crypto.Error as error: # TODO: other errors?
openssl_errors.append(error)
raise errors.Error("Unable to load: {0}".format(",".join(
str(error) for error in openssl_errors)))
def _load_cert_or_req(cert_or_req_str, load_func,
typ=crypto.FILETYPE_PEM):
try:
return load_func(typ, cert_or_req_str)
except crypto.Error as err:
logger.debug("", exc_info=True)
logger.error("Encountered error while loading certificate or csr: %s", str(err))
raise
def _get_sans_from_cert_or_req(cert_or_req_str, load_func,
typ=crypto.FILETYPE_PEM):
# pylint: disable=protected-access
return acme_crypto_util._pyopenssl_cert_or_req_san(_load_cert_or_req(
cert_or_req_str, load_func, typ))
def get_sans_from_cert(cert, typ=crypto.FILETYPE_PEM):
"""Get a list of Subject Alternative Names from a certificate.
:param str cert: Certificate (encoded).
:param typ: `crypto.FILETYPE_PEM` or `crypto.FILETYPE_ASN1`
:returns: A list of Subject Alternative Names.
:rtype: list
"""
return _get_sans_from_cert_or_req(
cert, crypto.load_certificate, typ)
def _get_names_from_cert_or_req(cert_or_req, load_func, typ):
loaded_cert_or_req = _load_cert_or_req(cert_or_req, load_func, typ)
return _get_names_from_loaded_cert_or_req(loaded_cert_or_req)
def _get_names_from_loaded_cert_or_req(loaded_cert_or_req):
# pylint: disable=protected-access
return acme_crypto_util._pyopenssl_cert_or_req_all_names(loaded_cert_or_req)
def get_names_from_cert(csr, typ=crypto.FILETYPE_PEM):
"""Get a list of domains from a cert, including the CN if it is set.
:param str cert: Certificate (encoded).
:param typ: `crypto.FILETYPE_PEM` or `crypto.FILETYPE_ASN1`
:returns: A list of domain names.
:rtype: list
"""
return _get_names_from_cert_or_req(
csr, crypto.load_certificate, typ)
def get_names_from_req(csr: str, typ: int = crypto.FILETYPE_PEM) -> List[str]:
"""Get a list of domains from a CSR, including the CN if it is set.
    :param str csr: CSR (encoded).
:param typ: `crypto.FILETYPE_PEM` or `crypto.FILETYPE_ASN1`
:returns: A list of domain names.
:rtype: list
"""
return _get_names_from_cert_or_req(csr, crypto.load_certificate_request, typ)
def dump_pyopenssl_chain(chain, filetype=crypto.FILETYPE_PEM):
"""Dump certificate chain into a bundle.
:param list chain: List of `crypto.X509` (or wrapped in
:class:`josepy.util.ComparableX509`).
"""
# XXX: returns empty string when no chain is available, which
# shuts up RenewableCert, but might not be the best solution...
return acme_crypto_util.dump_pyopenssl_chain(chain, filetype)
def notBefore(cert_path):
"""When does the cert at cert_path start being valid?
:param str cert_path: path to a cert in PEM format
:returns: the notBefore value from the cert at cert_path
:rtype: :class:`datetime.datetime`
"""
return _notAfterBefore(cert_path, crypto.X509.get_notBefore)
def notAfter(cert_path):
"""When does the cert at cert_path stop being valid?
:param str cert_path: path to a cert in PEM format
:returns: the notAfter value from the cert at cert_path
:rtype: :class:`datetime.datetime`
"""
return _notAfterBefore(cert_path, crypto.X509.get_notAfter)
def _notAfterBefore(cert_path, method):
"""Internal helper function for finding notbefore/notafter.
:param str cert_path: path to a cert in PEM format
:param function method: one of ``crypto.X509.get_notBefore``
or ``crypto.X509.get_notAfter``
:returns: the notBefore or notAfter value from the cert at cert_path
:rtype: :class:`datetime.datetime`
"""
# pylint: disable=redefined-outer-name
with open(cert_path, "rb") as f:
x509 = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
# pyopenssl always returns bytes
timestamp = method(x509)
reformatted_timestamp = [timestamp[0:4], b"-", timestamp[4:6], b"-",
timestamp[6:8], b"T", timestamp[8:10], b":",
timestamp[10:12], b":", timestamp[12:]]
# pyrfc3339 always uses the type `str`
timestamp_bytes = b"".join(reformatted_timestamp)
timestamp_str = timestamp_bytes.decode('ascii')
return pyrfc3339.parse(timestamp_str)
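# Example (illustrative only): pyOpenSSL returns ASN.1 time strings such as
# b"20250101120000Z"; the slicing above rewrites that into the RFC 3339 form
# "2025-01-01T12:00:00Z" expected by pyrfc3339.parse().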
def sha256sum(filename):
"""Compute a sha256sum of a file.
    NB: The file is read in text mode, so platform-specific newline characters
    are normalized before the hash is calculated.
:param str filename: path to the file whose hash will be computed
:returns: sha256 digest of the file in hexadecimal
:rtype: str
"""
sha256 = hashlib.sha256()
with open(filename, 'r') as file_d:
sha256.update(file_d.read().encode('UTF-8'))
return sha256.hexdigest()
# Finds one CERTIFICATE stricttextualmsg according to rfc7468#section-3.
# Does not validate the base64text - use crypto.load_certificate.
CERT_PEM_REGEX = re.compile(
b"""-----BEGIN CERTIFICATE-----\r?
.+?\r?
-----END CERTIFICATE-----\r?
""",
re.DOTALL # DOTALL (/s) because the base64text may include newlines
)
def cert_and_chain_from_fullchain(fullchain_pem):
"""Split fullchain_pem into cert_pem and chain_pem
:param str fullchain_pem: concatenated cert + chain
:returns: tuple of string cert_pem and chain_pem
:rtype: tuple
:raises errors.Error: If there are less than 2 certificates in the chain.
"""
# First pass: find the boundary of each certificate in the chain.
# TODO: This will silently skip over any "explanatory text" in between boundaries,
# which is prohibited by RFC8555.
certs = CERT_PEM_REGEX.findall(fullchain_pem.encode())
if len(certs) < 2:
raise errors.Error("failed to parse fullchain into cert and chain: " +
"less than 2 certificates in chain")
# Second pass: for each certificate found, parse it using OpenSSL and re-encode it,
# with the effect of normalizing any encoding variations (e.g. CRLF, whitespace).
certs_normalized = [crypto.dump_certificate(crypto.FILETYPE_PEM,
crypto.load_certificate(crypto.FILETYPE_PEM, cert)).decode() for cert in certs]
# Since each normalized cert has a newline suffix, no extra newlines are required.
return (certs_normalized[0], "".join(certs_normalized[1:]))
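# Example (illustrative only): for a fullchain.pem holding the leaf certificate
# followed by one or more intermediates,
#
#     cert_pem, chain_pem = cert_and_chain_from_fullchain(fullchain_pem)
#
# cert_pem is the normalized leaf certificate and chain_pem the remaining
# certificates concatenated in order.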
def get_serial_from_cert(cert_path):
"""Retrieve the serial number of a certificate from certificate path
:param str cert_path: path to a cert in PEM format
:returns: serial number of the certificate
:rtype: int
"""
# pylint: disable=redefined-outer-name
with open(cert_path, "rb") as f:
x509 = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
return x509.get_serial_number()
def find_chain_with_issuer(fullchains, issuer_cn, warn_on_no_match=False):
"""Chooses the first certificate chain from fullchains whose topmost
intermediate has an Issuer Common Name matching issuer_cn (in other words
the first chain which chains to a root whose name matches issuer_cn).
:param fullchains: The list of fullchains in PEM chain format.
:type fullchains: `list` of `str`
:param `str` issuer_cn: The exact Subject Common Name to match against any
issuer in the certificate chain.
:returns: The best-matching fullchain, PEM-encoded, or the first if none match.
:rtype: `str`
"""
for chain in fullchains:
certs = CERT_PEM_REGEX.findall(chain.encode())
top_cert = x509.load_pem_x509_certificate(certs[-1], default_backend())
top_issuer_cn = top_cert.issuer.get_attributes_for_oid(x509.NameOID.COMMON_NAME)
if top_issuer_cn and top_issuer_cn[0].value == issuer_cn:
return chain
# Nothing matched, return whatever was first in the list.
if warn_on_no_match:
logger.warning("Certbot has been configured to prefer certificate chains with "
"issuer '%s', but no chain from the CA matched this issuer. Using "
"the default certificate chain instead.", issuer_cn)
return fullchains[0]
|
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (> 1 week old) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
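# Hedged back-of-the-envelope sketch of the upload-target arithmetic used in
# run_test() below. Assumptions: MAX_BLOCK_SIZE is 1,000,000 bytes and the mined
# "full" block is roughly 930,000 bytes; the real test uses the actual block size.
_max_bytes_per_day = 200 * 1024 * 1024       # -maxuploadtarget=200, counted in MiB
_daily_buffer = 144 * 1000000                # reserved for relaying new blocks
_max_bytes_available = _max_bytes_per_day - _daily_buffer
_approx_old_block_size = 930000
# Roughly 70 getdata requests for the old block should be served before the
# target is exhausted and the peer gets disconnected.
assert 60 < _max_bytes_available // _approx_old_block_size < 80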
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
class MaxUploadTest(BitcoinTestFramework):
def __init__(self):
self.utxo = []
self.txouts = gen_return_txouts()
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("LUNARD", "lunard"),
help="lunard binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
def mine_full_block(self, node, address):
# Want to create a full block
# We'll generate a 66k transaction below, and 14 of them come close to the 1MB block limit
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
# Create a basic transaction that will send change back to ourselves after accounting for a fee,
# then insert the 128 generated transaction outputs in the middle. rawtx[92] is where the number
# of txouts is stored and is the only thing we overwrite from the original transaction.
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full-sized block containing the transactions we just created
node.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
# test_nodes[0] will test what happens if we just keep requesting the
# same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 200*1024*1024
daily_buffer = 144 * MAX_BLOCK_SIZE
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 144MB will be reserved for relaying new blocks, so expect this to
# succeed for ~70 tries.
for i in xrange(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in xrange(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print "Peer 0 disconnected after downloading old block too many times"
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 200 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(200):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print "Peer 1 able to repeatedly download new block"
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print "Peer 1 disconnected after trying to download old block"
print "Advancing system time on node to clear counters..."
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print "Peer 2 able to download old block"
[c.disconnect_node() for c in connections]
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
print "Restarting nodes with -whitelist=127.0.0.1"
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print "Peer 1 still connected after trying to download old block (whitelisted)"
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
MaxUploadTest().main()
|
|
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
import os
import numpy
import unittest
from numpy.testing import assert_allclose, assert_equal
from distarray.testing import ParallelTestCase, import_or_skip, temp_filepath
from distarray.localapi import LocalArray, ndenumerate
from distarray.localapi import (save_dnpy, load_dnpy, save_hdf5, load_hdf5,
load_npy)
from distarray.localapi.maps import Distribution
class TestDnpyFileIO(ParallelTestCase):
def setUp(self):
d = Distribution.from_shape(comm=self.comm, shape=(7,))
self.larr0 = LocalArray(d)
# a different file on every engine
self.output_path = temp_filepath(extension='.dnpy')
def tearDown(self):
if os.path.exists(self.output_path):
os.remove(self.output_path)
def test_flat_file_save_with_filename(self):
save_dnpy(self.output_path, self.larr0)
with open(self.output_path, 'rb') as fp:
magic = fp.read(6)
self.assertTrue(magic == b'\x93DARRY')
def test_flat_file_save_with_file_object(self):
with open(self.output_path, 'wb') as fp:
save_dnpy(fp, self.larr0)
with open(self.output_path, 'rb') as fp:
magic = fp.read(6)
self.assertTrue(magic == b'\x93DARRY')
@unittest.skip("FIXME")
def test_flat_file_save_load_with_filename(self):
save_dnpy(self.output_path, self.larr0)
larr1 = load_dnpy(comm=self.comm, file=self.output_path)
self.assertTrue(isinstance(larr1, LocalArray))
assert_allclose(self.larr0, larr1)
@unittest.skip("FIXME")
def test_flat_file_save_load_with_file_object(self):
save_dnpy(self.output_path, self.larr0)
with open(self.output_path, 'rb') as fp:
larr1 = load_dnpy(comm=self.comm, file=fp)
self.assertTrue(isinstance(larr1, LocalArray))
assert_allclose(self.larr0, larr1)
bn_test_data = [
({'size': 2,
'dist_type': 'b',
'proc_grid_rank': 0,
'proc_grid_size': 2,
'start': 0,
'stop': 1,
},
{'size': 10,
'dist_type': 'n',
}),
({'size': 2,
'dist_type': 'b',
'proc_grid_rank': 1,
'proc_grid_size': 2,
'start': 1,
'stop': 2,
},
{'size': 10,
'dist_type': 'n',
})
]
nc_test_data = [
({'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'c',
'proc_grid_rank': 0,
'proc_grid_size': 2,
'start': 0,
},),
({'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'c',
'proc_grid_rank': 1,
'proc_grid_size': 2,
'start': 1,
},)
]
nu_test_data = [
# Note: unstructured indices must be in increasing order
# (restriction of h5py / HDF5)
(
{'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'u',
'proc_grid_rank': 0,
'proc_grid_size': 2,
'indices': [0, 3, 4, 6, 8],
},
),
(
{'size': 2,
'dist_type': 'n',
},
{'size': 10,
'dist_type': 'u',
'proc_grid_rank': 1,
'proc_grid_size': 2,
'indices': [1, 2, 5, 7, 9],
},
)
]
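# Hedged helper sketch (not used by the tests) of how a per-dimension dim_data
# dict roughly maps onto a NumPy index for each distribution type, matching the
# expectations checked in the load tests below.
def _dim_data_to_index(dd):
    dist = dd['dist_type']
    if dist == 'n':      # not distributed: take the whole axis
        return slice(None)
    if dist == 'b':      # block: contiguous [start, stop)
        return slice(dd['start'], dd['stop'])
    if dist == 'c':      # cyclic: start offset, step by the process grid size
        return slice(dd['start'], None, dd['proc_grid_size'])
    if dist == 'u':      # unstructured: explicit index list
        return dd['indices']
    raise ValueError(dist)

# e.g. rank 1 of nc_test_data selects every other column starting at 1, which is
# exactly the slice TestNpyFileLoad.test_load_nc expects.
assert _dim_data_to_index(nc_test_data[1][1]) == slice(1, None, 2)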
class TestNpyFileLoad(ParallelTestCase):
comm_size = 2
def setUp(self):
self.rank = self.comm.Get_rank()
# set up a common filename to work with
if self.rank == 0:
self.output_path = temp_filepath(extension='.npy')
else:
self.output_path = None
self.output_path = self.comm.bcast(self.output_path, root=0)
# save some data to that file
self.expected = numpy.arange(20).reshape(2, 10)
if self.rank == 0:
numpy.save(self.output_path, self.expected)
self.comm.Barrier()
def tearDown(self):
# delete the test file
if self.rank == 0:
if os.path.exists(self.output_path):
os.remove(self.output_path)
def test_load_bn(self):
dim_data_per_rank = bn_test_data
la = load_npy(comm=self.comm, filename=self.output_path,
dim_data=dim_data_per_rank[self.rank])
assert_equal(la, self.expected[numpy.newaxis, self.rank])
def test_load_nc(self):
dim_data_per_rank = nc_test_data
expected_slices = [(slice(None), slice(0, None, 2)),
(slice(None), slice(1, None, 2))]
la = load_npy(comm=self.comm, filename=self.output_path,
dim_data=dim_data_per_rank[self.rank])
assert_equal(la, self.expected[expected_slices[self.rank]])
def test_load_nu(self):
dim_data_per_rank = nu_test_data
expected_indices = [dd[1]['indices'] for dd in dim_data_per_rank]
la = load_npy(comm=self.comm, filename=self.output_path,
dim_data=dim_data_per_rank[self.rank])
assert_equal(la, self.expected[:, expected_indices[self.rank]])
class TestHdf5FileSave(ParallelTestCase):
def setUp(self):
self.rank = self.comm.Get_rank()
self.h5py = import_or_skip('h5py')
self.key = "data"
# set up a common file to work with
if self.rank == 0:
self.output_path = temp_filepath(extension='.hdf5')
else:
self.output_path = None
self.output_path = self.comm.bcast(self.output_path, root=0)
def test_save_1d(self):
d = Distribution.from_shape(comm=self.comm, shape=(51,))
la = LocalArray(d)
np_arr = numpy.random.random(la.local_shape)
la.ndarray = np_arr
save_hdf5(self.output_path, la, key=self.key, mode='w')
# check saved file
with self.h5py.File(self.output_path, 'r', driver='mpio',
comm=self.comm) as fp:
for i, v in ndenumerate(la):
self.assertEqual(v, fp[self.key][i])
def test_save_2d(self):
d = Distribution.from_shape(comm=self.comm, shape=(11, 15))
la = LocalArray(d)
np_arr = numpy.random.random(la.local_shape)
la.ndarray = np_arr
save_hdf5(self.output_path, la, key=self.key, mode='w')
with self.h5py.File(self.output_path, 'r', driver='mpio',
comm=self.comm) as fp:
for i, v in ndenumerate(la):
self.assertEqual(v, fp[self.key][i])
def tearDown(self):
# delete the test file
if self.rank == 0:
if os.path.exists(self.output_path):
os.remove(self.output_path)
class TestHdf5FileLoad(ParallelTestCase):
comm_size = 2
def setUp(self):
self.rank = self.comm.Get_rank()
self.h5py = import_or_skip('h5py')
self.key = "data"
self.expected = numpy.arange(20).reshape(2, 10)
# set up a common file to work with
if self.rank == 0:
self.output_path = temp_filepath(extension='.hdf5')
with self.h5py.File(self.output_path, 'w') as fp:
fp[self.key] = self.expected
else:
self.output_path = None
self.comm.Barrier() # wait until file exists
self.output_path = self.comm.bcast(self.output_path, root=0)
def tearDown(self):
# delete the test file
if self.rank == 0:
if os.path.exists(self.output_path):
os.remove(self.output_path)
def test_load_bn(self):
dim_data_per_rank = bn_test_data
la = load_hdf5(comm=self.comm, filename=self.output_path,
dim_data=dim_data_per_rank[self.rank],
key=self.key)
with self.h5py.File(self.output_path, 'r', driver='mpio',
comm=self.comm) as fp:
assert_equal(la, self.expected[numpy.newaxis, self.rank])
def test_load_nc(self):
dim_data_per_rank = nc_test_data
expected_slices = [(slice(None), slice(0, None, 2)),
(slice(None), slice(1, None, 2))]
la = load_hdf5(comm=self.comm, filename=self.output_path,
dim_data=dim_data_per_rank[self.rank],
key=self.key)
with self.h5py.File(self.output_path, 'r', driver='mpio',
comm=self.comm) as fp:
expected_slice = expected_slices[self.rank]
assert_equal(la, self.expected[expected_slice])
def test_load_nu(self):
dim_data_per_rank = nu_test_data
expected_indices = [dd[1]['indices'] for dd in dim_data_per_rank]
la = load_hdf5(comm=self.comm, filename=self.output_path,
dim_data=dim_data_per_rank[self.rank],
key=self.key)
with self.h5py.File(self.output_path, 'r', driver='mpio',
comm=self.comm) as fp:
assert_equal(la, self.expected[:, expected_indices[self.rank]])
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from benchmarks import silk_flags
from measurements import smoothness
import page_sets
import page_sets.key_silk_cases
from telemetry import benchmark
class _Smoothness(perf_benchmark.PerfBenchmark):
"""Base class for smoothness-based benchmarks."""
# Certain smoothness pages do not perform gesture scrolling, in turn yielding
# an empty first_gesture_scroll_update_latency result. Such empty results
# should be ignored so that aggregate metrics can still be computed for that page set.
_PAGES_WITHOUT_SCROLL_GESTURE_BLACKLIST = [
'http://mobile-news.sandbox.google.com/news/pt0']
test = smoothness.Smoothness
@classmethod
def Name(cls):
return 'smoothness'
@classmethod
def ValueCanBeAddedPredicate(cls, value, is_first_result):
del is_first_result # unused
if (value.name == 'first_gesture_scroll_update_latency' and
value.page.url in cls._PAGES_WITHOUT_SCROLL_GESTURE_BLACKLIST and
value.values is None):
return False
return True
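# Hedged illustration (with hypothetical stand-in objects, not real Telemetry
# value types) of what the predicate above filters: an empty
# first_gesture_scroll_update_latency result for a blacklisted page is dropped,
# while every other value is kept.
class _FakePage(object):
  def __init__(self, url):
    self.url = url

class _FakeValue(object):
  def __init__(self, name, url, values):
    self.name = name
    self.page = _FakePage(url)
    self.values = values

_blacklisted_url = 'http://mobile-news.sandbox.google.com/news/pt0'
assert not _Smoothness.ValueCanBeAddedPredicate(
    _FakeValue('first_gesture_scroll_update_latency', _blacklisted_url, None), False)
assert _Smoothness.ValueCanBeAddedPredicate(
    _FakeValue('frame_times', _blacklisted_url, [1, 2, 3]), False)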
class SmoothnessTop25(_Smoothness):
"""Measures rendering statistics while scrolling down the top 25 web pages.
http://www.chromium.org/developers/design-documents/rendering-benchmarks
"""
page_set = page_sets.Top25SmoothPageSet
@classmethod
def Name(cls):
return 'smoothness.top_25_smooth'
class SmoothnessToughFiltersCases(_Smoothness):
"""Measures frame rate and a variety of other statistics.
Uses a selection of pages making use of SVG and CSS Filter Effects.
"""
page_set = page_sets.ToughFiltersCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_filters_cases'
class SmoothnessToughPathRenderingCases(_Smoothness):
"""Tests a selection of pages with SVG and 2D Canvas paths.
Measures frame rate and a variety of other statistics. """
page_set = page_sets.ToughPathRenderingCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_path_rendering_cases'
@benchmark.Disabled('android') # crbug.com/526901
class SmoothnessToughCanvasCases(_Smoothness):
"""Measures frame rate and a variety of other statistics.
Uses a selection of pages making use of the 2D Canvas API.
"""
page_set = page_sets.ToughCanvasCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_canvas_cases'
@benchmark.Disabled('android') # crbug.com/373812
class SmoothnessToughWebGLCases(_Smoothness):
page_set = page_sets.ToughWebglCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_webgl_cases'
@benchmark.Enabled('android')
class SmoothnessMaps(perf_benchmark.PerfBenchmark):
page_set = page_sets.MapsPageSet
@classmethod
def Name(cls):
return 'smoothness.maps'
@benchmark.Disabled('android',
'mac') # crbug.com/567802
class SmoothnessKeyDesktopMoveCases(_Smoothness):
page_set = page_sets.KeyDesktopMoveCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.key_desktop_move_cases'
@benchmark.Enabled('android')
class SmoothnessKeyMobileSites(_Smoothness):
"""Measures rendering statistics while scrolling down the key mobile sites.
http://www.chromium.org/developers/design-documents/rendering-benchmarks
"""
page_set = page_sets.KeyMobileSitesSmoothPageSet
@classmethod
def Name(cls):
return 'smoothness.key_mobile_sites_smooth'
@benchmark.Disabled('mac') # crbug.com/563615
class SmoothnessToughAnimationCases(_Smoothness):
test = smoothness.SmoothnessWithRestart
page_set = page_sets.ToughAnimationCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_animation_cases'
@benchmark.Enabled('android')
class SmoothnessKeySilkCases(_Smoothness):
"""Measures rendering statistics for the key silk cases without GPU
rasterization.
"""
page_set = page_sets.KeySilkCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.key_silk_cases'
def CreateStorySet(self, options):
stories = super(SmoothnessKeySilkCases, self).CreateStorySet(options)
# Page26 (befamous) is too noisy to be useful; crbug.com/461127
to_remove = [story for story in stories
if isinstance(story, page_sets.key_silk_cases.Page26)]
for story in to_remove:
stories.RemoveStory(story)
return stories
@benchmark.Enabled('android', 'mac')
class SmoothnessGpuRasterizationTop25(_Smoothness):
"""Measures rendering statistics for the top 25 with GPU rasterization.
"""
tag = 'gpu_rasterization'
page_set = page_sets.Top25SmoothPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.top_25_smooth'
@benchmark.Enabled('android')
class SmoothnessGpuRasterizationKeyMobileSites(_Smoothness):
"""Measures rendering statistics for the key mobile sites with GPU
rasterization.
"""
tag = 'gpu_rasterization'
page_set = page_sets.KeyMobileSitesSmoothPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.key_mobile_sites_smooth'
class SmoothnessGpuRasterizationToughPathRenderingCases(_Smoothness):
"""Tests a selection of pages with SVG and 2D canvas paths with GPU
rasterization.
"""
tag = 'gpu_rasterization'
page_set = page_sets.ToughPathRenderingCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.tough_path_rendering_cases'
class SmoothnessGpuRasterizationFiltersCases(_Smoothness):
"""Tests a selection of pages with SVG and CSS filter effects with GPU
rasterization.
"""
tag = 'gpu_rasterization'
page_set = page_sets.ToughFiltersCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.tough_filters_cases'
@benchmark.Enabled('android')
class SmoothnessSyncScrollKeyMobileSites(_Smoothness):
"""Measures rendering statistics for the key mobile sites with synchronous
(main thread) scrolling.
"""
tag = 'sync_scroll'
page_set = page_sets.KeyMobileSitesSmoothPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForSyncScrolling(options)
@classmethod
def Name(cls):
return 'smoothness.sync_scroll.key_mobile_sites_smooth'
@benchmark.Enabled('android')
class SmoothnessSimpleMobilePages(_Smoothness):
"""Measures rendering statistics for simple mobile sites page set.
"""
page_set = page_sets.SimpleMobileSitesPageSet
@classmethod
def Name(cls):
return 'smoothness.simple_mobile_sites'
@benchmark.Enabled('android')
class SmoothnessFlingSimpleMobilePages(_Smoothness):
"""Measures rendering statistics for flinging a simple mobile sites page set.
"""
page_set = page_sets.SimpleMobileSitesFlingPageSet
def SetExtraBrowserOptions(self, options):
# As the fling parameters cannot be analytically determined to not
# overscroll, disable overscrolling explicitly. Overscroll behavior is
# orthogonal to fling performance, and its activation only adds noise.
options.AppendExtraBrowserArgs('--disable-overscroll-edge-effect')
@classmethod
def Name(cls):
return 'smoothness.fling.simple_mobile_sites'
@benchmark.Enabled('android')
class SmoothnessToughPinchZoomCases(_Smoothness):
"""Measures rendering statistics for pinch-zooming in the tough pinch zoom
cases.
"""
page_set = page_sets.AndroidToughPinchZoomCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_pinch_zoom_cases'
@classmethod
def ShouldDisable(cls, possible_browser):
return cls.IsSvelte(possible_browser) # http://crbug.com/564008
@benchmark.Enabled('chromeos', 'mac')
class SmoothnessDesktopToughPinchZoomCases(_Smoothness):
"""Measures rendering statistics for pinch-zooming in the tough pinch zoom
cases. Uses lower zoom levels customized for desktop limits.
"""
page_set = page_sets.DesktopToughPinchZoomCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.desktop_tough_pinch_zoom_cases'
@benchmark.Enabled('android')
class SmoothnessGpuRasterizationToughPinchZoomCases(_Smoothness):
"""Measures rendering statistics for pinch-zooming in the tough pinch zoom
cases with GPU rasterization.
"""
tag = 'gpu_rasterization'
test = smoothness.Smoothness
page_set = page_sets.AndroidToughPinchZoomCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.tough_pinch_zoom_cases'
@classmethod
def ShouldDisable(cls, possible_browser):
return cls.IsSvelte(possible_browser) # http://crbug.com/564008
@benchmark.Enabled('chromeos', 'mac')
class SmoothnessGpuRasterizationDesktopToughPinchZoomCases(_Smoothness):
"""Measures rendering statistics for pinch-zooming in the tough pinch zoom
cases with GPU rasterization. Uses lower zoom levels customized for desktop
limits.
"""
tag = 'gpu_rasterization'
test = smoothness.Smoothness
page_set = page_sets.DesktopToughPinchZoomCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.desktop_tough_pinch_zoom_cases'
@benchmark.Enabled('android', 'chromeos')
class SmoothnessToughScrollingWhileZoomedInCases(_Smoothness):
"""Measures rendering statistics for pinch-zooming then diagonal scrolling"""
page_set = page_sets.ToughScrollingWhileZoomedInCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_scrolling_while_zoomed_in_cases'
@benchmark.Enabled('android')
class SmoothnessPolymer(_Smoothness):
"""Measures rendering statistics for Polymer cases.
"""
page_set = page_sets.PolymerPageSet
@classmethod
def Name(cls):
return 'smoothness.polymer'
@benchmark.Enabled('android')
class SmoothnessGpuRasterizationPolymer(_Smoothness):
"""Measures rendering statistics for the Polymer cases with GPU rasterization.
"""
tag = 'gpu_rasterization'
page_set = page_sets.PolymerPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.polymer'
class SmoothnessToughScrollingCases(_Smoothness):
page_set = page_sets.ToughScrollingCasesPageSet
@classmethod
def ValueCanBeAddedPredicate(cls, value, is_first_result):
del is_first_result # unused
# Only keep 'mean_pixels_approximated' and 'mean_pixels_checkerboarded'
# metrics. (crbug.com/529331)
return value.name in ('mean_pixels_approximated',
'mean_pixels_checkerboarded')
@classmethod
def Name(cls):
return 'smoothness.tough_scrolling_cases'
@benchmark.Enabled('android', "mac")
class SmoothnessGpuRasterizationToughScrollingCases(_Smoothness):
tag = 'gpu_rasterization'
test = smoothness.Smoothness
page_set = page_sets.ToughScrollingCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.tough_scrolling_cases'
@benchmark.Disabled('android') # http://crbug.com/531593
class SmoothnessToughImageDecodeCases(_Smoothness):
page_set = page_sets.ToughImageDecodeCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_image_decode_cases'
class SmoothnessImageDecodingCases(_Smoothness):
"""Measures decoding statistics for jpeg images.
"""
page_set = page_sets.ImageDecodingCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
options.AppendExtraBrowserArgs('--disable-accelerated-jpeg-decoding')
@classmethod
def Name(cls):
return 'smoothness.image_decoding_cases'
@classmethod
def ShouldDisable(cls, possible_browser):
return cls.IsSvelte(possible_browser) # http://crbug.com/563974
@benchmark.Disabled('android') # http://crbug.com/513699
class SmoothnessGpuImageDecodingCases(_Smoothness):
"""Measures decoding statistics for jpeg images with GPU rasterization.
"""
tag = 'gpu_rasterization_and_decoding'
page_set = page_sets.ImageDecodingCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
# TODO(sugoi): Remove the following line once M41 goes stable
options.AppendExtraBrowserArgs('--enable-accelerated-jpeg-decoding')
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization_and_decoding.image_decoding_cases'
@benchmark.Enabled('android')
class SmoothnessPathologicalMobileSites(_Smoothness):
"""Measures task execution statistics while scrolling pathological sites.
"""
page_set = page_sets.PathologicalMobileSitesPageSet
@classmethod
def Name(cls):
return 'smoothness.pathological_mobile_sites'
class SmoothnessToughAnimatedImageCases(_Smoothness):
page_set = page_sets.ToughAnimatedImageCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_animated_image_cases'
class SmoothnessToughTextureUploadCases(_Smoothness):
page_set = page_sets.ToughTextureUploadCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_texture_upload_cases'
class SmoothnessToughAdCases(_Smoothness):
"""Measures rendering statistics while displaying advertisements."""
page_set = page_sets.ToughAdCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_ad_cases'
@classmethod
def ShouldDisable(cls, possible_browser):
return cls.IsSvelte(possible_browser) # http://crbug.com/555089
# http://crbug.com/522619 (mac/win)
@benchmark.Disabled('win', 'mac')
class SmoothnessScrollingToughAdCases(_Smoothness):
"""Measures rendering statistics while scrolling advertisements."""
page_set = page_sets.ScrollingToughAdCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.scrolling_tough_ad_cases'
# http://crbug.com/522619 (mac/win)
@benchmark.Disabled('win', 'mac')
class SmoothnessBidirectionallyScrollingToughAdCases(_Smoothness):
"""Measures rendering statistics while scrolling advertisements."""
page_set = page_sets.BidirectionallyScrollingToughAdCasesPageSet
def SetExtraBrowserOptions(self, options):
# Don't accidentally reload the page while scrolling.
options.AppendExtraBrowserArgs('--disable-pull-to-refresh-effect')
@classmethod
def Name(cls):
return 'smoothness.bidirectionally_scrolling_tough_ad_cases'
class SmoothnessToughWebGLAdCases(_Smoothness):
"""Measures rendering statistics while scrolling advertisements."""
page_set = page_sets.ToughWebglAdCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_webgl_ad_cases'
@classmethod
def ShouldDisable(cls, possible_browser):
return cls.IsSvelte(possible_browser) # http://crbug.com/574485
|
|
import argparse
from collections import defaultdict
import glob
import logging
import subprocess
import pkg_resources
from imhotep.repomanagers import ShallowRepoManager, RepoManager
from .reporters.printing import PrintingReporter
from .reporters.github import CommitReporter, PRReporter
from .diff_parser import DiffContextParser
from .shas import get_pr_info, CommitInfo
from imhotep import http_client
from .errors import UnknownTools, NoCommitInfo
log = logging.getLogger(__name__)
def run(cmd, cwd='.'):
log.debug("Running: %s", cmd)
return subprocess.Popen(
[cmd], stdout=subprocess.PIPE, shell=True, cwd=cwd).communicate()[0]
def find_config(dirname, config_filenames):
configs = []
for filename in config_filenames:
configs += glob.glob('%s/%s' % (dirname, filename))
return set(configs)
def run_analysis(repo, filenames=set(), linter_configs=set()):
results = defaultdict(lambda: defaultdict(list))
for tool in repo.tools:
log.debug("running %s" % tool.__class__.__name__)
configs = {}
try:
configs = tool.get_configs()
except AttributeError:
pass
linter_configs = find_config(repo.dirname, configs)
log.debug("Tool configs %s, found configs %s", configs, linter_configs)
run_results = tool.invoke(repo.dirname,
filenames=filenames,
linter_configs=linter_configs)
for fname, fresults in run_results.items():
for lineno, violations in fresults.items():
results[fname][lineno].extend(violations)
return results
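# Hedged sketch of the shape run_analysis() returns: a nested mapping of
# filename -> line number -> list of violation messages. The sample data here
# is hypothetical.
_example_results = defaultdict(lambda: defaultdict(list))
_example_results['foo.py'][12].append("W0611 unused import")
_example_results['foo.py'][12].append("E501 line too long")
assert dict(_example_results['foo.py']) == {
    12: ["W0611 unused import", "E501 line too long"]}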
def load_plugins():
tools = []
for ep in pkg_resources.iter_entry_points(group='imhotep_linters'):
klass = ep.load()
tools.append(klass(run))
return tools
class Imhotep(object):
def __init__(self, requester=None, repo_manager=None,
repo_name=None, pr_number=None,
commit_info=None,
commit=None, origin_commit=None, no_post=None, debug=None,
filenames=None, shallow_clone=False, github_domain=None,
report_file_violations=False, **kwargs):
# TODO(justinabrahms): kwargs exist until we handle cli params better
# TODO(justinabrahms): This is a sprawling API. Tighten it up.
self.requester = requester
self.manager = repo_manager
self.commit_info = commit_info
self.repo_name = repo_name
self.pr_number = pr_number
self.commit = commit
self.origin_commit = origin_commit
self.no_post = no_post
self.debug = debug
if filenames is None:
filenames = []
self.requested_filenames = set(filenames)
self.shallow = shallow_clone
self.github_domain = github_domain
self.report_file_violations = report_file_violations
if self.commit is None and self.pr_number is None:
raise NoCommitInfo()
def get_reporter(self):
if self.no_post:
return PrintingReporter()
if self.pr_number:
return PRReporter(self.requester, self.repo_name, self.pr_number)
elif self.commit is not None:
return CommitReporter(self.requester, self.repo_name)
def get_filenames(self, entries, requested_set=None):
filenames = set([x.result_filename for x in entries])
if requested_set is not None and len(requested_set):
filenames = requested_set.intersection(filenames)
return list(filenames)
def invoke(self, reporter=None, max_errors=float('inf')):
cinfo = self.commit_info
if not reporter:
reporter = self.get_reporter()
try:
repo = self.manager.clone_repo(self.repo_name,
remote_repo=cinfo.remote_repo,
ref=cinfo.ref)
diff = repo.diff_commit(cinfo.commit, compare_point=cinfo.origin)
# Move out to its own thing
parser = DiffContextParser(diff)
parse_results = parser.parse()
filenames = self.get_filenames(parse_results,
self.requested_filenames)
results = run_analysis(repo, filenames=filenames)
error_count = 0
for entry in parse_results:
added_lines = [l.number for l in entry.added_lines]
if not entry.added_lines:
continue
pos_map = {0: min(l.position for l in entry.added_lines)}
for x in entry.added_lines:
pos_map[x.number] = x.position
if self.report_file_violations:
# "magic" value of line 0 represents file-level results.
added_lines.append(0)
violations = results.get(entry.result_filename, {})
violating_lines = [int(l) for l in violations.keys()]
matching_numbers = set(added_lines).intersection(
violating_lines)
for x in matching_numbers:
error_count += 1
if error_count > max_errors:
continue
reporter.report_line(
cinfo.origin, entry.result_filename,
x, pos_map[x], violations['%s' % x])
if error_count > max_errors \
and hasattr(reporter, 'post_comment'):
reporter.post_comment(
"There were too many ({error_count}) linting errors to"
" continue.".format(error_count=error_count))
log.info("%d violations.", error_count)
finally:
self.manager.cleanup()
def gen_imhotep(**kwargs):
# TODO(justinabrahms): Interface should have a "are creds valid?" method
req = http_client.BasicAuthRequester(kwargs['github_username'],
kwargs['github_password'])
plugins = load_plugins()
tools = get_tools(kwargs['linter'], plugins)
if kwargs['shallow']:
Manager = ShallowRepoManager
else:
Manager = RepoManager
domain = kwargs['github_domain']
manager = Manager(authenticated=kwargs['authenticated'],
cache_directory=kwargs['cache_directory'],
tools=tools,
executor=run,
domain=domain)
if kwargs['pr_number']:
pr_info = get_pr_info(req, kwargs['repo_name'], kwargs['pr_number'], domain)
commit_info = pr_info.to_commit_info()
else:
# TODO(justinabrahms): origin & remote_repo doesn't work for commits
commit_info = CommitInfo(kwargs['commit'], None, None, None)
log.debug('Shallow: %s', kwargs['shallow'])
shallow_clone = kwargs['shallow'] or False
return Imhotep(
requester=req, repo_manager=manager, commit_info=commit_info,
shallow_clone=shallow_clone, domain=domain, **kwargs)
def get_tools(whitelist, known_plugins):
"""
Filter all known plugins by the specified whitelist. If the whitelist is
empty, default to all plugins.
"""
def getpath(c):
return "%s:%s" % (c.__module__, c.__class__.__name__)
tools = [x for x in known_plugins if getpath(x) in whitelist]
if not tools:
if whitelist:
raise UnknownTools(map(getpath, known_plugins))
tools = known_plugins
return tools
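# Hedged usage sketch for get_tools(): plugins are matched against the whitelist
# by their "module:ClassName" path. _DemoLint is a hypothetical plugin class.
class _DemoLint(object):
    pass

_demo_plugins = [_DemoLint()]
# An empty whitelist falls back to every known plugin ...
assert get_tools([], _demo_plugins) == _demo_plugins
# ... while a matching path selects the named tool.
_demo_path = "%s:%s" % (_DemoLint.__module__, _DemoLint.__name__)
assert get_tools([_demo_path], _demo_plugins) == _demo_plugins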
def parse_args(args):
arg_parser = argparse.ArgumentParser(
description="Posts static analysis results to github.")
arg_parser.add_argument(
'--config-file',
default="imhotep_config.json",
type=str,
help="Configuration file in json.")
arg_parser.add_argument(
'--repo_name', required=True,
help="Github repository name in owner/repo format")
arg_parser.add_argument(
'--commit',
help="The sha of the commit to run static analysis on.")
arg_parser.add_argument(
'--origin-commit',
required=False,
default='HEAD^',
help='Commit to use as the comparison point.')
arg_parser.add_argument(
'--filenames', nargs="+",
help="filenames you want static analysis to be limited to.")
arg_parser.add_argument(
'--debug',
action='store_true',
help="Will dump debugging output and won't clean up after itself.")
arg_parser.add_argument(
'--github-username',
help='Github user to post comments as.')
arg_parser.add_argument(
'--github-password',
help='Github password for the above user.')
arg_parser.add_argument(
'--no-post',
action="store_true",
help="[DEBUG] will print out comments rather than posting to github.")
arg_parser.add_argument(
'--authenticated',
action="store_true",
help="Indicates the repository requires authentication")
arg_parser.add_argument(
'--pr-number',
help="Number of the pull request to comment on")
arg_parser.add_argument(
'--cache-directory',
help="Path to directory to cache the repository",
type=str,
required=False)
arg_parser.add_argument(
'--linter',
help="Path to linters to run, e.g. 'imhotep.tools:PyLint'",
type=str,
nargs="+",
default=[],
required=False)
arg_parser.add_argument(
'--shallow',
help="Performs a shallow clone of the repo",
action="store_true")
arg_parser.add_argument(
'--github-domain',
help="You can provide an alternative domain, if you\'re using github enterprise, for instance",
default="github.com")
arg_parser.add_argument(
'--report-file-violations',
help="Report file-level violations, i.e. those not on individual lines",
action="store_true")
# parse out repo name
return arg_parser.parse_args(args)
|
|
import argparse
import json
import logging
import sys
import time
from textwrap import dedent
from ceph_deploy import conf, exc, hosts
from ceph_deploy.util import system, packages
from ceph_deploy.cliutil import priority
from ceph_deploy.lib import remoto
LOG = logging.getLogger(__name__)
def get_bootstrap_osd_key(cluster):
"""
Read the bootstrap-osd key for `cluster`.
"""
path = '{cluster}.bootstrap-osd.keyring'.format(cluster=cluster)
try:
with open(path, 'rb') as f:
return f.read()
except IOError:
raise RuntimeError('bootstrap-osd keyring not found; run \'gatherkeys\'')
def create_osd_keyring(conn, cluster, key):
"""
Run on osd node, writes the bootstrap key if not there yet.
"""
logger = conn.logger
path = '/var/lib/ceph/bootstrap-osd/{cluster}.keyring'.format(
cluster=cluster,
)
if not conn.remote_module.path_exists(path):
logger.warning('osd keyring does not exist yet, creating one')
conn.remote_module.write_keyring(path, key)
def osd_tree(conn, cluster):
"""
Check the status of an OSD. Make sure all are up and in.
What good output would look like::
{
"epoch": 8,
"num_osds": 1,
"num_up_osds": 1,
"num_in_osds": "1",
"full": "false",
"nearfull": "false"
}
Note how the booleans are actually strings, so we need to take that into
account and fix it before returning the dictionary. Issue #8108
"""
ceph_executable = system.executable_path(conn, 'ceph')
command = [
ceph_executable,
'--cluster={cluster}'.format(cluster=cluster),
'osd',
'tree',
'--format=json',
]
out, err, code = remoto.process.check(
conn,
command,
)
try:
loaded_json = json.loads(''.join(out))
# convert boolean strings to actual booleans because
# --format=json fails to do this properly
for k, v in loaded_json.items():
if v == 'true':
loaded_json[k] = True
elif v == 'false':
loaded_json[k] = False
return loaded_json
except ValueError:
return {}
def osd_status_check(conn, cluster):
"""
Check the status of an OSD. Make sure all are up and in.
What good output would look like::
{
"epoch": 8,
"num_osds": 1,
"num_up_osds": 1,
"num_in_osds": "1",
"full": "false",
"nearfull": "false"
}
Note how the booleans are actually strings, so we need to take that into
account and fix it before returning the dictionary. Issue #8108
"""
ceph_executable = system.executable_path(conn, 'ceph')
command = [
ceph_executable,
'--cluster={cluster}'.format(cluster=cluster),
'osd',
'stat',
'--format=json',
]
try:
out, err, code = remoto.process.check(
conn,
command,
)
except TypeError:
# XXX This is a bug in remoto. If the other end disconnects with a timeout
# it will return a None, and here we are expecting a 3 item tuple, not a None
# so it will break with a TypeError. Once remoto fixes this, we no longer need
# this try/except.
return {}
try:
loaded_json = json.loads(''.join(out))
# convert boolean strings to actual booleans because
# --format=json fails to do this properly
for k, v in loaded_json.items():
if v == 'true':
loaded_json[k] = True
elif v == 'false':
loaded_json[k] = False
return loaded_json
except ValueError:
return {}
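# Hedged standalone sketch of the "boolean strings" fix-up described above
# (Issue #8108): values that arrive as 'true'/'false' strings from --format=json
# are converted into real booleans before the status dict is used.
def _normalize_osd_stat_demo(status):
    normalized = dict(status)
    for k, v in normalized.items():
        if v == 'true':
            normalized[k] = True
        elif v == 'false':
            normalized[k] = False
    return normalized

assert _normalize_osd_stat_demo({'num_osds': 1, 'full': 'false', 'nearfull': 'true'}) == \
    {'num_osds': 1, 'full': False, 'nearfull': True}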
def catch_osd_errors(conn, logger, args):
"""
Look for possible issues when checking the status of an OSD and
report them back to the user.
"""
logger.info('checking OSD status...')
status = osd_status_check(conn, args.cluster)
osds = int(status.get('num_osds', 0))
up_osds = int(status.get('num_up_osds', 0))
in_osds = int(status.get('num_in_osds', 0))
full = status.get('full', False)
nearfull = status.get('nearfull', False)
if osds > up_osds:
difference = osds - up_osds
logger.warning('there %s %d OSD%s down' % (
['is', 'are'][difference != 1],
difference,
"s"[difference == 1:])
)
if osds > in_osds:
difference = osds - in_osds
logger.warning('there %s %d OSD%s out' % (
['is', 'are'][difference != 1],
difference,
"s"[difference == 1:])
)
if full:
logger.warning('OSDs are full!')
if nearfull:
logger.warning('OSDs are near full!')
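# Hedged illustration of the two indexing tricks used in the warnings above:
# ['is', 'are'][difference != 1] picks the verb and "s"[difference == 1:] appends
# an "s" only when the count is not 1.
def _plural_phrase_demo(difference):
    return 'there %s %d OSD%s down' % (
        ['is', 'are'][difference != 1],
        difference,
        "s"[difference == 1:])

assert _plural_phrase_demo(1) == 'there is 1 OSD down'
assert _plural_phrase_demo(3) == 'there are 3 OSDs down'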
def create_osd(
conn,
cluster,
data,
journal,
zap,
fs_type,
dmcrypt,
dmcrypt_dir,
storetype,
block_wal,
block_db,
**kw):
"""
Run on osd node, creates an OSD from a data disk.
"""
ceph_volume_executable = system.executable_path(conn, 'ceph-volume')
args = [
ceph_volume_executable,
'--cluster', cluster,
'lvm',
'create',
'--%s' % storetype,
'--data', data
]
if zap:
LOG.warning('zapping is no longer supported when preparing')
if dmcrypt:
args.append('--dmcrypt')
# TODO: re-enable dmcrypt support once ceph-volume grows it
LOG.warning('dmcrypt is currently not supported')
if storetype == 'bluestore':
if block_wal:
args.append('--block.wal')
args.append(block_wal)
if block_db:
args.append('--block.db')
args.append(block_db)
elif storetype == 'filestore':
if not journal:
raise RuntimeError('A journal lv or GPT partition must be specified when using filestore')
args.append('--journal')
args.append(journal)
if kw.get('debug'):
remoto.process.run(
conn,
args,
extend_env={'CEPH_VOLUME_DEBUG': '1'}
)
else:
remoto.process.run(
conn,
args
)
def create(args, cfg, create=False):
if not args.host:
raise RuntimeError('Required host was not specified as a positional argument')
LOG.debug(
'Creating OSD on cluster %s with data device %s',
args.cluster,
args.data
)
key = get_bootstrap_osd_key(cluster=args.cluster)
bootstrapped = set()
errors = 0
hostname = args.host
try:
if args.data is None:
raise exc.NeedDiskError(hostname)
distro = hosts.get(
hostname,
username=args.username,
callbacks=[packages.ceph_is_installed]
)
LOG.info(
'Distro info: %s %s %s',
distro.name,
distro.release,
distro.codename
)
if hostname not in bootstrapped:
bootstrapped.add(hostname)
LOG.debug('Deploying osd to %s', hostname)
conf_data = conf.ceph.load_raw(args)
distro.conn.remote_module.write_conf(
args.cluster,
conf_data,
args.overwrite_conf
)
create_osd_keyring(distro.conn, args.cluster, key)
# default to bluestore unless explicitly told not to
storetype = 'bluestore'
if args.filestore:
storetype = 'filestore'
create_osd(
distro.conn,
cluster=args.cluster,
data=args.data,
journal=args.journal,
zap=args.zap_disk,
fs_type=args.fs_type,
dmcrypt=args.dmcrypt,
dmcrypt_dir=args.dmcrypt_key_dir,
storetype=storetype,
block_wal=args.block_wal,
block_db=args.block_db,
debug=args.debug,
)
# give the OSD a few seconds to start
time.sleep(5)
catch_osd_errors(distro.conn, distro.conn.logger, args)
LOG.debug('Host %s is now ready for osd use.', hostname)
distro.conn.exit()
except RuntimeError as e:
LOG.error(e)
errors += 1
if errors:
raise exc.GenericError('Failed to create %d OSDs' % errors)
def disk_zap(args):
hostname = args.host
for disk in args.disk:
if not disk or not hostname:
raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
LOG.debug('zapping %s on %s', disk, hostname)
distro = hosts.get(
hostname,
username=args.username,
callbacks=[packages.ceph_is_installed]
)
LOG.info(
'Distro info: %s %s %s',
distro.name,
distro.release,
distro.codename
)
distro.conn.remote_module.zeroing(disk)
ceph_volume_executable = system.executable_path(distro.conn, 'ceph-volume')
if args.debug:
remoto.process.run(
distro.conn,
[
ceph_volume_executable,
'lvm',
'zap',
disk,
],
env={'CEPH_VOLUME_DEBUG': '1'}
)
else:
remoto.process.run(
distro.conn,
[
ceph_volume_executable,
'lvm',
'zap',
disk,
],
)
distro.conn.exit()
def disk_list(args, cfg):
command = ['fdisk', '-l']
for hostname in args.host:
distro = hosts.get(
hostname,
username=args.username,
callbacks=[packages.ceph_is_installed]
)
out, err, code = remoto.process.check(
distro.conn,
command,
)
for line in out:
line = line.decode('utf-8')
if line.startswith('Disk /'):
distro.conn.logger.info(line)
def osd_list(args, cfg):
for hostname in args.host:
distro = hosts.get(
hostname,
username=args.username,
callbacks=[packages.ceph_is_installed]
)
LOG.info(
'Distro info: %s %s %s',
distro.name,
distro.release,
distro.codename
)
LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname))
ceph_volume_executable = system.executable_path(distro.conn, 'ceph-volume')
if args.debug:
remoto.process.run(
distro.conn,
[
ceph_volume_executable,
'lvm',
'list',
],
env={'CEPH_VOLUME_DEBUG': '1'}
)
else:
remoto.process.run(
distro.conn,
[
ceph_volume_executable,
'lvm',
'list',
],
)
distro.conn.exit()
def osd(args):
cfg = conf.ceph.load(args)
if args.subcommand == 'list':
osd_list(args, cfg)
elif args.subcommand == 'create':
create(args, cfg)
else:
LOG.error('subcommand %s not implemented', args.subcommand)
sys.exit(1)
def disk(args):
cfg = conf.ceph.load(args)
if args.subcommand == 'list':
disk_list(args, cfg)
elif args.subcommand == 'create':
create(args, cfg)
elif args.subcommand == 'zap':
disk_zap(args)
else:
LOG.error('subcommand %s not implemented', args.subcommand)
sys.exit(1)
@priority(50)
def make(parser):
"""
Prepare a data disk on remote host.
"""
sub_command_help = dedent("""
Create OSDs from a data disk on a remote host:
ceph-deploy osd create {node} --data /path/to/device
For bluestore, optional devices can be used::
ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device
ceph-deploy osd create {node} --data /path/to/data --block-wal /path/to/wal-device
ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device --block-wal /path/to/wal-device
For filestore, the journal must be specified, as well as the objectstore::
ceph-deploy osd create {node} --filestore --data /path/to/data --journal /path/to/journal
For data devices, it can be an existing logical volume in the vg/lv format,
or a device. For other OSD components like wal, db, and journal, it
can be a logical volume (in vg/lv format) or it must be a GPT partition.
"""
)
parser.formatter_class = argparse.RawDescriptionHelpFormatter
parser.description = sub_command_help
osd_parser = parser.add_subparsers(dest='subcommand')
osd_parser.required = True
osd_list = osd_parser.add_parser(
'list',
help='List OSD info from remote host(s)'
)
osd_list.add_argument(
'host',
nargs='+',
metavar='HOST',
help='remote host(s) to list OSDs from'
)
osd_list.add_argument(
'--debug',
action='store_true',
help='Enable debug mode on remote ceph-volume calls',
)
osd_create = osd_parser.add_parser(
'create',
help='Create new Ceph OSD daemon by preparing and activating a device'
)
osd_create.add_argument(
'--data',
metavar='DATA',
help='The OSD data logical volume (vg/lv) or absolute path to device'
)
osd_create.add_argument(
'--journal',
help='Logical Volume (vg/lv) or path to GPT partition',
)
osd_create.add_argument(
'--zap-disk',
action='store_true',
help='DEPRECATED - cannot zap when creating an OSD'
)
osd_create.add_argument(
'--fs-type',
metavar='FS_TYPE',
choices=['xfs',
'btrfs'
],
default='xfs',
help='filesystem to use to format DEVICE (xfs, btrfs)',
)
osd_create.add_argument(
'--dmcrypt',
action='store_true',
help='use dm-crypt on DEVICE',
)
osd_create.add_argument(
'--dmcrypt-key-dir',
metavar='KEYDIR',
default='/etc/ceph/dmcrypt-keys',
help='directory where dm-crypt keys are stored',
)
osd_create.add_argument(
'--filestore',
action='store_true', default=None,
help='filestore objectstore',
)
osd_create.add_argument(
'--bluestore',
action='store_true', default=None,
help='bluestore objectstore',
)
osd_create.add_argument(
'--block-db',
default=None,
help='bluestore block.db path'
)
osd_create.add_argument(
'--block-wal',
default=None,
help='bluestore block.wal path'
)
osd_create.add_argument(
'host',
nargs='?',
metavar='HOST',
help='Remote host to connect'
)
osd_create.add_argument(
'--debug',
action='store_true',
help='Enable debug mode on remote ceph-volume calls',
)
parser.set_defaults(
func=osd,
)
@priority(50)
def make_disk(parser):
"""
Manage disks on a remote host.
"""
disk_parser = parser.add_subparsers(dest='subcommand')
disk_parser.required = True
disk_zap = disk_parser.add_parser(
'zap',
help='destroy existing data and filesystem on LV or partition',
)
disk_zap.add_argument(
'host',
nargs='?',
metavar='HOST',
help='Remote HOST(s) to connect'
)
disk_zap.add_argument(
'disk',
nargs='+',
metavar='DISK',
help='Disk(s) to zap'
)
disk_zap.add_argument(
'--debug',
action='store_true',
help='Enable debug mode on remote ceph-volume calls',
)
disk_list = disk_parser.add_parser(
'list',
help='List disk info from remote host(s)'
)
disk_list.add_argument(
'host',
nargs='+',
metavar='HOST',
help='Remote HOST(s) to list OSDs from'
)
disk_list.add_argument(
'--debug',
action='store_true',
help='Enable debug mode on remote ceph-volume calls',
)
parser.set_defaults(
func=disk,
)
|
|
"""
Performs an end-to-end test where we verify that the data
output by the client command line interface is equal to
the values we expect using the test dataset.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import unittest
import ga4gh.client as client
import ga4gh.backend as backend
import ga4gh.cli as cli
import ga4gh.protocol as protocol
import ga4gh.datarepo as datarepo
import tests.utils as utils
import tests.paths as paths
import freezegun
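# Hedged sketch (plain Python, no ga4gh objects) of the comparison strategy used
# below: the CLI emits one JSON object per line, each line is parsed back into a
# dict, and the resulting list is compared against the client-API view.
import json as _json

_demo_cli_stdout = '{"id": "ref1"}\n{"id": "ref2"}\n'
_parsed = [_json.loads(line) for line in _demo_cli_stdout.splitlines()]
assert _parsed == [{"id": "ref1"}, {"id": "ref2"}]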
@freezegun.freeze_time(datetime.datetime.now())
class TestClientOutput(unittest.TestCase):
"""
Base class for client output tests
"""
def setUp(self):
self.maxDiff = None
repoPath = paths.testDataRepo
self._dataUrl = "file://{}".format(repoPath)
dataRepository = datarepo.SqlDataRepository(repoPath)
dataRepository.open(datarepo.MODE_READ)
self._backend = backend.Backend(dataRepository)
self._client = client.LocalClient(self._backend)
def captureCliOutput(self, command, arguments, outputFormat):
clientCommand = "{} {} {} -O {}".format(
command, self._dataUrl, arguments, outputFormat)
stdout, stderr = utils.captureOutput(
cli.client_main, clientCommand.split())
self.assertEqual(len(stderr), 0)
return stdout
class TestClientFasta(TestClientOutput):
"""
Tests client FASTA output
"""
def captureFastaOutput(self, command, arguments=""):
stdout = self.captureCliOutput(command, arguments, "fasta")
lines = stdout.split()
return lines
def testListReferenceBases(self):
referenceSetIterator = self._client.searchReferenceSets()
referenceSet = next(referenceSetIterator)
referencesIterator = self._client.searchReferences(referenceSet.id)
reference = next(referencesIterator)
start = 1
end = 5
lines = self.captureFastaOutput(
"references-list-bases --start {} --end {}".format(start, end),
reference.id)
self.assertEqual(
lines[0], ">{}:{}-{}".format(reference.id, start, end))
cliBases = ''.join(lines[1:])
bases = self._client.listReferenceBases(reference.id, start, end)
self.assertEqual(cliBases, bases)
class TestClientJson(TestClientOutput):
"""
Tests that the JSON output by the client on the command line for
various options is equal to the values we find using the Python
client API.
"""
def captureJsonOutput(self, command, arguments=""):
"""
Runs the specified command with the JSON output option and
returns the result as a list of parsed JSON dictionaries.
"""
stdout = self.captureCliOutput(command, arguments, "json")
cliOutput = []
for line in stdout.splitlines():
try:
cliOutput.append(json.loads(line))
except ValueError as e:
raise Exception((e, line, stdout, command, arguments))
return cliOutput
def verifyParsedOutputsEqual(
self, clientIterator, cliCommand, cliArguments=""):
"""
Verify that the parsed JSON of all the objects in the specified
client iterator are equal to the parsed JSON from the specified
CLI command.
"""
cliOutput = self.captureJsonOutput(cliCommand, cliArguments)
clientOutput = [protocol.toJsonDict(gObj) for gObj in clientIterator]
self.assertEqual(clientOutput, cliOutput)
return len(clientOutput)
def testGetCallSet(self):
for dataset in self._client.searchDatasets():
for variantSet in self._client.searchVariantSets(dataset.id):
for callSet in self._client.searchCallSets(variantSet.id):
self.verifyParsedOutputsEqual(
[callSet], "callsets-get", callSet.id)
def testGetDataset(self):
for dataset in self._client.searchDatasets():
self.verifyParsedOutputsEqual(
[dataset], "datasets-get", dataset.id)
def testGetReadGroup(self):
for dataset in self._client.searchDatasets():
for readGroupSet in self._client.searchReadGroupSets(dataset.id):
for readGroup in readGroupSet.read_groups:
self.verifyParsedOutputsEqual(
[readGroup], "readgroups-get", readGroup.id)
def testGetReadGroupSet(self):
for dataset in self._client.searchDatasets():
for readGroupSet in self._client.searchReadGroupSets(dataset.id):
self.verifyParsedOutputsEqual(
[readGroupSet], "readgroupsets-get", readGroupSet.id)
def testGetReference(self):
for referenceSet in self._client.searchReferenceSets():
for reference in self._client.searchReferences(referenceSet.id):
self.verifyParsedOutputsEqual(
[reference], "references-get", reference.id)
def testGetReferenceSet(self):
for referenceSet in self._client.searchReferenceSets():
self.verifyParsedOutputsEqual(
[referenceSet], "referencesets-get", referenceSet.id)
@unittest.skip("TODO: clarify semantics of callsets and fix")
def testGetVariant(self):
test_executed = 0
start = 0
end = 1000
referenceName = "1"
for dataset in self._client.searchDatasets():
for variantSet in self._client.searchVariantSets(dataset.id):
variants = self._client.searchVariants(
variantSet.id, start=start, end=end,
referenceName=referenceName)
for variant in variants:
test_executed += self.verifyParsedOutputsEqual(
[variant], "variants-get", variant.id)
self.assertGreater(test_executed, 0)
def testGetVariantAnnotationSet(self):
test_executed = 0
for dataset in self._client.searchDatasets():
for variantSet in self._client.searchVariantSets(dataset.id):
for annSet in self._client.searchVariantAnnotationSets(
variantSet.id):
test_executed += self.verifyParsedOutputsEqual(
[annSet], "variantannotationsets-get", annSet.id)
self.assertGreater(test_executed, 0)
def testGetVariantSet(self):
for dataset in self._client.searchDatasets():
for variantSet in self._client.searchVariantSets(dataset.id):
self.verifyParsedOutputsEqual(
[variantSet], "variantsets-get", variantSet.id)
def testSearchCallSets(self):
for dataset in self._client.searchDatasets():
for variantSet in self._client.searchVariantSets(dataset.id):
iterator = self._client.searchCallSets(variantSet.id)
args = "--variantSetId {}".format(variantSet.id)
self.verifyParsedOutputsEqual(
iterator, "callsets-search", args)
def testSearchDatasets(self):
iterator = self._client.searchDatasets()
self.verifyParsedOutputsEqual(iterator, "datasets-search")
def testSearchReadGroupSets(self):
for dataset in self._client.searchDatasets():
iterator = self._client.searchReadGroupSets(dataset.id)
self.verifyParsedOutputsEqual(
iterator, "readgroupsets-search",
"--datasetId {}".format(dataset.id))
def testSearchReads(self):
test_executed = 0
start = 0
end = 1000000
for dataset in self._client.searchDatasets():
for readGroupSet in self._client.searchReadGroupSets(dataset.id):
for readGroup in readGroupSet.read_groups:
reference = next(self._client.searchReferences(
referenceSetId=readGroup.reference_set_id))
referenceId = reference.id
iterator = self._client.searchReads(
[readGroup.id], referenceId=referenceId,
start=start, end=end)
args = "--start {} --end {} --readGroupIds {}\
--referenceId {}".format(
start, end, readGroup.id, referenceId)
test_executed += self.verifyParsedOutputsEqual(
iterator, "reads-search", args)
self.assertGreater(test_executed, 0)
def testSearchReferenceSets(self):
iterator = self._client.searchReferenceSets()
self.verifyParsedOutputsEqual(iterator, "referencesets-search")
def testSearchReferences(self):
for referenceSet in self._client.searchReferenceSets():
iterator = self._client.searchReferences(
referenceSetId=referenceSet.id)
args = "--referenceSetId={}".format(referenceSet.id)
self.verifyParsedOutputsEqual(iterator, "references-search", args)
def testSearchVariantSets(self):
for dataset in self._client.searchDatasets():
iterator = self._client.searchVariantSets(dataset.id)
self.verifyParsedOutputsEqual(iterator, "variantsets-search")
def testSearchVariants(self):
test_executed = 0
start = 0
end = 1000
referenceName = "1"
for dataset in self._client.searchDatasets():
for variantSet in self._client.searchVariantSets(dataset.id):
iterator = self._client.searchVariants(
variantSet.id, start=start, end=end,
referenceName=referenceName, callSetIds=[])
args = "--variantSetId {} --start {} --end {} -r {}".format(
variantSet.id, start, end, referenceName)
test_executed += self.verifyParsedOutputsEqual(
iterator, "variants-search", args)
self.assertGreater(test_executed, 0)
def testSearchVariantAnnotationSets(self):
for dataset in self._client.searchDatasets():
for variantSet in self._client.searchVariantSets(dataset.id):
iterator = self._client.searchVariantAnnotationSets(
variantSet.id)
args = "{}".format(variantSet.id)
self.verifyParsedOutputsEqual(
iterator, "variantannotationsets-search", args)
def testSearchVariantAnnotations(self):
test_executed = 0
start = 0
end = 10000000
referenceName = "1"
for dataset in self._client.searchDatasets():
for variantSet in self._client.searchVariantSets(dataset.id):
searchIterator = self._client.searchVariantAnnotationSets(
variantSet.id)
for variantAnnotationSet in searchIterator:
iterator = self._client.searchVariantAnnotations(
variantAnnotationSet.id,
start=start,
end=end,
referenceName=referenceName)
args = ("--variantAnnotationSetId {}"
" --start {} --end {} -r {}").format(
variantAnnotationSet.id, start, end, referenceName)
test_executed += self.verifyParsedOutputsEqual(
iterator, "variantannotations-search", args)
self.assertGreater(test_executed, 0)
|
|
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config.cfg import NoSuchOptError
from oslo_log import log as logging
from oslo_utils import importutils
import six
from trove.cluster.models import DBCluster
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _LI
from trove.common import wsgi
from trove.datastore import models as datastore_models
from trove.extensions.common import models
from trove.extensions.common import views
from trove.instance.models import DBInstance
LOG = logging.getLogger(__name__)
import_class = importutils.import_class
CONF = cfg.CONF
@six.add_metaclass(abc.ABCMeta)
class BaseDatastoreRootController(wsgi.Controller):
"""Base class that defines the contract for root controllers."""
@abc.abstractmethod
def root_index(self, req, tenant_id, instance_id, is_cluster):
pass
@abc.abstractmethod
def root_create(self, req, body, tenant_id, instance_id, is_cluster):
pass
@abc.abstractmethod
def root_delete(self, req, tenant_id, instance_id, is_cluster):
pass
@staticmethod
def _get_password_from_body(body=None):
if body:
return body['password'] if 'password' in body else None
return None
class DefaultRootController(BaseDatastoreRootController):
def root_index(self, req, tenant_id, instance_id, is_cluster):
"""Returns True if root is enabled; False otherwise."""
if is_cluster:
raise exception.ClusterOperationNotSupported(
operation='show_root')
LOG.info(_LI("Getting root enabled for instance '%s'.") % instance_id)
LOG.info(_LI("req : '%s'\n\n") % req)
context = req.environ[wsgi.CONTEXT_KEY]
is_root_enabled = models.Root.load(context, instance_id)
return wsgi.Result(views.RootEnabledView(is_root_enabled).data(), 200)
def root_create(self, req, body, tenant_id, instance_id, is_cluster):
if is_cluster:
raise exception.ClusterOperationNotSupported(
operation='enable_root')
LOG.info(_LI("Enabling root for instance '%s'.") % instance_id)
LOG.info(_LI("req : '%s'\n\n") % req)
context = req.environ[wsgi.CONTEXT_KEY]
user_name = context.user
password = DefaultRootController._get_password_from_body(body)
root = models.Root.create(context, instance_id,
user_name, password)
return wsgi.Result(views.RootCreatedView(root).data(), 200)
def root_delete(self, req, tenant_id, instance_id, is_cluster):
if is_cluster:
raise exception.ClusterOperationNotSupported(
operation='disable_root')
LOG.info(_LI("Disabling root for instance '%s'.") % instance_id)
LOG.info(_LI("req : '%s'\n\n") % req)
context = req.environ[wsgi.CONTEXT_KEY]
try:
found_user = self._find_root_user(context, instance_id)
except (ValueError, AttributeError) as e:
raise exception.BadRequest(msg=str(e))
if not found_user:
raise exception.UserNotFound(uuid="root")
models.Root.delete(context, instance_id)
return wsgi.Result(None, 200)
class ClusterRootController(DefaultRootController):
def root_index(self, req, tenant_id, instance_id, is_cluster):
"""Returns True if root is enabled; False otherwise."""
if is_cluster:
return self.cluster_root_index(req, tenant_id, instance_id)
else:
return self.instance_root_index(req, tenant_id, instance_id)
def instance_root_index(self, req, tenant_id, instance_id):
LOG.info(_LI("Getting root enabled for instance '%s'.") % instance_id)
LOG.info(_LI("req : '%s'\n\n") % req)
context = req.environ[wsgi.CONTEXT_KEY]
try:
is_root_enabled = models.ClusterRoot.load(context, instance_id)
except exception.UnprocessableEntity:
raise exception.UnprocessableEntity(
"Cluster %s is not ready." % instance_id)
return wsgi.Result(views.RootEnabledView(is_root_enabled).data(), 200)
def cluster_root_index(self, req, tenant_id, cluster_id):
LOG.info(_LI("Getting root enabled for cluster '%s'.") % cluster_id)
single_instance_id, cluster_instances = self._get_cluster_instance_id(
tenant_id, cluster_id)
return self.instance_root_index(req, tenant_id, single_instance_id)
def _block_cluster_instance_actions(self):
return False
def check_cluster_instance_actions(self, instance_id):
# Check if instance is in a cluster and if actions are allowed
instance = DBInstance.find_by(id=instance_id)
if instance.cluster_id and self._block_cluster_instance_actions():
raise exception.ClusterInstanceOperationNotSupported()
def root_create(self, req, body, tenant_id, instance_id, is_cluster):
if is_cluster:
return self.cluster_root_create(req, body, tenant_id, instance_id)
else:
self.check_cluster_instance_actions(instance_id)
return self.instance_root_create(req, body, instance_id)
def instance_root_create(self, req, body, instance_id,
cluster_instances=None):
LOG.info(_LI("Enabling root for instance '%s'.") % instance_id)
LOG.info(_LI("req : '%s'\n\n") % req)
context = req.environ[wsgi.CONTEXT_KEY]
user_name = context.user
password = ClusterRootController._get_password_from_body(body)
root = models.ClusterRoot.create(context, instance_id, user_name,
password, cluster_instances)
return wsgi.Result(views.RootCreatedView(root).data(), 200)
def cluster_root_create(self, req, body, tenant_id, cluster_id):
LOG.info(_LI("Enabling root for cluster '%s'.") % cluster_id)
single_instance_id, cluster_instances = self._get_cluster_instance_id(
tenant_id, cluster_id)
return self.instance_root_create(req, body, single_instance_id,
cluster_instances)
def _find_cluster_node_ids(self, tenant_id, cluster_id):
args = {'tenant_id': tenant_id, 'cluster_id': cluster_id}
cluster_instances = DBInstance.find_all(**args).all()
return [db_instance.id for db_instance in cluster_instances]
def _get_cluster_instance_id(self, tenant_id, cluster_id):
instance_ids = self._find_cluster_node_ids(tenant_id, cluster_id)
single_instance_id = instance_ids[0]
return single_instance_id, instance_ids
class RootController(wsgi.Controller):
"""Controller for instance functionality."""
def index(self, req, tenant_id, instance_id):
"""Returns True if root is enabled; False otherwise."""
datastore_manager, is_cluster = self._get_datastore(tenant_id,
instance_id)
root_controller = self.load_root_controller(datastore_manager)
return root_controller.root_index(req, tenant_id, instance_id,
is_cluster)
def create(self, req, tenant_id, instance_id, body=None):
"""Enable the root user for the db instance."""
datastore_manager, is_cluster = self._get_datastore(tenant_id,
instance_id)
root_controller = self.load_root_controller(datastore_manager)
if root_controller is not None:
return root_controller.root_create(req, body, tenant_id,
instance_id, is_cluster)
else:
raise NoSuchOptError('root_controller', group='datastore_manager')
def delete(self, req, tenant_id, instance_id):
datastore_manager, is_cluster = self._get_datastore(tenant_id,
instance_id)
root_controller = self.load_root_controller(datastore_manager)
if root_controller is not None:
return root_controller.root_delete(req, tenant_id,
instance_id, is_cluster)
else:
raise NoSuchOptError('root_controller', group='datastore_manager')
def _get_datastore(self, tenant_id, instance_or_cluster_id):
"""
Returns datastore manager and a boolean
showing if instance_or_cluster_id is a cluster id
"""
args = {'id': instance_or_cluster_id, 'tenant_id': tenant_id}
is_cluster = False
try:
db_info = DBInstance.find_by(**args)
except exception.ModelNotFoundError:
is_cluster = True
db_info = DBCluster.find_by(**args)
ds_version = (datastore_models.DatastoreVersion.
load_by_uuid(db_info.datastore_version_id))
ds_manager = ds_version.manager
return (ds_manager, is_cluster)
def load_root_controller(self, manager):
try:
clazz = CONF.get(manager).get('root_controller')
LOG.debug("Loading Root Controller class %s." % clazz)
root_controller = import_class(clazz)
return root_controller()
except NoSuchOptError:
return None
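# Hypothetical configuration illustrating how load_root_controller resolves the
# class above: the 'root_controller' option is read from the config group named
# after the datastore manager (group name and class path below are assumptions,
# not part of this module):
#
#   [mysql]
#   root_controller = trove.extensions.common.service.ClusterRootController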
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import logging
import json
import time
import urllib2
from datetime import datetime
from tg import config, session, redirect, request, expose
from tg.decorators import without_trailing_slash
from pylons import tmpl_context as c
from requests_oauthlib import OAuth2Session
import requests
from formencode import validators as fev
from forgeimporters import base
log = logging.getLogger(__name__)
class GitHubProjectNameValidator(fev.FancyValidator):
not_empty = True
messages = {
'invalid': 'Valid symbols are: letters, numbers, dashes, '
'underscores and periods',
'unavailable': 'This project is unavailable for import',
}
def _to_python(self, value, state=None):
user_name = state.full_dict.get('user_name', '')
user_name = state.full_dict.get('gh_user_name', user_name).strip()
project_name = value.strip()
full_project_name = '%s/%s' % (user_name, project_name)
if not re.match(r'^[a-zA-Z0-9-_.]+$', project_name):
raise fev.Invalid(self.message('invalid', state), value, state)
if not GitHubProjectExtractor(full_project_name).check_readable():
raise fev.Invalid(self.message('unavailable', state), value, state)
return project_name
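# Summary of the validator above: project names may contain letters, digits,
# '-', '_' and '.' (e.g. 'my-project_1.2'); anything else raises the 'invalid'
# message, and a name whose GitHub repo is not readable raises 'unavailable'.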
class GitHubProjectExtractor(base.ProjectExtractor):
PAGE_MAP = {
'project_info': 'https://api.github.com/repos/{project_name}',
'issues': 'https://api.github.com/repos/{project_name}/issues',
'wiki_url': 'https://github.com/{project_name}.wiki',
}
POSSIBLE_STATES = ('opened', 'closed')
SUPPORTED_ISSUE_EVENTS = ('closed', 'reopened', 'assigned')
NEXT_PAGE_URL_RE = re.compile(r'<([^>]*)>; rel="next"')
def __init__(self, *args, **kw):
self.token = None
user = kw.pop('user', None)
if user:
self.token = user.get_tool_data('GitHubProjectImport', 'token')
super(GitHubProjectExtractor, self).__init__(*args, **kw)
def add_token(self, url):
if self.token:
glue = '&' if '?' in url else '?'
url += glue + 'access_token=' + self.token
return url
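# Example of what add_token produces (token value is illustrative only):
#   https://api.github.com/repos/u/p        -> https://api.github.com/repos/u/p?access_token=<token>
#   https://api.github.com/repos/u/p?page=2 -> https://api.github.com/repos/u/p?page=2&access_token=<token>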
def wait_for_limit_reset(self, headers):
reset = headers.get('X-RateLimit-Reset')
limit = headers.get('X-RateLimit-Limit')
reset = datetime.utcfromtimestamp(int(reset))
now = datetime.utcnow()
log.warn('Rate limit exceeded (%s requests/hour). '
'Sleeping until %s UTC' % (limit, reset))
time.sleep((reset - now).total_seconds())
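# Sketch of the rate-limit headers wait_for_limit_reset expects (values are
# illustrative; GitHub sends the reset time as a Unix timestamp):
#   X-RateLimit-Limit: 5000
#   X-RateLimit-Remaining: 0
#   X-RateLimit-Reset: 1431872957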
def urlopen(self, url, **kw):
try:
resp = super(GitHubProjectExtractor, self).urlopen(
self.add_token(url), **kw)
except urllib2.HTTPError as e:
# GitHub will return 403 if rate limit exceeded.
# We're checking for limit on every request below, but we still
# can get 403 if other import task exceeds the limit before.
if e.code == 403 and e.info().get('X-RateLimit-Remaining') == '0':
self.wait_for_limit_reset(e.info())
return self.urlopen(url, **kw)
else:
raise e
remain = resp.info().get('X-RateLimit-Remaining')
if remain and int(remain) == 0:
self.wait_for_limit_reset(resp.info())
return self.urlopen(url, **kw)
return resp
def check_readable(self):
resp = requests.head(self.get_page_url('project_info'))
return resp.status_code == 200
def get_next_page_url(self, link):
if not link:
return
m = self.NEXT_PAGE_URL_RE.match(link)
return m.group(1) if m else None
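# Example of a Link header the regex above matches (URLs are illustrative;
# since .match() anchors at the start, the rel="next" entry is expected first,
# which is how GitHub typically orders it):
#   <https://api.github.com/repositories/1234/issues?page=2>; rel="next", <https://api.github.com/repositories/1234/issues?page=5>; rel="last"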
def parse_page(self, page):
# Look at link header to handle pagination
link = page.info().get('Link')
next_page_url = self.get_next_page_url(link)
return json.loads(page.read().decode('utf8')), next_page_url
def get_page(self, page_name_or_url, **kw):
page = super(GitHubProjectExtractor, self).get_page(
page_name_or_url, **kw)
page, next_page_url = page
while next_page_url:
p = super(GitHubProjectExtractor,
self).get_page(next_page_url, **kw)
p, next_page_url = p
page += p
self.page = page
return self.page
def get_summary(self):
return self.get_page('project_info').get('description')
def get_homepage(self):
return self.get_page('project_info').get('homepage')
def get_repo_url(self):
return self.get_page('project_info').get('clone_url')
def iter_issues(self):
# github api doesn't allow getting closed and opened tickets in one
# query
issues = []
url = self.get_page_url('issues') + '?state={state}'
for state in self.POSSIBLE_STATES:
issue_list_url = url.format(
state=state,
)
issues += self.get_page(issue_list_url)
issues.sort(key=lambda x: x['number'])
for issue in issues:
yield (issue['number'], issue)
def iter_comments(self, issue):
comments_url = issue['comments_url']
comments = self.get_page(comments_url)
for comment in comments:
yield comment
def iter_events(self, issue):
events_url = issue['events_url']
events = self.get_page(events_url)
for event in events:
if event.get('event') in self.SUPPORTED_ISSUE_EVENTS:
yield event
def has_wiki(self):
return self.get_page('project_info').get('has_wiki')
def has_tracker(self):
return self.get_page('project_info').get('has_issues')
class GitHubOAuthMixin(object):
'''Support for the GitHub OAuth web application flow.'''
def oauth_begin(self):
client_id = config.get('github_importer.client_id')
secret = config.get('github_importer.client_secret')
if not client_id or not secret:
return # GitHub app is not configured
if c.user.get_tool_data('GitHubProjectImport', 'token'):
return # token already exists, nothing to do
redirect_uri = request.url.rstrip('/') + '/oauth_callback'
oauth = OAuth2Session(client_id, redirect_uri=redirect_uri)
auth_url, state = oauth.authorization_url(
'https://github.com/login/oauth/authorize')
# Used in callback to prevent CSRF
session['github.oauth.state'] = state
session['github.oauth.redirect'] = request.url
session.save()
redirect(auth_url)
@without_trailing_slash
@expose()
def oauth_callback(self, **kw):
client_id = config.get('github_importer.client_id')
secret = config.get('github_importer.client_secret')
if not client_id or not secret:
return # GitHub app is not configured
oauth = OAuth2Session(
client_id, state=session.get('github.oauth.state'))
token = oauth.fetch_token(
'https://github.com/login/oauth/access_token',
client_secret=secret,
authorization_response=request.url
)
c.user.set_tool_data('GitHubProjectImport',
token=token['access_token'])
redirect(session.get('github.oauth.redirect', '/'))
|
|
import sys
import time
import socket
import json
from EventRegistry import EventRegistry, QueryArticles, RequestArticlesInfo
from EventRegistry import QueryEvent, RequestEventArticles
from EventRegistry import RequestArticlesIdList, RequestEventArticleUris
from datetime import date
from tweet_common import url_fix
from ermcfg import SOCKET_TIMEOUT, REQUEST_SLEEP, ER_LOG, ER_USER, ER_PASS
from ermcfg import ARTICLES_BATCH_SIZE, EVENTS_BATCH_SIZE, URLS_PER_PAGE
from ermcfg import ER_WAIT_BETWEEN_REQUESTS
def er_execute_query(er, q, n_retries=1000, wait=REQUEST_SLEEP,
wait_before=ER_WAIT_BETWEEN_REQUESTS):
'''Makes the query to ER, handles timeouts and ER API errors, and retries
after a period. Also waits before issuing the request to give ER time to
breathe between requests.
'''
# Give ER time to breathe between requests
if wait_before > 0:
#print('Sleeping before request: %f' % (wait_before,))
time.sleep(wait_before)
counter = 0
while True:
try:
res = er.execQuery(q)
break
except socket.timeout:
e = sys.exc_info()[0]
print(e)
time.sleep(wait)
if counter > n_retries:
raise
counter += 1
# retry
# error handling
if u'error' in res:
raise ValueError('EventRegistry API returned error: ' + res['error'])
return res
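# Minimal usage sketch for er_execute_query (credentials and host come from
# ermcfg, exactly as in the helper functions below):
#   er = EventRegistry(host="http://eventregistry.org", logging=ER_LOG)
#   er.login(ER_USER, ER_PASS)
#   q = QueryArticles(lang='eng')
#   q.addRequestedResult(RequestArticlesIdList())
#   res = er_execute_query(er, q)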
def er_get_urls_for_day(day=date(2014, 4, 16), lang='eng'):
'''Get ER article URLs for a given day
Returns:
A dictionary of URL -> EventId (EventUri)
'''
socket.setdefaulttimeout(SOCKET_TIMEOUT)
er = EventRegistry(host="http://eventregistry.org", logging=ER_LOG)
er.login(ER_USER, ER_PASS)
page = 0
total_urls = 0
urlmap = dict()
# setup query
q = QueryArticles(lang=lang)
q.setDateLimit(day, day)
# request article id list
q.addRequestedResult(RequestArticlesIdList())
# make query
print('Fetching URLs for day %s' % (str(day),))
res = er_execute_query(er, q)
articleIds = res["articleIds"]
for articleId in range(0, len(articleIds), URLS_PER_PAGE):
# setup query
chunk = articleIds[articleId:articleId + URLS_PER_PAGE]
q = QueryArticles()
q.setArticleIdList(chunk)
q.addRequestedResult(RequestArticlesInfo(includeBody=False,
includeTitle=False,
includeBasicInfo=False,
includeSourceInfo=False,
page=0, count=URLS_PER_PAGE))
page_urls = 0
# make query
res = er_execute_query(er, q)
articles = res['articles']['results']
# check if empty
if len(articles) == 0:
print('Fetched a total of %d urls' % (total_urls,))
return urlmap
# add to dict of URI -> Event
for article in articles:
if 'url' in article and 'eventUri' in article:
urlmap[url_fix(article['url'])] = article['eventUri']
page_urls += 1
print('Fetched page %d: %d urls' % (page, page_urls))
total_urls += page_urls
page += 1
# reached only when every chunk returned articles
return urlmap
def er_get_events_article(event_ids, lang='eng'):
'''
Gets centroid article for a list of events
'''
socket.setdefaulttimeout(SOCKET_TIMEOUT)
er = EventRegistry(host="http://eventregistry.org", logging=ER_LOG)
er.login(ER_USER, ER_PASS)
artmap = dict()
batch_size = ARTICLES_BATCH_SIZE
n_events = len(event_ids)
print('Fetching articles for %d events' % (n_events,))
for ii in range(0, n_events, batch_size):
batch_ids = event_ids[ii:ii + batch_size]
page_events = 0
q = QueryEvent(batch_ids)
q.addRequestedResult(RequestEventArticles(count=2,
lang=lang, bodyLen=-1))
res = er_execute_query(er, q)
events = res.keys()
page_events = len(events)
if page_events == 0:
break
for eventid in events:
info = res[eventid]['articles']['results']
if len(info) == 0:
continue
info = info[0]
a = json.dumps({'body': info['body'], 'title': info['title']})
artmap[eventid] = a
return artmap
def er_get_events_urls(event_ids, lang='eng'):
'''
Gets article urls for a list of events
Used in "online" mode
'''
socket.setdefaulttimeout(SOCKET_TIMEOUT)
er = EventRegistry(host="http://eventregistry.org", logging=ER_LOG)
er.login(ER_USER, ER_PASS)
urlmap = dict()
batch_size = EVENTS_BATCH_SIZE
n_events = len(event_ids)
print('Fetching urls for %d events' % (n_events,))
for ii in range(0, len(event_ids), batch_size):
batch_ids = event_ids[ii:ii + batch_size]
page = 0
while True:
page_urls = 0
q = QueryEvent(batch_ids)
q.addRequestedResult(RequestEventArticleUris())
res = er_execute_query(er, q)
events = res.keys()
page_events = len(events)
if page_events == 0:
break
for eventid in events:
for url in res[eventid]:
urlmap[url] = eventid
page_urls += 1
print('Fetched page %d: %d urls / %d events' % (page, page_urls,
page_events))
page += 1
return urlmap
def er_get_latest(lang='eng'):
'''
Generator: returns (urlmap, artmap, datemap), respectively:
url-eventid
eventid-articleInfo: {'title', 'body'}
date-[eventid1, ...]
'''
lastActivityId = 0
er = EventRegistry(host="http://eventregistry.org", logging=ER_LOG)
er.login(ER_USER, ER_PASS)
while True:
# get events that have recently changed
# the first time (when lastActivityId == 0) this will get you at most
# 1000 of the most recently changed events
# (if they were not updated more than 60 minutes in the past)
# on following calls, by specifying lastActivityId this will only
# return events changed after your last request
eventsDict = er.getRecentEvents(100, 60, lang=lang,
eventsWithLocationOnly=False,
lastStoryActivityId=lastActivityId)
# if ER is offline or you get a timeout then you might get None as the
# result
# Check if the response from the server was an error message
# (guard against None first, since an offline ER or a timeout yields None)
if eventsDict is not None and u'error' in eventsDict:
raise ValueError('EventRegistry API returned error: '
+ eventsDict['error'])
if eventsDict is not None:
events = eventsDict['recentActivity']['events']['activity']
size = EVENTS_BATCH_SIZE
for i in range(0, len(events), size):
event_ids = events[i:i + size]
artmap = er_get_events_article(event_ids, lang=lang)
urlmap = er_get_events_urls(event_ids, lang=lang)
datemap = dict()
info = eventsDict['recentActivity']['events']['eventInfo']
for event_id in events:
date = str(info[event_id]['eventDate'])
if date not in datemap:
datemap[date] = [str(event_id)]
else:
datemap[date].append(str(event_id))
new_id = eventsDict['recentActivity']['events']['lastActivityId']
# if ER rebooted you might get activity id that is smaller than
# the last one you have
if new_id < lastActivityId:
lastActivityId = 0
else:
# remember the last updated id
lastActivityId = new_id
# --- yield (return) ---
yield (urlmap, artmap, datemap)
# --- next entry point ---
# sleep for 60 seconds
time.sleep(60)
|
|
# -*- coding: utf-8 -*-
# TODO(colin): fix these lint errors (http://pep8.readthedocs.io/en/release-1.7.x/intro.html#error-codes)
# pep8-disable:E127,E128,E501
"""
gitbigfile command module
This module defines git-bigfile commands.
"""
import os
import sys
import hashlib
import tempfile
import re
import errno
import shutil
from gitbigfile import util, transport
SHA_FILE_SIZE = 41
SHA_PATTERN = re.compile('^[0-9a-fA-F]+$')
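# SHA_FILE_SIZE presumably corresponds to a 40-character hexadecimal SHA-1
# plus a trailing newline, which is how filter_clean writes placeholder files
# (see _check_stdin and filter_clean below).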
def config(global_flag=False):
"""Function to help configure git-bigfile filter options"""
if not global_flag:
# Check that we are in a git repository
# Following command will exit if it fails
util.get_repo_dir()
# filter options
options = [('filter.bigfile.clean', 'git-bigfile filter-clean'),
('filter.bigfile.smudge', 'git-bigfile filter-smudge')]
# transport options
valid_transports = transport.MANDATORY_OPTIONS.keys()
while True:
t = raw_input('Enter transport [%s]: ' %
'|'.join(valid_transports))
if t in valid_transports:
options.append(('git-bigfile.transport', t))
break
else:
print 'Invalid transport %s' % t
for t_option in transport.MANDATORY_OPTIONS[t]:
value = raw_input('Enter %s %s: ' % (t, t_option))
options.append(('git-bigfile.%s.%s' % (t, t_option), value))
util.set_git_options(options, global_flag)
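# Sketch of the git configuration that config() ends up writing (the transport
# name and its options depend on the answers given at the prompts):
#   filter.bigfile.clean = git-bigfile filter-clean
#   filter.bigfile.smudge = git-bigfile filter-smudge
#   git-bigfile.transport = <transport>
#   git-bigfile.<transport>.<option> = <value>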
class GitBigfile(object):
def __init__(self):
# These are all calculated lazily.
self._objects = None
self._repo_path = None
self._config = None
self._transport = None
def objects(self):
if self._objects is None:
self._objects = util.get_bigfile_dir('objects')
return self._objects
def repo_path(self):
if self._repo_path is None:
self._repo_path = util.get_repo_dir()
return self._repo_path
def config(self):
if self._config is None:
self._config = util.get_git_config()
return self._config
def transport(self):
if self._transport is None:
self._transport = self._get_transport()
return self._transport
def _repo_uses_bigfile(self):
"""Return False if the repo is not set up to use bigfile at all."""
# If there is no filter to handle bigfiles, there can be no bigfiles.
try:
with open(util.get_gitattributes(), 'r') as f:
return 'filter=bigfile' in f.read()
except (IOError, OSError) as e:
if e.errno == errno.ENOENT:
return False
else:
raise
def _get_relpath(self, filename):
"""Return filename relative file path from the current dir"""
full_path = os.path.join(self.repo_path(), filename)
return os.path.relpath(full_path)
def _get_transport(self):
"""Return the transport class to use"""
# Get the transport to use
try:
t = self.config()['git-bigfile.transport']
except KeyError:
sys.stderr.write('git-bigfile.transport is not set\n')
sys.exit(1)
# Get and check all transport options
kwargs = dict([(key.split('.')[-1], value) for key, value in self.config().items()
if key.startswith('git-bigfile.%s.' % t)])
try:
mandatory_options = frozenset(transport.MANDATORY_OPTIONS[t])
except KeyError:
sys.stderr.write('Unknown transport: %s\n' % t)
sys.stderr.write('Valid transports: %s\n' %
' '.join(transport.MANDATORY_OPTIONS.keys()))
sys.exit(1)
options = frozenset(kwargs.keys())
if not mandatory_options.issubset(options):
missing_options = mandatory_options - options
sys.stderr.write('Missing option(s) for %s transport:\n' % t)
sys.stderr.write('\n'.join(['git-bigfile.%s.%s' % (t, option)
for option in missing_options]))
sys.stderr.write('\n')
sys.exit(1)
t_class = t[0].upper() + t[1:]
return getattr(transport, t_class)(**kwargs)
def _get_tempfile(self):
"""Return a File object of a temporary file. It is not auto-deleted."""
return tempfile.NamedTemporaryFile(dir=util.get_bigfile_dir('tmp'),
delete=False)
def _check_stdin(self):
"""Check if the data received on stdin is a sha file
Return a tuple (data read, sha) or (data read, None)
"""
data = sys.stdin.read(64)
sha = data.strip()
if len(data) == SHA_FILE_SIZE and SHA_PATTERN.match(sha):
return (data, sha)
else:
return (data, None)
def filter_clean(self):
"""The clean filter is run when a bigfile is staged.
It replaces the bigfile received on stdin with its SHA.
"""
data, sha = self._check_stdin()
# if data is a sha, just output (this is an unexpanded bigfile)
# otherwise read in buffered chunks of the data
# calculating the SHA and copying to a temporary file
if sha is None:
temp = self._get_tempfile()
hashfunc = hashlib.sha1()
while True:
hashfunc.update(data)
temp.write(data)
data = sys.stdin.read(4096)
if not data:
break
# Calculate the SHA of the data
sha = hashfunc.hexdigest()
# Rename the temporary file
temp.close()
bigfile = os.path.join(self.objects(), sha)
os.rename(temp.name, bigfile)
sys.stderr.write('Saving bigfile: %s\n' % sha)
print sha
def filter_smudge(self):
"""The smudge filter is run on checkout.
It tries to replace the SHA file with the corresponding
bigfile.
"""
data, sha = self._check_stdin()
if sha:
# Try to recover the bigfile
bigfile = os.path.join(self.objects(), sha)
if os.path.isfile(bigfile):
sys.stderr.write('Recovering bigfile: %s\n' % sha)
with open(bigfile, 'rb') as f:
while True:
data = f.read(4096)
if not data:
break
sys.stdout.write(data)
else:
sys.stderr.write('Saving placeholder (bigfile not in cache): %s\n' % sha)
print sha
else:
# If it is not a 40 character long hash, just output
sys.stderr.write('Unknown git-bigfile format\n')
while True:
sys.stdout.write(data)
data = sys.stdin.read(4096)
if not data:
break
def _get_bigfiles_status(self):
"""Return the lists of bigfiles to_expand, expanded and deleted as a tuple
Each list includes for each bigfile the filename, sha, is_pushed bool and size
"""
to_expand = []
expanded = []
deleted = []
# If there is no filter to handle bigfiles, there can be no bigfiles.
if not self._repo_uses_bigfile():
return ([], [], [])
tree_entries = util.run('git ls-tree -l -r HEAD --full-tree').split('\n')
bigfiles = [(entry.split()[-1], entry.split()[2])
for entry in tree_entries if entry.split()[-2] == str(SHA_FILE_SIZE)]
# Even with a filter, there may not be any bigfiles in this repo.
if not bigfiles:
return ([], [], [])
pushed_files = self.transport().pushed()
for filename, blob in bigfiles:
relpath = self._get_relpath(filename)
sha = util.run('git show %s' % blob)
# Check if this is a sha (the size is already correct)
if not SHA_PATTERN.match(sha):
# Not a bigfile sha
continue
is_pushed = sha in pushed_files
try:
size = os.path.getsize(relpath)
except OSError as e:
if e.errno == errno.ENOENT:
# No such file or directory: file was deleted
deleted.append((relpath, sha, is_pushed, None))
else:
raise
else:
if size == SHA_FILE_SIZE:
to_expand.append((relpath, sha, is_pushed, None))
else:
expanded.append((relpath, sha, is_pushed, size))
return (to_expand, expanded, deleted)
def _get_unpushed_files(self):
"""Return the list of unpushed files"""
# Without a filter to handle bigfiles, there can be nothing to push.
if not self._repo_uses_bigfile():
return []
pushed_files = self.transport().pushed()
cached_files = os.listdir(self.objects())
unpushed_files = frozenset(cached_files) - frozenset(pushed_files)
return unpushed_files
def status(self):
"""Display the status of all bigfiles"""
to_expand, expanded, deleted = self._get_bigfiles_status()
util.print_status('Unexpanded bigfiles', to_expand)
util.print_status('Expanded bigfiles', expanded)
util.print_status('Deleted bigfiles', deleted)
def pull(self, files=None):
"""Expand bigfiles by pulling them from the server if needed"""
to_expand, expanded, deleted = self._get_bigfiles_status()
for filename, sha, is_pushed, size in to_expand:
# If they specified a list of files to limit to, check the limit.
if files and filename not in files:
continue
cache_file = os.path.join(self.objects(), sha)
if not os.path.isfile(cache_file):
if self.transport().exists(sha):
print 'Downloading %s : %s' % (sha[:8], filename)
temp = self._get_tempfile()
temp.close() # we just need the name
self.transport().get(sha, temp.name)
os.rename(temp.name, cache_file)
try:
print 'Expanding %s : %s' % (sha[:8], filename)
shutil.copy(cache_file, filename)
except IOError:
print 'Could not get %s' % filename
else:
# Update the index
util.run('git add %s' % filename)
def push(self, files=None):
"""Push cached files to the server"""
for sha in self._get_unpushed_files():
print 'Uploading %s' % sha[:8]
local_file = os.path.join(self.objects(), sha)
self.transport().put(local_file, sha)
def clear(self):
"""Remove pushed files from cache"""
# TODO(csilvers): short-circuit if self.objects() is the empty dir.
pushed_files = self.transport().pushed()
for sha in pushed_files:
cache_file = os.path.join(self.objects(), sha)
try:
os.unlink(cache_file)
print 'Removing %s from cache' % sha[:8]
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
# We can also delete the entire tmp dir, which should be empty.
temp_dir = util.get_bigfile_dir('tmp')
temp_files = os.listdir(temp_dir)
if temp_files:
print 'Removing %s objects from the temp-dir' % len(temp_files)
for filename in temp_files:
os.unlink(os.path.join(temp_dir, filename))
def add(self, filename):
"""Add filename to .gitattributes and to the index"""
if os.path.isfile(filename):
gitattributes = util.get_gitattributes()
base_name = os.path.basename(filename)
print 'Adding %s to %s' % (base_name, gitattributes)
with open(gitattributes, 'a') as f:
f.write('%s filter=bigfile -crlf\n' % base_name)
util.run('git add %s' % gitattributes)
print 'Adding %s to the index' % filename
util.run('git add %s' % filename)
else:
sys.stderr.write('%s did not match any file\n' % filename)
sys.exit(1)
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#import abc
from tensorflow.contrib.rnn.ops import gen_lstm_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.platform import resource_loader
from tensorflow.contrib.rnn import LSTMBlockWrapper
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras import activations
from tensorflow.python.platform import tf_logging as logging
import tensorflow as tf
import os
module_dir = os.path.dirname(__file__)
lib_name = os.path.join(module_dir, 'libxsmm_lstm.so')
xsmm_lstm = tf.load_op_library(lib_name)
@tf.RegisterGradient("XsmmLSTMCell")
def _LSTMBlockCellGrad(op, *grad):
"""Gradient for XsmmLSTMCell."""
w_in_kcck = False
try:
w_in_kcck = op.get_attr("w_in_kcck")
except ValueError:
pass
if w_in_kcck:
(x, cs_prev, h_prev, w, wT, wci, wcf, wco, b) = op.inputs
else:
(x, cs_prev, h_prev, w, wci, wcf, wco, b) = op.inputs
wT = w
(i, cs, f, o, ci, co, _) = op.outputs
(_, cs_grad, _, _, _, _, h_grad) = grad
(cs_prev_grad, h_prev_grad, x_grad, w_grad, b_grad, wci_grad, wcf_grad,
wco_grad) = xsmm_lstm.xsmm_lstm_cell_grad(
x=x,
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
w_t=wT,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
i=i,
cs=cs,
f=f,
o=o,
ci=ci,
co=co,
cs_grad=cs_grad,
h_grad=h_grad,
use_peephole=op.get_attr("use_peephole"),
w_in_kcck=w_in_kcck)
if w_in_kcck:
return (x_grad, cs_prev_grad, h_prev_grad, w_grad, None, wci_grad, wcf_grad,
wco_grad, b_grad)
else:
return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad)
@ops.RegisterGradient("XsmmFusedLSTM")
def _XsmmFusedLSTMGrad(op, *grad):
"""Gradient for XsmmFusedLSTM."""
seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b = op.inputs
i, cs, f, o, ci, co, h = op.outputs
cs_grad = grad[1]
h_grad = grad[6]
(x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad,
b_grad) = xsmm_lstm.xsmm_fused_lstm_grad(
seq_len_max,
x,
cs_prev,
h_prev,
w,
wci,
wcf,
wco,
b,
i,
cs,
f,
o,
ci,
co,
h,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"),
use_residue=op.get_attr("use_residue"),
use_dropout=op.get_attr("use_dropout"))
return [
None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad
]
class XsmmFusedLSTM(LSTMBlockWrapper):
"""XsmmFusedLSTM implementation of LSTM.
This is an extremely efficient LSTM implementation that uses a single TF op
for the entire LSTM. It should be both faster and more memory-efficient than
the standard LSTMBlockCell from tf.contrib.rnn.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
The variable naming is consistent with `rnn_cell_impl.LSTMCell`.
"""
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False,
dropout=0.0,
residual_connection=False,
reuse=None,
dtype=None,
name="lstm_fused_cell"):
"""Initialize the LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
cell_clip: clip the cell to this value. Default is no cell clipping.
use_peephole: Whether to use peephole connections or not.
residual_connection: Whether to add residual connections or not.
dropout: float, the dropout rate applied to the inputs (0.0 disables it).
reuse: (optional) boolean describing whether to reuse variables in an
existing scope. If not `True`, and the existing scope already has the
given variables, an error is raised.
dtype: the dtype of variables of this layer.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases. By default this is "lstm_cell", for variable-name compatibility
with `tf.nn.rnn_cell.LSTMCell`.
"""
super(XsmmFusedLSTM, self).__init__(
_reuse=reuse, name=name, dtype=dtype)
self._num_units = num_units
self._forget_bias = forget_bias
self._cell_clip = cell_clip if cell_clip is not None else -1
self._use_peephole = use_peephole
self._residual_connection = residual_connection
self._dropout = dropout
# Inputs must be 3-dimensional.
self.input_spec = base_layer.InputSpec(ndim=3)
@property
def num_units(self):
"""Number of units in this cell (output dimension)."""
return self._num_units
def build(self, input_shape):
input_size = input_shape[2].value
self._kernel = self.add_variable(
"kernel", [input_size + self._num_units, self._num_units * 4])
self._bias = self.add_variable(
"bias", [self._num_units * 4],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
self._w_i_diag = self.add_variable("w_i_diag", [self._num_units])
self._w_f_diag = self.add_variable("w_f_diag", [self._num_units])
self._w_o_diag = self.add_variable("w_o_diag", [self._num_units])
self.built = True
def _call_cell(self,
inputs,
initial_cell_state=None,
initial_output=None,
dtype=None,
sequence_length=None):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len)` or None.
Returns:
A pair containing:
- Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
- Output (h): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
"""
inputs_shape = inputs.get_shape().with_rank(3)
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
if self._use_peephole:
wci = self._w_i_diag
wco = self._w_o_diag
wcf = self._w_f_diag
else:
wci = wcf = wco = array_ops.zeros([self._num_units], dtype=dtype)
if sequence_length is None:
max_seq_len = math_ops.to_int64(time_len)
else:
max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))
print(" Xsmm LSTM Fused Cell: dropout = %.3f, Resudue = %s" % (self._dropout, self._residual_connection))
orig_inputs = inputs
if self._dropout > 0.0:
inputs = tf.nn.dropout(inputs, 1 - self._dropout)
'''
_, cs, _, _, _, _, h = gen_lstm_ops.block_lstm(
seq_len_max=max_seq_len,
x=inputs,
cs_prev=initial_cell_state,
h_prev=initial_output,
w=self._kernel,
wci=wci,
wcf=wcf,
wco=wco,
b=self._bias,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
'''
_, cs, _, _, _, _, h = xsmm_lstm.xsmm_fused_lstm(
seq_len_max=max_seq_len,
x=inputs,
cs_prev=initial_cell_state,
h_prev=initial_output,
w=self._kernel,
wci=wci,
wcf=wcf,
wco=wco,
b=self._bias,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole,
use_residue=False,
use_dropout=False)
if self._residual_connection:
with tf.name_scope("fused_residual_connection"):
h = h + orig_inputs
return cs, h
class XsmmLSTMCell(rnn_cell_impl.RNNCell):
"""LIbxsmm LSTM Cell"""
def __init__(self,
num_units,
forget_bias=1.0,
state_is_tuple=True,
activation=None,
reuse=None,
name=None,
dtype=None,
w_in_kcck=True,
**kwargs):
"""Initialize the libxsmm LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
Must set to `0.0` manually when restoring from CudnnLSTM-trained
checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. The latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`. It
could also be string that is within Keras activation function names.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
dtype: Default dtype of the layer (default of `None` means use the type
of the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
`trainable` etc when constructing the cell from configs of get_config().
When restoring from CudnnLSTM-trained checkpoints, must use
`CudnnCompatibleLSTMCell` instead.
"""
super(XsmmLSTMCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype, **kwargs)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
# Inputs must be 2-dimensional.
self.input_spec = base_layer.InputSpec(ndim=2)
self._num_units = num_units
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._w_in_kcck = w_in_kcck
if activation:
self._activation = activations.get(activation)
else:
self._activation = math_ops.tanh
@property
def state_size(self):
return (rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
% str(inputs_shape))
input_depth = inputs_shape[-1]
h_depth = self._num_units
C = input_depth + h_depth
K = 4 * self._num_units
ctxt = tf.get_default_graph()._get_control_flow_context()
if ctxt: ctxt = ctxt.GetWhileContext()
self._kernel = self.add_variable(
"kernel",
shape=[input_depth + h_depth, 4 * self._num_units])
self._bias = self.add_variable(
"bias",
shape=[4 * self._num_units],
initializer=init_ops.zeros_initializer(dtype=self.dtype))
if self._w_in_kcck:
if ctxt: ctxt.Exit()
def block_transpose(inp, C, BC, K, BK):
inp_packed = tf.reshape(tf.transpose(tf.reshape(inp, [C//BC, BC, K//BK, BK]), perm=[2, 0, 1, 3]), [C, K])
inp_packed_trans = tf.reshape(tf.transpose(tf.reshape(inp, [C//BC, BC, 4, K//(BK*4), BK]), perm=[2, 0, 3, 4, 1]), [C, K])
return inp_packed, inp_packed_trans
with tf.variable_scope("kernel_transpose") as vs:
with tf.name_scope(""), tf.name_scope(vs.name):
BC = 64 if input_depth % 64 == 0 else input_depth
BK = 64 if h_depth % 64 == 0 else h_depth
W, R = tf.split(self._kernel, [input_depth, h_depth], 0)
W, WT = block_transpose(W, input_depth, BC, K, BK)
R, RT = block_transpose(R, h_depth, BK, K, BK)
self._kernel = tf.concat([W, R], 0)
self._kernel_trans = tf.concat([WT, RT], 0)
if ctxt: ctxt.Enter()
else:
self._kernel_trans = self._kernel
self.built = True
def call(self, inputs, state):
"""Long short-term memory cell (LSTM).
Args:
inputs: `2-D` tensor with shape `[batch_size, input_size]`.
state: An `LSTMStateTuple` of state tensors, each shaped
`[batch_size, num_units]`, if `state_is_tuple` has been set to
`True`. Otherwise, a `Tensor` shaped
`[batch_size, 2 * num_units]`.
Returns:
A pair containing the new hidden state, and the new state (either a
`LSTMStateTuple` or a concatenated state, depending on
`state_is_tuple`).
"""
if len(state) != 2:
raise ValueError("Expecting state to be a tuple with length 2.")
if False: #self._use_peephole:
wci = self._w_i_diag
wcf = self._w_f_diag
wco = self._w_o_diag
else:
wci = wcf = wco = array_ops.zeros([self._num_units])
(cs_prev, h_prev) = state
(_, cs, _, _, _, _, h) = xsmm_lstm.xsmm_lstm_cell(
x=inputs,
cs_prev=cs_prev,
h_prev=h_prev,
w=self._kernel,
w_t=self._kernel_trans,
wci=wci,
wcf=wcf,
wco=wco,
b=self._bias,
forget_bias=self._forget_bias,
cell_clip=-1,
use_peephole=False,
w_in_kcck=self._w_in_kcck,
name=self._name)
new_state = rnn_cell_impl.LSTMStateTuple(cs, h)
return h, new_state
def get_config(self):
config = {
"num_units": self._num_units,
"forget_bias": self._forget_bias,
"state_is_tuple": self._state_is_tuple,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(XsmmLSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
|
'''
Class for reading from Brainware DAM files
DAM files are binary files for holding raw data. They are broken up into
a sequence of Segments, each containing a single raw trace and parameters.
The DAM file does NOT contain a sampling rate, nor can it be reliably
calculated from any of the parameters. You can calculate it from
the "sweep length" attribute if it is present, but it isn't always present.
It is more reliable to get it from the corresponding SRC file or F32 file if
you have one.
The DAM file also does not divide up data into Blocks, so only a single
Block is returned.
Brainware was developed by Dr. Jan Schnupp and is available from
Tucker Davis Technologies, Inc.
http://www.tdt.com/downloads.htm
Neither Dr. Jan Schnupp nor Tucker Davis Technologies, Inc. had any part in the
development of this code
The code is implemented with the permission of Dr. Jan Schnupp
Author: Todd Jennings
'''
# import needed core python modules
import os
import os.path
# numpy and quantities are already required by neo
import numpy as np
import quantities as pq
# needed core neo modules
from neo.core import (AnalogSignal, Block,
Group, Segment)
# need to subclass BaseIO
from neo.io.baseio import BaseIO
class BrainwareDamIO(BaseIO):
"""
Class for reading Brainware raw data files with the extension '.dam'.
The read_block method returns the first Block of the file. It will
automatically close the file after reading.
The read method is the same as read_block.
Note:
The file format does not contain a sampling rate. The sampling rate
is set to 1 Hz, but this is arbitrary. If you have a corresponding .src
or .f32 file, you can get the sampling rate from that. It may also be
possible to infer it from the attributes, such as "sweep length", if
present.
Usage:
>>> from neo.io.brainwaredamio import BrainwareDamIO
>>> damfile = BrainwareDamIO(filename='multi_500ms_mulitrep_ch1.dam')
>>> blk1 = damfile.read()
>>> blk2 = damfile.read_block()
>>> print blk1.segments
>>> print blk1.segments[0].analogsignals
>>> print blk1.units
>>> print blk1.units[0].name
>>> print blk2
>>> print blk2[0].segments
"""
is_readable = True # This class can only read data
is_writable = False # write is not supported
# This class is able to directly or indirectly handle the following objects
# You can notice that this greatly simplifies the full Neo object hierarchy
supported_objects = [Block, Group,
Segment, AnalogSignal]
readable_objects = [Block]
writeable_objects = []
has_header = False
is_streameable = False
# This is for GUI stuff: a definition for parameters when reading.
# This dict should be keyed by object (`Block`). Each entry is a list
# of tuple. The first entry in each tuple is the parameter name. The
# second entry is a dict with keys 'value' (for default value),
# and 'label' (for a descriptive name).
# Note that if the highest-level object requires parameters,
# common_io_test will be skipped.
read_params = {Block: []}
# do not support write so no GUI stuff
write_params = None
name = 'Brainware DAM File'
extensions = ['dam']
mode = 'file'
def __init__(self, filename=None):
'''
Arguments:
filename: the filename
'''
BaseIO.__init__(self)
self._path = filename
self._filename = os.path.basename(filename)
self._fsrc = None
def read_block(self, lazy=False, **kargs):
'''
Reads a block from the raw data file "fname" generated
with BrainWare
'''
assert not lazy, 'Do not support lazy'
# there are no keyword arguments implemented so far. If someone tries to
# pass any, they either expect them to do something or are making a
# mistake; neither of which should pass silently
if kargs:
raise NotImplementedError('This method does not have any '
'arguments implemented yet')
self._fsrc = None
block = Block(file_origin=self._filename)
# create the objects to store other objects
gr = Group(file_origin=self._filename)
# load objects into their containers
block.groups.append(gr)
# open the file
with open(self._path, 'rb') as fobject:
# while the file is not done keep reading segments
while True:
seg = self._read_segment(fobject)
# if there are no more Segments, stop
if not seg:
break
# store the segment and signals
block.segments.append(seg)
gr.analogsignals.append(seg.analogsignals[0])
# remove the file object
self._fsrc = None
block.create_many_to_one_relationship()
return block
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# IMPORTANT!!!
# These are private methods implementing the internal reading mechanism.
# Due to the way BrainWare DAM files are structured, they CANNOT be used
# on their own. Calling these manually will almost certainly alter your
# position in the file in an unrecoverable manner, whether they throw
# an exception or not.
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def _read_segment(self, fobject):
'''
Read a single segment with a single analogsignal
Returns the segment or None if there are no more segments
'''
try:
# float64 -- start time of the AnalogSignal
t_start = np.fromfile(fobject, dtype=np.float64, count=1)[0]
except IndexError:
# if there are no more Segments, return
return None
# int16 -- index of the stimulus parameters
seg_index = np.fromfile(fobject, dtype=np.int16, count=1)[0].tolist()
# int16 -- number of stimulus parameters
numelements = np.fromfile(fobject, dtype=np.int16, count=1)[0]
# read the name strings for the stimulus parameters
paramnames = []
for _ in range(numelements):
# uint8 -- the number of characters in the string
numchars = np.fromfile(fobject, dtype=np.uint8, count=1)[0]
# char * numchars -- a single name string
name = np.fromfile(fobject, dtype=np.uint8, count=numchars)
# exclude invalid characters
name = str(name[name >= 32].view('c').tostring())
# add the name to the list of names
paramnames.append(name)
# float32 * numelements -- the values for the stimulus parameters
paramvalues = np.fromfile(fobject, dtype=np.float32, count=numelements)
# combine parameter names and the parameters as a dict
params = dict(zip(paramnames, paramvalues))
# int32 -- the number elements in the AnalogSignal
numpts = np.fromfile(fobject, dtype=np.int32, count=1)[0]
# int16 * numpts -- the AnalogSignal itself
signal = np.fromfile(fobject, dtype=np.int16, count=numpts)
sig = AnalogSignal(signal.astype(np.float) * pq.mV,
t_start=t_start * pq.d,
file_origin=self._filename,
sampling_period=1. * pq.s,
copy=False)
# Note: setting the sampling_period to 1 s is arbitrary
# load the AnalogSignal and parameters into a new Segment
seg = Segment(file_origin=self._filename,
index=seg_index,
**params)
seg.analogsignals = [sig]
return seg
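# Binary layout of one DAM segment, as read by _read_segment above:
#   float64                 start time of the AnalogSignal
#   int16                   index of the stimulus parameters
#   int16                   number of stimulus parameters (numelements)
#   repeated numelements times:
#     uint8                 number of characters in the parameter name
#     uint8 * numchars      the parameter name itself
#   float32 * numelements   the parameter values
#   int32                   number of samples (numpts)
#   int16 * numpts          the raw signal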
|
|
#!/usr/bin/env python
import base64
import binascii
import boto.dynamodb
from boto.dynamodb.condition import *
from boto.exception import DynamoDBResponseError
import boto.kms
from Crypto.Cipher import AES
from Crypto import Random
import json
import time
# http://stackoverflow.com/questions/12524994/encrypt-decrypt-using-pycrypto-aes-256
BS = 16
# this appends BS - len(s) % BS (that is, the lowest number >0 that can be added to len(s) to get a multiple of BS) bytes to s,
# where each byte is the number of bytes to be appended
pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS)
# this removes the last X bytes of s, where X is the numeric value of the last byte
unpad = lambda s: s[:-ord(s[len(s)-1:])]
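# Worked example of the padding scheme above (BS == 16): a 13-byte input gets
# three bytes of value 3 appended, and unpad strips them again:
#   pad('hello, world!')              -> 'hello, world!\x03\x03\x03'
#   unpad(pad('hello, world!'))       -> 'hello, world!'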
# manually and unit tested
def get_kaurna_table(region='us-east-1', read_throughput=1, write_throughput=1, **kwargs):
# declared schema:
# hash: secret_name
# range: secret_version
# undeclared fields:
# encrypted_secret
# encrypted_data_key
# encryption_context
# authorized_entities
# create_date
# last_data_key_rotation
# deprecated
ddb = boto.dynamodb.connect_to_region(region_name=region)
try:
# get_table output is a DDB Table object
return ddb.get_table(name='kaurna')
# If the table doesn't exist, an error will get thrown
except DynamoDBResponseError as e:
schema = ddb.create_schema(
hash_key_name='secret_name',
hash_key_proto_value=str,
range_key_name='secret_version',
range_key_proto_value=int
)
# create_table output is a DDB Table object
return ddb.create_table(name='kaurna', schema=schema, read_units=read_throughput, write_units=write_throughput)
# manually and unit tested
def create_kaurna_key(region='us-east-1', **kwargs):
# This method will create the kaurna KMS master key if necessary
kms = boto.kms.connect_to_region(region_name=region)
# list_aliases response:
# {'Truncated': False, 'Aliases': [{'AliasArn': 'arn:aws:kms:us-east-1:000000000000:alias/aws/ebs', 'AliasName': 'alias/aws/ebs'}, {'AliasArn': 'arn:aws:kms:us-east-1:000000000000:alias/aws/rds', 'AliasName': 'alias/aws/rds'}, {'AliasArn': 'arn:aws:kms:us-east-1:000000000000:alias/aws/redshift', 'AliasName': 'alias/aws/redshift'}, {'AliasArn': 'arn:aws:kms:us-east-1:000000000000:alias/aws/s3', 'AliasName': 'alias/aws/s3'}, {'AliasArn': 'arn:aws:kms:us-east-1:000000000000:alias/kaurna', 'AliasName': 'alias/kaurna', 'TargetKeyId': '1234abcd-12ab-12ab-12ab-123456abcdef'}]}
aliases = kms.list_aliases()
if 'alias/kaurna' in [alias['AliasName'] for alias in aliases['Aliases']]:
return False
else:
# create_key response:
# {'KeyMetadata': {'KeyId': '1234abcd-12ab-12ab-12ab-123456abcdef', 'Description': '', 'Enabled': True, 'KeyUsage': 'ENCRYPT_DECRYPT', 'CreationDate': 1431872957.123, 'Arn': 'arn:aws:kms:us-east-1:000000000000:key/1234abcd-12ab-12ab-12ab-123456abcdef', 'AWSAccountId': '000000000000'}}
# TODO: see what the format of this response is and make it so that the alias gets attached properly
response = kms.create_key()
# create_alias has no output
kms.create_alias('alias/kaurna', response['KeyMetadata']['KeyId'])
return True
# manually and unit tested
def get_data_key(encryption_context=None, region='us-east-1'):
# This method will generate a new data key
kms = boto.kms.connect_to_region(region_name=region)
# generate_data_key output:
# {'Plaintext': '<binary blob>', 'KeyId': 'arn:aws:kms:us-east-1:000000000000:key/1234abcd-12ab-12ab-12ab-123456abcdef', 'CiphertextBlob': '<binary blob>'}
data_key = kms.generate_data_key(key_id='alias/kaurna', encryption_context=encryption_context, key_spec='AES_256')
return data_key
# manually and unit tested
def _generate_encryption_context(authorized_entities):
if not authorized_entities:
return None
encryption_context = {}
for entity in authorized_entities:
encryption_context[entity] = 'kaurna'
return encryption_context
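# For example (illustrative values): _generate_encryption_context(['web', 'batch']) returns
# {'web': 'kaurna', 'batch': 'kaurna'}, while an empty list or None returns None, meaning
# no KMS encryption context will be enforced for that secret.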
# tested manually
def store_secret(secret_name, secret, secret_version=None, authorized_entities=None, region='us-east-1', **kwargs):
# This method will store the key in DynamoDB
# If version is specified, it'll be stored as that version, or an error will be thrown if that version exists
# if the version isn't specified, it'll be stored as version 1 if the entry doesn't already exist and version N+1 if it does, where N is the greatest existing version
if not secret_name or not secret:
raise Exception('Must provide both secret_name and the secret itself.')
items = load_all_entries(secret_name=secret_name, secret_version=secret_version, region=region, attributes_to_get=['secret_name','secret_version'])
if secret_version:
for item in items:
# if there's anything here, we want to fail because the specified secret/version already exists
raise Exception('To update an existing secret/version, please use update_secrets, or use delete_secret to delete the secret/version first.')
else:
versions = [item['secret_version'] for item in items]
secret_version = 1 + max(versions + [0])
# at this point both secret_name and secret_version are set, and we know neither of them is currently in use.
encryption_context_dict = _generate_encryption_context(authorized_entities)
encryption_context_string = json.dumps(encryption_context_dict)
data_key = get_data_key(encryption_context=encryption_context_dict, region=region)
encrypted_data_key = binascii.b2a_base64(data_key['CiphertextBlob'])
encrypted_secret = encrypt_with_key(plaintext=secret, key=data_key['Plaintext'])
now = int(time.time()) # we really don't need sub-second accuracy on this, so strip it out to prevent confusion
attrs = {
'secret_name': secret_name, # customer sets
'secret_version': int(secret_version), # customer sets
'encrypted_secret': encrypted_secret, # customer provides plaintext, then kaurna encrypts
'encrypted_data_key': encrypted_data_key, # kaurna gets from kms
'encryption_context': encryption_context_string, # kaurna derives from authorized_entities
'authorized_entities': json.dumps(authorized_entities), # customer sets
'create_date': now, # kaurna sets this at initial creation
'last_data_key_rotation': now, # kaurna sets this whenever the data key changes
'deprecated': False # customer sets
}
get_kaurna_table(region=region).new_item(attrs=attrs).save()
return
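# Hypothetical usage sketch (names and values are illustrative, not from the source; requires
# AWS credentials with access to KMS and the kaurna DynamoDB table in the chosen region):
#   store_secret('db_password', 'hunter2', authorized_entities=['web'])  # version 1, if none exist yet
#   store_secret('db_password', 'hunter3', authorized_entities=['web'])  # stored as version 2
#   store_secret('db_password', 'hunter2', secret_version=1)             # raises: that version exists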
# manually tested
def load_all_entries(secret_name=None, secret_version=None, region='us-east-1', attributes_to_get=None, **kwargs):
table = get_kaurna_table(region=region)
if secret_version and not secret_name:
raise Exception('If secret_version is provided, you must also provide secret_name.')
if secret_version:
return table.query(hash_key=secret_name, range_key_condition=EQ(int(secret_version)), attributes_to_get=attributes_to_get)
elif secret_name:
return table.query(hash_key=secret_name, attributes_to_get=attributes_to_get)
else:
return table.scan(attributes_to_get=attributes_to_get)
# manually tested
def rotate_data_keys(secret_name=None, secret_version=None, region='us-east-1', **kwargs):
items = load_all_entries(secret_name=secret_name, secret_version=secret_version, region=region)
for item in items:
_reencrypt_item_and_save(item=item, region=region)
return
# manually tested
def _reencrypt_item_and_save(item, region='us-east-1'):
# this method takes a DynamoDB item and reencrypts it
# It uses the 'encryption_context' entry for decryption, but then uses the 'authorized_entities' attribute to re-encrypt
    old_encrypted_secret = item['encrypted_secret']
    old_encrypted_data_key = item['encrypted_data_key']
    old_encryption_context = json.loads(item['encryption_context'])
    new_encryption_context = _generate_encryption_context(json.loads(item['authorized_entities']))
new_data_key = get_data_key(encryption_context=new_encryption_context, region=region)
new_encrypted_data_key = binascii.b2a_base64(new_data_key['CiphertextBlob'])
new_encrypted_secret = encrypt_with_key(plaintext=decrypt_with_key(old_encrypted_secret, decrypt_with_kms(old_encrypted_data_key, old_encryption_context, region=region)['Plaintext']), key=new_data_key['Plaintext'])
item['encryption_context'] = json.dumps(new_encryption_context)
item['encrypted_secret'] = new_encrypted_secret
item['encrypted_data_key'] = new_encrypted_data_key
item['last_data_key_rotation'] = int(time.time())
item.save()
return item
# manually tested
def update_secrets(secret_name, secret_version=None, authorized_entities=None, region='us-east-1', **kwargs):
# This method will update the authorized entities for a secret.
# If no version is specified, it will update all versions of the secret
items = load_all_entries(secret_name=secret_name, secret_version=secret_version, region=region)
for item in items:
item['authorized_entities'] = json.dumps(authorized_entities)
_reencrypt_item_and_save(item=item, region=region)
return
# manually tested
def erase_secret(secret_name, secret_version=None, region='us-east-1', **kwargs):
# This method will delete the specified secret, or all versions of the secret if version is None
if not secret_name:
raise Exception('Must provide secret_name.')
items = load_all_entries(secret_name=secret_name, secret_version=secret_version, region=region)
for item in items:
item.delete()
return
# manually tested
def erase_all_the_things(region='us-east-1', seriously=False, **kwargs):
# This method will delete the kaurna DynamoDB table.
if seriously:
get_kaurna_table(region=region).delete()
return
# manually tested
def deprecate_secrets(secret_name=None, secret_version=None, region='us-east-1', **kwargs):
# This method will mark the specified secret as deprecated, so that kaurna knows that it's old and shouldn't be used
items = load_all_entries(secret_name=secret_name, secret_version=secret_version, region=region)
for item in items:
item['deprecated'] = True
item.save()
return
# manually tested
def activate_secrets(secret_name=None, secret_version=None, region='us-east-1', **kwargs):
# This method will mark the specified secret as NOT deprecated, so that kaurna knows that it can be used
items = load_all_entries(secret_name=secret_name, secret_version=secret_version, region=region)
for item in items:
item['deprecated'] = False
item.save()
return
# manually tested
def describe_secrets(secret_name=None, secret_version=None, region='us-east-1', **kwargs):
# This method will return a variety of non-secret information about a secret
# If secret_name is provided, only versions of that secret will be described
# if secret_name and secret_version are both provided, only that secret/version will be described
# if secret_version is provided but secret_name isn't, an error will be thrown (by load_all_entries)
# return format:
# {"foobar": {1:{"create_date":123456, "last_data_key_rotation":234567, "authorized_entities":"", "deprecated":False}}}
descriptions = {}
items = load_all_entries(secret_name=secret_name, secret_version=secret_version, region=region, attributes_to_get=['secret_name','secret_version','create_date','last_data_key_rotation','authorized_entities','deprecated'])
for item in items:
name = item['secret_name']
version = item['secret_version']
descriptions[name] = descriptions.get(name, {})
description = {
'create_date' : item['create_date'],
'last_data_key_rotation' : item['last_data_key_rotation'],
'authorized_entities' : json.loads(item['authorized_entities']),
'deprecated': item['deprecated']
}
descriptions[name][version] = description
return descriptions
# manually tested
def get_secret(secret_name, secret_version=None, region='us-east-1', **kwargs):
if not secret_name:
raise Exception('Must provide secret_name.')
items = sorted([secret for secret in load_all_entries(secret_name=secret_name, secret_version=secret_version, region=region) if not secret['deprecated']], key=lambda i: i['secret_version'])
if len(items) == 0:
raise Exception('No active versions of secret \'{0}\' found.'.format(secret_name))
item = items[-1]
return _decrypt_item(item=item, region=region)
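# Illustrative behaviour (values not from the source): if 'db_password' has versions 1-3 and
# version 3 is marked deprecated, get_secret('db_password') decrypts and returns version 2,
# the highest active version; passing secret_version pins the lookup to that version instead.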
def _decrypt_item(item, region='us-east-1'):
return decrypt_with_key(item['encrypted_secret'], decrypt_with_kms(item['encrypted_data_key'], json.loads(item['encryption_context']), region=region)['Plaintext'])
# manually tested
def encrypt_with_key(plaintext, key, iv=None):
return (lambda iv: base64.b64encode(iv + AES.new(key, AES.MODE_CBC, iv).encrypt(pad(plaintext))))(iv if iv else Random.new().read(AES.block_size))
# manually tested
def decrypt_with_key(ciphertext, key):
return unpad(AES.new(key, AES.MODE_CBC, base64.b64decode(ciphertext)[:16]).decrypt(base64.b64decode(ciphertext)[16:]))
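# A minimal local round-trip sketch (no KMS involved; the key is an illustrative 32-byte value):
#   key = Random.new().read(32)                   # 256-bit data key, like KMS returns in 'Plaintext'
#   token = encrypt_with_key('top secret', key)   # base64(iv + AES-CBC ciphertext)
#   decrypt_with_key(token, key)                  # 'top secret'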
# Untested, as we never actually use this. It's just here for symmetry.
def encrypt_with_kms(plaintext, key_id='alias/kaurna', encryption_context=None, grant_tokens=None, region='us-east-1'):
# encrypt output:
# {u'KeyId': u'arn:aws:kms:us-east-1:000000000000:key/1234abcd-12ab-12ab-12ab-123456abcdef', u'CiphertextBlob': '<binary blob>'}
return binascii.b2a_base64(boto.kms.connect_to_region(region_name=region).encrypt(key_id=key_id, plaintext=plaintext, encryption_context=encryption_context, grant_tokens=grant_tokens)['CiphertextBlob'])
# manually tested
def decrypt_with_kms(ciphertext_blob, encryption_context=None, grant_tokens=None, region='us-east-1'):
# decrypt output:
# {'Plaintext': '<binary blob>', 'KeyId': 'arn:aws:kms:us-east-1:000000000000:key/1234abcd-12ab-12ab-12ab-123456abcdef'}
return boto.kms.connect_to_region(region_name=region).decrypt(ciphertext_blob = binascii.a2b_base64(ciphertext_blob), encryption_context=encryption_context, grant_tokens=grant_tokens)
|
|
# -*- test-case-name: vumi.transports.irc.tests.test_irc -*-
"""IRC transport."""
from twisted.words.protocols import irc
from twisted.internet import protocol
from twisted.internet.defer import inlineCallbacks
from twisted.python import log
from vumi.config import (
ConfigClientEndpoint, ConfigText, ConfigList, ConfigInt,
ClientEndpointFallback)
from vumi.reconnecting_client import ReconnectingClientService
from vumi.transports import Transport
from vumi.transports.failures import TemporaryFailure
class IrcMessage(object):
"""Container for details of a message to or from an IRC user.
:type sender: str
:param sender:
Who sent the message (usually user!ident@hostmask).
:type recipient: str
:param recipient:
        User or channel receiving the message.
:type content: str
:param content:
Contents of message.
:type nickname: str
:param nickname:
Nickname used by the client that received the message.
Optional.
:type command: str
:param command:
IRC command that produced the message.
"""
def __init__(self, sender, command, recipient, content, nickname=None):
self.sender = self.canonicalize_recipient(sender)
self.command = command
self.recipient = self.canonicalize_recipient(recipient)
self.content = content
self.nickname = nickname
def __eq__(self, other):
if isinstance(other, IrcMessage):
return all(getattr(self, name) == getattr(other, name)
for name in ("sender", "command", "recipient",
"content", "nickname"))
return False
@staticmethod
def canonicalize_recipient(recipient):
"""Convert a generic IRC address (with possible server parts)
to a simple lowercase username or channel."""
return recipient.partition('!')[0].lower()
def channel(self):
"""Return the channel if the recipient is a channel.
Otherwise return None.
"""
if self.recipient[:1] in ('#', '&', '$'):
return self.recipient
return None
def addressed_to(self, nickname):
nickname = self.canonicalize_recipient(nickname)
if not self.channel():
return self.recipient == nickname
parts = self.content.split(None, 1)
maybe_nickname = parts[0].rstrip(':,') if parts else ''
maybe_nickname = self.canonicalize_recipient(maybe_nickname)
return maybe_nickname == nickname
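    # Illustrative example (values not from the source): for
    #   msg = IrcMessage('user!ident@host', 'PRIVMSG', '#chan', 'vumibot: hello')
    # msg.channel() returns '#chan' and msg.addressed_to('VumiBot') is True, because the
    # leading 'vumibot:' token is stripped of ':' and lower-cased before comparison.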
class VumiBotProtocol(irc.IRCClient):
"""An IRC bot that bridges IRC to Vumi."""
def __init__(self, nickname, channels, irc_transport):
self.connected = False
self.nickname = nickname
self.channels = channels
self.irc_transport = irc_transport
def publish_message(self, irc_msg):
self.irc_transport.handle_inbound_irc_message(irc_msg)
def consume_message(self, irc_msg):
recipient = irc_msg.recipient.encode('utf8')
content = irc_msg.content.encode('utf8')
if irc_msg.command == 'ACTION':
self.describe(recipient, content)
else:
self.msg(recipient, content)
# connecting and disconnecting from server
def connectionMade(self):
irc.IRCClient.connectionMade(self)
self.connected = True
log.msg("Connected (nickname is: %s)" % (self.nickname,))
def connectionLost(self, reason):
irc.IRCClient.connectionLost(self, reason)
self.connected = False
log.msg("Disconnected (nickname was: %s)." % (self.nickname,))
# callbacks for events
def signedOn(self):
"""Called when bot has succesfully signed on to server."""
log.msg("Attempting to join channels: %r" % (self.channels,))
for channel in self.channels:
self.join(channel)
def joined(self, channel):
"""This will get called when the bot joins the channel."""
log.msg("Joined %r" % (channel,))
def privmsg(self, sender, recipient, message):
"""This will get called when the bot receives a message."""
irc_msg = IrcMessage(sender, 'PRIVMSG', recipient, message,
self.nickname)
self.publish_message(irc_msg)
def noticed(self, sender, recipient, message):
"""This will get called when the bot receives a notice."""
irc_msg = IrcMessage(sender, 'NOTICE', recipient, message,
self.nickname)
self.publish_message(irc_msg)
def action(self, sender, recipient, message):
"""This will get called when the bot sees someone do an action."""
irc_msg = IrcMessage(sender, 'ACTION', recipient, message,
self.nickname)
self.publish_message(irc_msg)
# irc callbacks
def irc_NICK(self, prefix, params):
"""Called when an IRC user changes their nickname."""
old_nick = prefix.partition('!')[0]
new_nick = params[0]
log.msg("Nick changed from %r to %r" % (old_nick, new_nick))
# For fun, override the method that determines how a nickname is changed on
# collisions. The default method appends an underscore.
def alterCollidedNick(self, nickname):
"""
Generate an altered version of a nickname that caused a collision in an
effort to create an unused related name for subsequent registration.
"""
return nickname + '^'
class VumiBotFactory(protocol.ClientFactory):
"""A factory for :class:`VumiBotClient` instances.
A new protocol instance will be created each time we connect to
the server.
"""
# the class of the protocol to build when new connection is made
protocol = VumiBotProtocol
def __init__(self, vumibot_args):
self.vumibot_args = vumibot_args
self.irc_server = None
self.vumibot = None
def format_server_address(self, addr):
# getattr is used in case someone connects to an
# endpoint that isn't an IPv4 or IPv6 endpoint.
return "%s:%s" % (
getattr(addr, 'host', 'unknown'),
getattr(addr, 'port', 'unknown')
)
def buildProtocol(self, addr):
self.irc_server = self.format_server_address(addr)
self.vumibot = self.protocol(*self.vumibot_args)
return self.vumibot
class IrcConfig(Transport.CONFIG_CLASS):
"""
IRC transport config.
"""
twisted_endpoint = ConfigClientEndpoint(
"Endpoint to connect to the IRC server on.",
fallbacks=[ClientEndpointFallback('network', 'port')],
required=True, static=True)
nickname = ConfigText(
"IRC nickname for the transport IRC client to use.",
required=True, static=True)
channels = ConfigList(
"List of channels to join.",
default=(), static=True)
# TODO: Deprecate these fields when confmodel#5 is done.
network = ConfigText(
"*DEPRECATED* 'network' and 'port' fields may be used in place of the"
" 'twisted_endpoint' field.", static=True)
port = ConfigInt(
"*DEPRECATED* 'network' and 'port' fields may be used in place of the"
" 'twisted_endpoint' field.", static=True, default=6667)
class IrcTransport(Transport):
"""
IRC based transport.
"""
CONFIG_CLASS = IrcConfig
factory = None
service = None
def setup_transport(self):
config = self.get_static_config()
self.factory = VumiBotFactory((config.nickname, config.channels,
self))
self.service = ReconnectingClientService(
config.twisted_endpoint, self.factory)
self.service.startService()
@inlineCallbacks
def teardown_transport(self):
if self.service is not None:
yield self.service.stopService()
def handle_inbound_irc_message(self, irc_msg):
irc_server = self.factory.irc_server
irc_channel = irc_msg.channel()
nickname = irc_msg.nickname
to_addr = None
content = irc_msg.content
if irc_channel is None:
# This is a direct message, not a channel message.
to_addr = irc_msg.recipient
elif irc_msg.addressed_to(nickname):
# This is a channel message, but we've been mentioned by name.
to_addr = nickname
# Strip the name prefix, so workers don't have to handle it.
content = (content.split(None, 1) + [''])[1]
message_dict = {
'to_addr': to_addr,
'from_addr': irc_msg.sender,
'group': irc_channel,
'content': content,
'transport_name': self.transport_name,
'transport_type': self.config.get('transport_type', 'irc'),
'helper_metadata': {
'irc': {
'transport_nickname': nickname,
'addressed_to_transport':
irc_msg.addressed_to(nickname),
'irc_server': irc_server,
'irc_channel': irc_channel,
'irc_command': irc_msg.command,
},
},
'transport_metadata': {
'irc_channel': irc_channel,
},
}
self.publish_message(**message_dict)
@inlineCallbacks
def handle_outbound_message(self, msg):
vumibot = self.factory.vumibot
if vumibot is None or not vumibot.connected:
raise TemporaryFailure("IrcTransport not connected.")
irc_metadata = msg['helper_metadata'].get('irc', {})
transport_metadata = msg['transport_metadata']
irc_command = irc_metadata.get('irc_command', 'PRIVMSG')
# Continue to support pre-group-chat hackery.
irc_channel = msg.get('group') or transport_metadata.get('irc_channel')
recipient = irc_channel if irc_channel is not None else msg['to_addr']
content = msg['content']
if irc_channel and msg['to_addr'] and (irc_command != 'ACTION'):
# We have a directed channel message, so prefix with the nick.
content = "%s: %s" % (msg['to_addr'], content)
irc_msg = IrcMessage(vumibot.nickname, irc_command, recipient, content)
vumibot.consume_message(irc_msg)
# intentionally duplicate message id in sent_message_id since
# IRC doesn't have its own message ids.
yield self.publish_ack(user_message_id=msg['message_id'],
sent_message_id=msg['message_id'])
|
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Controller objects that control the context in which chrome runs.
This is responsible for the setup necessary for launching chrome, and for
creating a DevToolsConnection. There are remote device and local
desktop-specific versions.
"""
import contextlib
import copy
import datetime
import errno
import logging
import os
import platform
import shutil
import socket
import subprocess
import sys
import tempfile
import time
import traceback
import psutil
import chrome_cache
import common_util
import device_setup
import devtools_monitor
import emulation
from options import OPTIONS
_SRC_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
_CATAPULT_DIR = os.path.join(_SRC_DIR, 'third_party', 'catapult')
sys.path.append(os.path.join(_CATAPULT_DIR, 'devil'))
from devil.android import device_errors
from devil.android.sdk import intent
sys.path.append(
os.path.join(_CATAPULT_DIR, 'telemetry', 'third_party', 'websocket-client'))
import websocket
class ChromeControllerMetadataGatherer(object):
"""Gather metadata for the ChromeControllerBase."""
def __init__(self):
self._chromium_commit = None
def GetMetadata(self):
"""Gets metadata to update in the ChromeControllerBase"""
if self._chromium_commit is None:
def _GitCommand(subcmd):
return subprocess.check_output(['git', '-C', _SRC_DIR] + subcmd).strip()
try:
self._chromium_commit = _GitCommand(['merge-base', 'master', 'HEAD'])
if self._chromium_commit != _GitCommand(['rev-parse', 'HEAD']):
self._chromium_commit = 'unknown'
except subprocess.CalledProcessError:
self._chromium_commit = 'git_error'
return {
'chromium_commit': self._chromium_commit,
'date': datetime.datetime.utcnow().isoformat(),
'seconds_since_epoch': time.time()
}
class ChromeControllerInternalError(Exception):
pass
def _AllocateTcpListeningPort():
"""Allocates a TCP listening port.
  Note: This function is inherently racy at the OS level, because the returned
  port may be re-used by another running process before the caller binds to it.
"""
temp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
temp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
temp_socket.bind(('', 0))
return temp_socket.getsockname()[1]
finally:
temp_socket.close()
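# Illustrative usage sketch (the concrete port number is OS-chosen):
#   port = _AllocateTcpListeningPort()   # e.g. 54321; the temporary socket is already closed,
#   # so another process may still grab the port before it is actually forwarded to.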
class ChromeControllerError(Exception):
"""Chrome error with detailed log.
Note:
    Some of these errors might be known intermittent errors that can usually be
    retried by the caller after re-doing any specific setup.
"""
_INTERMITTENT_WHITE_LIST = {websocket.WebSocketTimeoutException,
devtools_monitor.DevToolsConnectionTargetCrashed}
_PASSTHROUGH_WHITE_LIST = (MemoryError, SyntaxError)
def __init__(self, log):
"""Constructor
Args:
log: String containing the log of the running Chrome instance that was
running. It will be interleaved with any other running Android
package.
"""
self.error_type, self.error_value, self.error_traceback = sys.exc_info()
super(ChromeControllerError, self).__init__(repr(self.error_value))
self.parent_stack = traceback.extract_stack()
self.log = log
def Dump(self, output):
"""Dumps the entire error's infos into file-like object."""
output.write('-' * 60 + ' {}:\n'.format(self.__class__.__name__))
output.write(repr(self) + '\n')
output.write('{} is {}known as intermittent.\n'.format(
self.error_type.__name__, '' if self.IsIntermittent() else 'NOT '))
output.write(
'-' * 60 + ' {}\'s full traceback:\n'.format(self.error_type.__name__))
output.write(''.join(traceback.format_list(self.parent_stack)))
traceback.print_tb(self.error_traceback, file=output)
output.write('-' * 60 + ' Begin log\n')
output.write(self.log)
output.write('-' * 60 + ' End log\n')
def IsIntermittent(self):
"""Returns whether the error is an known intermittent error."""
return self.error_type in self._INTERMITTENT_WHITE_LIST
def RaiseOriginal(self):
"""Raises the original exception that has caused <self>."""
raise self.error_type, self.error_value, self.error_traceback
class ChromeControllerBase(object):
"""Base class for all controllers.
Defines common operations but should not be created directly.
"""
METADATA_GATHERER = ChromeControllerMetadataGatherer()
DEVTOOLS_CONNECTION_ATTEMPTS = 10
DEVTOOLS_CONNECTION_ATTEMPT_INTERVAL_SECONDS = 1
def __init__(self):
self._chrome_args = [
        # Disable background network requests that may pollute WPR archive,
# pollute HTTP cache generation, and introduce noise in loading
# performance.
'--disable-background-networking',
'--disable-default-apps',
'--no-proxy-server',
# TODO(gabadie): Remove once crbug.com/354743 done.
'--safebrowsing-disable-auto-update',
        # Disables actions that chrome performs only on first run or at each
        # launch, which can interfere with page load performance, or even
        # block its execution by waiting for user input.
'--disable-fre',
'--no-default-browser-check',
'--no-first-run',
# Tests & dev-tools related stuff.
'--enable-test-events',
'--remote-debugging-port=%d' % OPTIONS.devtools_port,
# Detailed log.
'--enable-logging=stderr',
'--v=1',
]
self._wpr_attributes = None
self._metadata = {}
self._emulated_device = None
self._network_name = None
self._slow_death = False
def AddChromeArguments(self, args):
"""Add command-line arguments to the chrome execution."""
self._chrome_args.extend(args)
@contextlib.contextmanager
def Open(self):
"""Context that returns a connection/chrome instance.
Returns:
DevToolsConnection instance for which monitoring has been set up but not
started.
"""
raise NotImplementedError
def ChromeMetadata(self):
"""Return metadata such as emulation information.
Returns:
Metadata as JSON dictionary.
"""
return self._metadata
def GetDevice(self):
"""Returns an android device, or None if chrome is local."""
return None
def SetDeviceEmulation(self, device_name):
"""Set device emulation.
Args:
device_name: (str) Key from --devices_file.
"""
devices = emulation.LoadEmulatedDevices(file(OPTIONS.devices_file))
self._emulated_device = devices[device_name]
def SetNetworkEmulation(self, network_name):
"""Set network emulation.
Args:
network_name: (str) Key from emulation.NETWORK_CONDITIONS or None to
disable network emulation.
"""
assert network_name in emulation.NETWORK_CONDITIONS or network_name is None
self._network_name = network_name
def ResetBrowserState(self):
"""Resets the chrome's browser state."""
raise NotImplementedError
def PushBrowserCache(self, cache_path):
"""Pushes the HTTP chrome cache to the profile directory.
Caution:
      The chrome cache backend type differs according to the platform. On
      desktop, the cache backend type is `blockfile` versus `simple` on Android.
      This method assumes that you are pushing a cache with the correct backend
      type, and will NOT verify it for you.
Args:
cache_path: The directory's path containing the cache locally.
"""
raise NotImplementedError
def PullBrowserCache(self):
"""Pulls the HTTP chrome cache from the profile directory.
Returns:
Temporary directory containing all the browser cache. Caller will need to
remove this directory manually.
"""
raise NotImplementedError
def SetSlowDeath(self, slow_death=True):
"""Set to pause before final kill of chrome.
Gives time for caches to write.
Args:
slow_death: (bool) True if you want that which comes to all who live, to
be slow.
"""
self._slow_death = slow_death
@contextlib.contextmanager
def OpenWprHost(self, wpr_archive_path, record=False,
network_condition_name=None,
disable_script_injection=False,
out_log_path=None):
"""Opens a Web Page Replay host context.
Args:
      wpr_archive_path: host-side path of the WPR archive.
record: Enables or disables WPR archive recording.
network_condition_name: Network condition name available in
emulation.NETWORK_CONDITIONS.
      disable_script_injection: Disable the JavaScript file injections that
          fight against resource name entropy.
out_log_path: Path of the WPR host's log.
"""
raise NotImplementedError
def _StartConnection(self, connection):
"""This should be called after opening an appropriate connection."""
if self._emulated_device:
self._metadata.update(emulation.SetUpDeviceEmulationAndReturnMetadata(
connection, self._emulated_device))
if self._network_name:
network_condition = emulation.NETWORK_CONDITIONS[self._network_name]
logging.info('Set up network emulation %s (latency=%dms, down=%d, up=%d)'
% (self._network_name, network_condition['latency'],
network_condition['download'], network_condition['upload']))
emulation.SetUpNetworkEmulation(connection, **network_condition)
self._metadata['network_emulation'] = copy.copy(network_condition)
self._metadata['network_emulation']['name'] = self._network_name
else:
self._metadata['network_emulation'] = \
{k: 'disabled' for k in ['name', 'download', 'upload', 'latency']}
self._metadata.update(self.METADATA_GATHERER.GetMetadata())
logging.info('Devtools connection success')
def _GetChromeArguments(self):
"""Get command-line arguments for the chrome execution."""
chrome_args = self._chrome_args[:]
if self._wpr_attributes:
chrome_args.extend(self._wpr_attributes.chrome_args)
return chrome_args
class RemoteChromeController(ChromeControllerBase):
"""A controller for an android device, aka remote chrome instance."""
# An estimate of time to wait for the device to become idle after expensive
# operations, such as opening the launcher activity.
TIME_TO_IDLE_SECONDS = 2
def __init__(self, device):
"""Initialize the controller.
    Caution: The browser state might need to be manually reset.
Args:
device: (device_utils.DeviceUtils) an android device.
"""
assert device is not None, 'Should you be using LocalController instead?'
super(RemoteChromeController, self).__init__()
self._device = device
self._metadata['platform'] = {
'os': 'A-' + device.build_id,
'product_model': device.product_model
}
self._InitDevice()
def GetDevice(self):
"""Overridden android device."""
return self._device
@contextlib.contextmanager
def Open(self):
"""Overridden connection creation."""
if self._wpr_attributes:
assert self._wpr_attributes.chrome_env_override == {}, \
'Remote controller doesn\'t support chrome environment variables.'
package_info = OPTIONS.ChromePackage()
command_line_path = '/data/local/chrome-command-line'
self._device.ForceStop(package_info.package)
chrome_args = self._GetChromeArguments()
logging.info('Launching %s with flags: %s' % (package_info.package,
subprocess.list2cmdline(chrome_args)))
with device_setup.FlagReplacer(
self._device, command_line_path, self._GetChromeArguments()):
self._DismissCrashDialogIfNeeded()
start_intent = intent.Intent(
package=package_info.package, activity=package_info.activity,
data='about:blank')
self._device.adb.Logcat(clear=True, dump=True)
self._device.StartActivity(start_intent, blocking=True)
try:
for attempt_id in xrange(self.DEVTOOLS_CONNECTION_ATTEMPTS):
logging.info('Devtools connection attempt %d' % attempt_id)
# Adb forwarding does not provide a way to print the port number if
# it is allocated atomically by the OS by passing port=0, but we need
# dynamically allocated listening port here to handle parallel run on
# different devices.
host_side_port = _AllocateTcpListeningPort()
logging.info('Allocated host sided listening port for devtools '
'connection: %d', host_side_port)
try:
with device_setup.ForwardPort(
self._device, 'tcp:%d' % host_side_port,
'localabstract:chrome_devtools_remote'):
try:
connection = devtools_monitor.DevToolsConnection(
OPTIONS.devtools_hostname, host_side_port)
self._StartConnection(connection)
except socket.error as e:
if e.errno != errno.ECONNRESET:
raise
time.sleep(self.DEVTOOLS_CONNECTION_ATTEMPT_INTERVAL_SECONDS)
continue
yield connection
if self._slow_death:
self._device.adb.Shell('am start com.google.android.launcher')
time.sleep(self.TIME_TO_IDLE_SECONDS)
break
except device_errors.AdbCommandFailedError as error:
_KNOWN_ADB_FORWARDER_FAILURES = [
'cannot bind to socket: Address already in use',
'cannot rebind existing socket: Resource temporarily unavailable']
for message in _KNOWN_ADB_FORWARDER_FAILURES:
if message in error.message:
break
else:
raise
continue
else:
raise ChromeControllerInternalError(
'Failed to connect to Chrome devtools after {} '
'attempts.'.format(self.DEVTOOLS_CONNECTION_ATTEMPTS))
except ChromeControllerError._PASSTHROUGH_WHITE_LIST:
raise
except Exception:
logcat = ''.join([l + '\n' for l in self._device.adb.Logcat(dump=True)])
raise ChromeControllerError(log=logcat)
finally:
self._device.ForceStop(package_info.package)
self._DismissCrashDialogIfNeeded()
def ResetBrowserState(self):
"""Override resetting Chrome local state."""
logging.info('Resetting Chrome local state')
package = OPTIONS.ChromePackage().package
# Remove the Chrome Profile and the various disk caches. Other parts
# theoretically should not affect loading performance. Also remove the tab
# state to prevent it from growing infinitely. [:D]
for directory in ['app_chrome/Default', 'cache', 'app_chrome/ShaderCache',
'app_tabs']:
cmd = ['rm', '-rf', '/data/data/{}/{}'.format(package, directory)]
self._device.adb.Shell(subprocess.list2cmdline(cmd))
def RebootDevice(self):
"""Reboot the remote device."""
assert self._wpr_attributes is None, 'WPR should be closed before rebooting'
logging.warning('Rebooting the device')
device_setup.Reboot(self._device)
self._InitDevice()
def PushBrowserCache(self, cache_path):
"""Override for chrome cache pushing."""
logging.info('Push cache from %s' % cache_path)
chrome_cache.PushBrowserCache(self._device, cache_path)
def PullBrowserCache(self):
"""Override for chrome cache pulling."""
assert self._slow_death, 'Must do SetSlowDeath() before opening chrome.'
logging.info('Pull cache from device')
return chrome_cache.PullBrowserCache(self._device)
@contextlib.contextmanager
def OpenWprHost(self, wpr_archive_path, record=False,
network_condition_name=None,
disable_script_injection=False,
out_log_path=None):
"""Starts a WPR host, overrides Chrome flags until contextmanager exit."""
assert not self._wpr_attributes, 'WPR is already running.'
with device_setup.RemoteWprHost(self._device, wpr_archive_path,
record=record,
network_condition_name=network_condition_name,
disable_script_injection=disable_script_injection,
out_log_path=out_log_path) as wpr_attributes:
self._wpr_attributes = wpr_attributes
yield
self._wpr_attributes = None
def _DismissCrashDialogIfNeeded(self):
for _ in xrange(10):
if not self._device.DismissCrashDialogIfNeeded():
break
def _InitDevice(self):
self._device.EnableRoot()
class LocalChromeController(ChromeControllerBase):
"""Controller for a local (desktop) chrome instance."""
def __init__(self):
"""Initialize the controller.
    Caution: The browser state might need to be manually reset.
"""
super(LocalChromeController, self).__init__()
if OPTIONS.no_sandbox:
self.AddChromeArguments(['--no-sandbox'])
self._profile_dir = OPTIONS.local_profile_dir
self._using_temp_profile_dir = self._profile_dir is None
if self._using_temp_profile_dir:
self._profile_dir = tempfile.mkdtemp(suffix='.profile')
self._chrome_env_override = {}
self._metadata['platform'] = {
'os': platform.system()[0] + '-' + platform.release(),
'product_model': 'unknown'
}
def __del__(self):
if self._using_temp_profile_dir:
shutil.rmtree(self._profile_dir)
@staticmethod
def KillChromeProcesses():
"""Kills all the running instances of Chrome.
Returns: (int) The number of processes that were killed.
"""
killed_count = 0
chrome_path = OPTIONS.LocalBinary('chrome')
for process in psutil.process_iter():
try:
process_bin_path = None
# In old versions of psutil, process.exe is a member, in newer ones it's
# a method.
if type(process.exe) == str:
process_bin_path = process.exe
else:
process_bin_path = process.exe()
if os.path.abspath(process_bin_path) == os.path.abspath(chrome_path):
process.terminate()
killed_count += 1
try:
process.wait(timeout=10)
except psutil.TimeoutExpired:
process.kill()
except psutil.AccessDenied:
pass
except psutil.NoSuchProcess:
pass
return killed_count
def SetChromeEnvOverride(self, env):
"""Set the environment for Chrome.
Args:
env: (dict) Environment.
"""
self._chrome_env_override = env
@contextlib.contextmanager
def Open(self):
"""Overridden connection creation."""
# Kill all existing Chrome instances.
killed_count = LocalChromeController.KillChromeProcesses()
if killed_count > 0:
logging.warning('Killed existing Chrome instance.')
chrome_cmd = [OPTIONS.LocalBinary('chrome')]
chrome_cmd.extend(self._GetChromeArguments())
# Force use of simple cache.
chrome_cmd.append('--use-simple-cache-backend=on')
chrome_cmd.append('--user-data-dir=%s' % self._profile_dir)
    # Navigates to about:blank for a couple of reasons:
# - To find the correct target descriptor at devtool connection;
# - To avoid cache and WPR pollution by the NTP.
chrome_cmd.append('about:blank')
tmp_log = \
tempfile.NamedTemporaryFile(prefix="chrome_controller_", suffix='.log')
chrome_process = None
try:
chrome_env_override = self._chrome_env_override.copy()
if self._wpr_attributes:
chrome_env_override.update(self._wpr_attributes.chrome_env_override)
chrome_env = os.environ.copy()
chrome_env.update(chrome_env_override)
# Launch Chrome.
logging.info(common_util.GetCommandLineForLogging(chrome_cmd,
chrome_env_override))
chrome_process = subprocess.Popen(chrome_cmd, stdout=tmp_log.file,
stderr=tmp_log.file, env=chrome_env)
# Attempt to connect to Chrome's devtools
for attempt_id in xrange(self.DEVTOOLS_CONNECTION_ATTEMPTS):
logging.info('Devtools connection attempt %d' % attempt_id)
process_result = chrome_process.poll()
if process_result is not None:
raise ChromeControllerInternalError(
'Unexpected Chrome exit: {}'.format(process_result))
try:
connection = devtools_monitor.DevToolsConnection(
OPTIONS.devtools_hostname, OPTIONS.devtools_port)
break
except socket.error as e:
if e.errno != errno.ECONNREFUSED:
raise
time.sleep(self.DEVTOOLS_CONNECTION_ATTEMPT_INTERVAL_SECONDS)
else:
raise ChromeControllerInternalError(
'Failed to connect to Chrome devtools after {} '
'attempts.'.format(self.DEVTOOLS_CONNECTION_ATTEMPTS))
# Start and yield the devtool connection.
self._StartConnection(connection)
yield connection
if self._slow_death:
connection.Close()
chrome_process.wait()
chrome_process = None
except ChromeControllerError._PASSTHROUGH_WHITE_LIST:
raise
except Exception:
raise ChromeControllerError(log=open(tmp_log.name).read())
finally:
if OPTIONS.local_noisy:
sys.stderr.write(open(tmp_log.name).read())
del tmp_log
if chrome_process:
try:
chrome_process.kill()
except OSError:
pass # Chrome is already dead.
def ResetBrowserState(self):
"""Override for chrome state reseting."""
assert os.path.isdir(self._profile_dir)
logging.info('Reset chrome\'s profile')
# Don't do a rmtree(self._profile_dir) because it might be a temp directory.
for filename in os.listdir(self._profile_dir):
path = os.path.join(self._profile_dir, filename)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def PushBrowserCache(self, cache_path):
"""Override for chrome cache pushing."""
self._EnsureProfileDirectory()
profile_cache_path = self._GetCacheDirectoryPath()
logging.info('Copy cache directory from %s to %s.' % (
cache_path, profile_cache_path))
chrome_cache.CopyCacheDirectory(cache_path, profile_cache_path)
def PullBrowserCache(self):
"""Override for chrome cache pulling."""
cache_path = tempfile.mkdtemp()
profile_cache_path = self._GetCacheDirectoryPath()
logging.info('Copy cache directory from %s to %s.' % (
profile_cache_path, cache_path))
chrome_cache.CopyCacheDirectory(profile_cache_path, cache_path)
return cache_path
@contextlib.contextmanager
def OpenWprHost(self, wpr_archive_path, record=False,
network_condition_name=None,
disable_script_injection=False,
out_log_path=None):
"""Override for WPR context."""
assert not self._wpr_attributes, 'WPR is already running.'
with device_setup.LocalWprHost(wpr_archive_path,
record=record,
network_condition_name=network_condition_name,
disable_script_injection=disable_script_injection,
out_log_path=out_log_path) as wpr_attributes:
self._wpr_attributes = wpr_attributes
yield
self._wpr_attributes = None
def _EnsureProfileDirectory(self):
if (not os.path.isdir(self._profile_dir) or
os.listdir(self._profile_dir) == []):
# Launch chrome so that it populates the profile directory.
with self.Open():
pass
assert os.path.isdir(self._profile_dir)
assert os.path.isdir(os.path.dirname(self._GetCacheDirectoryPath()))
def _GetCacheDirectoryPath(self):
return os.path.join(self._profile_dir, 'Default', 'Cache')
|
|
import json
from math import ceil
from twilio.base import values
from twilio.base.exceptions import TwilioRestException
class Version(object):
"""
Represents an API version.
"""
def __init__(self, domain):
"""
:param Domain domain:
:return:
"""
self.domain = domain
self.version = None
def absolute_url(self, uri):
"""
Turns a relative uri into an absolute url.
"""
return self.domain.absolute_url(self.relative_uri(uri))
def relative_uri(self, uri):
"""
Turns a relative uri into a versioned relative uri.
"""
return '{}/{}'.format(self.version.strip('/'), uri.strip('/'))
def request(self, method, uri, params=None, data=None, headers=None,
auth=None, timeout=None, allow_redirects=False):
"""
Make an HTTP request.
"""
url = self.relative_uri(uri)
return self.domain.request(
method,
url,
params=params,
data=data,
headers=headers,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects
)
@classmethod
def exception(cls, method, uri, response, message):
"""
Wraps an exceptional response in a `TwilioRestException`.
"""
# noinspection PyBroadException
try:
error_payload = json.loads(response.text)
if 'message' in error_payload:
message = '{}: {}'.format(message, error_payload['message'])
details = error_payload.get('details')
code = error_payload.get('code', response.status_code)
return TwilioRestException(response.status_code, uri, message, code, method, details)
except Exception:
return TwilioRestException(response.status_code, uri, message, response.status_code,
method)
def fetch(self, method, uri, params=None, data=None, headers=None, auth=None, timeout=None,
allow_redirects=False):
"""
Fetch a resource instance.
"""
response = self.request(
method,
uri,
params=params,
data=data,
headers=headers,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects,
)
# Note that 3XX response codes are allowed for fetches.
if response.status_code < 200 or response.status_code >= 400:
raise self.exception(method, uri, response, 'Unable to fetch record')
return json.loads(response.text)
def update(self, method, uri, params=None, data=None, headers=None, auth=None, timeout=None,
allow_redirects=False):
"""
Update a resource instance.
"""
response = self.request(
method,
uri,
params=params,
data=data,
headers=headers,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects,
)
if response.status_code < 200 or response.status_code >= 300:
raise self.exception(method, uri, response, 'Unable to update record')
return json.loads(response.text)
def delete(self, method, uri, params=None, data=None, headers=None, auth=None, timeout=None,
allow_redirects=False):
"""
Delete a resource.
"""
response = self.request(
method,
uri,
params=params,
data=data,
headers=headers,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects,
)
if response.status_code < 200 or response.status_code >= 300:
raise self.exception(method, uri, response, 'Unable to delete record')
return response.status_code == 204
def read_limits(self, limit=None, page_size=None):
"""
        Takes a limit on the max number of records to read and a max page_size,
        and returns the paging parameters to use (page_size defaults to limit
        when only limit is given).
:param int limit: Max number of records to read.
:param int page_size: Max page size.
:return dict: A dictionary of paging limits.
"""
if limit is not None and page_size is None:
page_size = limit
return {
'limit': limit or values.unset,
'page_size': page_size or values.unset,
}
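        # Illustrative examples (values only, not from the source):
        #   read_limits(limit=500)                 -> {'limit': 500, 'page_size': 500}
        #   read_limits(limit=500, page_size=50)   -> {'limit': 500, 'page_size': 50}
        #   read_limits()                          -> {'limit': unset, 'page_size': unset}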
def page(self, method, uri, params=None, data=None, headers=None, auth=None, timeout=None,
allow_redirects=False):
"""
Makes an HTTP request.
"""
return self.request(
method,
uri,
params=params,
data=data,
headers=headers,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects,
)
def stream(self, page, limit=None, page_limit=None):
"""
        Generates records one at a time from a page, stopping at prescribed limits.
:param Page page: The page to stream.
:param int limit: The max number of records to read.
:param int page_limit: The max number of pages to read.
"""
current_record = 1
current_page = 1
while page is not None:
for record in page:
yield record
current_record += 1
if limit and limit is not values.unset and limit < current_record:
return
current_page += 1
if page_limit and page_limit is not values.unset and page_limit < current_page:
return
page = page.next_page()
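        # For example (illustrative): stream(first_page, limit=3) yields at most three
        # records even if the page holds more, and stream(first_page, page_limit=1)
        # never calls next_page().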
def create(self, method, uri, params=None, data=None, headers=None, auth=None, timeout=None,
allow_redirects=False):
"""
Create a resource instance.
"""
response = self.request(
method,
uri,
params=params,
data=data,
headers=headers,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects,
)
if response.status_code < 200 or response.status_code >= 300:
raise self.exception(method, uri, response, 'Unable to create record')
return json.loads(response.text)
|
|
from __future__ import unicode_literals
import datetime
from decimal import Decimal
from django.core.exceptions import FieldError
from django.db.models import (
Sum, Count,
F, Value, Func,
IntegerField, BooleanField, CharField)
from django.db.models.fields import FieldDoesNotExist
from django.test import TestCase
from django.utils import six
from .models import Author, Book, Store, DepartmentStore, Company, Employee
def cxOracle_513_py3_bug(func):
"""
cx_Oracle versions up to and including 5.1.3 have a bug with respect to
string handling under Python3 (essentially, they treat Python3 strings
as Python2 strings rather than unicode). This makes some tests here
fail under Python 3 -- so we mark them as expected failures.
See https://code.djangoproject.com/ticket/23843, in particular comment 6,
which points to https://bitbucket.org/anthony_tuininga/cx_oracle/issue/6/
"""
from unittest import expectedFailure
from django.db import connection
if connection.vendor == 'oracle' and six.PY3 and connection.Database.version <= '5.1.3':
return expectedFailure(func)
else:
return func
class NonAggregateAnnotationTestCase(TestCase):
fixtures = ["annotations.json"]
def test_basic_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()))
for book in books:
self.assertEqual(book.is_book, 1)
def test_basic_f_annotation(self):
books = Book.objects.annotate(another_rating=F('rating'))
for book in books:
self.assertEqual(book.another_rating, book.rating)
def test_joined_annotation(self):
books = Book.objects.select_related('publisher').annotate(
num_awards=F('publisher__num_awards'))
for book in books:
self.assertEqual(book.num_awards, book.publisher.num_awards)
def test_annotate_with_aggregation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()),
rating_count=Count('rating'))
for book in books:
self.assertEqual(book.is_book, 1)
self.assertEqual(book.rating_count, 1)
def test_aggregate_over_annotation(self):
agg = Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age'))
other_agg = Author.objects.aggregate(age_sum=Sum('age'))
self.assertEqual(agg['otherage_sum'], other_agg['age_sum'])
def test_filter_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField())
).filter(is_book=1)
for book in books:
self.assertEqual(book.is_book, 1)
def test_filter_annotation_with_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=3.5)
for book in books:
self.assertEqual(book.other_rating, 3.5)
def test_filter_annotation_with_double_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=F('rating'))
for book in books:
self.assertEqual(book.other_rating, book.rating)
def test_filter_agg_with_double_f(self):
books = Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('sum_rating'))
for book in books:
self.assertEqual(book.sum_rating, book.rating)
def test_filter_wrong_annotation(self):
with six.assertRaisesRegex(self, FieldError, "Cannot resolve keyword .*"):
list(Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('nope')))
def test_update_with_annotation(self):
book_preupdate = Book.objects.get(pk=2)
Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating'))
book_postupdate = Book.objects.get(pk=2)
self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating)
def test_annotation_with_m2m(self):
books = Book.objects.annotate(author_age=F('authors__age')).filter(pk=1).order_by('author_age')
self.assertEqual(books[0].author_age, 34)
self.assertEqual(books[1].author_age, 35)
def test_annotation_reverse_m2m(self):
books = Book.objects.annotate(
store_name=F('store__name')).filter(
name='Practical Django Projects').order_by(
'store_name')
self.assertQuerysetEqual(
books, [
'Amazon.com',
'Books.com',
'Mamma and Pappa\'s Books'
],
lambda b: b.store_name
)
def test_values_annotation(self):
"""
Annotations can reference fields in a values clause,
and contribute to an existing values clause.
"""
# annotate references a field in values()
qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1)
book = qs.get(pk=1)
self.assertEqual(book['rating'] - 1, book['other_rating'])
# filter refs the annotated value
book = qs.get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
# can annotate an existing values with a new field
book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
self.assertEqual(book['other_isbn'], '155860191')
def test_defer_annotation(self):
"""
Deferred attributes can be referenced by an annotation,
but they are not themselves deferred, and cannot be deferred.
"""
qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1)
with self.assertNumQueries(2):
book = qs.get(other_rating=4)
self.assertEqual(book.rating, 5)
self.assertEqual(book.other_rating, 4)
with six.assertRaisesRegex(self, FieldDoesNotExist, "\w has no field named u?'other_rating'"):
book = qs.defer('other_rating').get(other_rating=4)
def test_mti_annotations(self):
"""
Fields on an inherited model can be referenced by an
annotated field.
"""
d = DepartmentStore.objects.create(
name='Angus & Robinson',
original_opening=datetime.date(2014, 3, 8),
friday_night_closing=datetime.time(21, 00, 00),
chain='Westfield'
)
books = Book.objects.filter(rating__gt=4)
for b in books:
d.books.add(b)
qs = DepartmentStore.objects.annotate(
other_name=F('name'),
other_chain=F('chain'),
is_open=Value(True, BooleanField()),
book_isbn=F('books__isbn')
).select_related('store').order_by('book_isbn').filter(chain='Westfield')
self.assertQuerysetEqual(
qs, [
('Angus & Robinson', 'Westfield', True, '155860191'),
('Angus & Robinson', 'Westfield', True, '159059725')
],
lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn)
)
def test_column_field_ordering(self):
"""
Test that columns are aligned in the correct order for
resolve_columns. This test will fail on mysql if column
ordering is out. Column fields should be aligned as:
1. extra_select
2. model_fields
3. annotation_fields
4. model_related_fields
"""
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
self.assertQuerysetEqual(
qs.order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
def test_column_field_ordering_with_deferred(self):
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
# and we respect deferred columns!
self.assertQuerysetEqual(
qs.defer('age').order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
@cxOracle_513_py3_bug
def test_custom_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE')
).order_by('name')
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'),
('Django Software Foundation', 'No Tag'),
('Google', 'Do No Evil'),
('Yahoo', 'Internet Company')
],
lambda c: (c.name, c.tagline)
)
@cxOracle_513_py3_bug
def test_custom_functions_can_ref_other_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
class Lower(Func):
function = 'LOWER'
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE')
).annotate(
tagline_lower=Lower(F('tagline'), output_field=CharField())
).order_by('name')
# LOWER function supported by:
# oracle, postgres, mysql, sqlite, sqlserver
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'.lower()),
('Django Software Foundation', 'No Tag'.lower()),
('Google', 'Do No Evil'.lower()),
('Yahoo', 'Internet Company'.lower())
],
lambda c: (c.name, c.tagline_lower)
)
|
|
# -*- coding: utf-8 -*-
"""
tests/tests.py
======================
This is a testing file for Flask-Webhelpers.
:copyright: 2015 Sudheer Satyanarayana
:license: BSD, see LICENSE for details
"""
from __future__ import with_statement
import os
import flask
import unittest
from flask_webhelpers import ObjectGrid
class MyClass(object):
pass
class TestFlaskWebhelpers(unittest.TestCase):
def get_users(self):
my_list = []
for i in xrange(100):
my_obj = MyClass()
my_obj.user_id = i
my_obj.username = 'username_%s' % i
my_list.append(my_obj)
return my_list
def setUp(self):
app = flask.Flask(__name__)
from flask import request
app.config['TESTING'] = True
@app.route("/")
def first():
my_list = self.get_users()
grid = ObjectGrid(
request=request,
itemlist=my_list,
columns=['user_id', 'username']
)
rendered_grid = str(grid)
return rendered_grid
self.app = app
@app.route("/second")
def second():
my_list = self.get_users()
grid = ObjectGrid(
request=request,
itemlist=my_list,
columns=['user_id', 'username']
)
rendered_grid = str(grid)
return rendered_grid
@app.route("/third/<int:my_custom_arg>")
def third(my_custom_arg):
my_list = self.get_users()
grid = ObjectGrid(
request=request,
itemlist=my_list,
columns=['user_id', 'username']
)
grid.exclude_ordering = []
rendered_grid = str(grid)
return rendered_grid
@app.route("/fourth")
def fourth():
my_list = self.get_users()
grid = ObjectGrid(
request=request,
itemlist=my_list,
columns=['user_id', 'username']
)
grid.exclude_ordering = []
rendered_grid = str(grid)
return rendered_grid
@app.route("/fifth")
def fifth():
my_list = self.get_users()
grid = ObjectGrid(
request=request,
itemlist=my_list,
columns=['user_id', 'username', 'action']
)
grid.column_formats = {
'action': lambda column_number, i, item:
'<td>Custom Content</td>'
}
grid.exclude_ordering = ['action']
rendered_grid = str(grid)
return rendered_grid
@app.route("/sixth")
def sixth():
my_list = self.get_users()
grid = ObjectGrid(
request=request,
itemlist=my_list,
columns=['user_id', 'username']
)
grid.exclude_ordering = []
rendered_grid = str(grid)
return rendered_grid
@app.route("/seventh/<int:some_id>")
def seventh(some_id):
my_list = self.get_users()
grid = ObjectGrid(
request=request,
itemlist=my_list,
columns=['user_id', 'username']
)
grid.exclude_ordering = []
rendered_grid = str(grid)
return rendered_grid
self.app = app
def test_basic(self):
client = self.app.test_client()
rv = client.get('/')
assert ('<tr class="odd r1"><td class="c1">0</td><td class="c2">'
'username_0</td></tr>') in rv.data
def test_header_th(self):
client = self.app.test_client()
rv = client.get('/second')
assert ('<tr class="header"><th class="c1 user_id">'
'User Id</th>') in rv.data
def test_header_view_args(self):
client = self.app.test_client()
rv = client.get('/third/10')
assert ('<th class="c1 ordering user_id">'
'<a href="/third/10?order_col=user_id&order_dir=asc">'
'User Id</a>'
'<span class="marker"></span></th>') in rv.data
def test_header_request_args(self):
client = self.app.test_client()
rv = client.get('/fourth?my_custom_arg=23')
assert ('<th class="c1 ordering user_id">'
'<a href="/fourth?order_col=user_id&my_custom_arg=23&'
'order_dir=asc">User Id</a>'
'<span class="marker"></span></th>') in rv.data
def test_custom_col(self):
client = self.app.test_client()
rv = client.get('/fifth')
assert '<th class="c3 action">Action</th>' in rv.data
def test_header_link(self):
client = self.app.test_client()
rv = client.get('/sixth')
assert ('<th class="c1 ordering user_id">'
'<a href="/sixth?order_col=user_id&order_dir=asc">'
'User Id</a>'
'<span class="marker"></span></th>') in rv.data
def test_view_args_request_args_combined(self):
client = self.app.test_client()
rv = client.get('/seventh/100?some_param=some_value')
assert ('<a href="/seventh/100?order_col=user_id'
'&some_param=some_value&order_dir=asc">') in rv.data
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
"""unittest-xml-reporting is a PyUnit-based TestRunner that can export test
results to XML files that can be consumed by a wide range of tools, such as
build systems, IDEs and Continuous Integration servers.
This module provides the XMLTestRunner class, which is heavily based on the
default TextTestRunner. This makes the XMLTestRunner very simple to use.
The script below, adapted from the unittest documentation, shows how to use
XMLTestRunner in a very simple way. In fact, the only difference between this
script and the original one is the last line:
import random
import unittest
import xmlrunner
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.seq = range(10)
def test_shuffle(self):
# make sure the shuffled sequence does not lose any elements
random.shuffle(self.seq)
self.seq.sort()
self.assertEqual(self.seq, range(10))
def test_choice(self):
element = random.choice(self.seq)
self.assert_(element in self.seq)
def test_sample(self):
self.assertRaises(ValueError, random.sample, self.seq, 20)
for element in random.sample(self.seq, 5):
self.assert_(element in self.seq)
if __name__ == '__main__':
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'))
"""
import os
import sys
import time
from unittest import TestResult, _TextTestResult, TextTestRunner
from cStringIO import StringIO
import xml.dom.minidom
class XMLDocument(xml.dom.minidom.Document):
def createCDATAOrText(self, data):
if ']]>' in data:
return self.createTextNode(data)
return self.createCDATASection(data)
class _TestInfo(object):
"""This class is used to keep useful information about the execution of a
test method.
"""
# Possible test outcomes
(SUCCESS, FAILURE, ERROR) = range(3)
def __init__(self, test_result, test_method, outcome=SUCCESS, err=None):
"Create a new instance of _TestInfo."
self.test_result = test_result
self.test_method = test_method
self.outcome = outcome
self.err = err
self.stdout = test_result.stdout and test_result.stdout.getvalue().strip() or ''
self.stderr = test_result.stderr and test_result.stderr.getvalue().strip() or ''
def get_elapsed_time(self):
"""Return the time that shows how long the test method took to
execute.
"""
return self.test_result.stop_time - self.test_result.start_time
def get_description(self):
"Return a text representation of the test method."
return self.test_result.getDescription(self.test_method)
def get_error_info(self):
"""Return a text representation of an exception thrown by a test
method.
"""
if not self.err:
return ''
return self.test_result._exc_info_to_string(
self.err, self.test_method)
class _XMLTestResult(_TextTestResult):
"""A test result class that can express test results in a XML report.
Used by XMLTestRunner.
"""
def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
elapsed_times=True):
"Create a new instance of _XMLTestResult."
_TextTestResult.__init__(self, stream, descriptions, verbosity)
self.successes = []
self.callback = None
self.elapsed_times = elapsed_times
self.output_patched = False
def _prepare_callback(self, test_info, target_list, verbose_str,
short_str):
"""Append a _TestInfo to the given target list and sets a callback
method to be called by stopTest method.
"""
target_list.append(test_info)
def callback():
"""This callback prints the test method outcome to the stream,
as well as the elapsed time.
"""
# Ignore the elapsed times for a more reliable unit testing
if not self.elapsed_times:
self.start_time = self.stop_time = 0
if self.showAll:
self.stream.writeln('(%.3fs) %s' % \
(test_info.get_elapsed_time(), verbose_str))
elif self.dots:
self.stream.write(short_str)
self.callback = callback
def _patch_standard_output(self):
"""Replace the stdout and stderr streams with string-based streams
in order to capture the tests' output.
"""
if not self.output_patched:
(self.old_stdout, self.old_stderr) = (sys.stdout, sys.stderr)
self.output_patched = True
(sys.stdout, sys.stderr) = (self.stdout, self.stderr) = \
(StringIO(), StringIO())
def _restore_standard_output(self):
"Restore the stdout and stderr streams."
(sys.stdout, sys.stderr) = (self.old_stdout, self.old_stderr)
self.output_patched = False
def startTest(self, test):
"Called before execute each test method."
self._patch_standard_output()
self.start_time = time.time()
TestResult.startTest(self, test)
if self.showAll:
self.stream.write(' ' + self.getDescription(test))
self.stream.write(" ... ")
def stopTest(self, test):
"Called after execute each test method."
self._restore_standard_output()
_TextTestResult.stopTest(self, test)
self.stop_time = time.time()
if self.callback and callable(self.callback):
self.callback()
self.callback = None
def addSuccess(self, test):
"Called when a test executes successfully."
self._prepare_callback(_TestInfo(self, test),
self.successes, 'OK', '.')
def addFailure(self, test, err):
"Called when a test method fails."
self._prepare_callback(_TestInfo(self, test, _TestInfo.FAILURE, err),
self.failures, 'FAIL', 'F')
def addError(self, test, err):
"Called when a test method raises an error."
self._prepare_callback(_TestInfo(self, test, _TestInfo.ERROR, err),
self.errors, 'ERROR', 'E')
def printErrorList(self, flavour, errors):
"Write some information about the FAIL or ERROR to the stream."
for test_info in errors:
if isinstance(test_info, tuple):
test_info, exc_info = test_info
try:
t = test_info.get_elapsed_time()
except AttributeError:
t = 0
try:
descr = test_info.get_description()
except AttributeError:
try:
descr = test_info.getDescription()
except AttributeError:
descr = str(test_info)
try:
err_info = test_info.get_error_info()
except AttributeError:
err_info = str(test_info)
self.stream.writeln(self.separator1)
self.stream.writeln('%s [%.3fs]: %s' % (flavour, t, descr))
self.stream.writeln(self.separator2)
self.stream.writeln('%s' % err_info)
def _get_info_by_testcase(self):
"""This method organizes test results by TestCase module. This
information is used during report generation, where an XML report
will be generated for each TestCase.
"""
tests_by_testcase = {}
for tests in (self.successes, self.failures, self.errors):
for test_info in tests:
if not isinstance(test_info, _TestInfo):
print("Unexpected test result type: %r" % (test_info,))
continue
testcase = type(test_info.test_method)
# Ignore module name if it is '__main__'
module = testcase.__module__ + '.'
if module == '__main__.':
module = ''
testcase_name = module + testcase.__name__
if testcase_name not in tests_by_testcase:
tests_by_testcase[testcase_name] = []
tests_by_testcase[testcase_name].append(test_info)
return tests_by_testcase
def _report_testsuite(suite_name, tests, xml_document):
"Appends the testsuite section to the XML document."
testsuite = xml_document.createElement('testsuite')
xml_document.appendChild(testsuite)
testsuite.setAttribute('name', str(suite_name))
testsuite.setAttribute('tests', str(len(tests)))
testsuite.setAttribute('time', '%.3f' %
sum([e.get_elapsed_time() for e in tests]))
failures = len([1 for e in tests if e.outcome == _TestInfo.FAILURE])
testsuite.setAttribute('failures', str(failures))
errors = len([1 for e in tests if e.outcome == _TestInfo.ERROR])
testsuite.setAttribute('errors', str(errors))
return testsuite
_report_testsuite = staticmethod(_report_testsuite)
def _report_testcase(suite_name, test_result, xml_testsuite, xml_document):
"Appends a testcase section to the XML document."
testcase = xml_document.createElement('testcase')
xml_testsuite.appendChild(testcase)
testcase.setAttribute('classname', str(suite_name))
testcase.setAttribute('name', test_result.test_method.shortDescription()
or getattr(test_result.test_method, '_testMethodName',
str(test_result.test_method)))
testcase.setAttribute('time', '%.3f' % test_result.get_elapsed_time())
if (test_result.outcome != _TestInfo.SUCCESS):
elem_name = ('failure', 'error')[test_result.outcome-1]
failure = xml_document.createElement(elem_name)
testcase.appendChild(failure)
failure.setAttribute('type', str(test_result.err[0].__name__))
failure.setAttribute('message', str(test_result.err[1]))
error_info = test_result.get_error_info()
failureText = xml_document.createCDATAOrText(error_info)
failure.appendChild(failureText)
_report_testcase = staticmethod(_report_testcase)
def _report_output(test_runner, xml_testsuite, xml_document, stdout, stderr):
"Appends the system-out and system-err sections to the XML document."
systemout = xml_document.createElement('system-out')
xml_testsuite.appendChild(systemout)
systemout_text = xml_document.createCDATAOrText(stdout)
systemout.appendChild(systemout_text)
systemerr = xml_document.createElement('system-err')
xml_testsuite.appendChild(systemerr)
systemerr_text = xml_document.createCDATAOrText(stderr)
systemerr.appendChild(systemerr_text)
_report_output = staticmethod(_report_output)
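# Illustrative shape of the report these helpers produce (values are
# examples only, derived from the attributes set above):
#
#   <testsuite name="tests.SomeTestCase" tests="2" time="0.004"
#              failures="1" errors="0">
#     <testcase classname="tests.SomeTestCase" name="test_ok" time="0.002"/>
#     <testcase classname="tests.SomeTestCase" name="test_bad" time="0.002">
#       <failure type="AssertionError" message="..."><![CDATA[traceback]]></failure>
#     </testcase>
#     <system-out><![CDATA[captured stdout]]></system-out>
#     <system-err><![CDATA[captured stderr]]></system-err>
#   </testsuite>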
def generate_reports(self, test_runner):
"Generates the XML reports to a given XMLTestRunner object."
all_results = self._get_info_by_testcase()
if type(test_runner.output) == str and not \
os.path.exists(test_runner.output):
os.makedirs(test_runner.output)
for suite, tests in all_results.items():
doc = XMLDocument()
# Build the XML file
testsuite = _XMLTestResult._report_testsuite(suite, tests, doc)
stdout, stderr = [], []
for test in tests:
_XMLTestResult._report_testcase(suite, test, testsuite, doc)
if test.stdout:
stdout.extend(['*****************', test.get_description(), test.stdout])
if test.stderr:
stderr.extend(['*****************', test.get_description(), test.stderr])
_XMLTestResult._report_output(test_runner, testsuite, doc,
'\n'.join(stdout), '\n'.join(stderr))
xml_content = doc.toprettyxml(indent='\t')
if type(test_runner.output) is str:
report_file = open('%s%sTEST-%s.xml' % \
(test_runner.output, os.sep, suite), 'w')
try:
report_file.write(xml_content)
finally:
report_file.close()
else:
# Assume that test_runner.output is a stream
test_runner.output.write(xml_content)
class XMLTestRunner(TextTestRunner):
"""A test runner class that outputs the results in JUnit like XML files.
"""
def __init__(self, output='.', stream=sys.stderr, descriptions=True, \
verbose=False, elapsed_times=True):
"Create a new instance of XMLTestRunner."
verbosity = (1, 2)[verbose]
TextTestRunner.__init__(self, stream, descriptions, verbosity)
self.output = output
self.elapsed_times = elapsed_times
def _make_result(self):
"""Create the TestResult object which will be used to store
information about the executed tests.
"""
return _XMLTestResult(self.stream, self.descriptions, \
self.verbosity, self.elapsed_times)
def run(self, test):
"Run the given test case or test suite."
# Prepare the test execution
result = self._make_result()
# Print a nice header
self.stream.writeln()
self.stream.writeln('Running tests...')
self.stream.writeln(result.separator2)
# Execute tests
start_time = time.time()
test(result)
stop_time = time.time()
time_taken = stop_time - start_time
# Print results
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", time_taken))
self.stream.writeln()
# Error traces
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored = (len(result.failures), len(result.errors))
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed:
self.stream.write(", ")
self.stream.write("errors=%d" % errored)
self.stream.writeln(")")
else:
self.stream.writeln("OK")
# Generate reports
self.stream.writeln()
self.stream.writeln('Generating XML reports...')
result.generate_reports(self)
return result
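if __name__ == '__main__':
    # Minimal smoke-test sketch (not part of the original module): build a
    # one-test suite and write a TEST-*.xml report into 'test-reports', as
    # described in the module docstring. The test case below is illustrative.
    import unittest

    class _SmokeTest(unittest.TestCase):
        def test_truth(self):
            self.assertTrue(True)

    suite = unittest.TestLoader().loadTestsFromTestCase(_SmokeTest)
    XMLTestRunner(output='test-reports', verbose=True).run(suite)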
|
|
import h2o
import math
from h2o.estimators import H2OXGBoostEstimator
class H2OTree():
"""
Represents a model of a Tree built by one of H2O's algorithms (GBM, Random Forest).
"""
def __init__(self, model, tree_number, tree_class=None):
params = {"model": model.model_id,
"tree_number": tree_number,
"tree_class": tree_class}
response = h2o.api(endpoint="GET /3/Tree", data=params)
self._left_children = response['left_children']
self._right_children = response['right_children']
self._node_ids = self.__extract_internal_ids(response['root_node_id'])
self._descriptions = response['descriptions']
self._model_id = model.model_id
self._tree_number = response['tree_number']
self._tree_class = response['tree_class']
self._thresholds = self.__convert_threshold_nans(response['thresholds'])
self._features = response['features']
self._levels = self.__decode_categoricals(model, response['levels'])
self._nas = response['nas']
self._predictions = response['predictions']
self._root_node = self.__assemble_tree(0)
@property
def left_children(self):
"""An array with left child nodes of tree's nodes"""
return self._left_children
@property
def right_children(self):
"""An array with right child nodes of tree's nodes"""
return self._right_children
@property
def node_ids(self):
"""Array with identification numbers of nodes. Node IDs are generated by H2O."""
return self._node_ids
@property
def descriptions(self):
"""Descriptions for each node to be found in the tree.
Contains the split threshold if the split is based on a numerical column.
For categorical splits, it contains the list of categorical levels for the transition from the parent node.
"""
return self._descriptions
@property
def model_id(self):
"""
Name (identification) of the model this tree is related to.
"""
return self._model_id
@property
def tree_number(self):
"""The order in which the tree has been built in the model."""
return self._tree_number
@property
def tree_class(self):
"""The name of a tree's class. Number of tree classes equals to the number of levels in
categorical response column.
As there is exactly one class per categorical level, name of tree's class equals to the corresponding
categorical level of response column.
In case of regression and binomial, the name of the categorical level is ignored can be omitted,
as there is exactly one tree built in both cases.
"""
return self._tree_class
@property
def thresholds(self):
"""Node split thresholds. Split thresholds are not only related to numerical splits, but might be present
in case of categorical split as well."""
return self._thresholds
@property
def features(self):
"""Names of the feature/column used for the split."""
return self._features
@property
def levels(self):
"""Categorical levels on split from parent's node belonging into this node. None for root node or
non-categorical splits."""
return self._levels
@property
def nas(self):
"""representing if NA values go to the left node or right node. The value may be None if node is a leaf
or there is no possibility of an NA value appearing on a node."""
return self._nas
@property
def root_node(self):
"""An instance of H2ONode representing the beginning of the tree behind the model.
Allows further tree traversal.
"""
return self._root_node
@property
def predictions(self):
"""Values predicted on tree's nodes."""
return self._predictions
def __convert_threshold_nans(self, thresholds):
for i in range(0, len(thresholds)):
if thresholds[i] == "NaN": thresholds[i] = float('nan')
return thresholds
def __assemble_tree(self, node):
if node == -1: return None
left_child = self._left_children[node]
right_child = self._right_children[node]
if left_child == -1 and right_child == -1:
return H2OLeafNode(node_id=self._node_ids[node],
prediction=self._predictions[node])
else:
return H2OSplitNode(node_id=self._node_ids[node],
left_child=self.__assemble_tree(left_child),
right_child=self.__assemble_tree(right_child),
threshold=self._thresholds[node],
split_feature=self._features[node],
na_direction=self._nas[node],
left_levels=self._levels[left_child],
right_levels = self._levels[right_child])
def __decode_categoricals(self, model, levels):
string_levels = len(self._left_children) * [None]
if type(model) is H2OXGBoostEstimator:
return string_levels
for i in range(0, len(self._left_children)):
if (self._features[i] is None): continue
left_node = self._left_children[i]
right_node = self._right_children[i]
split_column_index = model._model_json["output"]["names"].index(self._features[i])
domain = model._model_json["output"]["domains"][split_column_index]
if domain is None: continue
if left_node != -1:
left_levels = []
if levels[left_node] is not None:
for lvl_index in levels[left_node]:
left_levels.append(domain[lvl_index])
string_levels[left_node] = left_levels
if right_node != -1:
right_levels = []
if levels[right_node] is not None:
for lvl_index in levels[right_node]:
right_levels.append(domain[lvl_index])
string_levels[right_node] = right_levels
return string_levels
def __extract_internal_ids(self, root_node_id):
node_index = 0
node_ids = [root_node_id]
for i in range(0, len(self._left_children)):
if (self._left_children[i] != -1):
node_index = node_index + 1
node_ids.append(self._left_children[i])
self._left_children[i] = node_index
else:
self._left_children[i] = -1
if (self._right_children[i] != -1):
node_index = node_index + 1
node_ids.append(self._right_children[i])
self._right_children[i] = node_index
else:
self._right_children[i] = -1
return node_ids
def __len__(self):
return len(self._node_ids)
def __str__(self):
return "Tree related to model {}. Tree number is {}, tree class is '{}'\n\n".format(self._model_id,
self._tree_number,
self._tree_class)
def show(self):
print(self.__str__())
class H2ONode():
"""
Represents a single abstract node in an H2OTree
"""
def __init__(self, node_id):
self._id = node_id
@property
def id(self):
"""Node's unique identifier (integer). Generated by H2O."""
return self._id
def __str__(self):
return "Node ID {} \n".format(self._id)
class H2OLeafNode(H2ONode):
"""
Represents a single terminal node in an H2OTree with final prediction.
"""
def __init__(self, node_id, prediction):
H2ONode.__init__(self, node_id)
self._prediction = prediction
@property
def id(self):
"""Node's unique identifier (integer). Generated by H2O."""
return self._id
@property
def prediction(self):
"""Prediction value in the terminal node (numeric floating point)"""
return self._prediction
def __str__(self):
return "Leaf node ID {}. Predicted value at leaf node is {} \n".format(self._id, self._prediction)
def show(self):
print(self.__str__())
class H2OSplitNode(H2ONode):
"""
Represents a single node with either numerical or categorical split in an H2OTree with all its attributes.
"""
def __init__(self, node_id, threshold, left_child, right_child, split_feature, na_direction, left_levels, right_levels):
H2ONode.__init__(self, node_id)
self._threshold = threshold
self._left_child = left_child
self._right_child = right_child
self._split_feature = split_feature
self._na_direction = na_direction
self._left_levels = left_levels
self._right_levels = right_levels
@property
def id(self):
"""Node's unique identifier (integer). Generated by H2O."""
return self._id
@property
def threshold(self):
"""Split threshold, typically when the split column is numerical."""
return self._threshold
@property
def left_child(self):
"""
Integer identifier of the left child node, if there is any. Otherwise None.
"""
return self._left_child
@property
def right_child(self):
"""Integer identifier of the right child node, if there is any. Otherwise None."""
return self._right_child
@property
def split_feature(self):
"""The name of the column this node splits on."""
return self._split_feature
@property
def na_direction(self):
"""The direction of NA values. LEFT means NA values go to the left child node, RIGH means NA values
go to the right child node.
A value of None means occurance of NA for the given split column is not possible on this node due to
an earlier split on the very same feature.
"""
return self._na_direction
@property
def left_levels(self):
"""Categorical levels on the edge from this node to the left child node.
None for non-categorical splits."""
return self._left_levels
@property
def right_levels(self):
"""Categorical levels on the edge from this node to the right child node.
None for non-categorical splits."""
return self._right_levels
def __str__(self):
out = "Node ID {} \n".format(self._id)
if self._split_feature is not None:
if self._left_child is not None:
out += "Left child node ID = {}\n".format(self.left_child.id)
else:
out += "There is no left child\n"
if self._right_child is not None:
out += "Right child node ID = {}\n".format(self.right_child.id)
else:
out += "There is no right child\n"
out += "\nSplits on column {}\n".format(self._split_feature)
else:
out += "This is a terminal node"
if math.isnan(self._threshold):
if self._left_child is not None:
out += " - Categorical levels going to the left node: {}\n".format(self._left_levels)
if self._right_child is not None:
out += " - Categorical levels going to the right node: {}\n".format(self._right_levels)
else:
out += "Split threshold < {} to the left node, >= {} to the right node \n".format(self._threshold,
self._threshold)
if self._na_direction is not None: out += "\nNA values go to the {}".format(self._na_direction)
return out
def show(self):
print(self.__str__())
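# Usage sketch (requires a live H2O cluster and a trained tree-based model;
# the estimator, frame and column names below are illustrative placeholders,
# not part of this module):
#
#   import h2o
#   from h2o.estimators import H2OGradientBoostingEstimator
#
#   h2o.init()
#   train = h2o.import_file("path/to/training_data.csv")
#   gbm = H2OGradientBoostingEstimator(ntrees=10)
#   gbm.train(y="response", training_frame=train)
#
#   tree = H2OTree(model=gbm, tree_number=0)  # tree_class only needed for multinomial models
#   print(len(tree))                          # number of nodes in this tree
#   root = tree.root_node
#   if isinstance(root, H2OSplitNode):
#       print(root.split_feature, root.threshold, root.na_direction)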
|
|
from functools import partial
import math
import actions
from actions import _get_as_str
import call_definitions
from call_definitions import xpcom_constructor as xpcom_const, python_wrap
from entity_values import entity
import instanceactions
from jstypes import JSWrapper
from validator.compat import FX47_DEFINITION
from validator.constants import MDN_DOC
# A list of identifiers and member values that may not be used.
BANNED_IDENTIFIERS = {
u'newThread':
'Creating threads from JavaScript is a common cause '
'of crashes and is unsupported in recent versions of the platform',
u'processNextEvent':
'Spinning the event loop with processNextEvent is a common cause of '
'deadlocks, crashes, and other errors due to unintended reentrancy. '
'Please use asynchronous callbacks instead wherever possible',
}
CUSTOMIZATION_API_HELP = (
'We are currently working to provide libraries and APIs to allow '
'extensions to modify these settings in ways that we can guarantee are '
'in-policy. In the interim, we recommend that you avoid changing these '
'settings altogether, if at all possible.')
CUSTOMIZATION_PREF_MESSAGE = {
'description': (
'Extensions must not alter user preferences such as the current home '
'page, new tab page, or search engine, without explicit user consent, '
'in which a user takes a non-default action. Such changes must also '
'be reverted when the extension is disabled or uninstalled.',
'In nearly all cases, new values for these preferences should be '
'set in the default preference branch, rather than the user branch.'),
'signing_help':
'Add-ons which directly change these preferences must undergo '
'manual code review for at least one submission. ' +
CUSTOMIZATION_API_HELP,
'signing_severity': 'high',
}
NETWORK_PREF_MESSAGE = {
'description':
'Changing network preferences may be dangerous, and often leads to '
'performance costs.',
'signing_help':
'Changes to these preferences are strongly discouraged. If at all '
'possible, you should remove any reference to them from '
'your extension. Extensions which do modify these preferences '
'must undergo light manual code review for at least one submission.',
'signing_severity': 'low',
}
SEARCH_PREF_MESSAGE = {
'description':
'Search engine preferences may not be changed by add-ons directly. '
'All such changes must be made only via the browser search service, '
'and only after an explicit opt-in from the user. All such changes '
'must be reverted when the extension is disabled or uninstalled.',
'signing_help': (
'You should remove all references to these preferences from your '
'code, and interact with search settings only via the '
'`Services.search` interface. Extensions which interact with these '
'preferences directly are not acceptable within the Firefox add-on '
'ecosystem.',
'Note, however, that extensions which change search settings even via '
'the search service must undergo manual code review for at least '
'one submission. ' + CUSTOMIZATION_API_HELP),
'signing_severity': 'high',
}
SECURITY_PREF_MESSAGE = {
'description':
'Changing this preference may have severe security implications, and '
'is forbidden under most circumstances.',
'editors_only': True,
'signing_help': ('Extensions which alter these settings are allowed '
'within the Firefox add-on ecosystem by exception '
'only, and under extremely limited circumstances.',
'Please remove any reference to these preference names '
'from your add-on.'),
'signing_severity': 'high',
}
MARIONETTE_MESSAGE = {
'warning': 'Marionette should not be accessed by extensions',
'description': 'References to the Marionette service are not acceptable '
'in extensions. Please remove them.',
}
def fuel_error(traverse_node, err):
traverse_node.im_self.warning(
err_id=('js', 'traverser', 'dangerous_global'),
warning='The FUEL library is no longer supported.',
description='The FUEL library is no longer supported. Please use the '
'Add-ons SDK instead. See %s for more information.'
% MDN_DOC % 'Add-ons/SDK',
for_appversions=FX47_DEFINITION,
tier=5,
compatibility_type='error')
BANNED_PREF_BRANCHES = (
# Security and update preferences
(u'app.update.', SECURITY_PREF_MESSAGE),
(u'browser.addon-watch.', SECURITY_PREF_MESSAGE),
(u'capability.policy.', None),
(u'datareporting.', SECURITY_PREF_MESSAGE),
(u'extensions.blocklist.', SECURITY_PREF_MESSAGE),
(u'extensions.checkCompatibility', None),
(u'extensions.getAddons.', SECURITY_PREF_MESSAGE),
(u'extensions.update.', SECURITY_PREF_MESSAGE),
(u'xpinstall.signatures.required', SECURITY_PREF_MESSAGE),
# Let's see if we can get away with this...
# Changing any preference in this branch should result in a
# warning. However, this substring may turn out to be too
# generic, and lead to spurious warnings, in which case we'll
# have to single out sub-branches.
(u'security.', SECURITY_PREF_MESSAGE),
# Search, homepage, and customization preferences
(u'browser.newtab.url', CUSTOMIZATION_PREF_MESSAGE),
(u'browser.newtabpage.enabled', CUSTOMIZATION_PREF_MESSAGE),
(u'browser.search.defaultenginename', SEARCH_PREF_MESSAGE),
(u'browser.search.searchEnginesURL', SEARCH_PREF_MESSAGE),
(u'browser.startup.homepage', CUSTOMIZATION_PREF_MESSAGE),
(u'extensions.getMoreThemesURL', None),
(u'keyword.URL', SEARCH_PREF_MESSAGE),
(u'keyword.enabled', SEARCH_PREF_MESSAGE),
# Network
(u'network.proxy.autoconfig_url', {
'description':
'As many add-ons have reason to change the proxy autoconfig URL, '
'and only one at a time may do so without conflict, extensions '
'must make proxy changes using other mechanisms. Installing a '
'proxy filter is the recommended alternative: '
'https://developer.mozilla.org/en-US/docs/Mozilla/Tech/XPCOM/'
'Reference/Interface/nsIProtocolProxyService#registerFilter()',
'signing_help':
'Dynamic proxy configuration should be implemented via proxy '
'filters, as described above. This preference should not be '
'set, except directly by end users.',
'signing_severity': 'low'}),
(u'network.proxy.', NETWORK_PREF_MESSAGE),
(u'network.http.', NETWORK_PREF_MESSAGE),
(u'network.websocket.', NETWORK_PREF_MESSAGE),
# Other
(u'browser.preferences.instantApply', None),
(u'extensions.alwaysUnpack', None),
(u'extensions.bootstrappedAddons', None),
(u'extensions.dss.', None),
(u'extensions.installCache', None),
(u'extensions.lastAppVersion', None),
(u'extensions.pendingOperations', None),
(u'general.useragent.', None),
(u'nglayout.debug.disable_xul_cache', None),
# Marionette
(u'marionette.', MARIONETTE_MESSAGE),
)
BANNED_PREF_REGEXPS = [
r'extensions\..*\.update\.(url|enabled|interval)',
]
def is_shared_scope(traverser, right=None, node_right=None):
"""Returns true if the traverser `t` is traversing code loaded into
a shared scope, such as a browser window. Particularly used for
detecting when global overwrite warnings should be issued."""
# FIXME(Kris): This is not a great heuristic.
return not (traverser.is_jsm or
traverser.err.get_resource('em:bootstrap') == 'true')
# See https://github.com/mattbasta/amo-validator/wiki/JS-Predefined-Entities
# for details on entity properties.
CONTENT_DOCUMENT = None
CATEGORY_MANAGER = {
u'addCategoryEntry':
{'dangerous':
lambda a, t, e:
e.get_resource('em:bootstrap') and
('Bootstrapped add-ons may not create persistent category '
'entries.' if len(a) > 3 and t(a[3]).is_literal() else
'Authors of bootstrapped add-ons must take care to clean up '
'any added category entries at shutdown.')}}
OBSOLETE_EXTENSION_MANAGER = {
'value': {},
'dangerous': 'This interface is part of the obsolete extension manager '
'interface, which is not available in any remotely modern '
'version of Firefox. It should not be referenced in any '
'code.'}
ADDON_INSTALL_METHOD = {
'value': {},
'dangerous': {
'description': (
'Add-ons may install other add-ons only by user consent. Any '
'such installations must be carefully reviewed to ensure '
'their safety.'),
'editors_only': True,
'signing_help': (
"Rather than directly install other add-ons, you should offer "
"users the opportunity to install them via the normal web install "
"process, using an install link or button connected to the "
"`InstallTrigger` API: "
"https://developer.mozilla.org/en-US/docs/Web/API/InstallTrigger",
"Updates to existing add-ons should be provided via the "
"manifest's `updateURL` mechanism."),
'signing_severity': 'high'},
}
SEARCH_MESSAGE = 'Potentially dangerous use of the search service'
SEARCH_DESCRIPTION = (
'Changes to the default and currently-selected search engine settings '
'may only take place after users have explicitly opted-in, by taking '
'a non-default action. Any such changes must be reverted when the add-on '
'making them is disabled or uninstalled.')
def search_warning(severity='medium', editors_only=False,
message=SEARCH_MESSAGE,
description=SEARCH_DESCRIPTION):
return {'err_id': ('testcases_javascript_actions',
'search_service',
'changes'),
'signing_help':
'Add-ons which directly change search settings must undergo '
'manual code review for at least one submission. ' +
CUSTOMIZATION_API_HELP,
'signing_severity': severity,
'editors_only': editors_only,
'warning': message,
'description': description}
REGISTRY_WRITE = {'dangerous': {
'err_id': ('testcases_javascript_actions',
'windows_registry',
'write'),
'warning': 'Writes to the registry may be dangerous',
'description': 'Writing to the registry can have many system-level '
'consequences and requires careful review.',
'signing_help': (
'Please store any settings relevant to your add-on within the '
'current Firefox profile, ideally using the preferences service. '
'If you are intentionally changing system settings, consider '
'searching for a Firefox API which has a similar effect. If no such '
'API exists, we strongly discourage making any changes which affect '
'the system outside of the browser.'),
'signing_severity': 'medium',
'editors_only': True}}
def registry_key(write=False):
"""Represents a function which returns a registry key object."""
res = {'return': lambda wrapper, arguments, traverser: (
build_quick_xpcom('createInstance', 'nsIWindowsRegKey',
traverser, wrapper=True))}
if write:
res.update(REGISTRY_WRITE)
return res
NSIX509CERT_METHODS = {
'getUsagesArray': entity('nsIX509Cert.getUsagesArray'),
'requestUsagesArrayAsync': entity('nsIX509Cert.requestUsagesArrayAsync'),
'getUsagesString': entity('nsIX509Cert.getUsagesString'),
}
INTERFACES = {
u'nsISupports': {'value': {}},
u'mozIStorageBaseStatement':
{'value':
{u'execute':
{'dangerous': instanceactions.SYNCHRONOUS_SQL_DESCRIPTION},
u'executeStep':
{'dangerous': instanceactions.SYNCHRONOUS_SQL_DESCRIPTION}}},
u'nsIExtensionManager': OBSOLETE_EXTENSION_MANAGER,
u'nsIUpdateItem': OBSOLETE_EXTENSION_MANAGER,
u'nsIInstallLocation': OBSOLETE_EXTENSION_MANAGER,
u'nsIAddonInstallListener': OBSOLETE_EXTENSION_MANAGER,
u'nsIAddonUpdateCheckListener': OBSOLETE_EXTENSION_MANAGER,
u'nsICategoryManager':
{'value': CATEGORY_MANAGER},
u'nsIAccessibleRetrieval':
{'dangerous':
'Using the nsIAccessibleRetrieval interface causes significant '
'performance degradation in Gecko. It should only be used in '
'accessibility-related add-ons.',
'value': {}},
u'nsIBrowserSearchService':
{'value':
{u'currentEngine':
{'readonly': search_warning(severity='high')},
u'defaultEngine':
{'readonly': search_warning(severity='high')},
u'addEngine':
{'dangerous': search_warning()},
u'addEngineWithDetails':
{'dangerous': search_warning()},
u'removeEngine':
{'dangerous': search_warning()},
u'moveEngine':
{'dangerous': search_warning()}}},
u'nsIComponentRegistrar':
{'value':
{u'autoRegister':
{'dangerous':
lambda a, t, e:
e.get_resource('em:bootstrap') and
'Bootstrapped add-ons may not register chrome '
'manifest files.'},
u'registerFactory':
{'dangerous':
lambda a, t, e:
e.get_resource('em:bootstrap') and
'Authors of bootstrapped add-ons must take care to '
'clean up any component registrations at shutdown.'}}},
u'nsIDNSService': {'value': {u'resolve': entity('nsIDNSService.resolve')}},
u'nsIJSON':
{'value':
{u'encode':
{'return': call_definitions.nsIJSON_deprec},
u'decode':
{'return': call_definitions.nsIJSON_deprec}}},
u'nsINavBookmarksService': {'value': {
'getURIForKeyword': entity('nsINavBookmarksService.getURIForKeyword'),
}},
u'nsIObserverService':
{'value':
{u'addObserver': entity('nsIObserverService.addObserver')},
'dangerous': lambda a, t, e: (
e.metadata.get('is_jetpack') and
'The observer service should not be used directly in SDK '
"add-ons. Please use the 'sdk/system/events' module "
'instead.')},
u'nsIPrefBranch':
{'value': dict(
tuple((method, {'return': instanceactions.set_preference})
for method in (u'setBoolPref',
u'setCharPref',
u'setComplexValue',
u'setIntPref',
u'clearUserPref',
u'deleteBranch',
u'resetBranch')) +
tuple((method, {'return': instanceactions.get_preference})
for method in (u'getBoolPref',
u'getCharPref',
u'getChildList',
u'getComplexValue',
u'getFloatPref',
u'getIntPref',
u'getPrefType',
u'prefHasUserValue')))},
u'nsIResProtocolHandler':
{'value':
{u'setSubstitution':
{'dangerous':
lambda a, t, e:
e.get_resource('em:bootstrap') and
a and len(a) > 1 and t(a[1]).get_literal_value() and
'Authors of bootstrapped add-ons must take care '
'to clean up any added resource substitutions '
'at shutdown.'}}},
u'nsISound': {'value': {'play': entity('nsISound.play')}},
u'nsIStringBundleService':
{'value':
{u'createStringBundle':
{'dangerous':
lambda a, t, e:
e.get_resource('em:bootstrap') and
'Authors of bootstrapped add-ons must take care '
'to flush the string bundle cache at shutdown.'},
u'createExtensibleBundle':
{'dangerous':
lambda a, t, e:
e.get_resource('em:bootstrap') and
'Authors of bootstrapped add-ons must take care '
'to flush the string bundle cache at shutdown.'}}},
u'nsIStyleSheetService':
{'value':
{u'loadAndRegisterSheet':
{'dangerous':
lambda a, t, e:
e.get_resource('em:bootstrap') and
'Authors of bootstrapped add-ons must take care to '
'unregister registered stylesheets at shutdown.'}}},
u'nsITransferable':
{'value':
{u'init':
entity('nsITransferable.init')}},
u'nsIWindowMediator':
{'value':
{'registerNotification':
{'dangerous':
lambda a, t, e:
e.get_resource('em:bootstrap') and
'Authors of bootstrapped add-ons must take care '
'to remove any added observers at shutdown.'}}},
u'nsIWindowWatcher':
{'value':
{u'addListener':
{'dangerous':
lambda a, t, e:
e.get_resource('em:bootstrap') and
'Authors of bootstrapped add-ons must take care '
'to remove any added observers at shutdown.'},
u'openWindow': entity('nsIWindowWatcher.openWindow')}},
u'nsIProtocolProxyService': {'value': {
u'registerFilter': {'dangerous': {
'err_id': ('testcases_javascript_actions',
'predefinedentities', 'proxy_filter'),
'description': (
'Proxy filters can be used to direct arbitrary network '
'traffic through remote servers, and may potentially '
'be abused.',
'Additionally, to prevent conflicts, the `applyFilter` '
'method should always return its third argument in cases '
'when it is not supplying a specific proxy.'),
'editors_only': True,
'signing_help': 'Due to the potential for unintended effects, '
'any add-on which uses this API must undergo '
'manual code review for at least one submission.',
'signing_severity': 'low'}}}},
u'nsIWebBrowserPersist':
{'value':
{u'saveChannel':
{'return': call_definitions.webbrowserpersist},
u'saveURI':
{'return':
call_definitions.webbrowserpersist_saveuri},
u'savePrivacyAwareURI':
{'return': call_definitions.webbrowserpersist}}},
'nsIWindowsRegKey': {'value': {u'create': REGISTRY_WRITE,
u'createChild': registry_key(write=True),
u'openChild': registry_key(),
u'writeBinaryValue': REGISTRY_WRITE,
u'writeInt64Value': REGISTRY_WRITE,
u'writeIntValue': REGISTRY_WRITE,
u'writeStringValue': REGISTRY_WRITE,
}},
'nsIPK11TokenDB': {'value': {
'listTokens': entity('nsIPK11TokenDB.listTokens'),
'findTokenByName': entity('nsIPK11TokenDB.findTokenByName')
}},
'nsIPKCS11ModuleDB': {'value': {'listModules': entity('nsIPKCS11ModuleDB.listModules')}},
'nsIPKCS11Module': {'value': {'listSlots': entity('nsIPKCS11Module.listSlots')}},
'nsIIOService': {'value': {
'newChannel': entity('nsIIOService.newChannel'),
'newChannelFromURI': entity('nsIIOService.newChannelFromURI'),
'newChannelFromURIWithProxyFlags': entity('nsIIOService.newChannelFromURIWithProxyFlags'),
}},
'nsIX509Cert': {'value': NSIX509CERT_METHODS},
'nsIX509Cert2': {'value': NSIX509CERT_METHODS},
'nsIX509Cert3': {'value': NSIX509CERT_METHODS},
'nsIX509CertDB': {'value': {
'findCertByNickname': entity('nsIX509CertDB.findCertByNickname'),
'findEmailEncryptionCert': entity('nsIX509CertDB.findEmailEncryptionCert'),
'findEmailSigningCert': entity('nsIX509CertDB.findEmailSigningCert'),
'addCert': entity('nsIX509CertDB.addCert'),
}},
'nsISupportsArray': entity('nsISupportsArray'),
u'mozIAsyncFavicons': {'value': {
u'setAndFetchFaviconForPage': entity('mozIAsyncFavicons.setAndFetchFaviconForPage'),
u'replaceFaviconDataFromDataURL': entity('mozIAsyncFavicons.replaceFaviconDataFromDataURL'),
}},
u'nsIFormHistory2': entity('nsIFormHistory2'),
}
INTERFACE_ENTITIES = {u'nsIXMLHttpRequest':
{'xpcom_map':
lambda: GLOBAL_ENTITIES['XMLHttpRequest']},
u'nsIProcess': {'dangerous': {
'warning': 'The use of nsIProcess is potentially '
'dangerous and requires careful review '
'by an administrative reviewer.',
'editors_only': True,
'signing_help':
'Consider alternatives to directly launching '
'executables, such as loading a URL with an '
'appropriate external protocol handler, making '
'network requests to a local service, or, as a last '
'resort, using the `nsIFile.launch()` method '
'to open a file with the appropriate application.',
'signing_severity': 'high',
}},
u'nsIDOMGeoGeolocation': {'dangerous':
'Use of the geolocation API by add-ons requires '
'prompting users for consent.'},
u'nsIWindowsRegKey': {'dangerous': {
'signing_help':
'The information stored in many standard registry '
'keys is available via built-in Firefox APIs, '
'such as `Services.sysinfo`, `Services.dirsvc`, '
'and the environment service '
'(http://mzl.la/1OGgCF3). We strongly discourage '
'extensions from reading registry information '
'which is not available via other Firefox APIs.',
'signing_severity': 'low',
'editors_only': True,
'description': (
'Access to the registry is potentially '
'dangerous, and should be reviewed with special '
'care.')}},
}
DANGEROUS_CERT_DB = {
'err_id': ('javascript', 'predefinedentities', 'cert_db'),
'description': 'Access to the X509 certificate '
'database is potentially dangerous '
'and requires careful review by an '
'administrative reviewer.',
'editors_only': True,
'signing_help': 'Please avoid interacting with the certificate and trust '
'databases if at all possible. Any add-ons which interact '
'with these databases must undergo manual code review '
'prior to signing.',
'signing_severity': 'high',
}
INTERFACE_ENTITIES.update(
(interface, {'dangerous': DANGEROUS_CERT_DB})
for interface in ('nsIX509CertDB', 'nsIX509CertDB2', 'nsIX509CertList',
'nsICertOverrideService'))
CONTRACT_ENTITIES = {
contract: DANGEROUS_CERT_DB
for contract in ('@mozilla.org/security/x509certdb;1',
'@mozilla.org/security/x509certlist;1',
'@mozilla.org/security/certoverride;1')}
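# Give every known interface an 'xpcom_map' entry that resolves back to its
# INTERFACES definition. The nested `construct` helper exists to bind the
# current value of `interface` at definition time; a bare lambda inside the
# loop would late-bind the loop variable and make every entry resolve to the
# last interface iterated.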
for interface in INTERFACES:
def construct(interface):
def wrap():
return INTERFACES[interface]
return wrap
if interface not in INTERFACE_ENTITIES:
INTERFACE_ENTITIES[interface] = {}
INTERFACE_ENTITIES[interface]['xpcom_map'] = construct(interface)
def build_quick_xpcom(method, interface, traverser, wrapper=False):
"""A shortcut to quickly build XPCOM objects on the fly."""
extra = ()
if isinstance(interface, (list, tuple)):
interface, extra = interface[0], interface[1:]
def interface_obj(iface):
return traverser._build_global(
name=method,
entity={'xpcom_map':
lambda: INTERFACES.get(iface, INTERFACES['nsISupports'])})
constructor = xpcom_const(method, pretraversed=True)
obj = constructor(None, [interface_obj(interface)], traverser)
for iface in extra:
# `xpcom_constructor` really needs to be cleaned up so we can avoid
# this duplication.
iface = interface_obj(iface)
iface = traverser._build_global('QueryInterface',
iface.value['xpcom_map']())
obj.value = obj.value.copy()
value = obj.value['value'].copy()
value.update(iface.value['value'])
obj.value.update(iface.value)
obj.value['value'] = value
if isinstance(obj, JSWrapper) and not wrapper:
obj = obj.value
return obj
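# Call shape, as used elsewhere in this module: `method` is an XPCOM entry
# point such as 'getService' or 'createInstance', and `interface` is either a
# single interface name or a tuple of names whose members are merged, e.g.
#
#   build_quick_xpcom('getService', ('nsIIOService', 'nsIIOService2'), traverser)
#
# The SERVICES table below wires these up through functools.partial.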
UNSAFE_TEMPLATE_METHOD = (
'The use of `%s` can lead to unsafe '
'remote code execution, and therefore must be done with '
'great care, and only with sanitized data.')
SERVICES = {
'appinfo': ('nsIXULAppInfo', 'nsIXULRuntime'),
'appShell': 'nsIAppShellService',
'blocklist': 'nsIBlocklistService',
'cache': 'nsICacheService',
'cache2': 'nsICacheStorageService',
'clipboard': 'nsIClipboard',
'console': 'nsIConsoleService',
'contentPrefs': 'nsIContentPrefService',
'cookies': ('nsICookieManager', 'nsICookieManager2', 'nsICookieService'),
'dirsvc': ('nsIDirectoryService', 'nsIProperties'),
'DOMRequest': 'nsIDOMRequestService',
'domStorageManager': 'nsIDOMStorageManager',
'downloads': 'nsIDownloadManager',
'droppedLinkHandler': 'nsIDroppedLinkHandler',
'eTLD': 'nsIEffectiveTLDService',
'focus': 'nsIFocusManager',
'io': ('nsIIOService', 'nsIIOService2'),
'locale': 'nsILocaleService',
'logins': 'nsILoginManager',
'obs': 'nsIObserverService',
'perms': 'nsIPermissionManager',
'prefs': ('nsIPrefBranch2', 'nsIPrefService', 'nsIPrefBranch'),
'prompt': 'nsIPromptService',
'scriptloader': 'mozIJSSubScriptLoader',
'scriptSecurityManager': 'nsIScriptSecurityManager',
'search': 'nsIBrowserSearchService',
'startup': 'nsIAppStartup',
'storage': 'mozIStorageService',
'strings': 'nsIStringBundleService',
'sysinfo': 'nsIPropertyBag2',
'telemetry': 'nsITelemetry',
'tm': 'nsIThreadManager',
'uriFixup': 'nsIURIFixup',
'urlFormatter': 'nsIURLFormatter',
'vc': 'nsIVersionComparator',
'wm': 'nsIWindowMediator',
'ww': 'nsIWindowWatcher',
}
for key, value in SERVICES.items():
SERVICES[key] = {'value': partial(build_quick_xpcom,
'getService', value)}
DANGEROUS_EVAL = {
'err_id': ('javascript', 'dangerous_global', 'eval'),
'description': ('Evaluation of strings as code can lead to security '
'vulnerabilities and performance issues, even in the '
'most innocuous of circumstances. Please avoid using '
'`eval` and the `Function` constructor when at all '
'possible.',
'Alternatives are available for most use cases. See '
'https://developer.mozilla.org/en-US/Add-ons/'
'Overlay_Extensions/XUL_School/'
'Appendix_C:_Avoid_using_eval_in_Add-ons '
'for more information.'),
'signing_help':
'Please try to avoid evaluating strings as code wherever possible. '
'Read over the linked document for suggested alternatives. '
'If you are referencing the `Function` constructor without calling '
'it, and cannot avoid continuing to do so, consider alternatives '
'such as calling `Object.getPrototypeOf` on an existing function '
'object.',
'signing_severity': 'high'}
FUNCTION_EXPORT_HELP = (
'Given the potential security risks of exposing APIs to unprivileged '
'code, extensions which use these APIs must undergo manual review for at '
'least one submission. If you are not using these APIs to interact with '
'content code, please consider alternatives, such as built-in '
'message passing functionality.')
# GLOBAL_ENTITIES is also representative of the `window` object.
GLOBAL_ENTITIES = {
u'window': {'value': lambda t: {'value': GLOBAL_ENTITIES}},
u'null': {'literal': lambda t: JSWrapper(None, traverser=t)},
u'Cc': {'readonly': False,
'value':
lambda t: GLOBAL_ENTITIES['Components']['value']['classes']},
u'Ci': {'readonly': False,
'value':
lambda t: GLOBAL_ENTITIES['Components']['value']['interfaces']},
u'Cu': {'readonly': False,
'value':
lambda t: GLOBAL_ENTITIES['Components']['value']['utils']},
# From Services.jsm.
u'Services': {'value': SERVICES},
# From Preferences.jsm.
# TODO: Support calls that return instances of this object which
# operate on non-root branches.
u'Preferences': {'value': {
u'get': {'return': instanceactions.get_preference},
u'reset': {'return': instanceactions.set_preference},
u'resetBranch': {'return': instanceactions.set_preference},
u'set': {'return': instanceactions.set_preference}}},
u'AddonManager': {
'readonly': False,
'value': {
u'autoUpdateDefault': {'readonly': SECURITY_PREF_MESSAGE},
u'checkUpdateSecurity': {'readonly': SECURITY_PREF_MESSAGE},
u'checkUpdateSecurityDefault': {'readonly': SECURITY_PREF_MESSAGE},
u'updateEnabled': {'readonly': SECURITY_PREF_MESSAGE},
u'getInstallForFile': ADDON_INSTALL_METHOD,
u'getInstallForURL': ADDON_INSTALL_METHOD,
u'installAddonsFromWebpage': ADDON_INSTALL_METHOD}},
u'ctypes': {'dangerous': {
'description': (
'Insufficiently meticulous use of ctypes can lead to serious, '
'and often exploitable, errors. The use of bundled binary code, '
'or access to system libraries, may allow for add-ons to '
'perform unsafe operations. All ctypes use must be carefully '
'reviewed by a qualified reviewer.'),
'editors_only': True,
'signing_help': ('Please try to avoid interacting with or bundling '
'native binaries whenever possible. If you are '
'bundling binaries for performance reasons, please '
'consider alternatives such as Emscripten '
'(http://mzl.la/1KrSUh2), JavaScript typed arrays '
'(http://mzl.la/1Iw02sr), and Worker threads '
'(http://mzl.la/1OGfAcc).',
'Any code which makes use of the `ctypes` API '
'must undergo manual code review for at least one '
'submission.'),
'signing_severity': 'high'}},
u'document':
{'value':
{u'title':
{'overwriteable': True,
'readonly': False},
u'defaultView':
{'value': lambda t: {'value': GLOBAL_ENTITIES}},
u'loadOverlay':
{'dangerous':
lambda a, t, e:
not a or not _get_as_str(t(a[0])).lower()
.startswith(('chrome:', 'resource:'))},
u'write': entity('document.write'),
u'writeln': entity('document.write')}},
# The nefarious timeout brothers!
u'setTimeout': {'dangerous': actions._call_settimeout},
u'setInterval': {'dangerous': actions._call_settimeout},
u'require': {'dangerous': actions._call_require},
u'encodeURI': {'readonly': True},
u'decodeURI': {'readonly': True},
u'encodeURIComponent': {'readonly': True},
u'decodeURIComponent': {'readonly': True},
u'escape': {'readonly': True},
u'unescape': {'readonly': True},
u'isFinite': {'readonly': True},
u'isNaN': {'readonly': True},
u'parseFloat': {'readonly': True},
u'parseInt': {'readonly': True},
u'eval': {'dangerous': DANGEROUS_EVAL},
u'Function': {'dangerous': DANGEROUS_EVAL},
u'Object':
{'value':
{u'prototype': {'readonly': is_shared_scope},
u'constructor': # Just an experiment for now
{'value': lambda t: GLOBAL_ENTITIES['Function']}}},
u'String':
{'value':
{u'prototype': {'readonly': is_shared_scope}},
'return': call_definitions.string_global},
u'Array':
{'value':
{u'prototype': {'readonly': is_shared_scope}},
'return': call_definitions.array_global},
u'Number':
{'value':
{u'prototype':
{'readonly': is_shared_scope},
u'POSITIVE_INFINITY':
{'value': lambda t: JSWrapper(float('inf'), traverser=t)},
u'NEGATIVE_INFINITY':
{'value': lambda t: JSWrapper(float('-inf'), traverser=t)}},
'return': call_definitions.number_global},
u'Boolean':
{'value':
{u'prototype': {'readonly': is_shared_scope}},
'return': call_definitions.boolean_global},
u'RegExp': {'value': {u'prototype': {'readonly': is_shared_scope}}},
u'Date': {'value': {u'prototype': {'readonly': is_shared_scope}}},
u'File': {'value': {u'prototype': {'readonly': is_shared_scope}}},
u'Math':
{'value':
{u'PI':
{'value': lambda t: JSWrapper(math.pi, traverser=t)},
u'E':
{'value': lambda t: JSWrapper(math.e, traverser=t)},
u'LN2':
{'value': lambda t: JSWrapper(math.log(2), traverser=t)},
u'LN10':
{'value': lambda t: JSWrapper(math.log(10), traverser=t)},
u'LOG2E':
{'value': lambda t: JSWrapper(math.log(math.e, 2),
traverser=t)},
u'LOG10E':
{'value': lambda t: JSWrapper(math.log10(math.e),
traverser=t)},
u'SQRT2':
{'value': lambda t: JSWrapper(math.sqrt(2), traverser=t)},
u'SQRT1_2':
{'value': lambda t: JSWrapper(math.sqrt(0.5), traverser=t)},
u'abs':
{'return': python_wrap(abs, [('num', 0)])},
u'acos':
{'return': python_wrap(math.acos, [('num', 0)])},
u'asin':
{'return': python_wrap(math.asin, [('num', 0)])},
u'atan':
{'return': python_wrap(math.atan, [('num', 0)])},
u'atan2':
{'return': python_wrap(math.atan2, [('num', 0),
('num', 1)])},
u'ceil':
{'return': python_wrap(math.ceil, [('num', 0)])},
u'cos':
{'return': python_wrap(math.cos, [('num', 0)])},
u'exp':
{'return': python_wrap(math.exp, [('num', 0)])},
u'floor':
{'return': python_wrap(math.floor, [('num', 0)])},
u'log':
{'return': call_definitions.math_log},
u'max':
{'return': python_wrap(max, [('num', 0)], nargs=True)},
u'min':
{'return': python_wrap(min, [('num', 0)], nargs=True)},
u'pow':
{'return': python_wrap(math.pow, [('num', 0),
('num', 1)])},
u'random': # Random always returns 0.5 in our fantasy land.
{'return': call_definitions.math_random},
u'round':
{'return': call_definitions.math_round},
u'sin':
{'return': python_wrap(math.sin, [('num', 0)])},
u'sqrt':
{'return': python_wrap(math.sqrt, [('num', 0)])},
u'tan':
{'return': python_wrap(math.tan, [('num', 0)])},
}},
u'netscape':
{'value':
{u'security':
{'value':
{u'PrivilegeManager':
{'value':
{u'enablePrivilege':
{'dangerous': {
'signing_help':
'Any references to this API must '
'be removed from your extension. '
'Add-ons using this API will not '
'be accepted for signing.',
'signing_severity': 'high',
'description': (
'enablePrivilege is extremely '
'dangerous, and nearly always '
'unnecessary. It should not be '
'used under any circumstances.'),
}}}}}}}},
u'navigator':
{'value': {u'wifi': {'dangerous': True},
u'geolocation': {'dangerous': True}}},
u'Components':
{'dangerous_on_read':
lambda t, e: bool(e.metadata.get('is_jetpack')),
'value':
{u'classes':
{'xpcom_wildcard': True,
'value':
{u'createInstance':
{'return': xpcom_const('createInstance')},
u'getService':
{'return': xpcom_const('getService')}}},
'utils':
{'value': {u'evalInSandbox':
{'dangerous': {
'editors_only': 'true',
'signing_help':
DANGEROUS_EVAL['signing_help'],
'signing_severity': 'low'}},
u'cloneInto':
{'dangerous': {
'editors_only': True,
'signing_help': FUNCTION_EXPORT_HELP,
'signing_severity': 'low',
'description': (
'Can be used to expose privileged '
'functionality to unprivileged scopes. '
'Care should be taken to ensure that '
'this is done safely.')}},
u'exportFunction':
{'dangerous': {
'editors_only': True,
'signing_help': FUNCTION_EXPORT_HELP,
'signing_severity': 'low',
'description': (
'Can be used to expose privileged '
'functionality to unprivileged scopes. '
'Care should be taken to ensure that '
'this is done safely.')}},
u'import':
{'dangerous':
lambda a, t, e:
a and 'ctypes.jsm' in _get_as_str(t(a[0]))},
u'waiveXrays':
{'return': call_definitions.js_unwrap}}},
u'interfaces': {'value': INTERFACE_ENTITIES}}},
u'extensions': {'dangerous': True},
u'xpcnativewrappers': {'dangerous': True},
u'XMLHttpRequest':
{'value':
{u'open':
{'dangerous':
# Flag synchronous XHR: warn when the third ('async')
# argument is present and evaluates to false.
lambda a, t, e:
a and len(a) >= 3 and
not t(a[2]).get_literal_value() and
'Synchronous HTTP requests can cause serious UI '
'performance problems, especially to users with '
'slow network connections.'}}},
# Global properties are inherently read-only, though this formalizes it.
u'Infinity':
{'value':
lambda t:
GLOBAL_ENTITIES[u'Number']['value'][u'POSITIVE_INFINITY']},
u'NaN': {'readonly': True},
u'undefined': {'readonly': True},
u'innerHeight': {'readonly': False},
u'innerWidth': {'readonly': False},
u'width': {'readonly': False},
u'height': {'readonly': False},
u'content':
{'context': 'content',
'value':
{u'document':
{'value': lambda t: GLOBAL_ENTITIES[u'document']}}},
u'contentWindow':
{'context': 'content',
'value':
lambda t: {'value': GLOBAL_ENTITIES}},
u'_content': {'value': lambda t: GLOBAL_ENTITIES[u'content']},
u'gBrowser':
{'value':
{u'contentDocument':
{'context': 'content',
'value': lambda t: CONTENT_DOCUMENT},
u'contentWindow':
{'value':
lambda t: {'value': GLOBAL_ENTITIES}},
u'selectedTab':
{'readonly': False}}},
u'opener':
{'value':
lambda t: {'value': GLOBAL_ENTITIES}},
u'XPCNativeWrapper':
{'value':
{u'unwrap':
{'return': call_definitions.js_unwrap}},
'return': call_definitions.js_wrap},
# Preference creation in pref defaults files
u'pref': {'dangerous': actions._call_create_pref},
u'user_pref': {'dangerous': actions._call_create_pref},
u'unsafeWindow': {'dangerous': 'The use of unsafeWindow is insecure and '
'should be avoided whenever possible. '
'Consider using a different API if it is '
'available in order to achieve similar '
'functionality.'},
u'XPCOMUtils':
{'value': {u'categoryManager': {'value': CATEGORY_MANAGER}}},
u'MarionetteComponent': {'dangerous_on_read': MARIONETTE_MESSAGE},
u'MarionetteServer': {'dangerous_on_read': MARIONETTE_MESSAGE},
'Application': {'dangerous_on_read': fuel_error},
'NewTabURL': {'value': {'override': entity('NewTabURL.override')}},
'Proxy':
{'value':
{u'createFunction':
{'return': call_definitions.Proxy_deprec},
u'create':
{'return': call_definitions.Proxy_deprec}}},
# Common third-party libraries
'Handlebars': {
'value': {
'SafeString':
{'dangerous':
UNSAFE_TEMPLATE_METHOD % 'Handlebars.SafeString'}}},
# Angular
'$sce': {
'value': {
'trustAs': {'dangerous':
UNSAFE_TEMPLATE_METHOD % '$sce.trustAs'},
'trustAsHTML': {'dangerous':
UNSAFE_TEMPLATE_METHOD % '$sce.trustAsHTML'}}},
}
CONTENT_DOCUMENT = GLOBAL_ENTITIES[u'content']['value'][u'document']
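# Summary of the entity schema used throughout this module (a description of
# this file's own conventions, not an exhaustive specification):
#   'value'             - nested member entities, or a callable producing them
#   'dangerous' /
#   'dangerous_on_read' - warning message, message dict, or callable test
#   'readonly'          - True/False, a message dict, or a callable that
#                         guards overwrites of the entity
#   'return'            - handler (call_definitions/instanceactions/python_wrap)
#                         used when the entity is called
#   'xpcom_map'         - zero-argument callable returning the matching
#                         INTERFACES entry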
|
|
import json
import os
from cStringIO import StringIO
from nose.tools import assert_raises, assert_equal
from datetime import datetime, timedelta
import Image
from tg import config
from nose.tools import assert_equal
from ming.orm.ormsession import ThreadLocalORMSession
from paste.httpexceptions import HTTPFound
import allura
from allura import model as M
from allura.tests import TestController
from allura.tests import decorators as td
from allura.lib import helpers as h
class TestNeighborhood(TestController):
def setUp(self):
# change the override_root config value to change which root controller the test uses
self._make_app = allura.config.middleware.make_app
def make_app(global_conf, full_stack=True, **app_conf):
app_conf['override_root'] = 'test_neighborhood_root'
return self._make_app(global_conf, full_stack, **app_conf)
allura.config.middleware.make_app = make_app
super(TestNeighborhood, self).setUp()
def tearDown(self):
super(TestNeighborhood, self).tearDown()
allura.config.middleware.make_app = self._make_app
def test_home_project(self):
r = self.app.get('/adobe/wiki/')
assert r.location.endswith('/adobe/wiki/Home/')
r = r.follow()
assert 'Welcome' in str(r), str(r)
r = self.app.get('/adobe/admin/', extra_environ=dict(username='test-user'),
status=403)
def test_redirect(self):
r = self.app.post('/adobe/_admin/update',
params=dict(redirect='wiki/Home/'),
extra_environ=dict(username='root'))
r = self.app.get('/adobe/')
assert r.location.endswith('/adobe/wiki/Home/')
def test_admin(self):
r = self.app.get('/adobe/_admin/', extra_environ=dict(username='root'))
r = self.app.get('/adobe/_admin/overview', extra_environ=dict(username='root'))
r = self.app.get('/adobe/_admin/accolades', extra_environ=dict(username='root'))
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.features['google_analytics'] = True
r = self.app.post('/adobe/_admin/update',
params=dict(name='Mozq1', css='', homepage='# MozQ1!', tracking_id='U-123456'),
extra_environ=dict(username='root'))
r = self.app.post('/adobe/_admin/update',
params=dict(name='Mozq1', css='', homepage='# MozQ1!\n[Root]'),
extra_environ=dict(username='root'))
# make sure project_template is validated as proper json
r = self.app.post('/adobe/_admin/update',
params=dict(project_template='{'),
extra_environ=dict(username='root'))
assert 'Invalid JSON' in r
def test_admin_overview_audit_log(self):
def check_log(message):
return M.AuditLog.query.find({'message': message}).count() == 1
nbhd = M.Neighborhood.query.get(name='Projects')
nbhd.features['css'] = 'custom'
nbhd.features['google_analytics'] = True
params = {
'name': 'Pjs',
'redirect': 'http://fake.org/',
'show_title': 'false',
'allow_browse': 'false',
'css': '.class { border: 1px; }',
'tracking_id': 'U-123456',
'homepage': '[Homepage]',
'project_list_url': 'http://fake.org/project_list',
'project_template': '{"name": "template"}'
}
self.app.post('/p/_admin/update', params=params,
extra_environ=dict(username='root'))
        # must get as many log records as there are updated values
assert M.AuditLog.query.find().count() == len(params)
assert check_log('change neighborhood name to Pjs')
assert check_log('change neighborhood redirect to http://fake.org/')
assert check_log('change neighborhood show title to False')
assert check_log('change neighborhood allow browse to False')
assert check_log('change neighborhood css to .class { border: 1px; }')
assert check_log('change neighborhood homepage to [Homepage]')
assert check_log('change neighborhood project list url to '
'http://fake.org/project_list')
assert check_log('change neighborhood project template to '
'{"name": "template"}')
assert check_log('update neighborhood tracking_id')
def test_show_title(self):
r = self.app.get('/adobe/_admin/overview', extra_environ=dict(username='root'))
neighborhood = M.Neighborhood.query.get(name='Adobe')
        # if not set, show_title must default to True
assert neighborhood.show_title
# title should be present
assert 'class="project_title"' in str(r)
r = self.app.post('/adobe/_admin/update',
params=dict(name='Mozq1', css='',
homepage='# MozQ1!',
tracking_id='U-123456',
show_title='false'),
extra_environ=dict(username='root'))
# no title now
r = self.app.get('/adobe/', extra_environ=dict(username='root'))
assert 'class="project_title"' not in str(r)
r = self.app.get('/adobe/wiki/Home/',
extra_environ=dict(username='root'))
assert 'class="project_title"' not in str(r)
# title must be present on project page
r = self.app.get('/adobe/adobe-1/admin/',
extra_environ=dict(username='root'))
assert 'class="project_title"' in str(r)
def test_admin_stats_del_count(self):
neighborhood = M.Neighborhood.query.get(name='Adobe')
proj = M.Project.query.get(neighborhood_id=neighborhood._id)
proj.deleted = True
ThreadLocalORMSession.flush_all()
r = self.app.get('/adobe/_admin/stats/', extra_environ=dict(username='root'))
assert 'Deleted: 1' in r
assert 'Private: 0' in r
def test_admin_stats_priv_count(self):
neighborhood = M.Neighborhood.query.get(name='Adobe')
proj = M.Project.query.get(neighborhood_id=neighborhood._id)
proj.deleted = False
proj.private = True
ThreadLocalORMSession.flush_all()
r = self.app.get('/adobe/_admin/stats/', extra_environ=dict(username='root'))
assert 'Deleted: 0' in r
assert 'Private: 1' in r
def test_admin_stats_adminlist(self):
neighborhood = M.Neighborhood.query.get(name='Adobe')
proj = M.Project.query.get(neighborhood_id=neighborhood._id)
proj.private = False
ThreadLocalORMSession.flush_all()
r = self.app.get('/adobe/_admin/stats/adminlist', extra_environ=dict(username='root'))
pq = M.Project.query.find(dict(neighborhood_id=neighborhood._id, deleted=False))
pq.sort('name')
projects = pq.skip(0).limit(int(25)).all()
for proj in projects:
admin_role = M.ProjectRole.query.get(project_id=proj.root_project._id, name='Admin')
if admin_role is None:
continue
user_role_list = M.ProjectRole.query.find(dict(project_id=proj.root_project._id, name=None)).all()
for ur in user_role_list:
if ur.user is not None and admin_role._id in ur.roles:
assert proj.name in r
assert ur.user.username in r
def test_icon(self):
file_name = 'neo-icon-set-454545-256x350.png'
file_path = os.path.join(allura.__path__[0], 'nf', 'allura', 'images', file_name)
file_data = file(file_path).read()
upload = ('icon', file_name, file_data)
r = self.app.get('/adobe/_admin/', extra_environ=dict(username='root'))
r = self.app.post('/adobe/_admin/update',
params=dict(name='Mozq1', css='', homepage='# MozQ1'),
extra_environ=dict(username='root'), upload_files=[upload])
r = self.app.get('/adobe/icon')
image = Image.open(StringIO(r.body))
assert image.size == (48, 48)
r = self.app.get('/adobe/icon?foo=bar')
def test_google_analytics(self):
# analytics allowed
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.features['google_analytics'] = True
r = self.app.get('/adobe/_admin/overview', extra_environ=dict(username='root'))
assert 'Analytics Tracking ID' in r
r = self.app.get('/adobe/adobe-1/admin/overview', extra_environ=dict(username='root'))
assert 'Analytics Tracking ID' in r
r = self.app.post('/adobe/_admin/update',
params=dict(name='Adobe', css='', homepage='# MozQ1', tracking_id='U-123456'),
extra_environ=dict(username='root'))
r = self.app.post('/adobe/adobe-1/admin/update',
params=dict(tracking_id='U-654321'),
extra_environ=dict(username='root'))
r = self.app.get('/adobe/adobe-1/admin/overview', extra_environ=dict(username='root'))
assert "_add_tracking('nbhd', 'U-123456');" in r
assert "_add_tracking('proj', 'U-654321');" in r
# analytics not allowed
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.features['google_analytics'] = False
r = self.app.get('/adobe/_admin/overview', extra_environ=dict(username='root'))
assert 'Analytics Tracking ID' not in r
r = self.app.get('/adobe/adobe-1/admin/overview', extra_environ=dict(username='root'))
assert 'Analytics Tracking ID' not in r
r = self.app.get('/adobe/adobe-1/admin/overview', extra_environ=dict(username='root'))
assert "_add_tracking('nbhd', 'U-123456');" not in r
assert "_add_tracking('proj', 'U-654321');" not in r
def test_custom_css(self):
test_css = '.test{color:red;}'
custom_css = 'Custom CSS'
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.css = test_css
neighborhood.features['css'] = 'none'
r = self.app.get('/adobe/')
assert test_css not in r
r = self.app.get('/adobe/_admin/overview', extra_environ=dict(username='root'))
assert custom_css not in r
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.features['css'] = 'picker'
r = self.app.get('/adobe/')
while isinstance(r.response, HTTPFound):
r = r.follow()
assert test_css in r
r = self.app.get('/adobe/_admin/overview', extra_environ=dict(username='root'))
assert custom_css in r
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.features['css'] = 'custom'
r = self.app.get('/adobe/')
while isinstance(r.response, HTTPFound):
r = r.follow()
assert test_css in r
r = self.app.get('/adobe/_admin/overview', extra_environ=dict(username='root'))
assert custom_css in r
def test_picker_css(self):
neighborhood = M.Neighborhood.query.get(name='Adobe')
neighborhood.features['css'] = 'picker'
r = self.app.get('/adobe/_admin/overview', extra_environ=dict(username='root'))
assert 'Project title, font' in r
assert 'Project title, color' in r
assert 'Bar on top' in r
assert 'Title bar, background' in r
assert 'Title bar, foreground' in r
r = self.app.post('/adobe/_admin/update',
params={'name': 'Adobe',
'css': '',
'homepage': '',
'css-projecttitlefont': 'arial,sans-serif',
'css-projecttitlecolor': 'green',
'css-barontop': '#555555',
'css-titlebarbackground': '#333',
'css-titlebarcolor': '#444',
'css-addopt-icon-theme': 'dark'},
extra_environ=dict(username='root'), upload_files=[])
neighborhood = M.Neighborhood.query.get(name='Adobe')
assert '/*projecttitlefont*/.project_title{font-family:arial,sans-serif;}' in neighborhood.css
assert '/*projecttitlecolor*/.project_title{color:green;}' in neighborhood.css
assert '/*barontop*/.pad h2.colored {background-color:#555555; background-image: none;}' in neighborhood.css
assert '/*titlebarbackground*/.pad h2.title{background-color:#333; background-image: none;}' in neighborhood.css
assert "/*titlebarcolor*/.pad h2.title, .pad h2.title small a {color:#444;} "\
".pad h2.dark small b.ico {background-image: "\
"url('/nf/_ew_/theme/allura/images/neo-icon-set-ffffff-256x350.png');}" in neighborhood.css
def test_max_projects(self):
        # Set max value to unlimited
neighborhood = M.Neighborhood.query.get(name='Projects')
neighborhood.features['max_projects'] = None
r = self.app.post('/p/register',
params=dict(project_unixname='maxproject1', project_name='Max project1', project_description='', neighborhood='Projects'),
antispam=True,
extra_environ=dict(username='root'), status=302)
assert '/p/maxproject1/admin' in r.location
# Set max value to 0
neighborhood = M.Neighborhood.query.get(name='Projects')
neighborhood.features['max_projects'] = 0
r = self.app.post('/p/register',
params=dict(project_unixname='maxproject2', project_name='Max project2', project_description='', neighborhood='Projects'),
antispam=True,
extra_environ=dict(username='root'))
while isinstance(r.response, HTTPFound):
r = r.follow()
assert 'You have exceeded the maximum number of projects' in r
def test_project_rate_limit(self):
        # Set rate limit to unlimited
with h.push_config(config, **{'project.rate_limits': '{}'}):
r = self.app.post('/p/register',
params=dict(project_unixname='rateproject1', project_name='Rate project1', project_description='', neighborhood='Projects'),
antispam=True,
extra_environ=dict(username='test-user-1'), status=302)
assert '/p/rateproject1/admin' in r.location
# Set rate limit to 1 in first hour of user account
with h.push_config(config, **{'project.rate_limits': '{"3600": 1}'}):
r = self.app.post('/p/register',
params=dict(project_unixname='rateproject2', project_name='Rate project2', project_description='', neighborhood='Projects'),
antispam=True,
extra_environ=dict(username='test-user-1'))
while isinstance(r.response, HTTPFound):
r = r.follow()
assert 'Project creation rate limit exceeded. Please try again later.' in r
def test_project_rate_limit_admin(self):
        # Set rate limit to unlimited
with h.push_config(config, **{'project.rate_limits': '{}'}):
r = self.app.post('/p/register',
params=dict(project_unixname='rateproject1', project_name='Rate project1', project_description='', neighborhood='Projects'),
antispam=True,
extra_environ=dict(username='root'), status=302)
assert '/p/rateproject1/admin' in r.location
# Set rate limit to 1 in first hour of user account
with h.push_config(config, **{'project.rate_limits': '{"3600": 1}'}):
r = self.app.post('/p/register',
params=dict(project_unixname='rateproject2', project_name='Rate project2', project_description='', neighborhood='Projects'),
antispam=True,
extra_environ=dict(username='root'))
assert '/p/rateproject2/admin' in r.location
def test_invite(self):
p_nbhd_id = str(M.Neighborhood.query.get(name='Projects')._id)
r = self.app.get('/adobe/_moderate/', extra_environ=dict(username='root'))
r = self.app.post('/adobe/_moderate/invite',
params=dict(pid='adobe-1', invite='on', neighborhood_id=p_nbhd_id),
extra_environ=dict(username='root'))
r = self.app.get(r.location, extra_environ=dict(username='root'))
assert 'error' in r
r = self.app.post('/adobe/_moderate/invite',
params=dict(pid='no_such_user', invite='on', neighborhood_id=p_nbhd_id),
extra_environ=dict(username='root'))
r = self.app.get(r.location, extra_environ=dict(username='root'))
assert 'error' in r
r = self.app.post('/adobe/_moderate/invite',
params=dict(pid='test', invite='on', neighborhood_id=p_nbhd_id),
extra_environ=dict(username='root'))
r = self.app.get(r.location, extra_environ=dict(username='root'))
assert 'invited' in r, r
assert 'warning' not in r
r = self.app.post('/adobe/_moderate/invite',
params=dict(pid='test', invite='on', neighborhood_id=p_nbhd_id),
extra_environ=dict(username='root'))
r = self.app.get(r.location, extra_environ=dict(username='root'))
assert 'warning' in r
r = self.app.post('/adobe/_moderate/invite',
params=dict(pid='test', uninvite='on', neighborhood_id=p_nbhd_id),
extra_environ=dict(username='root'))
r = self.app.get(r.location, extra_environ=dict(username='root'))
assert 'uninvited' in r
assert 'warning' not in r
r = self.app.post('/adobe/_moderate/invite',
params=dict(pid='test', uninvite='on', neighborhood_id=p_nbhd_id),
extra_environ=dict(username='root'))
r = self.app.get(r.location, extra_environ=dict(username='root'))
assert 'warning' in r
r = self.app.post('/adobe/_moderate/invite',
params=dict(pid='test', invite='on', neighborhood_id=p_nbhd_id),
extra_environ=dict(username='root'))
r = self.app.get(r.location, extra_environ=dict(username='root'))
assert 'invited' in r
assert 'warning' not in r
def test_evict(self):
r = self.app.get('/adobe/_moderate/', extra_environ=dict(username='root'))
r = self.app.post('/adobe/_moderate/evict',
params=dict(pid='test'),
extra_environ=dict(username='root'))
r = self.app.get(r.location, extra_environ=dict(username='root'))
assert 'error' in r
r = self.app.post('/adobe/_moderate/evict',
params=dict(pid='adobe-1'),
extra_environ=dict(username='root'))
r = self.app.get(r.location, extra_environ=dict(username='root'))
assert 'adobe-1 evicted to Projects' in r
def test_home(self):
r = self.app.get('/adobe/')
def test_register(self):
r = self.app.get('/adobe/register', status=405)
r = self.app.post('/adobe/register',
params=dict(project_unixname='', project_name='Nothing', project_description='', neighborhood='Adobe'),
antispam=True,
extra_environ=dict(username='root'))
assert r.html.find('div', {'class':'error'}).string == 'Please enter a value'
r = self.app.post('/adobe/register',
params=dict(project_unixname='mymoz', project_name='My Moz', project_description='', neighborhood='Adobe'),
antispam=True,
extra_environ=dict(username='*anonymous'),
status=302)
r = self.app.post('/adobe/register',
params=dict(project_unixname='foo.mymoz', project_name='My Moz', project_description='', neighborhood='Adobe'),
antispam=True,
extra_environ=dict(username='root'))
assert r.html.find('div', {'class':'error'}).string == 'Please use only letters, numbers, and dashes 3-15 characters long.'
r = self.app.post('/p/register',
params=dict(project_unixname='test', project_name='Tester', project_description='', neighborhood='Projects'),
antispam=True,
extra_environ=dict(username='root'))
assert r.html.find('div', {'class':'error'}).string == 'This project name is taken.'
r = self.app.post('/adobe/register',
params=dict(project_unixname='mymoz', project_name='My Moz', project_description='', neighborhood='Adobe'),
antispam=True,
extra_environ=dict(username='root'),
status=302)
def test_register_private_fails_for_anon(self):
r = self.app.post(
'/p/register',
params=dict(
project_unixname='mymoz',
project_name='My Moz',
project_description='',
neighborhood='Projects',
private_project='on'),
antispam=True,
extra_environ=dict(username='*anonymous'),
status=302)
assert config.get('auth.login_url', '/auth/') in r.location, r.location
def test_register_private_fails_for_non_admin(self):
self.app.post(
'/p/register',
params=dict(
project_unixname='mymoz',
project_name='My Moz',
project_description='',
neighborhood='Projects',
private_project='on'),
antispam=True,
extra_environ=dict(username='test-user'),
status=403)
def test_register_private_fails_for_non_private_neighborhood(self):
# Turn off private
neighborhood = M.Neighborhood.query.get(name='Projects')
neighborhood.features['private_projects'] = False
r = self.app.get('/p/add_project', extra_environ=dict(username='root'))
assert 'private_project' not in r
assert_raises(ValueError,
self.app.post,
'/p/register',
params=dict(
project_unixname='myprivate1',
project_name='My Priv1',
project_description='',
neighborhood='Projects',
private_project='on'),
antispam=True,
extra_environ=dict(username='root'))
proj = M.Project.query.get(shortname='myprivate1', neighborhood_id=neighborhood._id)
assert proj is None
# Turn on private
neighborhood = M.Neighborhood.query.get(name='Projects')
neighborhood.features['private_projects'] = True
r = self.app.get('/p/add_project', extra_environ=dict(username='root'))
assert 'private_project' in r
self.app.post(
'/p/register',
params=dict(
project_unixname='myprivate2',
project_name='My Priv2',
project_description='',
neighborhood='Projects',
private_project='on'),
antispam=True,
extra_environ=dict(username='root'))
proj = M.Project.query.get(shortname='myprivate2', neighborhood_id=neighborhood._id)
assert proj.private
def test_register_private_ok(self):
r = self.app.post(
'/p/register',
params=dict(
project_unixname='mymoz',
project_name='My Moz',
project_description='',
neighborhood='Projects',
private_project='on',
tools='Wiki'),
antispam=True,
extra_environ=dict(username='root'),
status=302)
assert config.get('auth.login_url', '/auth/') not in r.location, r.location
r = self.app.get(
'/p/mymoz/wiki/',
extra_environ=dict(username='root')).follow(extra_environ=dict(username='root'), status=200)
r = self.app.get(
'/p/mymoz/wiki/',
extra_environ=dict(username='*anonymous'),
status=302)
assert config.get('auth.login_url', '/auth/') in r.location, r.location
self.app.get(
'/p/mymoz/wiki/',
extra_environ=dict(username='test-user'),
status=403)
def test_project_template(self):
icon_url = 'file://' + os.path.join(allura.__path__[0], 'nf', 'allura', 'images', 'neo-icon-set-454545-256x350.png')
test_groups = [{
"name": "Viewer", # group will be created, all params are valid
"permissions": ["read"],
"usernames": ["user01"]
}, {
"name": "", # group won't be created - invalid name
"permissions": ["read"],
"usernames": ["user01"]
}, {
"name": "TestGroup1", # group won't be created - invalid perm name
"permissions": ["foobar"],
"usernames": ["user01"]
}, {
"name": "TestGroup2", # will be created; 'inspect' perm ignored
"permissions": ["read", "inspect"],
"usernames": ["user01", "user02"]
}, {
"name": "TestGroup3", # will be created with no users in group
"permissions": ["admin"]
}]
r = self.app.post('/adobe/_admin/update', params=dict(name='Mozq1',
css='', homepage='# MozQ1!\n[Root]', project_template="""{
"private":true,
"icon":{
"url":"%s",
"filename":"icon.png"
},
"tools":{
"wiki":{
"label":"Wiki",
"mount_point":"wiki",
"options":{
"show_right_bar":false,
"show_left_bar":false,
"show_discussion":false,
"some_url": "http://foo.com/$shortname/"
},
"home_text":"My home text!"
},
"discussion":{"label":"Discussion","mount_point":"discussion"},
"blog":{"label":"News","mount_point":"news","options":{
"show_discussion":false
}},
"downloads":{"label":"Downloads","mount_point":"downloads"},
"admin":{"label":"Admin","mount_point":"admin"}
},
"tool_order":["wiki","discussion","news","downloads","admin"],
"labels":["mmi"],
"trove_cats":{
"topic":[247],
"developmentstatus":[11]
},
"groups": %s
}""" % (icon_url, json.dumps(test_groups))),
extra_environ=dict(username='root'))
r = self.app.post(
'/adobe/register',
params=dict(
project_unixname='testtemp',
project_name='Test Template',
project_description='',
neighborhood='Mozq1',
private_project='off'),
antispam=True,
extra_environ=dict(username='root'),
status=302).follow()
p = M.Project.query.get(shortname='testtemp')
# make sure the correct tools got installed in the right order
top_nav = r.html.find('div', {'id':'top_nav'})
assert top_nav.contents[1]['href'] == '/adobe/testtemp/wiki/'
assert 'Wiki' in top_nav.contents[1].contents[0]
assert top_nav.contents[3]['href'] == '/adobe/testtemp/discussion/'
assert 'Discussion' in top_nav.contents[3].contents[0]
assert top_nav.contents[5]['href'] == '/adobe/testtemp/news/'
assert 'News' in top_nav.contents[5].contents[0]
assert top_nav.contents[7]['href'] == '/adobe/testtemp/admin/'
assert 'Admin' in top_nav.contents[7].contents[0]
# make sure project is private
r = self.app.get(
'/adobe/testtemp/wiki/',
extra_environ=dict(username='root')).follow(extra_environ=dict(username='root'), status=200)
r = self.app.get(
'/adobe/testtemp/wiki/',
extra_environ=dict(username='*anonymous'),
status=302)
# check the labels and trove cats
r = self.app.get('/adobe/testtemp/admin/trove')
assert 'mmi' in r
assert 'Topic :: Communications :: Telephony' in r
assert 'Development Status :: 5 - Production/Stable' in r
# check the wiki text
r = self.app.get('/adobe/testtemp/wiki/').follow()
assert "My home text!" in r
# check tool options
opts = p.app_config('wiki').options
assert_equal(False, opts.show_discussion)
assert_equal(False, opts.show_left_bar)
assert_equal(False, opts.show_right_bar)
assert_equal("http://foo.com/testtemp/", opts.some_url)
# check that custom groups/perms/users were setup correctly
roles = p.named_roles
for group in test_groups:
name = group.get('name')
permissions = group.get('permissions', [])
usernames = group.get('usernames', [])
if name in ('Viewer', 'TestGroup2', 'TestGroup3'):
role = M.ProjectRole.by_name(name, project=p)
# confirm role created in project
assert role in roles
for perm in permissions:
# confirm valid permissions added to role, and invalid
# permissions ignored
if perm in p.permissions:
assert M.ACE.allow(role._id, perm) in p.acl
else:
assert M.ACE.allow(role._id, perm) not in p.acl
# confirm valid users received role
for username in usernames:
user = M.User.by_username(username)
if user and user._id:
assert role in user.project_role(project=p).roles
# confirm roles with invalid json data are not created
if name in ('', 'TestGroup1'):
assert name not in roles
def test_name_suggest(self):
r = self.app.get('/p/suggest_name?project_name=My+Moz')
assert_equal(r.json, dict(suggested_name='mymoz'))
r = self.app.get('/p/suggest_name?project_name=Te%st!')
assert_equal(r.json, dict(suggested_name='test'))
def test_name_check(self):
r = self.app.get('/p/check_names?unix_name=My+Moz')
assert r.json['unixname_message'] == 'Please use only letters, numbers, and dashes 3-15 characters long.'
r = self.app.get('/p/check_names?unix_name=Te%st!')
assert r.json['unixname_message'] == 'Please use only letters, numbers, and dashes 3-15 characters long.'
r = self.app.get('/p/check_names?unix_name=mymoz')
assert_equal(r.json['unixname_message'], False)
r = self.app.get('/p/check_names?unix_name=test')
assert r.json['unixname_message'] == 'This project name is taken.'
@td.with_tool('test/sub1', 'Wiki', 'wiki')
def test_neighborhood_project(self):
self.app.get('/adobe/adobe-1/admin/', status=200)
self.app.get('/p/test/sub1/wiki/')
self.app.get('/p/test/sub1/', status=302)
self.app.get('/p/test/no-such-app/', status=404)
def test_neighborhood_namespace(self):
# p/test exists, so try creating adobe/test
self.app.get('/adobe/test/wiki/', status=404)
r = self.app.post('/adobe/register',
params=dict(project_unixname='test', project_name='Test again', project_description='', neighborhood='Adobe', tools='Wiki'),
antispam=True,
extra_environ=dict(username='root'))
assert r.status_int == 302, r.html.find('div', {'class':'error'}).string
r = self.app.get('/adobe/test/wiki/').follow(status=200)
def test_neighborhood_awards(self):
file_name = 'adobe_icon.png'
file_path = os.path.join(allura.__path__[0], 'public', 'nf', 'images', file_name)
file_data = file(file_path).read()
upload = ('icon', file_name, file_data)
r = self.app.get('/adobe/_admin/awards', extra_environ=dict(username='root'))
r = self.app.post('/adobe/_admin/awards/create',
params=dict(short='FOO', full='A basic foo award'),
extra_environ=dict(username='root'), upload_files=[upload])
r = self.app.post('/adobe/_admin/awards/create',
params=dict(short='BAR', full='A basic bar award with no icon'),
extra_environ=dict(username='root'))
foo_id = str(M.Award.query.find(dict(short='FOO')).first()._id)
bar_id = str(M.Award.query.find(dict(short='BAR')).first()._id)
r = self.app.post('/adobe/_admin/awards/%s/update' % bar_id,
params=dict(short='BAR2', full='Updated description.'),
extra_environ=dict(username='root')).follow().follow()
assert 'BAR2' in r
assert 'Updated description.' in r
r = self.app.get('/adobe/_admin/awards/%s' % foo_id, extra_environ=dict(username='root'))
r = self.app.get('/adobe/_admin/awards/%s/icon' % foo_id, extra_environ=dict(username='root'))
image = Image.open(StringIO(r.body))
assert image.size == (48, 48)
self.app.post('/adobe/_admin/awards/grant',
params=dict(grant='FOO', recipient='adobe-1'),
extra_environ=dict(username='root'))
self.app.get('/adobe/_admin/awards/%s/adobe-1' % foo_id, extra_environ=dict(username='root'))
self.app.post('/adobe/_admin/awards/%s/adobe-1/revoke' % foo_id,
extra_environ=dict(username='root'))
self.app.post('/adobe/_admin/awards/%s/delete' % foo_id,
extra_environ=dict(username='root'))
def test_add_a_project_link(self):
# Install Home tool for all neighborhoods
for nb in M.Neighborhood.query.find().all():
p = nb.neighborhood_project
p.install_app('home', 'home', 'Home', ordinal=0)
r = self.app.get('/p/')
assert 'Add a Project' in r
r = self.app.get('/u/', extra_environ=dict(username='test-user'))
assert 'Add a Project' not in r
r = self.app.get('/adobe/', extra_environ=dict(username='test-user'))
assert 'Add a Project' not in r
r = self.app.get('/u/', extra_environ=dict(username='root'))
assert 'Add a Project' in r
r = self.app.get('/adobe/', extra_environ=dict(username='root'))
assert 'Add a Project' in r
def test_help(self):
r = self.app.get('/p/_admin/help/', extra_environ=dict(username='root'))
assert 'macro' in r
@td.with_user_project('test-user')
def test_profile_topnav_menu(self):
r = self.app.get('/u/test-user/', extra_environ=dict(username='test-user')).follow()
assert '<a href="/u/test-user/profile/" class="ui-icon-tool-home">' in r
def test_more_projects_link(self):
r = self.app.get('/adobe/adobe-1/admin/')
link = r.html.find('div', {'class':'neighborhood_title_link'}).find('a')
assert 'View More Projects' in str(link)
assert link['href'] == '/adobe/'
|
|
from __future__ import absolute_import
import os
import sys
import re
import textwrap
import site
import scripttest
import virtualenv
from tests.lib.path import Path, curdir, u
DATA_DIR = Path(__file__).folder.folder.join("data").abspath
SRC_DIR = Path(__file__).abspath.folder.folder.folder
pyversion = sys.version[:3]
pyversion_tuple = sys.version_info
def path_to_url(path):
"""
    Convert a path to a URI. The path will be made absolute and
will not have quoted path parts.
(adapted from pip.util)
"""
path = os.path.normpath(os.path.abspath(path))
drive, path = os.path.splitdrive(path)
filepath = path.split(os.path.sep)
url = '/'.join(filepath)
if drive:
return 'file:///' + drive + url
return 'file://' + url
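# Hedged sketch (paths are made up and the helper is never invoked): what
# path_to_url() is expected to return for each platform family.
def _path_to_url_examples():
    if os.name == 'posix':
        # The drive part is empty on POSIX, so exactly two slashes follow 'file:'.
        assert path_to_url('/tmp/data/packages') == 'file:///tmp/data/packages'
    else:
        # On Windows the drive letter is kept after 'file:///'.
        assert path_to_url('C:\\data\\packages') == 'file:///C:/data/packages'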
class TestData(object):
"""
Represents a bundle of pre-created test data.
    This copies a pristine set of test data into a root location that is
    designed to be test specific. The reason for this is that when tests run
    concurrently, errors can be generated because the related tooling uses
    the directory as a work space, leading to two concurrent processes
    trampling over each other. This class gets around that by copying all
    of the data into a per-test directory and operating on the copy.
"""
def __init__(self, root, source=None):
self.source = source or DATA_DIR
self.root = Path(root).abspath
@classmethod
def copy(cls, root):
obj = cls(root)
obj.reset()
return obj
def reset(self):
self.root.rmtree()
self.source.copytree(self.root)
@property
def packages(self):
return self.root.join("packages")
@property
def packages2(self):
return self.root.join("packages2")
@property
def packages3(self):
return self.root.join("packages3")
@property
def src(self):
return self.root.join("src")
@property
def indexes(self):
return self.root.join("indexes")
@property
def reqfiles(self):
return self.root.join("reqfiles")
@property
def find_links(self):
return path_to_url(self.packages)
@property
def find_links2(self):
return path_to_url(self.packages2)
@property
def find_links3(self):
return path_to_url(self.packages3)
def index_url(self, index="simple"):
return path_to_url(self.root.join("indexes", index))
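# Hedged usage sketch (the temporary directory name is made up): a test
# typically takes a private copy of the data and points pip at the copied
# package index, e.g.
#   data = TestData.copy(Path('/tmp/pip-test-xyz/data'))
#   script.pip('install', '--no-index', '--find-links', data.find_links, 'foo')
# Working on a per-test copy is what avoids the concurrent-run clashes
# described in the class docstring.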
class TestFailure(AssertionError):
"""
An "assertion" failed during testing.
"""
pass
class TestPipResult(object):
def __init__(self, impl, verbose=False):
self._impl = impl
if verbose:
print(self.stdout)
if self.stderr:
print('======= stderr ========')
print(self.stderr)
print('=======================')
def __getattr__(self, attr):
return getattr(self._impl, attr)
if sys.platform == 'win32':
@property
def stdout(self):
return self._impl.stdout.replace('\r\n', '\n')
@property
def stderr(self):
return self._impl.stderr.replace('\r\n', '\n')
def __str__(self):
return str(self._impl).replace('\r\n', '\n')
else:
# Python doesn't automatically forward __str__ through __getattr__
def __str__(self):
return str(self._impl)
def assert_installed(self, pkg_name, editable=True, with_files=[],
without_files=[], without_egg_link=False,
use_user_site=False, sub_dir=False):
e = self.test_env
if editable:
pkg_dir = e.venv / 'src' / pkg_name.lower()
# If package was installed in a sub directory
if sub_dir:
pkg_dir = pkg_dir / sub_dir
else:
without_egg_link = True
pkg_dir = e.site_packages / pkg_name
if use_user_site:
egg_link_path = e.user_site / pkg_name + '.egg-link'
else:
egg_link_path = e.site_packages / pkg_name + '.egg-link'
if without_egg_link:
if egg_link_path in self.files_created:
raise TestFailure(
'unexpected egg link file created: %r\n%s' %
(egg_link_path, self)
)
else:
if egg_link_path not in self.files_created:
raise TestFailure(
'expected egg link file missing: %r\n%s' %
(egg_link_path, self)
)
egg_link_file = self.files_created[egg_link_path]
# FIXME: I don't understand why there's a trailing . here
if not (egg_link_file.bytes.endswith('\n.') and
egg_link_file.bytes[:-2].endswith(pkg_dir)):
raise TestFailure(textwrap.dedent(u('''\
Incorrect egg_link file %r
Expected ending: %r
------- Actual contents -------
%s
-------------------------------''' % (
egg_link_file,
pkg_dir + '\n.',
repr(egg_link_file.bytes))
)))
if use_user_site:
pth_file = e.user_site / 'easy-install.pth'
else:
pth_file = e.site_packages / 'easy-install.pth'
if (pth_file in self.files_updated) == without_egg_link:
raise TestFailure('%r unexpectedly %supdated by install' % (
pth_file, (not without_egg_link and 'not ' or '')))
if (pkg_dir in self.files_created) == (curdir in without_files):
raise TestFailure(textwrap.dedent('''\
expected package directory %r %sto be created
actually created:
%s
''') % (
pkg_dir,
(curdir in without_files and 'not ' or ''),
sorted(self.files_created.keys())))
for f in with_files:
if not (pkg_dir / f).normpath in self.files_created:
raise TestFailure(
'Package directory %r missing expected content %r' %
(pkg_dir, f)
)
for f in without_files:
if (pkg_dir / f).normpath in self.files_created:
raise TestFailure(
                    'Package directory %r has unexpected content %r' %
(pkg_dir, f)
)
class PipTestEnvironment(scripttest.TestFileEnvironment):
"""
A specialized TestFileEnvironment for testing pip
"""
#
# Attribute naming convention
# ---------------------------
#
# Instances of this class have many attributes representing paths
# in the filesystem. To keep things straight, absolute paths have
# a name of the form xxxx_path and relative paths have a name that
# does not end in '_path'.
exe = sys.platform == 'win32' and '.exe' or ''
verbose = False
def __init__(self, base_path, *args, **kwargs):
# Make our base_path a test.lib.path.Path object
base_path = Path(base_path)
# Store paths related to the virtual environment
_virtualenv = kwargs.pop("virtualenv")
path_locations = virtualenv.path_locations(_virtualenv)
# Make sure we have test.lib.path.Path objects
venv, lib, include, bin = map(Path, path_locations)
# workaround for https://github.com/pypa/virtualenv/issues/306
if hasattr(sys, "pypy_version_info"):
lib = os.path.join(venv, 'lib-python', pyversion)
self.venv_path = venv
self.lib_path = lib
self.include_path = include
self.bin_path = bin
if hasattr(sys, "pypy_version_info"):
self.site_packages_path = self.venv_path.join("site-packages")
else:
self.site_packages_path = self.lib_path.join("site-packages")
self.user_base_path = self.venv_path.join("user")
self.user_bin_path = self.user_base_path.join(
self.bin_path - self.venv_path
)
self.user_site_path = self.venv_path.join(
"user",
site.USER_SITE[len(site.USER_BASE) + 1:],
)
# Create a Directory to use as a scratch pad
self.scratch_path = base_path.join("scratch").mkdir()
# Set our default working directory
kwargs.setdefault("cwd", self.scratch_path)
# Setup our environment
environ = kwargs.get("environ")
if environ is None:
environ = os.environ.copy()
environ["PATH"] = Path.pathsep.join(
[self.bin_path] + [environ.get("PATH", [])],
)
environ["PYTHONUSERBASE"] = self.user_base_path
# Writing bytecode can mess up updated file detection
environ["PYTHONDONTWRITEBYTECODE"] = "1"
kwargs["environ"] = environ
# Call the TestFileEnvironment __init__
super(PipTestEnvironment, self).__init__(base_path, *args, **kwargs)
# Expand our absolute path directories into relative
for name in ["base", "venv", "lib", "include", "bin", "site_packages",
"user_base", "user_site", "user_bin", "scratch"]:
real_name = "%s_path" % name
setattr(self, name, getattr(self, real_name) - self.base_path)
# Make sure temp_path is a Path object
self.temp_path = Path(self.temp_path)
# Ensure the tmp dir exists, things break horribly if it doesn't
self.temp_path.mkdir()
# create easy-install.pth in user_site, so we always have it updated
# instead of created
self.user_site_path.makedirs()
self.user_site_path.join("easy-install.pth").touch()
def _ignore_file(self, fn):
if fn.endswith('__pycache__') or fn.endswith(".pyc"):
result = True
else:
result = super(PipTestEnvironment, self)._ignore_file(fn)
return result
def run(self, *args, **kw):
if self.verbose:
print('>> running %s %s' % (args, kw))
cwd = kw.pop('cwd', None)
run_from = kw.pop('run_from', None)
assert not cwd or not run_from, "Don't use run_from; it's going away"
cwd = cwd or run_from or self.cwd
return TestPipResult(
super(PipTestEnvironment, self).run(cwd=cwd, *args, **kw),
verbose=self.verbose,
)
def pip(self, *args, **kwargs):
# On old versions of Python, urllib3/requests will raise a warning
# about the lack of an SSLContext. Expect it when running commands
# that will touch the outside world.
if (pyversion_tuple < (2, 7, 9) and
args and args[0] in ('search', 'install', 'download')):
kwargs['expect_stderr'] = True
return self.run("pip", *args, **kwargs)
def pip_install_local(self, *args, **kwargs):
return self.pip(
"install", "--no-index",
"--find-links", path_to_url(os.path.join(DATA_DIR, "packages")),
*args, **kwargs
)
# FIXME ScriptTest does something similar, but only within a single
# ProcResult; this generalizes it so states can be compared across
# multiple commands. Maybe should be rolled into ScriptTest?
def diff_states(start, end, ignore=None):
"""
Differences two "filesystem states" as represented by dictionaries
of FoundFile and FoundDir objects.
Returns a dictionary with following keys:
``deleted``
Dictionary of files/directories found only in the start state.
``created``
Dictionary of files/directories found only in the end state.
``updated``
Dictionary of files whose size has changed (FIXME not entirely
reliable, but comparing contents is not possible because
FoundFile.bytes is lazy, and comparing mtime doesn't help if
we want to know if a file has been returned to its earlier
state).
Ignores mtime and other file attributes; only presence/absence and
size are considered.
"""
ignore = ignore or []
def prefix_match(path, prefix):
if path == prefix:
return True
prefix = prefix.rstrip(os.path.sep) + os.path.sep
return path.startswith(prefix)
start_keys = set([k for k in start.keys()
if not any([prefix_match(k, i) for i in ignore])])
end_keys = set([k for k in end.keys()
if not any([prefix_match(k, i) for i in ignore])])
deleted = dict([(k, start[k]) for k in start_keys.difference(end_keys)])
created = dict([(k, end[k]) for k in end_keys.difference(start_keys)])
updated = {}
for k in start_keys.intersection(end_keys):
if (start[k].size != end[k].size):
updated[k] = end[k]
return dict(deleted=deleted, created=created, updated=updated)
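# Hedged sketch (file names and sizes are made up, never invoked): diff_states()
# only looks at key membership and the .size attribute of the values, so tiny
# stand-ins are enough to illustrate the shape of the returned dict.
def _diff_states_example():
    class _Entry(object):
        def __init__(self, size):
            self.size = size
    start = {'a.txt': _Entry(1), 'b.txt': _Entry(2), 'build/tmp': _Entry(3)}
    end = {'b.txt': _Entry(9), 'c.txt': _Entry(4), 'build/out': _Entry(5)}
    diff = diff_states(start, end, ignore=['build'])
    # 'build/...' entries are skipped because ignore entries match by path prefix.
    assert sorted(diff['deleted']) == ['a.txt']
    assert sorted(diff['created']) == ['c.txt']
    assert sorted(diff['updated']) == ['b.txt']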
def assert_all_changes(start_state, end_state, expected_changes):
"""
Fails if anything changed that isn't listed in the
expected_changes.
start_state is either a dict mapping paths to
scripttest.[FoundFile|FoundDir] objects or a TestPipResult whose
files_before we'll test. end_state is either a similar dict or a
TestPipResult whose files_after we'll test.
Note: listing a directory means anything below
that directory can be expected to have changed.
"""
__tracebackhide__ = True
start_files = start_state
end_files = end_state
if isinstance(start_state, TestPipResult):
start_files = start_state.files_before
if isinstance(end_state, TestPipResult):
end_files = end_state.files_after
diff = diff_states(start_files, end_files, ignore=expected_changes)
if list(diff.values()) != [{}, {}, {}]:
raise TestFailure('Unexpected changes:\n' + '\n'.join(
[k + ': ' + ', '.join(v.keys()) for k, v in diff.items()]))
# Don't throw away this potentially useful information
return diff
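# Hedged usage sketch (names are made up): callers usually pass the result of
# one command as the start state and a later result (or its files_after) as the
# end state, listing the path prefixes that are allowed to differ, e.g.
#   assert_all_changes(install_result, uninstall_result,
#                      [script.venv / 'build', 'cache'])
# Any other created, deleted or resized path raises TestFailure.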
def _create_test_package_with_subdirectory(script, subdirectory):
script.scratch_path.join("version_pkg").mkdir()
version_pkg_path = script.scratch_path / 'version_pkg'
version_pkg_path.join("version_pkg.py").write(textwrap.dedent("""
def main():
print('0.1')
"""))
version_pkg_path.join("setup.py").write(
textwrap.dedent("""
from setuptools import setup, find_packages
setup(name='version_pkg',
version='0.1',
packages=find_packages(),
py_modules=['version_pkg'],
entry_points=dict(console_scripts=['version_pkg=version_pkg:main']))
"""))
subdirectory_path = version_pkg_path.join(subdirectory)
subdirectory_path.mkdir()
subdirectory_path.join('version_subpkg.py').write(textwrap.dedent("""
def main():
print('0.1')
"""))
subdirectory_path.join('setup.py').write(
textwrap.dedent("""
from setuptools import setup, find_packages
setup(name='version_subpkg',
version='0.1',
packages=find_packages(),
py_modules=['version_subpkg'],
entry_points=dict(console_scripts=['version_pkg=version_subpkg:main']))
"""))
script.run('git', 'init', cwd=version_pkg_path)
script.run('git', 'add', '.', cwd=version_pkg_path)
script.run(
'git', 'commit', '-q',
'--author', 'pip <pypa-dev@googlegroups.com>',
'-am', 'initial version', cwd=version_pkg_path
)
return version_pkg_path
def _create_test_package(script, name='version_pkg', vcs='git'):
script.scratch_path.join(name).mkdir()
version_pkg_path = script.scratch_path / name
version_pkg_path.join("%s.py" % name).write(textwrap.dedent("""
def main():
print('0.1')
"""))
version_pkg_path.join("setup.py").write(textwrap.dedent("""
from setuptools import setup, find_packages
setup(
name='{name}',
version='0.1',
packages=find_packages(),
py_modules=['{name}'],
entry_points=dict(console_scripts=['{name}={name}:main'])
)
""".format(name=name)))
if vcs == 'git':
script.run('git', 'init', cwd=version_pkg_path)
script.run('git', 'add', '.', cwd=version_pkg_path)
script.run(
'git', 'commit', '-q',
'--author', 'pip <pypa-dev@googlegroups.com>',
'-am', 'initial version', cwd=version_pkg_path,
)
elif vcs == 'hg':
script.run('hg', 'init', cwd=version_pkg_path)
script.run('hg', 'add', '.', cwd=version_pkg_path)
script.run(
'hg', 'commit', '-q',
'--user', 'pip <pypa-dev@googlegroups.com>',
'-m', 'initial version', cwd=version_pkg_path,
)
elif vcs == 'svn':
repo_url = _create_svn_repo(script, version_pkg_path)
script.run(
'svn', 'checkout', repo_url, 'pip-test-package',
cwd=script.scratch_path
)
checkout_path = script.scratch_path / 'pip-test-package'
# svn internally stores windows drives as uppercase; we'll match that.
checkout_path = checkout_path.replace('c:', 'C:')
version_pkg_path = checkout_path
elif vcs == 'bazaar':
script.run('bzr', 'init', cwd=version_pkg_path)
script.run('bzr', 'add', '.', cwd=version_pkg_path)
script.run(
'bzr', 'whoami', 'pip <pypa-dev@googlegroups.com>',
cwd=version_pkg_path)
script.run(
'bzr', 'commit', '-q',
'--author', 'pip <pypa-dev@googlegroups.com>',
'-m', 'initial version', cwd=version_pkg_path,
)
else:
raise ValueError('Unknown vcs: %r' % vcs)
return version_pkg_path
def _create_svn_repo(script, version_pkg_path):
repo_url = path_to_url(
script.scratch_path / 'pip-test-package-repo' / 'trunk')
script.run(
'svnadmin', 'create', 'pip-test-package-repo',
cwd=script.scratch_path
)
script.run(
'svn', 'import', version_pkg_path, repo_url,
'-m', 'Initial import of pip-test-package',
cwd=script.scratch_path
)
return repo_url
def _change_test_package_version(script, version_pkg_path):
version_pkg_path.join("version_pkg.py").write(textwrap.dedent('''\
def main():
print("some different version")'''))
script.run(
'git', 'clean', '-qfdx',
cwd=version_pkg_path,
expect_stderr=True,
)
script.run(
'git', 'commit', '-q',
'--author', 'pip <pypa-dev@googlegroups.com>',
'-am', 'messed version',
cwd=version_pkg_path,
expect_stderr=True,
)
def assert_raises_regexp(exception, reg, run, *args, **kwargs):
"""Like assertRaisesRegexp in unittest"""
__tracebackhide__ = True
try:
run(*args, **kwargs)
assert False, "%s should have been thrown" % exception
except Exception:
e = sys.exc_info()[1]
p = re.compile(reg)
assert p.search(str(e)), str(e)
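# Hedged usage sketch (callable and pattern are made up):
#   assert_raises_regexp(ValueError, 'not a valid .* name',
#                        validate_name, 'bad--name')
# Note that only the exception message is matched against the pattern; the
# exception type passed in is used for the failure message only.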
|
|
"""Viewfinder production scripts
Cookbook:
* Launch new instances: specify NodeType,Name,Zone
$ fab create_instance:STAGING,STAGING_003,us-east-1c
* Resume installation of failed instance creation (eg: stuck in state 'pending' or not ssh-able):
$ fab create_instance:STAGING,STAGING_003,us-east-1c,i-9a0cdbe9
* Stop and destroy a running instance:
$ fab destroy_instance:STAGING,i-9a0cdbe9
* Deploy env/code changes and restart:
$ fab nodetype:STAGING deploy
$ fab nodetype:PROD deploy
The region currently defaults to 'us-east-1'. When running in multiple regions, add a region:<...> task.
"""
import os
import re
import subprocess
import time
from collections import defaultdict
from fabric.api import *
from fabric.operations import *
from fabric.network import NetworkError
from fabric.state import output
from fabric.utils import *
from fabric.contrib.files import *
from fabric.contrib.project import rsync_project
from viewfinder.backend.base import util
from viewfinder.backend.prod import ec2_utils
kVFPassphraseFile = '~/.ssh/vf-passphrase'
kInstanceType = 'm1.medium'
env.user = 'ec2-user'
env.key_filename = '~/.ssh/wwwkey.pem'
env.region = 'us-east-1'
env.nodetype = None
# Disable various output levels.
output['running'] = False
output['stdout'] = False
output['warnings'] = False
# Amazon Linux AMI. Note that these are region-specific; this one is for
# us-east. List is at http://aws.amazon.com/amazon-linux-ami/
BASE_AMI = 'ami-3275ee5b'
def runs_last(func):
"""Decorator to run a function only on the last invocation.
  We determine last by comparing the number of times called with the size of env.hosts.
Return None on all invocations but the last one where we return the function return value.
"""
def Wrapper():
calls = func.num_host_calls
if calls >= len(env.hosts) - 1:
return func()
else:
func.num_host_calls = calls + 1
return None
setattr(func, 'num_host_calls', 0)
return Wrapper
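# Hedged sketch (not an existing task in this fabfile): typical use is a
# finalization step that must run exactly once, after fabric has invoked it
# for every host in env.hosts.
@runs_last
def _example_final_summary():
  # Calls for all hosts but the last return None from the wrapper; only the
  # final host's invocation reaches this body.
  return 'processed %d hosts' % len(env.hosts)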
def fprint(string):
host_str = '[%s] ' % env.host_string if env.host_string else ''
time_str = time.strftime("%H:%M:%S")
puts('%s%s %s' % (host_str, time_str, string), show_prefix=False, end='\n')
def fprompt(text, default=None, validate=None):
host_str = '[%s] ' % env.host_string if env.host_string else ''
time_str = time.strftime("%H:%M:%S")
return prompt('%s%s %s' % (host_str, time_str, text), default=default, validate=validate)
def load_passphrase_from_file():
"""Read the viewfinder passphrase from local file."""
vf_path = os.path.expanduser(kVFPassphraseFile)
assert os.access(vf_path, os.F_OK) and os.access(vf_path, os.R_OK), '%s must exist and be readable' % vf_path
with open(vf_path) as f:
user_data = f.read()
return user_data.strip('\n')
def get_ami_metadata():
"""Fetch ami metadata for the local instance. Returns a dict of 'key':'value'. eg: 'instance-id':'i-e7f7e69b'."""
res = {}
base_url = 'http://169.254.169.254/latest/meta-data'
instance_id = run('curl %s/instance-id' % base_url)
assert re.match('i-[0-9a-f]{8}', instance_id)
res['instance-id'] = instance_id
return res
@task
def get_healthz():
"""Fetch healthz status for the local instance."""
url = 'https://localhost:8443/healthz'
ret = 'FAIL'
with settings(warn_only=True):
ret = run('curl -k %s' % url)
fprint('Healthz status: %s' % ret)
return ret == 'OK'
@task
def nodetype(typ):
"""Specify node type: STAGING or PROD."""
# Don't override hosts if specified on the command line.
if not env.hosts:
env.hosts = ec2_utils.ListInstancesDNS(region='us-east-1', node_types=[typ], states=['running'])
env.nodetype = typ
def is_old_env():
"""Return True if ~/env is old-style (plain directory) or False if new style (symlink).
No ~/env returns False.
"""
env_exists = exists('~/env')
if not env_exists:
    # No such directory or link.
return False
with settings(warn_only=True):
is_link = run('readlink ~/env')
if is_link.return_code == 0:
# This is a symlink. New-style environment.
return False
return True
def is_old_code():
"""Return True if ~/viewfinder is old-style (plain directory) or False if new style (symlink).
No ~/viewfinder returns False.
"""
code_exists = exists('~/viewfinder')
if not code_exists:
    # No such directory or link.
return False
with settings(warn_only=True):
is_link = run('readlink ~/viewfinder')
if is_link.return_code == 0:
# This is a symlink. New-style code.
return False
return True
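# Hedged illustration (the revision suffix is made up): "old style" means a real
# directory at ~/env or ~/viewfinder, while "new style" keeps a versioned
# directory plus a symlink that marks the active revision, e.g.
#   ~/env.0f3a9c/        installed environment for one prod-requirements rev
#   ~/env -> env.0f3a9c  symlink flipped by virtualenv_activate()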
def get_file_suffix(prefix, filename):
# Depending on how the linking was done, the destination could be absolute or relative, with or without '/'.
result = re.match(r'^(?:/home/%s/)?%s.([0-9a-f]+)/?$' % (env.user, prefix), filename)
if result is None or len(result.groups()) != 1:
return None
return result.groups()[0]
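# Hedged sketch (suffixes are made up, never invoked): the target shapes that
# get_file_suffix() accepts when env.user is 'ec2-user', covering absolute and
# relative link targets with or without a trailing slash.
def _get_file_suffix_examples():
  assert get_file_suffix('env', '/home/ec2-user/env.0f3a9c/') == '0f3a9c'
  assert get_file_suffix('env', 'env.0f3a9c') == '0f3a9c'
  assert get_file_suffix('env', '/tmp/env.0f3a9c') is None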
def get_link_suffix(symlink):
"""Follow 'symlink' in ~/ and determine the suffix for the target (of the form <symlink>.[a-f0-9]+.
Returns None if the symlink does not exist or is not a symlink.
"""
with settings(warn_only=True):
if not exists('~/%s' % symlink):
return None
target = run('readlink ~/%s' % symlink)
if target.return_code != 0:
return None
suffix = get_file_suffix(symlink, target)
assert suffix is not None, 'Could not determine suffix from filename %s' % target
return suffix
def active_env():
"""Return the revision ID of the current environment, or None if it does not exist or cannot be determined."""
return get_link_suffix('env')
def active_code():
"""Return the revision ID of the current code, or None if it does not exist or cannot be determined."""
return get_link_suffix('viewfinder')
def latest_requirements_revision():
"""Return the revision ID of the last change to the prod-requirements file.
hg log lists all revisions, regardless of what we're synced to. -r :. shows all entries up to the currently-synced
point. However, they are listed in reverse order (older first, latest last), so we must tail it.
"""
return local('hg log -r :. --template "{node|short}\n" scripts/prod-requirements.txt | tail -n 1', capture=True)
def hg_revision():
"""Returns the HG revision."""
return local('hg identify -i', capture=True)
def hg_revision_timestamp(rev):
"""Returns the timestamp (in seconds) of 'rev', or None if cannot be determined.
  Since mq (and non-linear history in general) makes it possible to have revisions dated
before their true "commit" date, we must find the newest ancestor of the given revision.
"""
try:
revset = 'last(sort(ancestors(%s), date))' % rev
res = subprocess.check_output(['hg', 'log', '-r', revset, '--template', '{date}'], stderr=subprocess.STDOUT)
return float(res.strip())
except subprocess.CalledProcessError:
return None
@runs_once
def code_prep():
"""Generate the code tarball and return the HG revision."""
rev = hg_revision()
assert not rev.endswith('+'), 'Client has pending changes, cannot install.'
fprint('Preparing local code tarball (rev %s)' % rev)
filename = 'viewfinder.%s.tar.gz' % rev
local('hg identify -i > hg_revision.txt')
local('tar czf %s --exclude "*.o" --exclude "*~" --exclude "*.pyc" __init__.py scripts/ marketing/ backend/ resources/ secrets/viewfinder.co hg_revision.txt' % filename)
return rev
@runs_last
def code_cleanup():
"""Delete the generated tarball and revision file."""
fprint('Cleaning up local code')
local('rm -f hg_revision.txt viewfinder.*.tar.gz')
@task
def code_install():
"""Install latest code from local directory.
We put the current hg revision in a file, generate a local tarball, copy it to the instance and untar.
code_prep() and code_cleanup() are run the first and last time respectively.
"""
assert env.host_string, "no hosts specified"
assert not is_old_code(), 'Active code is using the old style (directory instead of symlink). ' \
'Manual intervention required'
# code_prep is only run the first time. Subsequent runs return the same value as the first time.
rev = code_prep()
if code_verify(rev):
return
fprint('Installing code (rev %s)' % rev)
filename = 'viewfinder.%s.tar.gz' % rev
dirname = 'viewfinder.%s' % rev
put(filename, '~/%s' % filename)
run('mkdir -p ~/%s' % dirname)
# TODO: purge old pycs
with cd('~/%s' % dirname):
run('tar xzvf ../%s' % filename)
  # HACK: the local viewfinder/pythonpath directory has testing garbage in it,
  # so until we fix the push to use the hg manifest, recreate it on the other
  # side instead of syncing it.
run('mkdir -p ~/%s/pythonpath' % dirname)
with cd('~/%s/pythonpath' % dirname):
run('ln -f -s ~/%s viewfinder' % dirname)
# Delete the tarball. We never reuse it anyway.
run('rm -f ~/%s' % filename)
# code_cleanup is run on the last invocation (based on the size of env.hosts).
code_cleanup()
@task
def code_activate(requirements_revision=None):
"""Make the code at revision active (latest if None)."""
assert not is_old_code(), 'Active code is old-style (directory, not symlink). Manual intervention required!'
req_rev = requirements_revision or hg_revision()
assert code_verify(req_rev), 'Desired code revision %s invalid, cannot be made active' % req_rev
# Note: -T forces the target to be treated as a normal file. Without it, the link will be:
# ~/viewfinder/viewfinder.<rev> -> ~/viewfinder.<rev> instead of being in the home directory.
run('ln -T -s -f ~/viewfinder.%s ~/viewfinder' % req_rev)
fprint('Code at revision %s marked active.' % req_rev)
@task
def code_verify(revision=None):
"""Verify the code for a given revision (latest if None).
We only check the symlink. TODO: find a way to validate the code itself."""
if is_old_code():
fprint('installed code is in the old style (directory instead of symlink). Manual intervention required')
return False
rev = revision or hg_revision()
if exists('~/viewfinder.%s' % rev):
fprint('Code at revision %s is installed' % rev)
return True
else:
fprint('Code at revision %s is not installed' % rev)
return False
@task
def virtualenv_install():
"""Install the latest virtual environment if needed.
We do nothing if the env is already the latest.
We do install the new environment even if we are using the old style.
This does not activate (symlink) the newly installed environment.
"""
# Installs the latest virtual environment from the local prod-requirements.txt.
prod_rev = latest_requirements_revision()
assert re.match(r'[0-9a-f]+', prod_rev)
active_env_rev = active_env()
if prod_rev == active_env_rev:
assert virtualenv_verify(prod_rev), 'Active environment is not valid'
return
env_dir = 'env.%s' % prod_rev
package_dir = 'python-package.%s' % prod_rev
requirements_file = 'prod-requirements.txt.%s' % prod_rev
if exists(env_dir):
fprint('prod-requirements (rev %s) already installed, but not active.' % prod_rev)
else:
fprint('installing environment from prod-requirements (rev %s)' % prod_rev)
run('rm -rf ~/%s ~/%s ~/%s' % (env_dir, package_dir, requirements_file))
rsync_project(local_dir='third_party/python-package/', remote_dir='~/%s/' % package_dir, ssh_opts='-o StrictHostKeyChecking=no')
put('scripts/prod-requirements.txt', '~/%s' % requirements_file)
run('python2.7 ~/%s/virtualenv.py --never-download ~/%s/viewfinder' % (package_dir, env_dir))
# Let fabric surface the failure.
run('~/%s/viewfinder/bin/pip install -f file://$HOME/%s --no-index -r ~/%s' %
(env_dir, package_dir, requirements_file))
# Do not delete the prod-requirements file when done as we may use it to verify the environment later.
@task
def virtualenv_activate(requirements_revision=None):
"""Make the virtual env at revision active (latest if None)."""
assert not is_old_env(), 'Active environment is old-style (directory, not symlink). Manual intervention required!'
req_rev = requirements_revision or latest_requirements_revision()
assert virtualenv_verify(req_rev), 'Desired env revision %s invalid, cannot be made active' % req_rev
# Create sitecustomize.py file, which sets default str encoding as UTF-8.
# See http://blog.ianbicking.org/illusive-setdefaultencoding.html.
env_dir = 'env.%s' % req_rev
run('echo "import sys; sys.setdefaultencoding(\'utf-8\')" > %s/viewfinder/lib/python2.7/sitecustomize.py' % env_dir);
  # Note: -T forces the target to be treated as a normal file. Without it, the link would be created as:
  # ~/env/env.<rev> -> ~/env.<rev> instead of being in the home directory.
run('ln -T -s -f ~/env.%s ~/env' % req_rev)
fprint('Environment at rev %s marked active.' % req_rev)
@task
def virtualenv_verify(requirements_revision=None):
"""Verify the virtual environment for a given revision (latest if None)."""
req_rev = requirements_revision or latest_requirements_revision()
env_dir = 'env.%s' % req_rev
package_dir = 'python-package.%s' % req_rev
requirements_file = 'prod-requirements.txt.%s' % req_rev
with settings(warn_only=True):
out = run('~/%s/viewfinder/bin/pip install -f file://$HOME/%s --no-index -r ~/%s --no-install --no-download -q' % (env_dir, package_dir, requirements_file))
if out.return_code == 0:
fprint('Valid virtual environment for prod-requirements (rev %s)' % req_rev)
return True
else:
fprint('Bad virtual environment for prod-requirements (rev %s)' % req_rev)
return False
@task
def install_crontab():
"""Install or remove crontab for given node type."""
assert env.nodetype, 'no nodetype specified'
assert env.host_string, 'no hosts specified'
cron_file = '~/viewfinder/scripts/crontab.%s' % env.nodetype.lower()
# Run 'crontab <filename>' if the remote file exists, otherwise run 'crontab -r'.
# Warn only as 'crontab -r' fails if no crontab is installed.
with settings(warn_only=True):
run('if [ -e %s ]; then crontab %s; else crontab -r; fi' % (cron_file, cron_file))
@task
def yum_install():
"""Install required yum packages."""
fprint('Installing yum packages.')
sudo('yum -y update')
sudo('yum -y install make zlib gcc gcc-c++ openssl-devel python27 python27-devel libcurl-devel pcre-devel')
@task
def haproxy_install():
"""Install and configure haproxy.
HAProxy is not controlled by the prod-requirements file, and not easily versioned. As such, we install it in its
own directory.
TODO(marc): replace with yum package once 1.5 is stable and rolled out to AWS.
"""
# rsync the haproxy source.
fprint('Rsync thirdparty/haproxy ~/haproxy')
rsync_project(local_dir='third_party/haproxy/', remote_dir='~/haproxy/', ssh_opts='-o StrictHostKeyChecking=no')
  # Build haproxy and install it in ~/bin.
fprint('Building haproxy')
run('haproxy/build.sh ~/')
# Concatenate the certificate and key into a single file (this is expected by haproxy) and push it.
fprint('Generating viewfinder.pem for haproxy')
vf_passphrase = load_passphrase_from_file()
# Staging and prod use the same certs.
local('scripts/generate_haproxy_certificate.sh viewfinder.co %s viewfinder.pem' % vf_passphrase)
run('mkdir -p ~/conf')
run('rm -f ~/conf/viewfinder.pem')
put('viewfinder.pem', '~/conf/viewfinder.pem')
run('chmod 400 ~/conf/viewfinder.pem')
# Remove local file.
local('rm -f viewfinder.pem')
# Install the config files.
fprint('Pushing haproxy configs')
assert env.nodetype, 'no nodetype specified'
run('ln -f -s ~/viewfinder/scripts/haproxy.conf ~/conf/haproxy.conf')
run('ln -f -s ~/viewfinder/scripts/haproxy.redirect.%s.conf ~/conf/haproxy.redirect.conf' % env.nodetype.lower())
def setup_instance(zone, name, existing_instance_id=None):
if not existing_instance_id:
region_zones = ec2_utils.GetELBZones(env.region, node_types=[env.nodetype])
assert zone, 'Availability zone not specified, available zones are: %s' % ' '.join(region_zones)
user_data = load_passphrase_from_file()
instance_id = ec2_utils.RunInstance(env.region, BASE_AMI, 'wwwkey', kInstanceType,
availability_zone=zone, user_data=user_data)
fprint('Launched new instance: %s' % instance_id)
else:
instance_id = existing_instance_id
fprint('Resuming setup of instance %s' % instance_id)
fprint('Adding tags NodeType=%s and Name=%s to instance %s' % (env.nodetype, name, instance_id))
ec2_utils.CreateTag(env.region, instance_id, 'NodeType', env.nodetype)
ec2_utils.CreateTag(env.region, instance_id, 'Name', name)
for i in range(60):
match = ec2_utils.GetInstance(env.region, instance_id)
if match is None:
fprint('Instance %s does not exist yet; waiting.' % instance_id)
elif match.state != 'running':
fprint('Instance %s in state %s; waiting.' % (instance_id, match.state))
else:
break
time.sleep(2)
else:
fprint('Timed out waiting for instance: %s' % instance_id)
raise Exception("timeout")
assert match is not None and match.state == 'running'
instance_hostname = match.public_dns_name
fprint('Instance %s in state "running". Public DNS: %s' % (instance_id, instance_hostname))
with settings(host_string=instance_hostname):
for i in range(60):
try:
run("true")
break
except NetworkError:
fprint('Waiting for instance to be sshable: %s' % instance_id)
# don't retry too aggressively, it looks like we get blocked by a
# firewall for too many failed attempts
time.sleep(3)
else:
fprint('timed out waiting for sshability')
raise Exception("timeout")
# Install required packages.
yum_install()
return instance_id, instance_hostname
@task
def drain():
"""Drain nodes of a given type.
This removes the instance from the region load balancers for this instance type (STAGING or PROD).
"""
ami = get_ami_metadata()
instance_id = ami['instance-id']
ec2_utils.RemoveELBInstance(env.region, instance_id, env.nodetype)
fprint('Removed instance %s from %s load balancers' % (instance_id, env.nodetype))
@task
def undrain():
"""Undrain nodes of a given type.
This adds the instance to the region load balancers for this instance type (STAGING or PROD).
After addition, we query the load balancers until the instance health is InService.
"""
ami = get_ami_metadata()
instance_id = ami['instance-id']
fprint('Waiting for healthy backend')
num_healthz_ok = 0
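# Require three consecutive successful healthz checks before re-registering with the load
# balancers; a single failed check resets the counter so a backend that is still starting up
# is not re-added prematurely.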
for i in range(60):
if get_healthz():
num_healthz_ok += 1
if num_healthz_ok >= 3:
break
else:
num_healthz_ok = 0
time.sleep(2)
if num_healthz_ok < 3:
raise Exception('healthz timeout')
ec2_utils.AddELBInstance(env.region, instance_id, env.nodetype)
fprint('Added instance %s to %s load balancers' % (instance_id, env.nodetype))
for i in range(60):
health = ec2_utils.GetELBInstanceHealth(env.region, instance_id, node_types=[env.nodetype])
if health is None:
fprint('No load balancer health information for instance %s; waiting.' % instance_id)
elif health == 'InService':
fprint('Load balancer health for instance %s is InService.' % instance_id)
return
else:
fprint('Load balancer health information for instance %s is %s; waiting.' % (instance_id, health))
time.sleep(2)
raise Exception('timeout')
def check_min_healthy_instances(min_healthy):
"""Lookup the number of instances by ELB state and assert if the minimum required is not met."""
healthy = ec2_utils.GetELBInstancesByHealth(env.region, node_types=[env.nodetype])
num_healthy = len(healthy['InService'])
assert num_healthy >= min_healthy, 'Not enough backends with healthy ELB status (%d vs %d)' % \
(num_healthy, min_healthy)
@task
def create_instance(nodetype, name, zone, existing_instance_id=None):
"""Create a new instance. Specify NodeType,Name,AvailabilityZone,[id_to_resume]."""
env.nodetype = nodetype
# Names must be unique across all node types.
named_instances = ec2_utils.ListInstances(env.region, names=[name])
if named_instances:
assert len(named_instances) == 1, 'Multiple instances found with name %s' % name
prev_id = named_instances[0].id
assert existing_instance_id is not None and existing_instance_id == prev_id, \
'Name %s already in use by instance %s' % (name, prev_id)
assert name.startswith(nodetype), 'Instance name must start with %s' % nodetype
instance_id, instance_hostname = setup_instance(zone, name, existing_instance_id=existing_instance_id)
with settings(host_string=instance_hostname):
deploy(new_instance=True)
@task
def destroy_instance(nodetype, instance_id):
"""Stop and terminate an instance. Specify NodeType and InstanceID."""
env.nodetype = nodetype
instance = ec2_utils.GetInstance(env.region, instance_id)
assert instance, 'Instance %s not found' % instance_id
with settings(host_string=instance.public_dns_name):
if instance.state == 'running':
check_min_healthy_instances(3)
drain()
stop()
fprint('Terminating instance %s' % instance_id)
ec2_utils.TerminateInstance(env.region, instance_id)
@task
def restart():
"""Restart supervisord and its managed jobs."""
fprint('Restarting supervisord')
sudo('cp ~ec2-user/viewfinder/scripts/supervisord.d /etc/init.d/supervisord')
sudo('/etc/init.d/supervisord restart')
@task
def stop():
"""Stop supervisord and its managed jobs."""
# TODO(marc): we should eventually use supervisordctl, but sending SIGTERM shuts it down properly for now.
fprint('Stopping supervisord')
# If we attempt a "deploy" with a new instance that hasn't been setup yet, we'll have no supervisord script to copy.
with settings(warn_only=True):
# We copy it since this may be the first call to supervisord.
# TODO(marc): remove 'cp' once supervisord init script is installed everywhere.
sudo('cp ~ec2-user/viewfinder/scripts/supervisord.d /etc/init.d/supervisord')
sudo('/etc/init.d/supervisord stop')
@task
def drainrestart():
"""Drain and restart nodes."""
check_min_healthy_instances(2)
drain()
# Stop first to make sure we no longer use the viewfinder init scripts.
stop()
restart()
undrain()
@task
def deploy(new_instance=False):
"""Deploy latest environment and code and restart backends."""
# Run yum update/install first. We may have new dependencies.
yum_install()
# Push and build haproxy.
haproxy_install()
# Stage code, environment, and crontab.
virtualenv_install()
code_install()
install_crontab()
if not new_instance:
# Remove backend from load balancer and stop. This would fail on non-running instances.
drain()
stop()
# Flip symlinks.
virtualenv_activate()
code_activate()
# Restart backend and re-add to load balancer.
restart()
undrain()
@task
def status():
"""Overall production status."""
cl_timestamps = defaultdict(str)
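# Cache of hg revision -> ISO8601 timestamp; unknown revisions default to an empty string so
# the status tables below always format cleanly.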
def _ResolveCLDate(rev):
if rev == '??' or rev in cl_timestamps.keys():
return
ts = hg_revision_timestamp(rev)
if ts is not None:
cl_timestamps[rev] = util.TimestampUTCToISO8601(ts)
env_rev = latest_requirements_revision()
code_rev = hg_revision()
_ResolveCLDate(env_rev)
_ResolveCLDate(code_rev)
print '=' * 80
print 'Local environment:'
print ' Env revision: %s (%s)' % (env_rev, cl_timestamps.get(env_rev, '??'))
print ' Code revision: %s (%s)' % (code_rev, cl_timestamps.get(code_rev, '??'))
for nodetype in ec2_utils.kValidNodeTypes:
elbs = ec2_utils.GetLoadBalancers(env.region, [nodetype])
assert len(elbs) == 1, 'Need exactly one %s load balancer in %s' % (nodetype, env.region)
elb = elbs[0]
instances = ec2_utils.ListInstances(env.region, node_types=[nodetype])
elb_zones = {z:0 for z in elb.availability_zones}
elb_health = {h.instance_id: h.state for h in elb.get_instance_health()}
for i in instances:
id = i.id
if i.state != 'running':
continue
zone = i.placement
if zone in elb_zones.keys():
elb_zones[zone] += 1
if id in elb_health:
setattr(i, '_elb_health', elb_health[id])
with settings(host_string=i.public_dns_name):
setattr(i, '_env_rev', active_env() or '??')
setattr(i, '_code_rev', active_code() or '??')
_ResolveCLDate(i._env_rev)
_ResolveCLDate(i._code_rev)
print '\n%s' % ('=' * 80)
print '%s ELB: %s' % (nodetype, elb.name)
print ' # Running instances by ELB zone:'
zone_str = ', '.join(['%s: %s' % (k, v) for k, v in elb_zones.iteritems()])
print ' %s' % zone_str
print ''
print '%s instances: %d' % (nodetype, len(instances))
print ' # %-8s %-12s %-13s %-10s %-12s %-13s %-10s %-13s %-10s' % \
('ID', 'Name', 'State', 'Zone', 'ELB state', 'Active env', 'Env date', 'Active code', 'Code date')
for i in instances:
env_rev = getattr(i, '_env_rev', '')
code_rev = getattr(i, '_code_rev', '')
print ' %-10s %-12s %-13s %-10s %-12s %-13s %-10s %-13s %-10s' % (i.id,
i.tags.get('Name', ''),
i.state,
i.placement,
getattr(i, '_elb_health', ''),
env_rev,
cl_timestamps[env_rev],
code_rev,
cl_timestamps[code_rev])
if instances and code_rev and cl_timestamps.get(code_rev):
# If the deployed revision exists locally, create a bookmark to it (one per nodetype).
# This allows queries like these (some aliases are defined in viewfinder.hgrc)
# hg log -r ::.-::deployed_staging
# hg log -r ::deployed_staging-::deployed_prod
local("hg bookmark -f -r %s deployed_%s" % (code_rev, nodetype.lower()))
@task
def cleanup():
"""Cleanup old env and code."""
assert env.host_string
# Search for active env.
active_env_rev = active_env()
assert active_env_rev, 'No active env, this could be a problem; aborting.'
active_env_date = hg_revision_timestamp(active_env_rev)
assert active_env_date, 'Could not determine timestamp for active env revision %s; aborting.' % active_env_rev
fprint('Current active environment is revision %s (%s)' %
(active_env_rev, util.TimestampUTCToISO8601(active_env_date)))
# Search for, and iterate over, all environments.
installed_env_revs = run('ls -d ~/env.*')
for r in installed_env_revs.split():
if not r.strip():
continue
rev = get_file_suffix('env', r)
if not rev:
continue
if rev == active_env_rev:
continue
ts = hg_revision_timestamp(rev)
if not ts:
continue
if ts >= active_env_date:
fprint('Env revision %s (%s) newer than active env revision %s (%s); skipping.' %
(rev, util.TimestampUTCToISO8601(ts), active_env_rev, util.TimestampUTCToISO8601(active_env_date)))
continue
answer = fprompt('Delete unused environment revision %s (%s)?' % (rev, util.TimestampUTCToISO8601(ts)),
default='N', validate='[yYnN]')
if answer == 'n' or answer == 'N':
continue
run('rm -r -f env.%s prod-requirements.txt.%s python-package.%s' % (rev, rev, rev))
fprint('Deleted environment revision %s (%s)' % (rev, util.TimestampUTCToISO8601(ts)))
# Search for active code.
active_code_rev = active_code()
assert active_code_rev, 'No active code, this could be a problem; aborting.'
active_code_date = hg_revision_timestamp(active_code_rev)
assert active_code_date, 'Could not determine timestamp for active code revision %s; aborting.' % active_code_rev
fprint('Current active code is revision %s (%s)' %
(active_code_rev, util.TimestampUTCToISO8601(active_code_date)))
# Search for, and iterate over, all code.
installed_code_revs = run('ls -d ~/viewfinder.*')
for r in installed_code_revs.split():
if not r.strip():
continue
rev = get_file_suffix('viewfinder', r)
if not rev:
continue
if rev == active_code_rev:
continue
ts = hg_revision_timestamp(rev)
if not ts:
continue
if ts >= active_code_date:
fprint('Code revision %s (%s) newer than active code revision %s (%s); skipping.' %
(rev, util.TimestampUTCToISO8601(ts), active_code_rev, util.TimestampUTCToISO8601(active_code_date)))
continue
answer = fprompt('Delete unused code revision %s (%s)?' % (rev, util.TimestampUTCToISO8601(ts)),
default='N', validate='[yYnN]')
if answer == 'n' or answer == 'N':
continue
run('rm -r -f viewfinder.%s' % rev)
fprint('Deleted code revision %s (%s)' % (rev, util.TimestampUTCToISO8601(ts)))
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles database requests from other nova services."""
import copy
import itertools
from oslo import messaging
import six
from nova.api.ec2 import ec2utils
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.db import base
from nova import exception
from nova.i18n import _, _LE
from nova import image
from nova import manager
from nova import network
from nova.network.security_group import openstack_driver
from nova import notifications
from nova import objects
from nova.objects import base as nova_object
from nova.openstack.common import excutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import client as scheduler_client
from nova.scheduler import driver as scheduler_driver
from nova.scheduler import utils as scheduler_utils
LOG = logging.getLogger(__name__)
# Instead of having a huge list of arguments to instance_update(), we just
# accept a dict of fields to update and use this whitelist to validate it.
allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
'power_state', 'access_ip_v4', 'access_ip_v6',
'launched_at', 'terminated_at', 'host', 'node',
'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
'instance_type_id', 'root_device_name', 'launched_on',
'progress', 'vm_mode', 'default_ephemeral_device',
'default_swap_device', 'root_device_name',
'system_metadata', 'updated_at'
]
# Fields that we want to convert back into a datetime object.
datetime_fields = ['launched_at', 'terminated_at', 'updated_at']
class ConductorManager(manager.Manager):
"""Mission: Conduct things.
The methods in the base API for nova-conductor are various proxy operations
performed on behalf of the nova-compute service running on compute nodes.
Compute nodes are not allowed to directly access the database, so this set
of methods allows them to get specific work done without locally accessing
the database.
The nova-conductor service also exposes an API in the 'compute_task'
namespace. See the ComputeTaskManager class for details.
"""
target = messaging.Target(version='2.0')
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self._network_api = None
self._compute_api = None
self.compute_task_mgr = ComputeTaskManager()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.additional_endpoints.append(self.compute_task_mgr)
@property
def network_api(self):
# NOTE(danms): We need to instantiate our network_api on first use
# to avoid the circular dependency that exists between our init
# and network_api's
if self._network_api is None:
self._network_api = network.API()
return self._network_api
@property
def compute_api(self):
if self._compute_api is None:
self._compute_api = compute_api.API()
return self._compute_api
def ping(self, context, arg):
# NOTE(russellb) This method can be removed in 2.0 of this API. It is
# now a part of the base rpc API.
return jsonutils.to_primitive({'service': 'conductor', 'arg': arg})
@messaging.expected_exceptions(KeyError, ValueError,
exception.InvalidUUID,
exception.InstanceNotFound,
exception.UnexpectedTaskStateError)
def instance_update(self, context, instance_uuid,
updates, service):
for key, value in updates.iteritems():
if key not in allowed_updates:
LOG.error(_("Instance update attempted for "
"'%(key)s' on %(instance_uuid)s"),
{'key': key, 'instance_uuid': instance_uuid})
raise KeyError("unexpected update keyword '%s'" % key)
if key in datetime_fields and isinstance(value, six.string_types):
updates[key] = timeutils.parse_strtime(value)
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance_uuid, updates)
notifications.send_update(context, old_ref, instance_ref, service)
return jsonutils.to_primitive(instance_ref)
@messaging.expected_exceptions(exception.InstanceNotFound)
def instance_get_by_uuid(self, context, instance_uuid,
columns_to_join):
return jsonutils.to_primitive(
self.db.instance_get_by_uuid(context, instance_uuid,
columns_to_join))
def instance_get_all_by_host(self, context, host, node,
columns_to_join):
if node is not None:
result = self.db.instance_get_all_by_host_and_node(
context.elevated(), host, node)
else:
result = self.db.instance_get_all_by_host(context.elevated(), host,
columns_to_join)
return jsonutils.to_primitive(result)
def migration_get_in_progress_by_host_and_node(self, context,
host, node):
migrations = self.db.migration_get_in_progress_by_host_and_node(
context, host, node)
return jsonutils.to_primitive(migrations)
@messaging.expected_exceptions(exception.AggregateHostExists)
def aggregate_host_add(self, context, aggregate, host):
host_ref = self.db.aggregate_host_add(context.elevated(),
aggregate['id'], host)
return jsonutils.to_primitive(host_ref)
@messaging.expected_exceptions(exception.AggregateHostNotFound)
def aggregate_host_delete(self, context, aggregate, host):
self.db.aggregate_host_delete(context.elevated(),
aggregate['id'], host)
def aggregate_metadata_get_by_host(self, context, host,
key='availability_zone'):
result = self.db.aggregate_metadata_get_by_host(context, host, key)
return jsonutils.to_primitive(result)
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed, update_cells):
if [bw_in, bw_out, last_ctr_in, last_ctr_out].count(None) != 4:
self.db.bw_usage_update(context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed,
update_cells=update_cells)
usage = self.db.bw_usage_get(context, uuid, start_period, mac)
return jsonutils.to_primitive(usage)
def provider_fw_rule_get_all(self, context):
rules = self.db.provider_fw_rule_get_all(context)
return jsonutils.to_primitive(rules)
# NOTE(danms): This can be removed in version 3.0 of the RPC API
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
info = self.db.agent_build_get_by_triple(context, hypervisor, os,
architecture)
return jsonutils.to_primitive(info)
def block_device_mapping_update_or_create(self, context, values, create):
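# create=True forces a create, create=False forces an update of values['id'], and create=None
# lets the DB layer decide (update-or-create). The resulting BDM is then propagated to the
# top-level cell.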
if create is None:
bdm = self.db.block_device_mapping_update_or_create(context,
values)
elif create is True:
bdm = self.db.block_device_mapping_create(context, values)
else:
bdm = self.db.block_device_mapping_update(context,
values['id'],
values)
bdm_obj = objects.BlockDeviceMapping._from_db_object(
context, objects.BlockDeviceMapping(), bdm)
self.cells_rpcapi.bdm_update_or_create_at_top(context, bdm_obj,
create=create)
def block_device_mapping_get_all_by_instance(self, context, instance,
legacy):
bdms = self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid'])
if legacy:
bdms = block_device.legacy_mapping(bdms)
return jsonutils.to_primitive(bdms)
def instance_get_all_by_filters(self, context, filters, sort_key,
sort_dir, columns_to_join,
use_slave):
result = self.db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir,
columns_to_join=columns_to_join, use_slave=use_slave)
return jsonutils.to_primitive(result)
def instance_get_active_by_window(self, context, begin, end,
project_id, host):
# Unused, but cannot remove until major RPC version bump
result = self.db.instance_get_active_by_window(context, begin, end,
project_id, host)
return jsonutils.to_primitive(result)
def instance_get_active_by_window_joined(self, context, begin, end,
project_id, host):
result = self.db.instance_get_active_by_window_joined(
context, begin, end, project_id, host)
return jsonutils.to_primitive(result)
def instance_destroy(self, context, instance):
result = self.db.instance_destroy(context, instance['uuid'])
return jsonutils.to_primitive(result)
def instance_fault_create(self, context, values):
result = self.db.instance_fault_create(context, values)
return jsonutils.to_primitive(result)
# NOTE(kerrin): The last_refreshed argument is unused by this method
# and can be removed in v3.0 of the RPC API.
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed, update_totals):
vol_usage = self.db.vol_usage_update(context, vol_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance['uuid'],
instance['project_id'],
instance['user_id'],
instance['availability_zone'],
update_totals)
# We have just updated the database, so send the notification now
self.notifier.info(context, 'volume.usage',
compute_utils.usage_volume_info(vol_usage))
@messaging.expected_exceptions(exception.ComputeHostNotFound,
exception.HostBinaryNotFound)
def service_get_all_by(self, context, topic, host, binary):
if not any((topic, host, binary)):
result = self.db.service_get_all(context)
elif all((topic, host)):
if topic == 'compute':
result = self.db.service_get_by_compute_host(context, host)
# FIXME(comstud) Potentially remove this on bump to v3.0
result = [result]
else:
result = self.db.service_get_by_host_and_topic(context,
host, topic)
elif all((host, binary)):
result = self.db.service_get_by_args(context, host, binary)
elif topic:
result = self.db.service_get_all_by_topic(context, topic)
elif host:
result = self.db.service_get_all_by_host(context, host)
return jsonutils.to_primitive(result)
@messaging.expected_exceptions(exception.InstanceActionNotFound)
def action_event_start(self, context, values):
evt = self.db.action_event_start(context, values)
return jsonutils.to_primitive(evt)
@messaging.expected_exceptions(exception.InstanceActionNotFound,
exception.InstanceActionEventNotFound)
def action_event_finish(self, context, values):
evt = self.db.action_event_finish(context, values)
return jsonutils.to_primitive(evt)
def service_create(self, context, values):
svc = self.db.service_create(context, values)
return jsonutils.to_primitive(svc)
@messaging.expected_exceptions(exception.ServiceNotFound)
def service_destroy(self, context, service_id):
self.db.service_destroy(context, service_id)
def compute_node_create(self, context, values):
result = self.db.compute_node_create(context, values)
return jsonutils.to_primitive(result)
def compute_node_update(self, context, node, values):
result = self.db.compute_node_update(context, node['id'], values)
return jsonutils.to_primitive(result)
def compute_node_delete(self, context, node):
result = self.db.compute_node_delete(context, node['id'])
return jsonutils.to_primitive(result)
@messaging.expected_exceptions(exception.ServiceNotFound)
def service_update(self, context, service, values):
svc = self.db.service_update(context, service['id'], values)
return jsonutils.to_primitive(svc)
def task_log_get(self, context, task_name, begin, end, host, state):
result = self.db.task_log_get(context, task_name, begin, end, host,
state)
return jsonutils.to_primitive(result)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items, message):
result = self.db.task_log_begin_task(context.elevated(), task_name,
begin, end, host, task_items,
message)
return jsonutils.to_primitive(result)
def task_log_end_task(self, context, task_name, begin, end, host,
errors, message):
result = self.db.task_log_end_task(context.elevated(), task_name,
begin, end, host, errors, message)
return jsonutils.to_primitive(result)
def notify_usage_exists(self, context, instance, current_period,
ignore_missing_network_data,
system_metadata, extra_usage_info):
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period,
ignore_missing_network_data,
system_metadata, extra_usage_info)
def security_groups_trigger_handler(self, context, event, args):
self.security_group_api.trigger_handler(event, context, *args)
def security_groups_trigger_members_refresh(self, context, group_ids):
self.security_group_api.trigger_members_refresh(context, group_ids)
def network_migrate_instance_start(self, context, instance, migration):
self.network_api.migrate_instance_start(context, instance, migration)
def network_migrate_instance_finish(self, context, instance, migration):
self.network_api.migrate_instance_finish(context, instance, migration)
def quota_commit(self, context, reservations, project_id=None,
user_id=None):
quota.QUOTAS.commit(context, reservations, project_id=project_id,
user_id=user_id)
def quota_rollback(self, context, reservations, project_id=None,
user_id=None):
quota.QUOTAS.rollback(context, reservations, project_id=project_id,
user_id=user_id)
def get_ec2_ids(self, context, instance):
ec2_ids = {}
ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(instance['uuid'])
ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(context,
instance['image_ref'])
for image_type in ['kernel', 'ramdisk']:
image_id = instance.get('%s_id' % image_type)
if image_id is not None:
ec2_image_type = ec2utils.image_type(image_type)
ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id,
ec2_image_type)
ec2_ids['%s-id' % image_type] = ec2_id
return ec2_ids
def compute_unrescue(self, context, instance):
self.compute_api.unrescue(context, instance)
def _object_dispatch(self, target, method, context, args, kwargs):
"""Dispatch a call to an object method.
This ensures that object methods get called and any exception
that is raised gets wrapped in an ExpectedException for forwarding
back to the caller (without spamming the conductor logs).
"""
try:
# NOTE(danms): Keep the getattr inside the try block since
# a missing method is really a client problem
return getattr(target, method)(context, *args, **kwargs)
except Exception:
raise messaging.ExpectedException()
def object_class_action(self, context, objname, objmethod,
objver, args, kwargs):
"""Perform a classmethod action on an object."""
objclass = nova_object.NovaObject.obj_class_from_name(objname,
objver)
result = self._object_dispatch(objclass, objmethod, context,
args, kwargs)
# NOTE(danms): The RPC layer will convert to primitives for us,
# but in this case, we need to honor the version the client is
# asking for, so we do it before returning here.
return (result.obj_to_primitive(target_version=objver)
if isinstance(result, nova_object.NovaObject) else result)
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on an object."""
oldobj = objinst.obj_clone()
result = self._object_dispatch(objinst, objmethod, context,
args, kwargs)
updates = dict()
# NOTE(danms): Diff the object with the one passed to us and
# generate a list of changes to forward back
for name, field in objinst.fields.items():
if not objinst.obj_attr_is_set(name):
# Avoid demand-loading anything
continue
if (not oldobj.obj_attr_is_set(name) or
oldobj[name] != objinst[name]):
updates[name] = field.to_primitive(objinst, name,
objinst[name])
# This is safe since a field named this would conflict with the
# method anyway
updates['obj_what_changed'] = objinst.obj_what_changed()
return updates, result
def object_backport(self, context, objinst, target_version):
return objinst.obj_to_primitive(target_version=target_version)
class ComputeTaskManager(base.Base):
"""Namespace for compute methods.
This class presents an rpc API for nova-conductor under the 'compute_task'
namespace. The methods here are compute operations that are invoked
by the API service. These methods see the operation to completion, which
may involve coordinating activities on multiple compute nodes.
"""
target = messaging.Target(namespace='compute_task', version='1.9')
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.image_api = image.API()
self.scheduler_client = scheduler_client.SchedulerClient()
@messaging.expected_exceptions(exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceNotRunning,
exception.MigrationPreCheckError,
exception.LiveMigrationWithOldNovaNotSafe)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None):
if instance and not isinstance(instance, nova_object.NovaObject):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=attrs)
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit)
elif not live and not rebuild and flavor:
instance_uuid = instance['uuid']
with compute_utils.EventReporter(context, 'cold_migrate',
instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
reservations)
else:
raise NotImplementedError()
def _cold_migrate(self, context, instance, flavor, filter_properties,
reservations):
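# Build a request spec for the instance, ask the scheduler for a destination host, then issue
# prep_resize on the chosen compute node. On scheduling or RPC failure the quota reservations
# are rolled back and the instance's vm_state/task_state are reset with a notification.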
image_ref = instance.image_ref
image = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
request_spec = scheduler_utils.build_request_spec(
context, image, [instance], instance_type=flavor)
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
try:
scheduler_utils.populate_retry(filter_properties, instance['uuid'])
hosts = self.scheduler_client.select_destinations(
context, request_spec, filter_properties)
host_state = hosts[0]
except exception.NoValidHost as ex:
vm_state = instance['vm_state']
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, 'migrate_server',
updates, ex, request_spec)
quotas.rollback()
# if the flavor IDs match, it's migrate; otherwise resize
if flavor['id'] == instance['instance_type_id']:
msg = _("No valid host found for cold migrate")
else:
msg = _("No valid host found for resize")
raise exception.NoValidHost(reason=msg)
try:
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
# context is not serializable
filter_properties.pop('context', None)
# TODO(timello): originally, instance_type in request_spec
# on compute.api.resize does not have 'extra_specs', so we
# remove it for now to keep tests backward compatibility.
request_spec['instance_type'].pop('extra_specs', None)
(host, node) = (host_state['host'], host_state['nodename'])
self.compute_rpcapi.prep_resize(
context, image, instance,
flavor, host,
reservations, request_spec=request_spec,
filter_properties=filter_properties, node=node)
except Exception as ex:
with excutils.save_and_reraise_exception():
updates = {'vm_state': instance['vm_state'],
'task_state': None}
self._set_vm_state_and_notify(context, 'migrate_server',
updates, ex, request_spec)
quotas.rollback()
def _set_vm_state_and_notify(self, context, method, updates, ex,
request_spec):
scheduler_utils.set_vm_state_and_notify(
context, 'compute_task', method, updates,
ex, request_spec, self.db)
def _live_migrate(self, context, instance, scheduler_hint,
block_migration, disk_over_commit):
destination = scheduler_hint.get("host")
try:
live_migrate.execute(context, instance, destination,
block_migration, disk_over_commit)
except (exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceNotRunning,
exception.MigrationPreCheckError,
exception.LiveMigrationWithOldNovaNotSafe) as ex:
with excutils.save_and_reraise_exception():
# TODO(johngarbutt) - eventually need instance actions here
request_spec = {'instance_properties': {
'uuid': instance['uuid'], },
}
scheduler_utils.set_vm_state_and_notify(context,
'compute_task', 'migrate_server',
dict(vm_state=instance['vm_state'],
task_state=None,
expected_task_state=task_states.MIGRATING,),
ex, request_spec, self.db)
except Exception as ex:
LOG.error(_('Migration of instance %(instance_id)s to host'
' %(dest)s unexpectedly failed.'),
{'instance_id': instance['uuid'], 'dest': destination},
exc_info=True)
raise exception.MigrationError(reason=ex)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping=None, legacy_bdm=True):
# TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
# 2.0 of the RPC API.
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
# TODO(danms): Remove this in version 2.0 of the RPC API
if (requested_networks and
not isinstance(requested_networks,
objects.NetworkRequestList)):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in requested_networks])
try:
# check retry policy. Rather ugly use of instances[0]...
# but if we've exceeded max retries... then we really only
# have a single instance.
scheduler_utils.populate_retry(filter_properties,
instances[0].uuid)
hosts = self.scheduler_client.select_destinations(context,
request_spec, filter_properties)
except Exception as exc:
for instance in instances:
scheduler_driver.handle_schedule_error(context, exc,
instance.uuid, request_spec)
return
for (instance, host) in itertools.izip(instances, hosts):
try:
instance.refresh()
except (exception.InstanceNotFound,
exception.InstanceInfoCacheNotFound):
LOG.debug('Instance deleted during build', instance=instance)
continue
local_filter_props = copy.deepcopy(filter_properties)
scheduler_utils.populate_filter_properties(local_filter_props,
host)
# The block_device_mapping passed from the api doesn't contain
# instance specific information
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self.compute_rpcapi.build_and_run_instance(context,
instance=instance, host=host['host'], image=image,
request_spec=request_spec,
filter_properties=local_filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=bdms, node=host['nodename'],
limits=host['limits'])
def _delete_image(self, context, image_id):
return self.image_api.delete(context, image_id)
def _schedule_instances(self, context, image, filter_properties,
*instances):
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
hosts = self.scheduler_client.select_destinations(context,
request_spec, filter_properties)
return hosts
def unshelve_instance(self, context, instance):
sys_meta = instance.system_metadata
def safe_image_show(ctx, image_id):
if image_id:
return self.image_api.get(ctx, image_id)
if instance.vm_state == vm_states.SHELVED:
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=task_states.UNSHELVING)
self.compute_rpcapi.start_instance(context, instance)
snapshot_id = sys_meta.get('shelved_image_id')
if snapshot_id:
self._delete_image(context, snapshot_id)
elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
image = None
image_id = sys_meta.get('shelved_image_id')
# No need to check for the image if image_id is None, as the
# "shelved_image_id" key is not set for volume-backed
# instances during the shelve process.
if image_id:
with compute_utils.EventReporter(
context, 'get_image_info', instance.uuid):
try:
image = safe_image_show(context, image_id)
except exception.ImageNotFound:
instance.vm_state = vm_states.ERROR
instance.save()
reason = _('Unshelve attempted but the image %s '
'cannot be found.') % image_id
LOG.error(reason, instance=instance)
raise exception.UnshelveException(
instance_id=instance.uuid, reason=reason)
try:
with compute_utils.EventReporter(context, 'schedule_instances',
instance.uuid):
filter_properties = {}
hosts = self._schedule_instances(context, image,
filter_properties,
instance)
host_state = hosts[0]
scheduler_utils.populate_filter_properties(
filter_properties, host_state)
(host, node) = (host_state['host'], host_state['nodename'])
self.compute_rpcapi.unshelve_instance(
context, instance, host, image=image,
filter_properties=filter_properties, node=node)
except exception.NoValidHost:
instance.task_state = None
instance.save()
LOG.warning(_("No valid host found for unshelve instance"),
instance=instance)
return
except Exception:
with excutils.save_and_reraise_exception():
instance.task_state = None
instance.save()
LOG.error(_LE("Unshelve attempted but an error "
"has occurred"), instance=instance)
else:
LOG.error(_('Unshelve attempted but vm_state not SHELVED or '
'SHELVED_OFFLOADED'), instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
return
for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
if key in sys_meta:
del(sys_meta[key])
instance.system_metadata = sys_meta
instance.save()
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None):
with compute_utils.EventReporter(context, 'rebuild_server',
instance.uuid):
if not host:
# NOTE(lcostantino): Retrieve scheduler filters for the
# instance when the feature is available
filter_properties = {'ignore_hosts': [instance.host]}
request_spec = scheduler_utils.build_request_spec(context,
image_ref,
[instance])
try:
hosts = self.scheduler_client.select_destinations(context,
request_spec,
filter_properties)
host = hosts.pop(0)['host']
except exception.NoValidHost as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(context,
'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
LOG.warning(_("No valid host found for rebuild"),
instance=instance)
self.compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
injected_files=injected_files,
image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata,
bdms=bdms,
recreate=recreate,
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
host=host)
|
|
# Copyright 2014-2015 The Alive authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from constants import *
from codegen import *
def getAllocSize(type):
# round the type size (in bits) up to a whole number of bytes
return int((type.getSize() + 7) / 8) * 8
def alignSize(size, align):
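# Round size up to the next multiple of align; align == 0 means no alignment requirement, and
# otherwise align must be a power of two.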
if align == 0:
return size
assert align & (align-1) == 0
return (size + (align-1)) & ~(align-1)
def getPtrAlignCnstr(ptr, align):
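# SMT constraint stating that ptr is align-byte aligned (trivially true when align == 0).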
if align == 0:
return BoolVal(True)
assert align & (align-1) == 0
return ptr & (align-1) == 0
def defined_align_access(state, defined, access_size, req_align, aptr):
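# Collect definedness constraints for an access of access_size bits at address aptr with
# required alignment req_align: any block the access falls into must be at least req_align
# aligned, and the access must be satisfiable by at least one block that is large enough.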
must_access = []
for blck in state.ptrs:
ptr = blck.ptr
size = blck.size()
inbounds = And(UGE(aptr, ptr), UGE((size - access_size)/8, aptr - ptr))
if req_align != 0 and size >= access_size:
# overestimating the alignment is undefined behavior.
defined.append(Implies(inbounds, blck.align >= req_align))
if access_size <= size:
must_access.append(inbounds if blck.isAlloca() else BoolVal(True))
defined.append(mk_or(must_access))
################################
class MemInfo:
def __init__(self, ptr, mem, qvars, ty, block_size, num_elems, align):
self.ptr = ptr
self.mem = mem
self.qvars = qvars
self.ty = ty
self.block_size = block_size
self.num_elems = num_elems
self.align = align
def isAlloca(self):
return self.ty == 'alloca'
def size(self):
return self.block_size * self.num_elems
def __eq__(self, b):
return self.ptr.eq(b.ptr)
class State:
def __init__(self):
self.vars = collections.OrderedDict()
self.defined = [] # definedness so far in the BB
self.ptrs = []
self.bb_pres = {}
self.bb_mem = {}
def add(self, v, smt, defined, poison, qvars):
if v.getUniqueName() == '':
return
self.vars[v.getUniqueName()] = (smt, self.defined + defined, poison, qvars)
if isinstance(v, TerminatorInst):
for (bb,cond) in v.getSuccessors(self):
bb = bb[1:]
if bb not in self.bb_pres:
self.bb_pres[bb] = []
self.bb_mem[bb] = []
self.bb_pres[bb] += [cond]
self.bb_mem[bb].append((cond, self.mem))
def addAlloca(self, ptr, mem, block_size, num_elems, align):
self.ptrs.append(MemInfo(ptr, mem, [mem], 'alloca', block_size, num_elems,
align))
def addInputMem(self, ptr, mem, qvars, block_size, num_elems):
# precondition vcgen can call this function spuriously
if any(ptr.eq(blck.ptr) for blck in self.ptrs):
return
self.ptrs.append(MemInfo(ptr, mem, qvars, 'in', block_size, num_elems, 1))
def newBB(self, name):
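# On entry to a basic block, definedness is the disjunction of the predecessors' path
# conditions and memory is the ite-merge of the predecessors' memories; the entry block starts
# with an empty path condition and a fresh 'mem0' array.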
if name in self.bb_pres:
self.defined = [mk_or(self.bb_pres[name])]
self.mem = fold_ite_list(self.bb_mem[name])
else:
self.defined = []
self.mem = Array('mem0', BitVecSort(get_ptr_size()), BitVecSort(8))
self.current_bb = name
def getAllocaConstraints(self):
# generate the following constraints:
# 1) Alloca ptrs are never null
# 2) Allocated regions do not overlap with each other and with input blocks
cnstr = []
for mem1 in self.ptrs:
if mem1.ty == 'in':
continue
ptr = mem1.ptr
size = mem1.size()
cnstr.append(ptr != 0)
for mem2 in self.ptrs:
if mem1 == mem2:
continue
cnstr.append(Or(UGE(mem2.ptr, ptr + size),
ULE(mem2.ptr + mem2.size(), ptr)))
return cnstr
def eval(self, v, defined, poison, qvars):
(smt, d, p, q) = self.vars[v.getUniqueName()]
defined += d
poison += p
qvars += q
return smt
def iteritems(self):
for k,v in self.vars.iteritems():
if k[0] != '%' and k[0] != 'C' and not k.startswith('ret_'):
continue
yield k,v
def has_key(self, k):
return self.vars.has_key(k)
def __getitem__(self, k):
return self.vars[k]
################################
class Instr(Value):
pass
################################
class CopyOperand(Instr):
def __init__(self, v, type):
self.v = v
self.type = type
assert isinstance(self.v, Value)
assert isinstance(self.type, Type)
def __repr__(self):
t = str(self.type)
if len(t) > 0:
t += ' '
return t + self.v.getName()
def toSMT(self, defined, poison, state, qvars):
return state.eval(self.v, defined, poison, qvars)
def getTypeConstraints(self):
return And(self.type == self.v.type,
self.type.getTypeConstraints())
def register_types(self, manager):
manager.register_type(self, self.type, UnknownType())
manager.unify(self, self.v)
# TODO: visit_source?
def visit_target(self, manager, use_builder=False):
instr = manager.get_cexp(self.v)
if use_builder:
instr = CVariable('Builder').arr('Insert', [instr])
# TODO: this probably should use manager.get_ctype,
# but that currently doesn't distinguish source instructions (Value)
# from target instructions (Instruction)
if isinstance(self.v, Instr):
ctype = manager.PtrInstruction
else:
ctype = manager.PtrValue
return [CDefinition.init(
ctype,
manager.get_cexp(self),
instr)]
################################
class BinOp(Instr):
Add, Sub, Mul, UDiv, SDiv, URem, SRem, Shl, AShr, LShr, And, Or, Xor,\
Last = range(14)
opnames = {
Add: 'add',
Sub: 'sub',
Mul: 'mul',
UDiv: 'udiv',
SDiv: 'sdiv',
URem: 'urem',
SRem: 'srem',
Shl: 'shl',
AShr: 'ashr',
LShr: 'lshr',
And: 'and',
Or: 'or',
Xor: 'xor',
}
opids = {v:k for k, v in opnames.items()}
def __init__(self, op, type, v1, v2, flags = []):
assert isinstance(type, Type)
assert isinstance(v1, Value)
assert isinstance(v2, Value)
assert 0 <= op < self.Last
self.op = op
self.type = type
self.v1 = v1
self.v2 = v2
self.flags = list(flags)
self._check_op_flags()
def getOpName(self):
return self.opnames[self.op]
@staticmethod
def getOpId(name):
try:
return BinOp.opids[name]
except:
raise ParseError('Unknown binary instruction')
def __repr__(self):
t = str(self.type)
if len(t) > 0:
t = ' ' + t
flags = ' '.join(self.flags)
if len(flags) > 0:
flags = ' ' + flags
return '%s%s%s %s, %s' % (self.getOpName(), flags, t,
self.v1.getName(),
self.v2.getName())
def _check_op_flags(self):
allowed_flags = {
self.Add: ['nsw', 'nuw'],
self.Sub: ['nsw', 'nuw'],
self.Mul: ['nsw', 'nuw'],
self.UDiv: ['exact'],
self.SDiv: ['exact'],
self.URem: [],
self.SRem: [],
self.Shl: ['nsw', 'nuw'],
self.AShr: ['exact'],
self.LShr: ['exact'],
self.And: [],
self.Or: [],
self.Xor: [],
}[self.op]
for f in self.flags:
if f not in allowed_flags:
raise ParseError('Flag not supported by ' + self.getOpName(), f)
def _genSMTDefConds(self, v1, v2, poison):
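# Append the poison conditions implied by this instruction's flags (or by per-flag variables
# when flag inference is enabled) and return the definedness side conditions, e.g. non-zero
# divisors and shift amounts smaller than the bit width.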
bits = self.type.getSize()
poison_conds = {
self.Add: {'nsw': lambda a,b: SignExt(1,a)+SignExt(1,b) == SignExt(1,a+b),
'nuw': lambda a,b: ZeroExt(1,a)+ZeroExt(1,b) == ZeroExt(1,a+b),
},
self.Sub: {'nsw': lambda a,b: SignExt(1,a)-SignExt(1,b) == SignExt(1,a-b),
'nuw': lambda a,b: ZeroExt(1,a)-ZeroExt(1,b) == ZeroExt(1,a-b),
},
self.Mul: {'nsw': lambda a,b: no_overflow_smul(a, b),
'nuw': lambda a,b: no_overflow_umul(a, b),
},
self.UDiv:{'exact': lambda a,b: UDiv(a, b) * b == a,
},
self.SDiv:{'exact': lambda a,b: (a / b) * b == a,
},
self.URem:{},
self.SRem:{},
self.Shl: {'nsw': lambda a,b: (a << b) >> b == a,
'nuw': lambda a,b: LShR(a << b, b) == a,
},
self.AShr:{'exact': lambda a,b: (a >> b) << b == a,
},
self.LShr:{'exact': lambda a,b: LShR(a, b) << b == a,
},
self.And: {},
self.Or: {},
self.Xor: {},
}[self.op]
if do_infer_flags():
for flag,fn in poison_conds.iteritems():
bit = get_flag_var(flag, self.getName())
poison += [Implies(bit == 1, fn(v1, v2))]
else:
for f in self.flags:
poison += [poison_conds[f](v1, v2)]
# definedness of the instruction
return {
self.Add: lambda a,b: [],
self.Sub: lambda a,b: [],
self.Mul: lambda a,b: [],
self.UDiv: lambda a,b: [b != 0],
self.SDiv: lambda a,b: [b != 0, Or(a != (1 << (bits-1)), b != -1)],
self.URem: lambda a,b: [b != 0],
self.SRem: lambda a,b: [b != 0, Or(a != (1 << (bits-1)), b != -1)],
self.Shl: lambda a,b: [ULT(b, bits)],
self.AShr: lambda a,b: [ULT(b, bits)],
self.LShr: lambda a,b: [ULT(b, bits)],
self.And: lambda a,b: [],
self.Or: lambda a,b: [],
self.Xor: lambda a,b: [],
}[self.op](v1,v2)
def toSMT(self, defined, poison, state, qvars):
v1 = state.eval(self.v1, defined, poison, qvars)
v2 = state.eval(self.v2, defined, poison, qvars)
defined += self._genSMTDefConds(v1, v2, poison)
return {
self.Add: lambda a,b: a + b,
self.Sub: lambda a,b: a - b,
self.Mul: lambda a,b: a * b,
self.UDiv: lambda a,b: UDiv(a, b),
self.SDiv: lambda a,b: a / b,
self.URem: lambda a,b: URem(a, b),
self.SRem: lambda a,b: SRem(a, b),
self.Shl: lambda a,b: a << b,
self.AShr: lambda a,b: a >> b,
self.LShr: lambda a,b: LShR(a, b),
self.And: lambda a,b: a & b,
self.Or: lambda a,b: a | b,
self.Xor: lambda a,b: a ^ b,
}[self.op](v1, v2)
def getTypeConstraints(self):
return And(self.type == self.v1.type,
self.type == self.v2.type,
self.type.getTypeConstraints())
caps = {
Add: 'Add',
Sub: 'Sub',
Mul: 'Mul',
UDiv: 'UDiv',
SDiv: 'SDiv',
URem: 'URem',
SRem: 'SRem',
Shl: 'Shl',
AShr: 'AShr',
LShr: 'LShr',
And: 'And',
Or: 'Or',
Xor: 'Xor',
}
def register_types(self, manager):
manager.register_type(self, self.type, IntType())
manager.unify(self, self.v1, self.v2)
def visit_source(self, mb):
r1 = mb.subpattern(self.v1)
r2 = mb.subpattern(self.v2)
op = BinOp.caps[self.op]
if 'nsw' in self.flags and 'nuw' in self.flags:
return CFunctionCall('match',
mb.get_my_ref(),
CFunctionCall('m_CombineAnd',
CFunctionCall('m_NSW' + op, r1, r2),
CFunctionCall('m_NUW' + op,
CFunctionCall('m_Value'),
CFunctionCall('m_Value'))))
if 'nsw' in self.flags:
return mb.simple_match('m_NSW' + op, r1, r2)
if 'nuw' in self.flags:
return mb.simple_match('m_NUW' + op, r1, r2)
if 'exact' in self.flags:
return CFunctionCall('match',
mb.get_my_ref(),
CFunctionCall('m_Exact', CFunctionCall('m_' + op, r1, r2)))
return mb.simple_match('m_' + op, r1, r2)
def visit_target(self, manager, use_builder=False):
cons = CFunctionCall('BinaryOperator::Create' + self.caps[self.op],
manager.get_cexp(self.v1), manager.get_cexp(self.v2))
if use_builder:
cons = CVariable('Builder').arr('Insert', [cons])
gen = [CDefinition.init(CPtrType(CTypeName('BinaryOperator')), manager.get_cexp(self), cons)]
for f in self.flags:
setter = {'nsw': 'setHasNoSignedWrap', 'nuw': 'setHasNoUnsignedWrap', 'exact': 'setIsExact'}[f]
gen.append(manager.get_cexp(self).arr(setter, [CVariable('true')]))
return gen
################################
class ConversionOp(Instr):
Trunc, ZExt, SExt, ZExtOrTrunc, Ptr2Int, Int2Ptr, Bitcast, Last = range(8)
opnames = {
Trunc: 'trunc',
ZExt: 'zext',
SExt: 'sext',
ZExtOrTrunc: 'ZExtOrTrunc',
Ptr2Int: 'ptrtoint',
Int2Ptr: 'inttoptr',
Bitcast: 'bitcast',
}
opids = {v:k for k, v in opnames.items()}
def __init__(self, op, stype, v, type):
assert isinstance(stype, Type)
assert isinstance(type, Type)
assert isinstance(v, Value)
assert 0 <= op < self.Last
self.op = op
self.stype = stype
self.v = v
self.type = type
def getOpName(self):
return self.opnames[self.op]
@staticmethod
def getOpId(name):
try:
return ConversionOp.opids[name]
except:
raise ParseError('Unknown conversion instruction')
@staticmethod
def enforceIntSrc(op):
return op == ConversionOp.Trunc or\
op == ConversionOp.ZExt or\
op == ConversionOp.SExt or\
op == ConversionOp.ZExtOrTrunc or\
op == ConversionOp.Int2Ptr
@staticmethod
def enforcePtrSrc(op):
return op == ConversionOp.Ptr2Int
@staticmethod
def enforceIntTgt(op):
return op == ConversionOp.Trunc or\
op == ConversionOp.ZExt or\
op == ConversionOp.SExt or\
op == ConversionOp.ZExtOrTrunc or\
op == ConversionOp.Ptr2Int
@staticmethod
def enforcePtrTgt(op):
return op == ConversionOp.Int2Ptr
def __repr__(self):
st = str(self.stype)
if len(st) > 0:
st = ' ' + st
tt = str(self.type)
if len(tt) > 0:
tt = ' to ' + tt
return '%s%s %s%s' % (self.getOpName(), st, self.v.getName(), tt)
def toSMT(self, defined, poison, state, qvars):
return {
self.Trunc: lambda v: Extract(self.type.getSize()-1, 0, v),
self.ZExt: lambda v: ZeroExt(self.type.getSize() -
self.stype.getSize(), v),
self.SExt: lambda v: SignExt(self.type.getSize() -
self.stype.getSize(), v),
self.ZExtOrTrunc: lambda v: truncateOrZExt(v, self.type.getSize()),
self.Ptr2Int: lambda v: truncateOrZExt(v, self.type.getSize()),
self.Int2Ptr: lambda v: truncateOrZExt(v, self.type.getSize()),
self.Bitcast: lambda v: v,
}[self.op](state.eval(self.v, defined, poison, qvars))
def getTypeConstraints(self):
cnstr = {
self.Trunc: lambda src,tgt: src > tgt,
self.ZExt: lambda src,tgt: src < tgt,
self.SExt: lambda src,tgt: src < tgt,
self.ZExtOrTrunc: lambda src,tgt: BoolVal(True),
self.Ptr2Int: lambda src,tgt: BoolVal(True),
self.Int2Ptr: lambda src,tgt: BoolVal(True),
self.Bitcast: lambda src,tgt: src.getSize() == tgt.getSize(),
} [self.op](self.stype, self.type)
return And(self.stype == self.v.type,
self.type.getTypeConstraints(),
self.stype.getTypeConstraints(),
cnstr)
matcher = {
Trunc: 'm_Trunc',
ZExt: 'm_ZExt',
SExt: 'm_SExt',
Ptr2Int: 'm_PtrToInt',
Bitcast: 'm_BitCast',
}
constr = {
Trunc: 'TruncInst',
ZExt: 'ZExtInst',
SExt: 'SExtInst',
Ptr2Int: 'PtrToIntInst',
Int2Ptr: 'IntToPtrInst',
Bitcast: 'BitCastInst',
}
def register_types(self, manager):
if self.enforceIntSrc(self.op):
manager.register_type(self.v, self.stype, IntType())
elif self.enforcePtrSrc(self.op):
manager.register_type(self.v, self.stype, PtrType())
else:
manager.register_type(self.v, self.stype, UnknownType())
if self.enforceIntTgt(self.op):
manager.register_type(self, self.type, IntType())
elif self.enforcePtrTgt(self.op):
manager.register_type(self, self.type, PtrType())
else:
manager.register_type(self, self.type, UnknownType())
# TODO: inequalities for trunc/sext/zext
def visit_source(self, mb):
r = mb.subpattern(self.v)
if self.op == ConversionOp.ZExtOrTrunc:
return CFunctionCall('match',
mb.get_my_ref(),
CFunctionCall('m_CombineOr',
CFunctionCall('m_ZExt', r),
CFunctionCall('m_ZTrunc', r)))
return mb.simple_match(ConversionOp.matcher[self.op], r)
def visit_target(self, manager, use_builder=False):
if self.op == ConversionOp.ZExtOrTrunc:
assert use_builder #TODO: handle ZExtOrTrunc in root position
instr = CVariable('Builder').arr('CreateZExtOrTrunc',
[manager.get_cexp(self.v), manager.get_llvm_type(self)])
return [CDefinition.init(
manager.PtrValue,
manager.get_cexp(self),
instr)]
else:
instr = CFunctionCall('new ' + ConversionOp.constr[self.op],
manager.get_cexp(self.v), manager.get_llvm_type(self))
if use_builder:
instr = CVariable('Builder').arr('Insert', [instr])
return [CDefinition.init(
manager.PtrInstruction,
manager.get_cexp(self),
instr)]
################################
class Icmp(Instr):
EQ, NE, UGT, UGE, ULT, ULE, SGT, SGE, SLT, SLE, Var, Last = range(12)
opnames = {
EQ: 'eq',
NE: 'ne',
UGT: 'ugt',
UGE: 'uge',
ULT: 'ult',
ULE: 'ule',
SGT: 'sgt',
SGE: 'sge',
SLT: 'slt',
SLE: 'sle',
}
opids = {v:k for k, v in opnames.items()}
def __init__(self, op, type, v1, v2):
assert isinstance(type, Type)
assert isinstance(v1, Value)
assert isinstance(v2, Value)
self.op = self.getOpId(op)
if self.op == self.Var:
self.opname = op
self.type = IntType(1)
self.stype = type.ensureIntPtrOrVector()
self.v1 = v1
self.v2 = v2
def getOpName(self):
return 'icmp'
@staticmethod
def getOpId(name):
return Icmp.opids.get(name, Icmp.Var)
def __repr__(self):
op = self.opname if self.op == Icmp.Var else Icmp.opnames[self.op]
if len(op) > 0:
op = ' ' + op
t = str(self.stype)
if len(t) > 0:
t = ' ' + t
return 'icmp%s%s %s, %s' % (op, t, self.v1.getName(), self.v2.getName())
def opToSMT(self, op, a, b):
return {
self.EQ: lambda a,b: toBV(a == b),
self.NE: lambda a,b: toBV(a != b),
self.UGT: lambda a,b: toBV(UGT(a, b)),
self.UGE: lambda a,b: toBV(UGE(a, b)),
self.ULT: lambda a,b: toBV(ULT(a, b)),
self.ULE: lambda a,b: toBV(ULE(a, b)),
self.SGT: lambda a,b: toBV(a > b),
self.SGE: lambda a,b: toBV(a >= b),
self.SLT: lambda a,b: toBV(a < b),
self.SLE: lambda a,b: toBV(a <= b),
}[op](a, b)
def recurseSMT(self, ops, a, b, i):
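# When the predicate is generic (Var), encode the choice as a nested If over all candidate
# predicates, selected by a fresh 4-bit variable named 'icmp_<name>'.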
if len(ops) == 1:
return self.opToSMT(ops[0], a, b)
opname = self.opname if self.opname != '' else self.getName()
var = BitVec('icmp_' + opname, 4)
assert 1 << 4 > self.Var
return If(var == i,
self.opToSMT(ops[0], a, b),
self.recurseSMT(ops[1:], a, b, i+1))
def toSMT(self, defined, poison, state, qvars):
# Generate all possible comparisons if icmp is generic. Set of comparisons
# can be restricted in the precondition.
ops = [self.op] if self.op != self.Var else range(self.Var)
return self.recurseSMT(ops, state.eval(self.v1, defined, poison, qvars),
state.eval(self.v2, defined, poison, qvars), 0)
def getTypeConstraints(self):
return And(self.stype == self.v1.type,
self.stype == self.v2.type,
self.type.getTypeConstraints(),
self.stype.getTypeConstraints())
op_enum = {
EQ: 'ICmpInst::ICMP_EQ',
NE: 'ICmpInst::ICMP_NE',
UGT: 'ICmpInst::ICMP_UGT',
UGE: 'ICmpInst::ICMP_UGE',
ULT: 'ICmpInst::ICMP_ULT',
ULE: 'ICmpInst::ICMP_ULE',
SGT: 'ICmpInst::ICMP_SGT',
SGE: 'ICmpInst::ICMP_SGE',
SLT: 'ICmpInst::ICMP_SLT',
SLE: 'ICmpInst::ICMP_SLE',
}
def register_types(self, manager):
manager.register_type(self, self.type, IntType(1))
manager.register_type(self.v1, self.stype, UnknownType().ensureIntPtrOrVector())
manager.unify(self.v1, self.v2)
PredType = CTypeName('CmpInst::Predicate')
def visit_source(self, mb):
r1 = mb.subpattern(self.v1)
r2 = mb.subpattern(self.v2)
if self.op == Icmp.Var:
opname = self.opname if self.opname else 'Pred ' + self.name
name = mb.manager.get_key_name(opname) #FIXME: call via mb?
rp = mb.binding(name, self.PredType)
return mb.simple_match('m_ICmp', rp, r1, r2)
pvar = mb.new_name('P')
rp = mb.binding(pvar, self.PredType)
return CBinExpr('&&',
mb.simple_match('m_ICmp', rp, r1, r2),
CBinExpr('==', CVariable(pvar), CVariable(Icmp.op_enum[self.op])))
def visit_target(self, manager, use_builder=False):
# determine the predicate
if self.op == Icmp.Var:
key = self.opname if self.opname else 'Pred ' + self.name
opname = manager.get_key_name(key)
assert manager.bound(opname)
# TODO: confirm type
else:
opname = Icmp.op_enum[self.op]
instr = CFunctionCall('new ICmpInst', CVariable(opname),
manager.get_cexp(self.v1),
manager.get_cexp(self.v2))
if use_builder:
instr = CVariable('Builder').arr('Insert', [instr])
return [
CDefinition.init(manager.PtrInstruction, manager.get_cexp(self), instr)]
################################
class Select(Instr):
def __init__(self, type, c, v1, v2):
assert isinstance(type, Type)
assert isinstance(c, Value)
#assert isinstance(c.type, IntType)
assert isinstance(v1, Value)
assert isinstance(v2, Value)
self.type = type.ensureFirstClass()
self.c = c
self.v1 = v1
self.v2 = v2
def __repr__(self):
t = str(self.type)
if len(t) > 0:
t = t + ' '
return 'select i1 %s, %s%s, %s%s' % (self.c.getName(), t, self.v1.getName(),
t, self.v2.getName())
def getOpName(self):
return 'select'
def toSMT(self, defined, poison, state, qvars):
return If(state.eval(self.c, defined, poison, qvars) == 1,
state.eval(self.v1, defined, poison, qvars),
state.eval(self.v2, defined, poison, qvars))
def getTypeConstraints(self):
return And(self.type == self.v1.type,
self.type == self.v2.type,
self.c.type == 1,
self.type.getTypeConstraints())
def register_types(self, manager):
manager.register_type(self, self.type, UnknownType().ensureFirstClass())
manager.register_type(self.c, self.c.type, IntType(1))
manager.unify(self, self.v1, self.v2)
def visit_source(self, mb):
c = mb.subpattern(self.c)
v1 = mb.subpattern(self.v1)
v2 = mb.subpattern(self.v2)
return mb.simple_match('m_Select', c, v1, v2)
def visit_target(self, manager, use_builder=False):
instr = CFunctionCall('SelectInst::Create',
manager.get_cexp(self.c),
manager.get_cexp(self.v1),
manager.get_cexp(self.v2))
if use_builder:
instr = CVariable('Builder').arr('Insert', [instr])
return [CDefinition.init(manager.PtrInstruction, manager.get_cexp(self), instr)]
################################
class Alloca(Instr):
def __init__(self, type, elemsType, numElems, align):
assert isinstance(elemsType, IntType)
assert isinstance(align, int)
self.type = PtrType(type)
self.elemsType = elemsType
self.numElems = TypeFixedValue(numElems, 1, 16)
self.align = align
def __repr__(self):
elems = self.numElems.getName()
if elems == '1':
elems = ''
else:
t = str(self.elemsType)
if len(t) > 0:
t += ' '
elems = ', ' + t + elems
align = ', align %d' % self.align if self.align != 0 else ''
return 'alloca %s%s%s' % (str(self.type.type), elems, align)
def getOpName(self):
return 'alloca'
def toSMT(self, defined, poison, state, qvars):
self.numElems.toSMT(defined, poison, state, qvars)
ptr = BitVec(self.getName(), self.type.getSize())
block_size = getAllocSize(self.type.type)
num_elems = self.numElems.getValue()
size = num_elems * block_size
if size == 0:
qvars.append(ptr)
return ptr
if size > 8:
defined.append(ULT(ptr, ptr + ((size >> 3) - 1)))
defined += [ptr != 0,
getPtrAlignCnstr(ptr, self.align)]
mem = freshBV('alloca' + self.getName(), size)
state.addAlloca(ptr, mem, block_size, num_elems, self.align)
if use_array_theory():
for i in range(0, size/8):
idx = 8*i
state.mem = Update(state.mem, ptr + i, Extract(idx+7, idx, mem))
return ptr
def getTypeConstraints(self):
return And(self.numElems.getType() == self.elemsType,
self.type.getTypeConstraints(),
self.elemsType.getTypeConstraints(),
self.numElems.getTypeConstraints())
################################
class GEP(Instr):
def __init__(self, type, ptr, idxs, inbounds):
assert isinstance(type, PtrType)
assert isinstance(ptr, Value)
assert isinstance(idxs, list)
assert isinstance(inbounds, bool)
for i in range(len(idxs)):
assert isinstance(idxs[i], IntType if (i & 1) == 0 else Value)
self.type = type
self.ptr = ptr
self.idxs = idxs[1:len(idxs):2]
self.inbounds = inbounds
def __repr__(self):
inb = 'inbounds ' if self.inbounds else ''
idxs = ''
for i in range(len(self.idxs)):
t = str(self.idxs[i].type)
if len(t) > 0:
t += ' '
idxs += ', %s%s' % (t, self.idxs[i].getName())
return 'getelementptr %s%s %s%s' % (inb, self.type, self.ptr.getName(),
idxs)
def getOpName(self):
return 'getelementptr'
def toSMT(self, defined, poison, state, qvars):
ptr = state.eval(self.ptr, defined, poison, qvars)
type = self.type
for i in range(len(self.idxs)):
idx = truncateOrSExt(state.eval(self.idxs[i], defined, poison, qvars),ptr)
ptr += getAllocSize(type.getPointeeType())/8 * idx
if i + 1 != len(self.idxs):
type = type.getUnderlyingType()
# TODO: handle inbounds
return ptr
def getTypeConstraints(self):
return And(self.type.ensureTypeDepth(len(self.idxs)),
Instr.getTypeConstraints(self))
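# Worked example of the address arithmetic in GEP.toSMT above (a sketch): for
# `getelementptr i32* %p, i32 %i`, getAllocSize(i32) is 32 bits, so the result
# is %p + (32/8) * %i == %p + 4*%i, after sign-extending or truncating %i to
# the pointer width; the inbounds flag is not yet modelled (see the TODO).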
################################
class Load(Instr):
def __init__(self, stype, v, align):
assert isinstance(stype, PtrType)
assert isinstance(v, Value)
assert isinstance(align, int)
self.stype = stype
stype.type = stype.type.ensureFirstClass()
self.type = stype.type
self.v = v
self.align = align
def __repr__(self):
align = ', align %d' % self.align if self.align != 0 else ''
return 'load %s %s%s' % (str(self.stype), self.v.getName(), align)
def getOpName(self):
return 'load'
def extractBV(self, BV, offset, size):
old_size = BV.size()
BV = BV << offset
return Extract(old_size - 1, old_size - size, BV)
def _mkBVLoad(self, l, v, defined, qvars):
if len(l) == 0:
return None
blck = l[0]
mem = blck.mem
ptr = blck.ptr
size = blck.size()
read_size = self.type.getSize()
if size > read_size:
offset = truncateOrZExt((v - ptr) << 3, mem)
mem = self.extractBV(mem, offset, read_size)
elif size < read_size:
# undef behavior; skip this block
      return self._mkBVLoad(l[1:], v, defined, qvars)
inbounds = And(UGE(v, ptr), UGE((size - read_size)/8, v - ptr))
qvars += blck.qvars
mem2 = self._mkBVLoad(l[1:], v, defined, qvars)
return mem if mem2 is None else If(inbounds, mem, mem2)
def _mkArrayLoad(self, state, ptr, sz, qvars):
for blck in state.ptrs:
qvars += blck.qvars
bytes = []
rem = sz % 8
if rem != 0:
sz = sz - rem
bytes = [Extract(rem-1, 0, state.mem[ptr])]
ptr += 1
for i in range(0, sz/8):
# FIXME: assumes little-endian
bytes = [state.mem[ptr + i]] + bytes
return mk_concat(bytes)
def toSMT(self, defined, poison, state, qvars):
v = state.eval(self.v, defined, poison, qvars)
defined.append(v != 0)
access_sz = getAllocSize(self.type)
defined_align_access(state, defined, access_sz, self.align, v)
if use_array_theory():
val = self._mkArrayLoad(state, v, self.type.getSize(), qvars)
else:
val = self._mkBVLoad(state.ptrs, v, defined, qvars)
if val is None:
defined.append(BoolVal(False))
return BitVecVal(0, self.type.getSize())
return val
def getTypeConstraints(self):
return And(self.stype == self.v.type,
self.type == self.v.type.getPointeeType(),
self.type.getTypeConstraints())
################################
class Store(Instr):
def __init__(self, stype, src, type, dst, align):
assert isinstance(stype, Type)
assert isinstance(src, Value)
assert isinstance(type, PtrType)
assert isinstance(dst, Value)
assert isinstance(align, int)
self.stype = stype.ensureFirstClass()
self.src = src
self.type = type
self.dst = dst
self.align = align
self.setName('store')
self.id = mk_unique_id()
self.type.setName(self.getUniqueName())
def getUniqueName(self):
return self.getName() + '_' + self.id
def getOpName(self):
return 'store'
def __repr__(self):
t = str(self.stype)
if len(t) > 0:
t = t + ' '
align = ', align %d' % self.align if self.align != 0 else ''
return 'store %s%s, %s %s%s' % (t, self.src.getName(), str(self.type),
self.dst.getName(), align)
def _mkMem(self, src, tgt, ptr, size, mem):
write_size = self.stype.getSize()
if write_size == size:
return src
offset = (tgt - ptr) << 3
new = LShR(truncateOrPad(src, mem), truncateOrZExt(offset, mem))
# mask out bits that will be written
mask = BitVecVal((1 << size) - 1, size)
m1 = mask << truncateOrZExt(size - offset, mem)
m2 = LShR(mask, truncateOrZExt(write_size + offset, mem))
old = mem & (m1 | m2)
return new | old
def _bvVcGen(self, state, src, tgt, write_size, qvars_new):
for blck in state.ptrs:
size = blck.size()
# skip block if it is too small
if size < write_size:
continue
ptr = blck.ptr
mem = blck.mem
inbounds = And(UGE(tgt, ptr), UGE((size - write_size)/8, tgt - ptr))
writes = mk_and(state.defined + [inbounds])
blck.mem = If(writes, self._mkMem(src, tgt, ptr, size, mem), mem)
blck.qvars += qvars_new
def _arrayVcGen(self, state, src, tgt, write_size):
src_size = self.stype.getSize()
src_idx = 0
# FIXME: assumes little-endian
if src_size != write_size:
rem = src_size % 8
rest = Extract(rem-1, 0, src)
rest_old = Extract(7, rem, state.mem[tgt])
state.mem = If(mk_and(state.defined),
Update(state.mem, tgt, Concat(rest_old, rest)),
state.mem)
tgt += 1
write_size -= 8
assert (src_size-rem) == write_size
src_idx = rem
for i in range(0, write_size/8):
state.mem = If(mk_and(state.defined),
Update(state.mem, tgt+i, Extract(src_idx+7, src_idx, src)),
state.mem)
src_idx += 8
def toSMT(self, defined, poison, state, qvars):
qvars_new = []
src = state.eval(self.src, defined, poison, qvars_new)
tgt = state.eval(self.dst, defined, poison, qvars_new)
qvars += qvars_new
defined.append(tgt != 0)
# cutpoint; record BB definedness
state.defined += defined
write_size = getAllocSize(self.stype)
if use_array_theory():
self._arrayVcGen(state, src, tgt, write_size)
else:
self._bvVcGen(state, src, tgt, write_size, qvars_new)
defined_align_access(state, defined, write_size, self.align, tgt)
return None
def getTypeConstraints(self):
return And(self.stype == self.type.type,
self.src.type == self.stype,
self.dst.type == self.type,
self.stype.getTypeConstraints(),
self.type.getTypeConstraints())
################################
class Skip(Instr):
def __init__(self):
self.id = mk_unique_id()
def getUniqueName(self):
return 'skip_' + self.id
def __repr__(self):
return 'skip'
def toSMT(self, defined, poison, state, qvars):
return None
################################
class Unreachable(Instr):
def __init__(self):
self.id = mk_unique_id()
def getUniqueName(self):
return 'unreachable_' + self.id
def __repr__(self):
return 'unreachable'
def toSMT(self, defined, poison, state, qvars):
defined.append(BoolVal(False))
return None
################################
class TerminatorInst(Instr):
pass
################################
class Br(TerminatorInst):
def __init__(self, bb_label, cond, true, false):
assert isinstance(bb_label, str)
assert isinstance(cond, Value)
assert isinstance(true, str)
assert isinstance(false, str)
self.cond = cond
self.true = true
self.false = false
self.setName('br_' + bb_label)
def __repr__(self):
return "br i1 %s, label %s, label %s" % (self.cond.getName(),
self.true, self.false)
def getSuccessors(self, state):
defined = []
poison = []
qvars = []
cond = state.eval(self.cond, defined, poison, qvars)
assert qvars == []
return [(self.true, mk_and([cond != 0] + defined + poison)),
(self.false, mk_and([cond == 0] + defined + poison))]
def toSMT(self, defined, poison, state, qvars):
return None
################################
class Ret(TerminatorInst):
def __init__(self, bb_label, type, val):
assert isinstance(bb_label, str)
assert isinstance(type, Type)
assert isinstance(val, Value)
self.type = type
self.val = val
self.setName('ret_' + bb_label)
def __repr__(self):
t = str(self.type)
if len(t) > 0:
t = t + ' '
return "ret %s%s" % (t, self.val.getName())
def getSuccessors(self, state):
return []
def toSMT(self, defined, poison, state, qvars):
return state.eval(self.val, defined, poison, qvars)
def getTypeConstraints(self):
return And(self.type == self.val.type, self.type.getTypeConstraints())
################################
def print_prog(p, skip):
for bb, instrs in p.iteritems():
if bb != "":
print "%s:" % bb
for k,v in instrs.iteritems():
if k in skip:
continue
k = str(k)
if k[0] == '%':
print ' %s = %s' % (k, v)
else:
print " %s" % v
def countUsers(prog):
m = {}
for bb, instrs in prog.iteritems():
for k, v in instrs.iteritems():
v.countUsers(m)
return m
def getTypeConstraints(p):
t = [v.getTypeConstraints() for v in p.itervalues()]
# ensure all return instructions have the same type
ret_types = [v.type for v in p.itervalues() if isinstance(v, Ret)]
if len(ret_types) > 1:
t += mkTyEqual(ret_types)
return t
def fixupTypes(p, types):
for v in p.itervalues():
v.fixupTypes(types)
def toSMT(prog, idents, isSource):
set_smt_is_source(isSource)
state = State()
for k,v in idents.iteritems():
if isinstance(v, (Input, Constant)):
defined = []
poison = []
qvars = []
smt = v.toSMT(defined, poison, state, qvars)
assert defined == [] and poison == []
state.add(v, smt, [], [], qvars)
for bb, instrs in prog.iteritems():
state.newBB(bb)
for k,v in instrs.iteritems():
defined = []
poison = []
qvars = []
smt = v.toSMT(defined, poison, state, qvars)
state.add(v, smt, defined, poison, qvars)
return state
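# A rough driver sketch for the module-level helpers above (names such as
# `src_prog`, `tgt_prog` and `idents` are illustrative, not defined here):
# solve the collected type constraints with Z3, fix the concrete types, then
# encode both programs to SMT states for comparison.
#
#   s = Solver()
#   s.add(getTypeConstraints(idents))
#   if s.check() == sat:
#     fixupTypes(idents, s.model())
#     src_state = toSMT(src_prog, idents, True)
#     tgt_state = toSMT(tgt_prog, idents, False)
#     # ... compare per-block return values and definedness conditions ...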
|
|
"""
Widgets for interacting with ImageViewer.
These widgets should be added to a Plugin subclass using its `add_widget`
method or calling::
plugin += Widget(...)
on a Plugin instance. The Plugin will delegate action based on the widget's
parameter type specified by its `ptype` attribute, which can be:
'arg' : positional argument passed to Plugin's `filter_image` method.
'kwarg' : keyword argument passed to Plugin's `filter_image` method.
'plugin' : attribute of Plugin. You'll probably need to add a class
property of the same name that updates the display.
"""
from ..qt import QtGui
from ..qt import QtCore
from ..qt.QtCore import Qt
from ..utils import RequiredAttr
__all__ = ['BaseWidget', 'Slider', 'ComboBox', 'Text']
class BaseWidget(QtGui.QWidget):
plugin = RequiredAttr("Widget is not attached to a Plugin.")
def __init__(self, name, ptype=None, callback=None):
super(BaseWidget, self).__init__()
self.name = name
self.ptype = ptype
self.callback = callback
self.plugin = None
@property
def val(self):
msg = "Subclass of BaseWidget requires `val` property"
raise NotImplementedError(msg)
def _value_changed(self, value):
self.callback(self.name, value)
class Text(BaseWidget):
def __init__(self, name=None, text=''):
super(Text, self).__init__(name)
self._label = QtGui.QLabel()
self.text = text
self.layout = QtGui.QHBoxLayout(self)
if name is not None:
name_label = QtGui.QLabel()
name_label.setText(name)
self.layout.addWidget(name_label)
self.layout.addWidget(self._label)
@property
def text(self):
return self._label.text()
@text.setter
def text(self, text_str):
self._label.setText(text_str)
class Slider(BaseWidget):
"""Slider widget for adjusting numeric parameters.
Parameters
----------
name : str
Name of slider parameter. If this parameter is passed as a keyword
argument, it must match the name of that keyword argument (spaces are
replaced with underscores). In addition, this name is displayed as the
name of the slider.
low, high : float
Range of slider values.
value : float
Default slider value. If None, use midpoint between `low` and `high`.
value_type : {'float' | 'int'}
Numeric type of slider value.
ptype : {'arg' | 'kwarg' | 'plugin'}
Parameter type.
callback : function
Callback function called in response to slider changes. This function
is typically set when the widget is added to a plugin.
orientation : {'horizontal' | 'vertical'}
Slider orientation.
update_on : {'release' | 'move'}
Control when callback function is called: on slider move or release.
"""
def __init__(self, name, low=0.0, high=1.0, value=None, value_type='float',
ptype='kwarg', callback=None, max_edit_width=60,
orientation='horizontal', update_on='release'):
super(Slider, self).__init__(name, ptype, callback)
        if value is None:
            # Default to the midpoint of the slider range, as documented above.
            value = (low + high) / 2.
# Set widget orientation
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if orientation == 'vertical':
self.slider = QtGui.QSlider(Qt.Vertical)
alignment = QtCore.Qt.AlignHCenter
align_text = QtCore.Qt.AlignHCenter
align_value = QtCore.Qt.AlignHCenter
self.layout = QtGui.QVBoxLayout(self)
elif orientation == 'horizontal':
self.slider = QtGui.QSlider(Qt.Horizontal)
alignment = QtCore.Qt.AlignVCenter
align_text = QtCore.Qt.AlignLeft
align_value = QtCore.Qt.AlignRight
self.layout = QtGui.QHBoxLayout(self)
else:
msg = "Unexpected value %s for 'orientation'"
raise ValueError(msg % orientation)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Set slider behavior for float and int values.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if value_type == 'float':
# divide slider into 1000 discrete values
slider_max = 1000
self._scale = float(high - low) / slider_max
self.slider.setRange(0, slider_max)
self.value_fmt = '%2.2f'
elif value_type == 'int':
self.slider.setRange(low, high)
self.value_fmt = '%d'
else:
msg = "Expected `value_type` to be 'float' or 'int'; received: %s"
raise ValueError(msg % value_type)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.value_type = value_type
self._low = low
self._high = high
# Update slider position to default value
self.val = value
if update_on == 'move':
self.slider.valueChanged.connect(self._on_slider_changed)
elif update_on == 'release':
self.slider.sliderReleased.connect(self._on_slider_changed)
else:
raise ValueError("Unexpected value %s for 'update_on'" % update_on)
self.slider.setFocusPolicy(QtCore.Qt.StrongFocus)
self.name_label = QtGui.QLabel()
self.name_label.setText(self.name)
self.name_label.setAlignment(align_text)
self.editbox = QtGui.QLineEdit()
self.editbox.setMaximumWidth(max_edit_width)
self.editbox.setText(self.value_fmt % self.val)
self.editbox.setAlignment(align_value)
self.editbox.editingFinished.connect(self._on_editbox_changed)
self.layout.addWidget(self.name_label)
self.layout.addWidget(self.slider)
self.layout.addWidget(self.editbox)
def _on_slider_changed(self):
"""Call callback function with slider's name and value as parameters"""
value = self.val
self.editbox.setText(str(value)[:4])
self.callback(self.name, value)
def _on_editbox_changed(self):
"""Validate input and set slider value"""
try:
value = float(self.editbox.text())
except ValueError:
self._bad_editbox_input()
return
if not self._low <= value <= self._high:
self._bad_editbox_input()
return
self.val = value
self._good_editbox_input()
self.callback(self.name, value)
def _good_editbox_input(self):
self.editbox.setStyleSheet("background-color: rgb(255, 255, 255)")
def _bad_editbox_input(self):
self.editbox.setStyleSheet("background-color: rgb(255, 200, 200)")
@property
def val(self):
value = self.slider.value()
if self.value_type == 'float':
value = value * self._scale + self._low
return value
@val.setter
def val(self, value):
if self.value_type == 'float':
value = (value - self._low) / self._scale
self.slider.setValue(value)
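# Worked example of the Slider float scaling above (a sketch using illustrative
# values): with low=0.0, high=5.0 and value_type='float', the underlying
# QSlider runs over 0..1000 and _scale = (5.0 - 0.0) / 1000 = 0.005, so a raw
# slider position of 200 maps to val = 200 * 0.005 + 0.0 = 1.0, and setting
# val = 2.5 moves the slider to (2.5 - 0.0) / 0.005 = 500.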
class ComboBox(BaseWidget):
"""ComboBox widget for selecting among a list of choices.
Parameters
----------
    name : str
        Name of ComboBox parameter. If this parameter is passed as a keyword
        argument, it must match the name of that keyword argument (spaces are
        replaced with underscores). In addition, this name is displayed as the
        name of the ComboBox.
items: list
Allowed parameter values.
ptype : {'arg' | 'kwarg' | 'plugin'}
Parameter type.
callback : function
        Callback function called in response to combo box changes. This
        function is typically set when the widget is added to a plugin.
"""
def __init__(self, name, items, ptype='kwarg', callback=None):
super(ComboBox, self).__init__(name, ptype, callback)
self.name_label = QtGui.QLabel()
self.name_label.setText(self.name)
self.name_label.setAlignment(QtCore.Qt.AlignLeft)
self._combo_box = QtGui.QComboBox()
self._combo_box.addItems(items)
self.layout = QtGui.QHBoxLayout(self)
self.layout.addWidget(self.name_label)
self.layout.addWidget(self._combo_box)
self._combo_box.currentIndexChanged.connect(self._value_changed)
# self.connect(self._combo_box,
# SIGNAL("currentIndexChanged(int)"), self.updateUi)
@property
def val(self):
        # QComboBox has no value() method; return the selected item's text.
        return self._combo_box.currentText()
@property
def index(self):
return self._combo_box.currentIndex()
@index.setter
def index(self, i):
self._combo_box.setCurrentIndex(i)
|
|
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.backend.native.config.environment import Linker, LLVMCppToolchain, Platform
from pants.backend.native.subsystems.native_toolchain import NativeToolchain
from pants.backend.native.targets.native_artifact import NativeArtifact
from pants.backend.native.targets.native_library import NativeLibrary
from pants.backend.native.tasks.native_compile import NativeTargetDependencies, ObjectFiles
from pants.backend.native.tasks.native_external_library_fetch import NativeExternalLibraryFiles
from pants.backend.native.tasks.native_task import NativeTask
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.util.collections import assert_single_element
from pants.util.memo import memoized_property
from pants.util.objects import datatype
from pants.util.process_handler import subprocess
class SharedLibrary(datatype(['name', 'path'])): pass
class LinkSharedLibraryRequest(datatype([
('linker', Linker),
('object_files', tuple),
('native_artifact', NativeArtifact),
'output_dir',
('external_lib_dirs', tuple),
('external_lib_names', tuple),
])):
@classmethod
def with_external_libs_product(cls, external_libs_product=None, *args, **kwargs):
if external_libs_product is None:
lib_dirs = ()
lib_names = ()
else:
lib_dirs = (external_libs_product.lib_dir,)
lib_names = external_libs_product.lib_names
return cls(*args, external_lib_dirs=lib_dirs, external_lib_names=lib_names, **kwargs)
class LinkSharedLibraries(NativeTask):
options_scope = 'link-shared-libraries'
@classmethod
def product_types(cls):
return [SharedLibrary]
@classmethod
def prepare(cls, options, round_manager):
super(LinkSharedLibraries, cls).prepare(options, round_manager)
round_manager.require(NativeTargetDependencies)
round_manager.require(ObjectFiles)
round_manager.optional_product(NativeExternalLibraryFiles)
@property
def cache_target_dirs(self):
return True
@classmethod
def implementation_version(cls):
return super(LinkSharedLibraries, cls).implementation_version() + [('LinkSharedLibraries', 0)]
class LinkSharedLibrariesError(TaskError): pass
@classmethod
def subsystem_dependencies(cls):
return super(LinkSharedLibraries, cls).subsystem_dependencies() + (NativeToolchain.scoped(cls),)
@memoized_property
def _native_toolchain(self):
return NativeToolchain.scoped_instance(self)
@memoized_property
def _cpp_toolchain(self):
return self._request_single(LLVMCppToolchain, self._native_toolchain).cpp_toolchain
@memoized_property
def linker(self):
return self._cpp_toolchain.cpp_linker
@memoized_property
def platform(self):
# FIXME: convert this to a v2 engine dependency injection.
return Platform.create()
def _retrieve_single_product_at_target_base(self, product_mapping, target):
self.context.log.debug("product_mapping: {}".format(product_mapping))
self.context.log.debug("target: {}".format(target))
product = product_mapping.get(target)
single_base_dir = assert_single_element(product.keys())
single_product = assert_single_element(product[single_base_dir])
return single_product
def execute(self):
targets_providing_artifacts = self.context.targets(NativeLibrary.produces_ctypes_native_library)
native_target_deps_product = self.context.products.get(NativeTargetDependencies)
compiled_objects_product = self.context.products.get(ObjectFiles)
shared_libs_product = self.context.products.get(SharedLibrary)
external_libs_product = self.context.products.get_data(NativeExternalLibraryFiles)
all_shared_libs_by_name = {}
with self.invalidated(targets_providing_artifacts,
invalidate_dependents=True) as invalidation_check:
for vt in invalidation_check.all_vts:
if vt.valid:
shared_library = self._retrieve_shared_lib_from_cache(vt)
else:
# FIXME: We need to partition links based on proper dependency edges and not
# perform a link to every native_external_library for all targets in the closure.
# https://github.com/pantsbuild/pants/issues/6178
link_request = self._make_link_request(
vt, compiled_objects_product, native_target_deps_product, external_libs_product)
self.context.log.debug("link_request: {}".format(link_request))
shared_library = self._execute_link_request(link_request)
same_name_shared_lib = all_shared_libs_by_name.get(shared_library.name, None)
if same_name_shared_lib:
# TODO: test this branch!
raise self.LinkSharedLibrariesError(
"The name '{name}' was used for two shared libraries: {prev} and {cur}."
.format(name=shared_library.name,
prev=same_name_shared_lib,
cur=shared_library))
else:
all_shared_libs_by_name[shared_library.name] = shared_library
shared_libs_product.add(vt.target, vt.target.target_base).append(shared_library)
def _retrieve_shared_lib_from_cache(self, vt):
native_artifact = vt.target.ctypes_native_library
path_to_cached_lib = os.path.join(
vt.results_dir, native_artifact.as_shared_lib(self.platform))
if not os.path.isfile(path_to_cached_lib):
raise self.LinkSharedLibrariesError("The shared library at {} does not exist!"
.format(path_to_cached_lib))
return SharedLibrary(name=native_artifact.lib_name, path=path_to_cached_lib)
def _make_link_request(self,
vt,
compiled_objects_product,
native_target_deps_product,
external_libs_product):
self.context.log.debug("link target: {}".format(vt.target))
deps = self._retrieve_single_product_at_target_base(native_target_deps_product, vt.target)
all_compiled_object_files = []
for dep_tgt in deps:
self.context.log.debug("dep_tgt: {}".format(dep_tgt))
object_files = self._retrieve_single_product_at_target_base(compiled_objects_product, dep_tgt)
self.context.log.debug("object_files: {}".format(object_files))
object_file_paths = object_files.file_paths()
self.context.log.debug("object_file_paths: {}".format(object_file_paths))
all_compiled_object_files.extend(object_file_paths)
return LinkSharedLibraryRequest.with_external_libs_product(
linker=self.linker,
object_files=tuple(all_compiled_object_files),
native_artifact=vt.target.ctypes_native_library,
output_dir=vt.results_dir,
external_libs_product=external_libs_product)
_SHARED_CMDLINE_ARGS = {
'darwin': lambda: ['-Wl,-dylib'],
'linux': lambda: ['-shared'],
}
  def _get_third_party_lib_args(self, link_request):
    # Assemble standard -L/-l linker flags from the external library fields of
    # the LinkSharedLibraryRequest (external_lib_dirs / external_lib_names).
    if not link_request.external_lib_names:
      return []
    lib_dir_args = ['-L{}'.format(lib_dir) for lib_dir in link_request.external_lib_dirs]
    lib_name_args = ['-l{}'.format(lib_name) for lib_name in link_request.external_lib_names]
    return lib_dir_args + lib_name_args
def _execute_link_request(self, link_request):
object_files = link_request.object_files
if len(object_files) == 0:
raise self.LinkSharedLibrariesError("No object files were provided in request {}!"
.format(link_request))
linker = link_request.linker
native_artifact = link_request.native_artifact
output_dir = link_request.output_dir
resulting_shared_lib_path = os.path.join(output_dir,
native_artifact.as_shared_lib(self.platform))
self.context.log.debug("resulting_shared_lib_path: {}".format(resulting_shared_lib_path))
# We are executing in the results_dir, so get absolute paths for everything.
cmd = ([linker.exe_filename] +
self.platform.resolve_platform_specific(self._SHARED_CMDLINE_ARGS) +
linker.extra_args +
['-o', os.path.abspath(resulting_shared_lib_path)] +
[os.path.abspath(obj) for obj in object_files])
self.context.log.debug("linker command: {}".format(cmd))
env = linker.as_invocation_environment_dict
self.context.log.debug("linker invocation environment: {}".format(env))
with self.context.new_workunit(name='link-shared-libraries',
labels=[WorkUnitLabel.LINKER]) as workunit:
try:
process = subprocess.Popen(
cmd,
cwd=output_dir,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'),
env=env)
except OSError as e:
workunit.set_outcome(WorkUnit.FAILURE)
raise self.LinkSharedLibrariesError(
"Error invoking the native linker with command {cmd} and environment {env} "
"for request {req}: {err}."
.format(cmd=cmd, env=env, req=link_request, err=e),
e)
rc = process.wait()
if rc != 0:
workunit.set_outcome(WorkUnit.FAILURE)
raise self.LinkSharedLibrariesError(
"Error linking native objects with command {cmd} and environment {env} "
"for request {req}. Exit code was: {rc}."
.format(cmd=cmd, env=env, req=link_request, rc=rc))
return SharedLibrary(name=native_artifact.lib_name, path=resulting_shared_lib_path)
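# Illustrative command assembled by _execute_link_request on Linux (a sketch;
# the linker name and paths are hypothetical, and linker.extra_args would be
# spliced in before '-o'):
#
#   ['clang++', '-shared',
#    '-o', '/abs/results_dir/libfoo.so',
#    '/abs/objs/a.o', '/abs/objs/b.o']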
|
|
VERSION = '3.15'
LICENSES = {
'0bsd': {'id': '0BSD', 'deprecated': False},
'aal': {'id': 'AAL', 'deprecated': False},
'abstyles': {'id': 'Abstyles', 'deprecated': False},
'adobe-2006': {'id': 'Adobe-2006', 'deprecated': False},
'adobe-glyph': {'id': 'Adobe-Glyph', 'deprecated': False},
'adsl': {'id': 'ADSL', 'deprecated': False},
'afl-1.1': {'id': 'AFL-1.1', 'deprecated': False},
'afl-1.2': {'id': 'AFL-1.2', 'deprecated': False},
'afl-2.0': {'id': 'AFL-2.0', 'deprecated': False},
'afl-2.1': {'id': 'AFL-2.1', 'deprecated': False},
'afl-3.0': {'id': 'AFL-3.0', 'deprecated': False},
'afmparse': {'id': 'Afmparse', 'deprecated': False},
'agpl-1.0': {'id': 'AGPL-1.0', 'deprecated': True},
'agpl-1.0-only': {'id': 'AGPL-1.0-only', 'deprecated': False},
'agpl-1.0-or-later': {'id': 'AGPL-1.0-or-later', 'deprecated': False},
'agpl-3.0': {'id': 'AGPL-3.0', 'deprecated': True},
'agpl-3.0-only': {'id': 'AGPL-3.0-only', 'deprecated': False},
'agpl-3.0-or-later': {'id': 'AGPL-3.0-or-later', 'deprecated': False},
'aladdin': {'id': 'Aladdin', 'deprecated': False},
'amdplpa': {'id': 'AMDPLPA', 'deprecated': False},
'aml': {'id': 'AML', 'deprecated': False},
'ampas': {'id': 'AMPAS', 'deprecated': False},
'antlr-pd': {'id': 'ANTLR-PD', 'deprecated': False},
'antlr-pd-fallback': {'id': 'ANTLR-PD-fallback', 'deprecated': False},
'apache-1.0': {'id': 'Apache-1.0', 'deprecated': False},
'apache-1.1': {'id': 'Apache-1.1', 'deprecated': False},
'apache-2.0': {'id': 'Apache-2.0', 'deprecated': False},
'apafml': {'id': 'APAFML', 'deprecated': False},
'apl-1.0': {'id': 'APL-1.0', 'deprecated': False},
'apsl-1.0': {'id': 'APSL-1.0', 'deprecated': False},
'apsl-1.1': {'id': 'APSL-1.1', 'deprecated': False},
'apsl-1.2': {'id': 'APSL-1.2', 'deprecated': False},
'apsl-2.0': {'id': 'APSL-2.0', 'deprecated': False},
'artistic-1.0': {'id': 'Artistic-1.0', 'deprecated': False},
'artistic-1.0-cl8': {'id': 'Artistic-1.0-cl8', 'deprecated': False},
'artistic-1.0-perl': {'id': 'Artistic-1.0-Perl', 'deprecated': False},
'artistic-2.0': {'id': 'Artistic-2.0', 'deprecated': False},
'bahyph': {'id': 'Bahyph', 'deprecated': False},
'barr': {'id': 'Barr', 'deprecated': False},
'beerware': {'id': 'Beerware', 'deprecated': False},
'bittorrent-1.0': {'id': 'BitTorrent-1.0', 'deprecated': False},
'bittorrent-1.1': {'id': 'BitTorrent-1.1', 'deprecated': False},
'blessing': {'id': 'blessing', 'deprecated': False},
'blueoak-1.0.0': {'id': 'BlueOak-1.0.0', 'deprecated': False},
'borceux': {'id': 'Borceux', 'deprecated': False},
'bsd-1-clause': {'id': 'BSD-1-Clause', 'deprecated': False},
'bsd-2-clause': {'id': 'BSD-2-Clause', 'deprecated': False},
'bsd-2-clause-freebsd': {'id': 'BSD-2-Clause-FreeBSD', 'deprecated': True},
'bsd-2-clause-netbsd': {'id': 'BSD-2-Clause-NetBSD', 'deprecated': True},
'bsd-2-clause-patent': {'id': 'BSD-2-Clause-Patent', 'deprecated': False},
'bsd-2-clause-views': {'id': 'BSD-2-Clause-Views', 'deprecated': False},
'bsd-3-clause': {'id': 'BSD-3-Clause', 'deprecated': False},
'bsd-3-clause-attribution': {'id': 'BSD-3-Clause-Attribution', 'deprecated': False},
'bsd-3-clause-clear': {'id': 'BSD-3-Clause-Clear', 'deprecated': False},
'bsd-3-clause-lbnl': {'id': 'BSD-3-Clause-LBNL', 'deprecated': False},
'bsd-3-clause-modification': {'id': 'BSD-3-Clause-Modification', 'deprecated': False},
'bsd-3-clause-no-military-license': {'id': 'BSD-3-Clause-No-Military-License', 'deprecated': False},
'bsd-3-clause-no-nuclear-license': {'id': 'BSD-3-Clause-No-Nuclear-License', 'deprecated': False},
'bsd-3-clause-no-nuclear-license-2014': {'id': 'BSD-3-Clause-No-Nuclear-License-2014', 'deprecated': False},
'bsd-3-clause-no-nuclear-warranty': {'id': 'BSD-3-Clause-No-Nuclear-Warranty', 'deprecated': False},
'bsd-3-clause-open-mpi': {'id': 'BSD-3-Clause-Open-MPI', 'deprecated': False},
'bsd-4-clause': {'id': 'BSD-4-Clause', 'deprecated': False},
'bsd-4-clause-shortened': {'id': 'BSD-4-Clause-Shortened', 'deprecated': False},
'bsd-4-clause-uc': {'id': 'BSD-4-Clause-UC', 'deprecated': False},
'bsd-protection': {'id': 'BSD-Protection', 'deprecated': False},
'bsd-source-code': {'id': 'BSD-Source-Code', 'deprecated': False},
'bsl-1.0': {'id': 'BSL-1.0', 'deprecated': False},
'busl-1.1': {'id': 'BUSL-1.1', 'deprecated': False},
'bzip2-1.0.5': {'id': 'bzip2-1.0.5', 'deprecated': False},
'bzip2-1.0.6': {'id': 'bzip2-1.0.6', 'deprecated': False},
'c-uda-1.0': {'id': 'C-UDA-1.0', 'deprecated': False},
'cal-1.0': {'id': 'CAL-1.0', 'deprecated': False},
'cal-1.0-combined-work-exception': {'id': 'CAL-1.0-Combined-Work-Exception', 'deprecated': False},
'caldera': {'id': 'Caldera', 'deprecated': False},
'catosl-1.1': {'id': 'CATOSL-1.1', 'deprecated': False},
'cc-by-1.0': {'id': 'CC-BY-1.0', 'deprecated': False},
'cc-by-2.0': {'id': 'CC-BY-2.0', 'deprecated': False},
'cc-by-2.5': {'id': 'CC-BY-2.5', 'deprecated': False},
'cc-by-2.5-au': {'id': 'CC-BY-2.5-AU', 'deprecated': False},
'cc-by-3.0': {'id': 'CC-BY-3.0', 'deprecated': False},
'cc-by-3.0-at': {'id': 'CC-BY-3.0-AT', 'deprecated': False},
'cc-by-3.0-de': {'id': 'CC-BY-3.0-DE', 'deprecated': False},
'cc-by-3.0-nl': {'id': 'CC-BY-3.0-NL', 'deprecated': False},
'cc-by-3.0-us': {'id': 'CC-BY-3.0-US', 'deprecated': False},
'cc-by-4.0': {'id': 'CC-BY-4.0', 'deprecated': False},
'cc-by-nc-1.0': {'id': 'CC-BY-NC-1.0', 'deprecated': False},
'cc-by-nc-2.0': {'id': 'CC-BY-NC-2.0', 'deprecated': False},
'cc-by-nc-2.5': {'id': 'CC-BY-NC-2.5', 'deprecated': False},
'cc-by-nc-3.0': {'id': 'CC-BY-NC-3.0', 'deprecated': False},
'cc-by-nc-3.0-de': {'id': 'CC-BY-NC-3.0-DE', 'deprecated': False},
'cc-by-nc-4.0': {'id': 'CC-BY-NC-4.0', 'deprecated': False},
'cc-by-nc-nd-1.0': {'id': 'CC-BY-NC-ND-1.0', 'deprecated': False},
'cc-by-nc-nd-2.0': {'id': 'CC-BY-NC-ND-2.0', 'deprecated': False},
'cc-by-nc-nd-2.5': {'id': 'CC-BY-NC-ND-2.5', 'deprecated': False},
'cc-by-nc-nd-3.0': {'id': 'CC-BY-NC-ND-3.0', 'deprecated': False},
'cc-by-nc-nd-3.0-de': {'id': 'CC-BY-NC-ND-3.0-DE', 'deprecated': False},
'cc-by-nc-nd-3.0-igo': {'id': 'CC-BY-NC-ND-3.0-IGO', 'deprecated': False},
'cc-by-nc-nd-4.0': {'id': 'CC-BY-NC-ND-4.0', 'deprecated': False},
'cc-by-nc-sa-1.0': {'id': 'CC-BY-NC-SA-1.0', 'deprecated': False},
'cc-by-nc-sa-2.0': {'id': 'CC-BY-NC-SA-2.0', 'deprecated': False},
'cc-by-nc-sa-2.0-fr': {'id': 'CC-BY-NC-SA-2.0-FR', 'deprecated': False},
'cc-by-nc-sa-2.0-uk': {'id': 'CC-BY-NC-SA-2.0-UK', 'deprecated': False},
'cc-by-nc-sa-2.5': {'id': 'CC-BY-NC-SA-2.5', 'deprecated': False},
'cc-by-nc-sa-3.0': {'id': 'CC-BY-NC-SA-3.0', 'deprecated': False},
'cc-by-nc-sa-3.0-de': {'id': 'CC-BY-NC-SA-3.0-DE', 'deprecated': False},
'cc-by-nc-sa-3.0-igo': {'id': 'CC-BY-NC-SA-3.0-IGO', 'deprecated': False},
'cc-by-nc-sa-4.0': {'id': 'CC-BY-NC-SA-4.0', 'deprecated': False},
'cc-by-nd-1.0': {'id': 'CC-BY-ND-1.0', 'deprecated': False},
'cc-by-nd-2.0': {'id': 'CC-BY-ND-2.0', 'deprecated': False},
'cc-by-nd-2.5': {'id': 'CC-BY-ND-2.5', 'deprecated': False},
'cc-by-nd-3.0': {'id': 'CC-BY-ND-3.0', 'deprecated': False},
'cc-by-nd-3.0-de': {'id': 'CC-BY-ND-3.0-DE', 'deprecated': False},
'cc-by-nd-4.0': {'id': 'CC-BY-ND-4.0', 'deprecated': False},
'cc-by-sa-1.0': {'id': 'CC-BY-SA-1.0', 'deprecated': False},
'cc-by-sa-2.0': {'id': 'CC-BY-SA-2.0', 'deprecated': False},
'cc-by-sa-2.0-uk': {'id': 'CC-BY-SA-2.0-UK', 'deprecated': False},
'cc-by-sa-2.1-jp': {'id': 'CC-BY-SA-2.1-JP', 'deprecated': False},
'cc-by-sa-2.5': {'id': 'CC-BY-SA-2.5', 'deprecated': False},
'cc-by-sa-3.0': {'id': 'CC-BY-SA-3.0', 'deprecated': False},
'cc-by-sa-3.0-at': {'id': 'CC-BY-SA-3.0-AT', 'deprecated': False},
'cc-by-sa-3.0-de': {'id': 'CC-BY-SA-3.0-DE', 'deprecated': False},
'cc-by-sa-4.0': {'id': 'CC-BY-SA-4.0', 'deprecated': False},
'cc-pddc': {'id': 'CC-PDDC', 'deprecated': False},
'cc0-1.0': {'id': 'CC0-1.0', 'deprecated': False},
'cddl-1.0': {'id': 'CDDL-1.0', 'deprecated': False},
'cddl-1.1': {'id': 'CDDL-1.1', 'deprecated': False},
'cdl-1.0': {'id': 'CDL-1.0', 'deprecated': False},
'cdla-permissive-1.0': {'id': 'CDLA-Permissive-1.0', 'deprecated': False},
'cdla-permissive-2.0': {'id': 'CDLA-Permissive-2.0', 'deprecated': False},
'cdla-sharing-1.0': {'id': 'CDLA-Sharing-1.0', 'deprecated': False},
'cecill-1.0': {'id': 'CECILL-1.0', 'deprecated': False},
'cecill-1.1': {'id': 'CECILL-1.1', 'deprecated': False},
'cecill-2.0': {'id': 'CECILL-2.0', 'deprecated': False},
'cecill-2.1': {'id': 'CECILL-2.1', 'deprecated': False},
'cecill-b': {'id': 'CECILL-B', 'deprecated': False},
'cecill-c': {'id': 'CECILL-C', 'deprecated': False},
'cern-ohl-1.1': {'id': 'CERN-OHL-1.1', 'deprecated': False},
'cern-ohl-1.2': {'id': 'CERN-OHL-1.2', 'deprecated': False},
'cern-ohl-p-2.0': {'id': 'CERN-OHL-P-2.0', 'deprecated': False},
'cern-ohl-s-2.0': {'id': 'CERN-OHL-S-2.0', 'deprecated': False},
'cern-ohl-w-2.0': {'id': 'CERN-OHL-W-2.0', 'deprecated': False},
'clartistic': {'id': 'ClArtistic', 'deprecated': False},
'cnri-jython': {'id': 'CNRI-Jython', 'deprecated': False},
'cnri-python': {'id': 'CNRI-Python', 'deprecated': False},
'cnri-python-gpl-compatible': {'id': 'CNRI-Python-GPL-Compatible', 'deprecated': False},
'coil-1.0': {'id': 'COIL-1.0', 'deprecated': False},
'community-spec-1.0': {'id': 'Community-Spec-1.0', 'deprecated': False},
'condor-1.1': {'id': 'Condor-1.1', 'deprecated': False},
'copyleft-next-0.3.0': {'id': 'copyleft-next-0.3.0', 'deprecated': False},
'copyleft-next-0.3.1': {'id': 'copyleft-next-0.3.1', 'deprecated': False},
'cpal-1.0': {'id': 'CPAL-1.0', 'deprecated': False},
'cpl-1.0': {'id': 'CPL-1.0', 'deprecated': False},
'cpol-1.02': {'id': 'CPOL-1.02', 'deprecated': False},
'crossword': {'id': 'Crossword', 'deprecated': False},
'crystalstacker': {'id': 'CrystalStacker', 'deprecated': False},
'cua-opl-1.0': {'id': 'CUA-OPL-1.0', 'deprecated': False},
'cube': {'id': 'Cube', 'deprecated': False},
'curl': {'id': 'curl', 'deprecated': False},
'd-fsl-1.0': {'id': 'D-FSL-1.0', 'deprecated': False},
'diffmark': {'id': 'diffmark', 'deprecated': False},
'doc': {'id': 'DOC', 'deprecated': False},
'dotseqn': {'id': 'Dotseqn', 'deprecated': False},
'drl-1.0': {'id': 'DRL-1.0', 'deprecated': False},
'dsdp': {'id': 'DSDP', 'deprecated': False},
'dvipdfm': {'id': 'dvipdfm', 'deprecated': False},
'ecl-1.0': {'id': 'ECL-1.0', 'deprecated': False},
'ecl-2.0': {'id': 'ECL-2.0', 'deprecated': False},
'ecos-2.0': {'id': 'eCos-2.0', 'deprecated': True},
'efl-1.0': {'id': 'EFL-1.0', 'deprecated': False},
'efl-2.0': {'id': 'EFL-2.0', 'deprecated': False},
'egenix': {'id': 'eGenix', 'deprecated': False},
'entessa': {'id': 'Entessa', 'deprecated': False},
'epics': {'id': 'EPICS', 'deprecated': False},
'epl-1.0': {'id': 'EPL-1.0', 'deprecated': False},
'epl-2.0': {'id': 'EPL-2.0', 'deprecated': False},
'erlpl-1.1': {'id': 'ErlPL-1.1', 'deprecated': False},
'etalab-2.0': {'id': 'etalab-2.0', 'deprecated': False},
'eudatagrid': {'id': 'EUDatagrid', 'deprecated': False},
'eupl-1.0': {'id': 'EUPL-1.0', 'deprecated': False},
'eupl-1.1': {'id': 'EUPL-1.1', 'deprecated': False},
'eupl-1.2': {'id': 'EUPL-1.2', 'deprecated': False},
'eurosym': {'id': 'Eurosym', 'deprecated': False},
'fair': {'id': 'Fair', 'deprecated': False},
'fdk-aac': {'id': 'FDK-AAC', 'deprecated': False},
'frameworx-1.0': {'id': 'Frameworx-1.0', 'deprecated': False},
'freebsd-doc': {'id': 'FreeBSD-DOC', 'deprecated': False},
'freeimage': {'id': 'FreeImage', 'deprecated': False},
'fsfap': {'id': 'FSFAP', 'deprecated': False},
'fsful': {'id': 'FSFUL', 'deprecated': False},
'fsfullr': {'id': 'FSFULLR', 'deprecated': False},
'ftl': {'id': 'FTL', 'deprecated': False},
'gd': {'id': 'GD', 'deprecated': False},
'gfdl-1.1': {'id': 'GFDL-1.1', 'deprecated': True},
'gfdl-1.1-invariants-only': {'id': 'GFDL-1.1-invariants-only', 'deprecated': False},
'gfdl-1.1-invariants-or-later': {'id': 'GFDL-1.1-invariants-or-later', 'deprecated': False},
'gfdl-1.1-no-invariants-only': {'id': 'GFDL-1.1-no-invariants-only', 'deprecated': False},
'gfdl-1.1-no-invariants-or-later': {'id': 'GFDL-1.1-no-invariants-or-later', 'deprecated': False},
'gfdl-1.1-only': {'id': 'GFDL-1.1-only', 'deprecated': False},
'gfdl-1.1-or-later': {'id': 'GFDL-1.1-or-later', 'deprecated': False},
'gfdl-1.2': {'id': 'GFDL-1.2', 'deprecated': True},
'gfdl-1.2-invariants-only': {'id': 'GFDL-1.2-invariants-only', 'deprecated': False},
'gfdl-1.2-invariants-or-later': {'id': 'GFDL-1.2-invariants-or-later', 'deprecated': False},
'gfdl-1.2-no-invariants-only': {'id': 'GFDL-1.2-no-invariants-only', 'deprecated': False},
'gfdl-1.2-no-invariants-or-later': {'id': 'GFDL-1.2-no-invariants-or-later', 'deprecated': False},
'gfdl-1.2-only': {'id': 'GFDL-1.2-only', 'deprecated': False},
'gfdl-1.2-or-later': {'id': 'GFDL-1.2-or-later', 'deprecated': False},
'gfdl-1.3': {'id': 'GFDL-1.3', 'deprecated': True},
'gfdl-1.3-invariants-only': {'id': 'GFDL-1.3-invariants-only', 'deprecated': False},
'gfdl-1.3-invariants-or-later': {'id': 'GFDL-1.3-invariants-or-later', 'deprecated': False},
'gfdl-1.3-no-invariants-only': {'id': 'GFDL-1.3-no-invariants-only', 'deprecated': False},
'gfdl-1.3-no-invariants-or-later': {'id': 'GFDL-1.3-no-invariants-or-later', 'deprecated': False},
'gfdl-1.3-only': {'id': 'GFDL-1.3-only', 'deprecated': False},
'gfdl-1.3-or-later': {'id': 'GFDL-1.3-or-later', 'deprecated': False},
'giftware': {'id': 'Giftware', 'deprecated': False},
'gl2ps': {'id': 'GL2PS', 'deprecated': False},
'glide': {'id': 'Glide', 'deprecated': False},
'glulxe': {'id': 'Glulxe', 'deprecated': False},
'glwtpl': {'id': 'GLWTPL', 'deprecated': False},
'gnuplot': {'id': 'gnuplot', 'deprecated': False},
'gpl-1.0': {'id': 'GPL-1.0', 'deprecated': True},
'gpl-1.0+': {'id': 'GPL-1.0+', 'deprecated': True},
'gpl-1.0-only': {'id': 'GPL-1.0-only', 'deprecated': False},
'gpl-1.0-or-later': {'id': 'GPL-1.0-or-later', 'deprecated': False},
'gpl-2.0': {'id': 'GPL-2.0', 'deprecated': True},
'gpl-2.0+': {'id': 'GPL-2.0+', 'deprecated': True},
'gpl-2.0-only': {'id': 'GPL-2.0-only', 'deprecated': False},
'gpl-2.0-or-later': {'id': 'GPL-2.0-or-later', 'deprecated': False},
'gpl-2.0-with-autoconf-exception': {'id': 'GPL-2.0-with-autoconf-exception', 'deprecated': True},
'gpl-2.0-with-bison-exception': {'id': 'GPL-2.0-with-bison-exception', 'deprecated': True},
'gpl-2.0-with-classpath-exception': {'id': 'GPL-2.0-with-classpath-exception', 'deprecated': True},
'gpl-2.0-with-font-exception': {'id': 'GPL-2.0-with-font-exception', 'deprecated': True},
'gpl-2.0-with-gcc-exception': {'id': 'GPL-2.0-with-GCC-exception', 'deprecated': True},
'gpl-3.0': {'id': 'GPL-3.0', 'deprecated': True},
'gpl-3.0+': {'id': 'GPL-3.0+', 'deprecated': True},
'gpl-3.0-only': {'id': 'GPL-3.0-only', 'deprecated': False},
'gpl-3.0-or-later': {'id': 'GPL-3.0-or-later', 'deprecated': False},
'gpl-3.0-with-autoconf-exception': {'id': 'GPL-3.0-with-autoconf-exception', 'deprecated': True},
'gpl-3.0-with-gcc-exception': {'id': 'GPL-3.0-with-GCC-exception', 'deprecated': True},
'gsoap-1.3b': {'id': 'gSOAP-1.3b', 'deprecated': False},
'haskellreport': {'id': 'HaskellReport', 'deprecated': False},
'hippocratic-2.1': {'id': 'Hippocratic-2.1', 'deprecated': False},
'hpnd': {'id': 'HPND', 'deprecated': False},
'hpnd-sell-variant': {'id': 'HPND-sell-variant', 'deprecated': False},
'htmltidy': {'id': 'HTMLTIDY', 'deprecated': False},
'ibm-pibs': {'id': 'IBM-pibs', 'deprecated': False},
'icu': {'id': 'ICU', 'deprecated': False},
'ijg': {'id': 'IJG', 'deprecated': False},
'imagemagick': {'id': 'ImageMagick', 'deprecated': False},
'imatix': {'id': 'iMatix', 'deprecated': False},
'imlib2': {'id': 'Imlib2', 'deprecated': False},
'info-zip': {'id': 'Info-ZIP', 'deprecated': False},
'intel': {'id': 'Intel', 'deprecated': False},
'intel-acpi': {'id': 'Intel-ACPI', 'deprecated': False},
'interbase-1.0': {'id': 'Interbase-1.0', 'deprecated': False},
'ipa': {'id': 'IPA', 'deprecated': False},
'ipl-1.0': {'id': 'IPL-1.0', 'deprecated': False},
'isc': {'id': 'ISC', 'deprecated': False},
'jasper-2.0': {'id': 'JasPer-2.0', 'deprecated': False},
'jpnic': {'id': 'JPNIC', 'deprecated': False},
'json': {'id': 'JSON', 'deprecated': False},
'lal-1.2': {'id': 'LAL-1.2', 'deprecated': False},
'lal-1.3': {'id': 'LAL-1.3', 'deprecated': False},
'latex2e': {'id': 'Latex2e', 'deprecated': False},
'leptonica': {'id': 'Leptonica', 'deprecated': False},
'lgpl-2.0': {'id': 'LGPL-2.0', 'deprecated': True},
'lgpl-2.0+': {'id': 'LGPL-2.0+', 'deprecated': True},
'lgpl-2.0-only': {'id': 'LGPL-2.0-only', 'deprecated': False},
'lgpl-2.0-or-later': {'id': 'LGPL-2.0-or-later', 'deprecated': False},
'lgpl-2.1': {'id': 'LGPL-2.1', 'deprecated': True},
'lgpl-2.1+': {'id': 'LGPL-2.1+', 'deprecated': True},
'lgpl-2.1-only': {'id': 'LGPL-2.1-only', 'deprecated': False},
'lgpl-2.1-or-later': {'id': 'LGPL-2.1-or-later', 'deprecated': False},
'lgpl-3.0': {'id': 'LGPL-3.0', 'deprecated': True},
'lgpl-3.0+': {'id': 'LGPL-3.0+', 'deprecated': True},
'lgpl-3.0-only': {'id': 'LGPL-3.0-only', 'deprecated': False},
'lgpl-3.0-or-later': {'id': 'LGPL-3.0-or-later', 'deprecated': False},
'lgpllr': {'id': 'LGPLLR', 'deprecated': False},
'libpng': {'id': 'Libpng', 'deprecated': False},
'libpng-2.0': {'id': 'libpng-2.0', 'deprecated': False},
'libselinux-1.0': {'id': 'libselinux-1.0', 'deprecated': False},
'libtiff': {'id': 'libtiff', 'deprecated': False},
'liliq-p-1.1': {'id': 'LiLiQ-P-1.1', 'deprecated': False},
'liliq-r-1.1': {'id': 'LiLiQ-R-1.1', 'deprecated': False},
'liliq-rplus-1.1': {'id': 'LiLiQ-Rplus-1.1', 'deprecated': False},
'linux-man-pages-copyleft': {'id': 'Linux-man-pages-copyleft', 'deprecated': False},
'linux-openib': {'id': 'Linux-OpenIB', 'deprecated': False},
'lpl-1.0': {'id': 'LPL-1.0', 'deprecated': False},
'lpl-1.02': {'id': 'LPL-1.02', 'deprecated': False},
'lppl-1.0': {'id': 'LPPL-1.0', 'deprecated': False},
'lppl-1.1': {'id': 'LPPL-1.1', 'deprecated': False},
'lppl-1.2': {'id': 'LPPL-1.2', 'deprecated': False},
'lppl-1.3a': {'id': 'LPPL-1.3a', 'deprecated': False},
'lppl-1.3c': {'id': 'LPPL-1.3c', 'deprecated': False},
'makeindex': {'id': 'MakeIndex', 'deprecated': False},
'miros': {'id': 'MirOS', 'deprecated': False},
'mit': {'id': 'MIT', 'deprecated': False},
'mit-0': {'id': 'MIT-0', 'deprecated': False},
'mit-advertising': {'id': 'MIT-advertising', 'deprecated': False},
'mit-cmu': {'id': 'MIT-CMU', 'deprecated': False},
'mit-enna': {'id': 'MIT-enna', 'deprecated': False},
'mit-feh': {'id': 'MIT-feh', 'deprecated': False},
'mit-modern-variant': {'id': 'MIT-Modern-Variant', 'deprecated': False},
'mit-open-group': {'id': 'MIT-open-group', 'deprecated': False},
'mitnfa': {'id': 'MITNFA', 'deprecated': False},
'motosoto': {'id': 'Motosoto', 'deprecated': False},
'mpich2': {'id': 'mpich2', 'deprecated': False},
'mpl-1.0': {'id': 'MPL-1.0', 'deprecated': False},
'mpl-1.1': {'id': 'MPL-1.1', 'deprecated': False},
'mpl-2.0': {'id': 'MPL-2.0', 'deprecated': False},
'mpl-2.0-no-copyleft-exception': {'id': 'MPL-2.0-no-copyleft-exception', 'deprecated': False},
'ms-pl': {'id': 'MS-PL', 'deprecated': False},
'ms-rl': {'id': 'MS-RL', 'deprecated': False},
'mtll': {'id': 'MTLL', 'deprecated': False},
'mulanpsl-1.0': {'id': 'MulanPSL-1.0', 'deprecated': False},
'mulanpsl-2.0': {'id': 'MulanPSL-2.0', 'deprecated': False},
'multics': {'id': 'Multics', 'deprecated': False},
'mup': {'id': 'Mup', 'deprecated': False},
'naist-2003': {'id': 'NAIST-2003', 'deprecated': False},
'nasa-1.3': {'id': 'NASA-1.3', 'deprecated': False},
'naumen': {'id': 'Naumen', 'deprecated': False},
'nbpl-1.0': {'id': 'NBPL-1.0', 'deprecated': False},
'ncgl-uk-2.0': {'id': 'NCGL-UK-2.0', 'deprecated': False},
'ncsa': {'id': 'NCSA', 'deprecated': False},
'net-snmp': {'id': 'Net-SNMP', 'deprecated': False},
'netcdf': {'id': 'NetCDF', 'deprecated': False},
'newsletr': {'id': 'Newsletr', 'deprecated': False},
'ngpl': {'id': 'NGPL', 'deprecated': False},
'nist-pd': {'id': 'NIST-PD', 'deprecated': False},
'nist-pd-fallback': {'id': 'NIST-PD-fallback', 'deprecated': False},
'nlod-1.0': {'id': 'NLOD-1.0', 'deprecated': False},
'nlod-2.0': {'id': 'NLOD-2.0', 'deprecated': False},
'nlpl': {'id': 'NLPL', 'deprecated': False},
'nokia': {'id': 'Nokia', 'deprecated': False},
'nosl': {'id': 'NOSL', 'deprecated': False},
'noweb': {'id': 'Noweb', 'deprecated': False},
'npl-1.0': {'id': 'NPL-1.0', 'deprecated': False},
'npl-1.1': {'id': 'NPL-1.1', 'deprecated': False},
'nposl-3.0': {'id': 'NPOSL-3.0', 'deprecated': False},
'nrl': {'id': 'NRL', 'deprecated': False},
'ntp': {'id': 'NTP', 'deprecated': False},
'ntp-0': {'id': 'NTP-0', 'deprecated': False},
'nunit': {'id': 'Nunit', 'deprecated': True},
'o-uda-1.0': {'id': 'O-UDA-1.0', 'deprecated': False},
'occt-pl': {'id': 'OCCT-PL', 'deprecated': False},
'oclc-2.0': {'id': 'OCLC-2.0', 'deprecated': False},
'odbl-1.0': {'id': 'ODbL-1.0', 'deprecated': False},
'odc-by-1.0': {'id': 'ODC-By-1.0', 'deprecated': False},
'ofl-1.0': {'id': 'OFL-1.0', 'deprecated': False},
'ofl-1.0-no-rfn': {'id': 'OFL-1.0-no-RFN', 'deprecated': False},
'ofl-1.0-rfn': {'id': 'OFL-1.0-RFN', 'deprecated': False},
'ofl-1.1': {'id': 'OFL-1.1', 'deprecated': False},
'ofl-1.1-no-rfn': {'id': 'OFL-1.1-no-RFN', 'deprecated': False},
'ofl-1.1-rfn': {'id': 'OFL-1.1-RFN', 'deprecated': False},
'ogc-1.0': {'id': 'OGC-1.0', 'deprecated': False},
'ogdl-taiwan-1.0': {'id': 'OGDL-Taiwan-1.0', 'deprecated': False},
'ogl-canada-2.0': {'id': 'OGL-Canada-2.0', 'deprecated': False},
'ogl-uk-1.0': {'id': 'OGL-UK-1.0', 'deprecated': False},
'ogl-uk-2.0': {'id': 'OGL-UK-2.0', 'deprecated': False},
'ogl-uk-3.0': {'id': 'OGL-UK-3.0', 'deprecated': False},
'ogtsl': {'id': 'OGTSL', 'deprecated': False},
'oldap-1.1': {'id': 'OLDAP-1.1', 'deprecated': False},
'oldap-1.2': {'id': 'OLDAP-1.2', 'deprecated': False},
'oldap-1.3': {'id': 'OLDAP-1.3', 'deprecated': False},
'oldap-1.4': {'id': 'OLDAP-1.4', 'deprecated': False},
'oldap-2.0': {'id': 'OLDAP-2.0', 'deprecated': False},
'oldap-2.0.1': {'id': 'OLDAP-2.0.1', 'deprecated': False},
'oldap-2.1': {'id': 'OLDAP-2.1', 'deprecated': False},
'oldap-2.2': {'id': 'OLDAP-2.2', 'deprecated': False},
'oldap-2.2.1': {'id': 'OLDAP-2.2.1', 'deprecated': False},
'oldap-2.2.2': {'id': 'OLDAP-2.2.2', 'deprecated': False},
'oldap-2.3': {'id': 'OLDAP-2.3', 'deprecated': False},
'oldap-2.4': {'id': 'OLDAP-2.4', 'deprecated': False},
'oldap-2.5': {'id': 'OLDAP-2.5', 'deprecated': False},
'oldap-2.6': {'id': 'OLDAP-2.6', 'deprecated': False},
'oldap-2.7': {'id': 'OLDAP-2.7', 'deprecated': False},
'oldap-2.8': {'id': 'OLDAP-2.8', 'deprecated': False},
'oml': {'id': 'OML', 'deprecated': False},
'openssl': {'id': 'OpenSSL', 'deprecated': False},
'opl-1.0': {'id': 'OPL-1.0', 'deprecated': False},
'opubl-1.0': {'id': 'OPUBL-1.0', 'deprecated': False},
'oset-pl-2.1': {'id': 'OSET-PL-2.1', 'deprecated': False},
'osl-1.0': {'id': 'OSL-1.0', 'deprecated': False},
'osl-1.1': {'id': 'OSL-1.1', 'deprecated': False},
'osl-2.0': {'id': 'OSL-2.0', 'deprecated': False},
'osl-2.1': {'id': 'OSL-2.1', 'deprecated': False},
'osl-3.0': {'id': 'OSL-3.0', 'deprecated': False},
'parity-6.0.0': {'id': 'Parity-6.0.0', 'deprecated': False},
'parity-7.0.0': {'id': 'Parity-7.0.0', 'deprecated': False},
'pddl-1.0': {'id': 'PDDL-1.0', 'deprecated': False},
'php-3.0': {'id': 'PHP-3.0', 'deprecated': False},
'php-3.01': {'id': 'PHP-3.01', 'deprecated': False},
'plexus': {'id': 'Plexus', 'deprecated': False},
'polyform-noncommercial-1.0.0': {'id': 'PolyForm-Noncommercial-1.0.0', 'deprecated': False},
'polyform-small-business-1.0.0': {'id': 'PolyForm-Small-Business-1.0.0', 'deprecated': False},
'postgresql': {'id': 'PostgreSQL', 'deprecated': False},
'psf-2.0': {'id': 'PSF-2.0', 'deprecated': False},
'psfrag': {'id': 'psfrag', 'deprecated': False},
'psutils': {'id': 'psutils', 'deprecated': False},
'python-2.0': {'id': 'Python-2.0', 'deprecated': False},
'qhull': {'id': 'Qhull', 'deprecated': False},
'qpl-1.0': {'id': 'QPL-1.0', 'deprecated': False},
'rdisc': {'id': 'Rdisc', 'deprecated': False},
'rhecos-1.1': {'id': 'RHeCos-1.1', 'deprecated': False},
'rpl-1.1': {'id': 'RPL-1.1', 'deprecated': False},
'rpl-1.5': {'id': 'RPL-1.5', 'deprecated': False},
'rpsl-1.0': {'id': 'RPSL-1.0', 'deprecated': False},
'rsa-md': {'id': 'RSA-MD', 'deprecated': False},
'rscpl': {'id': 'RSCPL', 'deprecated': False},
'ruby': {'id': 'Ruby', 'deprecated': False},
'sax-pd': {'id': 'SAX-PD', 'deprecated': False},
'saxpath': {'id': 'Saxpath', 'deprecated': False},
'scea': {'id': 'SCEA', 'deprecated': False},
'sendmail': {'id': 'Sendmail', 'deprecated': False},
'sendmail-8.23': {'id': 'Sendmail-8.23', 'deprecated': False},
'sgi-b-1.0': {'id': 'SGI-B-1.0', 'deprecated': False},
'sgi-b-1.1': {'id': 'SGI-B-1.1', 'deprecated': False},
'sgi-b-2.0': {'id': 'SGI-B-2.0', 'deprecated': False},
'shl-0.5': {'id': 'SHL-0.5', 'deprecated': False},
'shl-0.51': {'id': 'SHL-0.51', 'deprecated': False},
'simpl-2.0': {'id': 'SimPL-2.0', 'deprecated': False},
'sissl': {'id': 'SISSL', 'deprecated': False},
'sissl-1.2': {'id': 'SISSL-1.2', 'deprecated': False},
'sleepycat': {'id': 'Sleepycat', 'deprecated': False},
'smlnj': {'id': 'SMLNJ', 'deprecated': False},
'smppl': {'id': 'SMPPL', 'deprecated': False},
'snia': {'id': 'SNIA', 'deprecated': False},
'spencer-86': {'id': 'Spencer-86', 'deprecated': False},
'spencer-94': {'id': 'Spencer-94', 'deprecated': False},
'spencer-99': {'id': 'Spencer-99', 'deprecated': False},
'spl-1.0': {'id': 'SPL-1.0', 'deprecated': False},
'ssh-openssh': {'id': 'SSH-OpenSSH', 'deprecated': False},
'ssh-short': {'id': 'SSH-short', 'deprecated': False},
'sspl-1.0': {'id': 'SSPL-1.0', 'deprecated': False},
'standardml-nj': {'id': 'StandardML-NJ', 'deprecated': True},
'sugarcrm-1.1.3': {'id': 'SugarCRM-1.1.3', 'deprecated': False},
'swl': {'id': 'SWL', 'deprecated': False},
'tapr-ohl-1.0': {'id': 'TAPR-OHL-1.0', 'deprecated': False},
'tcl': {'id': 'TCL', 'deprecated': False},
'tcp-wrappers': {'id': 'TCP-wrappers', 'deprecated': False},
'tmate': {'id': 'TMate', 'deprecated': False},
'torque-1.1': {'id': 'TORQUE-1.1', 'deprecated': False},
'tosl': {'id': 'TOSL', 'deprecated': False},
'tu-berlin-1.0': {'id': 'TU-Berlin-1.0', 'deprecated': False},
'tu-berlin-2.0': {'id': 'TU-Berlin-2.0', 'deprecated': False},
'ucl-1.0': {'id': 'UCL-1.0', 'deprecated': False},
'unicode-dfs-2015': {'id': 'Unicode-DFS-2015', 'deprecated': False},
'unicode-dfs-2016': {'id': 'Unicode-DFS-2016', 'deprecated': False},
'unicode-tou': {'id': 'Unicode-TOU', 'deprecated': False},
'unlicense': {'id': 'Unlicense', 'deprecated': False},
'upl-1.0': {'id': 'UPL-1.0', 'deprecated': False},
'vim': {'id': 'Vim', 'deprecated': False},
'vostrom': {'id': 'VOSTROM', 'deprecated': False},
'vsl-1.0': {'id': 'VSL-1.0', 'deprecated': False},
'w3c': {'id': 'W3C', 'deprecated': False},
'w3c-19980720': {'id': 'W3C-19980720', 'deprecated': False},
'w3c-20150513': {'id': 'W3C-20150513', 'deprecated': False},
'watcom-1.0': {'id': 'Watcom-1.0', 'deprecated': False},
'wsuipa': {'id': 'Wsuipa', 'deprecated': False},
'wtfpl': {'id': 'WTFPL', 'deprecated': False},
'wxwindows': {'id': 'wxWindows', 'deprecated': True},
'x11': {'id': 'X11', 'deprecated': False},
'xerox': {'id': 'Xerox', 'deprecated': False},
'xfree86-1.1': {'id': 'XFree86-1.1', 'deprecated': False},
'xinetd': {'id': 'xinetd', 'deprecated': False},
'xnet': {'id': 'Xnet', 'deprecated': False},
'xpp': {'id': 'xpp', 'deprecated': False},
'xskat': {'id': 'XSkat', 'deprecated': False},
'ypl-1.0': {'id': 'YPL-1.0', 'deprecated': False},
'ypl-1.1': {'id': 'YPL-1.1', 'deprecated': False},
'zed': {'id': 'Zed', 'deprecated': False},
'zend-2.0': {'id': 'Zend-2.0', 'deprecated': False},
'zimbra-1.3': {'id': 'Zimbra-1.3', 'deprecated': False},
'zimbra-1.4': {'id': 'Zimbra-1.4', 'deprecated': False},
'zlib': {'id': 'Zlib', 'deprecated': False},
'zlib-acknowledgement': {'id': 'zlib-acknowledgement', 'deprecated': False},
'zpl-1.1': {'id': 'ZPL-1.1', 'deprecated': False},
'zpl-2.0': {'id': 'ZPL-2.0', 'deprecated': False},
'zpl-2.1': {'id': 'ZPL-2.1', 'deprecated': False},
}
EXCEPTIONS = {
'389-exception': {'id': '389-exception', 'deprecated': False},
'autoconf-exception-2.0': {'id': 'Autoconf-exception-2.0', 'deprecated': False},
'autoconf-exception-3.0': {'id': 'Autoconf-exception-3.0', 'deprecated': False},
'bison-exception-2.2': {'id': 'Bison-exception-2.2', 'deprecated': False},
'bootloader-exception': {'id': 'Bootloader-exception', 'deprecated': False},
'classpath-exception-2.0': {'id': 'Classpath-exception-2.0', 'deprecated': False},
'clisp-exception-2.0': {'id': 'CLISP-exception-2.0', 'deprecated': False},
'digirule-foss-exception': {'id': 'DigiRule-FOSS-exception', 'deprecated': False},
'ecos-exception-2.0': {'id': 'eCos-exception-2.0', 'deprecated': False},
'fawkes-runtime-exception': {'id': 'Fawkes-Runtime-exception', 'deprecated': False},
'fltk-exception': {'id': 'FLTK-exception', 'deprecated': False},
'font-exception-2.0': {'id': 'Font-exception-2.0', 'deprecated': False},
'freertos-exception-2.0': {'id': 'freertos-exception-2.0', 'deprecated': False},
'gcc-exception-2.0': {'id': 'GCC-exception-2.0', 'deprecated': False},
'gcc-exception-3.1': {'id': 'GCC-exception-3.1', 'deprecated': False},
'gnu-javamail-exception': {'id': 'gnu-javamail-exception', 'deprecated': False},
'gpl-3.0-linking-exception': {'id': 'GPL-3.0-linking-exception', 'deprecated': False},
'gpl-3.0-linking-source-exception': {'id': 'GPL-3.0-linking-source-exception', 'deprecated': False},
'gpl-cc-1.0': {'id': 'GPL-CC-1.0', 'deprecated': False},
'i2p-gpl-java-exception': {'id': 'i2p-gpl-java-exception', 'deprecated': False},
'lgpl-3.0-linking-exception': {'id': 'LGPL-3.0-linking-exception', 'deprecated': False},
'libtool-exception': {'id': 'Libtool-exception', 'deprecated': False},
'linux-syscall-note': {'id': 'Linux-syscall-note', 'deprecated': False},
'llvm-exception': {'id': 'LLVM-exception', 'deprecated': False},
'lzma-exception': {'id': 'LZMA-exception', 'deprecated': False},
'mif-exception': {'id': 'mif-exception', 'deprecated': False},
'nokia-qt-exception-1.1': {'id': 'Nokia-Qt-exception-1.1', 'deprecated': True},
'ocaml-lgpl-linking-exception': {'id': 'OCaml-LGPL-linking-exception', 'deprecated': False},
'occt-exception-1.0': {'id': 'OCCT-exception-1.0', 'deprecated': False},
'openjdk-assembly-exception-1.0': {'id': 'OpenJDK-assembly-exception-1.0', 'deprecated': False},
'openvpn-openssl-exception': {'id': 'openvpn-openssl-exception', 'deprecated': False},
'ps-or-pdf-font-exception-20170817': {'id': 'PS-or-PDF-font-exception-20170817', 'deprecated': False},
'qt-gpl-exception-1.0': {'id': 'Qt-GPL-exception-1.0', 'deprecated': False},
'qt-lgpl-exception-1.1': {'id': 'Qt-LGPL-exception-1.1', 'deprecated': False},
'qwt-exception-1.0': {'id': 'Qwt-exception-1.0', 'deprecated': False},
'shl-2.0': {'id': 'SHL-2.0', 'deprecated': False},
'shl-2.1': {'id': 'SHL-2.1', 'deprecated': False},
'swift-exception': {'id': 'Swift-exception', 'deprecated': False},
'u-boot-exception-2.0': {'id': 'u-boot-exception-2.0', 'deprecated': False},
'universal-foss-exception-1.0': {'id': 'Universal-FOSS-exception-1.0', 'deprecated': False},
'wxwindows-exception-3.1': {'id': 'WxWindows-exception-3.1', 'deprecated': False},
}
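# --- Illustrative sketch (not part of the generated tables above) ---
# Each table above keys an SPDX identifier by its lowercased form and maps
# it to the canonical spelling plus a deprecation flag.  The helper below is
# a hypothetical example of how such a table is typically consumed; the
# function name and return shape are assumptions for illustration only.
def lookup_exception(identifier):
    """Return (canonical_id, deprecated) for an SPDX exception, or None."""
    entry = EXCEPTIONS.get(identifier.strip().lower())
    if entry is None:
        return None
    return entry['id'], entry['deprecated']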
|
|
"""WSGI App Creator
This module is responsible for creating the basic Pylons WSGI
application (PylonsApp). It's generally assumed that it will be called
by Paste, though any WSGI server could create and call the WSGI app as
well.
"""
import logging
import sys
import paste.registry
from routes import request_config
from webob.exc import HTTPFound, HTTPNotFound
import pylons
import pylons.legacy
import pylons.templating
from pylons.controllers.util import Request, Response
from pylons.i18n.translation import _get_translator
from pylons.util import AttribSafeContextObj, ContextObj, PylonsContext, \
class_name_from_module_name
__all__ = ['PylonsApp']
log = logging.getLogger(__name__)
class PylonsApp(object):
"""Pylons WSGI Application
This basic WSGI app is provided should a web developer want to
get access to the most basic Pylons web application environment
    available. By itself, this Pylons web application does little more
    than dispatch to a controller and set up the context object, the
    request object, and the globals object.
    Additional functionality like sessions and caching can be set up by
    altering the ``environ['pylons.environ_config']`` setting to
    indicate which key the ``session`` and ``cache`` functionality
    should come from.
Resolving the URL and dispatching can be customized by sub-classing
or "monkey-patching" this class. Subclassing is the preferred
approach.
"""
def __init__(self, **kwargs):
"""Initialize a base Pylons WSGI application
The base Pylons WSGI application requires several keywords, the
package name, and the globals object. If no helpers object is
provided then h will be None.
"""
self.config = config = pylons.config._current_obj()
package_name = config['pylons.package']
self.helpers = config['pylons.h']
self.globals = config.get('pylons.app_globals') or config['pylons.g']
self.environ_config = config['pylons.environ_config']
self.package_name = package_name
self.request_options = config['pylons.request_options']
self.response_options = config['pylons.response_options']
self.controller_classes = {}
self.log_debug = False
self.config.setdefault('lang', None)
# Create the redirect function we'll use and save it
def redirect_to(url):
log.debug("Raising redirect to %s", url)
raise HTTPFound(location=url)
self.redirect_to = redirect_to
# Initialize Buffet and all our template engines, default engine is the
# first in the template_engines list
if config.get('buffet.template_engines'):
def_eng = config['buffet.template_engines'][0]
self.buffet = pylons.templating.Buffet(
def_eng['engine'],
template_root=def_eng['template_root'],
**def_eng['template_options'])
for e in config['buffet.template_engines'][1:]:
log.debug("Initializing additional template engine: %s",
e['engine'])
self.buffet.prepare(e['engine'],
template_root=e['template_root'],
alias=e['alias'], **e['template_options'])
else:
self.buffet = None
# Cache some options for use during requests
self._session_key = self.environ_config.get('session',
'beaker.session')
self._cache_key = self.environ_config.get('cache', 'beaker.cache')
def __call__(self, environ, start_response):
"""Setup and handle a web request
PylonsApp splits its functionality into several methods to
make it easier to subclass and customize core functionality.
The methods are called in the following order:
1. :meth:`~PylonsApp.setup_app_env`
2. :meth:`~PylonsApp.load_test_env` (Only if operating in
testing mode)
3. :meth:`~PylonsApp.resolve`
4. :meth:`~PylonsApp.dispatch`
The response from :meth:`~PylonsApp.dispatch` is expected to be
an iterable (valid :pep:`333` WSGI response), which is then
sent back as the response.
"""
# Cache the logging level for the request
log_debug = self.log_debug = logging.DEBUG >= log.getEffectiveLevel()
self.setup_app_env(environ, start_response)
if 'paste.testing_variables' in environ:
self.load_test_env(environ)
if environ['PATH_INFO'] == '/_test_vars':
paste.registry.restorer.save_registry_state(environ)
start_response('200 OK', [('Content-type', 'text/plain')])
return ['%s' % paste.registry.restorer.get_request_id(environ)]
controller = self.resolve(environ, start_response)
response = self.dispatch(controller, environ, start_response)
if 'paste.testing_variables' in environ and hasattr(response,
'wsgi_response'):
environ['paste.testing_variables']['response'] = response
try:
if hasattr(response, 'wsgi_response'):
# Transform Response objects from legacy Controller
if log_debug:
log.debug("Transforming legacy Response object into WSGI "
"response")
return response(environ, start_response)
elif response is not None:
return response
raise Exception("No content returned by controller (Did you "
"remember to 'return' it?) in: %r" %
controller.__name__)
finally:
            # Help Python reclaim memory a bit faster by removing the
            # reference cycle that the pylons object causes
if 'pylons.pylons' in environ:
del environ['pylons.pylons']
def register_globals(self, environ):
"""Registers globals in the environment, called from
:meth:`~PylonsApp.setup_app_env`
Override this to control how the Pylons API is setup. Note that
a custom render function will need to be used if the
``pylons.app_globals`` global is not available.
"""
pylons_obj = environ['pylons.pylons']
registry = environ['paste.registry']
registry.register(pylons.response, pylons_obj.response)
registry.register(pylons.request, pylons_obj.request)
registry.register(pylons.app_globals, self.globals)
registry.register(pylons.config, self.config)
registry.register(pylons.h, self.helpers or
pylons.legacy.load_h(self.package_name))
registry.register(pylons.c, pylons_obj.c)
registry.register(pylons.translator, pylons_obj.translator)
if self.buffet:
registry.register(pylons.buffet, self.buffet)
if 'session' in pylons_obj.__dict__:
registry.register(pylons.session, pylons_obj.session)
if 'cache' in pylons_obj.__dict__:
registry.register(pylons.cache, pylons_obj.cache)
if 'routes.url' in environ:
registry.register(pylons.url, environ['routes.url'])
def setup_app_env(self, environ, start_response):
"""Setup and register all the Pylons objects with the registry
After creating all the global objects for use in the request,
:meth:`~PylonsApp.register_globals` is called to register them
in the environment.
"""
if self.log_debug:
log.debug("Setting up Pylons stacked object globals")
# Setup the basic pylons global objects
req_options = self.request_options
req = Request(environ, charset=req_options['charset'],
unicode_errors=req_options['errors'],
decode_param_names=req_options['decode_param_names'])
req.language = req_options['language']
response = Response(
content_type=self.response_options['content_type'],
charset=self.response_options['charset'])
response.headers.update(self.response_options['headers'])
# Store a copy of the request/response in environ for faster access
pylons_obj = PylonsContext()
pylons_obj.config = self.config
pylons_obj.request = req
pylons_obj.response = response
pylons_obj.g = pylons_obj.app_globals = self.globals
pylons_obj.h = self.helpers
if self.buffet:
pylons_obj.buffet = self.buffet
environ['pylons.pylons'] = pylons_obj
environ['pylons.environ_config'] = self.environ_config
# Setup the translator object
lang = self.config['lang']
pylons_obj.translator = _get_translator(lang,
pylons_config=self.config)
if self.config['pylons.strict_c']:
c = ContextObj()
else:
c = AttribSafeContextObj()
pylons_obj.c = c
econf = self.config['pylons.environ_config']
if self._session_key in environ:
pylons_obj.session = environ[self._session_key]
if self._cache_key in environ:
pylons_obj.cache = environ[self._cache_key]
# Load the globals with the registry if around
if 'paste.registry' in environ:
self.register_globals(environ)
def resolve(self, environ, start_response):
"""Uses dispatching information found in
``environ['wsgiorg.routing_args']`` to retrieve a controller
name and return the controller instance from the appropriate
controller module.
Override this to change how the controller name is found and
returned.
"""
# Update the Routes config object in case we're using Routes
config = request_config()
config.redirect = self.redirect_to
match = environ['wsgiorg.routing_args'][1]
environ['pylons.routes_dict'] = match
controller = match.get('controller')
if not controller:
return
if self.log_debug:
log.debug("Resolved URL to controller: %r", controller)
return self.find_controller(controller)
def find_controller(self, controller):
"""Locates a controller by attempting to import it then grab
the SomeController instance from the imported module.
Override this to change how the controller object is found once
the URL has been resolved.
"""
# Check to see if we've cached the class instance for this name
if controller in self.controller_classes:
return self.controller_classes[controller]
        # Pull the controller's class name and import the controller module
full_module_name = self.package_name + '.controllers.' \
+ controller.replace('/', '.')
# Hide the traceback here if the import fails (bad syntax and such)
__traceback_hide__ = 'before_and_this'
__import__(full_module_name)
if hasattr(sys.modules[full_module_name], '__controller__'):
mycontroller = getattr(
sys.modules[full_module_name],
sys.modules[full_module_name].__controller__)
else:
module_name = controller.split('/')[-1]
class_name = class_name_from_module_name(module_name) + \
'Controller'
if self.log_debug:
log.debug("Found controller, module: '%s', class: '%s'",
full_module_name, class_name)
mycontroller = getattr(sys.modules[full_module_name], class_name)
self.controller_classes[controller] = mycontroller
return mycontroller
def dispatch(self, controller, environ, start_response):
"""Dispatches to a controller, will instantiate the controller
if necessary.
Override this to change how the controller dispatch is handled.
"""
log_debug = self.log_debug
if not controller:
if log_debug:
log.debug("No controller found, returning 404 HTTP Not Found")
return HTTPNotFound()(environ, start_response)
# If it's a class, instantiate it
if hasattr(controller, '__bases__'):
if log_debug:
log.debug("Controller appears to be a class, instantiating")
controller = controller()
controller._pylons_log_debug = log_debug
        # Add a reference to the controller app that was located
environ['pylons.controller'] = controller
# Controller is assumed to handle a WSGI call
if log_debug:
log.debug("Calling controller class with WSGI interface")
return controller(environ, start_response)
def load_test_env(self, environ):
"""Sets up our Paste testing environment"""
if self.log_debug:
log.debug("Setting up paste testing environment variables")
testenv = environ['paste.testing_variables']
pylons_obj = environ['pylons.pylons']
testenv['req'] = pylons_obj.request
testenv['response'] = pylons_obj.response
testenv['tmpl_context'] = testenv['c'] = pylons_obj.c
testenv['app_globals'] = testenv['g'] = pylons_obj.app_globals
testenv['h'] = self.config['pylons.h'] or pylons_obj.h
testenv['config'] = self.config
if hasattr(pylons_obj, 'session'):
testenv['session'] = pylons_obj.session
if hasattr(pylons_obj, 'cache'):
testenv['cache'] = pylons_obj.cache
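# --- Illustrative sketch (not part of Pylons itself) ---
# The PylonsApp docstring above notes that URL resolution and dispatch are
# meant to be customized by subclassing.  The hypothetical subclass below
# only traces the two extension points and delegates to the base class; the
# class name and log messages are assumptions for illustration only.
class TracingPylonsApp(PylonsApp):
    """Example subclass that logs URL resolution and dispatch."""
    def resolve(self, environ, start_response):
        log.debug("Resolving %s", environ.get('PATH_INFO'))
        return PylonsApp.resolve(self, environ, start_response)
    def dispatch(self, controller, environ, start_response):
        log.debug("Dispatching to %r", controller)
        return PylonsApp.dispatch(self, controller, environ, start_response)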
|
|
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from neutron import context
from neutron.db import servicetype_db as st_db
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.common import constants
from neutron.tests.unit import testlib_api
from oslo_utils import uuidutils
import six
from six import moves
from webob import exc
from neutron_lbaas.db.loadbalancer import loadbalancer_db as ldb
from neutron_lbaas.extensions import loadbalancer
from neutron_lbaas.services.loadbalancer.drivers.common \
import agent_driver_base
from neutron_lbaas.tests import base
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer
class TestLoadBalancerPluginBase(
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
def setUp(self):
def reset_device_driver():
agent_driver_base.AgentDriverBase.device_driver = None
self.addCleanup(reset_device_driver)
self.mock_importer = mock.patch.object(
agent_driver_base, 'importutils').start()
# needed to reload provider configuration
st_db.ServiceTypeManager._instance = None
agent_driver_base.AgentDriverBase.device_driver = 'dummy'
super(TestLoadBalancerPluginBase, self).setUp(
lbaas_provider=('LOADBALANCER:lbaas:neutron_lbaas.services.'
'loadbalancer.drivers.common.agent_driver_base.'
'AgentDriverBase:default'))
# we need access to loaded plugins to modify models
loaded_plugins = manager.NeutronManager().get_service_plugins()
self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
class TestLoadBalancerCallbacks(TestLoadBalancerPluginBase):
def setUp(self):
super(TestLoadBalancerCallbacks, self).setUp()
self.callbacks = agent_driver_base.LoadBalancerCallbacks(
self.plugin_instance
)
get_lbaas_agents_patcher = mock.patch(
'neutron_lbaas.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin.get_lbaas_agents')
get_lbaas_agents_patcher.start()
def test_get_ready_devices(self):
with self.vip() as vip:
with mock.patch('neutron_lbaas.services.loadbalancer.'
'agent_scheduler.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertEqual(ready, [vip['vip']['pool_id']])
def test_get_ready_devices_multiple_vips_and_pools(self):
ctx = context.get_admin_context()
# add 3 pools and 2 vips directly to DB
# to create 2 "ready" devices and one pool without vip
pools = []
for i in moves.range(3):
pools.append(ldb.Pool(id=uuidutils.generate_uuid(),
subnet_id=self._subnet_id,
protocol="HTTP",
lb_method="ROUND_ROBIN",
status=constants.ACTIVE,
admin_state_up=True))
ctx.session.add(pools[i])
vip0 = ldb.Vip(id=uuidutils.generate_uuid(),
protocol_port=80,
protocol="HTTP",
pool_id=pools[0].id,
status=constants.ACTIVE,
admin_state_up=True,
connection_limit=3)
ctx.session.add(vip0)
pools[0].vip_id = vip0.id
vip1 = ldb.Vip(id=uuidutils.generate_uuid(),
protocol_port=80,
protocol="HTTP",
pool_id=pools[1].id,
status=constants.ACTIVE,
admin_state_up=True,
connection_limit=3)
ctx.session.add(vip1)
pools[1].vip_id = vip1.id
ctx.session.flush()
self.assertEqual(ctx.session.query(ldb.Pool).count(), 3)
self.assertEqual(ctx.session.query(ldb.Vip).count(), 2)
with mock.patch('neutron_lbaas.services.loadbalancer.agent_scheduler'
'.LbaasAgentSchedulerDbMixin'
'.list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {'pools': [{'id': pools[0].id},
{'id': pools[1].id},
{'id': pools[2].id}]}
ready = self.callbacks.get_ready_devices(ctx)
self.assertEqual(len(ready), 3)
self.assertIn(pools[0].id, ready)
self.assertIn(pools[1].id, ready)
self.assertIn(pools[2].id, ready)
# cleanup
ctx.session.query(ldb.Pool).delete()
ctx.session.query(ldb.Vip).delete()
def test_get_ready_devices_inactive_vip(self):
with self.vip() as vip:
            # Set the vip inactive; we need to use the plugin directly since
            # status is not tenant mutable
self.plugin_instance.update_vip(
context.get_admin_context(),
vip['vip']['id'],
{'vip': {'status': constants.INACTIVE}}
)
with mock.patch('neutron_lbaas.services.loadbalancer.'
'agent_scheduler.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertEqual([vip['vip']['pool_id']], ready)
def test_get_ready_devices_inactive_pool(self):
with self.vip() as vip:
            # Set the pool inactive; we need to use the plugin directly since
            # status is not tenant mutable
self.plugin_instance.update_pool(
context.get_admin_context(),
vip['vip']['pool_id'],
{'pool': {'status': constants.INACTIVE}}
)
with mock.patch('neutron_lbaas.services.loadbalancer.'
'agent_scheduler.LbaasAgentSchedulerDbMixin.'
'list_pools_on_lbaas_agent') as mock_agent_pools:
mock_agent_pools.return_value = {
'pools': [{'id': vip['vip']['pool_id']}]}
ready = self.callbacks.get_ready_devices(
context.get_admin_context(),
)
self.assertFalse(ready)
def test_get_logical_device_non_active(self):
with self.pool() as pool:
ctx = context.get_admin_context()
for status in ('INACTIVE', 'PENDING_CREATE', 'PENDING_UPDATE'):
self.plugin_instance.update_status(
ctx, ldb.Pool, pool['pool']['id'], status)
pool['pool']['status'] = status
expected = {
'pool': pool['pool'],
'members': [],
'healthmonitors': [],
'driver': 'dummy'
}
logical_config = self.callbacks.get_logical_device(
ctx, pool['pool']['id']
)
self.assertEqual(expected, logical_config)
def test_get_logical_device_active(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
# activate objects
self.plugin_instance.update_status(
ctx, ldb.Pool, pool['pool']['id'], 'ACTIVE')
self.plugin_instance.update_status(
ctx, ldb.Member, member['member']['id'], 'ACTIVE')
self.plugin_instance.update_status(
ctx, ldb.Vip, vip['vip']['id'], 'ACTIVE')
# build the expected
port = self.plugin_instance._core_plugin.get_port(
ctx, vip['vip']['port_id']
)
subnet = self.plugin_instance._core_plugin.get_subnet(
ctx, vip['vip']['subnet_id']
)
port['fixed_ips'][0]['subnet'] = subnet
# reload pool to add members and vip
pool = self.plugin_instance.get_pool(
ctx, pool['pool']['id']
)
pool['status'] = constants.ACTIVE
vip['vip']['status'] = constants.ACTIVE
vip['vip']['port'] = port
member['member']['status'] = constants.ACTIVE
expected = {
'pool': pool,
'vip': vip['vip'],
'members': [member['member']],
'healthmonitors': [],
'driver': 'dummy'
}
logical_config = self.callbacks.get_logical_device(
ctx, pool['id']
)
self.assertEqual(logical_config, expected)
def test_get_logical_device_inactive_member(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
self.plugin_instance.update_status(ctx, ldb.Pool,
pool['pool']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Vip,
vip['vip']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Member,
member['member']['id'],
'INACTIVE')
logical_config = self.callbacks.get_logical_device(
ctx, pool['pool']['id'])
member['member']['status'] = constants.INACTIVE
self.assertEqual([member['member']],
logical_config['members'])
def test_get_logical_device_pending_create_member(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
self.plugin_instance.update_status(ctx, ldb.Pool,
pool['pool']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Vip,
vip['vip']['id'],
'ACTIVE')
member = self.plugin_instance.get_member(
ctx, member['member']['id'])
self.assertEqual('PENDING_CREATE',
member['status'])
logical_config = self.callbacks.get_logical_device(
ctx, pool['pool']['id'])
self.assertEqual([member], logical_config['members'])
def test_get_logical_device_pending_create_health_monitor(self):
with self.health_monitor() as monitor:
with self.pool() as pool:
with self.vip(pool=pool) as vip:
ctx = context.get_admin_context()
self.plugin_instance.update_status(ctx, ldb.Pool,
pool['pool']['id'],
'ACTIVE')
self.plugin_instance.update_status(ctx, ldb.Vip,
vip['vip']['id'],
'ACTIVE')
self.plugin_instance.create_pool_health_monitor(
ctx, monitor, pool['pool']['id'])
pool = self.plugin_instance.get_pool(
ctx, pool['pool']['id'])
monitor = self.plugin_instance.get_health_monitor(
ctx, monitor['health_monitor']['id'])
self.assertEqual(
'PENDING_CREATE',
pool['health_monitors_status'][0]['status'])
logical_config = self.callbacks.get_logical_device(
ctx, pool['id'])
self.assertEqual([monitor],
logical_config['healthmonitors'])
def _update_port_test_helper(self, expected, func, **kwargs):
core = self.plugin_instance._core_plugin
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']):
ctx = context.get_admin_context()
func(ctx, port_id=vip['vip']['port_id'], **kwargs)
db_port = core.get_port(ctx, vip['vip']['port_id'])
for k, v in six.iteritems(expected):
self.assertEqual(db_port[k], v)
def test_plug_vip_port(self):
exp = {
'device_owner': 'neutron:' + constants.LOADBALANCER,
'admin_state_up': True
}
self._update_port_test_helper(
exp,
self.callbacks.plug_vip_port,
host='host'
)
def test_plug_vip_port_mock_with_host(self):
exp = {
'device_owner': 'neutron:' + constants.LOADBALANCER,
'admin_state_up': True,
portbindings.HOST_ID: 'host'
}
with mock.patch.object(
self.plugin._core_plugin, 'update_port') as mock_update_port:
with self.pool() as pool:
with self.vip(pool=pool) as vip:
ctx = context.get_admin_context()
self.callbacks.plug_vip_port(
ctx, port_id=vip['vip']['port_id'], host='host')
mock_update_port.assert_called_once_with(
ctx, vip['vip']['port_id'],
{'port': testlib_api.SubDictMatch(exp)})
def test_unplug_vip_port(self):
exp = {
'device_owner': '',
'device_id': '',
'admin_state_up': False
}
self._update_port_test_helper(
exp,
self.callbacks.unplug_vip_port,
host='host'
)
def test_pool_deployed(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
with self.member(pool_id=vip['vip']['pool_id']) as member:
ctx = context.get_admin_context()
p = self.plugin_instance.get_pool(ctx, pool['pool']['id'])
self.assertEqual('PENDING_CREATE', p['status'])
v = self.plugin_instance.get_vip(ctx, vip['vip']['id'])
self.assertEqual('PENDING_CREATE', v['status'])
m = self.plugin_instance.get_member(
ctx, member['member']['id'])
self.assertEqual('PENDING_CREATE', m['status'])
self.callbacks.pool_deployed(ctx, pool['pool']['id'])
p = self.plugin_instance.get_pool(ctx, pool['pool']['id'])
self.assertEqual('ACTIVE', p['status'])
v = self.plugin_instance.get_vip(ctx, vip['vip']['id'])
self.assertEqual('ACTIVE', v['status'])
m = self.plugin_instance.get_member(
ctx, member['member']['id'])
self.assertEqual('ACTIVE', m['status'])
def test_update_status_pool(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
p = self.plugin_instance.get_pool(ctx, pool_id)
self.assertEqual('PENDING_CREATE', p['status'])
self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE')
p = self.plugin_instance.get_pool(ctx, pool_id)
self.assertEqual('ACTIVE', p['status'])
def test_update_status_pool_deleted_already(self):
with mock.patch.object(agent_driver_base, 'LOG') as mock_log:
pool_id = 'deleted_pool'
ctx = context.get_admin_context()
self.assertRaises(loadbalancer.PoolNotFound,
self.plugin_instance.get_pool, ctx, pool_id)
self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE')
self.assertTrue(mock_log.warning.called)
def test_update_status_health_monitor(self):
with contextlib.nested(
self.health_monitor(),
self.pool()
) as (hm, pool):
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
hm_id = hm['health_monitor']['id']
h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id,
pool_id)
self.assertEqual('PENDING_CREATE', h['status'])
self.callbacks.update_status(
ctx, 'health_monitor',
{'monitor_id': hm_id, 'pool_id': pool_id}, 'ACTIVE')
h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id,
pool_id)
self.assertEqual('ACTIVE', h['status'])
class TestLoadBalancerAgentApi(base.BaseTestCase):
def setUp(self):
super(TestLoadBalancerAgentApi, self).setUp()
self.api = agent_driver_base.LoadBalancerAgentApi('topic')
def test_init(self):
self.assertEqual(self.api.client.target.topic, 'topic')
def _call_test_helper(self, method_name, method_args):
with contextlib.nested(
mock.patch.object(self.api.client, 'cast'),
mock.patch.object(self.api.client, 'prepare'),
) as (
rpc_mock, prepare_mock
):
prepare_mock.return_value = self.api.client
getattr(self.api, method_name)(mock.sentinel.context,
host='host',
**method_args)
prepare_args = {'server': 'host'}
prepare_mock.assert_called_once_with(**prepare_args)
if method_name == 'agent_updated':
method_args = {'payload': method_args}
rpc_mock.assert_called_once_with(mock.sentinel.context, method_name,
**method_args)
def test_agent_updated(self):
self._call_test_helper('agent_updated', {'admin_state_up': 'test'})
def test_create_pool(self):
self._call_test_helper('create_pool', {'pool': 'test',
'driver_name': 'dummy'})
def test_update_pool(self):
self._call_test_helper('update_pool', {'old_pool': 'test',
'pool': 'test'})
def test_delete_pool(self):
self._call_test_helper('delete_pool', {'pool': 'test'})
def test_create_vip(self):
self._call_test_helper('create_vip', {'vip': 'test'})
def test_update_vip(self):
self._call_test_helper('update_vip', {'old_vip': 'test',
'vip': 'test'})
def test_delete_vip(self):
self._call_test_helper('delete_vip', {'vip': 'test'})
def test_create_member(self):
self._call_test_helper('create_member', {'member': 'test'})
def test_update_member(self):
self._call_test_helper('update_member', {'old_member': 'test',
'member': 'test'})
def test_delete_member(self):
self._call_test_helper('delete_member', {'member': 'test'})
def test_create_monitor(self):
self._call_test_helper('create_pool_health_monitor',
{'health_monitor': 'test', 'pool_id': 'test'})
def test_update_monitor(self):
self._call_test_helper('update_pool_health_monitor',
{'old_health_monitor': 'test',
'health_monitor': 'test',
'pool_id': 'test'})
def test_delete_monitor(self):
self._call_test_helper('delete_pool_health_monitor',
{'health_monitor': 'test', 'pool_id': 'test'})
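# --- Illustrative sketch (not the actual neutron_lbaas implementation) ---
# _call_test_helper above pins an RPC client to the agent's host via
# prepare(server=host) and then cast()s the method name with its keyword
# arguments ('agent_updated' additionally wraps them in a 'payload' dict).
# The class below sketches a plausible shape of one such proxy method; the
# real signatures in agent_driver_base.LoadBalancerAgentApi may differ.
class _ExampleAgentApi(object):
    """Hypothetical RPC proxy method shape (illustration only)."""
    def __init__(self, client):
        self.client = client
    def create_pool(self, context, pool, driver_name, host):
        # Pin the call to the handling agent, then fire-and-forget cast.
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'create_pool', pool=pool, driver_name=driver_name)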
class TestLoadBalancerPluginNotificationWrapper(TestLoadBalancerPluginBase):
def setUp(self):
self.log = mock.patch.object(agent_driver_base, 'LOG')
api_cls = mock.patch.object(agent_driver_base,
'LoadBalancerAgentApi').start()
super(TestLoadBalancerPluginNotificationWrapper, self).setUp()
self.mock_api = api_cls.return_value
self.mock_get_driver = mock.patch.object(self.plugin_instance,
'_get_driver')
self.mock_get_driver.return_value = (agent_driver_base.
AgentDriverBase(
self.plugin_instance
))
def test_create_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
self.mock_api.create_vip.assert_called_once_with(
mock.ANY,
vip['vip'],
'host'
)
def test_update_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
ctx = context.get_admin_context()
old_vip = vip['vip'].copy()
vip['vip'].pop('status')
new_vip = self.plugin_instance.update_vip(
ctx,
vip['vip']['id'],
vip
)
self.mock_api.update_vip.assert_called_once_with(
mock.ANY,
old_vip,
new_vip,
'host'
)
self.assertEqual(
new_vip['status'],
constants.PENDING_UPDATE
)
def test_delete_vip(self):
with self.subnet() as subnet:
with self.pool(subnet=subnet) as pool:
with self.vip(pool=pool, subnet=subnet,
do_delete=False) as vip:
ctx = context.get_admin_context()
self.plugin_instance.delete_vip(ctx, vip['vip']['id'])
vip['vip']['status'] = 'PENDING_DELETE'
self.mock_api.delete_vip.assert_called_once_with(
mock.ANY,
vip['vip'],
'host'
)
def test_create_pool(self):
with self.pool() as pool:
self.mock_api.create_pool.assert_called_once_with(
mock.ANY,
pool['pool'],
mock.ANY,
'dummy'
)
def test_update_pool_non_active(self):
with self.pool() as pool:
pool['pool']['status'] = 'INACTIVE'
ctx = context.get_admin_context()
orig_pool = pool['pool'].copy()
del pool['pool']['provider']
self.plugin_instance.update_pool(ctx, pool['pool']['id'], pool)
self.mock_api.delete_pool.assert_called_once_with(
mock.ANY, orig_pool, 'host')
def test_update_pool_no_vip_id(self):
with self.pool() as pool:
ctx = context.get_admin_context()
orig_pool = pool['pool'].copy()
del pool['pool']['provider']
updated = self.plugin_instance.update_pool(
ctx, pool['pool']['id'], pool)
self.mock_api.update_pool.assert_called_once_with(
mock.ANY, orig_pool, updated, 'host')
def test_update_pool_with_vip_id(self):
with self.pool() as pool:
with self.vip(pool=pool) as vip:
ctx = context.get_admin_context()
old_pool = pool['pool'].copy()
old_pool['vip_id'] = vip['vip']['id']
del pool['pool']['provider']
updated = self.plugin_instance.update_pool(
ctx, pool['pool']['id'], pool)
self.mock_api.update_pool.assert_called_once_with(
mock.ANY, old_pool, updated, 'host')
def test_delete_pool(self):
with self.pool(do_delete=False) as pool:
req = self.new_delete_request('pools',
pool['pool']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
pool['pool']['status'] = 'PENDING_DELETE'
self.mock_api.delete_pool.assert_called_once_with(
mock.ANY, pool['pool'], 'host')
def test_create_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id) as member:
self.mock_api.create_member.assert_called_once_with(
mock.ANY, member['member'], 'host')
def test_update_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id) as member:
ctx = context.get_admin_context()
updated = self.plugin_instance.update_member(
ctx, member['member']['id'], member)
self.mock_api.update_member.assert_called_once_with(
mock.ANY, member['member'], updated, 'host')
def test_update_member_new_pool(self):
with self.pool() as pool1:
pool1_id = pool1['pool']['id']
with self.pool() as pool2:
pool2_id = pool2['pool']['id']
with self.member(pool_id=pool1_id) as member:
self.mock_api.create_member.reset_mock()
ctx = context.get_admin_context()
old_member = member['member'].copy()
member['member']['pool_id'] = pool2_id
updated = self.plugin_instance.update_member(
ctx, member['member']['id'], member)
self.mock_api.delete_member.assert_called_once_with(
mock.ANY, old_member, 'host')
self.mock_api.create_member.assert_called_once_with(
mock.ANY, updated, 'host')
def test_delete_member(self):
with self.pool() as pool:
pool_id = pool['pool']['id']
with self.member(pool_id=pool_id,
do_delete=False) as member:
req = self.new_delete_request('members',
member['member']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
member['member']['status'] = 'PENDING_DELETE'
self.mock_api.delete_member.assert_called_once_with(
mock.ANY, member['member'], 'host')
def test_create_pool_health_monitor(self):
with contextlib.nested(
self.health_monitor(),
self.pool(),
) as (hm, pool):
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
# hm now has a ref to the pool with which it is associated
hm = self.plugin.get_health_monitor(
ctx, hm['health_monitor']['id'])
self.mock_api.create_pool_health_monitor.assert_called_once_with(
mock.ANY, hm, pool_id, 'host')
def test_delete_pool_health_monitor(self):
with contextlib.nested(
self.pool(),
self.health_monitor()
) as (pool, hm):
pool_id = pool['pool']['id']
ctx = context.get_admin_context()
self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id)
# hm now has a ref to the pool with which it is associated
hm = self.plugin.get_health_monitor(
ctx, hm['health_monitor']['id'])
hm['pools'][0]['status'] = 'PENDING_DELETE'
self.plugin_instance.delete_pool_health_monitor(
ctx, hm['id'], pool_id)
self.mock_api.delete_pool_health_monitor.assert_called_once_with(
mock.ANY, hm, pool_id, 'host')
def test_update_health_monitor_associated_with_pool(self):
with contextlib.nested(
self.health_monitor(type='HTTP'),
self.pool()
) as (monitor, pool):
data = {
'health_monitor': {
'id': monitor['health_monitor']['id'],
'tenant_id': self._tenant_id
}
}
req = self.new_create_request(
'pools',
data,
fmt=self.fmt,
id=pool['pool']['id'],
subresource='health_monitors')
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
# hm now has a ref to the pool with which it is associated
ctx = context.get_admin_context()
hm = self.plugin.get_health_monitor(
ctx, monitor['health_monitor']['id'])
self.mock_api.create_pool_health_monitor.assert_called_once_with(
mock.ANY,
hm,
pool['pool']['id'],
'host'
)
self.mock_api.reset_mock()
data = {'health_monitor': {'delay': 20,
'timeout': 20,
'max_retries': 2,
'admin_state_up': False}}
updated = hm.copy()
updated.update(data['health_monitor'])
req = self.new_update_request("health_monitors",
data,
monitor['health_monitor']['id'])
req.get_response(self.ext_api)
self.mock_api.update_pool_health_monitor.assert_called_once_with(
mock.ANY,
hm,
updated,
pool['pool']['id'],
'host')
|
|
"""The tests for the Owntracks device tracker."""
import asyncio
import json
import os
from collections import defaultdict
import unittest
from unittest.mock import patch
from tests.common import (assert_setup_component, fire_mqtt_message,
get_test_home_assistant, mock_mqtt_component)
import homeassistant.components.device_tracker.owntracks as owntracks
from homeassistant.setup import setup_component
from homeassistant.components import device_tracker
from homeassistant.const import CONF_PLATFORM, STATE_NOT_HOME
from homeassistant.util.async import run_coroutine_threadsafe
USER = 'greg'
DEVICE = 'phone'
LOCATION_TOPIC = 'owntracks/{}/{}'.format(USER, DEVICE)
EVENT_TOPIC = 'owntracks/{}/{}/event'.format(USER, DEVICE)
WAYPOINT_TOPIC = owntracks.WAYPOINT_TOPIC.format(USER, DEVICE)
USER_BLACKLIST = 'ram'
WAYPOINT_TOPIC_BLOCKED = owntracks.WAYPOINT_TOPIC.format(
USER_BLACKLIST, DEVICE)
DEVICE_TRACKER_STATE = 'device_tracker.{}_{}'.format(USER, DEVICE)
IBEACON_DEVICE = 'keys'
REGION_TRACKER_STATE = 'device_tracker.beacon_{}'.format(IBEACON_DEVICE)
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
CONF_WAYPOINT_IMPORT = owntracks.CONF_WAYPOINT_IMPORT
CONF_WAYPOINT_WHITELIST = owntracks.CONF_WAYPOINT_WHITELIST
CONF_SECRET = owntracks.CONF_SECRET
LOCATION_MESSAGE = {
'batt': 92,
'cog': 248,
'tid': 'user',
'lon': 1.0,
't': 'u',
'alt': 27,
'acc': 60,
'p': 101.3977584838867,
'vac': 4,
'lat': 2.0,
'_type': 'location',
'tst': 1,
'vel': 0}
LOCATION_MESSAGE_INACCURATE = {
'batt': 92,
'cog': 248,
'tid': 'user',
'lon': 2.0,
't': 'u',
'alt': 27,
'acc': 2000,
'p': 101.3977584838867,
'vac': 4,
'lat': 6.0,
'_type': 'location',
'tst': 1,
'vel': 0}
LOCATION_MESSAGE_ZERO_ACCURACY = {
'batt': 92,
'cog': 248,
'tid': 'user',
'lon': 2.0,
't': 'u',
'alt': 27,
'acc': 0,
'p': 101.3977584838867,
'vac': 4,
'lat': 6.0,
'_type': 'location',
'tst': 1,
'vel': 0}
REGION_ENTER_MESSAGE = {
'lon': 1.0,
'event': 'enter',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 60,
'tst': 2,
'lat': 2.0,
'_type': 'transition'}
REGION_LEAVE_MESSAGE = {
'lon': 1.0,
'event': 'leave',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 60,
'tst': 2,
'lat': 2.0,
'_type': 'transition'}
REGION_LEAVE_INACCURATE_MESSAGE = {
'lon': 10.0,
'event': 'leave',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 2000,
'tst': 2,
'lat': 20.0,
'_type': 'transition'}
WAYPOINTS_EXPORTED_MESSAGE = {
"_type": "waypoints",
"_creator": "test",
"waypoints": [
{
"_type": "waypoint",
"tst": 3,
"lat": 47,
"lon": 9,
"rad": 10,
"desc": "exp_wayp1"
},
{
"_type": "waypoint",
"tst": 4,
"lat": 3,
"lon": 9,
"rad": 500,
"desc": "exp_wayp2"
}
]
}
WAYPOINTS_UPDATED_MESSAGE = {
"_type": "waypoints",
"_creator": "test",
"waypoints": [
{
"_type": "waypoint",
"tst": 4,
"lat": 9,
"lon": 47,
"rad": 50,
"desc": "exp_wayp1"
},
]
}
WAYPOINT_ENTITY_NAMES = ['zone.greg_phone__exp_wayp1',
'zone.greg_phone__exp_wayp2',
'zone.ram_phone__exp_wayp1',
'zone.ram_phone__exp_wayp2']
REGION_ENTER_ZERO_MESSAGE = {
'lon': 1.0,
'event': 'enter',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 0,
'tst': 2,
'lat': 2.0,
'_type': 'transition'}
REGION_LEAVE_ZERO_MESSAGE = {
'lon': 10.0,
'event': 'leave',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 0,
'tst': 2,
'lat': 20.0,
'_type': 'transition'}
BAD_JSON_PREFIX = '--$this is bad json#--'
BAD_JSON_SUFFIX = '** and it ends here ^^'
TEST_SECRET_KEY = 's3cretkey'
ENCRYPTED_LOCATION_MESSAGE = {
# Encrypted version of LOCATION_MESSAGE using libsodium and TEST_SECRET_KEY
'_type': 'encrypted',
'data': ('qm1A83I6TVFRmH5343xy+cbex8jBBxDFkHRuJhELVKVRA/DgXcyKtghw'
'9pOw75Lo4gHcyy2wV5CmkjrpKEBR7Qhye4AR0y7hOvlx6U/a3GuY1+W8'
'I4smrLkwMvGgBOzXSNdVTzbFTHDvG3gRRaNHFkt2+5MsbH2Dd6CXmpzq'
'DIfSN7QzwOevuvNIElii5MlFxI6ZnYIDYA/ZdnAXHEVsNIbyT2N0CXt3'
'fTPzgGtFzsufx40EEUkC06J7QTJl7lLG6qaLW1cCWp86Vp0eL3vtZ6xq')}
MOCK_ENCRYPTED_LOCATION_MESSAGE = {
# Mock-encrypted version of LOCATION_MESSAGE using pickle
'_type': 'encrypted',
'data': ('gANDCXMzY3JldGtleXEAQ6p7ImxvbiI6IDEuMCwgInQiOiAidSIsICJi'
'YXR0IjogOTIsICJhY2MiOiA2MCwgInZlbCI6IDAsICJfdHlwZSI6ICJs'
'b2NhdGlvbiIsICJ2YWMiOiA0LCAicCI6IDEwMS4zOTc3NTg0ODM4ODY3'
'LCAidHN0IjogMSwgImxhdCI6IDIuMCwgImFsdCI6IDI3LCAiY29nIjog'
'MjQ4LCAidGlkIjogInVzZXIifXEBhnECLg==')
}
class BaseMQTT(unittest.TestCase):
"""Base MQTT assert functions."""
hass = None
def send_message(self, topic, message, corrupt=False):
"""Test the sending of a message."""
str_message = json.dumps(message)
if corrupt:
mod_message = BAD_JSON_PREFIX + str_message + BAD_JSON_SUFFIX
else:
mod_message = str_message
fire_mqtt_message(self.hass, topic, mod_message)
self.hass.block_till_done()
def assert_location_state(self, location):
"""Test the assertion of a location state."""
state = self.hass.states.get(DEVICE_TRACKER_STATE)
self.assertEqual(state.state, location)
def assert_location_latitude(self, latitude):
"""Test the assertion of a location latitude."""
state = self.hass.states.get(DEVICE_TRACKER_STATE)
self.assertEqual(state.attributes.get('latitude'), latitude)
def assert_location_longitude(self, longitude):
"""Test the assertion of a location longitude."""
state = self.hass.states.get(DEVICE_TRACKER_STATE)
self.assertEqual(state.attributes.get('longitude'), longitude)
def assert_location_accuracy(self, accuracy):
"""Test the assertion of a location accuracy."""
state = self.hass.states.get(DEVICE_TRACKER_STATE)
self.assertEqual(state.attributes.get('gps_accuracy'), accuracy)
class TestDeviceTrackerOwnTracks(BaseMQTT):
"""Test the OwnTrack sensor."""
# pylint: disable=invalid-name
def setup_method(self, _):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_mqtt_component(self.hass)
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_MAX_GPS_ACCURACY: 200,
CONF_WAYPOINT_IMPORT: True,
CONF_WAYPOINT_WHITELIST: ['jon', 'greg']
}})
self.hass.states.set(
'zone.inner', 'zoning',
{
'name': 'zone',
'latitude': 2.1,
'longitude': 1.1,
'radius': 10
})
self.hass.states.set(
'zone.inner_2', 'zoning',
{
'name': 'zone',
'latitude': 2.1,
'longitude': 1.1,
'radius': 10
})
self.hass.states.set(
'zone.outer', 'zoning',
{
'name': 'zone',
'latitude': 2.0,
'longitude': 1.0,
'radius': 100000
})
        # Clear state between tests
self.hass.states.set(DEVICE_TRACKER_STATE, None)
owntracks.REGIONS_ENTERED = defaultdict(list)
owntracks.MOBILE_BEACONS_ACTIVE = defaultdict(list)
def teardown_method(self, _):
"""Stop everything that was started."""
self.hass.stop()
try:
os.remove(self.hass.config.path(device_tracker.YAML_DEVICES))
except FileNotFoundError:
pass
def assert_tracker_state(self, location):
"""Test the assertion of a tracker state."""
state = self.hass.states.get(REGION_TRACKER_STATE)
self.assertEqual(state.state, location)
def assert_tracker_latitude(self, latitude):
"""Test the assertion of a tracker latitude."""
state = self.hass.states.get(REGION_TRACKER_STATE)
self.assertEqual(state.attributes.get('latitude'), latitude)
def assert_tracker_accuracy(self, accuracy):
"""Test the assertion of a tracker accuracy."""
state = self.hass.states.get(REGION_TRACKER_STATE)
self.assertEqual(state.attributes.get('gps_accuracy'), accuracy)
def test_location_invalid_devid(self): # pylint: disable=invalid-name
"""Test the update of a location."""
self.send_message('owntracks/paulus/nexus-5x', LOCATION_MESSAGE)
state = self.hass.states.get('device_tracker.paulus_nexus5x')
assert state.state == 'outer'
def test_location_update(self):
"""Test the update of a location."""
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
self.assert_location_latitude(2.0)
self.assert_location_accuracy(60.0)
self.assert_location_state('outer')
def test_location_inaccurate_gps(self):
"""Test the location for inaccurate GPS information."""
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE_INACCURATE)
self.assert_location_latitude(2.0)
self.assert_location_longitude(1.0)
def test_location_zero_accuracy_gps(self):
"""Ignore the location for zero accuracy GPS information."""
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE_ZERO_ACCURACY)
self.assert_location_latitude(2.0)
self.assert_location_longitude(1.0)
def test_event_entry_exit(self):
"""Test the entry event."""
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
# Enter uses the zone's gps co-ords
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
# Updates ignored when in a zone
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
self.send_message(EVENT_TOPIC, REGION_LEAVE_MESSAGE)
# Exit switches back to GPS
self.assert_location_latitude(2.0)
self.assert_location_accuracy(60.0)
self.assert_location_state('outer')
# Left clean zone state
self.assertFalse(owntracks.REGIONS_ENTERED[USER])
def test_event_with_spaces(self):
"""Test the entry event."""
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "inner 2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner 2')
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "inner 2"
self.send_message(EVENT_TOPIC, message)
# Left clean zone state
self.assertFalse(owntracks.REGIONS_ENTERED[USER])
def test_event_entry_exit_inaccurate(self):
"""Test the event for inaccurate exit."""
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
# Enter uses the zone's gps co-ords
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
self.send_message(EVENT_TOPIC, REGION_LEAVE_INACCURATE_MESSAGE)
# Exit doesn't use inaccurate gps
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
# But does exit region correctly
self.assertFalse(owntracks.REGIONS_ENTERED[USER])
def test_event_entry_exit_zero_accuracy(self):
"""Test entry/exit events with accuracy zero."""
self.send_message(EVENT_TOPIC, REGION_ENTER_ZERO_MESSAGE)
# Enter uses the zone's gps co-ords
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
self.send_message(EVENT_TOPIC, REGION_LEAVE_ZERO_MESSAGE)
# Exit doesn't use zero gps
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
# But does exit region correctly
self.assertFalse(owntracks.REGIONS_ENTERED[USER])
def test_event_exit_outside_zone_sets_away(self):
"""Test the event for exit zone."""
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
self.assert_location_state('inner')
# Exit message far away GPS location
message = REGION_LEAVE_MESSAGE.copy()
message['lon'] = 90.1
message['lat'] = 90.1
self.send_message(EVENT_TOPIC, message)
# Exit forces zone change to away
self.assert_location_state(STATE_NOT_HOME)
def test_event_entry_exit_right_order(self):
"""Test the event for ordering."""
# Enter inner zone
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
self.assert_location_state('inner')
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
# Enter inner2 zone
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner_2')
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
# Exit inner_2 - should be in 'inner'
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner')
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
# Exit inner - should be in 'outer'
self.send_message(EVENT_TOPIC, REGION_LEAVE_MESSAGE)
self.assert_location_state('outer')
self.assert_location_latitude(2.0)
self.assert_location_accuracy(60.0)
def test_event_entry_exit_wrong_order(self):
"""Test the event for wrong order."""
# Enter inner zone
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
self.assert_location_state('inner')
# Enter inner2 zone
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner_2')
# Exit inner - should still be in 'inner_2'
self.send_message(EVENT_TOPIC, REGION_LEAVE_MESSAGE)
self.assert_location_state('inner_2')
# Exit inner_2 - should be in 'outer'
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('outer')
def test_event_entry_unknown_zone(self):
"""Test the event for unknown zone."""
# Just treat as location update
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "unknown"
self.send_message(EVENT_TOPIC, message)
self.assert_location_latitude(2.0)
self.assert_location_state('outer')
def test_event_exit_unknown_zone(self):
"""Test the event for unknown zone."""
# Just treat as location update
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "unknown"
self.send_message(EVENT_TOPIC, message)
self.assert_location_latitude(2.0)
self.assert_location_state('outer')
def test_event_entry_zone_loading_dash(self):
"""Test the event for zone landing."""
# Make sure the leading - is ignored
        # OwnTracks uses this to switch on hold
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "-inner"
        self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner')
def test_mobile_enter_move_beacon(self):
"""Test the movement of a beacon."""
# Enter mobile beacon, should set location
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = IBEACON_DEVICE
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.0)
self.assert_tracker_state('outer')
# Move should move beacon
message = LOCATION_MESSAGE.copy()
message['lat'] = "3.0"
self.send_message(LOCATION_TOPIC, message)
self.assert_tracker_latitude(3.0)
self.assert_tracker_state(STATE_NOT_HOME)
def test_mobile_enter_exit_region_beacon(self):
"""Test the enter and the exit of a region beacon."""
# Start tracking beacon
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = IBEACON_DEVICE
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.0)
self.assert_tracker_state('outer')
# Enter location should move beacon
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.1)
self.assert_tracker_state('inner_2')
# Exit location should switch to gps
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.0)
def test_mobile_exit_move_beacon(self):
"""Test the exit move of a beacon."""
# Start tracking beacon
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = IBEACON_DEVICE
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.0)
self.assert_tracker_state('outer')
# Exit mobile beacon, should set location
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = IBEACON_DEVICE
message['lat'] = "3.0"
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(3.0)
# Move after exit should do nothing
message = LOCATION_MESSAGE.copy()
message['lat'] = "4.0"
        self.send_message(LOCATION_TOPIC, message)
self.assert_tracker_latitude(3.0)
def test_mobile_multiple_async_enter_exit(self):
"""Test the multiple entering."""
# Test race condition
enter_message = REGION_ENTER_MESSAGE.copy()
enter_message['desc'] = IBEACON_DEVICE
exit_message = REGION_LEAVE_MESSAGE.copy()
exit_message['desc'] = IBEACON_DEVICE
for _ in range(0, 20):
fire_mqtt_message(
self.hass, EVENT_TOPIC, json.dumps(enter_message))
fire_mqtt_message(
self.hass, EVENT_TOPIC, json.dumps(exit_message))
fire_mqtt_message(
self.hass, EVENT_TOPIC, json.dumps(enter_message))
self.hass.block_till_done()
self.send_message(EVENT_TOPIC, exit_message)
self.assertEqual(owntracks.MOBILE_BEACONS_ACTIVE['greg_phone'], [])
def test_mobile_multiple_enter_exit(self):
"""Test the multiple entering."""
# Should only happen if the iphone dies
enter_message = REGION_ENTER_MESSAGE.copy()
enter_message['desc'] = IBEACON_DEVICE
exit_message = REGION_LEAVE_MESSAGE.copy()
exit_message['desc'] = IBEACON_DEVICE
self.send_message(EVENT_TOPIC, enter_message)
self.send_message(EVENT_TOPIC, enter_message)
self.send_message(EVENT_TOPIC, exit_message)
self.assertEqual(owntracks.MOBILE_BEACONS_ACTIVE['greg_phone'], [])
def test_waypoint_import_simple(self):
"""Test a simple import of list of waypoints."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[0])
self.assertTrue(wayp is not None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[1])
self.assertTrue(wayp is not None)
def test_waypoint_import_blacklist(self):
"""Test import of list of waypoints for blacklisted user."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC_BLOCKED, waypoints_message)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[2])
self.assertTrue(wayp is None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[3])
self.assertTrue(wayp is None)
def test_waypoint_import_no_whitelist(self):
"""Test import of list of waypoints with no whitelist set."""
@asyncio.coroutine
def mock_see(**kwargs):
"""Fake see method for owntracks."""
return
test_config = {
CONF_PLATFORM: 'owntracks',
CONF_MAX_GPS_ACCURACY: 200,
CONF_WAYPOINT_IMPORT: True
}
run_coroutine_threadsafe(owntracks.async_setup_scanner(
self.hass, test_config, mock_see), self.hass.loop).result()
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC_BLOCKED, waypoints_message)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[2])
self.assertTrue(wayp is not None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[3])
self.assertTrue(wayp is not None)
def test_waypoint_import_bad_json(self):
"""Test importing a bad JSON payload."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message, True)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[2])
self.assertTrue(wayp is None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[3])
self.assertTrue(wayp is None)
def test_waypoint_import_existing(self):
"""Test importing a zone that exists."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message)
# Get the first waypoint exported
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[0])
# Send an update
waypoints_message = WAYPOINTS_UPDATED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message)
new_wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[0])
self.assertTrue(wayp == new_wayp)
def mock_cipher():
"""Return a dummy pickle-based cipher."""
def mock_decrypt(ciphertext, key):
"""Decrypt/unpickle."""
import pickle
(mkey, plaintext) = pickle.loads(ciphertext)
if key != mkey:
raise ValueError()
return plaintext
return (len(TEST_SECRET_KEY), mock_decrypt)
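# --- Illustrative sketch (not used by the tests above) ---
# mock_cipher() returns a decrypt function that unpickles a (key, plaintext)
# tuple, so a fixture such as MOCK_ENCRYPTED_LOCATION_MESSAGE can be built
# by pickling that tuple and base64-encoding the result.  The helper below
# is hypothetical; it assumes the component base64-decodes the 'data' field
# before calling the cipher, and the exact key encoding is an assumption.
def mock_encrypt(secret_key, message):
    """Build a mock-encrypted payload for use with mock_cipher (example)."""
    import base64
    import pickle
    ciphertext = pickle.dumps((secret_key, json.dumps(message).encode()))
    return {'_type': 'encrypted',
            'data': base64.b64encode(ciphertext).decode('ascii')}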
class TestDeviceTrackerOwnTrackConfigs(BaseMQTT):
"""Test the OwnTrack sensor."""
# pylint: disable=invalid-name
def setup_method(self, method):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_mqtt_component(self.hass)
def teardown_method(self, method):
"""Tear down resources."""
self.hass.stop()
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload(self):
"""Test encrypted payload."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: TEST_SECRET_KEY,
}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(2.0)
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_topic_key(self):
"""Test encrypted payload with a topic key."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: {
LOCATION_TOPIC: TEST_SECRET_KEY,
}}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(2.0)
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_no_key(self):
"""Test encrypted payload with no key, ."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
# key missing
}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(None)
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_wrong_key(self):
"""Test encrypted payload with wrong key."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: 'wrong key',
}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(None)
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_wrong_topic_key(self):
"""Test encrypted payload with wrong topic key."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: {
LOCATION_TOPIC: 'wrong key'
}}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(None)
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_no_topic_key(self):
"""Test encrypted payload with no topic key."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: {
'owntracks/{}/{}'.format(USER, 'otherdevice'): 'foobar'
}}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(None)
try:
import libnacl
except (ImportError, OSError):
libnacl = None
@unittest.skipUnless(libnacl, "libnacl/libsodium is not installed")
def test_encrypted_payload_libsodium(self):
"""Test sending encrypted message payload."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: TEST_SECRET_KEY,
}})
self.send_message(LOCATION_TOPIC, ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(2.0)
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import StringIO
import time
import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ListImageFiltersTestJSON(base.BaseV2ComputeTest):
@classmethod
def resource_setup(cls):
super(ListImageFiltersTestJSON, cls).resource_setup()
if not CONF.service_available.glance:
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
cls.client = cls.images_client
cls.glance_client = cls.os.image_client
def _create_image():
name = data_utils.rand_name('image')
body = cls.glance_client.create_image(name=name,
container_format='bare',
disk_format='raw',
is_public=False)
image_id = body['id']
cls.images.append(image_id)
# Wait 1 second between creation and upload to ensure a delta
# between created_at and updated_at.
time.sleep(1)
image_file = StringIO.StringIO(('*' * 1024))
cls.glance_client.update_image(image_id, data=image_file)
cls.client.wait_for_image_status(image_id, 'ACTIVE')
body = cls.client.get_image(image_id)
return body
# Create non-snapshot images via glance
cls.image1 = _create_image()
cls.image1_id = cls.image1['id']
cls.image2 = _create_image()
cls.image2_id = cls.image2['id']
cls.image3 = _create_image()
cls.image3_id = cls.image3['id']
if not CONF.compute_feature_enabled.snapshot:
return
# Create instances and snapshots via nova
resp, cls.server1 = cls.create_test_server()
resp, cls.server2 = cls.create_test_server(wait_until='ACTIVE')
# NOTE(sdague) this is faster than doing the sync wait_until on both
cls.servers_client.wait_for_server_status(cls.server1['id'],
'ACTIVE')
# Create images to be used in the filter tests
cls.snapshot1 = cls.create_image_from_server(
cls.server1['id'], wait_until='ACTIVE')
cls.snapshot1_id = cls.snapshot1['id']
# Servers have a hidden property for when they are being imaged
# Performing back-to-back create image calls on a single
# server will sometimes cause failures
cls.snapshot3 = cls.create_image_from_server(
cls.server2['id'], wait_until='ACTIVE')
cls.snapshot3_id = cls.snapshot3['id']
# Wait for the server to be active after the image upload
cls.snapshot2 = cls.create_image_from_server(
cls.server1['id'], wait_until='ACTIVE')
cls.snapshot2_id = cls.snapshot2['id']
@test.attr(type='gate')
def test_list_images_filter_by_status(self):
# The list of images should contain only images with the
# provided status
params = {'status': 'ACTIVE'}
images = self.client.list_images(params)
self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
self.assertTrue(any([i for i in images if i['id'] == self.image2_id]))
self.assertTrue(any([i for i in images if i['id'] == self.image3_id]))
@test.attr(type='gate')
def test_list_images_filter_by_name(self):
# List of all images should contain the expected images filtered
# by name
params = {'name': self.image1['name']}
images = self.client.list_images(params)
self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
self.assertFalse(any([i for i in images if i['id'] == self.image2_id]))
self.assertFalse(any([i for i in images if i['id'] == self.image3_id]))
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
@test.attr(type='gate')
def test_list_images_filter_by_server_id(self):
# The images should contain images filtered by server id
params = {'server': self.server1['id']}
images = self.client.list_images(params)
self.assertTrue(any([i for i in images
if i['id'] == self.snapshot1_id]),
"Failed to find image %s in images. Got images %s" %
(self.snapshot1_id, images))
self.assertTrue(any([i for i in images
if i['id'] == self.snapshot2_id]))
self.assertFalse(any([i for i in images
if i['id'] == self.snapshot3_id]))
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
@test.attr(type='gate')
def test_list_images_filter_by_server_ref(self):
# The list of servers should be filtered by server ref
server_links = self.server2['links']
# Try all server link types
for link in server_links:
params = {'server': link['href']}
images = self.client.list_images(params)
self.assertFalse(any([i for i in images
if i['id'] == self.snapshot1_id]))
self.assertFalse(any([i for i in images
if i['id'] == self.snapshot2_id]))
self.assertTrue(any([i for i in images
if i['id'] == self.snapshot3_id]))
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
@test.attr(type='gate')
def test_list_images_filter_by_type(self):
# The list of servers should be filtered by image type
params = {'type': 'snapshot'}
images = self.client.list_images(params)
self.assertTrue(any([i for i in images
if i['id'] == self.snapshot1_id]))
self.assertTrue(any([i for i in images
if i['id'] == self.snapshot2_id]))
self.assertTrue(any([i for i in images
if i['id'] == self.snapshot3_id]))
self.assertFalse(any([i for i in images
if i['id'] == self.image_ref]))
@test.attr(type='gate')
def test_list_images_limit_results(self):
# Verify only the expected number of results are returned
params = {'limit': '1'}
images = self.client.list_images(params)
# when _interface='xml', one element for images_links in images
# ref: Question #224349
self.assertEqual(1, len([x for x in images if 'id' in x]))
@test.attr(type='gate')
def test_list_images_filter_by_changes_since(self):
# Verify only updated images are returned in the detailed list
# Becoming ACTIVE will modify the updated time
# Filter by the image's created time
params = {'changes-since': self.image3['created']}
images = self.client.list_images(params)
found = any([i for i in images if i['id'] == self.image3_id])
self.assertTrue(found)
@test.attr(type='gate')
def test_list_images_with_detail_filter_by_status(self):
# Detailed list of all images should only contain images
# with the provided status
params = {'status': 'ACTIVE'}
images = self.client.list_images_with_detail(params)
self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
self.assertTrue(any([i for i in images if i['id'] == self.image2_id]))
self.assertTrue(any([i for i in images if i['id'] == self.image3_id]))
@test.attr(type='gate')
def test_list_images_with_detail_filter_by_name(self):
# Detailed list of all images should contain the expected
# images filtered by name
params = {'name': self.image1['name']}
images = self.client.list_images_with_detail(params)
self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
self.assertFalse(any([i for i in images if i['id'] == self.image2_id]))
self.assertFalse(any([i for i in images if i['id'] == self.image3_id]))
@test.attr(type='gate')
def test_list_images_with_detail_limit_results(self):
# Verify only the expected number of results (with full details)
# are returned
params = {'limit': '1'}
images = self.client.list_images_with_detail(params)
self.assertEqual(1, len(images))
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
@test.attr(type='gate')
def test_list_images_with_detail_filter_by_server_ref(self):
# Detailed list of servers should be filtered by server ref
server_links = self.server2['links']
# Try all server link types
for link in server_links:
params = {'server': link['href']}
images = self.client.list_images_with_detail(params)
self.assertFalse(any([i for i in images
if i['id'] == self.snapshot1_id]))
self.assertFalse(any([i for i in images
if i['id'] == self.snapshot2_id]))
self.assertTrue(any([i for i in images
if i['id'] == self.snapshot3_id]))
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
@test.attr(type='gate')
def test_list_images_with_detail_filter_by_type(self):
# The detailed list of servers should be filtered by image type
params = {'type': 'snapshot'}
images = self.client.list_images_with_detail(params)
self.client.get_image(self.image_ref)
self.assertTrue(any([i for i in images
if i['id'] == self.snapshot1_id]))
self.assertTrue(any([i for i in images
if i['id'] == self.snapshot2_id]))
self.assertTrue(any([i for i in images
if i['id'] == self.snapshot3_id]))
self.assertFalse(any([i for i in images
if i['id'] == self.image_ref]))
@test.attr(type='gate')
def test_list_images_with_detail_filter_by_changes_since(self):
# Verify an update image is returned
# Becoming ACTIVE will modify the updated time
# Filter by the image's created time
params = {'changes-since': self.image1['created']}
images = self.client.list_images_with_detail(params)
self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
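# Illustrative note (not executed): 'changes-since' takes an ISO 8601
# timestamp, so a hand-written equivalent of the filter used above would be
# something like:
#
#     params = {'changes-since': '2014-01-01T00:00:00Z'}
#
# Filtering by an image's own 'created' value therefore returns that image
# plus anything modified after it.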
|
|
"""
MIT License
Copyright (c) 2017 Code Society
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""Robinhood.py: a collection of utilities for working with Robinhood's Private API"""
import getpass
import logging
import warnings
from enum import Enum
import requests
import six
from six.moves.urllib.parse import unquote
from six.moves.urllib.request import getproxies
from six.moves import input
from . import exceptions as RH_exception
from datetime import datetime
class Bounds(Enum):
"""enum for bounds in `historicals` endpoint"""
REGULAR = 'regular'
EXTENDED = 'extended'
class Transaction(Enum):
"""enum for buy/sell orders"""
BUY = 'buy'
SELL = 'sell'
class Robinhood:
"""wrapper class for fetching/parsing Robinhood endpoints"""
endpoints = {
"login": "https://api.robinhood.com/api-token-auth/",
"logout": "https://api.robinhood.com/api-token-logout/",
"investment_profile": "https://api.robinhood.com/user/investment_profile/",
"accounts": "https://api.robinhood.com/accounts/",
"ach_iav_auth": "https://api.robinhood.com/ach/iav/auth/",
"ach_relationships": "https://api.robinhood.com/ach/relationships/",
"ach_transfers": "https://api.robinhood.com/ach/transfers/",
"applications": "https://api.robinhood.com/applications/",
"dividends": "https://api.robinhood.com/dividends/",
"edocuments": "https://api.robinhood.com/documents/",
"instruments": "https://api.robinhood.com/instruments/",
"margin_upgrades": "https://api.robinhood.com/margin/upgrades/",
"markets": "https://api.robinhood.com/markets/",
"market_info": "https://api.robinhood.com/markets/ARCX/hours/",
"notifications": "https://api.robinhood.com/notifications/",
"orders": "https://api.robinhood.com/orders/",
"password_reset": "https://api.robinhood.com/password_reset/request/",
"portfolios": "https://api.robinhood.com/portfolios/",
"positions": "https://api.robinhood.com/positions/",
"quotes": "https://api.robinhood.com/quotes/",
"historicals": "https://api.robinhood.com/quotes/historicals/",
"document_requests": "https://api.robinhood.com/upload/document_requests/",
"user": "https://api.robinhood.com/user/",
"watchlists": "https://api.robinhood.com/watchlists/",
"news": "https://api.robinhood.com/midlands/news/",
"fundamentals": "https://api.robinhood.com/fundamentals/",
}
session = None
username = None
password = None
headers = None
auth_token = None
logger = logging.getLogger('Robinhood')
logger.addHandler(logging.NullHandler())
##############################
#Logging in and initializing
##############################
def __init__(self):
self.session = requests.session()
self.session.proxies = getproxies()
self.headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en;q=1, fr;q=0.9, de;q=0.8, ja;q=0.7, nl;q=0.6, it;q=0.5",
"Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
"X-Robinhood-API-Version": "1.0.0",
"Connection": "keep-alive",
"User-Agent": "Robinhood/823 (iPhone; iOS 7.1.2; Scale/2.00)"
}
self.session.headers = self.headers
def login_prompt(self): #pragma: no cover
"""Prompts user for username and password and calls login()."""
username = input("Username: ")
password = getpass.getpass()
return self.login(username=username, password=password)
def login(
self,
username,
password,
mfa_code=None
):
"""save and test login info for Robinhood accounts
Args:
username (str): username
password (str): password
mfa_code (str, optional): two-factor authentication code, if required
Returns:
(bool): received valid auth token
"""
self.username = username
self.password = password
payload = {
'client_id': 'c82SH0WZOsabOXGP2sxqcj34FxkvfnWRZBKlBjFS',
'expires_in': 86400,
'grant_type': 'password',
'password': self.password,
'scope': 'internal',
'username': self.username
}
if mfa_code:
payload['mfa_code'] = mfa_code
try:
res = self.session.post('https://api.robinhood.com/oauth2/token/', data=payload, timeout=15)
res.raise_for_status()
data = res.json()
print(data)
except requests.exceptions.HTTPError:
raise RH_exception.LoginFailed()
if 'mfa_required' in data.keys(): # pragma: no cover
raise RH_exception.TwoFactorRequired() # requires a second call to enable 2FA
if 'access_token' in data.keys():
self.oauth_token = data['access_token']
self.headers['Authorization'] = 'Bearer ' + self.oauth_token
return True
return False
def loginOld(
self,
username,
password,
mfa_code=None
):
"""save and test login info for Robinhood accounts
Args:
username (str): username
password (str): password
mfa_code (str, optional): two-factor authentication code, if required
Returns:
(bool): received valid auth token
"""
self.username = username
self.password = password
payload = {
'password': self.password,
'username': self.username
}
if mfa_code:
payload['mfa_code'] = mfa_code
try:
res = self.session.post(
self.endpoints['login'],
data=payload
)
res.raise_for_status()
data = res.json()
except requests.exceptions.HTTPError as err_msg:
print(err_msg)
warnings.warn('Failed to log in: ' + repr(err_msg))
raise RH_exception.LoginFailed()
if 'mfa_required' in data.keys(): #pragma: no cover
raise RH_exception.TwoFactorRequired() #requires a second call to enable 2FA
if 'token' in data.keys():
self.auth_token = data['token']
self.headers['Authorization'] = 'Token ' + self.auth_token
return True
return False
def logout(self):
"""logout from Robinhood
Returns:
(:obj:`requests.request`) result from logout endpoint
"""
try:
req = self.session.post(self.endpoints['logout'])
req.raise_for_status()
except requests.exceptions.HTTPError as err_msg:
warnings.warn('Failed to log out ' + repr(err_msg))
self.headers['Authorization'] = None
self.auth_token = None
return req
##############################
#GET DATA
##############################
def get_url_content_json(self, url):
"""fetch url content"""
res = self.session.get(url)
res.raise_for_status() #will throw without auth
data = res.json()
return data
def post_url_content_json(self, url):
"""fetch url content"""
res = self.session.post(url)
res.raise_for_status() #will throw without auth
data = res.json()
return data
def investment_profile(self):
"""fetch investment_profile"""
res = self.session.get(self.endpoints['investment_profile'])
res.raise_for_status() #will throw without auth
data = res.json()
return data
def instruments(self, symbol):
''' Generates an instrument object. Currently this is only used for
placing orders, and generating and using the instrument object are handled
for you, so you can ignore this method'''
res = self.session.get(self.endpoints['instruments'], params={'query':symbol.upper()})
print(self.endpoints['instruments'])
if res.status_code == 200:
return res.json()['results']
else:
print(res.content)
raise Exception("Could not generate instrument object: %s " % res.headers)
def market_info(self):
"""fetch market info using one of the prominent exchanges
Args:
Returns:
(:obj:`dict`): JSON contents from `quotes` endpoint
"""
current_date_str = datetime.now().strftime("%Y-%m-%d")
url = str(self.endpoints['market_info']) + current_date_str + "/"
try:
req = requests.get(url)
req.raise_for_status()
data = req.json()
except requests.exceptions.HTTPError:
raise NameError('Invalid Query: ' + url) #TODO: custom exception
return data
def quote_data(self, stock=''):
"""fetch stock quote
Args:
stock (str): stock ticker, prompt if blank
Returns:
(:obj:`dict`): JSON contents from `quotes` endpoint
"""
url = None
if stock.find(',') == -1:
url = str(self.endpoints['quotes']) + str(stock) + "/"
else:
url = str(self.endpoints['quotes']) + "?symbols=" + str(stock)
#Check for validity of symbol
try:
res = self.session.get(url)
res.raise_for_status() # auth required
data = res.json()
except requests.exceptions.HTTPError:
raise NameError('Invalid Symbol: ' + stock) #TODO: custom exception
return data
# We will keep for compatibility until next major release
def quotes_data(self, stocks):
"""Fetch quote for multiple stocks, in one single Robinhood API call
Args:
stocks (list<str>): stock tickers
Returns:
(:obj:`list` of :obj:`dict`): List of JSON contents from `quotes` endpoint, in the
same order of input args. If any ticker is invalid, a None will occur at that position.
"""
url = str(self.endpoints['quotes']) + "?symbols=" + ",".join(stocks)
try:
req = requests.get(url)
req.raise_for_status()
data = req.json()
except requests.exceptions.HTTPError:
raise NameError('Invalid Symbols: ' + ",".join(stocks)) #TODO: custom exception
return data["results"]
def get_quote_list(self, stock='', key=''):
"""Returns multiple stock info and keys from quote_data (prompt if blank)
Args:
stock (str): stock ticker (or tickers separated by a comma)
, prompt if blank
key (str): key attributes that the function should return
Returns:
(:obj:`list`): Returns values from each stock or empty list
if none of the stocks were valid
"""
#Builds a list containing the requested fields for a single stock
def append_stock(stock):
keys = key.split(',')
myStr = ''
for item in keys:
myStr += str(stock[item]) + ","
return (myStr.split(','))
#Prompt for stock if not entered
if not stock: #pragma: no cover
stock = input("Symbol: ")
data = self.quote_data(stock)
res = []
# Handles the case of multiple tickers
if stock.find(',') != -1:
for stock in data['results']:
if stock is None:
continue
res.append(append_stock(stock))
else:
res.append(append_stock(data))
return res
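# Illustrative usage (not executed): requesting two fields for two tickers,
# e.g. get_quote_list('AAPL,MSFT', 'symbol,last_trade_price'), yields one
# list entry per ticker with the fields in the requested order.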
def get_quote(self, stock=''):
"""wrapper for quote_data"""
data = self.quote_data(stock)
return data["symbol"]
def get_historical_quotes(
self,
stock,
interval,
span,
bounds=Bounds.REGULAR
):
"""fetch historical data for stock
Note: valid interval/span configs
interval = 5minute | 10minute + span = day, week
interval = day + span = year
interval = week
TODO: NEEDS TESTS
Args:
stock (str): stock ticker
interval (str): resolution of data
span (str): length of data
bounds (:enum:`Bounds`, optional): 'extended' or 'regular' trading hours
Returns:
(:obj:`dict`) values returned from `historicals` endpoint
"""
if isinstance(bounds, str): #recast to Enum
bounds = Bounds(bounds)
symbols = stock
if isinstance(symbols, list):
symbols = ','.join(stock).upper()
params = {
'symbols': symbols,
'interval': interval,
'span': span,
'bounds': bounds.name.lower()
}
res = self.session.get(
self.endpoints['historicals'],
params=params
)
return res.json()
def get_news(self, stock):
"""fetch news endpoint
Args:
stock (str): stock ticker
Returns:
(:obj:`dict`) values returned from `news` endpoint
"""
return self.session.get(self.endpoints['news']+stock.upper()+"/").json()
def print_quote(self, stock=''): #pragma: no cover
"""print quote information
Args:
stock (str): ticker to fetch
Returns:
None
"""
data = self.get_quote_list(stock,'symbol,last_trade_price')
for item in data:
quote_str = item[0] + ": $" + item[1]
print(quote_str)
self.logger.info(quote_str)
def print_quotes(self, stocks): #pragma: no cover
"""print a collection of stocks
Args:
stocks (:obj:`list`): list of stocks to print
Returns:
None
"""
for stock in stocks:
self.print_quote(stock)
def ask_price(self, stock=''):
"""get asking price for a stock
Note:
queries `quote` endpoint, dict wrapper
Args:
stock (str): stock ticker
Returns:
(float): ask price
"""
return self.get_quote_list(stock,'ask_price')
def ask_size(self, stock=''):
"""get ask size for a stock
Note:
queries `quote` endpoint, dict wrapper
Args:
stock (str): stock ticker
Returns:
(int): ask size
"""
return self.get_quote_list(stock,'ask_size')
def bid_price(self, stock=''):
"""get bid price for a stock
Note:
queries `quote` endpoint, dict wrapper
Args:
stock (str): stock ticker
Returns:
(float): bid price
"""
return self.get_quote_list(stock,'bid_price')
def bid_size(self, stock=''):
"""get bid size for a stock
Note:
queries `quote` endpoint, dict wrapper
Args:
stock (str): stock ticker
Returns:
(int): bid size
"""
return self.get_quote_list(stock,'bid_size')
def last_trade_price(self, stock=''):
"""get last trade price for a stock
Note:
queries `quote` endpoint, dict wrapper
Args:
stock (str): stock ticker
Returns:
(float): last trade price
"""
return self.get_quote_list(stock,'last_trade_price')
def previous_close(self, stock=''):
"""get previous closing price for a stock
Note:
queries `quote` endpoint, dict wrapper
Args:
stock (str): stock ticker
Returns:
(float): previous closing price
"""
return self.get_quote_list(stock,'previous_close')
def previous_close_date(self, stock=''):
"""get previous closing date for a stock
Note:
queries `quote` endpoint, dict wrapper
Args:
stock (str): stock ticker
Returns:
(str): previous close date
"""
return self.get_quote_list(stock,'previous_close_date')
def adjusted_previous_close(self, stock=''):
"""get adjusted previous closing price for a stock
Note:
queries `quote` endpoint, dict wrapper
Args:
stock (str): stock ticker
Returns:
(float): adjusted previous closing price
"""
return self.get_quote_list(stock,'adjusted_previous_close')
def symbol(self, stock=''):
"""get symbol for a stock
Note:
queries `quote` endpoint, dict wrapper
Args:
stock (str): stock ticker
Returns:
(str): stock symbol
"""
return self.get_quote_list(stock,'symbol')
def last_updated_at(self, stock=''):
"""get last update datetime
Note:
queries `quote` endpoint, dict wrapper
Args:
stock (str): stock ticker
Returns:
(str): last update datetime
"""
return self.get_quote_list(stock,'last_updated_at')
#TODO: recast to datetime object?
def get_account(self):
"""fetch account information
Returns:
(:obj:`dict`): `accounts` endpoint payload
"""
res = self.session.get(self.endpoints['accounts'])
res.raise_for_status() #auth required
res = res.json()
return res['results'][0]
def get_url(self, url):
"""flat wrapper for fetching URL directly"""
return self.session.get(url).json()
##############################
#GET FUNDAMENTALS
##############################
def get_fundamentals(self, stock=''):
"""find stock fundamentals data
Args:
(str): stock ticker
Returns:
(:obj:`dict`): contents of `fundamentals` endpoint
"""
#Prompt for stock if not entered
if not stock: #pragma: no cover
stock = input("Symbol: ")
url = str(self.endpoints['fundamentals']) + str(stock.upper()) + "/"
#Check for validity of symbol
try:
req = requests.get(url)
req.raise_for_status()
data = req.json()
except requests.exceptions.HTTPError:
raise NameError('Invalid Symbol: ' + stock) #TODO wrap custom exception
return data
def fundamentals(self, stock=''):
"""wrapper for get_fundamentlals function"""
return self.get_fundamentals(stock)
##############################
# PORTFOLIOS DATA
##############################
def portfolios(self):
"""Returns the user's portfolio data."""
req = self.session.get(self.endpoints['portfolios'])
req.raise_for_status()
return req.json()['results'][0]
def adjusted_equity_previous_close(self):
"""wrapper for portfolios
get `adjusted_equity_previous_close` value
"""
return float(self.portfolios()['adjusted_equity_previous_close'])
def equity(self):
"""wrapper for portfolios
get `equity` value
"""
return float(self.portfolios()['equity'])
def equity_previous_close(self):
"""wrapper for portfolios
get `equity_previous_close` value
"""
return float(self.portfolios()['equity_previous_close'])
def excess_margin(self):
"""wrapper for portfolios
get `excess_margin` value
"""
return float(self.portfolios()['excess_margin'])
def extended_hours_equity(self):
"""wrapper for portfolios
get `extended_hours_equity` value
"""
try:
return float(self.portfolios()['extended_hours_equity'])
except TypeError:
return None
def extended_hours_market_value(self):
"""wrapper for portfolios
get `extended_hours_market_value` value
"""
try:
return float(self.portfolios()['extended_hours_market_value'])
except TypeError:
return None
def last_core_equity(self):
"""wrapper for portfolios
get `last_core_equity` value
"""
return float(self.portfolios()['last_core_equity'])
def last_core_market_value(self):
"""wrapper for portfolios
get `last_core_market_value` value
"""
return float(self.portfolios()['last_core_market_value'])
def market_value(self):
"""wrapper for portfolios
get `market_value` value
"""
return float(self.portfolios()['market_value'])
def order_history(self):
"""wrapper for portfolios
get orders from account
"""
return self.session.get(self.endpoints['orders']).json()
def dividends(self):
"""wrapper for portfolios
get dividends from account
"""
return self.session.get(self.endpoints['dividends']).json()
##############################
# POSITIONS DATA
##############################
def positions(self):
"""Returns the user's positions data."""
return self.session.get(self.endpoints['positions']).json()
def securities_owned(self):
"""
Returns a list of symbols of securities of which there are more
than zero shares in user's portfolio.
"""
return self.session.get(self.endpoints['positions']+'?nonzero=true').json()
##############################
#PLACE ORDER
##############################
def place_order(
self,
instrument,
quantity=1,
price=None,
stop_price=None,
transaction = None,
trigger='immediate',
order='market',
time_in_force = 'gfd'
):
"""place an order with Robinhood
Notes:
TEST THIS BEFORE RELYING ON IT.
Note that a LIMIT order must be submitted with "price" while a STOP
order must be submitted with "stop_price", so this method may not yet
handle both cases correctly.
Reference: https://github.com/sanko/Robinhood/blob/master/Order.md#place-an-order
Args:
instrument (dict): the RH URL and symbol in dict for the instrument to be traded
quantity (int): quantity of stocks in order
price (float): limit price for order
stop_price (float): stop price for order
transaction (:enum:`Transaction`): BUY or SELL enum
trigger (:enum:`Trigger`): IMMEDIATE or STOP enum
order (:enum:`Order`): MARKET or LIMIT
time_in_force (:enum:`TIME_IN_FORCE`): GFD or GTC (day or until cancelled)
Returns:
(:obj:`requests.request`): result from `orders` put command
"""
if isinstance(transaction, str):
transaction = Transaction(transaction)
if transaction == Transaction.BUY and not price:
bid_price = self.quote_data(instrument['symbol'])['bid_price']
price = bid_price
# if not price:
# if transaction == Transaction.BUY:
# price = self.quote_data(instrument['symbol'])['bid_price']
# else:
# price = self.quote_data(instrument['symbol'])['ask_price']
payload = {
'account': self.get_account()['url'],
'instrument': unquote(instrument['url']),
'quantity': quantity,
'side': transaction.name.lower(),
'symbol': instrument['symbol'],
'time_in_force': time_in_force.lower(),
'trigger': trigger,
'type': order.lower()
}
if price:
payload['price'] = float(price)
if stop_price:
payload['stop_price'] = float(stop_price)
# data = 'account=%s&instrument=%s&price=%f&quantity=%d&side=%s&symbol=%s#&time_in_force=gfd&trigger=immediate&type=market' % (
# self.get_account()['url'],
# urllib.parse.unquote(instrument['url']),
# float(bid_price),
# quantity,
# transaction,
# instrument['symbol']
#)
res = self.session.post(
self.endpoints['orders'],
data=payload
)
print(payload)
print(res.json())
res.raise_for_status()
return res.json()
def place_buy_order(
self,
instrument,
quantity,
bid_price=0.0
):
"""wrapper for placing buy orders
Args:
instrument (dict): the RH URL and symbol in dict for the instrument to be traded
quantity (int): quantity of stocks in order
bid_price (float): price for order
Returns:
(:obj:`requests.request`): result from `orders` put command
"""
transaction = Transaction.BUY
return self.place_order(instrument, quantity, bid_price, transaction)
def place_sell_order(
self,
instrument,
quantity,
bid_price=0.0
):
"""wrapper for placing sell orders
Args:
instrument (dict): the RH URL and symbol in dict for the instrument to be traded
quantity (int): quantity of stocks in order
bid_price (float): price for order
Returns:
(:obj:`requests.request`): result from `orders` put command
"""
transaction = Transaction.SELL
return self.place_order(instrument, quantity, bid_price, transaction)
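# Minimal usage sketch (assumes valid Robinhood credentials and network
# access; shown for illustration only, not executed as part of this module):
#
#     rh = Robinhood()
#     if rh.login(username='user@example.com', password='secret'):
#         print(rh.last_trade_price('AAPL'))
#         print(rh.get_historical_quotes('AAPL', '5minute', 'day'))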
|
|
try:
from pandas.plotting._converter import DatetimeConverter
except ImportError:
from pandas.tseries.converter import DatetimeConverter
import pandas as pd
from pandas import to_datetime, date_range, Series, DataFrame, period_range
import datetime as dt
from pandas.tseries.frequencies import infer_freq
import numpy as np
if hasattr(Series, 'convert'):
Series.resample = Series.convert
class DatetimeIndex(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
self.rng2 = date_range(start='1/1/2000 9:30', periods=10000,
freq='S', tz='US/Eastern')
self.index_repeated = date_range(start='1/1/2000',
periods=1000, freq='T').repeat(10)
self.rng3 = date_range(start='1/1/2000', periods=1000, freq='H')
self.df = DataFrame(np.random.randn(len(self.rng3), 2), self.rng3)
self.rng4 = date_range(start='1/1/2000', periods=1000,
freq='H', tz='US/Eastern')
self.df2 = DataFrame(np.random.randn(len(self.rng4), 2),
index=self.rng4)
N = 100000
self.dti = pd.date_range('2011-01-01', freq='H', periods=N).repeat(5)
self.dti_tz = pd.date_range('2011-01-01', freq='H', periods=N,
tz='Asia/Tokyo').repeat(5)
self.rng5 = date_range(start='1/1/2000',
end='3/1/2000', tz='US/Eastern')
self.dst_rng = date_range(start='10/29/2000 1:00:00',
end='10/29/2000 1:59:59', freq='S')
self.index = date_range(start='10/29/2000',
end='10/29/2000 00:59:59', freq='S')
self.index = self.index.append(self.dst_rng)
self.index = self.index.append(self.dst_rng)
self.index = self.index.append(date_range(start='10/29/2000 2:00:00',
end='10/29/2000 3:00:00',
freq='S'))
self.N = 10000
self.rng6 = date_range(start='1/1/1', periods=self.N, freq='B')
self.rng7 = date_range(start='1/1/1700', freq='D', periods=100000)
self.no_freq = self.rng7[:50000].append(self.rng7[50002:])
self.d_freq = self.rng7[:50000].append(self.rng7[50000:])
self.rng8 = date_range(start='1/1/1700', freq='B', periods=75000)
self.b_freq = self.rng8[:50000].append(self.rng8[50000:])
def time_add_timedelta(self):
(self.rng + dt.timedelta(minutes=2))
def time_normalize(self):
self.rng2.normalize()
def time_unique(self):
self.index_repeated.unique()
def time_reset_index(self):
self.df.reset_index()
def time_reset_index_tz(self):
self.df2.reset_index()
def time_dti_factorize(self):
self.dti.factorize()
def time_dti_tz_factorize(self):
self.dti_tz.factorize()
def time_dti_time(self):
self.dst_rng.time
def time_timestamp_tzinfo_cons(self):
self.rng5[0]
def time_infer_dst(self):
self.index.tz_localize('US/Eastern', infer_dst=True)
def time_timeseries_is_month_start(self):
self.rng6.is_month_start
def time_infer_freq_none(self):
infer_freq(self.no_freq)
def time_infer_freq_daily(self):
infer_freq(self.d_freq)
def time_infer_freq_business(self):
infer_freq(self.b_freq)
def time_to_date(self):
self.rng.date
def time_to_pydatetime(self):
self.rng.to_pydatetime()
class TimeDatetimeConverter(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
def time_convert(self):
DatetimeConverter.convert(self.rng, None, None)
class Iteration(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.M = 10000
self.idx1 = date_range(start='20140101', freq='T', periods=self.N)
self.idx2 = period_range(start='20140101', freq='T', periods=self.N)
def iter_n(self, iterable, n=None):
self.i = 0
for _ in iterable:
self.i += 1
if ((n is not None) and (self.i > n)):
break
def time_iter_datetimeindex(self):
self.iter_n(self.idx1)
def time_iter_datetimeindex_preexit(self):
self.iter_n(self.idx1, self.M)
def time_iter_periodindex(self):
self.iter_n(self.idx2)
def time_iter_periodindex_preexit(self):
self.iter_n(self.idx2, self.M)
# ----------------------------------------------------------------------
# Resampling
class ResampleDataFrame(object):
goal_time = 0.2
def setup(self):
self.rng = date_range(start='20130101', periods=100000, freq='50L')
self.df = DataFrame(np.random.randn(100000, 2), index=self.rng)
def time_max_numpy(self):
self.df.resample('1s', how=np.max)
def time_max_string(self):
self.df.resample('1s', how='max')
def time_mean_numpy(self):
self.df.resample('1s', how=np.mean)
def time_mean_string(self):
self.df.resample('1s', how='mean')
def time_min_numpy(self):
self.df.resample('1s', how=np.min)
def time_min_string(self):
self.df.resample('1s', how='min')
class ResampleSeries(object):
goal_time = 0.2
def setup(self):
self.rng1 = period_range(start='1/1/2000', end='1/1/2001', freq='T')
self.ts1 = Series(np.random.randn(len(self.rng1)), index=self.rng1)
self.rng2 = date_range(start='1/1/2000', end='1/1/2001', freq='T')
self.ts2 = Series(np.random.randn(len(self.rng2)), index=self.rng2)
self.rng3 = date_range(start='2000-01-01 00:00:00',
end='2000-01-01 10:00:00', freq='555000U')
self.int_ts = Series(5, self.rng3, dtype='int64')
self.dt_ts = self.int_ts.astype('datetime64[ns]')
def time_period_downsample_mean(self):
self.ts1.resample('D', how='mean')
def time_timestamp_downsample_mean(self):
self.ts2.resample('D', how='mean')
def time_resample_datetime64(self):
# GH 7754
self.dt_ts.resample('1S', how='last')
def time_1min_5min_mean(self):
self.ts2[:10000].resample('5min', how='mean')
def time_1min_5min_ohlc(self):
self.ts2[:10000].resample('5min', how='ohlc')
class AsOf(object):
goal_time = 0.2
def setup(self):
self.N = 10000
self.rng = date_range(start='1/1/1990', periods=self.N, freq='53s')
self.ts = Series(np.random.randn(self.N), index=self.rng)
self.dates = date_range(start='1/1/1990',
periods=(self.N * 10), freq='5s')
self.ts2 = self.ts.copy()
self.ts2[250:5000] = np.nan
self.ts3 = self.ts.copy()
self.ts3[-5000:] = np.nan
# test speed of pre-computing NAs.
def time_asof(self):
self.ts.asof(self.dates)
# should be roughly the same as above.
def time_asof_nan(self):
self.ts2.asof(self.dates)
# test speed of the code path for a scalar index
# without *while* loop
def time_asof_single(self):
self.ts.asof(self.dates[0])
# test speed of the code path for a scalar index
# before the start. should be the same as above.
def time_asof_single_early(self):
self.ts.asof(self.dates[0] - dt.timedelta(10))
# test the speed of the code path for a scalar index
# with a long *while* loop. should still be much
# faster than pre-computing all the NAs.
def time_asof_nan_single(self):
self.ts3.asof(self.dates[-1])
class AsOfDataFrame(object):
goal_time = 0.2
def setup(self):
self.N = 10000
self.M = 100
self.rng = date_range(start='1/1/1990', periods=self.N, freq='53s')
self.dates = date_range(start='1/1/1990',
periods=(self.N * 10), freq='5s')
self.ts = DataFrame(np.random.randn(self.N, self.M), index=self.rng)
self.ts2 = self.ts.copy()
self.ts2.iloc[250:5000] = np.nan
self.ts3 = self.ts.copy()
self.ts3.iloc[-5000:] = np.nan
# test speed of pre-computing NAs.
def time_asof(self):
self.ts.asof(self.dates)
# should be roughly the same as above.
def time_asof_nan(self):
self.ts2.asof(self.dates)
# test speed of the code path for a scalar index
# with pre-computing all NAs.
def time_asof_single(self):
self.ts.asof(self.dates[0])
# should be roughly the same as above.
def time_asof_nan_single(self):
self.ts3.asof(self.dates[-1])
# test speed of the code path for a scalar index
# before the start. should be without the cost of
# pre-computing all the NAs.
def time_asof_single_early(self):
self.ts.asof(self.dates[0] - dt.timedelta(10))
class TimeSeries(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='s')
self.rng = self.rng.take(np.random.permutation(self.N))
self.ts = Series(np.random.randn(self.N), index=self.rng)
self.rng2 = date_range(start='1/1/2000', periods=self.N, freq='T')
self.ts2 = Series(np.random.randn(self.N), index=self.rng2)
self.lindex = np.random.permutation(self.N)[:(self.N // 2)]
self.rindex = np.random.permutation(self.N)[:(self.N // 2)]
self.left = Series(self.ts2.values.take(self.lindex),
index=self.ts2.index.take(self.lindex))
self.right = Series(self.ts2.values.take(self.rindex),
index=self.ts2.index.take(self.rindex))
self.rng3 = date_range(start='1/1/2000', periods=1500000, freq='S')
self.ts3 = Series(1, index=self.rng3)
def time_sort_index_monotonic(self):
self.ts2.sort_index()
def time_sort_index_non_monotonic(self):
self.ts.sort_index()
def time_timeseries_slice_minutely(self):
self.ts2[:10000]
def time_add_irregular(self):
(self.left + self.right)
def time_large_lookup_value(self):
self.ts3[self.ts3.index[(len(self.ts3) // 2)]]
self.ts3.index._cleanup()
class ToDatetime(object):
goal_time = 0.2
def setup(self):
self.rng = date_range(start='1/1/2000', periods=10000, freq='D')
self.stringsD = Series(self.rng.strftime('%Y%m%d'))
self.rng = date_range(start='1/1/2000', periods=20000, freq='H')
self.strings = self.rng.strftime('%Y-%m-%d %H:%M:%S').tolist()
self.strings_nosep = self.rng.strftime('%Y%m%d %H:%M:%S').tolist()
self.strings_tz_space = [x.strftime('%Y-%m-%d %H:%M:%S') + ' -0800'
for x in self.rng]
self.s = Series((['19MAY11', '19MAY11:00:00:00'] * 100000))
self.s2 = self.s.str.replace(':\\S+$', '')
self.unique_numeric_seconds = range(10000)
self.dup_numeric_seconds = [1000] * 10000
self.dup_string_dates = ['2000-02-11'] * 10000
self.dup_string_with_tz = ['2000-02-11 15:00:00-0800'] * 10000
def time_format_YYYYMMDD(self):
to_datetime(self.stringsD, format='%Y%m%d')
def time_iso8601(self):
to_datetime(self.strings)
def time_iso8601_nosep(self):
to_datetime(self.strings_nosep)
def time_iso8601_format(self):
to_datetime(self.strings, format='%Y-%m-%d %H:%M:%S')
def time_iso8601_format_no_sep(self):
to_datetime(self.strings_nosep, format='%Y%m%d %H:%M:%S')
def time_iso8601_tz_spaceformat(self):
to_datetime(self.strings_tz_space)
def time_format_exact(self):
to_datetime(self.s2, format='%d%b%y')
def time_format_no_exact(self):
to_datetime(self.s, format='%d%b%y', exact=False)
def time_cache_true_with_unique_seconds_and_unit(self):
to_datetime(self.unique_numeric_seconds, unit='s', cache=True)
def time_cache_false_with_unique_seconds_and_unit(self):
to_datetime(self.unique_numeric_seconds, unit='s', cache=False)
def time_cache_true_with_dup_seconds_and_unit(self):
to_datetime(self.dup_numeric_seconds, unit='s', cache=True)
def time_cache_false_with_dup_seconds_and_unit(self):
to_datetime(self.dup_numeric_seconds, unit='s', cache=False)
def time_cache_true_with_dup_string_dates(self):
to_datetime(self.dup_string_dates, cache=True)
def time_cache_false_with_dup_string_dates(self):
to_datetime(self.dup_string_dates, cache=False)
def time_cache_true_with_dup_string_dates_and_format(self):
to_datetime(self.dup_string_dates, format='%Y-%m-%d', cache=True)
def time_cache_false_with_dup_string_dates_and_format(self):
to_datetime(self.dup_string_dates, format='%Y-%m-%d', cache=False)
def time_cache_true_with_dup_string_tzoffset_dates(self):
to_datetime(self.dup_string_with_tz, cache=True)
def time_cache_false_with_dup_string_tzoffset_dates(self):
to_datetime(self.dup_string_with_tz, cache=False)
class DatetimeAccessor(object):
def setup(self):
self.N = 100000
self.series = pd.Series(
pd.date_range(start='1/1/2000', periods=self.N, freq='T')
)
def time_dt_accessor(self):
self.series.dt
def time_dt_accessor_normalize(self):
self.series.dt.normalize()
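# Quick manual check of a benchmark outside of asv (sketch): instantiate the
# class, run setup() once, then call the timed method directly, e.g.
#
#     bench = ToDatetime()
#     bench.setup()
#     bench.time_iso8601()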
|
|
#!/usr/bin/env python
"""camqadm
.. program:: camqadm
"""
import cmd
import sys
import shlex
import pprint
from itertools import count
from amqplib import client_0_8 as amqp
from celery.app import app_or_default
from celery.bin.base import Command
from celery.utils import padlist
# Valid string -> bool coercions.
BOOLS = {"1": True, "0": False,
"on": True, "off": False,
"yes": True, "no": False,
"true": True, "False": False}
# Map to coerce strings to other types.
COERCE = {bool: lambda value: BOOLS[value.lower()]}
HELP_HEADER = """
Commands
--------
""".rstrip()
EXAMPLE_TEXT = """
Example:
-> queue.delete myqueue yes no
"""
def say(m):
sys.stderr.write("%s\n" % (m, ))
class Spec(object):
"""AMQP Command specification.
Used to convert arguments to Python values and display various help
and tooltips.
:param args: see :attr:`args`.
:keyword returns: see :attr:`returns`.
.. attribute:: args
List of arguments this command takes. Should
contain `(argument_name, argument_type)` tuples.
.. attribute:: returns
Helpful human string representation of what this command returns.
May be :const:`None`, to signify the return type is unknown.
"""
def __init__(self, *args, **kwargs):
self.args = args
self.returns = kwargs.get("returns")
def coerce(self, index, value):
"""Coerce value for argument at index.
E.g. if :attr:`args` is `[("is_active", bool)]`:
>>> coerce(0, "False")
False
"""
arg_info = self.args[index]
arg_type = arg_info[1]
# Might be a custom way to coerce the string value,
# so look in the coercion map.
return COERCE.get(arg_type, arg_type)(value)
def str_args_to_python(self, arglist):
"""Process list of string arguments to values according to spec.
e.g.:
>>> spec = Spec(("queue", str), ("if_unused", bool))
>>> spec.str_args_to_python(["pobox", "true"])
('pobox', True)
"""
return tuple(self.coerce(index, value)
for index, value in enumerate(arglist))
def format_response(self, response):
"""Format the return value of this command in a human-friendly way."""
if not self.returns:
if response is None:
return "ok."
return response
if callable(self.returns):
return self.returns(response)
return self.returns % (response, )
def format_arg(self, name, type, default_value=None):
if default_value is not None:
return "%s:%s" % (name, default_value)
return name
def format_signature(self):
return " ".join(self.format_arg(*padlist(list(arg), 3))
for arg in self.args)
def dump_message(message):
if message is None:
return "No messages in queue. basic.publish something."
return {"body": message.body,
"properties": message.properties,
"delivery_info": message.delivery_info}
def format_declare_queue(ret):
return "ok. queue:%s messages:%s consumers:%s." % ret
class AMQShell(cmd.Cmd):
"""AMQP API Shell.
:keyword connect: Function used to connect to the server, must return
connection object.
:keyword silent: If :const:`True`, the commands won't have annoying
output not relevant when running in non-shell mode.
.. attribute:: builtins
Mapping of built-in command names -> method names
.. attribute:: amqp
Mapping of AMQP API commands and their :class:`Spec`.
"""
conn = None
chan = None
prompt_fmt = "%d> "
identchars = cmd.IDENTCHARS = "."
needs_reconnect = False
counter = 1
inc_counter = count(2).next
builtins = {"EOF": "do_exit",
"exit": "do_exit",
"help": "do_help"}
amqp = {
"exchange.declare": Spec(("exchange", str),
("type", str),
("passive", bool, "no"),
("durable", bool, "no"),
("auto_delete", bool, "no"),
("internal", bool, "no")),
"exchange.delete": Spec(("exchange", str),
("if_unused", bool)),
"queue.bind": Spec(("queue", str),
("exchange", str),
("routing_key", str)),
"queue.declare": Spec(("queue", str),
("passive", bool, "no"),
("durable", bool, "no"),
("exclusive", bool, "no"),
("auto_delete", bool, "no"),
returns=format_declare_queue),
"queue.delete": Spec(("queue", str),
("if_unused", bool, "no"),
("if_empty", bool, "no"),
returns="ok. %d messages deleted."),
"queue.purge": Spec(("queue", str),
returns="ok. %d messages deleted."),
"basic.get": Spec(("queue", str),
("no_ack", bool, "off"),
returns=dump_message),
"basic.publish": Spec(("msg", amqp.Message),
("exchange", str),
("routing_key", str),
("mandatory", bool, "no"),
("immediate", bool, "no")),
"basic.ack": Spec(("delivery_tag", int)),
}
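# For example (illustrative): the shell line "queue.purge myqueue" is looked
# up in the mapping above, its argument coerced through the Spec, and then
# dispatched as self.chan.queue_purge('myqueue'), with the result rendered by
# the spec's `returns` formatter ("ok. %d messages deleted.").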
def __init__(self, *args, **kwargs):
self.connect = kwargs.pop("connect")
self.silent = kwargs.pop("silent", False)
cmd.Cmd.__init__(self, *args, **kwargs)
self._reconnect()
def say(self, m):
"""Say something to the user. Disabled if :attr:`silent`."""
if not self.silent:
say(m)
def get_amqp_api_command(self, cmd, arglist):
"""With a command name and a list of arguments, convert the arguments
to Python values and find the corresponding method on the AMQP channel
object.
:returns: tuple of `(method, processed_args, format_response)`.
Example:
>>> get_amqp_api_command("queue.delete", ["pobox", "yes", "no"])
(<bound method Channel.queue_delete of
<amqplib.client_0_8.channel.Channel object at 0x...>>,
('pobox', True, False))
"""
spec = self.amqp[cmd]
args = spec.str_args_to_python(arglist)
attr_name = cmd.replace(".", "_")
if self.needs_reconnect:
self._reconnect()
return getattr(self.chan, attr_name), args, spec.format_response
def do_exit(self, *args):
"""The `"exit"` command."""
self.say("\n-> please, don't leave!")
sys.exit(0)
def display_command_help(self, cmd, short=False):
spec = self.amqp[cmd]
say("%s %s" % (cmd, spec.format_signature()))
def do_help(self, *args):
if not args:
say(HELP_HEADER)
for cmd_name in self.amqp.keys():
self.display_command_help(cmd_name, short=True)
say(EXAMPLE_TEXT)
else:
self.display_command_help(args[0])
def default(self, line):
say("unknown syntax: '%s'. how about some 'help'?" % line)
def get_names(self):
return set(self.builtins.keys() + self.amqp.keys())
def completenames(self, text, *ignored):
"""Return all commands starting with `text`, for tab-completion."""
names = self.get_names()
first = [cmd for cmd in names
if cmd.startswith(text.replace("_", "."))]
if first:
return first
return [cmd for cmd in names
if cmd.partition(".")[2].startswith(text)]
def dispatch(self, cmd, argline):
"""Dispatch and execute the command.
Lookup order is: :attr:`builtins` -> :attr:`amqp`.
"""
arglist = shlex.split(argline)
if cmd in self.builtins:
return getattr(self, self.builtins[cmd])(*arglist)
fun, args, formatter = self.get_amqp_api_command(cmd, arglist)
return formatter(fun(*args))
def parseline(self, line):
"""Parse input line.
:returns: tuple of three items:
`(command_name, arglist, original_line)`
E.g::
>>> parseline("queue.delete A 'B' C")
("queue.delete", "A 'B' C", "queue.delete A 'B' C")
"""
parts = line.split()
if parts:
return parts[0], " ".join(parts[1:]), line
return "", "", line
def onecmd(self, line):
"""Parse line and execute command."""
cmd, arg, line = self.parseline(line)
if not line:
return self.emptyline()
if cmd is None:
return self.default(line)
self.lastcmd = line
if cmd == '':
return self.default(line)
else:
self.counter = self.inc_counter()
try:
self.respond(self.dispatch(cmd, arg))
except (AttributeError, KeyError), exc:
self.default(line)
except Exception, exc:
say(exc)
self.needs_reconnect = True
def respond(self, retval):
"""What to do with the return value of a command."""
if retval is not None:
if isinstance(retval, basestring):
say(retval)
else:
pprint.pprint(retval)
def _reconnect(self):
"""Re-establish connection to the AMQP server."""
self.conn = self.connect(self.conn)
self.chan = self.conn.channel()
self.needs_reconnect = False
@property
def prompt(self):
return self.prompt_fmt % self.counter
class AMQPAdmin(object):
"""The celery :program:`camqadm` utility."""
def __init__(self, *args, **kwargs):
self.app = app_or_default(kwargs.get("app"))
self.silent = bool(args)
if "silent" in kwargs:
self.silent = kwargs["silent"]
self.args = args
def connect(self, conn=None):
if conn:
conn.close()
conn = self.app.broker_connection()
self.say("-> connecting to %s." % conn.as_uri())
conn.connect()
self.say("-> connected.")
return conn
def run(self):
shell = AMQShell(connect=self.connect)
if self.args:
return shell.onecmd(" ".join(self.args))
try:
return shell.cmdloop()
except KeyboardInterrupt:
self.say("(bibi)")
pass
def say(self, m):
if not self.silent:
say(m)
class AMQPAdminCommand(Command):
def run(self, *args, **options):
options["app"] = self.app
return AMQPAdmin(*args, **options).run()
def camqadm(*args, **options):
AMQPAdmin(*args, **options).run()
def main():
AMQPAdminCommand().execute_from_commandline()
if __name__ == "__main__": # pragma: no cover
main()
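# Example interactive session (sketch; actual counts depend on broker state):
#
#     $ camqadm
#     1> queue.declare myqueue
#     ok. queue:myqueue messages:0 consumers:0.
#     2> queue.delete myqueue yes no
#     ok. 0 messages deleted.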
|
|
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NFS driver module."""
import errno
import os
import mock
import mox as mox_lib
from mox import stubout
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers import nfs
from cinder.volume.drivers import remotefs
class DumbVolume(object):
fields = {}
def __setitem__(self, key, value):
self.fields[key] = value
def __getitem__(self, item):
return self.fields[item]
class RemoteFsDriverTestCase(test.TestCase):
TEST_FILE_NAME = 'test.txt'
TEST_EXPORT = 'nas-host1:/export'
TEST_MNT_POINT = '/mnt/nas'
def setUp(self):
super(RemoteFsDriverTestCase, self).setUp()
self._driver = remotefs.RemoteFSDriver()
self._mox = mox_lib.Mox()
self.configuration = mox_lib.MockObject(conf.Configuration)
self.configuration.append_config_values(mox_lib.IgnoreArg())
self.configuration.nas_secure_file_permissions = 'false'
self.configuration.nas_secure_file_operations = 'false'
self._driver = remotefs.RemoteFSDriver(
configuration=self.configuration)
self.addCleanup(self._mox.UnsetStubs)
def test_create_sparsed_file(self):
(mox, drv) = self._mox, self._driver
mox.StubOutWithMock(drv, '_execute')
drv._execute('truncate', '-s', '1G', '/path', run_as_root=True).\
AndReturn("")
mox.ReplayAll()
drv._create_sparsed_file('/path', 1)
mox.VerifyAll()
def test_create_regular_file(self):
(mox, drv) = self._mox, self._driver
mox.StubOutWithMock(drv, '_execute')
drv._execute('dd', 'if=/dev/zero', 'of=/path', 'bs=1M', 'count=1024',
run_as_root=True)
mox.ReplayAll()
drv._create_regular_file('/path', 1)
mox.VerifyAll()
def test_create_qcow2_file(self):
(mox, drv) = self._mox, self._driver
file_size = 1
mox.StubOutWithMock(drv, '_execute')
drv._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata', '/path',
'%s' % str(file_size * units.Gi), run_as_root=True)
mox.ReplayAll()
drv._create_qcow2_file('/path', file_size)
mox.VerifyAll()
def test_set_rw_permissions_for_all(self):
(mox, drv) = self._mox, self._driver
mox.StubOutWithMock(drv, '_execute')
drv._execute('chmod', 'ugo+rw', '/path', run_as_root=True)
mox.ReplayAll()
drv._set_rw_permissions_for_all('/path')
mox.VerifyAll()
@mock.patch.object(remotefs, 'LOG')
def test_set_rw_permissions_with_secure_file_permissions(self, LOG):
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT]
drv.configuration.nas_secure_file_permissions = 'true'
self.stubs.Set(drv, '_execute', mock.Mock())
drv._set_rw_permissions(self.TEST_FILE_NAME)
self.assertFalse(LOG.warn.called)
@mock.patch.object(remotefs, 'LOG')
def test_set_rw_permissions_without_secure_file_permissions(self, LOG):
drv = self._driver
self.configuration.nas_secure_file_permissions = 'false'
self.stubs.Set(drv, '_execute', mock.Mock())
drv._set_rw_permissions(self.TEST_FILE_NAME)
self.assertTrue(LOG.warn.called)
warn_msg = "%s is being set with open permissions: ugo+rw" % \
self.TEST_FILE_NAME
LOG.warn.assert_called_once_with(warn_msg)
@mock.patch('os.path.join')
@mock.patch('os.path.isfile', return_value=False)
def test_determine_nas_security_options_when_auto_and_new_install(
self,
mock_isfile,
mock_join):
"""Test the setting of the NAS Security Option
In this test case, we will create the marker file. No pre-existing
Cinder volumes are found during bootup.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT]
file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
is_new_install = True
drv._ensure_shares_mounted = mock.Mock()
nas_mount = drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
mock_join.return_value = file_path
secure_file_permissions = 'auto'
nas_option = drv._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
secure_file_operations = 'auto'
nas_option = drv._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
@mock.patch('os.path.join')
@mock.patch('os.path.isfile')
def test_determine_nas_security_options_when_auto_and_new_install_exists(
self,
isfile,
join):
"""Test the setting of the NAS Security Option
In this test case, the marker file already exists and Cinder volumes
are found during bootup.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT]
file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
nas_mount = drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
join.return_value = file_path
isfile.return_value = True
secure_file_permissions = 'auto'
nas_option = drv._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
secure_file_operations = 'auto'
nas_option = drv._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
@mock.patch('os.path.join')
@mock.patch('os.path.isfile')
def test_determine_nas_security_options_when_auto_and_old_install(self,
isfile,
join):
"""Test the setting of the NAS Security Option
In this test case, the marker file does not exist. There are also
pre-existing Cinder volumes.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT]
file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
nas_mount = drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
join.return_value = file_path
isfile.return_value = False
secure_file_permissions = 'auto'
nas_option = drv._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('false', nas_option)
secure_file_operations = 'auto'
nas_option = drv._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('false', nas_option)
def test_determine_nas_security_options_when_admin_set_true(self):
"""Test the setting of the NAS Security Option
In this test case, the Admin set the flag to 'true'.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT]
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
nas_mount = drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
secure_file_permissions = 'true'
nas_option = drv._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
secure_file_operations = 'true'
nas_option = drv._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
def test_determine_nas_security_options_when_admin_set_false(self):
"""Test the setting of the NAS Security Option
In this test case, the Admin set the flag to 'false'.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT]
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
nas_mount = drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
secure_file_permissions = 'false'
nas_option = drv._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('false', nas_option)
secure_file_operations = 'false'
nas_option = drv._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('false', nas_option)
@mock.patch.object(remotefs, 'LOG')
def test_set_nas_security_options(self, LOG):
"""Test setting of NAS Security options.
The RemoteFS driver will force set options to false. The derived
objects will provide an inherited interface to properly set options.
"""
drv = self._driver
is_new_install = False
drv.set_nas_security_options(is_new_install)
self.assertEqual('false', drv.configuration.nas_secure_file_operations)
self.assertEqual('false',
drv.configuration.nas_secure_file_permissions)
self.assertTrue(LOG.warn.called)
def test_secure_file_operations_enabled_true(self):
"""Test nas_secure_file_operations = 'true'
Networked file system based drivers may support secure file
operations. This test verifies the settings when secure.
"""
drv = self._driver
self.configuration.nas_secure_file_operations = 'true'
ret_flag = drv.secure_file_operations_enabled()
self.assertTrue(ret_flag)
def test_secure_file_operations_enabled_false(self):
"""Test nas_secure_file_operations = 'false'
Networked file system based drivers may support secure file
operations. This test verifies the settings when not secure.
"""
drv = self._driver
self.configuration.nas_secure_file_operations = 'false'
ret_flag = drv.secure_file_operations_enabled()
self.assertFalse(ret_flag)
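# Summary sketch (inferred from the tests above, not taken from the driver
# code itself): with the option left at 'auto',
# _determine_nas_security_option_setting is expected to behave as follows:
#
#   marker file exists                     -> 'true'  (secure mode already in use)
#   no marker file, new install            -> 'true'  (marker file gets created)
#   no marker file, pre-existing volumes   -> 'false' (keep legacy behaviour)
#
# An explicit 'true' or 'false' set by the admin is returned unchanged.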
class NfsDriverTestCase(test.TestCase):
"""Test case for NFS driver."""
TEST_NFS_HOST = 'nfs-host1'
TEST_NFS_SHARE_PATH = '/export'
TEST_NFS_EXPORT1 = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH)
TEST_NFS_EXPORT2 = 'nfs-host2:/export'
TEST_NFS_EXPORT2_OPTIONS = '-o intr'
TEST_SIZE_IN_GB = 1
TEST_MNT_POINT = '/mnt/nfs'
TEST_MNT_POINT_BASE = '/mnt/test'
TEST_LOCAL_PATH = '/mnt/nfs/volume-123'
TEST_FILE_NAME = 'test.txt'
TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf'
TEST_NFS_EXPORT_SPACES = 'nfs-host3:/export this'
TEST_MNT_POINT_SPACES = '/ 0 0 0 /foo'
def setUp(self):
super(NfsDriverTestCase, self).setUp()
self._mox = mox_lib.Mox()
self.stubs = stubout.StubOutForTesting()
self.configuration = mox_lib.MockObject(conf.Configuration)
self.configuration.append_config_values(mox_lib.IgnoreArg())
self.configuration.nfs_shares_config = None
self.configuration.nfs_sparsed_volumes = True
self.configuration.nfs_used_ratio = 0.95
self.configuration.nfs_oversub_ratio = 1.0
self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
self.configuration.nfs_mount_options = None
self.configuration.nfs_mount_attempts = 3
self.configuration.nfs_qcow2_volumes = False
self.configuration.nas_secure_file_permissions = 'false'
self.configuration.nas_secure_file_operations = 'false'
self.configuration.nas_ip = None
self.configuration.nas_share_path = None
self.configuration.nas_mount_options = None
self.configuration.volume_dd_blocksize = '1M'
self._driver = nfs.NfsDriver(configuration=self.configuration)
self._driver.shares = {}
self.addCleanup(self.stubs.UnsetAll)
self.addCleanup(self._mox.UnsetStubs)
def stub_out_not_replaying(self, obj, attr_name):
attr_to_replace = getattr(obj, attr_name)
stub = mox_lib.MockObject(attr_to_replace)
self.stubs.Set(obj, attr_name, stub)
def test_local_path(self):
"""local_path common use case."""
self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
drv = self._driver
volume = DumbVolume()
volume['provider_location'] = self.TEST_NFS_EXPORT1
volume['name'] = 'volume-123'
self.assertEqual(
'/mnt/test/2f4f60214cf43c595666dd815f0360a4/volume-123',
drv.local_path(volume))
def test_copy_image_to_volume(self):
"""resize_image common case usage."""
mox = self._mox
drv = self._driver
TEST_IMG_SOURCE = 'foo.img'
volume = {'size': self.TEST_SIZE_IN_GB, 'name': TEST_IMG_SOURCE}
def fake_local_path(volume):
return volume['name']
self.stubs.Set(drv, 'local_path', fake_local_path)
mox.StubOutWithMock(image_utils, 'fetch_to_raw')
image_utils.fetch_to_raw(None, None, None, TEST_IMG_SOURCE,
mox_lib.IgnoreArg(),
size=self.TEST_SIZE_IN_GB,
run_as_root=True)
mox.StubOutWithMock(image_utils, 'resize_image')
image_utils.resize_image(TEST_IMG_SOURCE, self.TEST_SIZE_IN_GB,
run_as_root=True)
mox.StubOutWithMock(image_utils, 'qemu_img_info')
data = mox_lib.MockAnything()
data.virtual_size = 1 * units.Gi
image_utils.qemu_img_info(TEST_IMG_SOURCE,
run_as_root=True).AndReturn(data)
mox.ReplayAll()
drv.copy_image_to_volume(None, volume, None, None)
mox.VerifyAll()
def test_get_mount_point_for_share(self):
"""_get_mount_point_for_share should calculate correct value."""
drv = self._driver
self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
self.assertEqual('/mnt/test/2f4f60214cf43c595666dd815f0360a4',
drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
def test_get_capacity_info(self):
"""_get_capacity_info should calculate correct value."""
mox = self._mox
drv = self._driver
stat_total_size = 2620544
stat_avail = 2129984
stat_output = '1 %d %d' % (stat_total_size, stat_avail)
du_used = 490560
du_output = '%d /mnt' % du_used
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1).\
AndReturn(self.TEST_MNT_POINT)
mox.StubOutWithMock(drv, '_execute')
drv._execute('stat', '-f', '-c', '%S %b %a',
self.TEST_MNT_POINT,
run_as_root=True).AndReturn((stat_output, None))
drv._execute('du', '-sb', '--apparent-size',
'--exclude', '*snapshot*',
self.TEST_MNT_POINT,
run_as_root=True).AndReturn((du_output, None))
mox.ReplayAll()
self.assertEqual((stat_total_size, stat_avail, du_used),
drv._get_capacity_info(self.TEST_NFS_EXPORT1))
mox.VerifyAll()
def test_get_capacity_info_for_share_and_mount_point_with_spaces(self):
"""_get_capacity_info should calculate correct value."""
mox = self._mox
drv = self._driver
stat_total_size = 2620544
stat_avail = 2129984
stat_output = '1 %d %d' % (stat_total_size, stat_avail)
du_used = 490560
du_output = '%d /mnt' % du_used
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
drv._get_mount_point_for_share(self.TEST_NFS_EXPORT_SPACES).\
AndReturn(self.TEST_MNT_POINT_SPACES)
mox.StubOutWithMock(drv, '_execute')
drv._execute('stat', '-f', '-c', '%S %b %a',
self.TEST_MNT_POINT_SPACES,
run_as_root=True).AndReturn((stat_output, None))
drv._execute('du', '-sb', '--apparent-size',
'--exclude', '*snapshot*',
self.TEST_MNT_POINT_SPACES,
run_as_root=True).AndReturn((du_output, None))
mox.ReplayAll()
self.assertEqual((stat_total_size, stat_avail, du_used),
drv._get_capacity_info(self.TEST_NFS_EXPORT_SPACES))
mox.VerifyAll()
def test_load_shares_config(self):
mox = self._mox
drv = self._driver
drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
mox.StubOutWithMock(drv, '_read_config_file')
config_data = []
config_data.append(self.TEST_NFS_EXPORT1)
config_data.append('#' + self.TEST_NFS_EXPORT2)
config_data.append('')
config_data.append(self.TEST_NFS_EXPORT2 + ' ' +
self.TEST_NFS_EXPORT2_OPTIONS)
config_data.append('broken:share_format')
drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\
AndReturn(config_data)
mox.ReplayAll()
drv._load_shares_config(drv.configuration.nfs_shares_config)
self.assertIn(self.TEST_NFS_EXPORT1, drv.shares)
self.assertIn(self.TEST_NFS_EXPORT2, drv.shares)
self.assertEqual(2, len(drv.shares))
self.assertEqual(self.TEST_NFS_EXPORT2_OPTIONS,
drv.shares[self.TEST_NFS_EXPORT2])
mox.VerifyAll()
def test_load_shares_config_nas_opts(self):
mox = self._mox
drv = self._driver
mox.StubOutWithMock(drv, '_read_config_file') # ensure not called
drv.configuration.nas_ip = self.TEST_NFS_HOST
drv.configuration.nas_share_path = self.TEST_NFS_SHARE_PATH
drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
mox.ReplayAll()
drv._load_shares_config(drv.configuration.nfs_shares_config)
self.assertIn(self.TEST_NFS_EXPORT1, drv.shares)
self.assertEqual(len(drv.shares), 1)
mox.VerifyAll()
def test_ensure_shares_mounted_should_save_mounting_successfully(self):
"""_ensure_shares_mounted should save share if mounted with success."""
mox = self._mox
drv = self._driver
mox.StubOutWithMock(drv, '_read_config_file')
config_data = []
config_data.append(self.TEST_NFS_EXPORT1)
drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\
AndReturn(config_data)
mox.StubOutWithMock(drv, '_ensure_share_mounted')
drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
drv._ensure_share_mounted(self.TEST_NFS_EXPORT1)
mox.ReplayAll()
drv._ensure_shares_mounted()
self.assertEqual(1, len(drv._mounted_shares))
self.assertEqual(self.TEST_NFS_EXPORT1, drv._mounted_shares[0])
mox.VerifyAll()
def test_ensure_shares_mounted_should_not_save_mounting_with_error(self):
"""_ensure_shares_mounted should not save share if failed to mount."""
config_data = []
config_data.append(self.TEST_NFS_EXPORT1)
self._driver.configuration.nfs_shares_config =\
self.TEST_SHARES_CONFIG_FILE
self.mock_object(self._driver, '_read_config_file',
mock.Mock(return_value=config_data))
self.mock_object(self._driver, '_ensure_share_mounted',
mock.Mock(side_effect=Exception()))
self.mock_object(remotefs, 'LOG')
self._driver._ensure_shares_mounted()
self.assertEqual(0, len(self._driver._mounted_shares))
self._driver._read_config_file.assert_called_once_with(
self.TEST_SHARES_CONFIG_FILE)
self._driver._ensure_share_mounted.assert_called_once_with(
self.TEST_NFS_EXPORT1)
self.assertEqual(1, remotefs.LOG.error.call_count)
def test_setup_should_throw_error_if_shares_config_not_configured(self):
"""do_setup should throw error if shares config is not configured."""
drv = self._driver
self.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
self.assertRaises(exception.NfsException,
drv.do_setup, mox_lib.IsA(context.RequestContext))
def test_setup_should_throw_error_if_oversub_ratio_less_than_zero(self):
"""do_setup should throw error if nfs_oversub_ratio is less than 0."""
drv = self._driver
self.configuration.nfs_oversub_ratio = -1
self.assertRaises(exception.NfsException,
drv.do_setup,
mox_lib.IsA(context.RequestContext))
def test_setup_should_throw_error_if_used_ratio_less_than_zero(self):
"""do_setup should throw error if nfs_used_ratio is less than 0."""
drv = self._driver
self.configuration.nfs_used_ratio = -1
self.assertRaises(exception.NfsException,
drv.do_setup,
mox_lib.IsA(context.RequestContext))
def test_setup_should_throw_error_if_used_ratio_greater_than_one(self):
"""do_setup should throw error if nfs_used_ratio is greater than 1."""
drv = self._driver
self.configuration.nfs_used_ratio = 2
self.assertRaises(exception.NfsException,
drv.do_setup,
mox_lib.IsA(context.RequestContext))
def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
"""do_setup should throw error if nfs client is not installed."""
mox = self._mox
drv = self._driver
self.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
mox.StubOutWithMock(os.path, 'exists')
os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True)
mox.StubOutWithMock(drv, '_execute')
drv._execute('mount.nfs', check_exit_code=False, run_as_root=False).\
AndRaise(OSError(errno.ENOENT, 'No such file or directory'))
mox.ReplayAll()
self.assertRaises(exception.NfsException,
drv.do_setup, mox_lib.IsA(context.RequestContext))
mox.VerifyAll()
def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self):
"""_find_share should throw error if there is no mounted shares."""
drv = self._driver
drv._mounted_shares = []
self.assertRaises(exception.NfsNoSharesMounted, drv._find_share,
self.TEST_SIZE_IN_GB)
def test_find_share(self):
"""_find_share simple use case."""
mox = self._mox
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
AndReturn((5 * units.Gi, 2 * units.Gi,
2 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
AndReturn((5 * units.Gi, 2 * units.Gi,
2 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
AndReturn((10 * units.Gi, 3 * units.Gi,
1 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
AndReturn((10 * units.Gi, 3 * units.Gi,
1 * units.Gi))
mox.ReplayAll()
self.assertEqual(self.TEST_NFS_EXPORT2,
drv._find_share(self.TEST_SIZE_IN_GB))
mox.VerifyAll()
def test_find_share_should_throw_error_if_there_is_no_enough_place(self):
"""_find_share should throw error if there is no share to host vol."""
mox = self._mox
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
AndReturn((5 * units.Gi, 0, 5 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
AndReturn((10 * units.Gi, 0,
10 * units.Gi))
mox.ReplayAll()
self.assertRaises(exception.NfsNoSuitableShareFound, drv._find_share,
self.TEST_SIZE_IN_GB)
mox.VerifyAll()
def _simple_volume(self):
volume = DumbVolume()
volume['provider_location'] = '127.0.0.1:/mnt'
volume['name'] = 'volume_name'
volume['size'] = 10
return volume
def test_create_sparsed_volume(self):
mox = self._mox
drv = self._driver
volume = self._simple_volume()
self.override_config('nfs_sparsed_volumes', True)
mox.StubOutWithMock(drv, '_create_sparsed_file')
mox.StubOutWithMock(drv, '_set_rw_permissions')
drv._create_sparsed_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._set_rw_permissions(mox_lib.IgnoreArg())
mox.ReplayAll()
drv._do_create_volume(volume)
mox.VerifyAll()
def test_create_nonsparsed_volume(self):
mox = self._mox
drv = self._driver
self.configuration.nfs_sparsed_volumes = False
volume = self._simple_volume()
self.override_config('nfs_sparsed_volumes', False)
mox.StubOutWithMock(drv, '_create_regular_file')
mox.StubOutWithMock(drv, '_set_rw_permissions')
drv._create_regular_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._set_rw_permissions(mox_lib.IgnoreArg())
mox.ReplayAll()
drv._do_create_volume(volume)
mox.VerifyAll()
def test_create_volume_should_ensure_nfs_mounted(self):
"""create_volume ensures shares provided in config are mounted."""
mox = self._mox
drv = self._driver
self.stub_out_not_replaying(nfs, 'LOG')
self.stub_out_not_replaying(drv, '_find_share')
self.stub_out_not_replaying(drv, '_do_create_volume')
mox.StubOutWithMock(drv, '_ensure_shares_mounted')
drv._ensure_shares_mounted()
mox.ReplayAll()
volume = DumbVolume()
volume['size'] = self.TEST_SIZE_IN_GB
drv.create_volume(volume)
mox.VerifyAll()
def test_create_volume_should_return_provider_location(self):
"""create_volume should return provider_location with found share."""
mox = self._mox
drv = self._driver
self.stub_out_not_replaying(nfs, 'LOG')
self.stub_out_not_replaying(drv, '_ensure_shares_mounted')
self.stub_out_not_replaying(drv, '_do_create_volume')
mox.StubOutWithMock(drv, '_find_share')
drv._find_share(self.TEST_SIZE_IN_GB).AndReturn(self.TEST_NFS_EXPORT1)
mox.ReplayAll()
volume = DumbVolume()
volume['size'] = self.TEST_SIZE_IN_GB
result = drv.create_volume(volume)
self.assertEqual(self.TEST_NFS_EXPORT1, result['provider_location'])
mox.VerifyAll()
def test_delete_volume(self):
"""delete_volume simple test case."""
mox = self._mox
drv = self._driver
self.stub_out_not_replaying(drv, '_ensure_share_mounted')
volume = DumbVolume()
volume['name'] = 'volume-123'
volume['provider_location'] = self.TEST_NFS_EXPORT1
mox.StubOutWithMock(drv, 'local_path')
drv.local_path(volume).AndReturn(self.TEST_LOCAL_PATH)
mox.StubOutWithMock(drv, '_execute')
drv._execute('rm', '-f', self.TEST_LOCAL_PATH, run_as_root=True)
mox.ReplayAll()
drv.delete_volume(volume)
mox.VerifyAll()
def test_delete_should_ensure_share_mounted(self):
"""delete_volume should ensure that corresponding share is mounted."""
mox = self._mox
drv = self._driver
self.stub_out_not_replaying(drv, '_execute')
volume = DumbVolume()
volume['name'] = 'volume-123'
volume['provider_location'] = self.TEST_NFS_EXPORT1
mox.StubOutWithMock(drv, '_ensure_share_mounted')
drv._ensure_share_mounted(self.TEST_NFS_EXPORT1)
mox.ReplayAll()
drv.delete_volume(volume)
mox.VerifyAll()
def test_delete_should_not_delete_if_provider_location_not_provided(self):
"""delete_volume shouldn't delete if provider_location missed."""
drv = self._driver
self.stubs.Set(drv, '_ensure_share_mounted', mock.Mock())
self.stubs.Set(drv, 'local_path', mock.Mock())
volume = DumbVolume()
volume['name'] = 'volume-123'
volume['provider_location'] = None
with mock.patch.object(drv, '_execute') as mock_execute:
drv.delete_volume(volume)
self.assertEqual(0, mock_execute.call_count)
def test_get_volume_stats(self):
"""get_volume_stats must fill the correct values."""
mox = self._mox
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
mox.StubOutWithMock(drv, '_ensure_shares_mounted')
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._ensure_shares_mounted()
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
AndReturn((10 * units.Gi, 2 * units.Gi,
2 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
AndReturn((20 * units.Gi, 3 * units.Gi,
3 * units.Gi))
mox.ReplayAll()
drv.get_volume_stats()
self.assertEqual(30.0, drv._stats['total_capacity_gb'])
self.assertEqual(5.0, drv._stats['free_capacity_gb'])
mox.VerifyAll()
def _check_is_share_eligible(self, total_size, total_available,
total_allocated, requested_volume_size):
with mock.patch.object(self._driver, '_get_capacity_info')\
as mock_get_capacity_info:
mock_get_capacity_info.return_value = (total_size,
total_available,
total_allocated)
return self._driver._is_share_eligible('fake_share',
requested_volume_size)
def test_is_share_eligible(self):
total_size = 100.0 * units.Gi
total_available = 90.0 * units.Gi
total_allocated = 10.0 * units.Gi
requested_volume_size = 1 # GiB
self.assertTrue(self._check_is_share_eligible(total_size,
total_available,
total_allocated,
requested_volume_size))
def test_is_share_eligible_above_used_ratio(self):
total_size = 100.0 * units.Gi
total_available = 4.0 * units.Gi
total_allocated = 96.0 * units.Gi
requested_volume_size = 1 # GiB
# Check used > used_ratio statement entered
self.assertFalse(self._check_is_share_eligible(total_size,
total_available,
total_allocated,
requested_volume_size))
def test_is_share_eligible_above_oversub_ratio(self):
total_size = 100.0 * units.Gi
total_available = 10.0 * units.Gi
total_allocated = 90.0 * units.Gi
requested_volume_size = 10 # GiB
# Check apparent_available <= requested_volume_size statement entered
self.assertFalse(self._check_is_share_eligible(total_size,
total_available,
total_allocated,
requested_volume_size))
def test_is_share_eligible_reserved_space_above_oversub_ratio(self):
total_size = 100.0 * units.Gi
total_available = 10.0 * units.Gi
total_allocated = 100.0 * units.Gi
requested_volume_size = 1 # GiB
# Check total_allocated / total_size >= oversub_ratio
# statement entered
self.assertFalse(self._check_is_share_eligible(total_size,
total_available,
total_allocated,
requested_volume_size))
def test_extend_volume(self):
"""Extend a volume by 1."""
drv = self._driver
volume = {'id': '80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', 'size': 1,
'provider_location': 'nfs_share'}
path = 'path'
newSize = volume['size'] + 1
with mock.patch.object(image_utils, 'resize_image') as resize:
with mock.patch.object(drv, 'local_path', return_value=path):
with mock.patch.object(drv, '_is_share_eligible',
return_value=True):
with mock.patch.object(drv, '_is_file_size_equal',
return_value=True):
drv.extend_volume(volume, newSize)
resize.assert_called_once_with(path, newSize,
run_as_root=True)
def test_extend_volume_failure(self):
"""Error during extend operation."""
drv = self._driver
volume = {'id': '80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', 'size': 1,
'provider_location': 'nfs_share'}
with mock.patch.object(image_utils, 'resize_image'):
with mock.patch.object(drv, 'local_path', return_value='path'):
with mock.patch.object(drv, '_is_share_eligible',
return_value=True):
with mock.patch.object(drv, '_is_file_size_equal',
return_value=False):
self.assertRaises(exception.ExtendVolumeError,
drv.extend_volume, volume, 2)
def test_extend_volume_insufficient_space(self):
"""Insufficient space on nfs_share during extend operation."""
drv = self._driver
volume = {'id': '80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', 'size': 1,
'provider_location': 'nfs_share'}
with mock.patch.object(image_utils, 'resize_image'):
with mock.patch.object(drv, 'local_path', return_value='path'):
with mock.patch.object(drv, '_is_share_eligible',
return_value=False):
with mock.patch.object(drv, '_is_file_size_equal',
return_value=False):
self.assertRaises(exception.ExtendVolumeError,
drv.extend_volume, volume, 2)
def test_is_file_size_equal(self):
"""File sizes are equal."""
drv = self._driver
path = 'fake/path'
size = 2
data = mock.MagicMock()
data.virtual_size = size * units.Gi
with mock.patch.object(image_utils, 'qemu_img_info',
return_value=data):
self.assertTrue(drv._is_file_size_equal(path, size))
def test_is_file_size_equal_false(self):
"""File sizes are not equal."""
drv = self._driver
path = 'fake/path'
size = 2
data = mock.MagicMock()
data.virtual_size = (size + 1) * units.Gi
with mock.patch.object(image_utils, 'qemu_img_info',
return_value=data):
self.assertFalse(drv._is_file_size_equal(path, size))
@mock.patch.object(nfs, 'LOG')
def test_set_nas_security_options_when_true(self, LOG):
"""Test higher level setting of NAS Security options.
The NFS driver overrides the base method with a driver specific
version.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
is_new_install = True
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._determine_nas_security_option_setting = mock.Mock(
return_value='true')
drv.set_nas_security_options(is_new_install)
self.assertEqual('true', drv.configuration.nas_secure_file_operations)
self.assertEqual('true', drv.configuration.nas_secure_file_permissions)
self.assertFalse(LOG.warn.called)
@mock.patch.object(nfs, 'LOG')
def test_set_nas_security_options_when_false(self, LOG):
"""Test higher level setting of NAS Security options.
The NFS driver overrides the base method with a driver specific
version.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._determine_nas_security_option_setting = mock.Mock(
return_value='false')
drv.set_nas_security_options(is_new_install)
self.assertEqual('false', drv.configuration.nas_secure_file_operations)
self.assertEqual('false',
drv.configuration.nas_secure_file_permissions)
self.assertTrue(LOG.warn.called)
def test_set_nas_security_options_exception_if_no_mounted_shares(self):
"""Ensure proper exception is raised if there are no mounted shares."""
drv = self._driver
drv._ensure_shares_mounted = mock.Mock()
drv._mounted_shares = []
is_new_cinder_install = 'does not matter'
self.assertRaises(exception.NfsNoSharesMounted,
drv.set_nas_security_options,
is_new_cinder_install)
def test_ensure_share_mounted(self):
"""Case where the mount works the first time."""
self.mock_object(self._driver._remotefsclient, 'mount')
drv = self._driver
drv.configuration.nfs_mount_attempts = 3
drv.shares = {self.TEST_NFS_EXPORT1: ''}
drv._ensure_share_mounted(self.TEST_NFS_EXPORT1)
self.assertEqual(1, drv._remotefsclient.mount.call_count)
def test_ensure_share_mounted_exception(self):
"""Make the configured number of attempts when mounts fail."""
num_attempts = 3
self.mock_object(self._driver._remotefsclient, 'mount',
mock.Mock(side_effect=Exception))
drv = self._driver
drv.configuration.nfs_mount_attempts = num_attempts
drv.shares = {self.TEST_NFS_EXPORT1: ''}
self.assertRaises(exception.NfsException, drv._ensure_share_mounted,
self.TEST_NFS_EXPORT1)
self.assertEqual(num_attempts, drv._remotefsclient.mount.call_count)
def test_ensure_share_mounted_at_least_one_attempt(self):
"""Make at least one mount attempt even if configured for less."""
min_num_attempts = 1
num_attempts = 0
self.mock_object(self._driver._remotefsclient, 'mount',
mock.Mock(side_effect=Exception))
drv = self._driver
drv.configuration.nfs_mount_attempts = num_attempts
drv.shares = {self.TEST_NFS_EXPORT1: ''}
self.assertRaises(exception.NfsException, drv._ensure_share_mounted,
self.TEST_NFS_EXPORT1)
self.assertEqual(min_num_attempts,
drv._remotefsclient.mount.call_count)
|
|
"""
This module provides convenient functions to transform sympy expressions to
lambda functions which can be used to calculate numerical values very fast.
"""
from __future__ import division
from sympy.core.sympify import sympify
# These are the namespaces the lambda functions will use.
MATH = {}
MPMATH = {}
NUMPY = {}
SYMPY = {}
# Mappings between sympy and other modules function names.
MATH_TRANSLATIONS = {
"abs":"fabs",
"ceiling":"ceil",
"E":"e",
"ln":"log",
}
MPMATH_TRANSLATIONS = {
"ceiling":"ceil",
"chebyshevt":"chebyt",
"chebyshevu":"chebyu",
"E":"e",
"I":"j",
"ln":"log",
#"lowergamma":"lower_gamma",
"oo":"inf",
#"uppergamma":"upper_gamma",
"LambertW":"lambertw",
"Matrix":"matrix",
"conjugate":"conj",
}
NUMPY_TRANSLATIONS = {
"acos":"arccos",
"acosh":"arccosh",
"arg":"angle",
"asin":"arcsin",
"asinh":"arcsinh",
"atan":"arctan",
"atan2":"arctan2",
"atanh":"arctanh",
"ceiling":"ceil",
"E":"e",
"im":"imag",
"ln":"log",
"Matrix":"matrix",
"max_":"amax",
"min_":"amin",
"oo":"inf",
"re":"real",
}
# Available modules:
MODULES = {
"math":(MATH, MATH_TRANSLATIONS, ("from math import *",)),
"mpmath":(MPMATH, MPMATH_TRANSLATIONS, ("from sympy.mpmath import *",)),
"numpy":(NUMPY, NUMPY_TRANSLATIONS, ("from numpy import *",)),
"sympy":(SYMPY, {}, ("from sympy.functions import *",
"from sympy.matrices import Matrix",
"from sympy import Integral"))
}
def _import(module, reload="False"):
"""
Creates a global translation dictionary for module.
The argument module has to be one of the following strings: "math",
"mpmath", "numpy", "sympy".
These dictionaries map names of python functions to their equivalent in
other modules.
"""
if module not in MODULES:
raise NameError("This module can't be used for lambdification.")
namespace, translations, import_commands = MODULES[module]
# Clear namespace or exit
if namespace:
# The namespace was already generated, don't do it again if not forced.
if reload:
namespace.clear()
else:
return
# It's possible that numpy is not available.
for import_command in import_commands:
try:
exec import_command in {}, namespace
except ImportError:
raise ImportError("Can't import %s with command %s" % (module, import_command))
# Add translated names to namespace
for sympyname, translation in translations.iteritems():
namespace[sympyname] = namespace[translation]
def lambdify(args, expr, modules=None):
"""
Returns a lambda function for fast calculation of numerical values.
Usage:
>>> from sympy import symbols, sqrt, sin
>>> x,y,z = symbols('xyz')
>>> f = lambdify(x, x**2)
>>> f(2)
4
>>> f = lambdify((x,y,z), [z,y,x])
>>> f(1,2,3)
[3, 2, 1]
>>> f = lambdify(x, sqrt(x))
>>> f(4)
2.0
>>> f = lambdify((x,y), sin(x*y)**2)
>>> f(0, 5)
0.0
If not specified differently by the user, Sympy functions are replaced as
far as possible by either python-math, numpy (if available) or mpmath
functions - exactly in this order.
To change this behaviour, the "modules" argument can be used.
It accepts:
- the strings "math", "mpmath", "numpy", "sympy"
- any modules (e.g. math)
- dictionaries that map names of sympy functions to arbitrary functions
- lists that contain a mix of the arguments above. (Entries that are first
in the list have higher priority)
Examples:
(1) Use one of the provided modules:
>> f = lambdify(x, sin(x), "math")
Attention: Functions that are not in the math module will throw a name
error when the lambda function is evaluated! So this would
be better:
>> f = lambdify(x, sin(x)*gamma(x), ("math", "mpmath", "sympy"))
(2) Use some other module:
>> import numpy
>> f = lambdify((x,y), tan(x*y), numpy)
Attention: There are naming differences between numpy and sympy. So if
you simply take the numpy module, e.g. sympy.atan will not be
translated to numpy.arctan. Use the modified module instead
by passing the string "numpy".
(3) Use own dictionaries:
>> def my_cool_function(x): ...
>> dic = {"sin" : my_cool_function}
>> f = lambdify(x, sin(x), dic)
Now f would look like:
>> lambda x: my_cool_function(x)
"""
# If the user hasn't specified any modules, use what is available.
if modules is None:
# Use either numpy (if available) or python.math where possible.
# XXX: This leads to different behaviour on different systems and
# might be the reason for irreproducible errors.
try:
_import("numpy")
modules = ("math", "numpy", "mpmath", "sympy")
except ImportError:
modules = ("math", "mpmath", "sympy")
# Get the needed namespaces.
if isinstance(modules, dict): # Check for dict before "__iter__"
namespace = _get_namespace(modules)
elif hasattr(modules, "__iter__"):
namespace = {}
for m in modules:
buf = _get_namespace(m)
buf.update(namespace)
namespace = buf
else:
namespace = _get_namespace(modules)
# Create lambda function.
lstr = lambdastr(args, expr)
return eval(lstr, namespace)
def _get_namespace(m):
"""
This is used by lambdify to parse its arguments.
"""
if isinstance(m, str):
_import(m)
return MODULES[m][0]
elif isinstance(m, dict):
return m
elif hasattr(m, "__dict__"):
return m.__dict__
else:
raise TypeError("Argument must be either a string, dict or module but it is: %s" % m)
def lambdastr(args, expr):
"""
Returns a string that can be evaluated to a lambda function.
>>> from sympy import symbols
>>> x,y,z = symbols('xyz')
>>> lambdastr(x, x**2)
'lambda x: (x**2)'
>>> lambdastr((x,y,z), [z,y,x])
'lambda x,y,z: ([z, y, x])'
"""
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import lambdarepr
# Transform everything to strings.
expr = lambdarepr(expr)
if isinstance(args, str):
pass
elif hasattr(args, "__iter__"):
args = ",".join(str(a) for a in args)
else:
args = str(args)
return "lambda %s: (%s)" % (args, expr)
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from copy import deepcopy
import inspect
import itertools
from types import FunctionType
from .common import * # pylint: disable=redefined-builtin
from .datastructures import OrderedDict, Context
from .exceptions import *
from .transforms import (
atoms, export_loop,
convert, to_native, to_primitive,
)
from .validate import validate, prepare_validator
from .types import BaseType
from .types.serializable import Serializable
from .undefined import Undefined
from .util import get_ident
class FieldDescriptor(object):
"""
``FieldDescriptor`` instances serve as field accessors on models.
"""
def __init__(self, name):
"""
:param name:
The field's name
"""
self.name = name
def __get__(self, instance, cls):
"""
For a model instance, returns the field's current value.
For a model class, returns the field's type object.
"""
if instance is None:
return cls._fields[self.name]
else:
value = instance._data.get(self.name, Undefined)
if value is Undefined:
raise UndefinedValueError(instance, self.name)
else:
return value
def __set__(self, instance, value):
"""
Sets the field's value.
"""
field = instance._fields[self.name]
value = field.pre_setattr(value)
instance._data[self.name] = value
def __delete__(self, instance):
"""
Deletes the field's value.
"""
del instance._data[self.name]
class ModelOptions(object):
"""
This class is a container for all model configuration options. Its
primary purpose is to create an independent instance of a model's
options for every class.
"""
def __init__(self, klass, namespace=None, roles=None, export_level=DEFAULT,
serialize_when_none=None, export_order=False):
"""
:param klass:
The class which this options instance belongs to.
:param namespace:
A namespace identifier that can be used with persistence layers.
:param roles:
Allows specifying certain subsets of the model's fields for
serialization.
:param serialize_when_none:
When ``False``, serialization skips fields that are None.
Default: ``True``
:param export_order:
Specifies whether to maintain the original field order when exporting
the model. This entails returning an ``OrderedDict`` instead of
a regular dictionary.
Default: ``False``
"""
self.klass = klass
self.namespace = namespace
self.roles = roles or {}
self.export_level = export_level
if serialize_when_none is True:
self.export_level = DEFAULT
elif serialize_when_none is False:
self.export_level = NONEMPTY
self.export_order = export_order
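# Quick sketch of how the legacy ``serialize_when_none`` flag maps onto
# ``export_level`` (DEFAULT and NONEMPTY come from ``.common``, imported above;
# ``SomeModel`` is just a placeholder class):
#
#   ModelOptions(SomeModel)                            -> export_level == DEFAULT
#   ModelOptions(SomeModel, serialize_when_none=False) -> export_level == NONEMPTY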
class ModelMeta(type):
"""
Metaclass for Models.
"""
def __new__(mcs, name, bases, attrs):
"""
This metaclass adds four attributes to host classes: mcs._fields,
mcs._serializables, mcs._validator_functions, and mcs._options.
This function creates those attributes like this:
``mcs._fields`` is a mapping of field names to Schematics type instances
``mcs._serializables`` is a mapping of field names to ``Serializable`` objects
``mcs._validator_functions`` are class-level validation functions
``mcs._options`` is the end result of parsing the ``Options`` class
"""
# Structures used to accumulate meta info
fields = OrderedDict()
serializables = {}
validator_functions = {} # Model level
# Accumulate meta info from parent classes
for base in reversed(bases):
if hasattr(base, '_fields'):
fields.update(deepcopy(base._fields))
if hasattr(base, '_serializables'):
serializables.update(deepcopy(base._serializables))
if hasattr(base, '_validator_functions'):
validator_functions.update(base._validator_functions)
# Parse this class's attributes into meta structures
for key, value in iteritems(attrs):
if key.startswith('validate_') and isinstance(value, (FunctionType, classmethod)):
validator_functions[key[9:]] = prepare_validator(value, 4)
if isinstance(value, BaseType):
fields[key] = value
if isinstance(value, Serializable):
serializables[key] = value
# Parse meta options
options = mcs._read_options(name, bases, attrs)
# Convert list of types into fields for new klass
fields.sort(key=lambda i: i[1]._position_hint)
for key, field in iteritems(fields):
attrs[key] = FieldDescriptor(key)
for key, serializable in iteritems(serializables):
attrs[key] = serializable
# Ready meta data to be klass attributes
attrs['_fields'] = fields
attrs['_field_list'] = list(fields.items())
attrs['_serializables'] = serializables
attrs['_validator_functions'] = validator_functions
attrs['_options'] = options
klass = type.__new__(mcs, name, bases, attrs)
klass = str_compat(klass)
# Register class on ancestor models
klass._subclasses = []
for base in klass.__mro__[1:]:
if isinstance(base, ModelMeta):
base._subclasses.append(klass)
# Finalize fields
for field_name, field in fields.items():
field._setup(field_name, klass)
for field_name, field in serializables.items():
field._setup(field_name, klass)
klass._valid_input_keys = (
set(itertools.chain(*(field.get_input_keys() for field in fields.values())))
| set(serializables))
return klass
@classmethod
def _read_options(mcs, name, bases, attrs):
"""
Parses the ``Options`` inner class (plus any inherited options) into the
``ModelOptions`` instance attached to the model class.
"""
options_members = {}
for base in reversed(bases):
if hasattr(base, "_options"):
for key, value in inspect.getmembers(base._options):
if not key.startswith("_") and not key == "klass":
options_members[key] = value
options_class = attrs.get('__optionsclass__', ModelOptions)
if 'Options' in attrs:
for key, value in inspect.getmembers(attrs['Options']):
if not key.startswith("_"):
if key == "roles":
roles = options_members.get("roles", {}).copy()
roles.update(value)
options_members["roles"] = roles
else:
options_members[key] = value
return options_class(mcs, **options_members)
@property
def fields(cls):
return cls._fields
@metaclass(ModelMeta)
class Model(object):
"""
Enclosure for fields and validation. Same pattern deployed by Django
models, SQLAlchemy declarative extension and other developer friendly
libraries.
:param Mapping raw_data:
The data to be imported into the model instance.
:param Mapping deserialize_mapping:
Can be used to provide alternative input names for fields. Values may be
strings or lists of strings, keyed by the actual field name.
:param bool partial:
Allow partial data to validate. Essentially drops the ``required=True``
settings from field definitions. Default: True
:param bool strict:
Complain about unrecognized keys. Default: True
"""
__optionsclass__ = ModelOptions
def __init__(self, raw_data=None, trusted_data=None, deserialize_mapping=None,
init=True, partial=True, strict=True, validate=False, app_data=None,
**kwargs):
self._initial = raw_data or {}
kwargs.setdefault('init_values', init)
kwargs.setdefault('apply_defaults', init)
self._data = self.convert(raw_data,
trusted_data=trusted_data, mapping=deserialize_mapping,
partial=partial, strict=strict, validate=validate, new=True,
app_data=app_data, **kwargs)
def validate(self, partial=False, convert=True, app_data=None, **kwargs):
"""
Validates the state of the model. If the data is invalid, raises a ``DataError``
with error messages.
:param bool partial:
Allow partial data to validate. Essentially drops the ``required=True``
settings from field definitions. Default: False
:param convert:
Controls whether to perform import conversion before validating.
Can be turned off to skip an unnecessary conversion step if all values
are known to have the right datatypes (e.g., when validating immediately
after the initial import). Default: True
"""
data = self.convert(self, validate=True, partial=partial, convert=convert,
app_data=app_data, **kwargs)
if convert:
self._data.update(**data)
def import_data(self, raw_data, recursive=False, **kwargs):
"""
Converts and imports the raw data into an existing model instance.
:param raw_data:
The data to be imported.
"""
self._data = self.convert(raw_data, trusted_data=self, recursive=recursive, **kwargs)
return self
@classmethod
def convert(cls, raw_data, context=None, **kw):
"""
Converts the raw data into richer Python constructs according to the
fields on the model
:param raw_data:
The data to be converted
"""
_validate = getattr(context, 'validate', None) or kw.get('validate', False)
if _validate:
return validate(cls, raw_data, oo=True, context=context, **kw)
else:
return convert(cls, raw_data, oo=True, context=context, **kw)
def export(self, field_converter=None, role=None, app_data=None, **kwargs):
return export_loop(self.__class__, self, field_converter=field_converter,
role=role, app_data=app_data, **kwargs)
def to_native(self, role=None, app_data=None, **kwargs):
return to_native(self.__class__, self, role=role, app_data=app_data, **kwargs)
def to_primitive(self, role=None, app_data=None, **kwargs):
return to_primitive(self.__class__, self, role=role, app_data=app_data, **kwargs)
def serialize(self, *args, **kwargs):
return self.to_primitive(*args, **kwargs)
def atoms(self):
"""
Iterator for the atomic components of a model definition and relevant
data that creates a 3-tuple of the field's name, its type instance and
its value.
"""
return atoms(self.__class__, self)
def __iter__(self):
return (k for k in self._fields if k in self._data)
def keys(self):
return list(iter(self))
def items(self):
return [(k, self._data[k]) for k in self]
def values(self):
return [self._data[k] for k in self]
def get(self, key, default=None):
return getattr(self, key, default)
@classmethod
def get_mock_object(cls, context=None, overrides={}):
"""Get a mock object.
:param dict context:
:param dict overrides: overrides for the model
"""
context = Context._make(context)
context._setdefault('memo', set())
context.memo.add(cls)
values = {}
for name, field in cls.fields.items():
if name in overrides:
continue
if getattr(field, 'model_class', None) in context.memo:
continue
try:
values[name] = field.mock(context)
except MockCreationError as exc:
raise MockCreationError('%s: %s' % (name, exc.message))
values.update(overrides)
return cls(values)
def __getitem__(self, name):
if name in self._fields or name in self._serializables:
return getattr(self, name)
else:
raise UnknownFieldError(self, name)
def __setitem__(self, name, value):
if name in self._fields:
return setattr(self, name, value)
else:
raise UnknownFieldError(self, name)
def __delitem__(self, name):
if name in self._fields:
return delattr(self, name)
else:
raise UnknownFieldError(self, name)
def __contains__(self, name):
return name in self._data \
or name in self._serializables and getattr(self, name, Undefined) is not Undefined
def __len__(self):
return len(self._data)
def __eq__(self, other, memo=set()):
if self is other:
return True
if type(self) is not type(other):
return NotImplemented
key = (id(self), id(other), get_ident())
if key in memo:
return True
else:
memo.add(key)
try:
return self._data == other._data
finally:
memo.remove(key)
def __ne__(self, other):
return not self == other
def __repr__(self):
model = self.__class__.__name__
info = self._repr_info()
if info:
return '<%s: %s>' % (model, info)
else:
return '<%s instance>' % model
def _repr_info(self):
"""
Subclasses may implement this method to augment the ``__repr__()`` output for the instance::
class Person(Model):
...
def _repr_info(self):
return self.name
>>> Person({'name': 'Mr. Pink'})
<Person: Mr. Pink>
"""
return None
__all__ = module_exports(__name__)
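# Minimal usage sketch (illustrative; ``StringType`` lives in schematics.types
# and is not imported by this module):
#
#   class Person(Model):
#       name = StringType(required=True)
#
#   person = Person({'name': 'Mr. Pink'})
#   person.validate()        # raises DataError if required data is missing
#   person.to_primitive()    # -> {'name': 'Mr. Pink'}
#
# The metaclass collects ``name`` into ``Person._fields`` and installs a
# ``FieldDescriptor`` for it, which is why ``person.name`` returns the value.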
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
# pylint: disable=no-self-use
"""
csv2ofx.ofx
~~~~~~~~~~~
Provides methods for generating OFX content
Examples:
literal blocks::
python example_google.py
Attributes:
ENCODING (str): Default file encoding.
"""
from datetime import datetime as dt
from builtins import *
from meza.fntools import chunk, xmlize
from meza.process import group
from . import Content, utils
class OFX(Content):
"""An OFX object"""
def __init__(self, mapping=None, **kwargs):
"""OFX constructor
Args:
mapping (dict): bank mapper (see csv2ofx.mappings)
kwargs (dict): Keyword arguments
Kwargs:
def_type (str): Default account type.
start (date): Date from which to begin including transactions.
end (date): Date from which to exclude transactions.
Examples:
>>> from csv2ofx.mappings.mint import mapping
>>> OFX(mapping) # doctest: +ELLIPSIS
<csv2ofx.ofx.OFX object at 0x...>
"""
# TODO: Add timezone info # pylint: disable=fixme
super(OFX, self).__init__(mapping, **kwargs)
self.resp_type = "INTRATRNRS" if self.split_account else "STMTTRNRS"
self.def_type = kwargs.get("def_type")
self.prev_group = None
self.account_types = {
"CHECKING": ("checking", "income", "receivable", "payable"),
"SAVINGS": ("savings",),
"MONEYMRKT": ("market", "cash", "expenses"),
"CREDITLINE": ("visa", "master", "express", "discover"),
}
def header(self, **kwargs):
""" Gets OFX format transaction content
Kwargs:
date (datetime): The datetime (default: `datetime.now()`).
language (str): The ISO formatted language (default: ENG).
Returns:
(str): the OFX content
Examples:
>>> kwargs = {'date': dt(2012, 1, 15)}
>>> header = 'DATA:OFXSGMLENCODING:UTF-8<OFX><SIGNONMSGSRSV1>\
<SONRS><STATUS><CODE>0</CODE><SEVERITY>INFO</SEVERITY></STATUS><DTSERVER>\
20120115000000</DTSERVER><LANGUAGE>ENG</LANGUAGE></SONRS></SIGNONMSGSRSV1>\
<BANKMSGSRSV1><STMTTRNRS><TRNUID></TRNUID><STATUS><CODE>0</CODE><SEVERITY>INFO\
</SEVERITY></STATUS>'
>>> result = OFX().header(**kwargs)
>>> header == result.replace('\\n', '').replace('\\t', '')
True
"""
kwargs.setdefault("language", "ENG")
# yyyymmddhhmmss
time_stamp = kwargs.get("date", dt.now()).strftime("%Y%m%d%H%M%S")
content = "DATA:OFXSGML\n"
content += "ENCODING:UTF-8\n"
content += "<OFX>\n"
content += "\t<SIGNONMSGSRSV1>\n"
content += "\t\t<SONRS>\n"
content += "\t\t\t<STATUS>\n"
content += "\t\t\t\t<CODE>0</CODE>\n"
content += "\t\t\t\t<SEVERITY>INFO</SEVERITY>\n"
content += "\t\t\t</STATUS>\n"
content += "\t\t\t<DTSERVER>%s</DTSERVER>\n" % time_stamp
content += "\t\t\t<LANGUAGE>%(language)s</LANGUAGE>\n" % kwargs
content += "\t\t</SONRS>\n"
content += "\t</SIGNONMSGSRSV1>\n"
content += "\t<BANKMSGSRSV1>\n"
content += "\t\t<%s>\n" % self.resp_type
content += "\t\t\t<TRNUID></TRNUID>\n"
content += "\t\t\t<STATUS>\n"
content += "\t\t\t\t<CODE>0</CODE>\n"
content += "\t\t\t\t<SEVERITY>INFO</SEVERITY>\n"
content += "\t\t\t</STATUS>\n"
return content
def transaction_data(self, trxn):
"""gets OFX transaction data
Args:
trxn (dict): the transaction
Returns:
(dict): the OFX transaction data
Examples:
>>> import datetime
>>> from csv2ofx.mappings.mint import mapping
>>> from decimal import Decimal
>>> trxn = {
... 'Transaction Type': 'DEBIT', 'Amount': 1000.00,
... 'Date': '06/12/10', 'Description': 'payee',
... 'Original Description': 'description', 'Notes': 'notes',
... 'Category': 'Checking', 'Account Name': 'account'}
>>> OFX(mapping, def_type='CHECKING').transaction_data(trxn) == {
... 'account_id': 'e268443e43d93dab7ebef303bbe9642f',
... 'account': 'account', 'currency': 'USD',
... 'account_type': 'CHECKING', 'shares': Decimal('0'),
... 'is_investment': False, 'bank': 'account',
... 'split_account_type': 'CHECKING',
... 'split_account_id': '195917574edc9b6bbeb5be9785b6a479',
... 'class': None, 'amount': Decimal('-1000.00'),
... 'memo': 'description notes',
... 'id': 'ee86450a47899254e2faa82dca3c2cf2',
... 'split_account': 'Checking', 'action': '', 'payee': 'payee',
... 'date': dt(2010, 6, 12, 0, 0), 'category': '',
... 'bank_id': 'e268443e43d93dab7ebef303bbe9642f',
... 'price': Decimal('0'), 'symbol': '', 'check_num': None,
... 'inv_split_account': None, 'x_action': '', 'type': 'DEBIT'}
True
"""
data = super(OFX, self).transaction_data(trxn)
args = [self.account_types, self.def_type]
split = data["split_account"]
sa_type = utils.get_account_type(split, *args) if split else None
memo = data.get("memo")
_class = data.get("class")
memo = "%s %s" % (memo, _class) if memo and _class else memo or _class
new_data = {
"account_type": utils.get_account_type(data["account"], *args),
"split_account_type": sa_type,
"memo": memo,
}
data.update(new_data)
return data
def account_start(self, **kwargs):
""" Gets OFX format transaction account start content
Args:
kwargs (dict): Output from `transaction_data`.
Kwargs:
currency (str): The ISO formatted currency (required).
bank_id (str): A unique bank identifier (required).
account_id (str): A unique account identifier (required).
account_type (str): The account type. One of [
'CHECKING', 'SAVINGS', 'MONEYMRKT', 'CREDITLINE'] (required).
Returns:
(str): the OFX content
Examples:
>>> kwargs = {'start': dt(2012, 1, 1), 'end': dt(2012, 2, 1)}
>>> akwargs = {'currency': 'USD', 'bank_id': 1, 'account_id': 1, \
'account_type': 'CHECKING'}
>>> start = '<STMTRS><CURDEF>USD</CURDEF><BANKACCTFROM><BANKID>1\
</BANKID><ACCTID>1</ACCTID><ACCTTYPE>CHECKING</ACCTTYPE></BANKACCTFROM>\
<BANKTRANLIST><DTSTART>20120101</DTSTART><DTEND>20120201</DTEND>'
>>> result = OFX(**kwargs).account_start(**akwargs)
>>> start == result.replace('\\n', '').replace('\\t', '')
True
"""
kwargs.update(
{
"start_date": self.start.strftime("%Y%m%d"),
"end_date": self.end.strftime("%Y%m%d"),
}
)
content = "\t\t\t<STMTRS>\n"
content += "\t\t\t\t<CURDEF>%(currency)s</CURDEF>\n" % kwargs
content += "\t\t\t\t<BANKACCTFROM>\n"
content += "\t\t\t\t\t<BANKID>%(bank_id)s</BANKID>\n" % kwargs
content += "\t\t\t\t\t<ACCTID>%(account_id)s</ACCTID>\n" % kwargs
content += "\t\t\t\t\t<ACCTTYPE>%(account_type)s</ACCTTYPE>\n" % kwargs
content += "\t\t\t\t</BANKACCTFROM>\n"
content += "\t\t\t\t<BANKTRANLIST>\n"
content += "\t\t\t\t\t<DTSTART>%(start_date)s</DTSTART>\n" % kwargs
content += "\t\t\t\t\t<DTEND>%(end_date)s</DTEND>\n" % kwargs
return content
def transaction(self, **kwargs):
""" Gets OFX format transaction content
Args:
kwargs (dict): Output from `transaction_data`.
Kwargs:
date (datetime): the transaction date (required)
type (str): the transaction type (required)
amount (number): the transaction amount (required)
id (str): the transaction id (required)
check_num (str): the check num
payee (str): the payee (required)
memo (str): the transaction memo
Returns:
(str): the OFX content
Examples:
>>> kwargs = {'date': dt(2012, 1, 15), 'type': 'DEBIT', \
'amount': 100, 'id': 1, 'check_num': 1, 'payee': 'payee', 'memo': 'memo'}
>>> trxn = '<STMTTRN><TRNTYPE>DEBIT</TRNTYPE><DTPOSTED>\
20120115000000</DTPOSTED><TRNAMT>100.00</TRNAMT><FITID>1</FITID><CHECKNUM>1\
</CHECKNUM><NAME>payee</NAME><MEMO>memo</MEMO></STMTTRN>'
>>> result = OFX().transaction(**kwargs)
>>> trxn == result.replace('\\n', '').replace('\\t', '')
True
"""
time_stamp = kwargs["date"].strftime("%Y%m%d%H%M%S") # yyyymmddhhmmss
content = "\t\t\t\t\t<STMTTRN>\n"
content += "\t\t\t\t\t\t<TRNTYPE>%(type)s</TRNTYPE>\n" % kwargs
content += "\t\t\t\t\t\t<DTPOSTED>%s</DTPOSTED>\n" % time_stamp
content += "\t\t\t\t\t\t<TRNAMT>%(amount)0.2f</TRNAMT>\n" % kwargs
content += "\t\t\t\t\t\t<FITID>%(id)s</FITID>\n" % kwargs
if kwargs.get("check_num") is not None:
extra = "\t\t\t\t\t\t<CHECKNUM>%(check_num)s</CHECKNUM>\n"
content += extra % kwargs
if kwargs.get("payee") is not None:
content += "\t\t\t\t\t\t<NAME>%(payee)s</NAME>\n" % kwargs
if kwargs.get("memo"):
content += "\t\t\t\t\t\t<MEMO>%(memo)s</MEMO>\n" % kwargs
content += "\t\t\t\t\t</STMTTRN>\n"
return content
def account_end(self, **kwargs):
""" Gets OFX format transaction account end content
Kwargs:
date (datetime): the transaction date (required)
balance (number): the account balance
Returns:
(str): the OFX content
Examples:
>>> kwargs = {'balance': 150, 'date': dt(2012, 1, 15)}
>>> end = '</BANKTRANLIST><LEDGERBAL><BALAMT>150.00</BALAMT>\
<DTASOF>20120115000000</DTASOF></LEDGERBAL></STMTRS>'
>>> result = OFX().account_end(**kwargs)
>>> end == result.replace('\\n', '').replace('\\t', '')
True
"""
time_stamp = kwargs["date"].strftime("%Y%m%d%H%M%S") # yyyymmddhhmmss
content = "\t\t\t\t</BANKTRANLIST>\n"
if kwargs.get("balance") is not None:
content += "\t\t\t\t<LEDGERBAL>\n"
content += "\t\t\t\t\t<BALAMT>%(balance)0.2f</BALAMT>\n" % kwargs
content += "\t\t\t\t\t<DTASOF>%s</DTASOF>\n" % time_stamp
content += "\t\t\t\t</LEDGERBAL>\n"
content += "\t\t\t</STMTRS>\n"
return content
def transfer(self, **kwargs):
""" Gets OFX transfer start
Args:
kwargs (dict): Output from `transaction_data`.
Kwargs:
account_type (str): The account type. One of [
'CHECKING', 'SAVINGS', 'MONEYMRKT', 'CREDITLINE']
currency (str): The ISO formatted currency (required).
id (str):
amount (number): the transaction amount (required)
bank_id (str): A unique bank identifier (required).
account_id (str): A unique account identifier (required).
account_type (str): The account type. One of [
'CHECKING', 'SAVINGS', 'MONEYMRKT', 'CREDITLINE'] (required).
Returns:
(str): the start of an OFX transfer
Examples:
>>> kwargs = {'currency': 'USD', 'date': dt(2012, 1, 15), \
'bank_id': 1, 'account_id': 1, 'account_type': 'CHECKING', 'amount': 100, \
'id': 'jbaevf'}
>>> trxn = '<INTRARS><CURDEF>USD</CURDEF><SRVRTID>jbaevf</SRVRTID>\
<XFERINFO><TRNAMT>100.00</TRNAMT><BANKACCTFROM><BANKID>1</BANKID><ACCTID>1\
</ACCTID><ACCTTYPE>CHECKING</ACCTTYPE></BANKACCTFROM>'
>>> result = OFX().transfer(**kwargs)
>>> trxn == result.replace('\\n', '').replace('\\t', '')
True
"""
content = "\t\t\t<INTRARS>\n"
content += "\t\t\t\t<CURDEF>%(currency)s</CURDEF>\n" % kwargs
content += "\t\t\t\t<SRVRTID>%(id)s</SRVRTID>\n" % kwargs
content += "\t\t\t\t<XFERINFO>\n"
content += "\t\t\t\t\t<TRNAMT>%(amount)0.2f</TRNAMT>\n" % kwargs
content += "\t\t\t\t\t<BANKACCTFROM>\n"
content += "\t\t\t\t\t\t<BANKID>%(bank_id)s</BANKID>\n" % kwargs
content += "\t\t\t\t\t\t<ACCTID>%(account_id)s</ACCTID>\n" % kwargs
content += "\t\t\t\t\t\t<ACCTTYPE>%(account_type)s" % kwargs
content += "</ACCTTYPE>\n"
content += "\t\t\t\t\t</BANKACCTFROM>\n"
return content
def split_content(self, **kwargs):
""" Gets OFX split content
Args:
kwargs (dict): Output from `transaction_data`.
Kwargs:
split_account (str): Account to use as the transfer recipient.
(useful in cases when the transaction data isn't already split)
bank_id (str): A unique bank identifier (required).
split_account_id (str): A unique account identifier (required if a
`split_account` is given).
split_account_type (str): The account type. One of [
'CHECKING', 'SAVINGS', 'MONEYMRKT', 'CREDITLINE'] (required if
a `split_account` is given).
account_id (str): A unique account identifier (required if a
`split_account` isn't given).
account_type (str): The account type. One of [
'CHECKING', 'SAVINGS', 'MONEYMRKT', 'CREDITLINE'] (required if
a `split_account` isn't given).
Returns:
(str): the OFX split content
Examples:
>>> kwargs = {'bank_id': 1, 'split_account': 'Checking', \
'split_account_id': 2, 'split_account_type': 'CHECKING', 'amount': 100 , \
'id': 'jbaevf'}
>>> split = '<BANKACCTTO><BANKID>1</BANKID><ACCTID>2</ACCTID>\
<ACCTTYPE>CHECKING</ACCTTYPE></BANKACCTTO>'
>>> result = OFX().split_content(**kwargs)
>>> split == result.replace('\\n', '').replace('\\t', '')
True
>>> kwargs = {'bank_id': 1, 'account': 'Checking', 'account_id': \
3, 'account_type': 'CHECKING', 'amount': 100 , 'id': 'jbaevf'}
>>> split = '<BANKACCTTO><BANKID>1</BANKID><ACCTID>3</ACCTID>\
<ACCTTYPE>CHECKING</ACCTTYPE></BANKACCTTO>'
>>> result = OFX().split_content(**kwargs)
>>> split == result.replace('\\n', '').replace('\\t', '')
True
"""
content = "\t\t\t\t\t<BANKACCTTO>\n"
content += "\t\t\t\t\t\t<BANKID>%(bank_id)s</BANKID>\n" % kwargs
if kwargs.get("split_account"):
content += "\t\t\t\t\t\t<ACCTID>%(split_account_id)s" % kwargs
else:
content += "\t\t\t\t\t\t<ACCTID>%(account_id)s" % kwargs
content += "</ACCTID>\n"
if kwargs.get("split_account"):
content += "\t\t\t\t\t\t<ACCTTYPE>%(split_account_type)s" % kwargs
else:
content += "\t\t\t\t\t\t<ACCTTYPE>%(account_type)s" % kwargs
content += "</ACCTTYPE>\n"
content += "\t\t\t\t\t</BANKACCTTO>\n"
return content
# pylint: disable=unused-argument
def transfer_end(self, date=None, **kwargs):
"""Gets OFX transfer end
Args:
date (datetime): the transfer date (required)
Returns:
(str): the end of an OFX transfer
Examples:
>>> end = '</XFERINFO><DTPOSTED>20120115000000</DTPOSTED></INTRARS>'
>>> result = OFX().transfer_end(dt(2012, 1, 15))
>>> end == result.replace('\\n', '').replace('\\t', '')
True
"""
time_stamp = date.strftime("%Y%m%d%H%M%S") # yyyymmddhhmmss
content = "\t\t\t\t</XFERINFO>\n"
content += "\t\t\t\t<DTPOSTED>%s</DTPOSTED>\n" % time_stamp
content += "\t\t\t</INTRARS>\n"
return content
def footer(self, **kwargs):
"""Gets OFX transfer end
Kwargs:
date (datetime): The datetime (default: `datetime.now()`).
Returns:
(str): the OFX content
Examples:
>>> ft = '</BANKTRANLIST></STMTRS></STMTTRNRS></BANKMSGSRSV1></OFX>'
>>> result = OFX().footer(date=dt(2012, 1, 15))
>>> ft == result.replace('\\n', '').replace('\\t', '')
True
"""
kwargs.setdefault("date", dt.now())
if self.is_split:
content = self.transfer_end(**kwargs)
elif not self.split_account:
content = self.account_end(**kwargs)
else:
content = ""
content += "\t\t</%s>\n\t</BANKMSGSRSV1>\n</OFX>\n" % self.resp_type
return content
def gen_body(self, data): # noqa: C901
"""Generate the OFX body"""
for datum in data:
grp = datum["group"]
if self.is_split and datum["len"] > 2:
# OFX doesn't support more than 2 splits
raise TypeError("Group %s has too many splits.\n" % grp)
trxn_data = self.transaction_data(datum["trxn"])
split_like = self.is_split or self.split_account
full_split = self.is_split and self.split_account
new_group = self.prev_group and self.prev_group != grp
if new_group and full_split:
yield self.transfer_end(**trxn_data)
elif new_group and not split_like:
yield self.account_end(**trxn_data)
if self.split_account:
yield self.transfer(**trxn_data)
yield self.split_content(**trxn_data)
yield self.transfer_end(**trxn_data)
elif self.is_split and datum["is_main"]:
yield self.transfer(**trxn_data)
elif self.is_split:
yield self.split_content(**trxn_data)
elif datum["is_main"]:
yield self.account_start(**trxn_data)
yield self.transaction(**trxn_data)
else:
yield self.transaction(**trxn_data)
self.prev_group = grp
def gen_groups(self, records, chunksize=None):
"""Generate the OFX groups"""
for chnk in chunk(records, chunksize):
cleansed = [{k: next(xmlize([v])) for k, v in c.items()} for c in chnk]
keyfunc = self.id if self.is_split else self.account
for gee in group(cleansed, keyfunc):
yield gee
|
|
"""
Query specifications.
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
The central idea of a Specification is to separate the statement of how to
match a candidate from the candidate object that it is matched against.
Read http://en.wikipedia.org/wiki/Specification_pattern for more info and
especially http://www.martinfowler.com/apsupp/spec.pdf
Created on Jul 5, 2011.
"""
import re
from pyramid.compat import string_types
from pyramid.threadlocal import get_current_registry
from everest.querying.interfaces import IFilterSpecificationFactory
from everest.querying.interfaces import IOrderSpecificationFactory
from everest.querying.interfaces import ISpecification
from everest.querying.operators import ASCENDING
from everest.querying.operators import CONJUNCTION
from everest.querying.operators import CONTAINED
from everest.querying.operators import CONTAINS
from everest.querying.operators import DESCENDING
from everest.querying.operators import DISJUNCTION
from everest.querying.operators import ENDS_WITH
from everest.querying.operators import EQUAL_TO
from everest.querying.operators import GREATER_OR_EQUALS
from everest.querying.operators import GREATER_THAN
from everest.querying.operators import IN_RANGE
from everest.querying.operators import LESS_OR_EQUALS
from everest.querying.operators import LESS_THAN
from everest.querying.operators import NEGATION
from everest.querying.operators import STARTS_WITH
from everest.resources.interfaces import ICollectionResource
from everest.resources.interfaces import IMemberResource
from everest.utils import get_nested_attribute
from zope.interface import implementer # pylint: disable=E0611,F0401
__docformat__ = 'reStructuredText en'
__all__ = ['AscendingOrderSpecification',
'CompositeFilterSpecification',
'ConjunctionFilterSpecification',
'ConjunctionOrderSpecification',
'CriterionFilterSpecification',
'DescendingOrderSpecification',
'DisjunctionFilterSpecification',
'FilterSpecification',
'FilterSpecificationFactory',
'LeafFilterSpecification',
'NaturalOrderSpecification',
'NegationFilterSpecification',
'ObjectOrderSpecification',
'OrderSpecification',
'OrderSpecificationFactory',
'Specification',
'ValueContainedFilterSpecification',
'ValueContainsFilterSpecification',
'ValueEndsWithFilterSpecification',
'ValueEqualToFilterSpecification',
'ValueGreaterThanFilterSpecification',
'ValueGreaterThanOrEqualToFilterSpecification',
'ValueInRangeFilterSpecification',
'ValueLessThanFilterSpecification',
'ValueLessThanOrEqualToFilterSpecification',
'ValueStartsWithFilterSpecification',
'asc',
'cnts',
'cntd',
'desc',
'eq',
'ends',
'ge',
'gt',
'le',
'lt',
'order',
'rng',
'starts',
]
@implementer(ISpecification)
class Specification(object):
"""
    Abstract base class for all specifications.
"""
operator = None
def __init__(self):
if self.__class__ is Specification:
raise NotImplementedError('Abstract class')
def accept(self, visitor):
raise NotImplementedError('Abstract method')
class FilterSpecification(Specification):
"""
Abstract base class for all filter specifications.
"""
def __init__(self):
if self.__class__ is FilterSpecification:
raise NotImplementedError('Abstract class')
Specification.__init__(self)
def is_satisfied_by(self, candidate):
"""
Tells if the given candidate object matches this specification.
:param candidate: the candidate object
:type candidate: object
:returns: True if the specification is met by the candidate.
:rtype: bool
"""
raise NotImplementedError('Abstract method')
def __and__(self, other):
return ConjunctionFilterSpecification(self, other)
def __or__(self, other):
return DisjunctionFilterSpecification(self, other)
def __invert__(self):
return NegationFilterSpecification(self)
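# The operator overloads on FilterSpecification above let specifications be
# composed declaratively. A minimal sketch (not part of the original module),
# using concrete specification classes defined further below and assuming a
# candidate object with a plain ``name`` attribute:
#
#     spec = (ValueStartsWithFilterSpecification('name', 'Jo')
#             & ~ValueEqualToFilterSpecification('name', 'John'))
#     spec.is_satisfied_by(candidate)  # True for candidate.name == 'Joe'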
class LeafFilterSpecification(FilterSpecification): # still abstract pylint: disable=W0223
"""
Abstract base class for specifications that represent leaves in a
specification tree.
"""
def __init__(self):
if self.__class__ is LeafFilterSpecification:
raise NotImplementedError('Abstract class')
FilterSpecification.__init__(self)
def accept(self, visitor):
visitor.visit_nullary(self)
class CriterionFilterSpecification(LeafFilterSpecification):
"""
Abstract base class for specifications representing filter criteria.
"""
def __init__(self, attr_name, attr_value):
"""
Constructs a filter specification for a query criterion.
        :param attr_name: the candidate's attribute name
        :type attr_name: str
        :param attr_value: the value that satisfies the specification
        :type attr_value: object
"""
if self.__class__ is CriterionFilterSpecification:
raise NotImplementedError('Abstract class')
LeafFilterSpecification.__init__(self)
self.attr_name = attr_name
self.attr_value = attr_value
def __eq__(self, other):
return (isinstance(other, CriterionFilterSpecification)
and self.attr_name == other.attr_name
and self.attr_value == other.attr_value)
def __ne__(self, other):
return not (self == other)
def __str__(self):
str_format = '<%s op_name: %s, attr_name: %s, attr_value: %s>'
params = (self.__class__.__name__,
self.operator.name, self.attr_name, self.attr_value)
return str_format % params
def is_satisfied_by(self, candidate):
cand_value = self._get_candidate_value(candidate)
if IMemberResource.providedBy(self.attr_value): # pylint: disable=E1101
attr_value = self.attr_value.get_entity()
elif ICollectionResource.providedBy(self.attr_value): # pylint: disable=E1101
attr_value = self.attr_value.get_aggregate()
else:
attr_value = self.attr_value
return self.operator.apply(cand_value, attr_value)
def _get_candidate_value(self, candidate):
attr_func = get_nested_attribute if '.' in self.attr_name else getattr
return attr_func(candidate, self.attr_name)
class CompositeFilterSpecification(FilterSpecification):
"""
Abstract base class for specifications that are composed of two other
specifications.
"""
def __init__(self, left_spec, right_spec):
"""
Constructs a CompositeFilterSpecification
:param left_spec: the left part of the composite specification
:type left_spec: :class:`FilterSpecification`
:param right_spec: the right part of the composite specification
:type right_spec: :class:`FilterSpecification`
"""
if self.__class__ is CompositeFilterSpecification:
raise NotImplementedError('Abstract class')
FilterSpecification.__init__(self)
self.__left_spec = left_spec
self.__right_spec = right_spec
def __str__(self):
str_format = '<%s left_spec: %s, right_spec: %s>'
params = (self.__class__.__name__, self.left_spec, self.right_spec)
return str_format % params
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.left_spec == other.left_spec
and self.right_spec == other.right_spec)
def __ne__(self, other):
return not self.__eq__(other)
def accept(self, visitor):
self.left_spec.accept(visitor)
self.right_spec.accept(visitor)
visitor.visit_binary(self)
@property
def left_spec(self):
return self.__left_spec
@property
def right_spec(self):
return self.__right_spec
def is_satisfied_by(self, candidate):
return self.operator.apply(self.left_spec.is_satisfied_by(candidate),
self.right_spec.is_satisfied_by(candidate))
class ConjunctionFilterSpecification(CompositeFilterSpecification):
"""
Concrete conjunction filter specification.
"""
operator = CONJUNCTION
class DisjunctionFilterSpecification(CompositeFilterSpecification):
"""
    Concrete disjunction filter specification.
"""
operator = DISJUNCTION
class NegationFilterSpecification(FilterSpecification):
"""
Concrete negation specification.
"""
operator = NEGATION
def __init__(self, wrapped_spec):
"""
Constructs a NegationFilterSpecification.
        :param wrapped_spec: the wrapped specification
        :type wrapped_spec: :class:`FilterSpecification`
"""
FilterSpecification.__init__(self)
self.__wrapped_spec = wrapped_spec
def __eq__(self, other):
"""Equality operator"""
return (isinstance(other, NegationFilterSpecification) and
self.wrapped_spec == other.wrapped_spec)
def __ne__(self, other):
"""Inequality operator"""
return not (self == other)
def __str__(self):
str_format = '<%s wrapped_spec: %s>'
params = (self.__class__.__name__, self.wrapped_spec)
return str_format % params
def is_satisfied_by(self, candidate):
return self.operator.apply(
self.wrapped_spec.is_satisfied_by(candidate))
def accept(self, visitor):
self.wrapped_spec.accept(visitor)
visitor.visit_unary(self)
@property
def wrapped_spec(self):
"""
Returns the wrapped (negated) specification.
"""
return self.__wrapped_spec
class ValueStartsWithFilterSpecification(CriterionFilterSpecification):
"""
Concrete value starts with specification.
"""
operator = STARTS_WITH
class ValueEndsWithFilterSpecification(CriterionFilterSpecification):
"""
Concrete value ends with specification.
"""
operator = ENDS_WITH
class ValueContainsFilterSpecification(CriterionFilterSpecification):
"""
Concrete value contains specification.
"""
operator = CONTAINS
class ValueContainedFilterSpecification(CriterionFilterSpecification):
"""
Concrete value contained in a list of values specification.
"""
operator = CONTAINED
class ValueEqualToFilterSpecification(CriterionFilterSpecification):
"""
Concrete value equal to specification.
"""
operator = EQUAL_TO
class ValueGreaterThanFilterSpecification(CriterionFilterSpecification):
"""
Concrete value greater than specification.
"""
operator = GREATER_THAN
class ValueLessThanFilterSpecification(CriterionFilterSpecification):
"""
Concrete value less than specification.
"""
operator = LESS_THAN
class ValueGreaterThanOrEqualToFilterSpecification(
CriterionFilterSpecification):
"""
Concrete value greater than or equal to specification.
"""
operator = GREATER_OR_EQUALS
class ValueLessThanOrEqualToFilterSpecification(CriterionFilterSpecification):
"""
Concrete value less than or equal to specification.
"""
operator = LESS_OR_EQUALS
class ValueInRangeFilterSpecification(CriterionFilterSpecification):
"""
Concrete specification for a range of values.
"""
operator = IN_RANGE
@property
def from_value(self):
"""
Returns the first (FROM) value from the range specification.
"""
return self.attr_value[0]
@property
def to_value(self):
"""
Returns the second (TO) value from the range specification.
"""
return self.attr_value[1]
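# A small sketch of the range form (assumption: the IN_RANGE operator treats
# ``attr_value`` as an inclusive ``(from, to)`` pair, which is what the two
# properties above expose):
#
#     spec = ValueInRangeFilterSpecification('age', (18, 65))
#     spec.from_value, spec.to_value  # -> (18, 65)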
@implementer(IFilterSpecificationFactory)
class FilterSpecificationFactory(object):
"""
Filter specification factory.
"""
def create_equal_to(self, attr_name, attr_value):
return ValueEqualToFilterSpecification(attr_name, attr_value)
def create_starts_with(self, attr_name, attr_value):
return ValueStartsWithFilterSpecification(attr_name, attr_value)
def create_ends_with(self, attr_name, attr_value):
return ValueEndsWithFilterSpecification(attr_name, attr_value)
def create_contains(self, attr_name, attr_value):
return ValueContainsFilterSpecification(attr_name, attr_value)
def create_contained(self, attr_name, attr_value):
return ValueContainedFilterSpecification(attr_name, attr_value)
def create_greater_than_or_equal_to(self, attr_name, attr_value):
return ValueGreaterThanOrEqualToFilterSpecification(attr_name,
attr_value)
def create_greater_than(self, attr_name, attr_value):
return ValueGreaterThanFilterSpecification(attr_name, attr_value)
def create_less_than_or_equal_to(self, attr_name, attr_value):
return ValueLessThanOrEqualToFilterSpecification(attr_name, attr_value)
def create_less_than(self, attr_name, attr_value):
return ValueLessThanFilterSpecification(attr_name, attr_value)
def create_in_range(self, attr_name, range_tuple):
return ValueInRangeFilterSpecification(attr_name, range_tuple)
def create_conjunction(self, left_spec, right_spec):
return ConjunctionFilterSpecification(left_spec, right_spec)
def create_disjunction(self, left_spec, right_spec):
return DisjunctionFilterSpecification(left_spec, right_spec)
def create_negation(self, wrapped):
return NegationFilterSpecification(wrapped)
class OrderSpecification(Specification):
"""
Abstract base class for all order specifications.
"""
def __init__(self):
if self.__class__ is OrderSpecification:
raise NotImplementedError('Abstract class')
Specification.__init__(self)
def eq(self, x, y):
raise NotImplementedError('Abstract method')
def lt(self, x, y):
raise NotImplementedError('Abstract method')
def le(self, x, y):
raise NotImplementedError('Abstract method')
def cmp(self, x, y):
raise NotImplementedError('Abstract method')
def ne(self, x, y):
return not self.eq(x, y)
def gt(self, x, y):
return not self.le(x, y)
def ge(self, x, y):
return not self.lt(x, y)
def __and__(self, other):
return ConjunctionOrderSpecification(self, other)
class ObjectOrderSpecification(OrderSpecification): # pylint: disable=W0223
"""
Abstract base class for all order specifications operating on object
attributes.
"""
def __init__(self, attr_name):
if self.__class__ is ObjectOrderSpecification:
raise NotImplementedError('Abstract class')
OrderSpecification.__init__(self)
self.__attr_name = attr_name
self.__attr_func = \
get_nested_attribute if '.' in attr_name else getattr
def __str__(self):
str_format = '<%s attr_name: %s>'
params = (self.__class__.__name__, self.attr_name)
return str_format % params
@property
def attr_name(self):
return self.__attr_name
def eq(self, x, y):
res = self.operator.apply(self._get_value(x), self._get_value(y))
return res == 0
def lt(self, x, y):
res = self.operator.apply(self._get_value(x), self._get_value(y))
return res == -1
def le(self, x, y):
res = self.operator.apply(self._get_value(x), self._get_value(y))
return res == -1 or res == 0
def cmp(self, x, y):
return self.operator.apply(self._get_value(x), self._get_value(y))
def accept(self, visitor):
visitor.visit_nullary(self)
def _get_value(self, obj):
return self.__attr_func(obj, self.attr_name)
class AscendingOrderSpecification(ObjectOrderSpecification):
"""
Concrete ascending order specification.
"""
operator = ASCENDING
class DescendingOrderSpecification(ObjectOrderSpecification):
"""
Concrete descending order specification.
"""
operator = DESCENDING
class NaturalOrderSpecification(ObjectOrderSpecification):
"""
See http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
"""
operator = ASCENDING
def _get_value(self, obj):
value = ObjectOrderSpecification._get_value(self, obj)
if isinstance(value, string_types):
res = [self.__convert(c) for c in re.split(r'([0-9]+)', value)]
else:
res = value
return res
def __convert(self, txt):
return int(txt) if txt.isdigit() else txt
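# Illustration of the natural ordering above (a sketch, not part of the
# original module): ``_get_value`` splits strings into text/number chunks so
# numeric runs compare numerically instead of lexically:
#
#     re.split(r'([0-9]+)', 'file10')       # -> ['file', '10', '']
#     # after __convert: ['file', 10, ''], which sorts after ['file', 2, ''],
#     # i.e. 'file2' orders before 'file10', unlike plain string comparison.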
class ConjunctionOrderSpecification(OrderSpecification):
"""
Concrete conjunction order specification.
"""
operator = CONJUNCTION
def __init__(self, left, right):
OrderSpecification.__init__(self)
self.__left = left
self.__right = right
def __str__(self):
str_format = '<%s left_spec: %s, right_spec: %s>'
params = (self.__class__.__name__, self.left, self.right)
return str_format % params
def eq(self, x, y):
return self.left.eq(x, y) and self.right.eq(x, y)
def lt(self, x, y):
if self.left.eq(x, y):
res = self.right.lt(x, y)
else:
res = self.left.lt(x, y)
return res
def le(self, x, y):
if self.left.eq(x, y):
res = self.right.le(x, y)
else:
res = self.left.le(x, y)
return res
def cmp(self, x, y):
left_cmp = self.left.cmp(x, y)
if left_cmp == 0:
res = self.right.cmp(x, y)
else:
res = left_cmp
return res
@property
def left(self):
return self.__left
@property
def right(self):
return self.__right
def accept(self, visitor):
self.__left.accept(visitor)
self.__right.accept(visitor)
visitor.visit_binary(self)
@implementer(IOrderSpecificationFactory)
class OrderSpecificationFactory(object):
"""
Order specification factory.
"""
def create_ascending(self, attr_name):
return AscendingOrderSpecification(attr_name)
def create_descending(self, attr_name):
return DescendingOrderSpecification(attr_name)
def create_natural(self, attr_name):
return NaturalOrderSpecification(attr_name)
def create_conjunction(self, left_spec, right_spec):
return ConjunctionOrderSpecification(left_spec, right_spec)
class specification_attribute(object):
"""
Helper descriptor for the :class:`SpecificationGenerator`.
"""
def __init__(self, ifactory, method_name):
self.__method_name = method_name
self.__ifactory = ifactory
self.__factory = None
def __get__(self, generator, generator_class):
if generator is None:
generator = generator_class(self.factory)
generator.method_name = self.__method_name
return generator
@property
def factory(self):
if self.__factory is None:
reg = get_current_registry()
self.__factory = reg.queryUtility(self.__ifactory)
return self.__factory
class _SpecificationGenerator(object):
"""
Base class for specification generators.
"""
def __init__(self, factory):
self._factory = factory
self.method_name = None
class FilterSpecificationGenerator(_SpecificationGenerator):
"""
Helper class to simplify the generation of filter specifications.
"""
eq = specification_attribute(IFilterSpecificationFactory,
'create_equal_to')
starts = specification_attribute(IFilterSpecificationFactory,
'create_starts_with')
ends = specification_attribute(IFilterSpecificationFactory,
'create_ends_with')
lt = specification_attribute(IFilterSpecificationFactory,
'create_less_than')
le = specification_attribute(IFilterSpecificationFactory,
'create_less_than_or_equal_to')
gt = specification_attribute(IFilterSpecificationFactory,
'create_greater_than')
ge = specification_attribute(IFilterSpecificationFactory,
'create_greater_than_or_equal_to')
cnts = specification_attribute(IFilterSpecificationFactory,
'create_contains')
cntd = specification_attribute(IFilterSpecificationFactory,
'create_contained')
rng = specification_attribute(IFilterSpecificationFactory,
'create_in_range')
def __call__(self, **kw):
fn = getattr(self._factory, self.method_name)
spec = None
for (attr, value) in kw.items():
if spec is None:
spec = fn(attr, value)
else:
spec = spec & fn(attr, value)
return spec
class SingleOrderSpecificationGenerator(_SpecificationGenerator):
"""
Helper class to simplify the generation of order specifications.
"""
asc = specification_attribute(IOrderSpecificationFactory,
'create_ascending')
desc = specification_attribute(IOrderSpecificationFactory,
'create_descending')
def __call__(self, *args):
fn = getattr(self._factory, self.method_name)
spec = None
for attr in args:
if spec is None:
spec = fn(attr)
else:
spec = spec & (fn(attr))
return spec
class GenericOrderSpecificationGenerator(_SpecificationGenerator):
"""
Helper class to simplify the generation of generic order specifications.
"""
order = specification_attribute(IOrderSpecificationFactory, None)
def __call__(self, *args):
spec = None
for order_crit in args:
name, order_op = order_crit
if order_op == ASCENDING:
fn = self._factory.create_ascending
elif order_op == DESCENDING:
fn = self._factory.create_descending
else:
raise ValueError('Invalid ordering operator "%s".' % order_op)
item_spec = fn(name)
if spec is None:
spec = item_spec
else:
spec &= item_spec
return spec
def eq(**kw):
"Convenience function to create an equal_to specification."
return FilterSpecificationGenerator.eq(**kw)
def starts(**kw):
"Convenience function to create a starts_with specification."
return FilterSpecificationGenerator.starts(**kw)
def ends(**kw):
"Convenience function to create an ends_with specification."
return FilterSpecificationGenerator.ends(**kw)
def lt(**kw):
"Convenience function to create a less_than specification."
return FilterSpecificationGenerator.lt(**kw)
def le(**kw):
"Convenience function to create less_than_or_equal_to specification."
return FilterSpecificationGenerator.le(**kw)
def gt(**kw):
"Convenience function to create a greater_than specification."
return FilterSpecificationGenerator.gt(**kw)
def ge(**kw):
"Convenience function to create a greater_than_or_equal specification."
return FilterSpecificationGenerator.ge(**kw)
def cnts(**kw):
"Convenience function to create a contains specification."
return FilterSpecificationGenerator.cnts(**kw)
def cntd(**kw):
"Convenience function to create a contained specification."
return FilterSpecificationGenerator.cntd(**kw)
def rng(**kw):
"Convenience function to create an in_range specification."
return FilterSpecificationGenerator.rng(**kw)
def asc(*args):
"Convenience function to create an ascending order specification."
return SingleOrderSpecificationGenerator.asc(*args)
def desc(*args):
"Convenience function to create a descending order specification."
return SingleOrderSpecificationGenerator.desc(*args)
def order(*args):
"Convenience function to create an order specification."
return GenericOrderSpecificationGenerator.order(*args)
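# A usage sketch for the convenience functions above (it assumes the filter
# and order specification factories have been registered as utilities in the
# current Pyramid registry, as everest does during configuration; attribute
# names are purely illustrative):
#
#     spec = eq(status='open') & gt(priority=2)
#     ordering = order(('created', DESCENDING), ('title', ASCENDING))
#     spec.is_satisfied_by(some_entity)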
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import requests
from airflow import configuration as conf
from airflow.configuration import AirflowConfigException
from airflow.utils.file import mkdirs
from airflow.utils.helpers import parse_template_string
class FileTaskHandler(logging.Handler):
"""
FileTaskHandler is a python log handler that handles and reads
task instance logs. It creates and delegates log handling
to `logging.FileHandler` after receiving task instance context.
    It reads logs from the task instance's host machine.
"""
def __init__(self, base_log_folder, filename_template):
"""
:param base_log_folder: Base log folder to place logs.
:param filename_template: template filename string
"""
super(FileTaskHandler, self).__init__()
self.handler = None
self.local_base = base_log_folder
self.filename_template, self.filename_jinja_template = \
parse_template_string(filename_template)
def set_context(self, ti):
"""
Provide task_instance context to airflow task handler.
:param ti: task instance object
"""
local_loc = self._init_file(ti)
self.handler = logging.FileHandler(local_loc)
self.handler.setFormatter(self.formatter)
self.handler.setLevel(self.level)
def emit(self, record):
if self.handler is not None:
self.handler.emit(record)
def flush(self):
if self.handler is not None:
self.handler.flush()
def close(self):
if self.handler is not None:
self.handler.close()
def _render_filename(self, ti, try_number):
if self.filename_jinja_template:
jinja_context = ti.get_template_context()
jinja_context['try_number'] = try_number
return self.filename_jinja_template.render(**jinja_context)
return self.filename_template.format(dag_id=ti.dag_id,
task_id=ti.task_id,
execution_date=ti.execution_date.isoformat(),
try_number=try_number)
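    # Example (sketch): with a plain format template such as
    # '{dag_id}/{task_id}/{execution_date}/{try_number}.log' (the actual value
    # is configuration-dependent; this template is only an illustration),
    # _render_filename(ti, 1) yields something like
    # 'my_dag/my_task/2019-01-01T00:00:00+00:00/1.log'.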
def _read(self, ti, try_number, metadata=None):
"""
Template method that contains custom logic of reading
logs given the try_number.
:param ti: task instance record
:param try_number: current try_number to read log from
:param metadata: log metadata,
            can be used for streaming log reading and auto-tailing.
:return: log message as a string and metadata.
"""
# Task instance here might be different from task instance when
# initializing the handler. Thus explicitly getting log location
# is needed to get correct log path.
log_relative_path = self._render_filename(ti, try_number)
location = os.path.join(self.local_base, log_relative_path)
log = ""
if os.path.exists(location):
try:
with open(location) as f:
log += "*** Reading local file: {}\n".format(location)
log += "".join(f.readlines())
except Exception as e:
log = "*** Failed to load local log file: {}\n".format(location)
log += "*** {}\n".format(str(e))
else:
url = os.path.join(
"http://{ti.hostname}:{worker_log_server_port}/log", log_relative_path
).format(
ti=ti,
worker_log_server_port=conf.get('celery', 'WORKER_LOG_SERVER_PORT')
)
log += "*** Log file does not exist: {}\n".format(location)
log += "*** Fetching from: {}\n".format(url)
try:
timeout = None # No timeout
try:
timeout = conf.getint('webserver', 'log_fetch_timeout_sec')
except (AirflowConfigException, ValueError):
pass
response = requests.get(url, timeout=timeout)
# Check if the resource was properly fetched
response.raise_for_status()
log += '\n' + response.text
except Exception as e:
log += "*** Failed to fetch log file from worker. {}\n".format(str(e))
return log, {'end_of_log': True}
def read(self, task_instance, try_number=None, metadata=None):
"""
Read logs of given task instance from local machine.
:param task_instance: task instance object
:param try_number: task instance try_number to read logs from. If None
it returns all logs separated by try_number
:param metadata: log metadata,
            can be used for streaming log reading and auto-tailing.
:return: a list of logs
"""
# Task instance increments its try number when it starts to run.
# So the log for a particular task try will only show up when
        # try number gets incremented in the DB, i.e. logs produced in the time
        # between the CLI run and try_number + 1 being recorded in the DB will
        # not be displayed.
if try_number is None:
next_try = task_instance.next_try_number
try_numbers = list(range(1, next_try))
elif try_number < 1:
logs = [
'Error fetching the logs. Try number {} is invalid.'.format(try_number),
]
return logs
else:
try_numbers = [try_number]
logs = [''] * len(try_numbers)
metadatas = [{}] * len(try_numbers)
for i, try_number in enumerate(try_numbers):
log, metadata = self._read(task_instance, try_number, metadata)
logs[i] += log
metadatas[i] = metadata
return logs, metadatas
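    # For instance (sketch): for a task instance whose next_try_number is 3,
    # read(ti) returns ([log_for_try_1, log_for_try_2], [metadata_1, metadata_2]),
    # while read(ti, try_number=2) returns a single-element pair for try 2.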
def _init_file(self, ti):
"""
Create log directory and give it correct permissions.
:param ti: task instance object
:return: relative log path of the given task instance
"""
# To handle log writing when tasks are impersonated, the log files need to
# be writable by the user that runs the Airflow command and the user
# that is impersonated. This is mainly to handle corner cases with the
# SubDagOperator. When the SubDagOperator is run, all of the operators
# run under the impersonated user and create appropriate log files
# as the impersonated user. However, if the user manually runs tasks
# of the SubDagOperator through the UI, then the log files are created
# by the user that runs the Airflow command. For example, the Airflow
# run command may be run by the `airflow_sudoable` user, but the Airflow
# tasks may be run by the `airflow` user. If the log files are not
# writable by both users, then it's possible that re-running a task
# via the UI (or vice versa) results in a permission error as the task
# tries to write to a log file created by the other user.
relative_path = self._render_filename(ti, ti.try_number)
full_path = os.path.join(self.local_base, relative_path)
directory = os.path.dirname(full_path)
# Create the log file and give it group writable permissions
# TODO(aoen): Make log dirs and logs globally readable for now since the SubDag
# operator is not compatible with impersonation (e.g. if a Celery executor is used
# for a SubDag operator and the SubDag operator has a different owner than the
# parent DAG)
if not os.path.exists(directory):
# Create the directory as globally writable using custom mkdirs
# as os.makedirs doesn't set mode properly.
mkdirs(directory, 0o777)
if not os.path.exists(full_path):
open(full_path, "a").close()
# TODO: Investigate using 444 instead of 666.
os.chmod(full_path, 0o666)
return full_path
|
|
"""
A plot to visualize trees
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.core.edit_subset_mode import EditSubsetMode
from glue.core.state import lookup_class_with_patches
from glue.core.subset import CategorySubsetState
from glue.core.roi import PointROI
from glue.core.callback_property import CallbackProperty, add_callback, delay_callback
from glue.core.data import IncompatibleAttribute, Data
from glue.viewers.common.viz_client import GenericMplClient
from glue.plugins.dendro_viewer.layer_artist import DendroLayerArtist
from glue.utils import nonpartial
class DendroClient(GenericMplClient):
height_attr = CallbackProperty()
parent_attr = CallbackProperty()
order_attr = CallbackProperty()
ylog = CallbackProperty(False)
display_data = CallbackProperty(None)
select_substruct = CallbackProperty(True)
def __init__(self, *args, **kwargs):
super(DendroClient, self).__init__(*args, **kwargs)
self._layout = None
self.axes.set_xticks([])
self.axes.spines['top'].set_visible(False)
self.axes.spines['bottom'].set_visible(False)
def _connect(self):
add_callback(self, 'ylog', self._set_ylog)
add_callback(self, 'height_attr', nonpartial(self._relayout))
add_callback(self, 'order_attr', nonpartial(self._relayout))
add_callback(self, 'parent_attr', nonpartial(self._relayout))
def _default_attributes(self):
assert self.display_data is not None
fallback = self.display_data.components[0]
with delay_callback(self, 'height_attr', 'parent_attr',
'order_attr'):
if self.height_attr is None:
comp = self.display_data.find_component_id('height') or fallback
self.height_attr = comp
if self.parent_attr is None:
comp = self.display_data.find_component_id('parent') or fallback
self.parent_attr = comp
if self.order_attr is None:
self.order_attr = self.height_attr
def new_layer_artist(self, layer):
return DendroLayerArtist(layer, self.axes)
def _set_ylog(self, log):
self.axes.set_yscale('log' if log else 'linear')
self._redraw()
def _relayout(self):
if self.display_data is None:
return
if self.height_attr is None:
return
if self.parent_attr is None:
return
if self.order_attr is None:
return
try:
parent = np.asarray(self.display_data[self.parent_attr],
dtype=np.int).ravel()
y = self.display_data[self.height_attr].ravel()
key = self.display_data[self.order_attr].ravel()
except IncompatibleAttribute:
return
children = self._children
pos = np.zeros(key.size) - 1
cur_pos = 0
for struct in _iter_sorted(children, parent, key):
if children[struct].size == 0: # leaf
pos[struct] = cur_pos
cur_pos += 1
else: # branch
assert pos[children[struct]].mean() >= 0
pos[struct] = pos[children[struct]].mean()
layout = np.zeros((2, 3 * y.size))
layout[0, ::3] = pos
layout[0, 1::3] = pos
layout[0, 2::3] = np.where(parent >= 0, pos[parent], np.nan)
layout[1, ::3] = y
layout[1, 1::3] = np.where(parent >= 0, y[parent], y.min())
layout[1, 2::3] = layout[1, 1::3]
self._layout = layout
self._snap_limits()
self._update_all()
def _snap_limits(self):
if self._layout is None:
return
x, y = self._layout[:, ::3]
xlim = np.array([x.min(), x.max()])
xpad = .05 * xlim.ptp()
xlim[0] -= xpad
xlim[1] += xpad
ylim = np.array([y.min(), y.max()])
if self.ylog:
ylim = np.maximum(ylim, 1e-5)
pad = 1.05 * ylim[1] / ylim[0]
ylim[0] /= pad
ylim[1] *= pad
else:
pad = .05 * ylim.ptp()
ylim[0] -= pad
ylim[1] += pad
self.axes.set_xlim(*xlim)
self.axes.set_ylim(*ylim)
def add_layer(self, layer):
if layer.data.ndim != 1:
return
if layer.data not in self.data:
raise TypeError("Layer not in data collection")
if layer in self.artists:
return self.artists[layer][0]
self.display_data = self.display_data or layer.data
result = DendroLayerArtist(layer, self.axes)
self.artists.append(result)
self._update_layer(layer)
self._ensure_subsets_added(layer)
self._default_attributes()
return result
def _ensure_subsets_added(self, layer):
if not isinstance(layer, Data):
return
for subset in layer.subsets:
self.add_layer(subset)
def _update_layer(self, layer):
for artist in self.artists[layer]:
if not isinstance(artist, DendroLayerArtist):
continue
artist.layout = self._layout
artist.update()
self._redraw()
def remove_layer(self, layer):
super(DendroClient, self).remove_layer(layer)
if layer is self.display_data:
self.display_data = None
@property
def _parents(self):
return np.asarray(self.display_data[self.parent_attr],
dtype=np.int).ravel()
@property
def _children(self):
children = _dendro_children(self._parents)
return children
def _substructures(self, idx):
"""
Return an array of all substructure indices of a given index.
The input is included in the output.
Parameters
----------
idx : int
The structure to extract.
Returns
-------
array
"""
children = self._children
result = []
todo = [idx]
while todo:
result.append(todo.pop())
todo.extend(children[result[-1]])
return np.array(result, dtype=np.int)
def apply_roi(self, roi):
if not isinstance(roi, PointROI):
raise NotImplementedError("Only PointROI supported")
if self._layout is None or self.display_data is None:
return
x, y = roi.x, roi.y
if not roi.defined():
return
xs, ys = self._layout[:, ::3]
parent_ys = self._layout[1, 1::3]
delt = np.abs(x - xs)
delt[y > ys] = np.nan
delt[y < parent_ys] = np.nan
if np.isfinite(delt).any():
select = np.nanargmin(delt)
if self.select_substruct:
select = self._substructures(select)
select = np.asarray(select, dtype=np.int)
else:
select = np.array([], dtype=np.int)
state = CategorySubsetState(self.display_data.pixel_component_ids[0],
select)
EditSubsetMode().update(self.collect, state,
focus_data=self.display_data)
def restore_layers(self, layers, context):
"""
Re-generate a list of plot layers from a glue-serialized list
"""
for l in layers:
cls = lookup_class_with_patches(l.pop('_type'))
if cls != DendroLayerArtist:
raise ValueError("Dendrogram client cannot restore layer of type "
"%s" % cls)
props = dict((k, context.object(v)) for k, v in l.items())
layer = self.add_layer(props['layer'])
layer.properties = props
def _dendro_children(parent):
children = [[] for _ in range(parent.size)]
for i, p in enumerate(parent):
if p < 0:
continue
children[p].append(i)
return list(map(np.asarray, children))
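# Sketch of the parent/children bookkeeping: a parent array of [-1, 0, 0]
# describes one trunk (index 0) with two leaves (1 and 2), so
#
#     _dendro_children(np.array([-1, 0, 0]))
#     # -> [array([1, 2]), array([]), array([])]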
def _iter_sorted(children, parent, key):
# must yield both children before parent
yielded = set()
trunks = np.array([i for i, p in enumerate(parent) if p < 0], dtype=np.int)
for idx in np.argsort(key[trunks]):
idx = trunks[idx]
for item in _postfix_iter(idx, children, parent, yielded, key):
yield item
def _postfix_iter(node, children, parent, yielded, key):
"""
Iterate over a node and its children, in the following fashion:
parents are yielded after children
children are yielded in order of ascending key value
"""
todo = [node]
expanded = set()
while todo:
node = todo[-1]
if node in yielded:
todo.pop()
continue
if children[node].size == 0 or node in expanded:
yield todo.pop()
yielded.add(node)
continue
c = children[node]
ind = np.argsort(key[c])[::-1]
todo.extend(c[ind])
expanded.add(node)
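# Worked example (sketch): for parent = np.array([-1, 0, 0]) and
# key = np.array([5., 2., 1.]), node 0 is the only trunk and its children are
# yielded in ascending key order before the trunk itself:
#
#     list(_iter_sorted(_dendro_children(parent), parent, key))  # -> [2, 1, 0]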
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A simple project generator for Native Client projects written in C or C++.
This script accepts a few arguments which it uses as a description of a new NaCl
project. It sets up a project with a given name and a given primary language
(default: C++; optionally C) using the appropriate files from this area.
This script does not handle setup for complex applications, just the basic
necessities to get a functional Native Client application stub. When this
script terminates, a compilable project stub will exist with the specified
name at the specified location.
GetCamelCaseName(): Converts an underscore name to a camel case name.
GetCodeDirectory(): Decides what directory to pull source code from.
GetCodeSourceFiles(): Decides what source files to pull into the stub.
GetCommonSourceFiles(): Gives list of files needed by all project types.
GetHTMLDirectory(): Decides what directory to pull HTML stub from.
GetHTMLSourceFiles(): Gives HTML files to be included in project stub.
GetTargetFileName(): Converts a source file name into a project file name.
ParseArguments(): Parses the arguments provided by the user.
ReplaceInFile(): Replaces a given string with another in a given file.
ProjectInitializer: Maintains some state applicable to setting up a project.
main(): Executes the script.
"""
__author__ = 'mlinck@google.com (Michael Linck)'
import fileinput
import optparse
import os.path
import shutil
import sys
import uuid
# A list of all platforms that should have make.cmd.
WINDOWS_BUILD_PLATFORMS = ['cygwin', 'win32']
# Tags that will be replaced in the new project's source files.
PROJECT_NAME_TAG = '<PROJECT_NAME>'
PROJECT_NAME_CAMEL_CASE_TAG = '<ProjectName>'
SDK_ROOT_TAG = '<NACL_SDK_ROOT>'
NACL_PLATFORM_TAG = '<NACL_PLATFORM>'
VS_PROJECT_UUID_TAG = '<VS_PROJECT_UUID>'
VS_SOURCE_UUID_TAG = '<VS_SOURCE_UUID>'
VS_HEADER_UUID_TAG = '<VS_HEADER_UUID>'
VS_RESOURCE_UUID_TAG = '<VS_RESOURCE_UUID>'
# This string is the part of the file name that will be replaced.
PROJECT_FILE_NAME = 'project_file'
# Lists of source files that will be used for the new project.
COMMON_PROJECT_FILES = ['scons']
C_SOURCE_FILES = ['build.scons', '%s.c' % PROJECT_FILE_NAME]
CC_SOURCE_FILES = ['build.scons', '%s.cc' % PROJECT_FILE_NAME]
HTML_FILES = ['%s.html' % PROJECT_FILE_NAME]
VS_FILES = ['%s.sln' % PROJECT_FILE_NAME, '%s.vcproj' % PROJECT_FILE_NAME]
# Error needs to be a class, since we 'raise' it in several places.
class Error(Exception):
pass
def GetCamelCaseName(lower_case_name):
"""Converts an underscore name to a camel case name.
Args:
lower_case_name: The name in underscore-delimited lower case format.
Returns:
The name in camel case format.
"""
camel_case_name = ''
name_parts = lower_case_name.split('_')
for part in name_parts:
if part:
camel_case_name += part.capitalize()
return camel_case_name
def GetCodeDirectory(is_c_project, project_templates_dir):
"""Decides what directory to pull source code from.
Args:
is_c_project: A boolean indicating whether this project is in C or not.
project_templates_dir: The path to the project_templates directory.
Returns:
The code directory for the given project type.
"""
stub_directory = ''
if is_c_project:
stub_directory = os.path.join(project_templates_dir, 'c')
else:
stub_directory = os.path.join(project_templates_dir, 'cc')
return stub_directory
def GetCodeSourceFiles(is_c_project):
"""Decides what source files to pull into the stub.
Args:
is_c_project: A boolean indicating whether this project is in C or not.
Returns:
The files that are specific to the requested type of project and live in its
directory.
"""
project_files = []
if is_c_project:
project_files = C_SOURCE_FILES
else:
project_files = CC_SOURCE_FILES
return project_files
def GetCommonSourceFiles():
"""Gives list of files needed by all project types.
Returns:
The files C and C++ projects have in common. These are the files that live
in the top level project_templates directory.
"""
project_files = COMMON_PROJECT_FILES
if sys.platform in WINDOWS_BUILD_PLATFORMS:
project_files.extend(['scons.bat'])
return project_files
def GetVsDirectory(project_templates_dir):
"""Decides what directory to pull Visual Studio stub from.
Args:
project_templates_dir: The path to the project_templates directory.
Returns:
The directory where the HTML stub is to be found.
"""
return os.path.join(project_templates_dir, 'vs')
def GetVsProjectFiles():
"""Gives VisualStudio files to be included in project stub.
Returns:
The VisualStudio files needed for the project.
"""
return VS_FILES
def GetHTMLDirectory(project_templates_dir):
"""Decides what directory to pull HTML stub from.
Args:
project_templates_dir: The path to the project_templates directory.
Returns:
The directory where the HTML stub is to be found.
"""
return os.path.join(project_templates_dir, 'html')
def GetHTMLSourceFiles():
"""Gives HTML files to be included in project stub.
Returns:
The HTML files needed for the project.
"""
return HTML_FILES
def GetTargetFileName(source_file_name, project_name):
"""Converts a source file name into a project file name.
Args:
source_file_name: The name of a file that is to be included in the project
stub, as it appears at the source location.
project_name: The name of the project that is being generated.
Returns:
The target file name for a given source file. All project files are run
through this filter and it modifies them as needed.
"""
target_file_name = ''
if source_file_name.startswith(PROJECT_FILE_NAME):
target_file_name = source_file_name.replace(PROJECT_FILE_NAME,
project_name)
else:
target_file_name = source_file_name
return target_file_name
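# For example (a sketch): GetTargetFileName('project_file.cc', 'hello_world')
# returns 'hello_world.cc', while files that do not start with the
# PROJECT_FILE_NAME prefix (e.g. 'build.scons') pass through unchanged.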
def GetDefaultProjectDir():
"""Determines the default project directory.
The default directory root for new projects is called 'nacl_projects' under
the user's home directory. There are two ways to override this: you can set
the NACL_PROJECT_ROOT environment variable, or use the --directory option.
Returns:
An os-specific path to the default project directory, which is called
'nacl_projects' under the user's home directory.
"""
return os.getenv('NACL_PROJECT_ROOT',
os.path.join(os.path.expanduser('~'), 'nacl_projects'))
def ParseArguments(argv):
"""Parses the arguments provided by the user.
Parses the command line options and makes sure the script errors when it is
supposed to.
Args:
argv: The argument array.
Returns:
The options structure that represents the arguments after they have been
parsed.
"""
parser = optparse.OptionParser()
parser.add_option(
'-n', '--name', dest='project_name',
default='',
help=('Required: the name of the new project to be stubbed out.\n'
'Please use lower case names with underscore, i.e. hello_world.'))
parser.add_option(
'-d', '--directory', dest='project_directory',
default=GetDefaultProjectDir(),
help=('Optional: If set, the new project will be created under this '
'directory and the directory created if necessary.'))
parser.add_option(
'-c', action='store_true', dest='is_c_project',
default=False,
help=('Optional: If set, this will generate a C project. Default '
'is C++.'))
parser.add_option(
'-p', '--nacl-platform', dest='nacl_platform',
default='pepper_17',
help=('Optional: if set, the new project will target the given nacl\n'
'platform. Default is the most current platform. e.g. pepper_17'))
parser.add_option(
'--vsproj', action='store_true', dest='is_vs_project',
default=False,
help=('Optional: If set, generate Visual Studio project files.'))
result = parser.parse_args(argv)
options = result[0]
args = result[1]
if args:
parser.print_help()
sys.exit(1)
elif not options.project_name.islower():
print('--name missing or in incorrect format. Please use -h for '
'instructions.')
sys.exit(1)
return options
class ProjectInitializer(object):
"""Maintains the state of the project that is being created."""
def __init__(self, is_c_project, is_vs_project, project_name,
project_location, nacl_platform, project_templates_dir,
nacl_sdk_root=None, os_resource=os):
"""Initializes all the fields that are known after parsing the parameters.
Args:
is_c_project: A boolean indicating whether this project is in C or not.
is_vs_project: A boolean indicating whether this project has Visual
Studio support.
project_name: A string containing the name of the project to be created.
      project_location: A path indicating where the new project is to be placed.
      nacl_platform: The NaCl platform (e.g. pepper_17) the new project targets.
      project_templates_dir: The path to the project_templates directory.
      nacl_sdk_root: The path to the NaCl SDK root, used to replace the
          <NACL_SDK_ROOT> tag in the copied files.
      os_resource: A resource to be used as os. Provided for unit testing.
"""
self.__is_c_project = is_c_project
self.__is_vs_project = is_vs_project
self.__project_files = []
self.__project_dir = None
self.__project_name = project_name
self.__project_location = project_location
self.__nacl_platform = nacl_platform
self.__project_templates_dir = project_templates_dir
# System resources are properties so mocks can be inserted.
self.__fileinput = fileinput
self.__nacl_sdk_root = nacl_sdk_root
self.__os = os_resource
self.__shutil = shutil
self.__sys = sys
self.__CreateProjectDirectory()
def CopyAndRenameFiles(self, source_dir, file_names):
"""Places files in the new project's directory and renames them as needed.
Copies the given files from the given source directory into the new
project's directory, renaming them as necessary. Each file that is created
in the project directory is also added to self.__project_files.
Args:
source_dir: A path indicating where the files are to be copied from.
file_names: The list of files that is to be copied out of source_dir.
"""
for source_file_name in file_names:
target_file_name = GetTargetFileName(source_file_name,
self.__project_name)
copy_source_file = self.os.path.join(source_dir, source_file_name)
copy_target_file = self.os.path.join(self.__project_dir, target_file_name)
self.shutil.copy(copy_source_file, copy_target_file)
self.__project_files += [copy_target_file]
def __CreateProjectDirectory(self):
"""Creates the project's directory and any parents as necessary."""
self.__project_dir = self.os.path.join(self.__project_location,
self.__project_name)
if self.os.path.exists(self.__project_dir):
raise Error("Error: directory '%s' already exists" % self.__project_dir)
self.os.makedirs(self.__project_dir)
def PrepareDirectoryContent(self):
"""Prepares the directory for the new project.
This function's job is to know what directories need to be used and what
files need to be copied and renamed. It uses several tiny helper functions
to do this.
There are three locations from which files are copied to create a project.
That number may change in the future.
"""
code_source_dir = GetCodeDirectory(self.__is_c_project,
self.__project_templates_dir)
code_source_files = GetCodeSourceFiles(self.__is_c_project)
html_source_dir = GetHTMLDirectory(self.__project_templates_dir)
html_source_files = GetHTMLSourceFiles()
common_source_files = GetCommonSourceFiles()
self.CopyAndRenameFiles(code_source_dir, code_source_files)
self.CopyAndRenameFiles(html_source_dir, html_source_files)
self.CopyAndRenameFiles(self.__project_templates_dir,
common_source_files)
if self.__is_vs_project:
vs_source_dir = GetVsDirectory(self.__project_templates_dir)
vs_files = GetVsProjectFiles()
self.CopyAndRenameFiles(vs_source_dir, vs_files)
print('init_project has copied the appropriate files to: %s' %
self.__project_dir)
def PrepareFileContent(self):
"""Changes contents of files in the new project as needed.
Goes through each file in the project that is being created and replaces
contents as necessary.
"""
camel_case_name = GetCamelCaseName(self.__project_name)
sdk_root_dir = self.__nacl_sdk_root
if not sdk_root_dir:
raise Error("Error: NACL_SDK_ROOT is not set")
sdk_root_dir = self.os.path.abspath(sdk_root_dir)
if self.__is_vs_project:
project_uuid = str(uuid.uuid4()).upper()
vs_source_uuid = str(uuid.uuid4()).upper()
vs_header_uuid = str(uuid.uuid4()).upper()
vs_resource_uuid = str(uuid.uuid4()).upper()
for project_file in self.__project_files:
self.ReplaceInFile(project_file, PROJECT_NAME_TAG, self.__project_name)
self.ReplaceInFile(project_file,
PROJECT_NAME_CAMEL_CASE_TAG,
camel_case_name)
self.ReplaceInFile(project_file, SDK_ROOT_TAG, sdk_root_dir)
self.ReplaceInFile(project_file, NACL_PLATFORM_TAG, self.__nacl_platform)
if self.__is_vs_project:
self.ReplaceInFile(project_file, VS_PROJECT_UUID_TAG, project_uuid)
self.ReplaceInFile(project_file, VS_SOURCE_UUID_TAG, vs_source_uuid)
self.ReplaceInFile(project_file, VS_HEADER_UUID_TAG, vs_header_uuid)
self.ReplaceInFile(project_file, VS_RESOURCE_UUID_TAG, vs_resource_uuid)
def ReplaceInFile(self, file_path, old_text, new_text):
"""Replaces a given string with another in a given file.
Args:
file_path: The path to the file that is to be modified.
old_text: The text that is to be removed.
new_text: The text that is to be added in place of old_text.
"""
for line in self.fileinput.input(file_path, inplace=1, mode='U'):
self.sys.stdout.write(line.replace(old_text, new_text))
# The following properties exist to make unit testing possible.
def _GetFileinput(self):
"""Accessor for Fileinput property."""
return self.__fileinput
def __GetFileinput(self):
"""Indirect Accessor for _GetFileinput."""
return self._GetFileinput()
def _SetFileinput(self, fileinput_resource):
"""Accessor for Fileinput property."""
self.__fileinput = fileinput_resource
def __SetFileinput(self, fileinput_resource):
"""Indirect Accessor for _SetFileinput."""
return self._SetFileinput(fileinput_resource)
fileinput = property(
__GetFileinput, __SetFileinput,
doc="""Gets and sets the resource to use as fileinput.""")
def _GetOS(self):
"""Accessor for os property."""
return self.__os
def __GetOS(self):
"""Indirect Accessor for _GetOS."""
return self._GetOS()
def _SetOS(self, os_resource):
"""Accessor for os property."""
self.__os = os_resource
def __SetOS(self, os_resource):
"""Indirect Accessor for _SetOS."""
return self._SetOS(os_resource)
os = property(__GetOS, __SetOS,
doc="""Gets and sets the resource to use as os.""")
def _GetShutil(self):
"""Accessor for shutil property."""
return self.__shutil
def __GetShutil(self):
"""Indirect Accessor for _GetShutil."""
return self._GetShutil()
def _SetShutil(self, shutil_resource):
"""Accessor for shutil property."""
self.__shutil = shutil_resource
def __SetShutil(self, shutil_resource):
"""Indirect Accessor for _SetShutil."""
return self._SetShutil(shutil_resource)
shutil = property(__GetShutil, __SetShutil,
doc="""Gets and sets the resource to use as shutil.""")
def _GetSys(self):
"""Accessor for sys property."""
return self.__sys
def __GetSys(self):
"""Indirect Accessor for _GetSys."""
return self._GetSys()
def _SetSys(self, sys_resource):
"""Accessor for sys property."""
self.__sys = sys_resource
def __SetSys(self, sys_resource):
"""Indirect Accessor for _SetSys."""
return self._SetSys(sys_resource)
sys = property(__GetSys, __SetSys,
doc="""Gets and sets the resource to use as sys.""")
def main(argv):
"""Prepares the new project.
Args:
argv: The arguments passed to the script by the shell.
"""
print 'init_project parsing its arguments.'
script_dir = os.path.abspath(os.path.dirname(__file__))
options = ParseArguments(argv)
print 'init_project is preparing your project.'
# Check to see if the project is going into the SDK bundle. If so, issue a
# warning.
sdk_root_dir = os.getenv('NACL_SDK_ROOT',
os.path.dirname(os.path.dirname(script_dir)))
if sdk_root_dir:
if os.path.normpath(options.project_directory).count(
os.path.normpath(sdk_root_dir)) > 0:
print('WARNING: It looks like you are creating projects in the NaCl SDK '
'directory %s.\nThese might be removed at the next update.' %
sdk_root_dir)
project_initializer = ProjectInitializer(options.is_c_project,
options.is_vs_project,
options.project_name,
options.project_directory,
options.nacl_platform,
script_dir,
nacl_sdk_root=sdk_root_dir)
project_initializer.PrepareDirectoryContent()
project_initializer.PrepareFileContent()
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except Exception as error:
print error
sys.exit(1)
|
|
import os.path
from typing import Dict, List, Optional, TypeVar, Any, Text
from django.conf import settings
from django.conf.urls import url
from django.core.urlresolvers import LocaleRegexProvider
from django.utils.module_loading import import_string
from django.utils.safestring import mark_safe
from django.template import loader
from zerver.templatetags.app_filters import render_markdown_path
"""This module declares all of the (documented) integrations available
in the Zulip server. The Integration class is used as part of
generating the documentation on the /integrations page, while the
WebhookIntegration class is also used to generate the URLs in
`zproject/urls.py` for webhook integrations.
To add a new non-webhook integration, add code to the INTEGRATIONS
dictionary below.
To add a new webhook integration, declare a WebhookIntegration in the
WEBHOOK_INTEGRATIONS list below (it will be automatically added to
INTEGRATIONS).
Over time, we expect this registry to grow additional convenience
features for writing and configuring integrations efficiently.
"""
class Integration(object):
DEFAULT_LOGO_STATIC_PATH_PNG = 'static/images/integrations/logos/{name}.png'
DEFAULT_LOGO_STATIC_PATH_SVG = 'static/images/integrations/logos/{name}.svg'
def __init__(self, name, client_name, logo=None, secondary_line_text=None, display_name=None, doc=None):
# type: (str, str, Optional[str], Optional[str], Optional[str], Optional[str]) -> None
self.name = name
self.client_name = client_name
self.secondary_line_text = secondary_line_text
self.doc = doc
self.doc_context = None # type: Dict[Any, Any]
if logo is None:
if os.path.isfile(self.DEFAULT_LOGO_STATIC_PATH_SVG.format(name=name)):
logo = self.DEFAULT_LOGO_STATIC_PATH_SVG.format(name=name)
else:
logo = self.DEFAULT_LOGO_STATIC_PATH_PNG.format(name=name)
self.logo = logo
if display_name is None:
display_name = name.title()
self.display_name = display_name
def is_enabled(self):
# type: () -> bool
return True
def add_doc_context(self, context):
# type: (Dict[Any, Any]) -> None
self.doc_context = context
@property
def help_content(self):
# type: () -> Text
doc_context = self.doc_context or {}
if self.doc.endswith('.md'):
return render_markdown_path(self.doc, doc_context)
else:
template = loader.get_template(self.doc)
return mark_safe(template.render(doc_context))
class EmailIntegration(Integration):
def is_enabled(self):
# type: () -> bool
return settings.EMAIL_GATEWAY_BOT != ""
class WebhookIntegration(Integration):
DEFAULT_FUNCTION_PATH = 'zerver.webhooks.{name}.view.api_{name}_webhook'
DEFAULT_URL = 'api/v1/external/{name}'
DEFAULT_CLIENT_NAME = 'Zulip{name}Webhook'
DEFAULT_DOC_PATH = '{name}/doc.{ext}'
def __init__(self, name, client_name=None, logo=None, secondary_line_text=None,
function=None, url=None, display_name=None, doc=None):
# type: (str, Optional[str], Optional[str], Optional[str], Optional[str], Optional[str], Optional[str], Optional[str]) -> None
if client_name is None:
client_name = self.DEFAULT_CLIENT_NAME.format(name=name.title())
super(WebhookIntegration, self).__init__(name, client_name, logo, secondary_line_text, display_name)
if function is None:
function = self.DEFAULT_FUNCTION_PATH.format(name=name)
if isinstance(function, str):
function = import_string(function)
self.function = function
if url is None:
url = self.DEFAULT_URL.format(name=name)
self.url = url
if doc is None:
path = os.path.join(settings.DEPLOY_ROOT, 'zerver', 'webhooks')
md_doc = self.DEFAULT_DOC_PATH.format(name=name, ext='md')
if os.path.exists(os.path.join(path, md_doc)):
doc = md_doc
else:
doc = self.DEFAULT_DOC_PATH.format(name=name, ext='html')
self.doc = doc
@property
def url_object(self):
# type: () -> LocaleRegexProvider
return url(self.url, self.function)
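# A sketch of how the defaults above expand for an existing entry such as
# WebhookIntegration('helloworld'):
#   client_name -> 'ZulipHelloworldWebhook'
#   url         -> 'api/v1/external/helloworld'
#   function    -> 'zerver.webhooks.helloworld.view.api_helloworld_webhook'
#   doc         -> 'helloworld/doc.md' if that file exists, else 'helloworld/doc.html'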
class HubotLozenge(Integration):
GIT_URL_TEMPLATE = "https://github.com/hubot-scripts/hubot-{}"
def __init__(self, name, display_name=None, logo=None, logo_alt=None, git_url=None):
# type: (str, Optional[str], Optional[str], Optional[str], Optional[str]) -> None
if logo_alt is None:
logo_alt = "{} logo".format(name.title())
self.logo_alt = logo_alt
if git_url is None:
git_url = self.GIT_URL_TEMPLATE.format(name)
self.git_url = git_url
super(HubotLozenge, self).__init__(name, name, logo, display_name=display_name)
class GithubIntegration(WebhookIntegration):
"""
    We need this class so that we don't create a url object for the GitHub
    integrations. We want to have one generic url with a dispatch function
    for the github service and the github webhook.
"""
@property
def url_object(self):
# type: () -> None
return
WEBHOOK_INTEGRATIONS = [
WebhookIntegration('airbrake'),
WebhookIntegration('appfollow', display_name='AppFollow'),
WebhookIntegration('beanstalk'),
WebhookIntegration('basecamp'),
WebhookIntegration('bitbucket2', logo='static/images/integrations/logos/bitbucket.svg', display_name='Bitbucket'),
WebhookIntegration('bitbucket', display_name='Bitbucket', secondary_line_text='(Enterprise)'),
WebhookIntegration('circleci', display_name='CircleCI'),
WebhookIntegration('codeship'),
WebhookIntegration('crashlytics'),
WebhookIntegration('delighted', display_name='Delighted'),
WebhookIntegration('deskdotcom', logo='static/images/integrations/logos/deskcom.png', display_name='Desk.com'),
WebhookIntegration('freshdesk'),
GithubIntegration(
'github',
function='zerver.webhooks.github.view.api_github_landing',
display_name='GitHub',
secondary_line_text='(deprecated)'
),
GithubIntegration(
'github_webhook',
display_name='GitHub',
logo='static/images/integrations/logos/github.svg',
secondary_line_text='(webhook)',
function='zerver.webhooks.github_webhook.view.api_github_webhook'
),
WebhookIntegration('gitlab', display_name='GitLab'),
WebhookIntegration('gogs'),
WebhookIntegration('gosquared', display_name='GoSquared'),
WebhookIntegration('greenhouse', display_name='Greenhouse'),
WebhookIntegration('hellosign', display_name='HelloSign'),
WebhookIntegration('helloworld', display_name='Hello World'),
WebhookIntegration('heroku', display_name='Heroku'),
WebhookIntegration('homeassistant', display_name='Home Assistant'),
WebhookIntegration('ifttt', function='zerver.webhooks.ifttt.view.api_iftt_app_webhook', display_name='IFTTT'),
WebhookIntegration('jira', secondary_line_text='(hosted or v5.2+)', display_name='JIRA'),
WebhookIntegration('librato'),
WebhookIntegration('mention', display_name='Mention'),
WebhookIntegration('newrelic', display_name='New Relic'),
WebhookIntegration('pagerduty'),
WebhookIntegration('papertrail'),
WebhookIntegration('pingdom'),
WebhookIntegration('pivotal', display_name='Pivotal Tracker'),
WebhookIntegration('semaphore'),
WebhookIntegration('sentry'),
WebhookIntegration('slack'),
WebhookIntegration('solano', display_name='Solano Labs'),
WebhookIntegration('splunk', display_name='Splunk'),
WebhookIntegration('stripe', display_name='Stripe'),
WebhookIntegration('taiga'),
WebhookIntegration('teamcity'),
WebhookIntegration('transifex'),
WebhookIntegration('travis', display_name='Travis CI'),
WebhookIntegration('trello', secondary_line_text='(webhook)'),
WebhookIntegration('updown'),
WebhookIntegration(
'yo',
function='zerver.webhooks.yo.view.api_yo_app_webhook',
display_name='Yo App'
),
WebhookIntegration('wordpress', display_name='WordPress'),
WebhookIntegration('zapier'),
WebhookIntegration('zendesk')
] # type: List[WebhookIntegration]
INTEGRATIONS = {
'asana': Integration('asana', 'asana', doc='zerver/integrations/asana.html'),
'capistrano': Integration('capistrano', 'capistrano', doc='zerver/integrations/capistrano.html'),
'codebase': Integration('codebase', 'codebase', doc='zerver/integrations/codebase.html'),
'email': EmailIntegration('email', 'email', doc='zerver/integrations/email.html'),
'git': Integration('git', 'git', doc='zerver/integrations/git.html'),
'google-calendar': Integration(
'google-calendar',
'google-calendar',
display_name='Google Calendar',
doc='zerver/integrations/google-calendar.html'
),
'hubot': Integration('hubot', 'hubot', doc='zerver/integrations/hubot.html'),
'jenkins': Integration(
'jenkins',
'jenkins',
secondary_line_text='(or Hudson)',
doc='zerver/integrations/jenkins.html'
),
'jira-plugin': Integration(
'jira-plugin',
'jira-plugin',
logo='static/images/integrations/logos/jira.svg',
secondary_line_text='(locally installed)',
display_name='JIRA',
doc='zerver/integrations/jira-plugin.html'
),
'mercurial': Integration(
'mercurial',
'mercurial',
display_name='Mercurial (hg)',
doc='zerver/integrations/mercurial.html'
),
'nagios': Integration('nagios', 'nagios', doc='zerver/integrations/nagios.html'),
'openshift': Integration(
'openshift',
'openshift',
display_name='OpenShift',
doc='zerver/integrations/openshift.html'
),
'perforce': Integration('perforce', 'perforce', doc='zerver/integrations/perforce.html'),
'phabricator': Integration('phabricator', 'phabricator', doc='zerver/integrations/phabricator.html'),
'puppet': Integration('puppet', 'puppet', doc='zerver/integrations/puppet.html'),
'redmine': Integration('redmine', 'redmine', doc='zerver/integrations/redmine.html'),
'rss': Integration('rss', 'rss', display_name='RSS', doc='zerver/integrations/rss.html'),
'subversion': Integration('subversion', 'subversion', doc='zerver/integrations/subversion.html'),
'trac': Integration('trac', 'trac', doc='zerver/integrations/trac.html'),
'trello-plugin': Integration(
'trello-plugin',
'trello-plugin',
logo='static/images/integrations/logos/trello.svg',
secondary_line_text='(legacy)',
display_name='Trello',
doc='zerver/integrations/trello-plugin.html'
),
'twitter': Integration('twitter', 'twitter', doc='zerver/integrations/twitter.html'),
} # type: Dict[str, Integration]
HUBOT_LOZENGES = {
'assembla': HubotLozenge('assembla'),
'bonusly': HubotLozenge('bonusly'),
'chartbeat': HubotLozenge('chartbeat'),
'darksky': HubotLozenge('darksky', display_name='Dark Sky', logo_alt='Dark Sky logo'),
'hangouts': HubotLozenge('google-hangouts', display_name="Hangouts"),
'instagram': HubotLozenge('instagram', logo='static/images/integrations/logos/instagram.png'),
'mailchimp': HubotLozenge('mailchimp', display_name='MailChimp', logo_alt='MailChimp logo'),
'translate': HubotLozenge('google-translate', display_name="Translate", logo_alt='Google Translate logo'),
'youtube': HubotLozenge('youtube', display_name='YouTube', logo_alt='YouTube logo')
}
for integration in WEBHOOK_INTEGRATIONS:
INTEGRATIONS[integration.name] = integration
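# Minimal sketch of turning the registry above into Django URL patterns; the
# variable name is illustrative, and in practice this wiring belongs in the
# project's URL configuration rather than in this module.
example_webhook_urlpatterns = [
    integration.url_object
    for integration in WEBHOOK_INTEGRATIONS
    if integration.url_object is not None
]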
|
|
# Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import Mock
from opencensus.metrics.export import cumulative, gauge, metric_descriptor
from opencensus.metrics.export import value as value_module
class TestCumulativePointLong(unittest.TestCase):
def test_init(self):
point = cumulative.CumulativePointLong()
self.assertEqual(point.value, 0)
self.assertIsInstance(point.value, int)
def test_add(self):
point = cumulative.CumulativePointLong()
point.add(10)
self.assertEqual(point.value, 10)
point.add(-1)
self.assertEqual(point.value, 10)
        # Check that float args raise an error rather than being ignored or added
with self.assertRaises(ValueError):
point.add(-1.0)
with self.assertRaises(ValueError):
point.add(10.0)
def test_get_value(self):
point = cumulative.CumulativePointLong()
point.add(10)
self.assertEqual(point.value, 10)
self.assertEqual(point.get_value(), point.value)
class TestCumulativePointDouble(unittest.TestCase):
def test_init(self):
point = cumulative.CumulativePointDouble()
self.assertEqual(point.value, 0.0)
self.assertIsInstance(point.value, float)
def test_add(self):
point = cumulative.CumulativePointDouble()
point.add(10)
self.assertEqual(point.value, 10.0)
point.add(-1.0)
self.assertEqual(point.value, 10.0)
def test_get_value(self):
point = cumulative.CumulativePointDouble()
point.add(10.1)
self.assertEqual(point.value, 10.1)
self.assertEqual(point.get_value(), point.value)
# TestLongCumulative and TestDoubleCumulative check that the reported value of
# a cumulative never decreases. We rely on the other tests to check the
# behavior these classes have in common with gauges.
class TestLongCumulative(unittest.TestCase):
def test_get_metric(self):
name = Mock()
description = Mock()
unit = Mock()
        label_keys = [Mock(), Mock()]
long_cumulative = cumulative.LongCumulative(
name, description, unit, label_keys)
self.assertEqual(
long_cumulative.descriptor.type,
metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64)
timestamp = Mock()
null_metric = long_cumulative.get_metric(timestamp)
self.assertIsNone(null_metric)
lv1 = [Mock(), Mock()]
lv2 = [Mock(), Mock()]
point1 = long_cumulative.get_or_create_time_series(lv1)
point2 = long_cumulative.get_or_create_time_series(lv2)
metric = long_cumulative.get_metric(timestamp)
self.assertEqual(metric.descriptor, long_cumulative.descriptor)
self.assertEqual(len(metric.time_series), 2)
self.assertEqual(len(metric.time_series[0].points), 1)
self.assertEqual(len(metric.time_series[1].points), 1)
self.assertIsInstance(metric.time_series[0].points[0].value,
value_module.ValueLong)
self.assertIsInstance(metric.time_series[1].points[0].value,
value_module.ValueLong)
self.assertEqual(metric.time_series[0].points[0].value.value, 0)
self.assertEqual(metric.time_series[1].points[0].value.value, 0)
timestamp2 = Mock()
point1.add(2)
point2.add(4)
metric = long_cumulative.get_metric(timestamp2)
self.assertEqual(metric.descriptor, long_cumulative.descriptor)
self.assertEqual(len(metric.time_series), 2)
self.assertEqual(len(metric.time_series[0].points), 1)
self.assertEqual(len(metric.time_series[1].points), 1)
self.assertIsInstance(metric.time_series[0].points[0].value,
value_module.ValueLong)
self.assertIsInstance(metric.time_series[1].points[0].value,
value_module.ValueLong)
self.assertEqual(metric.time_series[0].points[0].value.value, 2)
self.assertEqual(metric.time_series[1].points[0].value.value, 4)
timestamp3 = Mock()
point1.add(1)
point2.add(-1)
metric = long_cumulative.get_metric(timestamp3)
self.assertEqual(metric.descriptor, long_cumulative.descriptor)
self.assertEqual(len(metric.time_series), 2)
self.assertEqual(len(metric.time_series[0].points), 1)
self.assertEqual(len(metric.time_series[1].points), 1)
self.assertIsInstance(metric.time_series[0].points[0].value,
value_module.ValueLong)
self.assertIsInstance(metric.time_series[1].points[0].value,
value_module.ValueLong)
self.assertEqual(metric.time_series[0].points[0].value.value, 3)
self.assertEqual(metric.time_series[1].points[0].value.value, 4)
class TestDoubleCumulative(unittest.TestCase):
def test_get_metric(self):
name = Mock()
description = Mock()
unit = Mock()
        label_keys = [Mock(), Mock()]
double_cumulative = cumulative.DoubleCumulative(
name, description, unit, label_keys)
self.assertEqual(
double_cumulative.descriptor.type,
metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE)
timestamp = Mock()
null_metric = double_cumulative.get_metric(timestamp)
self.assertIsNone(null_metric)
lv1 = [Mock(), Mock()]
lv2 = [Mock(), Mock()]
point1 = double_cumulative.get_or_create_time_series(lv1)
point2 = double_cumulative.get_or_create_time_series(lv2)
metric = double_cumulative.get_metric(timestamp)
self.assertEqual(metric.descriptor, double_cumulative.descriptor)
self.assertEqual(len(metric.time_series), 2)
self.assertEqual(len(metric.time_series[0].points), 1)
self.assertEqual(len(metric.time_series[1].points), 1)
self.assertIsInstance(metric.time_series[0].points[0].value,
value_module.ValueDouble)
self.assertIsInstance(metric.time_series[1].points[0].value,
value_module.ValueDouble)
self.assertEqual(metric.time_series[0].points[0].value.value, 0)
self.assertEqual(metric.time_series[1].points[0].value.value, 0)
timestamp2 = Mock()
point1.add(2.125)
point2.add(1.125)
metric = double_cumulative.get_metric(timestamp2)
self.assertEqual(metric.descriptor, double_cumulative.descriptor)
self.assertEqual(len(metric.time_series), 2)
self.assertEqual(len(metric.time_series[0].points), 1)
self.assertEqual(len(metric.time_series[1].points), 1)
self.assertIsInstance(metric.time_series[0].points[0].value,
value_module.ValueDouble)
self.assertIsInstance(metric.time_series[1].points[0].value,
value_module.ValueDouble)
self.assertEqual(metric.time_series[0].points[0].value.value, 2.125)
self.assertEqual(metric.time_series[1].points[0].value.value, 1.125)
timestamp3 = Mock()
point1.add(-1.125)
point2.add(1.125)
metric = double_cumulative.get_metric(timestamp3)
self.assertEqual(metric.descriptor, double_cumulative.descriptor)
self.assertEqual(len(metric.time_series), 2)
self.assertEqual(len(metric.time_series[0].points), 1)
self.assertEqual(len(metric.time_series[1].points), 1)
self.assertIsInstance(metric.time_series[0].points[0].value,
value_module.ValueDouble)
self.assertIsInstance(metric.time_series[1].points[0].value,
value_module.ValueDouble)
self.assertEqual(metric.time_series[0].points[0].value.value, 2.125)
self.assertEqual(metric.time_series[1].points[0].value.value, 2.25)
class TestDerivedLongCumulative(unittest.TestCase):
def test_ts_point_type(self):
derived_cumulative = cumulative.DerivedLongCumulative(
            Mock(), Mock(), Mock(), [Mock(), Mock()])
mock_fn = Mock()
default_point = derived_cumulative.create_default_time_series(mock_fn)
self.assertIsInstance(default_point, gauge.DerivedGaugePoint)
self.assertIsInstance(default_point.gauge_point,
cumulative.CumulativePointLong)
mock_fn.assert_not_called()
point = derived_cumulative.create_time_series(
[Mock(), Mock()], mock_fn)
self.assertIsInstance(point, gauge.DerivedGaugePoint)
self.assertIsInstance(point.gauge_point,
cumulative.CumulativePointLong)
mock_fn.assert_not_called()
def test_get_metric(self):
derived_cumulative = cumulative.DerivedLongCumulative(
Mock(), Mock(), Mock(), [])
mock_fn = Mock()
mock_fn.return_value = 123
derived_cumulative.create_default_time_series(mock_fn)
now1 = Mock()
[ts] = derived_cumulative.get_metric(now1).time_series
[ts_point] = ts.points
self.assertEqual(ts_point.timestamp, now1)
self.assertEqual(ts_point.value.value, 123)
self.assertIsInstance(ts_point.value, value_module.ValueLong)
def test_point_value_increases(self):
derived_cumulative = cumulative.DerivedLongCumulative(
Mock(), Mock(), Mock(), [])
mock_fn = Mock()
point = derived_cumulative.create_default_time_series(mock_fn)
mock_fn.return_value = -10
self.assertEqual(point.get_value(), 0)
mock_fn.return_value = 10
self.assertEqual(point.get_value(), 10)
mock_fn.return_value = 9
self.assertEqual(point.get_value(), 10)
def test_raise_on_float(self):
derived_cumulative = cumulative.DerivedLongCumulative(
Mock(), Mock(), Mock(), [])
mock_fn = Mock()
mock_fn.return_value = 1.125
point = derived_cumulative.create_default_time_series(mock_fn)
with self.assertRaises(ValueError):
point.get_value()
class TestDerivedDoubleCumulative(unittest.TestCase):
def test_ts_point_type(self):
derived_cumulative = cumulative.DerivedDoubleCumulative(
            Mock(), Mock(), Mock(), [Mock(), Mock()])
mock_fn = Mock()
default_point = derived_cumulative.create_default_time_series(mock_fn)
self.assertIsInstance(default_point, gauge.DerivedGaugePoint)
self.assertIsInstance(default_point.gauge_point,
cumulative.CumulativePointDouble)
mock_fn.assert_not_called()
point = derived_cumulative.create_time_series(
[Mock(), Mock()], mock_fn)
self.assertIsInstance(point, gauge.DerivedGaugePoint)
self.assertIsInstance(point.gauge_point,
cumulative.CumulativePointDouble)
mock_fn.assert_not_called()
def test_get_metric(self):
derived_cumulative = cumulative.DerivedDoubleCumulative(
Mock(), Mock(), Mock(), [])
mock_fn = Mock()
mock_fn.return_value = 1.23
derived_cumulative.create_default_time_series(mock_fn)
now1 = Mock()
[ts] = derived_cumulative.get_metric(now1).time_series
[ts_point] = ts.points
self.assertEqual(ts_point.timestamp, now1)
self.assertEqual(ts_point.value.value, 1.23)
self.assertIsInstance(ts_point.value, value_module.ValueDouble)
def test_point_value_increases(self):
derived_cumulative = cumulative.DerivedDoubleCumulative(
Mock(), Mock(), Mock(), [])
mock_fn = Mock()
point = derived_cumulative.create_default_time_series(mock_fn)
mock_fn.return_value = -1.1
self.assertEqual(point.get_value(), 0)
mock_fn.return_value = 2.3
self.assertEqual(point.get_value(), 2.3)
mock_fn.return_value = 1.2
self.assertEqual(point.get_value(), 2.3)
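# Illustrative sketch, not one of the test cases above: the clamping behavior
# that the cumulative point tests verify, runnable only when this file is
# executed directly.
if __name__ == '__main__':
    demo_point = cumulative.CumulativePointLong()
    demo_point.add(5)
    demo_point.add(-3)  # ignored: cumulative values never decrease
    assert demo_point.get_value() == 5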
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from ..node import Node
from ...visuals.mesh import MeshVisual
from ...visuals.transforms import STTransform
from ...util.event import Event
from ...geometry import Rect
from ...color import Color
class Widget(Node):
""" A widget takes up a rectangular space, intended for use in
a 2D pixel coordinate frame.
The widget is positioned using the transform attribute (as any
node), and its extent (size) is kept as a separate property.
Parameters
----------
pos : (x, y)
A 2-element tuple to specify the top left corner of the widget.
size : (w, h)
        A 2-element tuple to specify the size of the widget.
border_color : color
The color of the border.
bgcolor : color
The background color.
    clip : bool
        Not currently used; clipping of child widgets is not yet implemented.
padding : int
The amount of padding in the widget (i.e. the space reserved between
the contents and the border).
margin : int
The margin to keep outside the widget's border.
"""
def __init__(self, pos=(0, 0), size=(10, 10), border_color=None,
bgcolor=None, clip=False, padding=0, margin=0, **kwargs):
Node.__init__(self, **kwargs)
# For drawing border.
# A mesh is required because GL lines cannot be drawn with predictable
# shape across all platforms.
self._border_color = self._bgcolor = Color(None)
self._face_colors = None
self._visual = MeshVisual(mode='triangles')
self._visual.set_gl_state('translucent', depth_test=False)
# whether this widget should clip its children
# (todo)
self._clip = clip
# reserved space inside border
self._padding = padding
# reserved space outside border
self._margin = margin
self.events.add(resize=Event)
self._size = 16, 16
self.transform = STTransform()
# todo: TTransform (translate only for widgets)
self._widgets = []
self.pos = pos
self.size = size
self.border_color = border_color
self.bgcolor = bgcolor
@property
def pos(self):
return tuple(self.transform.translate[:2])
@pos.setter
def pos(self, p):
assert isinstance(p, tuple)
assert len(p) == 2
if p == self.pos:
return
self.transform.translate = p[0], p[1], 0, 0
self._update_line()
#self.events.resize()
@property
def size(self):
# Note that we cannot let the size be reflected in the transform.
        # Consider a 40x40 widget in a pixel grid; a child widget therein
        # with size 20x20 would get a scale of 800x800!
return self._size
@size.setter
def size(self, s):
assert isinstance(s, tuple)
assert len(s) == 2
if self._size == s:
return
self._size = s
self._update_line()
self.events.resize()
self._update_child_widgets()
@property
def rect(self):
return Rect((0, 0), self.size)
@rect.setter
def rect(self, r):
with self.events.resize.blocker():
self.pos = r.pos
self.size = r.size
self.update()
self.events.resize()
@property
def inner_rect(self):
"""The rectangular area inside the margin, border and padding.
        Generally, widgets should avoid drawing or placing child widgets
        outside this rectangle.
"""
m = self.margin + self.padding
if not self.border_color.is_blank:
m += 1
return Rect((m, m), (self.size[0]-2*m, self.size[1]-2*m))
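    # Worked example (values chosen for illustration): with size=(100, 80),
    # margin=2, padding=3 and a visible border, m = 2 + 3 + 1 = 6, so
    # inner_rect is Rect((6, 6), (88, 68)).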
@property
def border_color(self):
""" The color of the border.
"""
return self._border_color
@border_color.setter
def border_color(self, b):
self._border_color = Color(b)
self._update_colors()
self._update_line()
self.update()
@property
def bgcolor(self):
""" The background color of the Widget.
"""
return self._bgcolor
@bgcolor.setter
def bgcolor(self, value):
self._bgcolor = Color(value)
self._update_colors()
self._update_line()
self.update()
@property
def margin(self):
return self._margin
@margin.setter
def margin(self, m):
self._margin = m
self._update_line()
@property
def padding(self):
return self._padding
@padding.setter
def padding(self, p):
self._padding = p
self._update_child_widgets()
def _update_line(self):
""" Update border line to match new shape """
w = 1 # XXX Eventually this can be a parameter
m = int(self.margin)
# border is drawn within the boundaries of the widget:
#
# size = (8, 7) margin=2
# internal rect = (3, 3, 2, 1)
# ........
# ........
# ..BBBB..
# ..B B..
# ..BBBB..
# ........
# ........
#
l = b = m
r = int(self.size[0]) - m
t = int(self.size[1]) - m
pos = np.array([
[l, b], [l+w, b+w],
[r, b], [r-w, b+w],
[r, t], [r-w, t-w],
[l, t], [l+w, t-w],
], dtype=np.float32)
faces = np.array([
[0, 2, 1],
[1, 2, 3],
[2, 4, 3],
[3, 5, 4],
[4, 5, 6],
[5, 7, 6],
[6, 0, 7],
[7, 0, 1],
[5, 3, 1],
[1, 5, 7],
], dtype=np.int32)
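        # Faces 0-7 form the border ring (outer/inner vertex pairs); faces 8-9
        # fill the interior quad. Slice out the border triangles when the border
        # color is blank, and stop before the interior triangles when the
        # background is blank.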
start = 8 if self._border_color.is_blank else 0
stop = 8 if self._bgcolor.is_blank else 10
face_colors = None
if self._face_colors is not None:
face_colors = self._face_colors[start:stop]
self._visual.set_data(vertices=pos, faces=faces[start:stop],
face_colors=face_colors)
def _update_colors(self):
self._face_colors = np.concatenate(
(np.tile(self.border_color.rgba, (8, 1)),
np.tile(self.bgcolor.rgba, (2, 1)))).astype(np.float32)
def draw(self, event):
"""Draw the widget borders
Parameters
----------
event : instance of Event
The event containing the transforms.
"""
if self.border_color.is_blank and self.bgcolor.is_blank:
return
self._visual.draw(event)
def on_resize(self, event):
"""On resize handler
Parameters
----------
event : instance of Event
The resize event.
"""
self._update_child_widgets()
def _update_child_widgets(self):
# Set the position and size of child boxes (only those added
# using add_widget)
for ch in self._widgets:
ch.rect = self.rect.padded(self.padding + self.margin)
def add_widget(self, widget):
"""
Add a Widget as a managed child of this Widget.
The child will be
automatically positioned and sized to fill the entire space inside
this Widget (unless _update_child_widgets is redefined).
Parameters
----------
widget : instance of Widget
The widget to add.
Returns
-------
widget : instance of Widget
The widget.
"""
self._widgets.append(widget)
widget.parent = self
self._update_child_widgets()
return widget
def add_grid(self, *args, **kwargs):
"""
Create a new Grid and add it as a child widget.
All arguments are given to Grid().
"""
from .grid import Grid
grid = Grid(*args, **kwargs)
return self.add_widget(grid)
def add_view(self, *args, **kwargs):
"""
Create a new ViewBox and add it as a child widget.
All arguments are given to ViewBox().
"""
from .viewbox import ViewBox
view = ViewBox(*args, **kwargs)
return self.add_widget(view)
def remove_widget(self, widget):
"""
Remove a Widget as a managed child of this Widget.
Parameters
----------
widget : instance of Widget
The widget to remove.
"""
self._widgets.remove(widget)
widget.remove_parent(self)
self._update_child_widgets()
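# Minimal usage sketch, kept as comments because this module uses relative
# imports and is not meant to be run directly; SceneCanvas and app are assumed
# from vispy's public API and are not defined here:
#
#     from vispy import app, scene
#     canvas = scene.SceneCanvas(keys='interactive', show=True)
#     root = Widget(pos=(10, 10), size=(200, 150), border_color='yellow',
#                   bgcolor='gray', padding=5, parent=canvas.scene)
#     root.add_widget(Widget(bgcolor='white'))  # child fills root's padded rect
#     app.run()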
|
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import os
from pyface.confirmation_dialog import confirm
from pyface.constant import YES
from pyface.message_dialog import information
from pyface.tasks.action.task_action import TaskAction
from traitsui.menu import Action
from pychron.envisage.resources import icon
from pychron.envisage.ui_actions import UIAction, UITaskAction
class EditorAction(TaskAction):
enabled_name = "active_editor"
class FigureAction(TaskAction):
enabled_name = "is_figure_editor"
class IdentifyPeaksDemoAction(TaskAction):
name = "Id peaks"
method = "identify_peaks"
class SavePipelineTemplateAction(TaskAction):
name = "Save Pipeline Template"
method = "save_pipeline_template"
class RunAction(TaskAction):
name = "Run"
method = "run"
image = icon("start")
visible_name = "engine.run_enabled"
accelerator = "Ctrl+R"
class ResumeAction(TaskAction):
name = "Resume"
method = "resume"
image = icon("edit-redo-3")
visible_name = "engine.resume_enabled"
class RunFromAction(TaskAction):
name = "Run From"
method = "run_from"
image = icon("start")
class ResetAction(TaskAction):
name = "Reset"
method = "reset"
image = icon("arrow_refresh")
class ClearAction(TaskAction):
name = "Clear"
method = "clear"
image = icon("clear")
class SwitchToBrowserAction(TaskAction):
name = "To Browser"
method = "switch_to_browser"
image = icon("start")
class ConfigureRecallAction(UITaskAction):
name = "Recall Configuration..."
method = "configure_recall"
image = icon("cog")
class PlayVideoAction(UITaskAction):
name = "Video"
method = "play_analysis_video"
# image = icon('cog')
# class ConfigureAnalysesTableAction(TaskAction):
# name = 'Configure Analyses Table'
# dname = 'Configure Analyses Table'
# method = 'configure_analyses_table'
# image = icon('cog')
#
#
# class ConfigureSampleTableAction(TaskAction):
# name = 'Configure Sample Table'
# dname = 'Configure Sample Table'
# method = 'configure_sample_table'
# image = icon('cog')
class LoadReviewStatusAction(TaskAction):
name = "Review Status"
method = "load_review_status"
image = icon("check_boxes")
class EditAnalysisAction(TaskAction):
name = "Edit Analysis"
method = "edit_analysis"
image = icon("application-form-edit")
class DiffViewAction(TaskAction):
name = "Diff View"
method = "diff_analysis"
image = icon("edit_diff")
enabled_name = "diff_enabled"
class TabularViewAction(TaskAction):
name = "Tabular View"
method = "tabular_view"
image = icon("table")
class PipelineAction(UIAction):
def perform(self, event):
app = event.task.window.application
task = app.get_task("pychron.pipeline.task")
if hasattr(task, self.action):
getattr(task, self.action)()
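# PipelineAction (and BrowserAction below) subclasses only declare a `name` and
# an `action` string; perform() looks that string up as a method on the target
# task and calls it, so adding a new action is a two-attribute class definition.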
class BrowserAction(Action):
_task_id = "pychron.browser.task"
def perform(self, event):
task = self._get_task(event)
if hasattr(task, self.action):
getattr(task, self.action)()
def _get_task(self, event):
app = event.task.window.application
task = app.get_task(self._task_id)
return task
class RecallAction(PipelineAction):
name = "Recall..."
action = "pipeline_recall"
class InterpretedAgeRecallAction(PipelineAction):
name = "Interpreted Age Recall..."
action = "pipeline_interpreted_age_recall"
class TimeViewBrowserAction(BrowserAction):
name = "Time View Recall..."
action = "open_time_view_browser"
class ReductionAction(PipelineAction):
pass
class IsoEvolutionAction(PipelineAction):
name = "Isotope Evolutions"
action = "set_isotope_evolutions_template"
class BlanksAction(PipelineAction):
name = "Blanks"
action = "set_blanks_template"
class ICFactorAction(PipelineAction):
name = "ICFactor"
action = "set_icfactor_template"
class FluxAction(PipelineAction):
name = "Flux"
action = "set_flux_template"
class FreezeProductionRatios(PipelineAction):
name = "Freeze Production Ratios"
action = "freeze_production_ratios"
class FreezeFlux(PipelineAction):
name = "Freeze Flux"
action = "freeze_flux"
class AnalysisTableAction(PipelineAction):
name = "Analysis Table"
action = "set_analysis_table_template"
class PipelineRecallAction(TaskAction):
name = "Recall"
method = "pipeline_recall"
class ClearAnalysisSetsAction(UIAction):
name = "Clear Analysis Sets"
def perform(self, event):
from pychron.paths import paths
p = paths.hidden_path("analysis_sets")
if os.path.isfile(p):
if (
confirm(None, "Are you sure you want to clear the Analysis Sets?")
== YES
):
os.remove(p)
else:
information(None, "No Analysis Sets to remove")
# ============= Plotting Actions =============================================
class ResetFactoryDefaultsAction(UIAction):
name = "Reset Factory Defaults"
def perform(self, event):
from pychron.paths import paths
        if confirm(None, "Are you sure you want to reset to Factory Default settings?"):
paths.reset_plot_factory_defaults()
class PlotAction(PipelineAction):
pass
class IdeogramAction(PlotAction):
name = "Ideogram"
action = "set_ideogram_template"
image = icon("histogram")
accelerator = "Ctrl+i"
class SubgroupIdeogramAction(PlotAction):
name = "SubGroup Ideogram"
action = "set_subgroup_ideogram_template"
image = icon("histogram")
class HybridIdeogramAction(PlotAction):
name = "Hybrid Ideogram"
action = "set_hybrid_ideogram_template"
image = icon("histogram")
class HistoryIdeogramAction(PlotAction):
name = "History Ideogram"
action = "set_history_ideogram_template"
image = icon("histogram")
class SpectrumAction(PlotAction):
name = "Spectrum"
action = "set_spectrum_template"
accelerator = "Ctrl+D"
# image = icon('histogram')
class IsochronAction(PlotAction):
name = "Isochron"
action = "set_isochron_template"
# image = icon('histogram')
class InverseIsochronAction(PlotAction):
name = "InverseIsochron"
action = "set_inverse_isochron_template"
class SeriesAction(PlotAction):
name = "Series"
action = "set_series_template"
id = "pychron.series"
class VerticalFluxAction(PipelineAction):
name = "Vertical Flux"
action = "set_vertical_flux_template"
class ExtractionAction(UIAction):
name = "Extraction Results..."
def perform(self, event):
app = event.task.window.application
windows = app.windows
for tid in ("pychron.browser.task", "pychron.pipeline.task"):
for win in windows:
task = win.active_task
if task and task.id == tid:
getattr(task, "show_extraction_graph")()
break
class MassSpecReducedAction(PipelineAction):
name = "Mass Spec Reduced Transfer"
action = "mass_spec_reduced_transfer"
class ImportOptionsActions(PipelineAction):
name = "Import Options..."
action = "import_options"
# ============= Quick Series ====================================
# class LastNAnalysesSeriesAction(PipelineAction):
# name = 'Last N...'
# action = 'set_last_n_analyses_template'
#
#
# class LastNHoursSeriesAction(PipelineAction):
# name = 'Last N Hours...'
# action = 'set_last_n_hours_template'
#
#
# class LastDaySeriesAction(PipelineAction):
# name = 'Last Day'
# action = 'set_last_day_template'
#
#
# class LastWeekSeriesAction(PipelineAction):
# name = 'Last Week'
# action = 'set_last_week_template'
#
#
# class LastMonthSeriesAction(PipelineAction):
# name = 'Last Month'
# action = 'set_last_month_template'
# ============= tag =============================================
class TagAction(TaskAction):
name = "Tag..."
dname = "Tag"
# accelerator = 'Ctrl+Shift+t'
method = "set_tag"
image = icon("tag-blue-add")
id = "pychron.tag"
class SetInvalidAction(TaskAction):
name = "Set Invalid"
method = "set_invalid"
image = icon("edit-delete-2")
class SetFilteringTagAction(TaskAction):
name = "Set Filtering Tag"
method = "set_filtering_tag"
image = icon("flag")
# ============= Interpreted Age =================================
class SetInterpretedAgeAction(TaskAction):
name = "Set Interpreted Age"
method = "set_interpreted_age"
enabled_name = "set_interpreted_enabled"
image = icon("brick-add")
class SavePDFAction(FigureAction):
name = "Save PDF"
method = "save_figure_pdf"
image = icon("file_pdf")
class SaveFigureAction(FigureAction):
name = "Save Figure"
method = "save_figure"
class PrintFigureAction(FigureAction):
name = "Print"
method = "print_figure"
image = icon("printer")
class SaveTableAction(TaskAction):
name = "Save Table"
method = "save_table"
image = icon("table_save")
enabled_name = "set_interpreted_enabled"
# ============= EOF =============================================
|