"""
Utilities for working with data format specifications.
See :ref:`data_specs` for a high level overview of the relevant concepts.
"""
from collections import Sized
from pylearn2.space import CompositeSpace, NullSpace, Space
from pylearn2.utils import safe_zip
class DataSpecsMapping(object):
"""
Converts between nested tuples and non-redundant flattened ones.
The mapping is built from data specifications, provided as a
(space, sources) pair, where space can be a composite space (possibly
of other composite spaces), and sources is a tuple of string identifiers
or other sources. Both space and sources must have the same structure.
Parameters
----------
    data_specs : (space, source) pair
        The data specifications the mapping is built from. `space` may be
        a (possibly nested) CompositeSpace, and `source` must be a string
        or a nested tuple of strings with the same structure.
Attributes
----------
specs_to_index : dict
Maps one elementary (not composite) data_specs pair to its
        index in the flattened space. Not sure if this one should
        be a member, or passed as a parameter to _fill_mapping. It
        might be useful to get the index of one data_specs pair
        later; if it is not, it should be removed.
"""
#might be useful to get the index of one data_specs later
#but if it is not, then we should remove it.
def __init__(self, data_specs):
self.specs_to_index = {}
# Size of the flattened space
self.n_unique_specs = 0
# Builds the mapping
space, source = data_specs
        assert isinstance(space, Space), 'Given space: ' + str(space) + \
            ' was not an instance of Space.'
self.spec_mapping = self._fill_mapping(space, source)
def _fill_mapping(self, space, source):
"""
Builds a nested tuple of integers representing the mapping
Parameters
----------
        space : Space
            A Space, possibly a CompositeSpace of other Spaces.
        source : str or nested tuple of str
            Source identifiers with the same structure as `space`.

        Returns
        -------
        mapping : int, nested tuple of int, or None
            The index of each elementary (space, source) pair in the
            flattened representation, or None for a NullSpace.
"""
if isinstance(space, NullSpace):
# This Space does not contain any data, and should not
# be mapped to anything
assert source == ''
return None
elif not isinstance(space, CompositeSpace):
# Space is a simple Space, source should be a simple source
if isinstance(source, (tuple, list)):
source, = source
# If (space, source) has not already been seen, insert it.
# We need both the space and the source to match.
if (space, source) in self.specs_to_index:
spec_index = self.specs_to_index[(space, source)]
else:
spec_index = self.n_unique_specs
self.specs_to_index[(space, source)] = spec_index
self.n_unique_specs += 1
return spec_index
else:
# Recursively fill the mapping, and return it
spec_mapping = tuple(
self._fill_mapping(sub_space, sub_source)
for sub_space, sub_source in safe_zip(
space.components, source))
return spec_mapping
def _fill_flat(self, nested, mapping, rval):
"""
Auxiliary recursive function used by self.flatten
Parameters
----------
        nested : nested tuple or Space
            The nested data (or Space) being flattened.
        mapping : int, nested tuple of int, or None
            The part of self.spec_mapping corresponding to `nested`.
        rval : list
            The flat list being filled in place, with one slot per
            unique (space, source) pair.
"""
if isinstance(nested, CompositeSpace):
nested = tuple(nested.components)
if mapping is None:
# The corresponding Space was a NullSpace, which does
# not correspond to actual data, so nested should evaluate
# to False, and should not be included in the flattened version
if not isinstance(nested, NullSpace):
assert not nested, ("The following element is mapped to "
"NullSpace, so it should evaluate to False (for instance, "
"None, an empty string or an empty tuple), but is %s"
% nested)
return
if isinstance(mapping, int):
# "nested" should actually be a single element
idx = mapping
if isinstance(nested, (tuple, list)):
if len(nested) != 1:
raise ValueError("When mapping is an int, we expect "
"nested to be a single element. But mapping is "
+ str(mapping) + " and nested is a tuple of "
"length " + str(len(nested)))
nested, = nested
if rval[idx] is None:
rval[idx] = nested
else:
assert rval[idx] == nested, ("This mapping was built "
"with the same element occurring more than once "
"in the nested representation, but current nested "
"sequence has different values (%s and %s) at "
"these positions." % (rval[idx], nested))
else:
for sub_nested, sub_mapping in safe_zip(nested, mapping):
self._fill_flat(sub_nested, sub_mapping, rval)
def flatten(self, nested, return_tuple=False):
"""
Iterate jointly through nested and spec_mapping, returns a flat tuple.
The integer in spec_mapping corresponding to each element in nested
represents the index of that element in the returned sequence.
        If the original data_specs had duplicate elements at different places,
        then "nested" also has to have equal elements at these positions.
"nested" can be a nested tuple, or composite space. If it is a
composite space, a flattened composite space will be returned.
If `return_tuple` is True, a tuple is always returned (tuple of
non-composite Spaces if nested is a Space, empty tuple if all
Spaces are NullSpaces, length-1 tuple if there is only one
non-composite Space, etc.).
Parameters
----------
        nested : nested tuple or Space
            The nested data (or Space) to flatten; its structure must match
            the data_specs this mapping was built from.
        return_tuple : bool, optional
            If True, always return a tuple, even when it would contain
            zero or one element.

        Returns
        -------
        flat : tuple, single element, or CompositeSpace
            The flattened, non-redundant version of `nested`.
"""
# Initialize the flatten returned value with Nones
rval = [None] * self.n_unique_specs
# Fill rval with the auxiliary function
self._fill_flat(nested, self.spec_mapping, rval)
assert None not in rval, ("This mapping is invalid, as it did not "
"contain all numbers from 0 to %i (or None was in nested), "
"nested: %s" % (self.n_unique_specs - 1, nested))
if return_tuple:
return tuple(rval)
# else, return something close to the type of nested
if len(rval) == 1:
return rval[0]
if isinstance(nested, (tuple, list)):
return tuple(rval)
elif isinstance(nested, Space):
return CompositeSpace(rval)
def _make_nested_tuple(self, flat, mapping):
"""
Auxiliary recursive function used by self.nest
Parameters
----------
        flat : tuple, list, or single element
            The flattened data to nest.
        mapping : int, nested tuple of int, or None
            The part of self.spec_mapping being reconstructed.

        Returns
        -------
        nested : nested tuple, single element, or None
            The elements of `flat` rearranged according to `mapping`.
"""
if mapping is None:
# The corresponding space was a NullSpace,
# and there is no corresponding value in flat,
# we use None as a placeholder
return None
if isinstance(mapping, int):
# We are at a leaf of the tree
idx = mapping
if isinstance(flat, (tuple, list)):
assert 0 <= idx < len(flat)
return flat[idx]
else:
assert idx == 0
return flat
else:
return tuple(
self._make_nested_tuple(flat, sub_mapping)
for sub_mapping in mapping)
def _make_nested_space(self, flat, mapping):
"""
Auxiliary recursive function used by self.nest
Parameters
----------
        flat : Space
            The flattened Space (possibly a non-nested CompositeSpace).
        mapping : int or nested tuple of int
            The part of self.spec_mapping being reconstructed.

        Returns
        -------
        nested : Space
            A (possibly nested) CompositeSpace matching `mapping`.
"""
if isinstance(mapping, int):
# We are at a leaf of the tree
idx = mapping
if isinstance(flat, CompositeSpace):
assert 0 <= idx < len(flat.components)
return flat.components[idx]
else:
assert idx == 0
return flat
else:
return CompositeSpace([
self._make_nested_space(flat, sub_mapping)
for sub_mapping in mapping])
def nest(self, flat):
"""
Iterate through spec_mapping, building a nested tuple from "flat".
The length of "flat" should be equal to self.n_unique_specs.
Parameters
----------
flat : Space or tuple
            The flattened data (or Space) to nest; its length must equal
            self.n_unique_specs.

        Returns
        -------
        nested : nested tuple or CompositeSpace
            `flat` rearranged into the nested structure of the original
            data_specs.
"""
if isinstance(flat, Space):
if isinstance(flat, CompositeSpace):
assert len(flat.components) == self.n_unique_specs
else:
assert self.n_unique_specs == 1
return self._make_nested_space(flat, self.spec_mapping)
else:
if isinstance(flat, (list, tuple)):
assert len(flat) == self.n_unique_specs
else:
# flat is not iterable, this is valid only if spec_mapping
# contains only 0's, that is, when self.n_unique_specs == 1
assert self.n_unique_specs == 1
return self._make_nested_tuple(flat, self.spec_mapping)
def is_flat_space(space):
"""
Returns True for elementary Spaces and non-nested CompositeSpaces
Parameters
----------
    space : Space
        The Space to check.

    Returns
    -------
    is_flat : bool
        True if `space` is an elementary Space or a CompositeSpace whose
        components are all elementary.
"""
if isinstance(space, CompositeSpace):
for sub_space in space.components:
if isinstance(sub_space, CompositeSpace):
return False
elif not isinstance(space, Space):
raise TypeError("space is not a Space: %s (%s)"
% (space, type(space)))
return True
def is_flat_source(source):
"""
Returns True for a string or a non-nested tuple of strings
Parameters
----------
    source : str or tuple/list of str
        The source identifier(s) to check.

    Returns
    -------
    is_flat : bool
        True if `source` is a string or a non-nested tuple/list of strings.
"""
if isinstance(source, (tuple, list)):
for sub_source in source:
if isinstance(sub_source, (tuple, list)):
return False
elif not isinstance(source, str):
raise TypeError("source should be a string or a non-nested tuple/list "
"of strings: %s" % source)
return True
def is_flat_specs(data_specs):
"""
    Returns True if both the space and the source parts of `data_specs`
    are flat (see `is_flat_space` and `is_flat_source`).
"""
return is_flat_space(data_specs[0]) and is_flat_source(data_specs[1])
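# A minimal usage sketch (guarded so that importing this module stays
# side-effect free). It assumes pylearn2's VectorSpace; note that 'features'
# appears twice in the nested specification but is stored only once in the
# flattened form.
if __name__ == "__main__":
    from pylearn2.space import VectorSpace

    features = VectorSpace(dim=4)
    targets = VectorSpace(dim=2)
    space = CompositeSpace([features,
                            CompositeSpace([features, targets])])
    source = ('features', ('features', 'targets'))
    mapping = DataSpecsMapping((space, source))
    assert mapping.n_unique_specs == 2

    x, y = 'X-data', 'y-data'
    flat = mapping.flatten((x, (x, y)))
    assert flat == ('X-data', 'y-data')
    assert mapping.nest(flat) == (x, (x, y))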
|
|
from __future__ import division, print_function, absolute_import
import warnings
from . import _minpack
import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
transpose, zeros, product, greater, array,
all, where, isscalar, asarray, inf, abs,
finfo, inexact, issubdtype, dtype)
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
error = _minpack.error
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs,
output_shape=None):
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
if (output_shape is not None) and (shape(res) != output_shape):
if (output_shape[0] != 1):
if len(output_shape) > 1:
if output_shape[1] == 1:
return shape(res)
msg = "%s: there is a mismatch between the input and output " \
"shape of the '%s' argument" % (checker, argname)
func_name = getattr(thefunc, '__name__', None)
if func_name:
msg += " '%s'." % func_name
else:
msg += "."
raise TypeError(msg)
if issubdtype(res.dtype, inexact):
dt = res.dtype
else:
dt = dtype(float)
return shape(res), dt
def fsolve(func, x0, args=(), fprime=None, full_output=0,
col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
epsfcn=None, factor=100, diag=None):
"""
Find the roots of a function.
Return the roots of the (non-linear) equations defined by
``func(x) = 0`` given a starting estimate.
Parameters
----------
func : callable ``f(x, *args)``
A function that takes at least one (possibly vector) argument.
x0 : ndarray
The starting estimate for the roots of ``func(x) = 0``.
args : tuple, optional
Any extra arguments to `func`.
fprime : callable(x), optional
A function to compute the Jacobian of `func` with derivatives
across the rows. By default, the Jacobian will be estimated.
full_output : bool, optional
If True, return optional outputs.
col_deriv : bool, optional
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float, optional
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int, optional
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple, optional
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
epsfcn : float, optional
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`epsfcn` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence, optional
        N positive entries that serve as scale factors for the
variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for
an unsuccessful call).
infodict : dict
A dictionary of optional outputs with the keys:
``nfev``
number of function calls
``njev``
number of Jacobian calls
``fvec``
function evaluated at the output
``fjac``
the orthogonal matrix, q, produced by the QR
factorization of the final approximate Jacobian
matrix, stored column wise
``r``
upper triangular matrix produced by QR factorization
of the same matrix
``qtf``
the vector ``(transpose(q) * fvec)``
ier : int
An integer flag. Set to 1 if a solution was found, otherwise refer
to `mesg` for more information.
mesg : str
If no solution is found, `mesg` details the cause of failure.
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See the 'hybr' `method` in particular.
Notes
-----
``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
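
    Examples
    --------
    A small illustrative sketch: solve ``x0*cos(x1) = 4`` and
    ``x1*x0 - x1 = 5`` (the starting point is arbitrary).

    >>> import numpy as np
    >>> from scipy.optimize import fsolve
    >>> def equations(p):
    ...     x0, x1 = p
    ...     return [x0 * np.cos(x1) - 4, x1 * x0 - x1 - 5]
    >>> root = fsolve(equations, [1, 1])
    >>> np.allclose(equations(root), [0.0, 0.0])
    True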
"""
options = {'col_deriv': col_deriv,
'xtol': xtol,
'maxfev': maxfev,
'band': band,
'eps': epsfcn,
'factor': factor,
'diag': diag,
'full_output': full_output}
res = _root_hybr(func, x0, args, jac=fprime, **options)
if full_output:
x = res['x']
info = dict((k, res.get(k))
for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
info['fvec'] = res['fun']
return x, info, res['status'], res['message']
else:
return res['x']
def _root_hybr(func, x0, args=(), jac=None,
col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
factor=100, diag=None, full_output=0, **unknown_options):
"""
Find the roots of a multivariate function using MINPACK's hybrd and
hybrj routines (modified Powell method).
Options
-------
col_deriv : bool
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
eps : float
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`eps` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence
        N positive entries that serve as scale factors for the
variables.
"""
_check_unknown_options(unknown_options)
epsfcn = eps
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
if epsfcn is None:
epsfcn = finfo(dtype).eps
Dfun = jac
if Dfun is None:
if band is None:
ml, mu = -10, -10
else:
ml, mu = band[:2]
if maxfev == 0:
maxfev = 200 * (n + 1)
retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
ml, mu, epsfcn, factor, diag)
else:
_check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
if (maxfev == 0):
maxfev = 100 * (n + 1)
retval = _minpack._hybrj(func, Dfun, x0, args, 1,
col_deriv, xtol, maxfev, factor, diag)
x, status = retval[0], retval[-1]
errors = {0: ["Improper input parameters were entered.", TypeError],
1: ["The solution converged.", None],
2: ["The number of calls to function has "
"reached maxfev = %d." % maxfev, ValueError],
3: ["xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible." % xtol, ValueError],
4: ["The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.", ValueError],
5: ["The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.", ValueError],
'unknown': ["An error occurred.", TypeError]}
if status != 1 and not full_output:
if status in [2, 3, 4, 5]:
msg = errors[status][0]
warnings.warn(msg, RuntimeWarning)
else:
try:
raise errors[status][1](errors[status][0])
except KeyError:
raise errors['unknown'][1](errors['unknown'][0])
info = retval[1]
info['fun'] = info.pop('fvec')
sol = OptimizeResult(x=x, success=(status == 1), status=status)
sol.update(info)
try:
sol['message'] = errors[status][0]
except KeyError:
info['message'] = errors['unknown'][0]
return sol
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
"""
Minimize the sum of squares of a set of equations.
::
x = arg min(sum(func(y)**2,axis=0))
y
Parameters
----------
func : callable
should take at least one (possibly length N vector) argument and
returns M floating point numbers. It must not return NaNs or
fitting might fail.
x0 : ndarray
The starting estimate for the minimization.
args : tuple, optional
Any extra arguments to func are placed in this tuple.
Dfun : callable, optional
A function or method to compute the Jacobian of func with derivatives
across the rows. If this is None, the Jacobian will be estimated.
full_output : bool, optional
non-zero to return all optional outputs.
col_deriv : bool, optional
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float, optional
Relative error desired in the sum of squares.
xtol : float, optional
Relative error desired in the approximate solution.
gtol : float, optional
Orthogonality desired between the function vector and the columns of
the Jacobian.
maxfev : int, optional
The maximum number of calls to the function. If `Dfun` is provided
then the default `maxfev` is 100*(N+1) where N is the number of elements
in x0, otherwise the default `maxfev` is 200*(N+1).
epsfcn : float, optional
A variable used in determining a suitable step length for the forward-
difference approximation of the Jacobian (for Dfun=None).
Normally the actual step length will be sqrt(epsfcn)*x
If epsfcn is less than the machine precision, it is assumed that the
relative errors are of the order of the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence, optional
        N positive entries that serve as scale factors for the variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for an unsuccessful
call).
cov_x : ndarray
Uses the fjac and ipvt optional outputs to construct an
estimate of the jacobian around the solution. None if a
singular matrix encountered (indicates very flat curvature in
some direction). This matrix must be multiplied by the
residual variance to get the covariance of the
parameter estimates -- see curve_fit.
infodict : dict
        A dictionary of optional outputs with the keys:
``nfev``
The number of function calls
``fvec``
The function evaluated at the output
``fjac``
A permutation of the R matrix of a QR
factorization of the final approximate
Jacobian matrix, stored column wise.
Together with ipvt, the covariance of the
estimate can be approximated.
``ipvt``
An integer array of length N which defines
a permutation matrix, p, such that
fjac*p = q*r, where r is upper triangular
with diagonal elements of nonincreasing
magnitude. Column j of p is column ipvt(j)
of the identity matrix.
``qtf``
The vector (transpose(q) * fvec).
mesg : str
A string message giving information about the cause of failure.
ier : int
An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
found. Otherwise, the solution was not found. In either case, the
optional output variable 'mesg' gives more information.
Notes
-----
"leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
cov_x is a Jacobian approximation to the Hessian of the least squares
objective function.
This approximation assumes that the objective function is based on the
difference between some observed target data (ydata) and a (non-linear)
function of the parameters `f(xdata, params)` ::
func(params) = ydata - f(xdata, params)
so that the objective function is ::
min sum((ydata - f(xdata, params))**2, axis=0)
params
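
    Examples
    --------
    A small illustrative sketch: recover the slope and intercept of a
    noiseless straight line from its residuals.

    >>> import numpy as np
    >>> from scipy.optimize import leastsq
    >>> def residuals(p, x, y):
    ...     return y - (p[0] * x + p[1])
    >>> x = np.arange(10, dtype=float)
    >>> y = 3.0 * x + 2.0
    >>> p_opt, ier = leastsq(residuals, [1.0, 0.0], args=(x, y))
    >>> np.allclose(p_opt, [3.0, 2.0])
    True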
"""
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
m = shape[0]
if n > m:
raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
if epsfcn is None:
epsfcn = finfo(dtype).eps
if Dfun is None:
if maxfev == 0:
maxfev = 200*(n + 1)
retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
gtol, maxfev, epsfcn, factor, diag)
else:
if col_deriv:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
else:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
if maxfev == 0:
maxfev = 100 * (n + 1)
retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
ftol, xtol, gtol, maxfev, factor, diag)
errors = {0: ["Improper input parameters.", TypeError],
1: ["Both actual and predicted relative reductions "
"in the sum of squares\n are at most %f" % ftol, None],
2: ["The relative error between two consecutive "
"iterates is at most %f" % xtol, None],
3: ["Both actual and predicted relative reductions in "
"the sum of squares\n are at most %f and the "
"relative error between two consecutive "
"iterates is at \n most %f" % (ftol, xtol), None],
4: ["The cosine of the angle between func(x) and any "
"column of the\n Jacobian is at most %f in "
"absolute value" % gtol, None],
5: ["Number of calls to function has reached "
"maxfev = %d." % maxfev, ValueError],
6: ["ftol=%f is too small, no further reduction "
"in the sum of squares\n is possible.""" % ftol,
ValueError],
7: ["xtol=%f is too small, no further improvement in "
"the approximate\n solution is possible." % xtol,
ValueError],
8: ["gtol=%f is too small, func(x) is orthogonal to the "
"columns of\n the Jacobian to machine "
"precision." % gtol, ValueError],
'unknown': ["Unknown error.", TypeError]}
info = retval[-1] # The FORTRAN return value
if info not in [1, 2, 3, 4] and not full_output:
if info in [5, 6, 7, 8]:
warnings.warn(errors[info][0], RuntimeWarning)
else:
try:
raise errors[info][1](errors[info][0])
except KeyError:
raise errors['unknown'][1](errors['unknown'][0])
mesg = errors[info][0]
if full_output:
cov_x = None
if info in [1, 2, 3, 4]:
from numpy.dual import inv
from numpy.linalg import LinAlgError
perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
r = triu(transpose(retval[1]['fjac'])[:n, :])
R = dot(r, perm)
try:
cov_x = inv(dot(transpose(R), R))
except (LinAlgError, ValueError):
pass
return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
else:
return (retval[0], info)
def _general_function(params, xdata, ydata, function):
return function(xdata, *params) - ydata
def _weighted_general_function(params, xdata, ydata, function, weights):
return weights * (function(xdata, *params) - ydata)
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
check_finite=True, **kw):
"""
Use non-linear least squares to fit a function, f, to data.
Assumes ``ydata = f(xdata, *params) + eps``
Parameters
----------
f : callable
The model function, f(x, ...). It must take the independent
variable as the first argument and the parameters to fit as
separate remaining arguments.
    xdata : An M-length sequence or a (k,M)-shaped array
for functions with k predictors.
The independent variable where the data is measured.
ydata : M-length sequence
The dependent data --- nominally f(xdata, ...)
p0 : None, scalar, or N-length sequence, optional
Initial guess for the parameters. If None, then the initial
values will all be 1 (if the number of parameters for the function
can be determined using introspection, otherwise a ValueError
is raised).
sigma : None or M-length sequence, optional
If not None, the uncertainties in the ydata array. These are used as
weights in the least-squares problem
i.e. minimising ``np.sum( ((f(xdata, *popt) - ydata) / sigma)**2 )``
If None, the uncertainties are assumed to be 1.
absolute_sigma : bool, optional
If False, `sigma` denotes relative weights of the data points.
The returned covariance matrix `pcov` is based on *estimated*
errors in the data, and is not affected by the overall
magnitude of the values in `sigma`. Only the relative
magnitudes of the `sigma` values matter.
If True, `sigma` describes one standard deviation errors of
the input data points. The estimated covariance in `pcov` is
based on these values.
check_finite : bool, optional
        If True, check that the input arrays do not contain nans or infs,
and raise a ValueError if they do. Setting this parameter to
False may silently produce nonsensical results if the input arrays
do contain nans.
Default is True.
Returns
-------
popt : array
Optimal values for the parameters so that the sum of the squared error
of ``f(xdata, *popt) - ydata`` is minimized
pcov : 2d array
The estimated covariance of popt. The diagonals provide the variance
of the parameter estimate. To compute one standard deviation errors
on the parameters use ``perr = np.sqrt(np.diag(pcov))``.
How the `sigma` parameter affects the estimated covariance
depends on `absolute_sigma` argument, as described above.
Raises
------
OptimizeWarning
if covariance of the parameters can not be estimated.
ValueError
        if `ydata` or `xdata` contain NaNs.
See Also
--------
leastsq : Minimize the sum of squares of a set of equations.
stats.linregress : Calculate a linear least squares regression for two sets
of measurements.
Notes
-----
The algorithm uses the Levenberg-Marquardt algorithm through `leastsq`.
Additional keyword arguments are passed directly to that algorithm.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import curve_fit
>>> def func(x, a, b, c):
... return a * np.exp(-b * x) + c
>>> xdata = np.linspace(0, 4, 50)
>>> y = func(xdata, 2.5, 1.3, 0.5)
>>> ydata = y + 0.2 * np.random.normal(size=len(xdata))
>>> popt, pcov = curve_fit(func, xdata, ydata)
"""
if p0 is None:
# determine number of parameters by inspecting the function
from scipy._lib._util import getargspec_no_self as _getargspec
args, varargs, varkw, defaults = _getargspec(f)
if len(args) < 2:
msg = "Unable to determine number of fit parameters."
raise ValueError(msg)
p0 = [1.0] * (len(args)-1)
# Check input arguments
if isscalar(p0):
p0 = array([p0])
# NaNs can not be handled
if check_finite:
ydata = np.asarray_chkfinite(ydata)
else:
ydata = np.asarray(ydata)
if isinstance(xdata, (list, tuple, np.ndarray)):
# `xdata` is passed straight to the user-defined `f`, so allow
# non-array_like `xdata`.
if check_finite:
xdata = np.asarray_chkfinite(xdata)
else:
xdata = np.asarray(xdata)
args = (xdata, ydata, f)
if sigma is None:
func = _general_function
else:
func = _weighted_general_function
args += (1.0 / asarray(sigma),)
# Remove full_output from kw, otherwise we're passing it in twice.
return_full = kw.pop('full_output', False)
res = leastsq(func, p0, args=args, full_output=1, **kw)
(popt, pcov, infodict, errmsg, ier) = res
if ier not in [1, 2, 3, 4]:
msg = "Optimal parameters not found: " + errmsg
raise RuntimeError(msg)
warn_cov = False
if pcov is None:
# indeterminate covariance
pcov = zeros((len(popt), len(popt)), dtype=float)
pcov.fill(inf)
warn_cov = True
elif not absolute_sigma:
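        # Relative `sigma`: rescale the raw covariance by the reduced residual
        # variance (sum of squared residuals / degrees of freedom), matching
        # the `absolute_sigma=False` behaviour described in the docstring.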
if len(ydata) > len(p0):
s_sq = (asarray(func(popt, *args))**2).sum() / (len(ydata) - len(p0))
pcov = pcov * s_sq
else:
pcov.fill(inf)
warn_cov = True
if warn_cov:
warnings.warn('Covariance of the parameters could not be estimated',
category=OptimizeWarning)
if return_full:
return popt, pcov, infodict, errmsg, ier
else:
return popt, pcov
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
"""Perform a simple check on the gradient for correctness.
"""
x = atleast_1d(x0)
n = len(x)
x = x.reshape((n,))
fvec = atleast_1d(fcn(x, *args))
m = len(fvec)
fvec = fvec.reshape((m,))
ldfjac = m
fjac = atleast_1d(Dfcn(x, *args))
fjac = fjac.reshape((m, n))
if col_deriv == 0:
fjac = transpose(fjac)
xp = zeros((n,), float)
err = zeros((m,), float)
fvecp = None
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
fvecp = atleast_1d(fcn(xp, *args))
fvecp = fvecp.reshape((m,))
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
good = (product(greater(err, 0.5), axis=0))
return (good, err)
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500):
"""
Find a fixed point of the function.
Given a function of one or more variables and a starting point, find a
fixed-point of the function: i.e. where ``func(x0) == x0``.
Parameters
----------
func : function
Function to evaluate.
x0 : array_like
Fixed point of function.
args : tuple, optional
Extra arguments to `func`.
xtol : float, optional
Convergence tolerance, defaults to 1e-08.
maxiter : int, optional
Maximum number of iterations, defaults to 500.
Notes
-----
Uses Steffensen's Method using Aitken's ``Del^2`` convergence acceleration.
See Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
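    Each iteration evaluates ``p1 = func(p0)`` and ``p2 = func(p1)`` and
    accelerates with ``p = p0 - (p1 - p0)**2 / (p2 - 2*p1 + p0)``, stopping
    once the relative change ``(p - p0) / p0`` drops below `xtol`.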
Examples
--------
>>> from scipy import optimize
>>> def func(x, c1, c2):
... return np.sqrt(c1/(x+c2))
>>> c1 = np.array([10,12.])
>>> c2 = np.array([3, 5.])
>>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
array([ 1.4920333 , 1.37228132])
"""
if not isscalar(x0):
x0 = asarray(x0)
p0 = x0
for iter in range(maxiter):
p1 = func(p0, *args)
p2 = func(p1, *args)
d = p2 - 2.0 * p1 + p0
p = where(d == 0, p2, p0 - (p1 - p0)*(p1 - p0) / d)
relerr = where(p0 == 0, p, (p-p0)/p0)
if all(abs(relerr) < xtol):
return p
p0 = p
else:
p0 = x0
for iter in range(maxiter):
p1 = func(p0, *args)
p2 = func(p1, *args)
d = p2 - 2.0 * p1 + p0
if d == 0.0:
return p2
else:
p = p0 - (p1 - p0)*(p1 - p0) / d
if p0 == 0:
relerr = p
else:
relerr = (p - p0)/p0
if abs(relerr) < xtol:
return p
p0 = p
msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
raise RuntimeError(msg)
|
|
"""
test special properties (e.g. column_property, ...)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
from sqlalchemy import select, func
from sqlalchemy.orm import column_property
from elixir import *
def setup():
metadata.bind = 'sqlite://'
class TestSpecialProperties(object):
def teardown(self):
cleanup_all(True)
def test_lifecycle(self):
class A(Entity):
name = Field(String(20))
assert isinstance(A.name, Field)
setup_all()
assert not isinstance(A.name, Field)
def test_generic_property(self):
class Tag(Entity):
score1 = Field(Float)
score2 = Field(Float)
score = GenericProperty(
lambda c: column_property(
(c.score1 * c.score2).label('score')))
setup_all(True)
t1 = Tag(score1=5.0, score2=3.0)
t2 = Tag(score1=10.0, score2=2.0)
session.commit()
session.close()
for tag in Tag.query.all():
assert tag.score == tag.score1 * tag.score2
def test_column_property(self):
class Tag(Entity):
score1 = Field(Float)
score2 = Field(Float)
score = ColumnProperty(lambda c: c.score1 * c.score2)
setup_all(True)
t1 = Tag(score1=5.0, score2=3.0)
t2 = Tag(score1=10.0, score2=2.0)
session.commit()
session.close()
for tag in Tag.query.all():
assert tag.score == tag.score1 * tag.score2
def test_column_property_eagerload_and_reuse(self):
class Tag(Entity):
score1 = Field(Float)
score2 = Field(Float)
user = ManyToOne('User')
score = ColumnProperty(lambda c: c.score1 * c.score2)
class User(Entity):
name = Field(String(16))
category = ManyToOne('Category')
tags = OneToMany('Tag', lazy=False)
score = ColumnProperty(lambda c:
select([func.sum(Tag.score)],
Tag.user_id == c.id).as_scalar())
class Category(Entity):
name = Field(String(16))
users = OneToMany('User', lazy=False)
score = ColumnProperty(lambda c:
select([func.avg(User.score)],
User.category_id == c.id
).as_scalar())
setup_all(True)
u1 = User(name='joe', tags=[Tag(score1=5.0, score2=3.0),
Tag(score1=55.0, score2=1.0)])
u2 = User(name='bar', tags=[Tag(score1=5.0, score2=4.0),
Tag(score1=50.0, score2=1.0),
Tag(score1=15.0, score2=2.0)])
c1 = Category(name='dummy', users=[u1, u2])
session.commit()
session.close()
category = Category.query.one()
assert category.score == 85
for user in category.users:
assert user.score == sum([tag.score for tag in user.tags])
for tag in user.tags:
assert tag.score == tag.score1 * tag.score2
def test_has_property(self):
class Tag(Entity):
has_field('score1', Float)
has_field('score2', Float)
has_property('score',
lambda c: column_property(
(c.score1 * c.score2).label('score')))
setup_all(True)
t1 = Tag(score1=5.0, score2=3.0)
        t2 = Tag(score1=10.0, score2=2.0)
session.commit()
session.close()
for tag in Tag.query.all():
assert tag.score == tag.score1 * tag.score2
def test_deferred(self):
class A(Entity):
name = Field(String(20))
stuff = Field(Text, deferred=True)
setup_all(True)
A(name='foo')
session.commit()
def test_synonym(self):
class Person(Entity):
name = Field(String(50), required=True)
_email = Field(String(20), colname='email', synonym='email')
def _set_email(self, email):
Person.email_values.append(email)
self._email = email
def _get_email(self):
Person.email_gets += 1
return self._email
email = property(_get_email, _set_email)
email_values = []
email_gets = 0
setup_all(True)
mrx = Person(name='Mr. X', email='x@y.com')
assert mrx.email == 'x@y.com'
assert Person.email_gets == 1
mrx.email = "x@z.com"
assert Person.email_values == ['x@y.com', 'x@z.com']
session.commit()
session.close()
# test the synonym itself (ie querying)
p = Person.get_by(email='x@z.com')
assert p.name == 'Mr. X'
def test_synonym_class(self):
class Person(Entity):
name = Field(String(30))
primary_email = Field(String(100))
email_address = Synonym('primary_email')
class User(Person):
user_name = Synonym('name')
password = Field(String(20))
setup_all(True)
alexandre = Person(
name = 'Alexandre da Silva',
email_address = 'x@y.com'
)
johann = User(
name = 'Johann Felipe Voigt',
email_address = 'y@z.com',
password = 'unencrypted'
)
session.commit(); session.close()
p = Person.get_by(name='Alexandre da Silva')
assert p.primary_email == 'x@y.com'
u = User.get_by(user_name='Johann Felipe Voigt')
assert u.email_address == 'y@z.com'
u.email_address = 'new@z.com'
session.commit(); session.close()
p = Person.get_by(name='Johann Felipe Voigt')
assert p.primary_email == 'new@z.com'
def test_setattr(self):
class A(Entity):
pass
A.name = Field(String(30))
setup_all(True)
a1 = A(name='a1')
session.commit(); session.close()
a = A.query.one()
assert a.name == 'a1'
|
|
# Copyright 2009-2012 James P Goodwin ped tiny python editor
""" module that contains the symbolic names of the keys """
import curses
KEYTAB_NOKEY=chr(0)
KEYTAB_ALTA=chr(27)+'a'
KEYTAB_ALTB=chr(27)+'b'
KEYTAB_ALTC=chr(27)+'c'
KEYTAB_ALTD=chr(27)+'d'
KEYTAB_ALTE=chr(27)+'e'
KEYTAB_ALTF=chr(27)+'f'
KEYTAB_ALTG=chr(27)+'g'
KEYTAB_ALTH=chr(27)+'h'
KEYTAB_ALTI=chr(27)+'i'
KEYTAB_ALTJ=chr(27)+'j'
KEYTAB_ALTK=chr(27)+'k'
KEYTAB_ALTL=chr(27)+'l'
KEYTAB_ALTM=chr(27)+'m'
KEYTAB_ALTN=chr(27)+'n'
KEYTAB_ALTo=chr(27)+'o'
KEYTAB_ALTO=chr(27)+'O'
KEYTAB_ALTP=chr(27)+'p'
KEYTAB_ALTQ=chr(27)+'q'
KEYTAB_ALTR=chr(27)+'r'
KEYTAB_ALTS=chr(27)+'s'
KEYTAB_ALTT=chr(27)+'t'
KEYTAB_ALTU=chr(27)+'u'
KEYTAB_ALTV=chr(27)+'v'
KEYTAB_ALTW=chr(27)+'w'
KEYTAB_ALTX=chr(27)+'x'
KEYTAB_ALTY=chr(27)+'y'
KEYTAB_ALTZ=chr(27)+'z'
KEYTAB_BACKSPACE=chr(8)
KEYTAB_BACKTAB=chr(27)+'[Z'
KEYTAB_BTAB="btab"
KEYTAB_CR=chr(10)
KEYTAB_CTRLA=chr(1)
KEYTAB_CTRLB=chr(2)
KEYTAB_CTRLC=chr(3)
KEYTAB_CTRLD=chr(4)
KEYTAB_CTRLE=chr(5)
KEYTAB_CTRLF=chr(6)
KEYTAB_CTRLG=chr(7)
KEYTAB_CTRLH=chr(8)
KEYTAB_CTRLI=chr(9)
KEYTAB_CTRLJ=chr(10)
KEYTAB_CTRLK=chr(11)
KEYTAB_CTRLL=chr(12)
KEYTAB_CTRLM=chr(13)
KEYTAB_CTRLN=chr(14)
KEYTAB_CTRLO=chr(15)
KEYTAB_CTRLP=chr(16)
KEYTAB_CTRLQ=chr(17)
KEYTAB_CTRLR=chr(18)
KEYTAB_CTRLS=chr(19)
KEYTAB_CTRLT=chr(20)
KEYTAB_CTRLU=chr(21)
KEYTAB_CTRLV=chr(22)
KEYTAB_CTRLW=chr(23)
KEYTAB_CTRLX=chr(24)
KEYTAB_CTRLY=chr(25)
KEYTAB_CTRLZ=chr(26)
KEYTAB_CTRLLEFT='ctrl-left'
KEYTAB_CTRLRIGHT='ctrl-right'
KEYTAB_CTRLHOME='ctrl-home'
KEYTAB_CTRLEND='ctrl-end'
KEYTAB_DELC="delc"
KEYTAB_DLGCANCEL="cancel"
KEYTAB_DLGNOP=KEYTAB_NOKEY
KEYTAB_DLGOK="ok"
KEYTAB_DOWN="down"
KEYTAB_END="end"
KEYTAB_ESC=chr(27)
KEYTAB_F00="fk00"
KEYTAB_F01="fk01"
KEYTAB_F02="fk02"
KEYTAB_F03="fk03"
KEYTAB_F04="fk04"
KEYTAB_F05="fk05"
KEYTAB_F06="fk06"
KEYTAB_F07="fk07"
KEYTAB_F08="fk08"
KEYTAB_F09="fk09"
KEYTAB_F10="fk10"
KEYTAB_F11="fk11"
KEYTAB_F12="fk12"
KEYTAB_F13="fk13"
KEYTAB_F14="fk14"
KEYTAB_F15="fk15"
KEYTAB_F16="fk16"
KEYTAB_F17="fk17"
KEYTAB_F18="fk18"
KEYTAB_F19="fk19"
KEYTAB_F20="fk20"
KEYTAB_F21="fk21"
KEYTAB_F22="fk22"
KEYTAB_F23="fk23"
KEYTAB_F24="fk24"
KEYTAB_F25="fk25"
KEYTAB_F26="fk26"
KEYTAB_F27="fk27"
KEYTAB_F28="fk28"
KEYTAB_F29="fk29"
KEYTAB_F30="fk30"
KEYTAB_F31="fk31"
KEYTAB_F32="fk32"
KEYTAB_F33="fk33"
KEYTAB_F34="fk34"
KEYTAB_F35="fk35"
KEYTAB_F36="fk36"
KEYTAB_F37="fk37"
KEYTAB_F38="fk38"
KEYTAB_F39="fk39"
KEYTAB_F40="fk40"
KEYTAB_F41="fk41"
KEYTAB_F42="fk42"
KEYTAB_F43="fk43"
KEYTAB_F44="fk44"
KEYTAB_F45="fk45"
KEYTAB_F46="fk46"
KEYTAB_F47="fk47"
KEYTAB_F48="fk48"
KEYTAB_F49="fk49"
KEYTAB_F50="fk50"
KEYTAB_F51="fk51"
KEYTAB_F52="fk52"
KEYTAB_F53="fk53"
KEYTAB_F54="fk54"
KEYTAB_F55="fk55"
KEYTAB_F56="fk56"
KEYTAB_F57="fk57"
KEYTAB_F58="fk58"
KEYTAB_F59="fk59"
KEYTAB_F60="fk60"
KEYTAB_F61="fk61"
KEYTAB_F62="fk62"
KEYTAB_F63="fk63"
KEYTAB_HOME="home"
KEYTAB_INSERT="insert"
KEYTAB_KEYPADPLUS=chr(27)+'Ok'
KEYTAB_KEYTPADMINUS=chr(27)+'Om'
KEYTAB_LEFT="left"
KEYTAB_PAGEDOWN="pagedown"
KEYTAB_PAGEUP="pageup"
KEYTAB_REFRESH="refresh"
KEYTAB_RESIZE="resize"
KEYTAB_RIGHT="right"
KEYTAB_SPACE=' '
KEYTAB_TAB=chr(9)
KEYTAB_UP="up"
KEYTAB_MOUSE="mouse"
name_to_key = {
"KEYTAB_ALTA" : KEYTAB_ALTA,
"KEYTAB_ALTB" : KEYTAB_ALTB,
"KEYTAB_ALTC" : KEYTAB_ALTC,
"KEYTAB_ALTD" : KEYTAB_ALTD,
"KEYTAB_ALTE" : KEYTAB_ALTE,
"KEYTAB_ALTF" : KEYTAB_ALTF,
"KEYTAB_ALTG" : KEYTAB_ALTG,
"KEYTAB_ALTH" : KEYTAB_ALTH,
"KEYTAB_ALTI" : KEYTAB_ALTI,
"KEYTAB_ALTJ" : KEYTAB_ALTJ,
"KEYTAB_ALTK" : KEYTAB_ALTK,
"KEYTAB_ALTL" : KEYTAB_ALTL,
"KEYTAB_ALTM" : KEYTAB_ALTM,
"KEYTAB_ALTN" : KEYTAB_ALTN,
"KEYTAB_ALTo" : KEYTAB_ALTo,
"KEYTAB_ALTO" : KEYTAB_ALTO,
"KEYTAB_ALTP" : KEYTAB_ALTP,
"KEYTAB_ALTQ" : KEYTAB_ALTQ,
"KEYTAB_ALTR" : KEYTAB_ALTR,
"KEYTAB_ALTS" : KEYTAB_ALTS,
"KEYTAB_ALTT" : KEYTAB_ALTT,
"KEYTAB_ALTU" : KEYTAB_ALTU,
"KEYTAB_ALTV" : KEYTAB_ALTV,
"KEYTAB_ALTW" : KEYTAB_ALTW,
"KEYTAB_ALTX" : KEYTAB_ALTX,
"KEYTAB_ALTY" : KEYTAB_ALTY,
"KEYTAB_ALTZ" : KEYTAB_ALTZ,
"KEYTAB_BACKSPACE" : KEYTAB_BACKSPACE,
"KEYTAB_BACKSPACE" : KEYTAB_BACKSPACE,
"KEYTAB_BACKTAB" : KEYTAB_BACKTAB,
"KEYTAB_BTAB" : KEYTAB_BTAB,
"KEYTAB_CR" : KEYTAB_CR,
"KEYTAB_CTRLA" : KEYTAB_CTRLA,
"KEYTAB_CTRLB" : KEYTAB_CTRLB,
"KEYTAB_CTRLC" : KEYTAB_CTRLC,
"KEYTAB_CTRLD" : KEYTAB_CTRLD,
"KEYTAB_CTRLE" : KEYTAB_CTRLE,
"KEYTAB_CTRLF" : KEYTAB_CTRLF,
"KEYTAB_CTRLG" : KEYTAB_CTRLG,
"KEYTAB_CTRLH" : KEYTAB_CTRLH,
"KEYTAB_CTRLI" : KEYTAB_CTRLI,
"KEYTAB_CTRLJ" : KEYTAB_CTRLJ,
"KEYTAB_CTRLK" : KEYTAB_CTRLK,
"KEYTAB_CTRLL" : KEYTAB_CTRLL,
"KEYTAB_CTRLM" : KEYTAB_CTRLM,
"KEYTAB_CTRLN" : KEYTAB_CTRLN,
"KEYTAB_CTRLO" : KEYTAB_CTRLO,
"KEYTAB_CTRLP" : KEYTAB_CTRLP,
"KEYTAB_CTRLQ" : KEYTAB_CTRLQ,
"KEYTAB_CTRLR" : KEYTAB_CTRLR,
"KEYTAB_CTRLS" : KEYTAB_CTRLS,
"KEYTAB_CTRLT" : KEYTAB_CTRLT,
"KEYTAB_CTRLU" : KEYTAB_CTRLU,
"KEYTAB_CTRLV" : KEYTAB_CTRLV,
"KEYTAB_CTRLW" : KEYTAB_CTRLW,
"KEYTAB_CTRLX" : KEYTAB_CTRLX,
"KEYTAB_CTRLY" : KEYTAB_CTRLY,
"KEYTAB_CTRLZ" : KEYTAB_CTRLZ,
"KEYTAB_CTRLLEFT" : KEYTAB_CTRLLEFT,
"KEYTAB_CTRLRIGHT" : KEYTAB_CTRLRIGHT,
"KEYTAB_CTRLHOME" : KEYTAB_CTRLHOME,
"KEYTAB_CTRLEND" : KEYTAB_CTRLEND,
"KEYTAB_DELC" : KEYTAB_DELC,
"KEYTAB_DLGCANCEL" : KEYTAB_DLGCANCEL,
"KEYTAB_DLGNOP" : KEYTAB_DLGNOP,
"KEYTAB_DLGOK" : KEYTAB_DLGOK,
"KEYTAB_DOWN" : KEYTAB_DOWN,
"KEYTAB_END" : KEYTAB_END,
"KEYTAB_ESC" : KEYTAB_ESC,
"KEYTAB_F00" : KEYTAB_F00,
"KEYTAB_F01" : KEYTAB_F01,
"KEYTAB_F02" : KEYTAB_F02,
"KEYTAB_F03" : KEYTAB_F03,
"KEYTAB_F04" : KEYTAB_F04,
"KEYTAB_F05" : KEYTAB_F05,
"KEYTAB_F06" : KEYTAB_F06,
"KEYTAB_F07" : KEYTAB_F07,
"KEYTAB_F08" : KEYTAB_F08,
"KEYTAB_F09" : KEYTAB_F09,
"KEYTAB_F10" : KEYTAB_F10,
"KEYTAB_F11" : KEYTAB_F11,
"KEYTAB_F12" : KEYTAB_F12,
"KEYTAB_F13" : KEYTAB_F13,
"KEYTAB_F14" : KEYTAB_F14,
"KEYTAB_F15" : KEYTAB_F15,
"KEYTAB_F16" : KEYTAB_F16,
"KEYTAB_F17" : KEYTAB_F17,
"KEYTAB_F18" : KEYTAB_F18,
"KEYTAB_F19" : KEYTAB_F19,
"KEYTAB_F20" : KEYTAB_F20,
"KEYTAB_F21" : KEYTAB_F21,
"KEYTAB_F22" : KEYTAB_F22,
"KEYTAB_F23" : KEYTAB_F23,
"KEYTAB_F24" : KEYTAB_F24,
"KEYTAB_F25" : KEYTAB_F25,
"KEYTAB_F26" : KEYTAB_F26,
"KEYTAB_F27" : KEYTAB_F27,
"KEYTAB_F28" : KEYTAB_F28,
"KEYTAB_F29" : KEYTAB_F29,
"KEYTAB_F30" : KEYTAB_F30,
"KEYTAB_F31" : KEYTAB_F31,
"KEYTAB_F32" : KEYTAB_F32,
"KEYTAB_F33" : KEYTAB_F33,
"KEYTAB_F34" : KEYTAB_F34,
"KEYTAB_F35" : KEYTAB_F35,
"KEYTAB_F36" : KEYTAB_F36,
"KEYTAB_F37" : KEYTAB_F37,
"KEYTAB_F38" : KEYTAB_F38,
"KEYTAB_F39" : KEYTAB_F39,
"KEYTAB_F40" : KEYTAB_F40,
"KEYTAB_F41" : KEYTAB_F41,
"KEYTAB_F42" : KEYTAB_F42,
"KEYTAB_F43" : KEYTAB_F43,
"KEYTAB_F44" : KEYTAB_F44,
"KEYTAB_F45" : KEYTAB_F45,
"KEYTAB_F46" : KEYTAB_F46,
"KEYTAB_F47" : KEYTAB_F47,
"KEYTAB_F48" : KEYTAB_F48,
"KEYTAB_F49" : KEYTAB_F49,
"KEYTAB_F50" : KEYTAB_F50,
"KEYTAB_F51" : KEYTAB_F51,
"KEYTAB_F52" : KEYTAB_F52,
"KEYTAB_F53" : KEYTAB_F53,
"KEYTAB_F54" : KEYTAB_F54,
"KEYTAB_F55" : KEYTAB_F55,
"KEYTAB_F56" : KEYTAB_F56,
"KEYTAB_F57" : KEYTAB_F57,
"KEYTAB_F58" : KEYTAB_F58,
"KEYTAB_F59" : KEYTAB_F59,
"KEYTAB_F60" : KEYTAB_F60,
"KEYTAB_F61" : KEYTAB_F61,
"KEYTAB_F62" : KEYTAB_F62,
"KEYTAB_F63" : KEYTAB_F63,
"KEYTAB_HOME" : KEYTAB_HOME,
"KEYTAB_INSERT" : KEYTAB_INSERT,
"KEYTAB_KEYPADPLUS" : KEYTAB_KEYPADPLUS,
"KEYTAB_KEYTPADMINUS" : KEYTAB_KEYTPADMINUS,
"KEYTAB_LEFT" : KEYTAB_LEFT,
"KEYTAB_NOKEY" : KEYTAB_NOKEY,
"KEYTAB_PAGEDOWN" : KEYTAB_PAGEDOWN,
"KEYTAB_PAGEUP" : KEYTAB_PAGEUP,
"KEYTAB_REFRESH" : KEYTAB_REFRESH,
"KEYTAB_RESIZE" : KEYTAB_RESIZE,
"KEYTAB_RIGHT" : KEYTAB_RIGHT,
"KEYTAB_SPACE" : KEYTAB_SPACE,
"KEYTAB_TAB" : KEYTAB_TAB,
"KEYTAB_UP" : KEYTAB_UP,
"KEYTAB_MOUSE" : KEYTAB_MOUSE,
}
key_to_name = {}
for name,key in list(name_to_key.items()):
key_to_name[key] = name
keydef = [
((0,),KEYTAB_NOKEY),
((27,-1,),KEYTAB_ESC),
((27,ord('a'),-1),KEYTAB_ALTA),
((27,ord('b'),-1),KEYTAB_ALTB),
((27,ord('c'),-1),KEYTAB_ALTC),
((27,ord('d'),-1),KEYTAB_ALTD),
((27,ord('e'),-1),KEYTAB_ALTE),
((27,ord('f'),-1),KEYTAB_ALTF),
((27,ord('g'),-1),KEYTAB_ALTG),
((27,ord('h'),-1),KEYTAB_ALTH),
((27,ord('i'),-1),KEYTAB_ALTI),
((27,ord('j'),-1),KEYTAB_ALTJ),
((27,ord('k'),-1),KEYTAB_ALTK),
((27,ord('l'),-1),KEYTAB_ALTL),
((27,ord('m'),-1),KEYTAB_ALTM),
((27,ord('n'),-1),KEYTAB_ALTN),
((27,ord('o'),-1),KEYTAB_ALTo),
((27,ord('p'),-1),KEYTAB_ALTP),
((27,ord('q'),-1),KEYTAB_ALTQ),
((27,ord('r'),-1),KEYTAB_ALTR),
((27,ord('s'),-1),KEYTAB_ALTS),
((27,ord('t'),-1),KEYTAB_ALTT),
((27,ord('u'),-1),KEYTAB_ALTU),
((27,ord('v'),-1),KEYTAB_ALTV),
((27,ord('w'),-1),KEYTAB_ALTW),
((27,ord('x'),-1),KEYTAB_ALTX),
((27,ord('y'),-1),KEYTAB_ALTY),
((27,ord('z'),-1),KEYTAB_ALTZ),
((27,ord('A'),-1),KEYTAB_ALTA),
((27,ord('B'),-1),KEYTAB_ALTB),
((27,ord('C'),-1),KEYTAB_ALTC),
((27,ord('D'),-1),KEYTAB_ALTD),
((27,ord('E'),-1),KEYTAB_ALTE),
((27,ord('F'),-1),KEYTAB_ALTF),
((27,ord('G'),-1),KEYTAB_ALTG),
((27,ord('H'),-1),KEYTAB_ALTH),
((27,ord('I'),-1),KEYTAB_ALTI),
((27,ord('J'),-1),KEYTAB_ALTJ),
((27,ord('K'),-1),KEYTAB_ALTK),
((27,ord('L'),-1),KEYTAB_ALTL),
((27,ord('M'),-1),KEYTAB_ALTM),
((27,ord('N'),-1),KEYTAB_ALTN),
((27,ord('O'),-1),KEYTAB_ALTO),
((27,ord('P'),-1),KEYTAB_ALTP),
((27,ord('Q'),-1),KEYTAB_ALTQ),
((27,ord('R'),-1),KEYTAB_ALTR),
((27,ord('S'),-1),KEYTAB_ALTS),
((27,ord('T'),-1),KEYTAB_ALTT),
((27,ord('U'),-1),KEYTAB_ALTU),
((27,ord('V'),-1),KEYTAB_ALTV),
((27,ord('W'),-1),KEYTAB_ALTW),
((27,ord('X'),-1),KEYTAB_ALTX),
((27,ord('Y'),-1),KEYTAB_ALTY),
((27,ord('Z'),-1),KEYTAB_ALTZ),
((curses.KEY_BACKSPACE,-1),KEYTAB_BACKSPACE),
((8,-1),KEYTAB_BACKSPACE),
((127,-1),KEYTAB_BACKSPACE),
((27,ord('['),ord('Z'),-1),KEYTAB_BACKTAB),
((curses.KEY_BTAB,-1),KEYTAB_BTAB),
((10,-1),KEYTAB_CR),
((1,-1),KEYTAB_CTRLA),
((2,-1),KEYTAB_CTRLB),
((3,-1),KEYTAB_CTRLC),
((4,-1),KEYTAB_CTRLD),
((5,-1),KEYTAB_CTRLE),
((6,-1),KEYTAB_CTRLF),
((7,-1),KEYTAB_CTRLG),
((8,-1),KEYTAB_CTRLH),
((9,-1),KEYTAB_CTRLI),
((10,-1),KEYTAB_CTRLJ),
((11,-1),KEYTAB_CTRLK),
((12,-1),KEYTAB_CTRLL),
((13,-1),KEYTAB_CTRLM),
((14,-1),KEYTAB_CTRLN),
((15,-1),KEYTAB_CTRLO),
((16,-1),KEYTAB_CTRLP),
((17,-1),KEYTAB_CTRLQ),
((18,-1),KEYTAB_CTRLR),
((19,-1),KEYTAB_CTRLS),
((20,-1),KEYTAB_CTRLT),
((21,-1),KEYTAB_CTRLU),
((22,-1),KEYTAB_CTRLV),
((23,-1),KEYTAB_CTRLW),
((24,-1),KEYTAB_CTRLX),
((25,-1),KEYTAB_CTRLY),
((26,-1),KEYTAB_CTRLZ),
((545,-1),KEYTAB_CTRLLEFT),
((560,-1),KEYTAB_CTRLRIGHT),
((530,-1),KEYTAB_CTRLHOME),
((525,-1),KEYTAB_CTRLEND),
((curses.KEY_DC,-1),KEYTAB_DELC),
((curses.KEY_DOWN,-1),KEYTAB_DOWN),
((curses.KEY_END,-1),KEYTAB_END),
((curses.KEY_F0,-1),KEYTAB_F00),
((curses.KEY_F1,-1),KEYTAB_F01),
((curses.KEY_F2,-1),KEYTAB_F02),
((curses.KEY_F3,-1),KEYTAB_F03),
((curses.KEY_F4,-1),KEYTAB_F04),
((curses.KEY_F5,-1),KEYTAB_F05),
((curses.KEY_F6,-1),KEYTAB_F06),
((curses.KEY_F7,-1),KEYTAB_F07),
((curses.KEY_F8,-1),KEYTAB_F08),
((curses.KEY_F9,-1),KEYTAB_F09),
((curses.KEY_F10,-1),KEYTAB_F10),
((curses.KEY_F11,-1),KEYTAB_F11),
((curses.KEY_F12,-1),KEYTAB_F12),
((curses.KEY_F13,-1),KEYTAB_F13),
((curses.KEY_F14,-1),KEYTAB_F14),
((curses.KEY_F15,-1),KEYTAB_F15),
((curses.KEY_F16,-1),KEYTAB_F16),
((curses.KEY_F17,-1),KEYTAB_F17),
((curses.KEY_F18,-1),KEYTAB_F18),
((curses.KEY_F19,-1),KEYTAB_F19),
((curses.KEY_F20,-1),KEYTAB_F20),
((curses.KEY_F21,-1),KEYTAB_F21),
((curses.KEY_F22,-1),KEYTAB_F22),
((curses.KEY_F23,-1),KEYTAB_F23),
((curses.KEY_F24,-1),KEYTAB_F24),
((curses.KEY_F25,-1),KEYTAB_F25),
((curses.KEY_F26,-1),KEYTAB_F26),
((curses.KEY_F27,-1),KEYTAB_F27),
((curses.KEY_F28,-1),KEYTAB_F28),
((curses.KEY_F29,-1),KEYTAB_F29),
((curses.KEY_F30,-1),KEYTAB_F30),
((curses.KEY_F31,-1),KEYTAB_F31),
((curses.KEY_F32,-1),KEYTAB_F32),
((curses.KEY_F33,-1),KEYTAB_F33),
((curses.KEY_F34,-1),KEYTAB_F34),
((curses.KEY_F35,-1),KEYTAB_F35),
((curses.KEY_F36,-1),KEYTAB_F36),
((curses.KEY_F37,-1),KEYTAB_F37),
((curses.KEY_F38,-1),KEYTAB_F38),
((curses.KEY_F39,-1),KEYTAB_F39),
((curses.KEY_F40,-1),KEYTAB_F40),
((curses.KEY_F41,-1),KEYTAB_F41),
((curses.KEY_F42,-1),KEYTAB_F42),
((curses.KEY_F43,-1),KEYTAB_F43),
((curses.KEY_F44,-1),KEYTAB_F44),
((curses.KEY_F45,-1),KEYTAB_F45),
((curses.KEY_F46,-1),KEYTAB_F46),
((curses.KEY_F47,-1),KEYTAB_F47),
((curses.KEY_F48,-1),KEYTAB_F48),
((curses.KEY_F49,-1),KEYTAB_F49),
((curses.KEY_F50,-1),KEYTAB_F50),
((curses.KEY_F51,-1),KEYTAB_F51),
((curses.KEY_F52,-1),KEYTAB_F52),
((curses.KEY_F53,-1),KEYTAB_F53),
((curses.KEY_F54,-1),KEYTAB_F54),
((curses.KEY_F55,-1),KEYTAB_F55),
((curses.KEY_F56,-1),KEYTAB_F56),
((curses.KEY_F57,-1),KEYTAB_F57),
((curses.KEY_F58,-1),KEYTAB_F58),
((curses.KEY_F59,-1),KEYTAB_F59),
((curses.KEY_F60,-1),KEYTAB_F60),
((curses.KEY_F61,-1),KEYTAB_F61),
((curses.KEY_F62,-1),KEYTAB_F62),
((curses.KEY_F63,-1),KEYTAB_F63),
((curses.KEY_HOME,-1),KEYTAB_HOME),
((curses.KEY_IC,-1),KEYTAB_INSERT),
((27,ord('O'),ord('k'),-1),KEYTAB_KEYPADPLUS),
((27,ord('O'),ord('m'),-1),KEYTAB_KEYTPADMINUS),
((curses.KEY_LEFT,-1),KEYTAB_LEFT),
((curses.KEY_NPAGE,-1),KEYTAB_PAGEDOWN),
((curses.KEY_PPAGE,-1),KEYTAB_PAGEUP),
((curses.KEY_RESIZE,-1),KEYTAB_RESIZE),
((curses.KEY_RIGHT,-1),KEYTAB_RIGHT),
((ord(' '),-1),KEYTAB_SPACE),
((9,-1),KEYTAB_TAB),
((curses.KEY_UP,-1),KEYTAB_UP),
((curses.KEY_MOUSE,-1),KEYTAB_MOUSE),
]
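# Illustrative helper (a sketch, not part of the table definitions above):
# translate a tuple of raw curses input codes, terminated with -1 to mirror
# keydef, into the key string and its symbolic KEYTAB_* name.
def lookup_key(codes):
    """Return (key, name) for a raw code sequence, or (None, None)."""
    codes = tuple(codes)
    for sequence, key in keydef:
        if codes == sequence:
            return key, key_to_name.get(key)
    return (None, None)
# Example: lookup_key((27, ord('a'), -1)) == (KEYTAB_ALTA, "KEYTAB_ALTA")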
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import collections
import os
import struct
import time
import requests
from oslo.config import cfg
from ceilometer.openstack.common.crypto import utils as cryptoutils
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common import log as logging
secure_message_opts = [
cfg.BoolOpt('enabled', default=True,
help='Whether Secure Messaging (Signing) is enabled,'
' defaults to enabled'),
cfg.BoolOpt('enforced', default=False,
help='Whether Secure Messaging (Signing) is enforced,'
' defaults to not enforced'),
cfg.BoolOpt('encrypt', default=False,
help='Whether Secure Messaging (Encryption) is enabled,'
' defaults to not enabled'),
cfg.StrOpt('secret_keys_file',
help='Path to the file containing the keys, takes precedence'
' over secret_key'),
cfg.MultiStrOpt('secret_key',
help='A list of keys: (ex: name:<base64 encoded key>),'
' ignored if secret_keys_file is set'),
cfg.StrOpt('kds_endpoint',
help='KDS endpoint (ex: http://kds.example.com:35357/v3)'),
]
secure_message_group = cfg.OptGroup('secure_messages',
title='Secure Messaging options')
LOG = logging.getLogger(__name__)
class SecureMessageException(Exception):
"""Generic Exception for Secure Messages."""
msg = "An unknown Secure Message related exception occurred."
def __init__(self, msg=None):
if msg is None:
msg = self.msg
super(SecureMessageException, self).__init__(msg)
class SharedKeyNotFound(SecureMessageException):
"""No shared key was found and no other external authentication mechanism
is available.
"""
msg = "Shared Key for [%s] Not Found. (%s)"
def __init__(self, name, errmsg):
super(SharedKeyNotFound, self).__init__(self.msg % (name, errmsg))
class InvalidMetadata(SecureMessageException):
"""The metadata is invalid."""
msg = "Invalid metadata: %s"
def __init__(self, err):
super(InvalidMetadata, self).__init__(self.msg % err)
class InvalidSignature(SecureMessageException):
"""Signature validation failed."""
msg = "Failed to validate signature (source=%s, destination=%s)"
def __init__(self, src, dst):
super(InvalidSignature, self).__init__(self.msg % (src, dst))
class UnknownDestinationName(SecureMessageException):
"""The Destination name is unknown to us."""
msg = "Invalid destination name (%s)"
def __init__(self, name):
super(UnknownDestinationName, self).__init__(self.msg % name)
class InvalidEncryptedTicket(SecureMessageException):
"""The Encrypted Ticket could not be successfully handled."""
msg = "Invalid Ticket (source=%s, destination=%s)"
def __init__(self, src, dst):
super(InvalidEncryptedTicket, self).__init__(self.msg % (src, dst))
class InvalidExpiredTicket(SecureMessageException):
"""The ticket received is already expired."""
msg = "Expired ticket (source=%s, destination=%s)"
def __init__(self, src, dst):
super(InvalidExpiredTicket, self).__init__(self.msg % (src, dst))
class CommunicationError(SecureMessageException):
"""The Communication with the KDS failed."""
msg = "Communication Error (target=%s): %s"
def __init__(self, target, errmsg):
super(CommunicationError, self).__init__(self.msg % (target, errmsg))
class InvalidArgument(SecureMessageException):
"""Bad initialization argument."""
msg = "Invalid argument: %s"
def __init__(self, errmsg):
super(InvalidArgument, self).__init__(self.msg % errmsg)
Ticket = collections.namedtuple('Ticket', ['skey', 'ekey', 'esek'])
class KeyStore(object):
"""A storage class for Signing and Encryption Keys.
This class creates an object that holds Generic Keys like Signing
Keys, Encryption Keys, Encrypted SEK Tickets ...
"""
def __init__(self):
self._kvps = dict()
def _get_key_name(self, source, target, ktype):
return (source, target, ktype)
def _put(self, src, dst, ktype, expiration, data):
name = self._get_key_name(src, dst, ktype)
self._kvps[name] = (expiration, data)
def _get(self, src, dst, ktype):
name = self._get_key_name(src, dst, ktype)
if name in self._kvps:
expiration, data = self._kvps[name]
if expiration > time.time():
return data
else:
del self._kvps[name]
return None
def clear(self):
"""Wipes the store clear of all data."""
self._kvps.clear()
def put_ticket(self, source, target, skey, ekey, esek, expiration):
"""Puts a sek pair in the cache.
:param source: Client name
:param target: Target name
:param skey: The Signing Key
        :param ekey: The Encryption Key
:param esek: The token encrypted with the target key
:param expiration: Expiration time in seconds since Epoch
"""
keys = Ticket(skey, ekey, esek)
self._put(source, target, 'ticket', expiration, keys)
def get_ticket(self, source, target):
"""Returns a Ticket (skey, ekey, esek) namedtuple for the
source/target pair.
"""
return self._get(source, target, 'ticket')
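# Illustrative sketch of the intended KeyStore flow (hypothetical names):
# a ticket is cached under (source, target, 'ticket') with an absolute
# expiry, and get_ticket() returns None once that expiry has passed.
#
#     store = KeyStore()
#     store.put_ticket('scheduler', 'compute', skey, ekey, esek,
#                      expiration=time.time() + 3600)
#     store.get_ticket('scheduler', 'compute')  # -> Ticket(skey, ekey, esek)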
_KEY_STORE = KeyStore()
class _KDSClient(object):
USER_AGENT = 'oslo-incubator/rpc'
def __init__(self, endpoint=None, timeout=None):
"""A KDS Client class."""
self._endpoint = endpoint
if timeout is not None:
self.timeout = float(timeout)
else:
self.timeout = None
def _do_get(self, url, request):
req_kwargs = dict()
req_kwargs['headers'] = dict()
req_kwargs['headers']['User-Agent'] = self.USER_AGENT
req_kwargs['headers']['Content-Type'] = 'application/json'
req_kwargs['data'] = jsonutils.dumps({'request': request})
if self.timeout is not None:
req_kwargs['timeout'] = self.timeout
try:
resp = requests.get(url, **req_kwargs)
except requests.ConnectionError as e:
err = "Unable to establish connection. %s" % e
raise CommunicationError(url, err)
return resp
def _get_reply(self, url, resp):
if resp.text:
try:
body = jsonutils.loads(resp.text)
reply = body['reply']
except (KeyError, TypeError, ValueError):
msg = "Failed to decode reply: %s" % resp.text
raise CommunicationError(url, msg)
else:
msg = "No reply data was returned."
raise CommunicationError(url, msg)
return reply
def _get_ticket(self, request, url=None, redirects=10):
"""Send an HTTP request.
Wraps around 'requests' to handle redirects and common errors.
"""
if url is None:
if not self._endpoint:
raise CommunicationError(url, 'Endpoint not configured')
url = self._endpoint + '/kds/ticket'
while redirects:
resp = self._do_get(url, request)
if resp.status_code in (301, 302, 305):
# Redirected. Reissue the request to the new location.
url = resp.headers['location']
redirects -= 1
continue
elif resp.status_code != 200:
msg = "Request returned failure status: %s (%s)"
err = msg % (resp.status_code, resp.text)
raise CommunicationError(url, err)
return self._get_reply(url, resp)
raise CommunicationError(url, "Too many redirections, giving up!")
def get_ticket(self, source, target, crypto, key):
# prepare metadata
md = {'requestor': source,
'target': target,
'timestamp': time.time(),
'nonce': struct.unpack('Q', os.urandom(8))[0]}
metadata = base64.b64encode(jsonutils.dumps(md))
# sign metadata
signature = crypto.sign(key, metadata)
# HTTP request
reply = self._get_ticket({'metadata': metadata,
'signature': signature})
# verify reply
signature = crypto.sign(key, (reply['metadata'] + reply['ticket']))
if signature != reply['signature']:
            raise InvalidEncryptedTicket(source, target)
md = jsonutils.loads(base64.b64decode(reply['metadata']))
if ((md['source'] != source or
md['destination'] != target or
md['expiration'] < time.time())):
raise InvalidEncryptedTicket(md['source'], md['destination'])
# return ticket data
tkt = jsonutils.loads(crypto.decrypt(key, reply['ticket']))
return tkt, md['expiration']
# We need to keep a global nonce, as this value should never repeat no
# matter how many SecureMessage objects we create.
_NONCE = None
def _get_nonce():
"""We keep a single counter per instance, as it is so huge we can't
possibly cycle through within 1/100 of a second anyway.
"""
global _NONCE
    # Lazy initialization: get a random 32-bit value to serve as the nonce
    # base (it is multiplied by 0xffffffff below). The counter itself will
    # wrap after 0xffffffff increments.
if _NONCE is None:
_NONCE = [struct.unpack('I', os.urandom(4))[0], 0]
    # Increment the counter and wrap it once it exceeds 0xffffffff
_NONCE[1] += 1
if _NONCE[1] > 0xffffffff:
_NONCE[1] = 0
# Return base + counter
return long((_NONCE[0] * 0xffffffff)) + _NONCE[1]
class SecureMessage(object):
"""A Secure Message object.
This class creates a signing/encryption facility for RPC messages.
It encapsulates all the necessary crypto primitives to insulate
regular code from the intricacies of message authentication, validation
and optionally encryption.
:param topic: The topic name of the queue
:param host: The server name, together with the topic it forms a unique
name that is used to source signing keys, and verify
incoming messages.
:param conf: a ConfigOpts object
:param key: (optional) explicitly pass in endpoint private key.
If not provided it will be sourced from the service config
:param key_store: (optional) Storage class for local caching
:param encrypt: (defaults to False) Whether to encrypt messages
:param enctype: (defaults to AES) Cipher to use
:param hashtype: (defaults to SHA256) Hash function to use for signatures
"""
def __init__(self, topic, host, conf, key=None, key_store=None,
encrypt=None, enctype='AES', hashtype='SHA256'):
conf.register_group(secure_message_group)
conf.register_opts(secure_message_opts, group='secure_messages')
self._name = '%s.%s' % (topic, host)
self._key = key
self._conf = conf.secure_messages
self._encrypt = self._conf.encrypt if (encrypt is None) else encrypt
self._crypto = cryptoutils.SymmetricCrypto(enctype, hashtype)
self._hkdf = cryptoutils.HKDF(hashtype)
self._kds = _KDSClient(self._conf.kds_endpoint)
if self._key is None:
self._key = self._init_key(topic, self._name)
if self._key is None:
err = "Secret Key (or key file) is missing or malformed"
raise SharedKeyNotFound(self._name, err)
self._key_store = key_store or _KEY_STORE
def _init_key(self, topic, name):
keys = None
if self._conf.secret_keys_file:
with open(self._conf.secret_keys_file, 'r') as f:
keys = f.readlines()
elif self._conf.secret_key:
keys = [self._conf.secret_key]
if keys is None:
return None
for k in keys:
if k[0] == '#':
continue
if ':' not in k:
break
svc, key = k.split(':', 1)
if svc == topic or svc == name:
return base64.b64decode(key)
return None
def _split_key(self, key, size):
sig_key = key[:size]
enc_key = key[size:]
return sig_key, enc_key
def _decode_esek(self, key, source, target, timestamp, esek):
"""This function decrypts the esek buffer passed in and returns a
KeyStore to be used to check and decrypt the received message.
:param key: The key to use to decrypt the ticket (esek)
:param source: The name of the source service
:param target: The name of the target service
:param timestamp: The incoming message timestamp
:param esek: a base64 encoded encrypted block containing a JSON string
"""
rkey = None
try:
s = self._crypto.decrypt(key, esek)
j = jsonutils.loads(s)
rkey = base64.b64decode(j['key'])
expiration = j['timestamp'] + j['ttl']
if j['timestamp'] > timestamp or timestamp > expiration:
raise InvalidExpiredTicket(source, target)
except Exception:
raise InvalidEncryptedTicket(source, target)
info = '%s,%s,%s' % (source, target, str(j['timestamp']))
sek = self._hkdf.expand(rkey, info, len(key) * 2)
return self._split_key(sek, len(key))
def _get_ticket(self, target):
"""This function will check if we already have a SEK for the specified
target in the cache, or will go and try to fetch a new SEK from the key
server.
:param target: The name of the target service
"""
ticket = self._key_store.get_ticket(self._name, target)
if ticket is not None:
return ticket
tkt, expiration = self._kds.get_ticket(self._name, target,
self._crypto, self._key)
self._key_store.put_ticket(self._name, target,
base64.b64decode(tkt['skey']),
base64.b64decode(tkt['ekey']),
tkt['esek'], expiration)
return self._key_store.get_ticket(self._name, target)
def encode(self, version, target, json_msg):
"""This is the main encoding function.
It takes a target and a message and returns a tuple consisting of a
JSON serialized metadata object, a JSON serialized (and optionally
encrypted) message, and a signature.
:param version: the current envelope version
:param target: The name of the target service (usually with hostname)
:param json_msg: a serialized json message object
"""
ticket = self._get_ticket(target)
metadata = jsonutils.dumps({'source': self._name,
'destination': target,
'timestamp': time.time(),
'nonce': _get_nonce(),
'esek': ticket.esek,
'encryption': self._encrypt})
message = json_msg
if self._encrypt:
message = self._crypto.encrypt(ticket.ekey, message)
signature = self._crypto.sign(ticket.skey,
version + metadata + message)
return (metadata, message, signature)
def decode(self, version, metadata, message, signature):
"""This is the main decoding function.
It takes a version, metadata, message and signature strings and
returns a tuple with a (decrypted) message and metadata or raises
an exception in case of error.
:param version: the current envelope version
:param metadata: a JSON serialized object with metadata for validation
:param message: a JSON serialized (base64 encoded encrypted) message
:param signature: a base64 encoded signature
"""
md = jsonutils.loads(metadata)
check_args = ('source', 'destination', 'timestamp',
'nonce', 'esek', 'encryption')
for arg in check_args:
if arg not in md:
raise InvalidMetadata('Missing metadata "%s"' % arg)
if md['destination'] != self._name:
# TODO(simo) handle group keys by checking target
raise UnknownDestinationName(md['destination'])
try:
skey, ekey = self._decode_esek(self._key,
md['source'], md['destination'],
md['timestamp'], md['esek'])
except InvalidExpiredTicket:
raise
except Exception:
raise InvalidMetadata('Failed to decode ESEK for %s/%s' % (
md['source'], md['destination']))
sig = self._crypto.sign(skey, version + metadata + message)
if sig != signature:
raise InvalidSignature(md['source'], md['destination'])
if md['encryption'] is True:
msg = self._crypto.decrypt(ekey, message)
else:
msg = message
return (md, msg)
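# Hedged usage sketch (hypothetical names): a sender and a receiver service
# exchanging one signed (and optionally encrypted) payload. 'conf' stands in
# for a fully populated oslo.config ConfigOpts instance, and both services are
# assumed to share keys known to the KDS; this is illustrative only and is not
# runnable without that infrastructure.
#
#   sender = SecureMessage('scheduler', 'host1', conf, encrypt=True)
#   receiver = SecureMessage('compute', 'host2', conf)
#   metadata, message, sig = sender.encode('2.0', 'compute.host2', json_msg)
#   recv_md, recv_msg = receiver.decode('2.0', metadata, message, sig)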
|
|
import unittest
from lxml import etree
from soapfish import wsa, xsd
from soapfish.core import SOAPError, SOAPRequest, SOAPResponse
from soapfish.middlewares import ExceptionToSoapFault
from soapfish.soap_dispatch import SOAPDispatcher
from soapfish.testutil import EchoInputHeader, EchoOutputHeader, echo_handler, echo_service
class SOAPDispatcherTest(unittest.TestCase):
def test_can_dispatch_good_soap_message(self):
handler, handler_state = echo_handler()
dispatcher = SOAPDispatcher(echo_service(handler))
soap_message = (
'<ns1:echoRequest xmlns:ns1="http://soap.example/echo/types">'
'<value>foobar</value>'
'</ns1:echoRequest>'
)
request_message = self._wrap_with_soap_envelope(soap_message)
request = SOAPRequest({'SOAPACTION': 'echo', 'REQUEST_METHOD': 'POST'}, request_message)
response = dispatcher.dispatch(request)
self.assert_is_successful_response(response, handler_state)
self.assertEqual('foobar', handler_state.input_.value)
response_document = etree.fromstring(response.http_content)
response_xml = etree.tostring(response_document, pretty_print=True)
expected_xml = (
b'<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope/">\n'
b' <ns0:Body>\n'
b' <ns0:echoResponse xmlns:ns0="http://soap.example/echo/types">\n'
b' <value>foobar</value>\n'
b' </ns0:echoResponse>\n'
b' </ns0:Body>\n'
b'</ns0:Envelope>\n'
)
self.assertEqual(expected_xml, response_xml)
def test_can_validate_soap_message(self):
handler, handler_state = echo_handler()
dispatcher = SOAPDispatcher(echo_service(handler))
soap_message = (
'<ns1:echoRequest xmlns:ns1="http://soap.example/echo/types">'
'<invalid>foobar</invalid>'
'</ns1:echoRequest>'
)
request_message = self._wrap_with_soap_envelope(soap_message)
request = SOAPRequest({'SOAPACTION': 'echo', 'REQUEST_METHOD': 'POST'}, request_message)
response = dispatcher.dispatch(request)
self.assertFalse(handler_state.was_called)
self.assert_is_soap_fault(response, partial_fault_string="Element 'invalid': This element is not expected. "
'Expected is ( value ).')
def test_can_reject_malformed_xml_soap_message(self):
request = SOAPRequest({'SOAPACTION': 'echo', 'REQUEST_METHOD': 'POST'}, 'garbage')
dispatcher = SOAPDispatcher(echo_service())
response = dispatcher.dispatch(request)
self.assertEqual(500, response.http_status_code)
self.assertEqual('text/xml', response.http_headers['Content-Type'])
self.assert_is_soap_fault(response, partial_fault_string="Start tag expected, '<' not found")
def test_can_include_imported_schemas_during_validation(self):
# If the SOAPDispatcher did not use imported schemas during validation,
# this test would fail because the 'CodeType' restriction is only defined
# in the imported schema.
handler, handler_state = echo_handler()
service = echo_service(handler)
class CodeType(xsd.String):
pattern = r'[0-9]{5}'
class Container(xsd.ComplexType):
value = xsd.Element(CodeType)
code_schema = xsd.Schema('http://soap.example/included',
location='http://soap.example/included',
elementFormDefault=xsd.ElementFormDefault.UNQUALIFIED,
simpleTypes=[CodeType],
complexTypes=[Container],
elements={'foo': xsd.Element(Container)},
)
service.methods[0].input = 'foo'
service.schemas[0].imports = [code_schema]
# The setup is a bit simplistic because the <code> tag is not parsed
# into a soapfish model element for the handler but this was enough
# to trigger the bug
dispatcher = SOAPDispatcher(service)
wsgi_environ = {'SOAPACTION': 'echo', 'REQUEST_METHOD': 'POST'}
soap_message = '<ns0:foo xmlns:ns0="http://soap.example/included"><value>12345</value></ns0:foo>'
request = SOAPRequest(wsgi_environ, self._wrap_with_soap_envelope(soap_message))
response = dispatcher.dispatch(request)
self.assert_is_successful_response(response, handler_state)
self.assertEqual('12345', handler_state.input_.value)
def test_can_reject_non_soap_xml_body(self):
request = SOAPRequest({'SOAPACTION': 'echo', 'REQUEST_METHOD': 'POST'}, '<some>xml</some>')
dispatcher = SOAPDispatcher(echo_service())
# previously this raised an AttributeError due to an unhandled exception
response = dispatcher.dispatch(request)
self.assertEqual(500, response.http_status_code)
self.assertEqual('text/xml', response.http_headers['Content-Type'])
self.assert_is_soap_fault(response, partial_fault_string='Missing SOAP body')
def test_can_reject_invalid_action(self):
soap_message = (
'<ns1:echoRequest xmlns:ns1="http://soap.example/echo/types">'
'<value>foobar</value>'
'</ns1:echoRequest>'
)
request_message = self._wrap_with_soap_envelope(soap_message)
request = SOAPRequest({'SOAPACTION': 'invalid', 'REQUEST_METHOD': 'POST'}, request_message)
dispatcher = SOAPDispatcher(echo_service())
response = dispatcher.dispatch(request)
self.assert_is_soap_fault(response, partial_fault_string='Invalid SOAP action: invalid')
def test_can_reject_invalid_root_tag(self):
soap_message = ('<ns0:invalid xmlns:ns0="invalid"/>')
request_message = self._wrap_with_soap_envelope(soap_message)
request = SOAPRequest({'REQUEST_METHOD': 'POST'}, request_message)
dispatcher = SOAPDispatcher(echo_service())
response = dispatcher.dispatch(request)
self.assert_is_soap_fault(response, partial_fault_string='DocumentInvalid')
def test_can_dispatch_requests_based_on_soap_body(self):
handler, handler_state = echo_handler()
dispatcher = SOAPDispatcher(echo_service(handler))
soap_message = (
'<ns1:echoRequest xmlns:ns1="http://soap.example/echo/types">'
'<value>foobar</value>'
'</ns1:echoRequest>'
)
request_message = self._wrap_with_soap_envelope(soap_message)
request = SOAPRequest({'SOAPACTION': '""', 'REQUEST_METHOD': 'POST'}, request_message)
response = dispatcher.dispatch(request)
self.assert_is_successful_response(response, handler_state)
def test_can_use_soap_error_from_handler(self):
soap_error = SOAPError('code', 'internal data error', 'actor')
def faulty_handler(request, input_):
return SOAPResponse(soap_error)
dispatcher = SOAPDispatcher(echo_service(handler=faulty_handler))
soap_message = (
'<ns1:echoRequest xmlns:ns1="http://soap.example/echo/types">'
'<value>foobar</value>'
'</ns1:echoRequest>'
)
request_message = self._wrap_with_soap_envelope(soap_message)
request = SOAPRequest({'REQUEST_METHOD': 'POST'}, request_message)
response = dispatcher.dispatch(request)
self.assertEqual('text/xml', response.http_headers['Content-Type'])
self.assertEqual(500, response.http_status_code)
self.assert_is_soap_fault(response, fault_code='code', partial_fault_string='internal data error')
def test_can_handle_xsd_element_as_return_value_from_handler(self):
def handler(request, input_):
return input_
dispatcher = SOAPDispatcher(echo_service(handler))
soap_message = (
'<ns1:echoRequest xmlns:ns1="http://soap.example/echo/types">'
'<value>hello</value>'
'</ns1:echoRequest>'
)
request_message = self._wrap_with_soap_envelope(soap_message)
request = SOAPRequest({'SOAPACTION': 'echo', 'REQUEST_METHOD': 'POST'}, request_message)
response = dispatcher.dispatch(request)
body_text = response.http_content
if not isinstance(body_text, str):
body_text = body_text.decode()
self.assertIn('<value>hello</value>', body_text)
def test_can_propagate_custom_input_header(self):
handler, handler_state = echo_handler()
dispatcher = SOAPDispatcher(echo_service(handler, input_header=EchoInputHeader))
soap_header = ('<tns:InputVersion>42</tns:InputVersion>')
soap_message = (
'<tns:echoRequest>'
'<value>foobar</value>'
'</tns:echoRequest>'
)
request_message = self._wrap_with_soap_envelope(soap_message, header=soap_header)
request = SOAPRequest({'SOAPACTION': 'echo', 'REQUEST_METHOD': 'POST'}, request_message)
response = dispatcher.dispatch(request)
self.assert_is_successful_response(response, handler_state)
self.assertIsNotNone(handler_state.input_header)
self.assertEqual('42', handler_state.input_header.InputVersion)
def test_can_handle_empty_input_header(self):
handler, handler_state = echo_handler()
dispatcher = SOAPDispatcher(echo_service(handler, input_header=EchoInputHeader))
soap_message = (
'<tns:echoRequest xmlns:tns="http://soap.example/echo/types">'
'<value>foobar</value>'
'</tns:echoRequest>'
)
request_message = self._wrap_with_soap_envelope(soap_message)
request = SOAPRequest({'SOAPACTION': 'echo', 'REQUEST_METHOD': 'POST'}, request_message)
response = dispatcher.dispatch(request)
self.assert_is_successful_response(response, handler_state)
def test_can_validate_soap_header(self):
handler, handler_state = echo_handler()
dispatcher = SOAPDispatcher(echo_service(handler, input_header=EchoInputHeader))
soap_header = ('<tns:invalid>42</tns:invalid>')
soap_message = (
'<tns:echoRequest>'
'<value>foobar</value>'
'</tns:echoRequest>'
)
request_message = self._wrap_with_soap_envelope(soap_message, header=soap_header)
request = SOAPRequest({'SOAPACTION': 'echo', 'REQUEST_METHOD': 'POST'}, request_message)
response = dispatcher.dispatch(request)
self.assert_is_soap_fault(response, partial_fault_string='DocumentInvalid')
def test_can_propagate_custom_output_header(self):
handler, handler_state = echo_handler()
def _handler(request, _input):
resp = handler(request, _input)
resp.soap_header = EchoOutputHeader(OutputVersion='42')
return resp
dispatcher = SOAPDispatcher(echo_service(_handler, output_header=EchoOutputHeader))
soap_header = '<tns:InputVersion>42</tns:InputVersion>'
soap_message = (
'<tns:echoRequest xmlns:tns="http://soap.example/echo/types">'
'<value>foobar</value>'
'</tns:echoRequest>'
)
request_message = self._wrap_with_soap_envelope(soap_message, header=soap_header)
request = SOAPRequest({'SOAPACTION': 'echo', 'REQUEST_METHOD': 'POST'}, request_message)
response = dispatcher.dispatch(request)
self.assert_is_successful_response(response, handler_state)
self.assertIn(b'<ns0:OutputVersion>42</ns0:OutputVersion>', response.http_content)
def test_can_handle_empty_output_header(self):
handler, handler_state = echo_handler()
dispatcher = SOAPDispatcher(echo_service(handler, output_header=EchoOutputHeader))
soap_message = (
'<tns:echoRequest xmlns:tns="http://soap.example/echo/types">'
'<value>foobar</value>'
'</tns:echoRequest>'
)
request_message = self._wrap_with_soap_envelope(soap_message)
request = SOAPRequest({'SOAPACTION': 'echo', 'REQUEST_METHOD': 'POST'}, request_message)
response = dispatcher.dispatch(request)
self.assert_is_successful_response(response, handler_state)
def test_return_soap_fault_on_exception(self):
def handler(request, _input):
raise Exception('unexpected exception')
service = echo_service(handler)
dispatcher = SOAPDispatcher(service, [ExceptionToSoapFault()])
soap_message = (
'<tns:echoRequest xmlns:tns="http://soap.example/echo/types">'
'<value>foobar</value>'
'</tns:echoRequest>'
)
request_message = self._wrap_with_soap_envelope(soap_message)
request = SOAPRequest({'SOAPACTION': 'echo', 'REQUEST_METHOD': 'POST'}, request_message)
response = dispatcher.dispatch(request)
self.assert_is_soap_fault(response, fault_code=service.version.Code.SERVER,
partial_fault_string='unexpected exception')
self.assertEqual('text/xml', response.http_headers['Content-Type'])
self.assertEqual(500, response.http_status_code)
def test_can_validate_wsa_header(self):
dispatcher = SOAPDispatcher(echo_service())
header = wsa.Header.parsexml(
'<Header><Action xmlns="http://www.w3.org/2005/08/addressing">/Action</Action></Header>',
)
dispatcher._validate_header(header)
def test_can_detect_invalid_wsa_header(self):
dispatcher = SOAPDispatcher(echo_service())
header = wsa.Header.parsexml(
'<Header><Invalid xmlns="http://www.w3.org/2005/08/addressing">/Action</Invalid></Header>',
)
with self.assertRaises(etree.DocumentInvalid):
dispatcher._validate_header(header)
def test_evaluate_service_location(self):
handler, _ = echo_handler()
service = echo_service(handler)
service.location = '${scheme}://${host}/ws'
dispatcher = SOAPDispatcher(service)
request = SOAPRequest({'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'wsdl', 'HTTP_HOST': 'soap.example'}, '')
response = dispatcher.dispatch(request)
self.assert_is_successful_response(response)
self.assertNotIn('${scheme}', response.http_content.decode())
self.assertNotIn('${host}', response.http_content.decode())
def test_service_bind_function(self):
handler, handler_state = echo_handler()
service = echo_service(handler)
@service.route('echoOperation')
def echo_func(request, input_):
handler_state.new_func_was_called = True
return handler(request, input_)
dispatcher = SOAPDispatcher(service)
soap_message = (
'<ns1:echoRequest xmlns:ns1="http://soap.example/echo/types">'
'<value>foobar</value>'
'</ns1:echoRequest>'
)
request_message = self._wrap_with_soap_envelope(soap_message)
request = SOAPRequest({'SOAPACTION': 'echo', 'REQUEST_METHOD': 'POST'}, request_message)
response = dispatcher.dispatch(request)
self.assertTrue(handler_state.new_func_was_called)
self.assert_is_successful_response(response, handler_state)
def test_hook_soap_request(self):
message = (
'<tns:echoRequest xmlns:tns="http://soap.example/echo/types">'
'<value>Cast a hook to catch a soapfish.</value>'
'</tns:echoRequest>'
)
request = SOAPRequest(
{'REQUEST_METHOD': 'POST', 'SOAPACTION': 'echo'},
self._wrap_with_soap_envelope(message),
)
def hook(dispatcher, request):
request.http_content = request.http_content.replace(b'catch', b'snare')
return request
dispatcher = SOAPDispatcher(echo_service(), hooks={'soap-request': hook})
response = dispatcher.dispatch(request)
self.assertIn(b'Cast a hook to snare a soapfish.', response.http_content)
def test_hook_soap_response(self):
message = (
'<tns:echoRequest xmlns:tns="http://soap.example/echo/types">'
'<value>Cast a hook to catch a soapfish.</value>'
'</tns:echoRequest>'
)
request = SOAPRequest(
{'REQUEST_METHOD': 'POST', 'SOAPACTION': 'echo'},
self._wrap_with_soap_envelope(message),
)
def hook(dispatcher, request, response):
response.http_status_code = 999
return response
dispatcher = SOAPDispatcher(echo_service(), hooks={'soap-response': hook})
response = dispatcher.dispatch(request)
self.assertEqual(response.http_status_code, 999)
# --- custom assertions ---------------------------------------------------
def assert_is_successful_response(self, response, handler_state=None):
self.assertEqual(200, response.http_status_code)
self.assertEqual('text/xml', response.http_headers['Content-Type'])
if handler_state:
self.assertTrue(handler_state.was_called)
def assert_is_soap_fault(self, response, fault_code=None, partial_fault_string=None):
self.assertEqual(500, response.http_status_code)
self.assertEqual('text/xml', response.http_headers['Content-Type'])
fault_document = etree.fromstring(response.http_content)
soap_envelope = fault_document.getroottree()
namespaces = {'s': 'http://schemas.xmlsoap.org/soap/envelope/'}
fault_nodes = soap_envelope.xpath('/s:Envelope/s:Body/s:Fault', namespaces=namespaces)
self.assertEqual(len(fault_nodes), 1, msg='expected exactly one SOAP fault')
children = list(fault_nodes[0])
self.assertEqual(len(children), 2)
xml_fault_code, fault_string = children
if fault_code is None:
fault_code = 'Client'
self.assertEqual(fault_code, xml_fault_code.text)
if partial_fault_string:
self.assertIn(partial_fault_string, fault_string.text)
# --- internal helpers ----------------------------------------------------
def _wrap_with_soap_envelope(self, payload, header=''):
if header:
header = f'<senv:Header>{header}</senv:Header>'
envelope = (
'<?xml version="1.0" encoding="UTF-8"?>'
'<senv:Envelope xmlns:senv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:tns="http://soap.example/echo/types">'
'%(header)s'
'<senv:Body>%(payload)s</senv:Body>'
'</senv:Envelope>'
) % {'payload': payload, 'header': header}
return envelope.encode()
|
|
#!/usr/bin/python2.7
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for importing records in batches, with error detection.
This module converts Python dictionaries into datastore entities.
All dictionary field values are expected to be Unicode strings."""
__author__ = 'kpy@google.com (Ka-Ping Yee) and many other Googlers'
import datetime
import logging
import re
import sys
from google.appengine.api import datastore_errors
import subscribe
from model import *
from utils import validate_sex, validate_status, validate_approximate_date, \
validate_age, get_utcnow, get_full_name
DEFAULT_PUT_RETRIES = 3
MAX_PUT_BATCH = 100
def utf8_decoder(dict_reader):
"""Yields a dictionary where all string values are converted to Unicode.
Args:
dict_reader: An iterable that yields dictionaries with string values
Yields:
A dictionary with all string values converted to Unicode.
"""
for record in dict_reader:
for key in record:
value = record[key]
if isinstance(value, str):
record[key] = value.decode('utf-8')
yield record
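# A small self-contained example of utf8_decoder (assumption: an in-memory
# list of dicts stands in for the csv.DictReader normally passed in). The
# UTF-8 byte string is decoded to the unicode value u'Jos\xe9'.
def _utf8_decoder_example():
    rows = [{'full_name': 'Jos\xc3\xa9'}]
    decoded = list(utf8_decoder(rows))
    assert decoded[0]['full_name'] == u'Jos\xe9'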
def put_batch(batch, retries=DEFAULT_PUT_RETRIES):
for attempt in range(retries):
try:
db.put(batch)
logging.info('Imported records: %d' % len(batch))
return len(batch)
except Exception:
_, value, _ = sys.exc_info()
logging.warn('Retrying batch: %s' % value)
return 0
date_re = re.compile(r'^(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)Z$')
def strip(string_or_none):
if not string_or_none:
return ''
return string_or_none.strip() or ''
def validate_datetime(datetime_or_datestring):
if isinstance(datetime_or_datestring, datetime.datetime):
return datetime_or_datestring
if not datetime_or_datestring:
return None # A missing value is okay.
match = date_re.match(datetime_or_datestring)
if match:
return datetime.datetime(*map(int, match.groups()))
raise ValueError('Bad datetime: %r' % datetime_or_datestring)
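# Quick illustrative checks of validate_datetime (a sketch; the literal values
# are made up). Only the strict 'YYYY-MM-DDTHH:MM:SSZ' form is parsed, datetime
# objects pass through unchanged, and empty values yield None.
def _validate_datetime_example():
    assert (validate_datetime('2010-01-16T07:15:00Z') ==
            datetime.datetime(2010, 1, 16, 7, 15, 0))
    assert validate_datetime('') is None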
def validate_boolean(string):
if not string:
return None # A missing value is okay.
return (isinstance(string, basestring) and
string.strip().lower() in ['true', 'yes', 'y', '1'])
def create_person(repo, fields):
"""Creates a Person entity in the given repository with the given field
values. If 'fields' contains a 'person_record_id', calling put() on the
resulting entity will overwrite any existing (original or clone) record
with the same person_record_id. Otherwise, a new original person record is
created in the given repository."""
person_fields = dict(
entry_date=get_utcnow(),
expiry_date=validate_datetime(fields.get('expiry_date')),
author_name=strip(fields.get('author_name')),
author_email=strip(fields.get('author_email')),
author_phone=strip(fields.get('author_phone')),
source_name=strip(fields.get('source_name')),
source_url=strip(fields.get('source_url')),
source_date=validate_datetime(fields.get('source_date')),
full_name=strip(fields.get('full_name')),
given_name=strip(fields.get('given_name')),
family_name=strip(fields.get('family_name')),
alternate_names=strip(fields.get('alternate_names')),
description=strip(fields.get('description')),
sex=validate_sex(fields.get('sex')),
date_of_birth=validate_approximate_date(fields.get('date_of_birth')),
age=validate_age(fields.get('age')),
home_street=strip(fields.get('home_street')),
home_neighborhood=strip(fields.get('home_neighborhood')),
home_city=strip(fields.get('home_city')),
home_state=strip(fields.get('home_state')),
home_postal_code=strip(fields.get('home_postal_code')),
home_country=strip(fields.get('home_country')),
photo_url=strip(fields.get('photo_url')),
profile_urls=strip(fields.get('profile_urls')),
)
# For PFIF 1.3 or older, populate full_name (it was an optional field
# before), using given_name and family_name if it is empty.
if not person_fields['full_name'].strip():
person_fields['full_name'] = get_full_name(
person_fields['given_name'],
person_fields['family_name'],
config.Configuration(repo))
record_id = strip(fields.get('person_record_id'))
if record_id: # create a record that might overwrite an existing one
if is_clone(repo, record_id):
return Person.create_clone(repo, record_id, **person_fields)
else:
return Person.create_original_with_record_id(
repo, record_id, **person_fields)
else: # create a new original record
return Person.create_original(repo, **person_fields)
def create_note(repo, fields):
"""Creates a Note entity in the given repository with the given field
values. If 'fields' contains a 'note_record_id', calling put() on the
resulting entity will overwrite any existing (original or clone) record
with the same note_record_id. Otherwise, a new original note record is
created in the given repository."""
assert strip(fields.get('person_record_id')), 'person_record_id is required'
assert strip(fields.get('source_date')), 'source_date is required'
note_fields = dict(
person_record_id=strip(fields['person_record_id']),
linked_person_record_id=strip(fields.get('linked_person_record_id')),
author_name=strip(fields.get('author_name')),
author_email=strip(fields.get('author_email')),
author_phone=strip(fields.get('author_phone')),
source_date=validate_datetime(fields.get('source_date')),
status=validate_status(fields.get('status')),
author_made_contact=validate_boolean(fields.get('author_made_contact')),
email_of_found_person=strip(fields.get('email_of_found_person')),
phone_of_found_person=strip(fields.get('phone_of_found_person')),
last_known_location=strip(fields.get('last_known_location')),
text=fields.get('text'),
photo_url=fields.get('photo_url'),
entry_date=get_utcnow(),
)
record_id = strip(fields.get('note_record_id'))
if record_id: # create a record that might overwrite an existing one
if is_clone(repo, record_id):
return Note.create_clone(repo, record_id, **note_fields)
else:
return Note.create_original_with_record_id(
repo, record_id, **note_fields)
else: # create a new original record
return Note.create_original(repo, **note_fields)
def filter_new_notes(entities, repo):
"""Filter the notes which are new."""
notes = []
for entity in entities:
# Send an email notification for new notes only
if isinstance(entity, Note):
if not Note.get(repo, entity.get_note_record_id()):
notes.append(entity)
return notes
def send_notifications(handler, persons, notes):
"""For each note, send a notification to subscriber.
Args:
notes: List of notes for which to send notification.
persons: Dictionary of persons impacted by the notes,
indexed by person_record_id.
handler: Handler used to send email notification.
"""
for note in notes:
person = persons[note.person_record_id]
subscribe.send_notifications(handler, person, [note])
def notes_match(a, b):
fields = ['person_record_id', 'author_name', 'author_email', 'author_phone',
'source_date', 'status', 'author_made_contact',
'email_of_found_person', 'phone_of_found_person',
'last_known_location', 'text', 'photo_url']
return [getattr(a, f) for f in fields] == [getattr(b, f) for f in fields]
def import_records(repo, domain, converter, records,
mark_notes_reviewed=False,
believed_dead_permission=False,
handler=None,
omit_duplicate_notes=False):
"""Convert and import a list of entries into a respository.
Args:
repo: Identifies the repository in which to store the records.
domain: Accept only records that have this original domain. Only one
original domain may be imported at a time.
converter: A function to transform a dictionary of fields to a
datastore entity. This function may throw an exception if there
is anything wrong with the input fields and import_records will
skip the bad record. The key_name of the resulting datastore
entity must begin with domain + '/', or the record will be skipped.
records: A list of dictionaries representing the entries.
mark_notes_reviewed: If true, mark the new notes as reviewed.
believed_dead_permission: If true, allow importing notes with status
as 'believed_dead'; otherwise skip the note and return an error.
handler: Handler to use to send e-mail notification for notes. If this
is None, then we do not send e-mail.
omit_duplicate_notes: If true, skip any Notes that are identical to
existing Notes on the same Person.
Returns:
The number of passed-in records that were written (not counting other
Person records that were updated because they have new Notes), a list
of (error_message, record) pairs for the skipped records, and the
number of records processed in total.
"""
persons = {} # Person entities to write
notes = {} # Note entities to write
skipped = [] # entities skipped due to an error
total = 0 # total number of entities for which conversion was attempted
for fields in records:
total += 1
try:
entity = converter(repo, fields)
except (KeyError, ValueError, AssertionError,
datastore_errors.BadValueError), e:
skipped.append((e.__class__.__name__ + ': ' + str(e), fields))
continue
if entity.original_domain != domain:
skipped.append(
('Not in authorized domain: %r' % entity.record_id, fields))
continue
if isinstance(entity, Person):
entity.update_index(['old', 'new'])
persons[entity.record_id] = entity
if isinstance(entity, Note):
# Check whether reporting 'believed_dead' in note is permitted.
if (not believed_dead_permission and \
entity.status == 'believed_dead'):
skipped.append(
('Not authorized to post notes with ' \
'the status \"believed_dead\"',
fields))
continue
# Check whether commenting is already disabled by record author.
existing_person = Person.get(repo, entity.person_record_id)
if existing_person and existing_person.notes_disabled:
skipped.append(
('The author has disabled new commenting on this record',
fields))
continue
# Check whether the note is a duplicate.
if omit_duplicate_notes:
other_notes = Note.get_by_person_record_id(
repo, entity.person_record_id, filter_expired=False)
if any(notes_match(entity, note) for note in other_notes):
skipped.append(
('This is a duplicate of an existing note', fields))
continue
entity.reviewed = mark_notes_reviewed
notes[entity.record_id] = entity
# We keep two dictionaries 'persons' and 'extra_persons', with disjoint
# key sets: Person entities for the records passed in to import_records()
# go in 'persons', and any other Person entities affected by the import go
# in 'extra_persons'. The two dictionaries are kept separate in order to
# produce a count of records written that only counts 'persons'.
extra_persons = {} # updated Persons other than those being imported
# For each Note, update the latest_* fields on the associated Person.
# We do these updates in dictionaries keyed by person_record_id so that
# multiple updates for one person_record_id will mutate the same object.
for note in notes.values():
if note.person_record_id in persons:
# This Note belongs to a Person that is being imported.
person = persons[note.person_record_id]
elif note.person_record_id in extra_persons:
# This Note belongs to some other Person that is not part of this
# import and is already being updated due to another Note.
person = extra_persons[note.person_record_id]
else:
# This Note belongs to some other Person that is not part of this
# import and this is the first such Note in this import.
person = Person.get(repo, note.person_record_id)
if not person:
continue
extra_persons[note.person_record_id] = person
person.update_from_note(note)
# TODO(kpy): Don't overwrite existing Persons with newer source_dates.
# Now store the imported Persons and Notes, and count them.
entities = persons.values() + notes.values()
all_persons = dict(persons, **extra_persons)
written = 0
while entities:
# The presence of a handler indicates we should notify subscribers
# for any new notes being written. We do not notify on
# "re-imported" existing notes to avoid spamming subscribers.
new_notes = []
if handler:
new_notes = filter_new_notes(entities[:MAX_PUT_BATCH], repo)
written_batch = put_batch(entities[:MAX_PUT_BATCH])
written += written_batch
# If there are new notes and the batch write succeeded, send notifications.
if new_notes and written_batch:
send_notifications(handler, all_persons, new_notes)
entities[:MAX_PUT_BATCH] = []
# Also store the other updated Persons, but don't count them.
entities = extra_persons.values()
while entities:
put_batch(entities[:MAX_PUT_BATCH])
entities[:MAX_PUT_BATCH] = []
return written, skipped, total
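# Hedged usage sketch (hypothetical repository name and record values): import
# a single person record and inspect what was skipped. Running this for real
# requires an App Engine datastore context, which this module assumes.
#
#   written, skipped, total = import_records(
#       'haiti', 'example.org', create_person,
#       [{'person_record_id': 'example.org/person.1',
#         'source_date': '2010-01-16T07:15:00Z',
#         'full_name': u'_test_full_name'}])
#   assert total == 1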
|
|
#!/usr/bin/env python
import Queue, crawle, socket, unittest
ADDRESS_0 = ('127.0.0.1', 80), False
ADDRESS_1 = ('127.0.0.1', 443), True
ADDRESS_2 = ('127.0.0.1', 8080), False
class TestCQueueLRU(unittest.TestCase):
def setUp(self):
self.lru = crawle.CQueueLRU(10, 10)
def assert_newest(self, key):
self.assertEqual(self.lru.table[key], self.lru.newest)
self.assertEqual(None, self.lru.newest.prev)
def assert_oldest(self, key):
self.assertEqual(self.lru.table[key], self.lru.oldest)
self.assertEqual(None, self.lru.oldest.next)
def testGetSingleItem(self):
item = self.lru[ADDRESS_0]
self.assertEqual(None, self.lru.newest)
self.assertEqual(None, self.lru.oldest)
def testPutSingleItem(self):
item = self.lru[ADDRESS_0]
self.lru[ADDRESS_0] = item
self.assertEqual(1, len(self.lru.table))
self.assert_newest(ADDRESS_0)
self.assert_oldest(ADDRESS_0)
def testReAddSingleItem(self):
item = self.lru[ADDRESS_0]
self.lru[ADDRESS_0] = item
again = self.lru[ADDRESS_0]
self.assertEqual(again, item)
self.lru[ADDRESS_0] = again
self.assertEqual(1, len(self.lru.table))
self.assert_newest(ADDRESS_0)
self.assert_oldest(ADDRESS_0)
def testPutDoubleItemLimit1(self):
self.lru.max_queues = 1
item0 = self.lru[ADDRESS_0]
item1 = self.lru[ADDRESS_1]
self.lru[ADDRESS_0] = item0
self.lru[ADDRESS_1] = item1
self.assertEqual(1, len(self.lru.table))
self.assert_newest(ADDRESS_1)
self.assert_oldest(ADDRESS_1)
def testPutDoubleItemLimitN(self):
item0 = self.lru[ADDRESS_0]
item1 = self.lru[ADDRESS_1]
self.lru[ADDRESS_0] = item0
self.lru[ADDRESS_1] = item1
self.assertEqual(2, len(self.lru.table))
self.assert_newest(ADDRESS_1)
self.assert_oldest(ADDRESS_0)
def testReAddFirstOfDoubleItemLimitN(self):
item0 = self.lru[ADDRESS_0]
item1 = self.lru[ADDRESS_1]
self.lru[ADDRESS_0] = item0
self.lru[ADDRESS_1] = item1
self.lru[ADDRESS_0] = item0
self.assertEqual(2, len(self.lru.table))
self.assert_newest(ADDRESS_0)
self.assert_oldest(ADDRESS_1)
def testReAddSecondOfDoubleItemLimitN(self):
item0 = self.lru[ADDRESS_0]
item1 = self.lru[ADDRESS_1]
self.lru[ADDRESS_0] = item0
self.lru[ADDRESS_1] = item1
self.lru[ADDRESS_1] = item0
self.assertEqual(2, len(self.lru.table))
self.assert_newest(ADDRESS_1)
self.assert_oldest(ADDRESS_0)
def testPutTripleItemLimitN(self):
item0 = self.lru[ADDRESS_0]
item1 = self.lru[ADDRESS_1]
item2 = self.lru[ADDRESS_2]
self.lru[ADDRESS_0] = item0
self.lru[ADDRESS_1] = item1
self.lru[ADDRESS_2] = item2
self.assertEqual(3, len(self.lru.table))
self.assert_newest(ADDRESS_2)
self.assert_oldest(ADDRESS_0)
def testReAddMiddleOfTripleItemLimitN(self):
item0 = self.lru[ADDRESS_0]
item1 = self.lru[ADDRESS_1]
item2 = self.lru[ADDRESS_2]
self.lru[ADDRESS_0] = item0
self.lru[ADDRESS_1] = item1
self.lru[ADDRESS_2] = item2
self.lru[ADDRESS_1] = item1
self.assertEqual(3, len(self.lru.table))
self.assert_newest(ADDRESS_1)
self.assert_oldest(ADDRESS_0)
self.assertEqual(self.lru.newest, self.lru.oldest.prev.prev)
self.assertEqual(self.lru.oldest, self.lru.newest.next.next)
class TestHTTPConnectionQueue(unittest.TestCase):
def setUp(self):
self.cq = crawle.HTTPConnectionQueue(ADDRESS_0)
def testQueueLength(self):
temp = self.cq.get()
for i in range(5):
self.assertEqual(i, self.cq.queue.qsize())
self.cq.put(temp)
def testResetConnection(self):
prev = crawle.HTTPConnectionQueue.REQUEST_LIMIT
try:
crawle.HTTPConnectionQueue.REQUEST_LIMIT = 2
a = self.cq.get()
self.cq.put(a)
self.assertEqual(a, self.cq.get())
self.cq.put(a)
self.assertNotEqual(a, self.cq.get())
finally:
crawle.HTTPConnectionQueue.REQUEST_LIMIT = prev
def testGetConnectionCount(self):
for i in range(5):
conn = self.cq.get()
self.assertEqual(0, conn.request_count)
def testGetConnectionCountReplace(self):
for i in range(5):
conn = self.cq.get()
self.assertEqual(i, conn.request_count)
self.cq.put(conn)
def testLimitConnections(self):
self.cq.max_conn = 1
item0 = self.cq.connection_object(*ADDRESS_0)
item1 = self.cq.connection_object(*ADDRESS_0)
self.cq.put(item0)
self.cq.put(item1)
self.assertEqual(1, self.cq.queue.qsize())
self.assertEqual(item0, self.cq.get())
self.assertEqual(0, self.cq.queue.qsize())
class TestHTTPConnectionControl(unittest.TestCase):
class PreProcessFailHandler(crawle.Handler):
"""Helper class for one of the tests"""
def pre_process(self, req_res):
req_res.response_url = None
def setUp(self):
self.cc = crawle.HTTPConnectionControl(crawle.Handler(), timeout=1)
def testRequestSTOP_CRAWLE(self):
try:
crawle.STOP_CRAWLE = True
rr = crawle.RequestResponse('')
self.assertRaises(crawle.CrawleStopped, self.cc.request, rr)
finally:
crawle.STOP_CRAWLE = False
def testRequestPreProcess(self):
rr = crawle.RequestResponse('http://google.com')
self.cc.handler = self.PreProcessFailHandler()
self.assertRaises(crawle.CrawleRequestAborted, self.cc.request, rr)
def testBuildRequestStandard(self):
rr = crawle.RequestResponse('http://127.0.0.1/CRAWL-E')
address, encrypted, url, headers = self.cc._build_request(rr)
self.assertEqual(('127.0.0.1', None), address)
self.assertEqual(False, encrypted)
self.assertEqual('/CRAWL-E', url)
def testBuildRequestHTTPS(self):
rr = crawle.RequestResponse('https://127.0.0.1/CRAWL-E/')
address, encrypted, url, headers = self.cc._build_request(rr)
self.assertEqual(('127.0.0.1', None), address)
self.assertEqual(True, encrypted)
self.assertEqual('/CRAWL-E/', url)
def testBuildRequestNonStandardPort(self):
rr = crawle.RequestResponse('http://127.0.0.1:1337/')
address, encrypted, url, headers = self.cc._build_request(rr)
self.assertEqual(('127.0.0.1', 1337), address)
self.assertEqual(False, encrypted)
self.assertEqual('/', url)
def testRequestInvalidMethod(self):
rr = crawle.RequestResponse('http://www.google.com', method='INVALID')
self.cc.request(rr)
self.assertEqual(405, rr.response_status)
def testRequestInvalidHostname(self):
rr = crawle.RequestResponse('http://invalid-')
try:
self.cc.request(rr)
self.fail('Did not raise invalid hostname exception')
except socket.gaierror, e:
self.assertTrue(e.errno in [-2, -5])
def testRequestInvalidURL(self):
urls = ['invalid', 'http:///invalid', 'httpz://google.com']
for url in urls:
rr = crawle.RequestResponse(url)
self.assertRaises(crawle.CrawleUnsupportedScheme, self.cc.request,
rr)
def testRequest301(self):
rr = crawle.RequestResponse('http://google.com', redirects=None)
self.cc.request(rr)
self.assertEqual(301, rr.response_status)
self.assertEqual('http://www.google.com/',
rr.response_headers['location'])
def testRequestRedirectExceeded(self):
rr = crawle.RequestResponse('http://google.com', redirects=0)
self.assertRaises(crawle.CrawleRedirectsExceeded, self.cc.request, rr)
def testRequestSuccessfulRedirect(self):
rr = crawle.RequestResponse('http://google.com', redirects=1)
self.cc.request(rr)
self.assertEqual(200, rr.response_status)
self.assertEqual(0, rr.redirects)
def testRequest200(self):
rr = crawle.RequestResponse('http://www.google.com', redirects=1)
self.cc.request(rr)
self.assertEqual(200, rr.response_status)
self.assertEqual(1, rr.redirects)
self.assertTrue(rr.response_time > 0)
def testHTTPSRequest200(self):
# Page that can only be accessed via https, http causes redirect
url = 'https://msp.f-secure.com/web-test/common/test.html'
rr = crawle.RequestResponse(url, redirects=1)
self.cc.request(rr)
self.assertEqual(200, rr.response_status)
self.assertEqual(1, rr.redirects)
self.assertTrue(rr.response_time > 0)
def testRequestGzip(self):
rr = crawle.RequestResponse('http://www.pricetrackr.com/robots.txt',
redirects=1)
self.cc.request(rr)
self.assertEqual(200, rr.response_status)
self.assertEqual(1, rr.redirects)
self.assertTrue(rr.response_time > 0)
self.assertTrue('gzip' in rr.response_headers['content-encoding'])
def testRequestGzipViaZcat(self):
rr = crawle.RequestResponse('http://www.eweek.com/', redirects=1)
self.cc.request(rr)
self.assertEqual(200, rr.response_status)
self.assertEqual(1, rr.redirects)
self.assertTrue(rr.response_time > 0)
self.assertTrue('gzip' in rr.response_headers['content-encoding'])
self.assertTrue('Used zcat' in rr.extra)
def testRequestPost(self):
rr = crawle.RequestResponse(
'http://www.snee.com/xml/crud/posttest.cgi', method='POST',
params={'fname':'CRAWL-E', 'lname':'POST_TEST'})
self.cc.request(rr)
self.assertEqual(200, rr.response_status)
self.assertTrue(rr.response_time > 0)
self.assertTrue(''.join(['<p>First name: "CRAWL-E"</p>',
'<p>Last name: "POST_TEST"</p>'])
in rr.response_body)
def testConnectionTimeout(self):
rr = crawle.RequestResponse('http://4.4.4.4')
self.assertRaises(socket.timeout, self.cc.request, rr)
class TestController(unittest.TestCase):
def testInit(self):
c = crawle.Controller(None, None, 1)
class TestCrawlQueue(unittest.TestCase):
def queue_get(self):
if self.empty:
raise Queue.Empty
else:
return 1
def queue_put(self, item): pass
def setUp(self):
self.q = crawle.CrawlQueue(single_threaded=True)
self.empty = False
self.q._get = self.queue_get
self.q._put = self.queue_put
def testGet(self):
self.assertEqual(0, self.q._workers)
self.q.get()
self.assertEqual(1, self.q._workers)
self.q.get()
self.assertEqual(2, self.q._workers)
def testEmpty(self):
self.empty = True
self.assertRaises(Queue.Empty, self.q.get)
def testPut(self):
self.assertEqual(0, self.q._workers)
self.q.get()
self.assertEqual(1, self.q._workers)
self.q.put(None)
self.assertEqual(1, self.q._workers)
self.q.put(None)
self.q.work_complete()
self.assertEqual(0, self.q._workers)
self.q.put(None)
self.q.work_complete()
self.assertEqual(0, self.q._workers)
def testSingleThreadException(self):
self.q.get()
self.empty = True
self.assertRaises(Exception, self.q.get)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import random
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder import ssh_utils
from cinder import utils
from cinder.volume.drivers.ibm.storwize_svc import storwize_const
LOG = logging.getLogger(__name__)
class StorwizeSVCReplication(object):
def __init__(self, driver, replication_target=None):
self.driver = driver
self.target = replication_target or {}
def failover_volume_host(self, context, vref):
pass
def replication_failback(self, volume):
pass
def volume_replication_setup(self, context, vref):
pass
class StorwizeSVCReplicationGlobalMirror(StorwizeSVCReplication):
"""Support for Storwize/SVC global mirror mode replication.
Global Mirror establishes a Global Mirror relationship between
two volumes of equal size. The volumes in a Global Mirror relationship
are referred to as the master (source) volume and the auxiliary
(target) volume. This mode provides asynchronous volume
replication.
"""
asyncmirror = True
def __init__(self, driver, replication_target=None, target_helpers=None):
super(StorwizeSVCReplicationGlobalMirror, self).__init__(
driver, replication_target)
self.target_helpers = target_helpers
def volume_replication_setup(self, context, vref):
LOG.debug('enter: volume_replication_setup: volume %s', vref['name'])
target_vol_name = storwize_const.REPLICA_AUX_VOL_PREFIX + vref['name']
try:
attr = self.target_helpers.get_vdisk_attributes(target_vol_name)
if not attr:
opts = self.driver._get_vdisk_params(vref['volume_type_id'])
pool = self.target.get('pool_name')
src_attr = self.driver._helpers.get_vdisk_attributes(
vref['name'])
opts['iogrp'] = src_attr['IO_group_id']
self.target_helpers.create_vdisk(target_vol_name,
six.text_type(vref['size']),
'gb', pool, opts)
system_info = self.target_helpers.get_system_info()
self.driver._helpers.create_relationship(
vref['name'], target_vol_name, system_info.get('system_name'),
self.asyncmirror)
except Exception as e:
msg = (_("Unable to set up mirror mode replication for %(vol)s. "
"Exception: %(err)s.") % {'vol': vref['id'],
'err': e})
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
LOG.debug('leave: volume_replication_setup:volume %s', vref['name'])
def failover_volume_host(self, context, vref):
LOG.debug('enter: failover_volume_host: vref=%(vref)s',
{'vref': vref['name']})
target_vol = storwize_const.REPLICA_AUX_VOL_PREFIX + vref['name']
try:
rel_info = self.target_helpers.get_relationship_info(target_vol)
# Reverse the role of the primary and secondary volumes
self.target_helpers.switch_relationship(rel_info['name'])
return {'replication_status':
fields.ReplicationStatus.FAILED_OVER}
except Exception as e:
LOG.exception('Unable to fail-over the volume %(id)s to the '
'secondary back-end by switchrcrelationship '
'command, error: %(error)s',
{"id": vref['id'], "error": e})
# If the switch command fail, try to make the aux volume
# writeable again.
try:
self.target_helpers.stop_relationship(target_vol,
access=True)
return {'replication_status':
fields.ReplicationStatus.FAILED_OVER}
except Exception as e:
msg = (_('Unable to fail-over the volume %(id)s to the '
'secondary back-end, error: %(error)s') %
{"id": vref['id'], "error": e})
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
def replication_failback(self, volume):
tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
rel_info = self.target_helpers.get_relationship_info(tgt_volume)
if rel_info:
try:
self.target_helpers.switch_relationship(rel_info['name'],
aux=False)
return {'replication_status':
fields.ReplicationStatus.ENABLED,
'status': 'available'}
except Exception as e:
msg = (_('Unable to fail-back the volume:%(vol)s to the '
'master back-end, error:%(error)s') %
{"vol": volume['name'], "error": e})
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
class StorwizeSVCReplicationMetroMirror(
StorwizeSVCReplicationGlobalMirror):
"""Support for Storwize/SVC metro mirror mode replication.
Metro Mirror establishes a Metro Mirror relationship between
two volumes of equal size. The volumes in a Metro Mirror relationship
are referred to as the master (source) volume and the auxiliary
(target) volume.
"""
asyncmirror = False
def __init__(self, driver, replication_target=None, target_helpers=None):
super(StorwizeSVCReplicationMetroMirror, self).__init__(
driver, replication_target, target_helpers)
class StorwizeSVCReplicationGMCV(StorwizeSVCReplicationGlobalMirror):
"""Support for Storwize/SVC global mirror with change volumes mode replication.
Global Mirror with Change Volumes (GMCV) provides asynchronous replication
based on point-in-time copies of data. The volumes in a GMCV relationship
are referred to as the master (source) volume, master change volume, the
auxiliary (target) volume and auxiliary change volume.
"""
asyncmirror = True
def __init__(self, driver, replication_target=None, target_helpers=None):
super(StorwizeSVCReplicationGMCV, self).__init__(
driver, replication_target, target_helpers)
def volume_replication_setup(self, context, vref):
LOG.debug('enter: volume_replication_setup: volume %s', vref['name'])
source_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX +
vref['name'])
target_vol_name = storwize_const.REPLICA_AUX_VOL_PREFIX + vref['name']
target_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX +
target_vol_name)
try:
src_attr = self.driver._helpers.get_vdisk_attributes(
vref['name'])
# Create source change volume if it doesn't exist
src_change_attr = self.driver._helpers.get_vdisk_attributes(
source_change_vol_name)
if not src_change_attr:
src_change_opts = self.driver._get_vdisk_params(
vref['volume_type_id'])
src_change_opts['iogrp'] = src_attr['IO_group_id']
# Change volumes would usually be thin-provisioned
src_change_opts['autoexpand'] = True
self.driver._helpers.create_vdisk(source_change_vol_name,
six.text_type(vref['size']),
'gb',
src_attr['mdisk_grp_id'],
src_change_opts)
# Create target volume if it doesn't exist
target_attr = self.target_helpers.get_vdisk_attributes(
target_vol_name)
if not target_attr:
target_opts = self.driver._get_vdisk_params(
vref['volume_type_id'])
target_pool = self.target.get('pool_name')
target_opts['iogrp'] = src_attr['IO_group_id']
self.target_helpers.create_vdisk(target_vol_name,
six.text_type(vref['size']),
'gb',
target_pool,
target_opts)
# Create target change volume if it doesn't exist
target_change_attr = self.target_helpers.get_vdisk_attributes(
target_change_vol_name)
if not target_change_attr:
target_change_opts = self.driver._get_vdisk_params(
vref['volume_type_id'])
target_change_pool = self.target.get('pool_name')
target_change_opts['iogrp'] = src_attr['IO_group_id']
# Change Volumes would usually be thin-provisioned
target_change_opts['autoexpand'] = True
self.target_helpers.create_vdisk(target_change_vol_name,
six.text_type(vref['size']),
'gb',
target_change_pool,
target_change_opts)
system_info = self.target_helpers.get_system_info()
# Get cycle_period_seconds
src_change_opts = self.driver._get_vdisk_params(
vref['volume_type_id'])
cycle_period_seconds = src_change_opts.get('cycle_period_seconds')
self.driver._helpers.create_relationship(
vref['name'], target_vol_name, system_info.get('system_name'),
self.asyncmirror, True, source_change_vol_name,
cycle_period_seconds)
# Set target change volume
self.target_helpers.change_relationship_changevolume(
target_vol_name, target_change_vol_name, False)
# Start gmcv relationship
self.driver._helpers.start_relationship(vref['name'])
except Exception as e:
msg = (_("Unable to set up gmcv mode replication for %(vol)s. "
"Exception: %(err)s.") % {'vol': vref['id'],
'err': six.text_type(e)})
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
LOG.debug('leave: volume_replication_setup:volume %s', vref['name'])
def failover_volume_host(self, context, vref):
LOG.debug('enter: failover_volume_host: vref=%(vref)s',
{'vref': vref['name']})
# Make the aux volume writeable.
try:
self.target_helpers.stop_relationship(
storwize_const.REPLICA_AUX_VOL_PREFIX + vref['name'],
access=True)
return {'replication_status':
fields.ReplicationStatus.FAILED_OVER}
except Exception as e:
msg = (_('Unable to fail-over the volume %(id)s to the '
'secondary back-end, error: %(error)s') %
{"id": vref['id'], "error": six.text_type(e)})
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
def replication_failback(self, volume):
LOG.debug('enter: replication_failback: volume=%(volume)s',
{'volume': volume['name']})
tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
rel_info = self.target_helpers.get_relationship_info(tgt_volume)
if rel_info:
try:
self.target_helpers.stop_relationship(tgt_volume, access=True)
self.target_helpers.start_relationship(tgt_volume, 'master')
return {'replication_status':
fields.ReplicationStatus.ENABLED,
'status': 'available'}
except Exception as e:
msg = (_('Unable to fail-back the volume:%(vol)s to the '
'master back-end, error:%(error)s') %
{"vol": volume['name'], "error": six.text_type(e)})
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
class StorwizeSVCReplicationManager(object):
def __init__(self, driver, replication_target=None, target_helpers=None):
self.sshpool = None
self.driver = driver
self.target = replication_target
self.target_helpers = target_helpers(self._run_ssh)
self._master_helpers = self.driver._master_backend_helpers
self.global_m = StorwizeSVCReplicationGlobalMirror(
self.driver, replication_target, self.target_helpers)
self.metro_m = StorwizeSVCReplicationMetroMirror(
self.driver, replication_target, self.target_helpers)
self.gmcv = StorwizeSVCReplicationGMCV(
self.driver, replication_target, self.target_helpers)
def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
utils.check_ssh_injection(cmd_list)
# TODO(vhou): We'll have a common method in ssh_utils to take
# care of this _run_ssh method.
command = ' '.join(cmd_list)
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(
self.target.get('san_ip'),
self.target.get('san_ssh_port', 22),
self.target.get('ssh_conn_timeout', 30),
self.target.get('san_login'),
password=self.target.get('san_password'),
privatekey=self.target.get('san_private_key', ''),
min_size=self.target.get('ssh_min_pool_conn', 1),
max_size=self.target.get('ssh_max_pool_conn', 5),)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
return processutils.ssh_execute(
ssh, command, check_exit_code=check_exit_code)
except Exception as e:
LOG.error(six.text_type(e))
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1, stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error running SSH command: %s", command)
def get_target_helpers(self):
return self.target_helpers
def get_replica_obj(self, rep_type):
if rep_type == storwize_const.GLOBAL:
return self.global_m
elif rep_type == storwize_const.METRO:
return self.metro_m
elif rep_type == storwize_const.GMCV:
return self.gmcv
else:
return None
def _partnership_validate_create(self, client, remote_name, remote_ip):
try:
partnership_info = client.get_partnership_info(
remote_name)
if not partnership_info:
candidate_info = client.get_partnershipcandidate_info(
remote_name)
if candidate_info:
client.mkfcpartnership(remote_name)
else:
client.mkippartnership(remote_ip)
except Exception:
msg = (_('Unable to establish the partnership with '
'the Storwize cluster %s.') % remote_name)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def _partnership_start(self, client, remote_name):
try:
partnership_info = client.get_partnership_info(
remote_name)
if (partnership_info and
partnership_info['partnership'] != 'fully_configured'):
client.chpartnership(partnership_info['id'])
except Exception:
msg = (_('Unable to start the partnership with '
'the Storwize cluster %s.') % remote_name)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def establish_target_partnership(self):
local_system_info = self._master_helpers.get_system_info()
target_system_info = self.target_helpers.get_system_info()
local_system_name = local_system_info['system_name']
target_system_name = target_system_info['system_name']
local_ip = self.driver.configuration.safe_get('san_ip')
target_ip = self.target.get('san_ip')
# Establish the partnership only when the local system and the
# replication target system are different.
if target_system_name != local_system_name:
self._partnership_validate_create(self._master_helpers,
target_system_name, target_ip)
self._partnership_validate_create(self.target_helpers,
local_system_name, local_ip)
self._partnership_start(self._master_helpers, target_system_name)
self._partnership_start(self.target_helpers, local_system_name)
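# Hedged wiring sketch (hypothetical driver, target and helper objects): the
# manager hands back the replication helper matching the configured type,
# which then sets up replication for a volume.
#
#   manager = StorwizeSVCReplicationManager(driver, replication_target=target,
#                                           target_helpers=helpers_factory)
#   replica = manager.get_replica_obj(storwize_const.GMCV)
#   replica.volume_replication_setup(context, volume_ref)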
|
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
# pylint: disable=unused-argument
# pylint: disable=unused-import
"""Python source file include taxi pipeline functions and necesasry utils.
The utilities in this file are used to build a model with native Keras.
This module file will be used in Transform and generic Trainer.
"""
from typing import List
import absl
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.executor import TrainerFnArgs
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx_bsl.tfxio import dataset_options
# Categorical features are assumed to each have a maximum value in the dataset.
_MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
_CATEGORICAL_FEATURE_KEYS = [
'trip_start_hour', 'trip_start_day', 'trip_start_month',
'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
'dropoff_community_area'
]
_DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
_FEATURE_BUCKET_COUNT = 10
_BUCKET_FEATURE_KEYS = [
'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
_VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
_OOV_SIZE = 10
_VOCAB_FEATURE_KEYS = [
'payment_type',
'company',
]
# Keys
_LABEL_KEY = 'tips'
_FARE_KEY = 'fare'
def _transformed_name(key):
return key + '_xf'
def _transformed_names(keys):
return [_transformed_name(key) for key in keys]
def _fill_in_missing(x):
"""Replace missing values in a SparseTensor.
Fills in missing values of `x` with '' or 0, and converts to a dense tensor.
Args:
x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
in the second dimension.
Returns:
A rank 1 tensor where missing values of `x` have been filled in.
"""
if not isinstance(x, tf.sparse.SparseTensor):
return x
default_value = '' if x.dtype == tf.string else 0
return tf.squeeze(
tf.sparse.to_dense(
tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
default_value),
axis=1)
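# Illustrative example: a SparseTensor with dense_shape [3, 1] holding values
# only for rows 0 and 2 is densified with '' (strings) or 0 (numerics) and
# squeezed into a rank-1 tensor of length 3.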
def _get_serve_tf_examples_fn(model, tf_transform_output):
"""Returns a function that parses a serialized tf.Example and applies TFT."""
model.tft_layer = tf_transform_output.transform_features_layer()
@tf.function
def serve_tf_examples_fn(serialized_tf_examples):
"""Returns the output to be used in the serving signature."""
feature_spec = tf_transform_output.raw_feature_spec()
feature_spec.pop(_LABEL_KEY)
parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
transformed_features = model.tft_layer(parsed_features)
return model(transformed_features)
return serve_tf_examples_fn
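# The function returned above is exported as the 'serving_default' signature in
# run_fn() below, where it receives a batch of serialized tf.Example strings.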
def _input_fn(file_pattern: List[str], data_accessor: DataAccessor,
tf_transform_output: tft.TFTransformOutput,
batch_size: int) -> tf.data.Dataset:
"""Generates features and label for tuning/training.
Args:
file_pattern: List of paths or patterns of input tfrecord files.
data_accessor: DataAccessor for converting input to RecordBatch.
tf_transform_output: A TFTransformOutput.
batch_size: representing the number of consecutive elements of returned
dataset to combine in a single batch
Returns:
A dataset that contains (features, indices) tuple where features is a
dictionary of Tensors, and indices is a single Tensor of label indices.
"""
return data_accessor.tf_dataset_factory(
file_pattern,
dataset_options.TensorFlowDatasetOptions(
batch_size=batch_size, label_key=_transformed_name(_LABEL_KEY)),
tf_transform_output.transformed_metadata.schema).repeat()
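# Illustrative usage (mirroring run_fn below): _input_fn(fn_args.train_files,
# fn_args.data_accessor, tf_transform_output, 40) yields repeated batches of
# (transformed-feature dict, label tensor) pairs.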
def _build_keras_model(hidden_units: List[int] = None) -> tf.keras.Model:
"""Creates a DNN Keras model for classifying taxi data.
Args:
hidden_units: [int], the layer sizes of the DNN (input layer first).
Returns:
A keras Model.
"""
real_valued_columns = [
tf.feature_column.numeric_column(key, shape=())
for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
]
categorical_columns = [
tf.feature_column.categorical_column_with_identity(
key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
for key in _transformed_names(_VOCAB_FEATURE_KEYS)
]
categorical_columns += [
tf.feature_column.categorical_column_with_identity(
key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
for key in _transformed_names(_BUCKET_FEATURE_KEYS)
]
categorical_columns += [
tf.feature_column.categorical_column_with_identity( # pylint: disable=g-complex-comprehension
key,
num_buckets=num_buckets,
default_value=0) for key, num_buckets in zip(
_transformed_names(_CATEGORICAL_FEATURE_KEYS),
_MAX_CATEGORICAL_FEATURE_VALUES)
]
indicator_column = [
tf.feature_column.indicator_column(categorical_column)
for categorical_column in categorical_columns
]
model = _wide_and_deep_classifier(
# TODO(b/139668410) replace with premade wide_and_deep keras model
wide_columns=indicator_column,
deep_columns=real_valued_columns,
dnn_hidden_units=hidden_units or [100, 70, 50, 25])
return model
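# Illustrative usage: _build_keras_model() with no arguments uses the default
# hidden_units of [100, 70, 50, 25]; run_fn() below instead derives the sizes
# from first_dnn_layer_size, num_dnn_layers and dnn_decay_factor.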
def _wide_and_deep_classifier(wide_columns, deep_columns, dnn_hidden_units):
"""Build a simple keras wide and deep model.
Args:
wide_columns: Feature columns wrapped in indicator_column for wide (linear)
part of the model.
deep_columns: Feature columns for deep part of the model.
dnn_hidden_units: [int], the layer sizes of the hidden DNN.
Returns:
A Wide and Deep Keras model
"""
  # The following values are hard-coded for simplicity in this example;
  # ideally they should be passed in as hparams.
# Keras needs the feature definitions at compile time.
# TODO(b/139081439): Automate generation of input layers from FeatureColumn.
input_layers = {
colname: tf.keras.layers.Input(name=colname, shape=(), dtype=tf.float32)
for colname in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
}
input_layers.update({
colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
for colname in _transformed_names(_VOCAB_FEATURE_KEYS)
})
input_layers.update({
colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
for colname in _transformed_names(_BUCKET_FEATURE_KEYS)
})
input_layers.update({
colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
for colname in _transformed_names(_CATEGORICAL_FEATURE_KEYS)
})
# TODO(b/161952382): Replace with Keras premade models and
# Keras preprocessing layers.
deep = tf.keras.layers.DenseFeatures(deep_columns)(input_layers)
for numnodes in dnn_hidden_units:
deep = tf.keras.layers.Dense(numnodes)(deep)
wide = tf.keras.layers.DenseFeatures(wide_columns)(input_layers)
output = tf.keras.layers.Dense(
1, activation='sigmoid')(
tf.keras.layers.concatenate([deep, wide]))
model = tf.keras.Model(input_layers, output)
model.compile(
loss='binary_crossentropy',
      optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
metrics=[tf.keras.metrics.BinaryAccuracy()])
model.summary(print_fn=absl.logging.info)
return model
# TFX Transform will call this function.
def preprocessing_fn(inputs):
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature operations.
"""
outputs = {}
for key in _DENSE_FLOAT_FEATURE_KEYS:
# If sparse make it dense, setting nan's to 0 or '', and apply zscore.
outputs[_transformed_name(key)] = tft.scale_to_z_score(
_fill_in_missing(inputs[key]))
for key in _VOCAB_FEATURE_KEYS:
# Build a vocabulary for this feature.
outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
_fill_in_missing(inputs[key]),
top_k=_VOCAB_SIZE,
num_oov_buckets=_OOV_SIZE)
for key in _BUCKET_FEATURE_KEYS:
outputs[_transformed_name(key)] = tft.bucketize(
_fill_in_missing(inputs[key]),
_FEATURE_BUCKET_COUNT)
for key in _CATEGORICAL_FEATURE_KEYS:
outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])
# Was this passenger a big tipper?
taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
tips = _fill_in_missing(inputs[_LABEL_KEY])
outputs[_transformed_name(_LABEL_KEY)] = tf.where(
tf.math.is_nan(taxi_fare),
tf.cast(tf.zeros_like(taxi_fare), tf.int64),
# Test if the tip was > 20% of the fare.
tf.cast(
tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
return outputs
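# Note: the derived label above is 1 when the tip exceeds 20% of the fare and
# 0 otherwise (including when the fare is NaN).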
# TFX Trainer will call this function.
def run_fn(fn_args: TrainerFnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs.
"""
# Number of nodes in the first layer of the DNN
first_dnn_layer_size = 100
num_dnn_layers = 4
dnn_decay_factor = 0.7
tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor,
tf_transform_output, 40)
eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor,
tf_transform_output, 40)
# If no GPUs are found, CPU is used.
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
model = _build_keras_model(
        # Construct layer sizes with exponential decay
hidden_units=[
max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
for i in range(num_dnn_layers)
])
# Write logs to path
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=fn_args.model_run_dir, update_freq='batch')
model.fit(
train_dataset,
steps_per_epoch=fn_args.train_steps,
validation_data=eval_dataset,
validation_steps=fn_args.eval_steps,
callbacks=[tensorboard_callback])
signatures = {
'serving_default':
_get_serve_tf_examples_fn(model,
tf_transform_output).get_concrete_function(
tf.TensorSpec(
shape=[None],
dtype=tf.string,
name='examples')),
}
model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
|
|
"""
# =============================================================================
Copyright Government of Canada 2015
Written by: Eric Enns, Public Health Agency of Canada,
National Microbiology Laboratory
Mark Iskander, Public Health Agency of Canada,
National Microbiology Laboratory
Daniel Bouchard, Public Health Agency of Canada,
National Microbiology Laboratory
Funded by the National Microbiology Laboratory
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this work except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
# =============================================================================
"""
"""
Created on June 23rd, 2015
@currentauthor: Mark Iskander
@originalauthor: Daniel Bouchard
"""
import logging
import os
import re
import sys
sys.path.append("")
import unittest
import mockGalaxy as mg
import ymltests as yt
import dynamic_tool_destination.DynamicToolDestination as dt
from dynamic_tool_destination.DynamicToolDestination import map_tool_to_destination
from testfixtures import log_capture
theApp = mg.App( "waffles_default", "test_spec")
#======================Jobs====================================
zeroJob = mg.Job()
emptyJob = mg.Job()
emptyJob.add_input_dataset( mg.InputDataset("input1", mg.Dataset( (os.getcwd() + "/tests/data/test.empty"), "txt", 14)) )
failJob = mg.Job()
failJob.add_input_dataset( mg.InputDataset("input1", mg.Dataset( (os.getcwd() + "/tests/data/test1.full"), "txt", 15)) )
msfileJob = mg.Job()
msfileJob.add_input_dataset( mg.InputDataset("input1", mg.Dataset( (os.getcwd() + "/tests/data/not_here.full"), "txt", 15)) )
notfileinpJob = mg.Job()
msfileJob.add_input_dataset( mg.InputDataset("input1", mg.NotAFile() ) )
runJob = mg.Job()
runJob.add_input_dataset( mg.InputDataset("input1", mg.Dataset( (os.getcwd() + "/tests/data/test3.full"), "txt", 15)) )
vfJob = mg.Job()
vfJob.add_input_dataset( mg.InputDataset("input1", mg.Dataset( (os.getcwd() + "/tests/data/test3.full"), "txt", 15)) )
vfJob.set_arg_value( "mlst_or_genedb", {"vfdb_in": "-bact"} )
argJob = mg.Job()
argJob.add_input_dataset( mg.InputDataset("input1", mg.Dataset( (os.getcwd() + "/tests/data/test3.full"), "txt", 15)) )
argJob.set_arg_value( "careful", True )
argNotFoundJob = mg.Job()
argNotFoundJob.add_input_dataset( mg.InputDataset("input1", mg.Dataset( (os.getcwd() + "/tests/data/test3.full"), "txt", 15)) )
argNotFoundJob.set_arg_value( "careful", False )
notvfJob = mg.Job()
notvfJob.add_input_dataset( mg.InputDataset("input1", mg.Dataset( (os.getcwd() + "/tests/data/test3.full"), "txt", 15)) )
notvfJob.set_arg_value( "mlst_or_genedb", {"vfdb_in": "-not_here"} )
dbJob = mg.Job()
dbJob.add_input_dataset( mg.InputDataset("input1", mg.Dataset( (os.getcwd() + "/tests/data/test.fasta"), "fasta", 10)) )
dbcountJob = mg.Job()
dbcountJob.add_input_dataset( mg.InputDataset("input1", mg.Dataset( (os.getcwd() + "/tests/data/test.fasta"), "fasta", None)) )
vfdbJob = mg.Job()
vfdbJob.add_input_dataset( mg.InputDataset("input1", mg.Dataset( (os.getcwd() + "/tests/data/test.fasta"), "fasta", 6)) )
vfdbJob.set_arg_value( "mlst_or_genedb", {"vfdb_in": "-bact"} )
#======================Tools===================================
vanillaTool = mg.Tool( 'test' )
unTool = mg.Tool( 'unregistered' )
overlapTool = mg.Tool( 'test_overlap' )
defaultTool = mg.Tool( 'test_tooldefault' )
dbTool = mg.Tool( 'test_db' )
dbinfTool = mg.Tool( 'test_db_high' )
argTool = mg.Tool( 'test_arguments' )
vfdbTool = mg.Tool( 'test_db' )
vfdbTool.add_tool_dependency( mg.ToolDependency("vfdb", os.getcwd() + "/tests") )
noVBTool = mg.Tool( 'test_no_verbose' )
usersTool = mg.Tool( 'test_users' )
numinputsTool = mg.Tool( 'test_num_input_datasets' )
#=======================YML file================================
path = os.getcwd() + "/tests/data/tool_destination.yml"
priority_path = os.getcwd() + "/tests/data/priority_tool_destination.yml"
broken_default_dest_path = os.getcwd() + "/tests/data/dest_fail.yml"
no_verbose_path = os.getcwd() + "/tests/data/test_no_verbose.yml"
users_test_path = os.getcwd() + "/tests/data/test_users.yml"
num_input_datasets_test_path = os.getcwd() + "/tests/data/test_num_input_datasets.yml"
#======================Test Variables=========================
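# Successive powers of 1024 (1 B, 1 KB, ..., 1 YB) used by the str_to_bytes
# and bytes_to_str tests below.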
value = 1
valueK = value * 1024
valueM = valueK * 1024
valueG = valueM * 1024
valueT = valueG * 1024
valueP = valueT * 1024
valueE = valueP * 1024
valueZ = valueE * 1024
valueY = valueZ * 1024
class TestDynamicToolDestination(unittest.TestCase):
def setUp(self):
self.maxDiff = None
logger = logging.getLogger()
#=======================map_tool_to_destination()================================
@log_capture()
def test_brokenDestYML(self, l):
self.assertRaises(mg.JobMappingException, map_tool_to_destination, runJob, theApp, vanillaTool, "user@email.com", True, broken_default_dest_path)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'No global default destination specified in config!'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: input1' + os.getcwd() + '/tests/data/test3.full'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total size: 3.23 KB')
)
@log_capture()
def test_filesize_empty(self, l):
self.assertRaises(mg.JobMappingException, map_tool_to_destination, emptyJob, theApp, vanillaTool, "user@email.com", True, path)
self.assertRaises(mg.JobMappingException, map_tool_to_destination, emptyJob, theApp, vanillaTool, "user@email.com", True, priority_path)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: input1' + os.getcwd() + '/tests/data/test.empty'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total size: 0.00 B'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total number of files: 1'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: input1' + os.getcwd() + '/tests/data/test.empty'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total size: 0.00 B'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total number of files: 1')
)
@log_capture()
def test_filesize_zero(self, l):
self.assertRaises(mg.JobMappingException, map_tool_to_destination, zeroJob, theApp, vanillaTool, "user@email.com", True, path)
self.assertRaises(mg.JobMappingException, map_tool_to_destination, zeroJob, theApp, vanillaTool, "user@email.com", True, priority_path)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total size: 0.00 B'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total number of files: 0'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total size: 0.00 B'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total number of files: 0')
)
@log_capture()
def test_filesize_fail(self, l):
self.assertRaises(mg.JobMappingException, map_tool_to_destination, failJob, theApp, vanillaTool, "user@email.com", True, path)
self.assertRaises(mg.JobMappingException, map_tool_to_destination, failJob, theApp, vanillaTool, "user@email.com", True, priority_path)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: input1' + os.getcwd() + '/tests/data/test1.full'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total size: 293.00 B'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total number of files: 1'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: input1' + os.getcwd() + '/tests/data/test1.full'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total size: 293.00 B'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total number of files: 1')
)
@log_capture()
def test_filesize_run(self, l):
job = map_tool_to_destination( runJob, theApp, vanillaTool, "user@email.com", True, path )
self.assertEquals( job, 'Destination1' )
priority_job = map_tool_to_destination( runJob, theApp, vanillaTool, "user@email.com", True, priority_path )
self.assertEquals( priority_job, 'Destination1_high' )
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: input1' + os.getcwd() + '/tests/data/test3.full'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total size: 3.23 KB'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total number of files: 1'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test' with 'Destination1'."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: input1' + os.getcwd() + '/tests/data/test3.full'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total size: 3.23 KB'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total number of files: 1'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test' with 'Destination1_high'.")
)
@log_capture()
def test_default_tool(self, l):
job = map_tool_to_destination( runJob, theApp, defaultTool, "user@email.com", True, path )
self.assertEquals( job, 'waffles_default' )
priority_job = map_tool_to_destination( runJob, theApp, defaultTool, "user@email.com", True, priority_path )
self.assertEquals( priority_job, 'waffles_default_high' )
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Tool 'test_tooldefault' not specified in config. Using default destination."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_tooldefault' with 'waffles_default'."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Tool 'test_tooldefault' not specified in config. Using default destination."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_tooldefault' with 'waffles_default_high'.")
)
@log_capture()
def test_arguments_tool(self, l):
job = map_tool_to_destination( argJob, theApp, argTool, "user@email.com", True, path )
self.assertEquals( job, 'Destination6' )
priority_job = map_tool_to_destination( argJob, theApp, argTool, "user@email.com", True, priority_path )
self.assertEquals( priority_job, 'Destination6_med' )
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_arguments' with 'Destination6'."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_arguments' with 'Destination6_med'.")
)
@log_capture()
def test_arguments_arg_not_found(self, l):
job = map_tool_to_destination( argNotFoundJob, theApp, argTool, "user@email.com", True, path )
self.assertEquals( job, 'waffles_default' )
priority_job = map_tool_to_destination( argNotFoundJob, theApp, argTool, "user@email.com", True, priority_path )
self.assertEquals( priority_job, 'waffles_default_high' )
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_arguments' with 'waffles_default'."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_arguments' with 'waffles_default_high'.")
)
@log_capture()
def test_tool_not_found(self, l):
job = map_tool_to_destination( runJob, theApp, unTool, "user@email.com", True, path )
self.assertEquals( job, 'waffles_default' )
priority_job = map_tool_to_destination( runJob, theApp, unTool, "user@email.com", True, priority_path )
self.assertEquals( priority_job, 'waffles_default_high' )
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Tool 'unregistered' not specified in config. Using default destination."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'unregistered' with 'waffles_default'."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Tool 'unregistered' not specified in config. Using default destination."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'unregistered' with 'waffles_default_high'.")
)
@log_capture()
def test_fasta(self, l):
job = map_tool_to_destination( dbJob, theApp, dbTool, "user@email.com", True, path )
self.assertEquals( job, 'Destination4' )
priority_job = map_tool_to_destination( dbJob, theApp, dbTool, "user@email.com", True, priority_path )
self.assertEquals( priority_job, 'Destination4_high' )
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: input1' + os.getcwd() + '/tests/data/test.fasta'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total amount of records: 10'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_db' with 'Destination4'."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: input1' + os.getcwd() + '/tests/data/test.fasta'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total amount of records: 10'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_db' with 'Destination4_high'.")
)
@log_capture()
def test_fasta_count(self, l):
job = map_tool_to_destination( dbcountJob, theApp, dbTool, "user@email.com", True, path )
self.assertEquals( job, 'Destination4' )
priority_job = map_tool_to_destination( dbcountJob, theApp, dbTool, "user@email.com", True, priority_path )
self.assertEquals( priority_job, 'Destination4_high' )
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: input1' + os.getcwd() + '/tests/data/test.fasta'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total amount of records: 6'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_db' with 'Destination4'."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: input1' + os.getcwd() + '/tests/data/test.fasta'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total amount of records: 6'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_db' with 'Destination4_high'.")
)
@log_capture()
def test_vf(self, l):
job = map_tool_to_destination( vfJob, theApp, vfdbTool, "user@email.com", True, path )
self.assertEquals( job, 'Destination4' )
priority_job = map_tool_to_destination( vfJob, theApp, vfdbTool, "user@email.com", True, priority_path )
self.assertEquals( priority_job, 'Destination4_high' )
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: ' + os.getcwd() + '/tests/vfdb/?bact.test'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: input1' + os.getcwd() + '/tests/data/test3.full'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total amount of records: 4'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_db' with 'Destination4'."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: ' + os.getcwd() + '/tests/vfdb/?bact.test'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Loading file: input1' + os.getcwd() + '/tests/data/test3.full'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total amount of records: 4'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_db' with 'Destination4_high'.")
)
@log_capture()
def test_vf_not_found(self, l):
job = map_tool_to_destination( notvfJob, theApp, vfdbTool, "user@email.com", True, path )
self.assertEquals( job, 'Destination4' )
priority_job = map_tool_to_destination( notvfJob, theApp, vfdbTool, "user@email.com", True, priority_path )
self.assertEquals( priority_job, 'Destination4_high' )
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'INFO', 'No virulence factors database'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG',
'Loading file: input1' + os.getcwd() + '/tests/data/test3.full'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total amount of records: 0'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_db' with 'Destination4'."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'INFO', 'No virulence factors database'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG',
'Loading file: input1' + os.getcwd() + '/tests/data/test3.full'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Total amount of records: 0'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_db' with 'Destination4_high'.")
)
@log_capture()
def test_no_verbose(self, l):
job = map_tool_to_destination( runJob, theApp, noVBTool, "user@email.com", True, no_verbose_path )
self.assertEquals( job, 'Destination1' )
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_no_verbose' with 'Destination1'.")
)
@log_capture()
def test_authorized_user(self, l):
job = map_tool_to_destination( runJob, theApp, usersTool, "user@email.com", True, users_test_path )
self.assertEquals( job, 'special_cluster' )
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_users' with 'special_cluster'."),
)
@log_capture()
def test_unauthorized_user(self, l):
job = map_tool_to_destination( runJob, theApp, usersTool, "userblah@email.com", True, users_test_path )
self.assertEquals( job, 'lame_cluster' )
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Running 'test_users' with 'lame_cluster'.")
)
#================================Invalid yaml files==============================
@log_capture()
def test_no_file(self, l):
self.assertRaises(IOError, dt.parse_yaml, path="")
l.check()
@log_capture()
def test_bad_nice(self, l):
dt.parse_yaml(path=yt.ivYMLTest11, test=True)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG',
"Running config validation..."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG',
"nice_value goes from -20 to 20; rule 1 in 'spades' has a nice_value of '-21'. Setting nice_value to 0."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_empty_file(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest2, test=True), {})
@log_capture()
def test_no_tool_name(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest3, test=True), yt.iv3dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Malformed YML; expected job name, but found a list instead!'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_no_rule_type(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest4, test=True), yt.ivDict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "No rule_type found for rule 1 in 'spades'."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_no_rule_lower_bound(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest51, test=True), yt.ivDict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Missing bounds for rule 1 in 'spades'. Ignoring rule."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_no_rule_upper_bound(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest52, test=True), yt.ivDict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Missing bounds for rule 1 in 'spades'. Ignoring rule."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_no_rule_arg(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest53, test=True), yt.ivDict53)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Found a fail_message for rule 1 in 'spades', but destination is not 'fail'! Setting destination to 'fail'."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_bad_rule_type(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest6, test=True), yt.ivDict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Unrecognized rule_type 'iencs' found in 'spades'. Ignoring..."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_no_err_msg(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest91, test=True), yt.iv91dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "No nice_value found for rule 1 in 'spades'. Setting nice_value to 0."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Missing a fail_message for rule 1 in 'spades'. Adding generic fail_message."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_no_default_dest(self, l):
dt.parse_yaml(path=yt.ivYMLTest7, test=True)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'No global default destination specified in config!'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_invalid_category(self, l):
dt.parse_yaml(path=yt.ivYMLTest8, test=True)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'No global default destination specified in config!'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Unrecognized category 'ice_cream' found in config file!"),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_arguments_no_err_msg(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest12, test=True), yt.iv12dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG',
"Missing a fail_message for rule 1 in 'spades'. Adding generic fail_message."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_arguments_no_args(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest131, test=True), yt.iv131dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG',
"No arguments found for rule 1 in 'spades' despite being of type arguments. Ignoring rule."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_arguments_no_arg(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest132, test=True), yt.iv132dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Found a fail_message for rule 1 in 'spades', but destination is not 'fail'! Setting destination to 'fail'."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_return_bool_for_multiple_jobs(self, l):
self.assertFalse(dt.parse_yaml(path=yt.ivYMLTest133, test=True, return_bool=True))
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Missing a fail_message for rule 1 in 'smalt'.")
)
@log_capture()
def test_return_rule_for_multiple_jobs(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest133, test=True), yt.iv133dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Missing a fail_message for rule 1 in 'smalt'. Adding generic fail_message."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_return_bool_for_no_destination(self, l):
self.assertFalse(dt.parse_yaml(path=yt.ivYMLTest134, test=True, return_bool=True))
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "No destination specified for rule 1 in 'spades'.")
)
@log_capture()
def test_return_rule_for_no_destination(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest134, test=True), yt.iv134dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "No destination specified for rule 1 in 'spades'. Ignoring..."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_return_rule_for_reversed_bounds(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest135, test=True), yt.iv135dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "lower_bound exceeds upper_bound for rule 1 in 'spades'. Reversing bounds."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_return_bool_for_missing_tool_fields(self, l):
self.assertFalse(dt.parse_yaml(path=yt.ivYMLTest136, test=True, return_bool=True))
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Tool 'spades' does not have rules nor a default_destination!")
)
@log_capture()
def test_return_rule_for_missing_tool_fields(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest136, test=True), yt.iv136dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Tool 'spades' does not have rules nor a default_destination!"),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_return_bool_for_blank_tool(self, l):
self.assertFalse(dt.parse_yaml(path=yt.ivYMLTest137, test=True, return_bool=True))
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Config section for tool 'spades' is blank!")
)
@log_capture()
def test_return_rule_for_blank_tool(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest137, test=True), yt.iv137dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Config section for tool 'spades' is blank!"),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_return_bool_for_malformed_users(self, l):
self.assertFalse(dt.parse_yaml(path=yt.ivYMLTest138, test=True, return_bool=True))
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Entry '123' in users for rule 1 in tool 'spades' is in an invalid format!"),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Supplied email 'invaliduser.email@com' for rule 1 in tool 'spades' is in an invalid format!")
)
@log_capture()
def test_return_rule_for_malformed_users(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest138, test=True), yt.iv138dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Entry '123' in users for rule 1 in tool 'spades' is in an invalid format! Ignoring entry."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Supplied email 'invaliduser.email@com' for rule 1 in tool 'spades' is in an invalid format! Ignoring email."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_return_bool_for_no_users(self, l):
self.assertFalse(dt.parse_yaml(path=yt.ivYMLTest139, test=True, return_bool=True))
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Couldn't find a list under 'users:'!")
)
@log_capture()
def test_return_rule_for_no_users(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest139, test=True), yt.iv139dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Couldn't find a list under 'users:'! Ignoring rule."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_return_bool_for_malformed_user_email(self, l):
self.assertFalse(dt.parse_yaml(path=yt.ivYMLTest140, test=True, return_bool=True))
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Supplied email 'invalid.user2@com' for rule 2 in tool 'spades' is in an invalid format!"),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Supplied email 'invalid.user1@com' for rule 2 in tool 'spades' is in an invalid format!"),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "No valid user emails were specified for rule 2 in tool 'spades'!")
)
@log_capture()
def test_return_rule_for_malformed_user_email(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest140, test=True), yt.iv140dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Supplied email 'invalid.user2@com' for rule 2 in tool 'spades' is in an invalid format! Ignoring email."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Supplied email 'invalid.user1@com' for rule 2 in tool 'spades' is in an invalid format! Ignoring email."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "No valid user emails were specified for rule 2 in tool 'spades'! Ignoring rule."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_return_bool_for_empty_users(self, l):
self.assertFalse(dt.parse_yaml(path=yt.ivYMLTest141, test=True, return_bool=True))
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Entry 'None' in users for rule 2 in tool 'spades' is in an invalid format!"),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Entry 'None' in users for rule 2 in tool 'spades' is in an invalid format!"),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "No valid user emails were specified for rule 2 in tool 'spades'!")
)
@log_capture()
def test_return_rule_for_empty_users(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest141, test=True), yt.iv141dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Entry 'None' in users for rule 2 in tool 'spades' is in an invalid format! Ignoring entry."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Entry 'None' in users for rule 2 in tool 'spades' is in an invalid format! Ignoring entry."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "No valid user emails were specified for rule 2 in tool 'spades'! Ignoring rule."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_return_bool_for_bad_num_input_datasets_bounds(self, l):
self.assertFalse(dt.parse_yaml(path=yt.ivYMLTest142, test=True, return_bool=True))
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Error: lower_bound is set to Infinity, but must be lower than upper_bound!"),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "lower_bound exceeds upper_bound for rule 1 in 'smalt'.")
)
@log_capture()
def test_return_rule_for_bad_num_input_datasets_bound(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest142, test=True), yt.iv142dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Error: lower_bound is set to Infinity, but must be lower than upper_bound! Setting lower_bound to 0!"),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_return_bool_for_worse_num_input_datasets_bounds(self, l):
self.assertFalse(dt.parse_yaml(path=yt.ivYMLTest143, test=True, return_bool=True))
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Error: lower_bound is set to Infinity, but must be lower than upper_bound!")
)
@log_capture()
def test_return_rule_for_worse_num_input_datasets_bound(self, l):
self.assertEquals(dt.parse_yaml(path=yt.ivYMLTest143, test=True), yt.iv143dict)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Error: lower_bound is set to Infinity, but must be lower than upper_bound! Setting lower_bound to 0!"),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_priority_default_destination_without_med_priority_destination(self, l):
dt.parse_yaml(path=yt.ivYMLTest144, test=True)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "No default 'med' priority destination!"),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_priority_default_destination_with_invalid_priority_destination(self, l):
dt.parse_yaml(path=yt.ivYMLTest145, test=True)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Invalid default priority destination 'mine' found in config!"),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_tool_without_med_priority_destination(self, l):
dt.parse_yaml(path=yt.ivYMLTest146, test=True)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "No 'med' priority destination for rule 1 in 'smalt'. Ignoring..."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_tool_with_invalid_priority_destination(self, l):
dt.parse_yaml(path=yt.ivYMLTest147, test=True)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "Invalid priority destination 'mine' for rule 1 in 'smalt'. Ignoring..."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
@log_capture()
def test_users_with_invalid_priority(self, l):
dt.parse_yaml(path=yt.ivYMLTest148, test=True)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', "User 'user@email.com', priority is not valid! Must be either low, med, or high."),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.')
)
#================================Valid yaml files==============================
@log_capture()
def test_parse_valid_yml(self, l):
self.assertEqual(dt.parse_yaml(yt.vYMLTest1, test=True), yt.vdictTest1_yml)
self.assertEqual(dt.parse_yaml(yt.vYMLTest2, test=True), yt.vdictTest2_yml)
self.assertEqual(dt.parse_yaml(yt.vYMLTest3, test=True), yt.vdictTest3_yml)
self.assertTrue(dt.parse_yaml(yt.vYMLTest4, test=True, return_bool=True))
self.assertEqual(dt.parse_yaml(yt.vYMLTest4, test=True), yt.vdictTest4_yml)
self.assertTrue(dt.parse_yaml(yt.vYMLTest5, test=True, return_bool=True))
self.assertEqual(dt.parse_yaml(yt.vYMLTest5, test=True), yt.vdictTest5_yml)
self.assertTrue(dt.parse_yaml(yt.vYMLTest6, test=True, return_bool=True))
self.assertEqual(dt.parse_yaml(yt.vYMLTest6, test=True), yt.vdictTest6_yml)
self.assertTrue(dt.parse_yaml(yt.vYMLTest7, test=True, return_bool=True))
self.assertEqual(dt.parse_yaml(yt.vYMLTest7, test=True), yt.vdictTest7_yml)
l.check(
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Running config validation...'),
('dynamic_tool_destination.DynamicToolDestination', 'DEBUG', 'Finished config validation.'),
)
#================================Testing str_to_bytes==========================
def test_str_to_bytes_invalid(self):
self.assertRaises(dt.MalformedYMLException, dt.str_to_bytes, "1d")
self.assertRaises(dt.MalformedYMLException, dt.str_to_bytes, "1 d")
def test_str_to_bytes_valid(self):
self.assertEqual(dt.str_to_bytes("-1"), -1)
self.assertEqual(dt.str_to_bytes( "1" ), value)
self.assertEqual(dt.str_to_bytes( 156 ), 156)
self.assertEqual(dt.str_to_bytes( "1 B" ), value)
self.assertEqual(dt.str_to_bytes( "1 KB" ), valueK)
self.assertEqual(dt.str_to_bytes( "1 MB" ), valueM)
self.assertEqual(dt.str_to_bytes( "1 gB" ), valueG)
self.assertEqual(dt.str_to_bytes( "1 Tb" ), valueT)
self.assertEqual(dt.str_to_bytes( "1 pb" ), valueP)
self.assertEqual(dt.str_to_bytes( "1 EB" ), valueE)
self.assertEqual(dt.str_to_bytes( "1 ZB" ), valueZ)
self.assertEqual(dt.str_to_bytes( "1 YB" ), valueY)
#==============================Testing bytes_to_str=============================
@log_capture()
def test_bytes_to_str_invalid(self, l):
testValue = ""
self.assertRaises( ValueError, dt.bytes_to_str, testValue )
testValue = "5564fads"
self.assertRaises( ValueError, dt.bytes_to_str, testValue )
testValue = "45.0.1"
self.assertRaises( ValueError, dt.bytes_to_str, testValue )
self.assertRaises( ValueError, dt.bytes_to_str, "1 024" )
def test_bytes_to_str_valid(self):
self.assertEqual(dt.bytes_to_str(-1), "Infinity")
self.assertEqual(dt.bytes_to_str( value), "1.00 B")
self.assertEqual(dt.bytes_to_str( valueK), "1.00 KB")
self.assertEqual(dt.bytes_to_str( valueM), "1.00 MB")
self.assertEqual(dt.bytes_to_str( valueG), "1.00 GB")
self.assertEqual(dt.bytes_to_str( valueT ), "1.00 TB")
self.assertEqual(dt.bytes_to_str( valueP ), "1.00 PB")
self.assertEqual(dt.bytes_to_str( valueE ), "1.00 EB")
self.assertEqual(dt.bytes_to_str( valueZ ), "1.00 ZB")
self.assertEqual(dt.bytes_to_str( valueY ), "1.00 YB")
self.assertEqual(dt.bytes_to_str( 10, "B" ), "10.00 B")
self.assertEqual(dt.bytes_to_str( 1000000, "KB" ), "976.56 KB")
self.assertEqual(dt.bytes_to_str( 1000000000, "MB" ), "953.67 MB")
self.assertEqual(dt.bytes_to_str( 1000000000000, "GB" ), "931.32 GB")
self.assertEqual(dt.bytes_to_str( 1000000000000000, "TB" ), "909.49 TB")
self.assertEqual(dt.bytes_to_str( 1000000000000000000, "PB" ), "888.18 PB")
self.assertEqual(dt.bytes_to_str( 1000000000000000000000, "EB" ), "867.36 EB")
self.assertEqual(dt.bytes_to_str( 1000000000000000000000000, "ZB" ), "847.03 ZB")
self.assertEqual(dt.bytes_to_str( value, "KB" ), "1.00 B")
self.assertEqual(dt.bytes_to_str( valueK, "MB" ), "1.00 KB")
self.assertEqual(dt.bytes_to_str( valueM, "GB" ), "1.00 MB")
self.assertEqual(dt.bytes_to_str( valueG, "TB" ), "1.00 GB")
self.assertEqual(dt.bytes_to_str( valueT, "PB" ), "1.00 TB")
self.assertEqual(dt.bytes_to_str( valueP, "EB" ), "1.00 PB")
self.assertEqual(dt.bytes_to_str( valueE, "ZB" ), "1.00 EB")
self.assertEqual(dt.bytes_to_str( valueZ, "YB" ), "1.00 ZB")
self.assertEqual(dt.bytes_to_str( "1" ), "1.00 B")
self.assertEqual(dt.bytes_to_str( "\t\t1000000" ), "976.56 KB")
self.assertEqual(dt.bytes_to_str( "1000000000\n" ), "953.67 MB")
self.assertEqual(dt.bytes_to_str( 1024, "fda" ), "1.00 KB")
if __name__ == '__main__':
unittest.main()
#suite = unittest.TestLoader().loadTestsFromTestCase(TestDynamicToolDestination)
#ret = not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
#print(ret)
#sys.exit(ret)
|
|
# Generated by Django 2.0.7 on 2018-07-12 17:10
import django.contrib.auth.validators
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('ui', '0020_auto_20180608_1144'),
]
operations = [
migrations.AddField(
model_name='historicalcollection',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='historicalcollectionset',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='historicalcredential',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='historicalseed',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AlterField(
model_name='collection',
name='end_date',
field=models.DateTimeField(blank=True, help_text='If blank, will continue until stopped.', null=True),
),
migrations.AlterField(
model_name='collection',
name='harvest_type',
field=models.CharField(choices=[('twitter_user_timeline', 'Twitter user timeline'), ('twitter_search', 'Twitter search'), ('twitter_filter', 'Twitter filter'), ('twitter_sample', 'Twitter sample'), ('tumblr_blog_posts', 'Tumblr blog posts'), ('flickr_user', 'Flickr user'), ('weibo_timeline', 'Weibo timeline')], max_length=255),
),
migrations.AlterField(
model_name='collection',
name='link',
field=models.CharField(blank=True, max_length=512, verbose_name='Public link'),
),
migrations.AlterField(
model_name='collection',
name='name',
field=models.CharField(max_length=255, verbose_name='Collection name'),
),
migrations.AlterField(
model_name='collection',
name='schedule_minutes',
field=models.PositiveIntegerField(choices=[(1, 'One time harvest'), (30, 'Every 30 minutes'), (60, 'Every hour'), (240, 'Every 4 hours'), (720, 'Every 12 hours'), (1440, 'Every day'), (10080, 'Every week'), (40320, 'Every 4 weeks'), (5, 'Every 5 minutes')], default=10080, null=True, verbose_name='schedule'),
),
migrations.AlterField(
model_name='collection',
name='visibility',
field=models.CharField(choices=[('default', 'Group only'), ('local', 'All other users')], default='default', help_text='Who else can view and export from this collection. Select "All other users" to share with all Social Feed Manager users.', max_length=255),
),
migrations.AlterField(
model_name='collectionset',
name='name',
field=models.CharField(max_length=255, verbose_name='Collection set name'),
),
migrations.AlterField(
model_name='credential',
name='name',
field=models.CharField(max_length=255, verbose_name='Credential name'),
),
migrations.AlterField(
model_name='credential',
name='platform',
field=models.CharField(choices=[('twitter', 'Twitter'), ('flickr', 'Flickr'), ('weibo', 'Weibo'), ('tumblr', 'Tumblr')], help_text='Platform name', max_length=255),
),
migrations.AlterField(
model_name='export',
name='errors',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='export',
name='export_format',
field=models.CharField(choices=[('xlsx', 'Excel (XLSX)'), ('csv', 'Comma separated values (CSV)'), ('tsv', 'Tab separated values (TSV)'), ('json_full', 'Full JSON'), ('json', 'JSON of limited fields'), ('dehydrate', 'Text file of identifiers (dehydrate)')], default='xlsx', max_length=10),
),
migrations.AlterField(
model_name='export',
name='export_segment_size',
            field=models.BigIntegerField(blank=True, choices=[(100000, '100,000'), (250000, '250,000'), (500000, '500,000'), (1000000, '1,000,000'), (None, 'Single file'), (100, '100')], default=250000, null=True),
),
migrations.AlterField(
model_name='export',
name='infos',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='export',
name='status',
field=models.CharField(choices=[('not requested', 'Not requested'), ('requested', 'Requested'), ('running', 'Running'), ('completed success', 'Success'), ('completed failure', 'Failure')], default='not requested', max_length=20),
),
migrations.AlterField(
model_name='export',
name='warnings',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='harvest',
name='errors',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='harvest',
name='infos',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='harvest',
name='status',
field=models.CharField(choices=[('requested', 'Requested'), ('completed success', 'Success'), ('completed failure', 'Completed with errors'), ('running', 'Running'), ('stop requested', 'Stop requested'), ('stopping', 'Stopping'), ('voided', 'Voided'), ('skipped', 'Skipped'), ('paused', 'Paused')], default='requested', max_length=20),
),
migrations.AlterField(
model_name='harvest',
name='token_updates',
field=jsonfield.fields.JSONField(blank=True, default={}),
),
migrations.AlterField(
model_name='harvest',
name='uids',
field=jsonfield.fields.JSONField(blank=True, default={}),
),
migrations.AlterField(
model_name='harvest',
name='warnings',
field=jsonfield.fields.JSONField(blank=True, default=[]),
),
migrations.AlterField(
model_name='historicalcollection',
name='end_date',
field=models.DateTimeField(blank=True, help_text='If blank, will continue until stopped.', null=True),
),
migrations.AlterField(
model_name='historicalcollection',
name='harvest_type',
field=models.CharField(choices=[('twitter_user_timeline', 'Twitter user timeline'), ('twitter_search', 'Twitter search'), ('twitter_filter', 'Twitter filter'), ('twitter_sample', 'Twitter sample'), ('tumblr_blog_posts', 'Tumblr blog posts'), ('flickr_user', 'Flickr user'), ('weibo_timeline', 'Weibo timeline')], max_length=255),
),
migrations.AlterField(
model_name='historicalcollection',
name='link',
field=models.CharField(blank=True, max_length=512, verbose_name='Public link'),
),
migrations.AlterField(
model_name='historicalcollection',
name='name',
field=models.CharField(max_length=255, verbose_name='Collection name'),
),
migrations.AlterField(
model_name='historicalcollection',
name='schedule_minutes',
field=models.PositiveIntegerField(choices=[(1, 'One time harvest'), (30, 'Every 30 minutes'), (60, 'Every hour'), (240, 'Every 4 hours'), (720, 'Every 12 hours'), (1440, 'Every day'), (10080, 'Every week'), (40320, 'Every 4 weeks'), (5, 'Every 5 minutes')], default=10080, null=True, verbose_name='schedule'),
),
migrations.AlterField(
model_name='historicalcollection',
name='visibility',
field=models.CharField(choices=[('default', 'Group only'), ('local', 'All other users')], default='default', help_text='Who else can view and export from this collection. Select "All other users" to share with all Social Feed Manager users.', max_length=255),
),
migrations.AlterField(
model_name='historicalcollectionset',
name='name',
field=models.CharField(max_length=255, verbose_name='Collection set name'),
),
migrations.AlterField(
model_name='historicalcredential',
name='name',
field=models.CharField(max_length=255, verbose_name='Credential name'),
),
migrations.AlterField(
model_name='historicalcredential',
name='platform',
field=models.CharField(choices=[('twitter', 'Twitter'), ('flickr', 'Flickr'), ('weibo', 'Weibo'), ('tumblr', 'Tumblr')], help_text='Platform name', max_length=255),
),
migrations.AlterField(
model_name='user',
name='email_frequency',
field=models.CharField(choices=[('daily', 'Daily'), ('weekly', 'Weekly'), ('monthly', 'Monthly'), ('none', 'None')], default='daily', max_length=10),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=150, verbose_name='last name'),
),
migrations.AlterField(
model_name='user',
name='local_id',
field=models.CharField(blank=True, default='', help_text='Local identifier', max_length=255),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
]
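# Applying this migration to the 'ui' app follows the usual Django workflow,
# e.g. `python manage.py migrate ui` (shown only as a generic reminder; the
# exact invocation depends on the deployment).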
|
|
# Abstract Syntax Tree produced by parser
#from __future__ import generators
import types
import string
import fracttypes
import re
from ffloat import Float
class Node:
def __init__(self,type,pos,children=None,leaf=None,datatype=None):
self.type = type
if children:
self.children = children
else:
self.children = [ ]
self.leaf = leaf
self.datatype = datatype
self.pos = pos
def __str__(self):
return "[%s : %s]" % (self.type , self.leaf)
def pretty(self,depth=0):
str = " " * depth + "[%s : %s" % (self.type , self.leaf)
if self.datatype != None:
str += "(%s)" % fracttypes.strOfType(self.datatype)
if self.children:
str += "\n"
for child in self.children:
assert(isinstance(child,Node))
str += child.pretty(depth+1) + "\n"
str += " " * depth + "]"
else:
str += "]"
return str
def __iter__(self):
return NodeIter(self)
def childByName(self,name):
'find a child with leaf == name'
for child in self.children:
if child.leaf == name:
return child
return None
def DeepCmp(self,other):
if self.type < other.type: return -1
if self.type > other.type: return 1
if self.leaf < other.leaf: return -1
if self.leaf > other.leaf: return 1
#if len(self.children) < len(other.children): return -1
#if len(self.children) > len(other.children): return 1
if not self.children and not other.children: return 0
for (child, otherchild) in zip(self.children,other.children):
eql = child.DeepCmp(otherchild)
if eql: return eql
        # zip() exhausted without finding a difference (or one side has no
        # children), so treat the nodes as equal here
        return 0
# def preorder(t):
# if t:
# print "pre",t
# yield t
# for child in t.children:
# print "prechild", child
# preorder(child)
class NodeIter:
def __init__(self,node):
self.nodestack = [(node,-1)]
def __iter__(self):
return self
def getNode(self,node,child):
if child == -1:
return node
else:
return node.children[child]
def next(self):
#print map(lambda (n,x) :"%s %s" % (n,x), self.nodestack)
if self.nodestack == []:
raise StopIteration
(node,child) = self.nodestack.pop()
ret = self.getNode(node,child)
child+= 1
while len(node.children) <= child:
if self.nodestack == []:
return ret
(node,child) = self.nodestack.pop()
self.nodestack.append((node,child+1))
self.nodestack.append((node.children[child],-1))
return ret
def CheckTree(tree, nullOK=0):
if nullOK and tree == None:
return 1
if not isinstance(tree,Node):
raise Exception, "bad node type %s" % tree
if tree.children:
if not isinstance(tree.children, types.ListType):
raise Exception, ("children not a list: %s instead" % tree.children)
for child in tree.children:
CheckTree(child,0)
return 1
# shorthand named ctors for specific node types
def Formlist(list, pos):
return Node("formlist", pos, list, "")
def Set(id, s, pos):
return Node("set", pos, [id,s], None)
def SetType(id,t,pos):
type = fracttypes.typeOfStr(t)
return Node("set", pos, [id, Empty(pos)], None, type)
def Number(n,pos):
if re.search('[.eE]',n):
t = fracttypes.Float
n = Float(n)
else:
t = fracttypes.Int
n = string.atoi(n)
return Node("const", pos, None, n, t)
def Const(n,pos):
if isinstance(n,types.StringType):
n = n.lower()
return Node("const", pos, None, n=="true" or n=="yes", fracttypes.Bool)
def Binop(op, left, right,pos):
return Node("binop", pos, [left, right], op)
def ID(id,pos):
return Node("id", pos, None, id)
def Mag(exp,pos):
return Node("unop", pos, [exp], "cmag")
def Negate(exp,pos):
return Node("unop", pos, [exp], "t__neg")
def Not(exp, pos):
return Node("unop", pos, [exp], "t__not")
def String(s,list,pos):
return Node("string", pos, list, s, fracttypes.String)
def Funcall(id,arglist,pos):
return Node("funcall", pos, arglist, id)
def Assign(id,exp,pos):
return Node("assign", pos, [id, exp], None)
def Decl(type, id, pos, exp=None):
if exp == None:
l = None
else:
l = [exp]
return Node("decl", pos, l , id, fracttypes.typeOfStr(type))
def DeclArray(type, id, indexes, pos):
return Node("declarray", pos, indexes, id, fracttypes.typeOfStr(type))
def ArrayLookup(id, indexes, pos):
return Node("arraylookup", pos, indexes, id)
def Stmlist(id, list,pos):
return Node("stmlist", pos,list, string.lower(id))
def Setlist(id, list,pos):
return Node("setlist", pos, list, string.lower(id))
def Empty(pos):
return Node("empty", pos, None, "")
def Formula(id, stmlist, pos):
# rather gruesome: we re-lex the formula ID to extract the symmetry spec
# if any. Then we smuggle it into the top-level node
m = re.match(".*?(\s*\(\s*(\w+)\s*\))", id)
if m:
symmetry = m.group(2)
id = id[:m.start(1)]
else:
symmetry = None
n = Node("formula", pos, stmlist, id)
n.symmetry = symmetry
return n
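# For illustration (values hypothetical): Formula("Mandel (XAXIS)", stms, pos)
# yields a node with leaf "Mandel" and n.symmetry == "XAXIS", while an id with
# no parenthesized suffix leaves symmetry as None.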
def Param(id,settinglist,type,pos):
return Node("param", pos, settinglist, id, fracttypes.typeOfStr(type))
def Func(id,settinglist,type, pos):
return Node("func", pos, settinglist, id, fracttypes.typeOfStr(type))
def Heading(settinglist,pos):
return Node("heading", pos, settinglist)
def Repeat(body, test, pos):
return Node("repeat", pos, [test, Stmlist("",body,pos)], "")
def While(test, body, pos):
return Node("while", pos, [test, Stmlist("", body,pos)], "")
def If(test, left, right, pos):
return Node("if", pos,
[test, Stmlist("",left,pos), Stmlist("",right,pos)], "")
def Error2(str, pos):
if str == "$":
return Node(
"error", pos, None,
"%d: Error: unexpected preprocessor directive" % pos)
return Node("error", pos, None,
"%d: Syntax error: unexpected '%s' " % (pos,str))
def Error(type, value, pos):
# get complaints about NEWLINE tokens on right line
if type == "NEWLINE":
pos -= 1
return Node("error", pos, None,
"%d: Syntax error: unexpected newline" % pos)
return Node("error", pos, None,
"%d: Syntax error: unexpected %s '%s'" %
(pos, string.lower(type), value))
def PreprocessorError(value,pos):
return Node("error", pos, None, value)
|
|
import errno
import os
import re
import sublime
import sublime_plugin
import shlex
from ..anf_util import *
from ..platform.windows_platform import WindowsPlatform
from ..platform.nix_platform import NixPlatform
from ..completions.nix_completion import NixCompletion
from ..completions.windows_completion import WindowsCompletion
if not IS_ST3:
if PLATFORM == "windows":
import sys
sys.path.append(os.path.dirname(sys.executable))
from ..lib.ushlex import split as st2_shlex_split
VIEW_NAME = "AdvancedNewFileCreation"
class AdvancedNewFileBase(object):
def __init__(self, window):
super(AdvancedNewFileBase, self).__init__(window)
if PLATFORM == "windows":
self.platform = WindowsPlatform(window.active_view())
else:
self.platform = NixPlatform()
def __generate_default_root(self):
root_setting = self._get_default_root()
path, folder_index = self.__parse_path_setting(
root_setting, DEFAULT_FOLDER_INDEX_SETTING)
if path is None and folder_index is None:
return os.path.expanduser(self.settings.get(DEFAULT_PATH_SETTING))
elif path is None:
return self.__project_folder_from_index(folder_index)
return path
def __generate_alias_root(self):
path, folder_index = self.__parse_path_setting(
self.settings.get(ALIAS_ROOT_SETTING), ALIAS_FOLDER_INDEX_SETTING)
if path is None and folder_index is None:
return os.path.expanduser(self.settings.get(ALIAS_PATH_SETTING))
elif path is None:
if folder_index >= 0:
return self.window.folders()[folder_index]
else:
return os.path.expanduser("~/")
return path
def generate_initial_path(self, initial_path=None):
path = None
# Search for initial string
if initial_path is not None:
path = initial_path
else:
if self.settings.get(USE_CURSOR_TEXT_SETTING, False):
cursor_text = self.get_cursor_path()
if cursor_text != "":
path = cursor_text
if path is None:
path = self.settings.get(DEFAULT_INITIAL_SETTING)
return path
def run_setup(self):
self.view = self.window.active_view()
self.settings = get_settings(self.view)
self.root = None
self.alias_root = None
self.aliases = self.__get_aliases()
self.root = self.__generate_default_root()
self.alias_root = self.__generate_alias_root()
# Need to fix this
debug = self.settings.get(DEBUG_SETTING) or False
completion_type = self.settings.get(COMPLETION_TYPE_SETTING)
if completion_type == "windows":
self.completion = WindowsCompletion(self)
else:
self.completion = NixCompletion(self)
def __get_aliases(self):
aliases = self.settings.get(ALIAS_SETTING)
all_os_aliases = self.settings.get(OS_SPECIFIC_ALIAS_SETTING)
for key in all_os_aliases:
if PLATFORM in all_os_aliases.get(key):
aliases[key] = all_os_aliases.get(key).get(PLATFORM)
return aliases
def __parse_path_setting(self, setting, index_setting):
root = None
folder_index = None
if setting == "home":
root = os.path.expanduser("~/")
elif setting == "current":
if self.view is not None:
filename = self.view.file_name()
if filename is not None:
root = os.path.dirname(filename)
if root is None:
if self.settings.get(CURRENT_FALLBACK_TO_PROJECT_SETTING, False):
folder_index = self.__validate_folder_index(0)
if folder_index == -1:
root = os.path.expanduser("~/")
else:
root = os.path.expanduser("~/")
elif setting == "project_folder":
folder_index = self.settings.get(index_setting)
folder_index = self.__validate_folder_index(folder_index)
elif setting == "top_folder":
folder_index = self.__validate_folder_index(0)
elif setting == "path":
pass
else:
print("Invalid root specifier")
return (root, folder_index)
def __validate_folder_index(self, folder_index):
num_folders = len(self.window.folders())
if num_folders == 0:
folder_index = -1
elif num_folders < folder_index:
folder_index = 0
return folder_index
def __parse_for_shell_input(self, path):
if not IS_ST3 and self.__contains_non_ascii(path):
split_path = self.__split_shell_input_for_st2_non_ascii(path)
else:
split_path = shlex.split(str(path))
return " ".join(split_path)
def __split_shell_input_for_st2_non_ascii(self, path):
return st2_shlex_split(path)
def __contains_non_ascii(self, string):
# Don't really like this....
try:
string.decode("ascii")
except UnicodeEncodeError:
return True
return False
def split_path(self, path=""):
HOME_REGEX = r"^~[/\\]"
root = None
try:
root, path = self.platform.split(path)
if self.settings.get(SHELL_INPUT_SETTING, False) and len(path) > 0:
path = self.__parse_for_shell_input(path)
# Parse if alias
if TOP_LEVEL_SPLIT_CHAR in path and root is None:
parts = path.rsplit(TOP_LEVEL_SPLIT_CHAR, 1)
root, path = self.__translate_alias(parts[0])
path_list = []
if path != "":
path_list.append(path)
if parts[1] != "":
path_list.append(parts[1])
path = TOP_LEVEL_SPLIT_CHAR.join(path_list)
elif re.match(r"^/", path):
root, path_offset = self.platform.parse_nix_path(root, path)
path = path[path_offset:]
# Parse if tilde used
elif re.match(HOME_REGEX, path) and root is None:
root = os.path.expanduser("~")
path = path[2:]
elif (re.match(r"^\.{1,2}[/\\]", path) and
self.settings.get(RELATIVE_FROM_CURRENT_SETTING, False)):
path_index = 2
if self.view.file_name() is not None:
root = os.path.dirname(self.view.file_name())
else:
folder_index = self.settings.get(
RELATIVE_FALLBACK_INDEX_SETTING, 0)
folder_index = self.__validate_folder_index(folder_index)
root = self.__project_folder_from_index(folder_index)
if re.match(r"^\.{2}[/\\]", path):
root = os.path.dirname(root)
path_index = 3
path = path[path_index:]
# Default
if root is None:
root = self.root
except IndexError:
root = os.path.expanduser("~")
return root, path
def __project_folder_from_index(self, folder_index):
if folder_index >= 0:
return self.window.folders()[folder_index]
else:
return os.path.expanduser("~/")
def bash_expansion(self, path):
if len(path) == 0:
return path
split_path = shlex.split(path)
new_path = " ".join(split_path)
return new_path
def __translate_alias(self, path):
root = None
split_path = None
if path == "" and self.view is not None:
filename = self.view.file_name()
if filename is not None:
root = os.path.dirname(filename)
else:
split_path = path.split(TOP_LEVEL_SPLIT_CHAR)
join_index = len(split_path) - 1
target = path
root_found = False
use_folder_name = self.settings.get(USE_FOLDER_NAME_SETTING)
while join_index >= 0 and not root_found:
# Folder aliases
for name, folder in get_project_folder_data(use_folder_name):
if name == target:
root = folder
root_found = True
break
# Aliases from settings.
for alias in self.aliases.keys():
if alias == target:
alias_path = self.aliases.get(alias)
if re.search(HOME_REGEX, alias_path) is None:
root = self.platform.get_alias_absolute_path(
self.alias_root, alias_path)
if root is not None:
break
root = os.path.expanduser(alias_path)
root_found = True
break
remove = re.escape(split_path[join_index])
target = re.sub(r":%s$" % remove, "", target)
join_index -= 1
if root is None:
# Nothing found
return None, path
elif split_path is None:
# Current directory as alias
return os.path.abspath(root), ""
else:
            # Compensate for the final decrement above and skip past the alias
            # components so only the remaining path parts are rejoined
join_index += 2
return (os.path.abspath(root),
TOP_LEVEL_SPLIT_CHAR.join(split_path[join_index:]))
def input_panel_caption(self):
return ""
def show_filename_input(self, initial):
caption = self.input_panel_caption()
self.input_panel_view = self.window.show_input_panel(
caption, initial,
self.on_done, self.__update_filename_input, self.clear
)
self.input_panel_view.set_name(VIEW_NAME)
self.input_panel_view.settings().set("auto_complete_commit_on_tab",
False)
self.input_panel_view.settings().set("tab_completion", False)
self.input_panel_view.settings().set("translate_tabs_to_spaces", False)
self.input_panel_view.settings().set("anf_panel", True)
if self.settings.get(CURSOR_BEFORE_EXTENSION_SETTING):
self.__place_cursor_before_extension(self.input_panel_view)
def __update_filename_input(self, path_in):
new_content = path_in
if self.settings.get(COMPLETION_TYPE_SETTING) == "windows":
if "prev_text" in dir(self) and self.prev_text != path_in:
if self.view is not None:
self.view.erase_status("AdvancedNewFile2")
if path_in.endswith("\t"):
new_content = self.completion.completion(path_in.replace("\t", ""))
if new_content != path_in:
self.input_panel_view.run_command("anf_replace",
{"content": new_content})
else:
base, path = self.split_path(path_in)
creation_path = generate_creation_path(self.settings, base, path,
True)
if self.settings.get(SHOW_PATH_SETTING, False):
self.update_status_message(creation_path)
def update_status_message(self, creation_path):
pass
def entered_file_action(self, path):
pass
def empty_file_action(self):
pass
def on_done(self, input_string):
if len(input_string) != 0:
self.entered_filename(input_string)
elif self.settings.get(DEFAULT_NEW_FILE, False):
self.empty_file_action()
self.clear()
self.refresh_sidebar()
def entered_filename(self, filename):
# Check if valid root specified for windows.
if PLATFORM == "windows":
if re.match(WIN_ROOT_REGEX, filename):
root = filename[0:3]
if not os.path.isdir(root):
sublime.error_message(root + " is not a valid root.")
self.clear()
return
base, path = self.split_path(filename)
file_path = generate_creation_path(self.settings, base, path, True)
# Check for invalid alias specified.
is_valid = (TOP_LEVEL_SPLIT_CHAR in filename and
not self.platform.is_absolute_path(base))
if is_valid:
if base == "":
error_message = "Current file cannot be resolved."
else:
error_message = "'" + base + "' is an invalid alias."
sublime.error_message(error_message)
self.entered_file_action(file_path)
def open_file(self, file_path):
new_view = None
if os.path.isdir(file_path):
if not re.search(r"(/|\\)$", file_path):
sublime.error_message("Cannot open view for '" + file_path +
"'. It is a directory. ")
else:
new_view = self.window.open_file(file_path)
return new_view
def refresh_sidebar(self):
if self.settings.get(AUTO_REFRESH_SIDEBAR_SETTING):
try:
self.window.run_command("refresh_folder_list")
except:
pass
def clear(self):
if self.view is not None:
self.view.erase_status("AdvancedNewFile")
self.view.erase_status("AdvancedNewFile2")
def create(self, filename):
base, filename = os.path.split(filename)
self.create_folder(base)
if filename != "":
creation_path = os.path.join(base, filename)
self.create_file(creation_path)
def create_file(self, name):
open(name, "a").close()
if self.settings.get(FILE_PERMISSIONS_SETTING, "") != "":
file_permissions = self.settings.get(FILE_PERMISSIONS_SETTING, "")
os.chmod(name, int(file_permissions, 8))
def create_folder(self, path):
init_list = []
temp_path = path
while not os.path.exists(temp_path):
init_list.append(temp_path)
temp_path = os.path.dirname(temp_path)
try:
if not os.path.exists(path):
os.makedirs(path)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
file_permissions = self.settings.get(FILE_PERMISSIONS_SETTING, "")
folder_permissions = self.settings.get(FOLDER_PERMISSIONS_SETTING, "")
for entry in init_list:
if self.is_python:
creation_path = os.path.join(entry, '__init__.py')
open(creation_path, 'a').close()
if file_permissions != "":
os.chmod(creation_path, int(file_permissions, 8))
if folder_permissions != "":
os.chmod(entry, int(folder_permissions, 8))
def get_cursor_path(self):
if self.view is None:
return ""
view = self.view
path = ""
for region in view.sel():
syntax = view.scope_name(region.begin())
if region.begin() != region.end():
path = view.substr(region)
break
if (re.match(".*string.quoted.double", syntax) or
re.match(".*string.quoted.single", syntax)):
path = view.substr(view.extract_scope(region.begin()))
path = re.sub('^"|\'', '', re.sub('"|\'$', '', path.strip()))
break
return path
def _expand_default_path(self, path):
current_file = self.view.file_name()
if current_file:
directory, current_file_name = os.path.split(current_file)
path = path.replace("<filepath>", current_file)
path = path.replace("<filedirectory>", directory + os.sep)
else:
current_file_name = ""
path = path.replace("<filename>", current_file_name)
return path
def _find_open_file(self, file_name):
window = self.window
if IS_ST3:
return window.find_open_file(file_name)
else:
for view in window.views():
view_name = view.file_name()
if view_name != "" and view_name == file_name:
return view
return None
## Should be overridden by sub class
def get_default_root_setting(self):
return DEFAULT_ROOT_SETTING
def _get_default_root(self):
root_setting_value = self.get_default_root_setting()
root_setting = self.settings.get(root_setting_value)
if root_setting == DEFAULT_ROOT_SETTING:
return self.settings.get(DEFAULT_ROOT_SETTING)
return root_setting
def __place_cursor_before_extension(self, view):
if view.settings().get("anf_panel", False):
cursors = view.sel()
cursor = cursors[0]
line_region = view.line(cursor)
content = view.substr(line_region)
matcher = re.match(r"(.+)\..+", content)
if matcher:
initial_position = len(matcher.group(1))
cursors.clear()
cursors.add(sublime.Region(initial_position, initial_position))
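# Hedged examples of the input syntax handled by split_path above (assuming
# TOP_LEVEL_SPLIT_CHAR is the ":" separator defined in anf_util; not shown here):
#   "src:models/user.py"  -> root resolved from the "src" alias or project folder
#   "~/notes/todo.txt"    -> root is the user's home directory
#   "./sibling.py"        -> relative to the current file when
#                            RELATIVE_FROM_CURRENT_SETTING is enabled
#   "docs/readme.md"      -> no prefix, so root falls back to self.root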
|
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from . import ivi
# Exceptions
class InvalidScanListException(ivi.IviException): pass
class InvalidSwitchPathException(ivi.IviException): pass
class EmptyScanListException(ivi.IviException): pass
class EmptySwitchPathException(ivi.IviException): pass
class ScanInProgressException(ivi.IviException): pass
class NoScanInProgressException(ivi.IviException): pass
class NoSuchPathException(ivi.IviException): pass
class IsConfigurationChannelException(ivi.IviException): pass
class NotAConfigurationChannelException(ivi.IviException): pass
class AttemptToConnectSourcesException(ivi.IviException): pass
class ExplicitConnectionExistsException(ivi.IviException): pass
class LegMissingFirstChannelException(ivi.IviException): pass
class LegMissingSecondChannelException(ivi.IviException): pass
class ChannelDuplicatedInLegException(ivi.IviException): pass
class ChannelDuplicatedInPathException(ivi.IviException): pass
class PathNotFoundException(ivi.IviException): pass
class DiscontinuousPathException(ivi.IviException): pass
class CannotConnectDirectlyException(ivi.IviException): pass
class ChannelsAlreadyConnectedException(ivi.IviException): pass
class CannotConnectToItselfException(ivi.IviException): pass
# Parameter Values
ScanMode = set(['none', 'break_before_make', 'break_after_make'])
ScanActionType = set(['connect_path', 'disconnect_path', 'wait_for_trigger'])
Path = set(['available', 'exists', 'unsupported', 'resource_in_use',
'source_conflict', 'channel_not_available'])
class Base(ivi.IviContainer):
"Base IVI methods for all switch modules"
def __init__(self, *args, **kwargs):
# needed for _init_channels calls from other __init__ methods
self._channel_count = 1
super(Base, self).__init__( *args, **kwargs)
cls = 'IviSwtch'
grp = 'Base'
ivi.add_group_capability(self, cls+grp)
self._channel_name = list()
self._channel_characteristics_ac_current_carry_max = list()
self._channel_characteristics_ac_current_switching_max = list()
self._channel_characteristics_ac_power_carry_max = list()
self._channel_characteristics_ac_power_switching_max = list()
self._channel_characteristics_ac_voltage_max = list()
self._channel_characteristics_bandwidth = list()
self._channel_characteristics_impedance = list()
self._channel_characteristics_dc_current_carry_max = list()
self._channel_characteristics_dc_current_switching_max = list()
self._channel_characteristics_dc_power_carry_max = list()
self._channel_characteristics_dc_power_switching_max = list()
self._channel_characteristics_dc_voltage_max = list()
self._channel_is_configuration_channel = list()
self._channel_is_source_channel = list()
self._channel_characteristics_settling_time = list()
self._channel_characteristics_wire_mode = list()
self._path_is_debounced = False
self._add_property('channels[].characteristics.ac_current_carry_max',
self._get_channel_characteristics_ac_current_carry_max,
None,
None,
ivi.Doc("""
The maximum AC current the channel can carry, in amperes RMS.
Notice that values for this attribute are on per-channel basis and may not
take into account the other switches that make up a path to or from this
channel.
""", cls, grp, '4.2.1'))
self._add_property('channels[].characteristics.ac_current_switching_max',
self._get_channel_characteristics_ac_current_switching_max,
None,
None,
ivi.Doc("""
The maximum AC current the channel can switch, in amperes RMS.
Notice that values for this attribute are on per-channel basis and may not
take into account the other switches that make up a path to or from this
channel.
""", cls, grp, '4.2.2'))
self._add_property('channels[].characteristics.ac_power_carry_max',
self._get_channel_characteristics_ac_power_carry_max,
None,
None,
ivi.Doc("""
The maximum AC power the channel can handle, in volt-amperes.
Notice that values for this attribute are on per-channel basis and may not
take into account the other switches that make up a path to or from this
channel.
""", cls, grp, '4.2.3'))
self._add_property('channels[].characteristics.ac_power_switching_max',
self._get_channel_characteristics_ac_power_switching_max,
None,
None,
ivi.Doc("""
The maximum AC power the channel can switch, in volt-amperes.
Notice that values for this attribute are on per-channel basis and may not
take into account the other switches that make up a path to or from this
channel.
""", cls, grp, '4.2.4'))
self._add_property('channels[].characteristics.ac_voltage_max',
self._get_channel_characteristics_ac_voltage_max,
None,
None,
ivi.Doc("""
The maximum AC voltage the channel can handle, in volts RMS.
Notice that values for this attribute are on per-channel basis and may not
take into account the other switches that make up a path to or from this
channel.
""", cls, grp, '4.2.5'))
self._add_property('channels[].characteristics.bandwidth',
self._get_channel_characteristics_bandwidth,
None,
None,
ivi.Doc("""
                        The maximum frequency signal, in Hertz, that can pass through the channel
                        without attenuating it by more than 3dB.
Notice that values for this attribute are on per-channel basis and may not
take into account the other switches that make up a path to or from this
channel.
""", cls, grp, '4.2.6'))
self._add_property('channels[].name',
self._get_channel_name,
None,
None,
ivi.Doc("""
This attribute returns the physical name identifier defined by the
specific driver for the Channel that corresponds to the one-based index
that the user specifies. If the driver defines a qualified channel name,
this property returns the qualified name. If the value that the user
passes for the Index parameter is less than one or greater than the value
of the Channel Count, the attribute returns an empty string for the value
and returns an error.
""", cls, grp, '4.2.9'))
self._add_property('channels[].characteristics.impedance',
self._get_channel_characteristics_impedance,
None,
None,
ivi.Doc("""
The characteristic impedance of the channel, in ohms.
Notice that values for this attribute are on per-channel basis and may not
take into account the other switches that make up a path to or from this
channel.
""", cls, grp, '4.2.10'))
self._add_property('channels[].characteristics.dc_current_carry_max',
self._get_channel_characteristics_dc_current_carry_max,
None,
None,
ivi.Doc("""
The maximum DC current the channel can carry, in amperes.
Notice that values for this attribute are on per-channel basis and may not
take into account the other switches that make up a path to or from this
channel.
""", cls, grp, '4.2.11'))
self._add_property('channels[].characteristics.dc_current_switching_max',
self._get_channel_characteristics_dc_current_switching_max,
None,
None,
ivi.Doc("""
                        The maximum DC current the channel can switch, in amperes.
Notice that values for this attribute are on per-channel basis and may not
take into account the other switches that make up a path to or from this
channel.
""", cls, grp, '4.2.12'))
self._add_property('channels[].characteristics.dc_power_carry_max',
self._get_channel_characteristics_dc_power_carry_max,
None,
None,
ivi.Doc("""
The maximum DC power the channel can handle, in watts.
Notice that values for this attribute are on per-channel basis and may not
take into account the other switches that make up a path to or from this
channel.
""", cls, grp, '4.2.13'))
self._add_property('channels[].characteristics.dc_power_switching_max',
self._get_channel_characteristics_dc_power_switching_max,
None,
None,
ivi.Doc("""
The maximum DC power the channel can switch, in watts.
Notice that values for this attribute are on per-channel basis and may not
take into account the other switches that make up a path to or from this
channel.
""", cls, grp, '4.2.14'))
self._add_property('channels[].characteristics.dc_voltage_max',
self._get_channel_characteristics_dc_voltage_max,
None,
None,
ivi.Doc("""
The maximum DC voltage the channel can handle, in volts.
Notice that values for this attribute are on per-channel basis and may not
take into account the other switches that make up a path to or from this
channel.
""", cls, grp, '4.2.15'))
self._add_property('channels[].is_configuration_channel',
self._get_channel_is_configuration_channel,
self._set_channel_is_configuration_channel,
None,
ivi.Doc("""
Specifies whether the specific driver uses the channel for internal path
creation. If set to True, the channel is no longer accessible to the user
and can be used by the specific driver for path creation. If set to False,
the channel is considered a standard channel and can be explicitly
connected to another channel.
For example, if the user specifies a column-to-column connection in a
matrix, it typically must use at least one row channel to make the
connection. Specifying a channel as a configuration channel allows the
instrument driver to use it to create the path.
Notice that once a channel has been configured as a configuration channel,
then no operation can be performed on that channel, except for reading and
writing the Is Configuration Channel attribute.
""", cls, grp, '4.2.16'))
self._add_property('path.is_debounced',
self._get_path_is_debounced,
None,
None,
ivi.Doc("""
This attribute indicates whether the switch module has settled from the
switching commands and completed the debounce. If True, the switch module
has settled from the switching commands and completed the debounce. It
indicates that the signal going through the switch module is valid,
assuming that the switches in the path have the correct characteristics.
If False, the switch module has not settled.
""", cls, grp, '4.2.17'))
self._add_property('channels[].is_source_channel',
self._get_channel_is_source_channel,
self._set_channel_is_source_channel,
None,
ivi.Doc("""
Allows the user to declare a particular channel as a source channel. If
set to True, the channel is a source channel. If set to False, the channel
is not a source channel.
If a user ever attempts to connect two channels that are either sources or
have their own connections to sources, the path creation operation returns
an error. Notice that the term source can be from either the instrument or
the UUT perspective. This requires the driver to ensure with each
connection that another connection within the switch module does not
connect to another source.
The intention of this attribute is to prevent channels from being
connected that may cause damage to the channels, devices, or system.
Notice that GROUND can be considered a source in some circumstances.
""", cls, grp, '4.2.18'))
self._add_property('channels[].characteristics.settling_time',
self._get_channel_characteristics_settling_time,
None,
None,
ivi.Doc("""
The maximum total settling time for the channel before the signal going
through it is considered stable. This includes both the activation time
for the channel as well as any debounce time.
Notice that values for this attribute are on per-channel basis and may not
take into account the other switches that make up a path to or from this
channel.
The units are seconds.
""", cls, grp, '4.2.19'))
self._add_property('channels[].characteristics.wire_mode',
self._get_channel_characteristics_wire_mode,
None,
None,
ivi.Doc("""
This attribute describes the number of conductors in the current channel.
Notice that values for this attribute are on per-channel basis and may not
take into account the other switches that make up a path to or from this
channel.
For example, this attribute returns 2 if the channel has two conductors.
""", cls, grp, '4.2.20'))
self._add_method('path.can_connect',
self._path_can_connect,
ivi.Doc("""
The purpose of this function is to allow the user to verify whether the
switch module can create a given path without the switch module actually
creating the path. In addition, the operation indicates whether the switch
module can create the path at the moment based on the current paths in
existence.
Notice that while this operation is available for the end user, the
primary purpose of this operation is to allow higher-level switch drivers
to incorporate IviSwtch drivers into higher level switching systems.
                        If an implicit connection exists between the two specified channels, this
                        function returns the warning Implicit Connection Exists.
""", cls, grp, '4.3.1'))
self._add_method('path.connect',
self._path_connect,
ivi.Doc("""
This function takes two channel names and, if possible, creates a path
between the two channels. If the path already exists, the operation does
not count the number of calls. For example, it does not remember that
there were two calls to connect, thus requiring two calls to disconnect,
but instead returns an error, regardless of whether the order of the two
channels is the same or different on the two calls. This is true because
paths are assumed to be bi-directional. This class does not handle
unidirectional paths. Notice that the IVI spec does not specify the
default names for the channels because this depends on the architecture
of the switch module. The user can specify aliases for the vendor defined
channel names in the IVI Configuration Store.
This function returns as soon as the command is given to the switch module
and the switch module is ready for another command. This may be before or
after the switches involved settle. Use the Is Debounced function to
determine if the switch module has settled. Use the Wait For Debounce
function if you want to wait until the switch has debounced.
If an explicit connection already exists between the two specified
channels, this function returns the error Explicit Connection Exists
without performing any connection operation.
If one of the specified channels is a configuration channel, this function
returns the error Is Configuration Channel without performing any
connection operation.
If the two specified channels are both connected to a different source,
this function returns the error Attempt To Connect Sources without
performing any connection operation.
If the two specified channels are the same, this function returns the
error Cannot Connect To Itself without performing any connection
operation.
If a path cannot be found between the two specified channels, this
function returns the error Path Not Found without performing any
connection operation.
""", cls, grp, '4.3.2'))
self._add_method('path.disconnect',
self._path_disconnect,
ivi.Doc("""
This function takes two channel names and, if possible, destroys the path
between the two channels. The order of the two channels in the operation
does not need to be the same as the connect operation. Notice that the IVI
specification does not specify what the default names are for the channels
as this depends on the architecture of the switch module. The user can
specify aliases for the vendor defined channel names in the IVI
Configuration Store.
This function returns as soon as the command is given to the switch module
and the switch module is ready for another command. This may be before or
after the switches involved settle. Use the Is Debounced attribute to see
if the switch has settled. Use the Wait For Debounce function if you want
to wait until the switch has debounced.
If some connections remain after disconnecting the two specified channels,
this function returns the warning Path Remains.
If no explicit path exists between the two specified channels, this
function returns the error No Such Path without performing any
disconnection operation.
""", cls, grp, '4.3.3'))
self._add_method('path.disconnect_all',
self._path_disconnect_all,
ivi.Doc("""
The purpose of this function is to allow the user to disconnect all paths
created since Initialize or Reset have been called. This can be used as
the test program goes from one sub-test to another to ensure there are no
side effects in the switch module.
Notice that some switch modules may not be able to disconnect all paths
(such as a scanner that must keep at least one path). In these cases, this
function returns the warning Path Remains.
""", cls, grp, '4.3.4'))
self._add_method('path.get_path',
self._path_get_path,
ivi.Doc("""
This function returns a list of channels (see the Set Path function for a
description on the syntax of path list) that have been connected in order
to create the path between the specified channels. The names of the
switches as well as the internal configuration of the switch module are
vendor specific. This function can be used to return the list of the
switches in order to better understand the signal characteristics of the
path and to provide the path list for the Set Path function.
The first and last names in the list are the channel names of the path.
All channels other than the first and the last channel in the path list
are configuration channels. No other channel can be used to generate the
path between the two channels.
The only valid paths that can be returned are ones that have been
explicitly set via Connect and Set Path functions.
If no explicit path exists between the two specified channels, this
function returns the error No Such Path.
""", cls, grp, '4.3.6'))
self._add_method('path.set_path',
self._path_set_path,
ivi.Doc("""
The IVI Switch is designed to provide automatic routing from channel to
channel. However, due to such issues as calibration, it may be necessary
to have deterministic control over the path that is created between two
channels. This function allows the user to specify the exact path, in
terms of the configuration channels used, to create. Notice that the end
channel names are the first and last entries in the Path List parameter.
The driver makes a connection between the channels using the configuration
channels. The intermediary steps are called legs of the path.
The path list syntax is a string array of channels. Path lists obey the
following rules:
* In the array, elements n and n+1 create a path leg.
* Every channel in the path list other than the first and the last must be
a configuration channel.
* Driver channel strings as well as virtual channel names may be used to
describe a path leg in a path list.
An example of creating a path list is:
path_list = ['ch1', 'conf1', 'ch2']
It should be noticed that, even if users utilize virtual channel names,
path_list is not interchangeable since the names of switches within the
switch module are not required to be interchangeable and depend on the
internal architecture of the switch module. However, it is possible to use
the Connect and then Get Path functions to retrieve an already existing
path. This allows the user to guarantee that the routing can be recreated
exactly.
If the specified path list is empty, this function returns the error Empty
Switch Path without performing any connection operation.
If one of the channels in the path list is a configuration channel that is
currently in use, this function returns the error Resource In Use without
performing any connection operation.
If an explicit connection is made to a configuration channel, this
function returns the error Is Configuration Channel without performing any
connection operation.
If one of the non-terminal channels in the path list is not a
configuration channel, this function returns the error Not A Configuration
Channel without performing any connection operation.
If the path list attempts to connect between two different source
channels, this function returns the error Attempt To Connect Sources
without performing any connection operation.
If the path list attempts to connect between channels that already have an
explicit connection, this function returns the error Explicit Connection
Exists without performing any connection operation.
If the first and the second channels in the leg are the same, this
function returns the error Channel Duplicated In Leg without performing
any connection operation.
If a channel name is duplicated in the path list, this function returns
the error Channel Duplicated In Path without performing any connection
operation.
If the path list contains a leg with two channels that cannot be directly
connected, this function returns the error Cannot Connect Directly without
performing any connection operation. If a leg in the path contains two
channels that are already directly connected, this function returns the
error Channels Already Connected without performing any connection
operation.
""", cls, grp, '4.3.8'))
self._add_method('path.wait_for_debounce',
self._path_wait_for_debounce,
ivi.Doc("""
The purpose of this function is to wait until the path through the switch
is stable (debounced). If the signals did not settle within the time
period the user specified with the maximum_time parameter, the function
returns the Max Time Exceeded error.
""", cls, grp, '4.3.9'))
self._init_channels()
def _init_channels(self):
try:
super(Base, self)._init_channels()
except AttributeError:
pass
self._channel_name = list()
self._channel_characteristics_ac_current_carry_max = list()
self._channel_characteristics_ac_current_switching_max = list()
self._channel_characteristics_ac_power_carry_max = list()
self._channel_characteristics_ac_power_switching_max = list()
self._channel_characteristics_ac_voltage_max = list()
self._channel_characteristics_bandwidth = list()
self._channel_characteristics_impedance = list()
self._channel_characteristics_dc_current_carry_max = list()
self._channel_characteristics_dc_current_switching_max = list()
self._channel_characteristics_dc_power_carry_max = list()
self._channel_characteristics_dc_power_switching_max = list()
self._channel_characteristics_dc_voltage_max = list()
self._channel_is_configuration_channel = list()
self._channel_is_source_channel = list()
self._channel_characteristics_settling_time = list()
self._channel_characteristics_wire_mode = list()
for i in range(self._channel_count):
self._channel_name.append("channel%d" % (i+1))
self._channel_characteristics_ac_current_carry_max.append(0.1)
self._channel_characteristics_ac_current_switching_max.append(0.1)
self._channel_characteristics_ac_power_carry_max.append(1)
self._channel_characteristics_ac_power_switching_max.append(1)
self._channel_characteristics_ac_voltage_max.append(100)
self._channel_characteristics_bandwidth.append(1e6)
self._channel_characteristics_impedance.append(50)
self._channel_characteristics_dc_current_carry_max.append(0.1)
self._channel_characteristics_dc_current_switching_max.append(0.1)
self._channel_characteristics_dc_power_carry_max.append(1)
self._channel_characteristics_dc_power_switching_max.append(1)
self._channel_characteristics_dc_voltage_max.append(100)
self._channel_is_configuration_channel.append(False)
self._channel_is_source_channel.append(False)
self._channel_characteristics_settling_time.append(0.1)
self._channel_characteristics_wire_mode.append(1)
self.channels._set_list(self._channel_name)
def _get_channel_characteristics_ac_current_carry_max(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_characteristics_ac_current_carry_max[index]
def _get_channel_characteristics_ac_current_switching_max(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_characteristics_ac_current_switching_max[index]
def _get_channel_characteristics_ac_power_carry_max(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_characteristics_ac_power_carry_max[index]
def _get_channel_characteristics_ac_power_switching_max(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_characteristics_ac_power_switching_max[index]
def _get_channel_characteristics_ac_voltage_max(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_characteristics_ac_voltage_max[index]
def _get_channel_characteristics_bandwidth(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_characteristics_bandwidth[index]
def _get_channel_name(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_name[index]
def _get_channel_characteristics_impedance(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_characteristics_impedance[index]
def _get_channel_characteristics_dc_current_carry_max(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_characteristics_dc_current_carry_max[index]
def _get_channel_characteristics_dc_current_switching_max(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_characteristics_dc_current_switching_max[index]
def _get_channel_characteristics_dc_power_carry_max(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_characteristics_dc_power_carry_max[index]
def _get_channel_characteristics_dc_power_switching_max(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_characteristics_dc_power_switching_max[index]
def _get_channel_characteristics_dc_voltage_max(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_characteristics_dc_voltage_max[index]
def _get_channel_is_configuration_channel(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_is_configuration_channel[index]
def _set_channel_is_configuration_channel(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = bool(value)
self._channel_is_configuration_channel[index] = value
    def _get_path_is_debounced(self):
        # path.is_debounced is module-wide (see its Doc above), so no channel index
        return self._path_is_debounced
def _get_channel_is_source_channel(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_is_source_channel[index]
def _set_channel_is_source_channel(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = bool(value)
self._channel_is_source_channel[index] = value
def _get_channel_characteristics_settling_time(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_characteristics_settling_time[index]
def _get_channel_characteristics_wire_mode(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_characteristics_wire_mode[index]
def _path_can_connect(self, channel1, channel2):
channel1 = ivi.get_index(self._channel_name, channel1)
channel2 = ivi.get_index(self._channel_name, channel2)
return False
def _path_connect(self, channel1, channel2):
channel1 = ivi.get_index(self._channel_name, channel1)
channel2 = ivi.get_index(self._channel_name, channel2)
def _path_disconnect(self, channel1, channel2):
channel1 = ivi.get_index(self._channel_name, channel1)
channel2 = ivi.get_index(self._channel_name, channel2)
def _path_disconnect_all(self):
pass
def _path_get_path(self, channel1, channel2):
channel1 = ivi.get_index(self._channel_name, channel1)
channel2 = ivi.get_index(self._channel_name, channel2)
return []
def _path_set_path(self, path):
pass
def _path_wait_for_debounce(self, maximum_time):
pass
# Scanner
# SoftwareTrigger
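# Hedged usage sketch (the driver class and resource string below are
# hypothetical; only the Base attributes and methods defined above are assumed,
# and the default channel names come from _init_channels):
#
#   swtch = SomeSwtchDriver("TCPIP::192.168.1.100::INSTR")
#   if swtch.path.can_connect('channel1', 'channel2'):
#       swtch.path.connect('channel1', 'channel2')
#       swtch.path.wait_for_debounce(1.0)
#       print(swtch.path.get_path('channel1', 'channel2'))
#       swtch.path.disconnect('channel1', 'channel2')
#   swtch.path.disconnect_all()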
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom configurations and functions for Google App Engine."""
__author__ = 'psimakov@google.com (Pavel Simakov)'
import datetime
import importlib
import logging
import os
import sys
from common import manifests
# configure Appstats
appstats_MAX_STACK = 20
# Whether we are running in the production environment.
PRODUCTION_MODE = not os.environ.get(
'SERVER_SOFTWARE', 'Development').startswith('Development')
# Set this flag to true to enable bulk downloads of Javascript/CSS files in lib
BUNDLE_LIB_FILES = not os.environ.get(
'GCB_STATIC_SERV_ENABLED', 'false').upper() == 'TRUE'
# Set this flag to true if you can generate flattened polymer import files
USE_FLATTENED_HTML_IMPORTS = os.environ.get(
'GCB_STATIC_SERV_ENABLED', 'false').upper() == 'TRUE'
# this is the official location of this app for computing of all relative paths
BUNDLE_ROOT = os.path.dirname(__file__)
# make all Windows and Linux paths have the same separator '/'
BUNDLE_ROOT = BUNDLE_ROOT.replace('\\', '/')
CODE_ROOT = BUNDLE_ROOT
# Default namespace name is '' and not None.
DEFAULT_NAMESPACE_NAME = ''
# Flag to indicate whether module importation is in progress. Some modules
# and core items may wish to be a little flexible about warnings and
# exceptions due to some, but not all, modules being imported yet at module
# registration time.
MODULE_REGISTRATION_IN_PROGRESS = False
# Name for the core module. We don't actually have any code in modules/core,
# since having a core module is pretty well a contradiction in terms. However,
# there are a few things that want module and module-like-things to register
# themselves by name, and so here we provide a name for the un-module that is
# the immutable core functionality.
CORE_MODULE_NAME = 'core'
class _Library(object):
"""DDO that represents a Python library contained in a .zip file."""
def __init__(self, zipfile, relative_path=None):
self._relative_path = relative_path
self._zipfile = zipfile
@property
def file_path(self):
"""Path to the library's file on disk."""
return os.path.join(BUNDLE_ROOT, 'lib', self._zipfile)
@property
def full_path(self):
"""Full path for imports, containing archive-relative paths if any."""
path = self.file_path
if self._relative_path:
path = os.path.join(path, self._relative_path)
return path
# Google-produced library zip files.
GOOGLE_LIBS = [
_Library('google-api-python-client-1.4.0.zip'),
_Library('GoogleAppEngineCloudStorageClient-1.9.15.0.zip',
relative_path='GoogleAppEngineCloudStorageClient-1.9.15.0'),
_Library('GoogleAppEnginePipeline-1.9.17.0.zip',
relative_path='GoogleAppEnginePipeline-1.9.17.0'),
]
# Third-party library zip files.
THIRD_PARTY_LIBS = [
_Library('Graphy-1.0.0.zip', relative_path='Graphy-1.0.0'),
_Library('appengine-mapreduce-0.8.2.zip',
relative_path='appengine-mapreduce-0.8.2/python/src'),
_Library('babel-0.9.6.zip'),
_Library('decorator-3.4.0.zip', relative_path='src'),
_Library('gaepytz-2011h.zip'),
_Library('graphene-0.7.3.zip'),
_Library('graphql-core-0.4.12.1.zip'),
_Library('graphql-relay-0.3.3.zip'),
_Library('html5lib-0.95.zip'),
_Library('identity-toolkit-python-client-0.1.6.zip'),
_Library('markdown-2.5.zip', relative_path='Markdown-2.5'),
_Library('mrs-mapreduce-0.9.zip', relative_path='mrs-mapreduce-0.9'),
_Library('networkx-1.9.1.zip', relative_path='networkx-1.9.1'),
_Library('oauth-1.0.1.zip', relative_path='oauth'),
_Library('pyparsing-1.5.7.zip'),
_Library('reportlab-3.1.8.zip'),
_Library('simplejson-3.7.1.zip', relative_path='simplejson-3.7.1'),
_Library('six-1.10.0.zip'),
# rdflib and deps
_Library('isodate-0.5.5.zip', relative_path='src'),
_Library('rdflib-4.2.2-dev.zip', relative_path='rdflib'),
]
ALL_LIBS = GOOGLE_LIBS + THIRD_PARTY_LIBS
def gcb_force_default_encoding(encoding):
"""Force default encoding to a specific value."""
# Eclipse silently sets default encoding to 'utf-8', while GAE forces
# 'ascii'. We need to control this directly for consistency.
if sys.getdefaultencoding() != encoding:
reload(sys)
sys.setdefaultencoding(encoding)
def _third_party_libs_from_env():
ret = []
for lib_config in os.environ.get('GCB_THIRD_PARTY_LIBRARIES', '').split():
parts = lib_config.split(':')
if len(parts) == 1:
ret.append(_Library(parts[0]))
else:
ret.append(_Library(parts[0], relative_path=parts[1]))
return ret
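# Sketch of the environment variable parsed above (zip names are illustrative
# only): entries are whitespace-separated, and each entry is either a bare zip
# file name or "zipname:relative_path", e.g.
#
#   GCB_THIRD_PARTY_LIBRARIES="foo-1.0.zip bar-2.1.zip:bar-2.1/src"
#
# which yields _Library('foo-1.0.zip') and
# _Library('bar-2.1.zip', relative_path='bar-2.1/src').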
def gcb_init_third_party():
"""Add all third party libraries to system path."""
for lib in ALL_LIBS + _third_party_libs_from_env():
if not os.path.exists(lib.file_path):
raise Exception('Library does not exist: %s' % lib.file_path)
sys.path.insert(0, lib.full_path)
def gcb_appstats_enabled():
return 'True' == os.environ.get('GCB_APPSTATS_ENABLED')
def gcb_test_mode():
return os.environ.get('GCB_TEST_MODE', 'false').upper() == 'TRUE'
def webapp_add_wsgi_middleware(app):
"""Enable AppStats if requested."""
if gcb_appstats_enabled():
logging.info('Enabling AppStats.')
from google.appengine.ext.appstats import recording
app = recording.appstats_wsgi_middleware(app)
return app
def _import_and_enable_modules(env_var, reraise=False):
for module_name in os.environ.get(env_var, '').split():
enabled = True
if module_name.count('='):
module_name, option = module_name.split('=', 1)
enabled = (option.lower() == 'enabled')
_import_module_by_name(module_name, enabled, reraise=reraise)
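# Sketch of the module list format parsed above (module names are illustrative
# only): entries are whitespace-separated, each optionally carrying an
# "=enabled"/"=disabled" suffix, e.g.
#
#   GCB_REGISTERED_MODULES_CUSTOM="modules.foo.foo modules.bar.bar=disabled"
#
# Every listed module is imported and registered; it is enabled unless its
# suffix is something other than "enabled".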
def _import_module_by_name(module_name, enabled, reraise=False):
try:
operation = 'importing'
module = importlib.import_module(module_name)
operation = 'registering'
custom_module = module.register_module()
if enabled:
operation = 'enabling'
custom_module.enable()
except Exception, ex: # pylint: disable=broad-except
logging.exception('Problem %s module "%s"', operation, module_name)
if reraise:
raise ex
def _import_and_enable_modules_by_manifest():
modules = manifests.ModulesRepo(BUNDLE_ROOT)
for module_name, manifest in sorted(modules.module_to_manifest.iteritems()):
registration = manifest.get_registration()
if registration.main_module:
enabled = (
registration.enabled or
(registration.enabled_for_tests and gcb_test_mode()))
_import_module_by_name(registration.main_module, enabled)
def import_and_enable_modules():
global MODULE_REGISTRATION_IN_PROGRESS # pylint: disable=global-statement
MODULE_REGISTRATION_IN_PROGRESS = True
_import_and_enable_modules('GCB_PRELOADED_MODULES')
_import_and_enable_modules('GCB_REGISTERED_MODULES_CUSTOM')
_import_and_enable_modules('GCB_THIRD_PARTY_MODULES')
_import_and_enable_modules_by_manifest()
MODULE_REGISTRATION_IN_PROGRESS = False
def time_delta_to_millis(delta):
"""Converts time delta into total number of milliseconds."""
millis = delta.days * 24 * 60 * 60 * 1000
millis += delta.seconds * 1000
millis += delta.microseconds / 1000
return millis
def timeandlog(name, duration_only=False):
"""Times and logs execution of decorated method."""
def timed_1(func):
def timed_2(*args, **kwargs):
_name = name
if args and isinstance(args[0], type):
_name += '.' + str(args[0].__name__)
before = datetime.datetime.utcnow()
if not duration_only:
log_appstats_event(_name + '.enter')
result = func(*args, **kwargs)
after = datetime.datetime.utcnow()
millis = time_delta_to_millis(after - before)
if duration_only:
logging.info(_name + ': duration=%sms' % millis)
log_appstats_event(_name, {'millis': millis})
else:
logging.info(_name + '.leave: duration=%sms' % millis)
log_appstats_event(_name + '.leave', {'millis': millis})
return result
if gcb_appstats_enabled():
return timed_2
else:
return func
return timed_1
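# Usage sketch for the decorator above (the dotted name and function are
# illustrative only): when Appstats is enabled the wrapper logs enter/leave
# events, or just the duration when duration_only=True; otherwise the original
# function is returned unchanged.
#
#   @timeandlog('example.do_work', duration_only=True)
#   def do_work():
#       ...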
def log_appstats_event(label, data=None):
if gcb_appstats_enabled():
try:
from google.appengine.ext.appstats.recording import recorder_proxy
if recorder_proxy and (
recorder_proxy.has_recorder_for_current_request()):
recorder_proxy.record_custom_event(label=label, data=data)
except Exception: # pylint: disable=broad-except
logging.exception('Failed to record Appstats event %s.', label)
gcb_init_third_party()
|
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import onnx
import tensorrt as trt
import json
from code.bert.tensorrt_sparse.builder_utils import add_gelu, mark
def bert_encoder_layer_int8_vs_il(cfg, max_seqlen, weights_dict, network, input_tensor, residual, cu_seqlens, layer):
"""builds one encoder layer, setting the dynamic ranges extracted from the qat checkpoint"""
plg_registry = trt.get_plugin_registry()
qkv_plg_creator = plg_registry.get_plugin_creator("CustomQKVToContextPluginDynamic", "3", "")
pc_skln = plg_registry.get_plugin_creator("CustomSkipLayerNormPluginDynamic", "4", "")
dtype=trt.int8
N = cfg.N
H = cfg.H
prefix = 'bert.encoder.layers.{}.'.format(layer)
dr_input = weights_dict[prefix + 'attention.query_key_value._input_quantizer._amax']
input_tensor.set_dynamic_range(-dr_input, dr_input)
##### FC QKV
dr_qkv = max(
weights_dict[prefix + 'attention.matmul_q_quantizer._amax'], # attention_self_qv_a_input_quantizer_amax
weights_dict[prefix + 'attention.matmul_k_quantizer._amax'], # attention_self_qv_b_input_quantizer_amax
weights_dict[prefix + 'attention.matmul_v_quantizer._amax'], # attention_self_av_b_input_quantizer_amax
)
Wqkv = weights_dict[prefix + 'attention.query_key_value.weight']
Bqkv = weights_dict[prefix + 'attention.query_key_value.bias']
Wqkv = np.ascontiguousarray(Wqkv.reshape((3, N, H, N, H)))
Bqkv = np.ascontiguousarray(Bqkv.reshape((3, N, H)))
Wqkv = trt.Weights(Wqkv.astype(np.float32))
Bqkv = trt.Weights(Bqkv.astype(np.float32))
fc_qkv = network.add_convolution(input_tensor, cfg.qkv_size, (1,1), Wqkv, Bqkv)
fc_qkv.name = prefix + 'fc_qkv'
fc_qkv_out = fc_qkv.get_output(0)
fc_qkv_out.name = prefix + 'attention_self_qkv_mult'
fc_qkv_out.set_dynamic_range(-dr_qkv, dr_qkv)
##### QKV2CTX
dr_probs = weights_dict[prefix + 'attention.matmul_a_quantizer._amax']
dq_probs = dr_probs / 127.0
pf_hidden_size = trt.PluginField("hidden_size", np.array([cfg.hidden_size], np.int32), trt.PluginFieldType.INT32)
pf_num_heads = trt.PluginField("num_heads", np.array([cfg.N], np.int32), trt.PluginFieldType.INT32)
pf_dq_probs = trt.PluginField("dq_probs", np.array([dq_probs], np.float32), trt.PluginFieldType.FLOAT32)
pfc = trt.PluginFieldCollection([pf_hidden_size, pf_num_heads, pf_dq_probs])
qkv2ctx_plug = qkv_plg_creator.create_plugin("qkv2ctx", pfc)
dr_ctx = weights_dict[prefix+'attention.output.dense._input_quantizer._amax']
qkv2ctx_layer = network.add_plugin_v2([fc_qkv_out, cu_seqlens, max_seqlen], qkv2ctx_plug)
qkv2ctx_layer.name = prefix + 'qkv_to_ctx'
qkv2ctx_out = qkv2ctx_layer.get_output(0)
qkv2ctx_out.set_dynamic_range(-dr_ctx, dr_ctx)
##### FC AOUT
dr_fc_aout = weights_dict[prefix + 'attention.output.add_local_input_quantizer._amax']
Waout = weights_dict[prefix + 'attention.output.dense.weight']
Baout = weights_dict[prefix + 'attention.output.dense.bias']
Waout = trt.Weights(Waout.astype(np.float32))
Baout = trt.Weights(Baout.astype(np.float32))
fc_aout = network.add_convolution(qkv2ctx_out, cfg.hidden_size, (1,1), Waout, Baout)
fc_aout.precision=dtype
fc_aout.name = prefix + 'fc_aout'
fc_aout_out = fc_aout.get_output(0)
fc_aout_out.dtype = dtype
fc_aout_out.set_dynamic_range(-dr_fc_aout, dr_fc_aout)
##### Skip-Layernorm 1
dr_skln1 = weights_dict[prefix + 'intermediate.dense_act._input_quantizer._amax']
pf_ld = trt.PluginField("ld", np.array([cfg.hidden_size], np.int32), trt.PluginFieldType.INT32)
pf_beta = trt.PluginField("beta", weights_dict[prefix+'attention.output.LayerNorm.bias'], trt.PluginFieldType.FLOAT32)
pf_gamma = trt.PluginField("gamma", weights_dict[prefix+'attention.output.LayerNorm.weight'], trt.PluginFieldType.FLOAT32)
fields = [pf_beta, pf_gamma]
pfc = trt.PluginFieldCollection(fields)
skipln_plug = pc_skln.create_plugin("skipln", pfc)
dr_skln1_res_in = weights_dict[prefix + 'attention.output.add_residual_input_quantizer._amax']
residual.set_dynamic_range(-dr_skln1_res_in, dr_skln1_res_in)
skipln_inputs = [fc_aout_out, residual]
skln1 = network.add_plugin_v2(skipln_inputs, skipln_plug)
skln1.name = prefix+'skln_1'
skln1_out = skln1.get_output(0)
skln1_out.dtype = dtype
skln1_out.set_dynamic_range(-dr_skln1, dr_skln1)
skln1_residual = skln1.get_output(1)
skln1_residual.dtype = dtype
##### FC MID
Wmid = weights_dict[prefix + 'intermediate.dense_act.weight']
Bmid = weights_dict[prefix + 'intermediate.dense_act.bias']
Wmid = trt.Weights(Wmid.astype(np.float32))
Bmid = trt.Weights(Bmid.astype(np.float32))
fc_mid = network.add_convolution(skln1_out, cfg.mid_size, (1,1),Wmid, Bmid)
fc_mid.name = prefix+'fc_mid'
fc_mid_out = fc_mid.get_output(0)
##### GELU
dr_gelu = weights_dict[prefix + 'output.dense._input_quantizer._amax']
gelu_layer = add_gelu(network, fc_mid_out)
gelu_layer.name = prefix + 'gelu'
gelu_out = gelu_layer.get_output(0)
gelu_out.set_dynamic_range(-dr_gelu, dr_gelu)
##### FC OUT
dr_fc_out = weights_dict[prefix + 'output.add_local_input_quantizer._amax']
Wout = weights_dict[prefix + 'output.dense.weight']
Bout = weights_dict[prefix + 'output.dense.bias']
Wout = trt.Weights(Wout.astype(np.float32))
Bout = trt.Weights(Bout.astype(np.float32))
fc_out = network.add_convolution(gelu_out, cfg.hidden_size, (1,1), Wout, Bout)
fc_out.name = prefix + 'fc_out'
fc_out.precision = dtype
fc_out_out = fc_out.get_output(0)
fc_out_out.dtype = dtype
fc_out_out.set_dynamic_range(-dr_fc_out, dr_fc_out)
##### Skip-Layernorm 2
if layer == cfg.L - 1:
pf_beta = trt.PluginField("beta", weights_dict['bert.encoder.encoder.layers.23.output.LayerNorm.bias'], trt.PluginFieldType.FLOAT32)
pf_gamma = trt.PluginField("gamma", weights_dict['bert.encoder.encoder.layers.23.output.LayerNorm.weight'], trt.PluginFieldType.FLOAT32)
pc_skln = plg_registry.get_plugin_creator("CustomSkipLayerNormPluginDynamic", "3", "")
else:
pf_beta = trt.PluginField("beta", weights_dict[prefix+'output.LayerNorm.bias'], trt.PluginFieldType.FLOAT32)
pf_gamma = trt.PluginField("gamma", weights_dict[prefix+'output.LayerNorm.weight'], trt.PluginFieldType.FLOAT32)
fields = [pf_beta, pf_gamma]
pfc = trt.PluginFieldCollection(fields)
skipln_plug = pc_skln.create_plugin("skipln", pfc)
dr_skln2_res_in = weights_dict[prefix + 'output.add_residual_input_quantizer._amax']
skln1_residual.set_dynamic_range(-dr_skln2_res_in, dr_skln2_res_in)
skipln_inputs = [fc_out_out, skln1_residual]
skln2 = network.add_plugin_v2(skipln_inputs, skipln_plug)
skln2.name = prefix + 'skln_2'
skln2_out = skln2.get_output(0)
if layer == cfg.L - 1:
skln2_residual = None
else:
skln2_residual = skln2.get_output(1)
skln2_residual.dtype = dtype
return skln2_out, skln2_residual
def bert_squad_int8_vs_il(network, weights_dict, cfg, input_shape, cu_seqlens_shape):
#instantiate all the plugins
plg_registry = trt.get_plugin_registry()
pc_emb = plg_registry.get_plugin_creator("CustomEmbLayerNormPluginDynamic", "3", "")
wbeta = trt.PluginField("bert_embeddings_layernorm_beta", weights_dict["bert.embeddings.LayerNorm.bias"], trt.PluginFieldType.FLOAT32)
wgamma = trt.PluginField("bert_embeddings_layernorm_gamma", weights_dict["bert.embeddings.LayerNorm.weight"], trt.PluginFieldType.FLOAT32)
wwordemb = trt.PluginField("bert_embeddings_word_embeddings", weights_dict["bert.embedding.word_embeddings.weight"], trt.PluginFieldType.FLOAT32)
wtokemb = trt.PluginField("bert_embeddings_token_type_embeddings", weights_dict["bert.embedding.tokentype_embeddings.weight"], trt.PluginFieldType.FLOAT32)
wposemb = trt.PluginField("bert_embeddings_position_embeddings", weights_dict["bert.embedding.position_embeddings.weight"], trt.PluginFieldType.FLOAT32)
output_fp16 = trt.PluginField("output_fp16", np.array([int(trt.float16)]).astype(np.int32), trt.PluginFieldType.INT32)
pfc = trt.PluginFieldCollection([wbeta, wgamma, wwordemb, wtokemb, wposemb, output_fp16])
embln_plugin = pc_emb.create_plugin("embeddings", pfc)
dtype = trt.int8
input_ids = network.add_input(name="input_ids", dtype=trt.int32, shape=input_shape)
segment_ids = network.add_input(name="segment_ids", dtype=trt.int32, shape=input_shape)
cu_seqlens = network.add_input(name="cu_seqlens", dtype=trt.int32, shape=cu_seqlens_shape)
#dummy input used to indicate maximum sequence length to plugins
max_seqlen = network.add_input(name="max_seqlen", dtype=trt.int32, shape=(-1,))
inputs = [input_ids, segment_ids, cu_seqlens, max_seqlen]
emb_layer = network.add_plugin_v2(inputs, embln_plugin)
emb_layer.name = 'embln'
embeddings = emb_layer.get_output(0)
residual = emb_layer.get_output(1)
# we ideally want to go to int8 before the shuffle
dr_emb = weights_dict['bert.encoder.layers.0.attention.query_key_value._input_quantizer._amax']
embeddings.dtype = dtype
embeddings.set_dynamic_range(-dr_emb, dr_emb)
dr_skln1_res_in = weights_dict['bert.encoder.layers.0.attention.output.add_residual_input_quantizer._amax']
residual.dtype = dtype
residual.set_dynamic_range(-dr_skln1_res_in, dr_skln1_res_in)
shufflei = network.add_shuffle(embeddings)
shufflei.name = 'shufflei'
shufflei.second_transpose = (2,1,0,3)
shufflei_out = shufflei.get_output(0)
shufflei_out.set_dynamic_range(-dr_emb, dr_emb)
shufflei_out.dtype = dtype
shufflei_out.allowed_formats = 1 << int(trt.TensorFormat.CHW32)
shuffler = network.add_shuffle(residual)
shuffler.name = 'shuffler'
shuffler.second_transpose = (2,1,0,3)
shuffler_out = shuffler.get_output(0)
shuffler_out.set_dynamic_range(-dr_emb, dr_emb)
shuffler_out.dtype = dtype
shuffler_out.allowed_formats = 1 << int(trt.TensorFormat.CHW32)
embeddings = shufflei_out
residual = shuffler_out
layer = 0
for layer in range(cfg.L):
embeddings, residual = bert_encoder_layer_int8_vs_il(cfg, max_seqlen, weights_dict, network, embeddings, residual, cu_seqlens, layer)
Wsquad = weights_dict['qa_outputs.weight']
Bsquad = weights_dict['qa_outputs.bias']
Wsquad = trt.Weights(Wsquad.astype(np.float32))
Bsquad = trt.Weights(Bsquad.astype(np.float32))
dr_out = weights_dict['bert.encoder.encoder.layers.23.output.LayerNorm_output_quantizer._amax']
embeddings.set_dynamic_range(-dr_out, dr_out)
squad_output = network.add_convolution(embeddings, 2, (1,1), Wsquad, Bsquad)
squad_output.name = 'squad_logits'
logits = squad_output.get_output(0)
#1 x 2 x sum_s x 1
logit_shuffle = network.add_shuffle(logits)
logit_shuffle.first_transpose = (2,1,0,3)
logits = logit_shuffle.get_output(0)
# output shape will be sum_s x 2 (x 1 x 1)
mark(network, logits, trt.float16)
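# A hedged sketch of how this network-building entry point is typically driven
# (the builder calls are standard TensorRT API, but the actual driver script is
# not shown here, and the shapes below are illustrative assumptions):
#
#   logger = trt.Logger(trt.Logger.INFO)
#   builder = trt.Builder(logger)
#   network = builder.create_network(
#       1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
#   bert_squad_int8_vs_il(network, weights_dict, cfg,
#                         input_shape=(-1,), cu_seqlens_shape=(-1,))
#   config = builder.create_builder_config()
#   config.set_flag(trt.BuilderFlag.INT8)
#   engine = builder.build_engine(network, config)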
|
|
import sys
import numpy as np
import argparse
import scipy.io
import theano
import theano.tensor as T
import lasagne
from sklearn import preprocessing
from spacy.en import English
import time
sys.path.append('/home/mayank/visual_question_ans/tools/')
from tools2 import selectFrequentAnswers, grouper
from feature_extraction import get_questions_matrix_sum, get_images_matrix, get_answers_matrix, to_categorical
def iterate_minibatches(questions, answers, images, batchsize):
assert len(questions) == len(answers)
for start_idx in range(0, len(questions)-batchsize+1, batchsize):
examples = slice(start_idx, start_idx+batchsize)
yield questions[examples], answers[examples], images[examples]
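# Note: iterate_minibatches yields aligned slices [0:batchsize), [batchsize:2*batchsize),
# and so on, and silently drops the final partial batch when the dataset size
# is not a multiple of batchsize.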
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-num_hidden_units', type=int, default=512)
parser.add_argument('-num_hidden_layers', type=int, default=2)
parser.add_argument('-dropout', type=float, default=0.5)
parser.add_argument('-activation', type=str, default='rectified')
parser.add_argument('-language_only', type=bool, default= False)
parser.add_argument('-num_epochs', type=int, default=20)
parser.add_argument('-model_save_interval', type=int, default=10)
parser.add_argument('-batch_size', type=int, default=128)
args = parser.parse_args()
questions_train = open('../Datasets/VQA/preprocessed/questions_train2014.txt', 'r').read().decode('utf8').splitlines()
answers_train = open('../Datasets/VQA/preprocessed/answers_train2014_top1000.txt', 'r').read().decode('utf8').splitlines()
images_train = open('../Datasets/VQA/preprocessed/images_train2014.txt', 'r').read().decode('utf8').splitlines()
vgg_model_path = '../Datasets/VQA/coco/vgg_feats.mat'
questions_val = open('../Datasets/VQA/preprocessed/questions_val2014.txt', 'r').read().decode('utf8').splitlines()
answers_val = open('../Datasets/VQA/preprocessed/answers2014_top1000.txt', 'r').read().decode('utf8').splitlines()
    images_val = open('../Datasets/VQA/preprocessed/images_val2014.txt', 'r').read().decode('utf8').splitlines()
maxAnswers = 1000
questions_train, answers_train, images_train = selectFrequentAnswers(questions_train,answers_train,images_train, maxAnswers)
labelencoder = preprocessing.LabelEncoder()
labelencoder.fit(answers_train)
num_classes = len(list(labelencoder.classes_))
features_struct = scipy.io.loadmat(vgg_model_path)
VGGfeatures = features_struct['feats']
print "Features loaded from VGG pretrained model"
img_ids = open('/home/mayank/visual_question_ans/features/coco_vgg_IDMap.txt').read().splitlines()
id_map = {}
for id_pair in img_ids:
id_pair_split = id_pair.split()
id_map[id_pair_split[0]] = int(id_pair_split[1])
# The nlp processor
nlp = English()
print 'loaded word2vec features...'
####################Variables###########
input_var = T.matrix('inputs')
target_var = T.matrix('targets')
#######################################
img_dim = 4096
word_vec_dim = 300
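    # Each example fed to the network below is the 4096-dim VGG image feature
    # concatenated with the 300-dim sum of the question's word vectors
    # (see the np.hstack calls in the training loop).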
####################Model############
nonlin = lasagne.nonlinearities.rectify ###Change activation here
depth = args.num_hidden_layers
num_hidden = args.num_hidden_units
#Input Layer
network = lasagne.layers.InputLayer(shape=(None, img_dim + word_vec_dim), input_var=input_var)
network = lasagne.layers.dropout(network, p=0.2)
#Hidden Layers
for num in range(depth):
network = lasagne.layers.DenseLayer(network, num_hidden, nonlinearity=nonlin)
network = lasagne.layers.dropout(network, p=0.5)
#Output Layer
softmax = lasagne.nonlinearities.softmax
network = lasagne.layers.DenseLayer(network, num_classes, nonlinearity=softmax)
print 'Model Assembled...'
###########################################################
###################Prediction and loss####################
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
val_prediction = lasagne.layers.get_output(network, deterministic=True)
val_loss = lasagne.objectives.categorical_crossentropy(val_prediction, target_var)
val_loss = val_loss.mean()
    val_acc = T.mean(T.eq(T.argmax(val_prediction, axis=1), T.argmax(target_var, axis=1)), dtype=theano.config.floatX)
###################################################
####################Updates##################################
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.momentum(loss, params, learning_rate=0.01)
##############################################################
#####Compilation#######
print 'compiling model....'
train_fn = theano.function([input_var, target_var], loss, updates=updates, allow_input_downcast=True)
    val_fn = theano.function([input_var, target_var], [val_loss, val_acc], allow_input_downcast=True)
print 'model compiled!'
###################Training######################################
print 'Training started.........'
for epoch in range(args.num_epochs):
print('Current Epoch:' + str(epoch))
##We shuffle the data before training
index_shuff = range(len(questions_train))
np.random.shuffle(index_shuff)
questions_train = [questions_train[i] for i in index_shuff]
answers_train = [answers_train[i] for i in index_shuff]
images_train = [images_train[i] for i in index_shuff]
train_err = 0
train_batches = 0
#start_time = time.time()
for ques_batch, ans_batch, img_batch in iterate_minibatches(questions_train, answers_train, images_train, args.batch_size):
X_ques_batch = get_questions_matrix_sum(ques_batch, nlp)
X_img_batch = get_images_matrix(img_batch, id_map, VGGfeatures)
X_batch = np.hstack((X_ques_batch, X_img_batch))
Y_batch = get_answers_matrix(ans_batch, labelencoder)
train_err += train_fn(X_batch, Y_batch)
train_batches += 1
if train_batches%100 == 0:
print train_batches
print float(train_err)/float(train_batches)
####Validation accuracy
val_err = 0
val_acc = 0
val_batches = 0
        for ques_val, ans_val, img_val in iterate_minibatches(questions_val, answers_val, images_val, 256):
            X_val = np.hstack((get_questions_matrix_sum(ques_val, nlp), get_images_matrix(img_val, id_map, VGGfeatures)))
            err, acc = val_fn(X_val, get_answers_matrix(ans_val, labelencoder))
            val_err += err
            val_acc += acc
            val_batches += 1
        print 'Validation loss: ' + str(float(val_err)/float(val_batches)) + ', accuracy: ' + str(float(val_acc)/float(val_batches))
if __name__ == "__main__":
main()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of functions to transforms schemas and objects that map to them."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import datetime
import itertools
import types
import urlparse
SIMPLE_TYPES = (int, long, float, bool, dict, basestring, list)
ISO_8601_DATE_FORMAT = '%Y-%m-%d'
ISO_8601_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
_LEGACY_DATE_FORMAT = '%Y/%m/%d'
_JSON_DATE_FORMATS = [
ISO_8601_DATE_FORMAT,
_LEGACY_DATE_FORMAT,
]
_JSON_DATETIME_FORMATS = [
ISO_8601_DATETIME_FORMAT
] + [
''.join(parts) for parts in itertools.product(
# Permutations of reasonably-expected permitted variations on ISO-8601.
# The first item in each tuple indicates the preferred choice.
_JSON_DATE_FORMATS,
('T', ' '),
('%H:%M:%S', '%H:%M'),
('.%f', ',%f', ''), # US/Euro decimal separator
('Z', ''), # Be explicit about Zulu timezone. Blank implies local.
)
]
JSON_TYPES = ['string', 'date', 'datetime', 'text', 'html',
'boolean', 'integer', 'number', 'array', 'object', 'timestamp']
def get_custom_serializer_for(value, custom_type_serializer=None):
if custom_type_serializer:
for custom_type, serializer in custom_type_serializer.iteritems():
if isinstance(value, custom_type):
return serializer
return None
def dict_to_json(
source_dict, custom_type_serializer=None, schema=None, recurse=False):
"""Converts Python dictionary into JSON dictionary using schema."""
output = {}
for key, value in source_dict.items():
if isinstance(value, dict) and recurse:
output[key] = dict_to_json(
value, custom_type_serializer=custom_type_serializer,
recurse=recurse)
elif value is None or isinstance(value, SIMPLE_TYPES):
output[key] = value
elif isinstance(value, datetime.datetime):
output[key] = value.strftime(ISO_8601_DATETIME_FORMAT)
elif isinstance(value, datetime.date):
output[key] = value.strftime(ISO_8601_DATE_FORMAT)
else:
custom = get_custom_serializer_for(value, custom_type_serializer)
if custom:
output[key] = custom(value)
else:
raise ValueError(
'Failed to encode key \'%s\' with value \'%s\'.' %
(key, value))
return output
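# Minimal usage sketch for dict_to_json (values are illustrative):
#
#   dict_to_json({'name': 'x', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5)})
#   # -> {'name': 'x', 'when': '2015-01-02T03:04:05.000000Z'}
#
# Values that are neither simple types nor dates raise ValueError unless a
# matching entry is supplied in custom_type_serializer.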
def _json_to_datetime(value, date_only=False):
if value is None:
return None
DNMF = 'does not match format'
if date_only:
formats = _JSON_DATE_FORMATS
else:
formats = _JSON_DATETIME_FORMATS
exception = None
for format_str in formats:
try:
value = datetime.datetime.strptime(value, format_str)
if date_only:
value = value.date()
return value
except ValueError as e:
            # Save the first exception so as to preserve the error message that
            # describes the most-preferred format. Exception: if the new error
            # message is something other than "does not match format" while the
            # saved one is DNMF, keep the new one, because anything other than
            # DNMF is more useful/informative.
if not exception or (DNMF not in str(e) and DNMF in str(exception)):
exception = e
# We cannot get here without an exception.
# The linter thinks we might still have 'None', but is mistaken.
# pylint: disable=raising-bad-type
raise exception
def _convert_bool(value, key):
if isinstance(value, types.NoneType):
return False
elif isinstance(value, bool):
return value
elif isinstance(value, basestring):
value = value.lower()
if value == 'true':
return True
elif value == 'false':
return False
raise ValueError('Bad boolean value for %s: %s' % (key, value))
def coerce_json_value(source, schema, debug_key):
data_type = schema['type']
if data_type not in JSON_TYPES:
raise ValueError('Unsupported JSON type: %s' % data_type)
if data_type == 'object':
return json_to_dict(source, schema)
elif data_type == 'datetime' or data_type == 'date':
return _json_to_datetime(source, data_type == 'date')
elif data_type == 'number':
return float(source)
elif data_type in ('integer', 'timestamp'):
return int(source) if source else 0
elif data_type == 'boolean':
return _convert_bool(source, debug_key)
elif data_type == 'array':
subschema = schema['items']
array = []
for item in source:
array.append(coerce_json_value(item, subschema, debug_key))
return array
else:
return source
def json_to_dict(source_dict, schema, permit_none_values=False):
"""Converts JSON dictionary into Python dictionary using schema."""
output = {}
for key, attr in schema['properties'].items():
# Skip schema elements that don't exist in source.
if key not in source_dict:
is_optional = _convert_bool(attr.get('optional'), 'optional')
if not is_optional:
raise ValueError('Missing required attribute: %s' % key)
continue
# TODO(jorr): Make the policy for None values clearer and more
# consistent. Note that some types (string and datetime) always accept
# None but others (integer) don't.
# Reifying from database may provide "null", which translates to
# None. As long as the field is optional (checked above), set
# value to None directly (skipping conversions below).
if permit_none_values and source_dict[key] is None:
output[key] = None
continue
output[key] = coerce_json_value(source_dict[key], attr, key)
return output
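# Minimal usage sketch for json_to_dict (schema and values are illustrative):
#
#   schema = {'properties': {'count': {'type': 'integer'},
#                            'note': {'type': 'string', 'optional': 'true'}}}
#   json_to_dict({'count': '3'}, schema)  # -> {'count': 3}
#
# A missing non-optional key raises ValueError; missing optional keys are
# simply skipped.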
def string_to_value(string, value_type):
"""Converts string representation to a value."""
if value_type == str:
if not string:
return ''
else:
return string
elif value_type == bool:
if string == '1' or string == 'True' or string == 1:
return True
else:
return False
elif value_type == int or value_type == long:
if not string:
return 0
else:
return long(string)
else:
raise ValueError('Unknown type: %s' % value_type)
def value_to_string(value, value_type):
"""Converts value to a string representation."""
if value_type == str:
return value
elif value_type == bool:
if value:
return 'True'
else:
return 'False'
elif value_type == int or value_type == long:
return str(value)
else:
raise ValueError('Unknown type: %s' % value_type)
def dict_to_instance(adict, instance, defaults=None):
"""Populates instance attributes using data dictionary."""
for key, unused_value in instance.__dict__.iteritems():
if not key.startswith('_'):
if key in adict:
setattr(instance, key, adict[key])
elif defaults and key in defaults:
setattr(instance, key, defaults[key])
else:
raise KeyError(key)
def validate_object_matches_json_schema(obj, schema, path='', complaints=None):
"""Check whether the given object matches a schema.
When building up a dict of contents which is supposed to match a declared
schema, human error often creeps in; it is easy to neglect to cast a number
to a floating point number, or an object ID to a string. This function
verifies the presence, type, and format of fields.
Note that it is not effective to verify sub-components that are scalars
or arrays, due to the way field names are (or rather, are not) stored
in the JSON schema layout.
Args:
obj: A dict containing contents that should match the given schema
schema: A dict describing a schema, as obtained from
FieldRegistry.get_json_schema_dict(). This parameter can also
be the 'properties' member of a JSON schema dict, as that sub-item
is commonly used in the REST data source subsystem.
path: Do not pass a value for this; it is used for internal recursion.
complaints: Either leave this blank or pass in an empty list. If
blank, the list of complaints is available as the return value.
If nonblank, the list of complaints will be appended to this list.
Either is fine, depending on your preferred style.
Returns:
        Array of verbose complaint strings. If the array is empty, the object
        validated without error.
"""
def is_valid_url(obj):
url = urlparse.urlparse(obj)
return url.scheme and url.netloc
def is_valid_date(obj):
try:
datetime.datetime.strptime(obj, ISO_8601_DATE_FORMAT)
return True
except ValueError:
return False
def is_valid_datetime(obj):
try:
datetime.datetime.strptime(obj, ISO_8601_DATETIME_FORMAT)
return True
except ValueError:
return False
if complaints is None:
complaints = []
if 'properties' in schema or isinstance(obj, dict):
if not path:
if 'id' in schema:
path = schema['id']
else:
path = '(root)'
if obj is None:
pass
elif not isinstance(obj, dict):
complaints.append('Expected a dict at %s, but had %s' % (
path, type(obj)))
else:
if 'properties' in schema:
schema = schema['properties']
for name, sub_schema in schema.iteritems():
validate_object_matches_json_schema(
obj.get(name), sub_schema, path + '.' + name, complaints)
for name in obj:
if name not in schema:
complaints.append('Unexpected member "%s" in %s' % (
name, path))
elif 'items' in schema:
if 'items' in schema['items']:
complaints.append('Unsupported: array-of-array at ' + path)
if obj is None:
pass
elif not isinstance(obj, (list, tuple)):
complaints.append('Expected a list or tuple at %s, but had %s' % (
path, type(obj)))
else:
for index, item in enumerate(obj):
item_path = path + '[%d]' % index
if item is None:
complaints.append('Found None at %s' % item_path)
else:
validate_object_matches_json_schema(
item, schema['items'], item_path, complaints)
else:
if obj is None:
if not schema.get('optional'):
complaints.append('Missing mandatory value at ' + path)
else:
expected_type = None
validator = None
if schema['type'] in ('string', 'text', 'html', 'file'):
expected_type = basestring
elif schema['type'] == 'url':
expected_type = basestring
validator = is_valid_url
elif schema['type'] in ('integer', 'timestamp'):
expected_type = (int, long)
            elif schema['type'] == 'number':
expected_type = float
            elif schema['type'] == 'boolean':
expected_type = bool
elif schema['type'] == 'date':
expected_type = basestring
validator = is_valid_date
elif schema['type'] == 'datetime':
expected_type = basestring
validator = is_valid_datetime
if expected_type:
if not isinstance(obj, expected_type):
complaints.append(
'Expected %s at %s, but instead had %s' % (
expected_type, path, type(obj)))
elif validator and not validator(obj):
complaints.append(
'Value "%s" is not well-formed according to %s' % (
str(obj), validator.__name__))
else:
complaints.append(
'Unrecognized schema scalar type "%s" at %s' % (
schema['type'], path))
return complaints
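# Minimal usage sketch (schema and object are illustrative):
#
#   schema = {'id': 'Thing', 'properties': {'name': {'type': 'string'},
#                                           'score': {'type': 'number'}}}
#   validate_object_matches_json_schema({'name': 'x', 'score': 1}, schema)
#   # -> one complaint about 'Thing.score' (an int where a float is expected);
#   #    passing float(1) instead validates cleanly and returns [].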
|
|
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404
from source.articles.models import Article, Section, Category
from source.code.models import Code
from source.guides.models import Guide
from source.jobs.models import Job
from source.tags.models import TechnologyTag, ConceptTag
from source.tags.utils import get_validated_tag_list, get_tag_filtered_queryset
from taggit.models import Tag
ONE_DAY_AGO = datetime.now() - timedelta(hours=24)
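# Note: ONE_DAY_AGO is computed once at import time, so in a long-running
# process the RecentArticleSummaryFeed cutoff is relative to when the module
# was loaded, not to the time of each request.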
class ObjectWithTagsFeed(Feed):
'''common get_object for Article and Code feeds to handle tag queries'''
def get_object(self, request, *args, **kwargs):
self.section = kwargs.get('section', None)
if self.section:
self.section = get_object_or_404(Section, slug=self.section)
self.category = kwargs.get('category', None)
if self.category:
self.category = get_object_or_404(Category, slug=self.category)
self.tag_slugs = kwargs.get('tag_slugs', None)
if self.tag_slugs:
self.tag_slug_list = self.tag_slugs.split('+')
self.tags = get_validated_tag_list(self.tag_slug_list, tags=[])
return ''
class ArticleFeed(ObjectWithTagsFeed):
description_template = "feeds/article_description.html"
def title(self, obj):
if self.section:
return "Source: %s" % self.section.name
elif self.category:
return "Source: Articles in the category %s" % self.category.name
elif self.tag_slugs:
return "Source: Articles tagged with '%s'" % "+".join([tag.name for tag in self.tags])
return "Source"
def link(self, obj):
if self.section:
return reverse('article_list')
#return reverse('article_list_by_section', kwargs={'section': self.section.slug})
elif self.category:
return reverse('article_list_by_category', kwargs={'category': self.category.slug})
elif self.tag_slugs:
return reverse('article_list_by_tag', kwargs={'tag_slugs': self.tag_slugs})
return reverse('homepage')
def description(self, obj):
identifier = 'from Source'
if self.section:
identifier = "in the %s section" % self.section.name
elif self.category:
identifier = "in the %s category" % self.category.name
elif self.tag_slugs:
identifier = "tagged with '%s'" % "+".join([tag.name for tag in self.tags])
return "Recent articles %s" % identifier
def item_title(self, item):
_title = item.title
# Alert anyone using an RSS feed on staging
if settings.DEBUG:
_title = "THIS IS A TEST ARTICLE ON THE STAGING SITE: " + _title
return _title
def item_pubdate(self, item):
return item.pubdate
def item_author_name(self, item):
if item.get_live_author_set().exists():
return ','.join([author.name() for author in item.get_live_author_set()])
return ''
def item_categories(self, item):
if item.category:
return [item.category.name]
return ''
def items(self, obj):
queryset = Article.live_objects.filter(show_in_lists=True)
if self.section:
queryset = queryset.filter(category__section=self.section)
elif self.category:
queryset = queryset.filter(category=self.category)
elif self.tag_slugs:
queryset = get_tag_filtered_queryset(queryset, self.tag_slug_list)
return queryset[:20]
class CodeFeed(ObjectWithTagsFeed):
def title(self, obj):
identifier = ""
if self.tag_slugs:
identifier = " tagged '%s'" % "+".join([tag.name for tag in self.tags])
return "Source: Code%s" % identifier
def link(self, obj):
if self.tag_slugs:
return reverse('code_list_by_tag', kwargs={'tag_slugs': self.tag_slugs})
return reverse('code_list')
def description(self, obj):
identifier = " from Source"
if self.tag_slugs:
identifier = " tagged '%s'" % "+".join([tag.name for tag in self.tags])
return "Recent code index pages%s" % identifier
def item_title(self, item):
_name = item.name
# Alert anyone using an RSS feed on staging
if settings.DEBUG:
_name = "THIS IS A TEST ENTRY ON THE STAGING SITE: " + _name
return _name
def item_description(self, item):
return item.description
def items(self, obj):
queryset = Code.live_objects.order_by('-created')
if self.tag_slugs:
queryset = get_tag_filtered_queryset(queryset, self.tag_slug_list)
return queryset[:20]
class JobFeed(Feed):
def title(self, obj):
return "Source: Jobs"
def link(self, obj):
return reverse('job_list')
def description(self, obj):
return 'Recent jobs listed on Source'
def item_title(self, item):
_name = item.name
# Alert anyone using an RSS feed on staging
if settings.DEBUG:
_name = "THIS IS A TEST ENTRY ON THE STAGING SITE: " + _name
return _name
def item_pubdate(self, item):
return item.modified
def item_description(self, item):
return 'Job posting from %s' % item.organization
def item_link(self, item):
'''
We don't have individual detail pages, so use item.url
or fall back to jobs list page.
'''
return item.url or reverse('job_list')
def items(self, obj):
queryset = Job.live_objects.order_by('-created')
return queryset[:20]
class GuideFeed(Feed):
def title(self, obj):
return "Source: Guides"
def link(self, obj):
return reverse('guide_list')
def description(self, obj):
return 'Recent guides from Source'
def item_title(self, item):
_name = item.title
# Alert anyone using an RSS feed on staging
if settings.DEBUG:
_name = "THIS IS A TEST ENTRY ON THE STAGING SITE: " + _name
return _name
def item_description(self, item):
return item.summary_or_description
def items(self, obj):
queryset = Guide.live_objects.order_by('-pubdate')
return queryset[:20]
class RecentArticleSummaryFeed(Feed):
description_template = "feeds/article_summary_only.html"
def title(self, obj):
return "Source: Latest Article Summaries"
def link(self, obj):
return reverse('article_list')
def description(self, obj):
return 'Recent articles from Source'
def item_title(self, item):
_name = item.title
# Alert anyone using an RSS feed on staging
if settings.DEBUG:
_name = "THIS IS A TEST ENTRY ON THE STAGING SITE: " + _name
return _name
def item_pubdate(self, item):
return item.pubdate
def item_author_name(self, item):
if item.get_live_author_set().exists():
return ','.join([author.name() for author in item.get_live_author_set()])
return ''
def item_description(self, item):
return item.safe_summary
def items(self, obj):
queryset = Article.live_objects.filter(show_in_lists=True)
queryset = queryset.filter(pubdate__gte=ONE_DAY_AGO)
return queryset
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Base API Library Tests"""
from requests_mock.contrib import fixture
from keystoneclient import session
from openstackclient.api import api
from openstackclient.common import exceptions
from openstackclient.tests import utils
RESP_ITEM_1 = {
'id': '1',
'name': 'alpha',
'status': 'UP',
}
RESP_ITEM_2 = {
'id': '2',
'name': 'beta',
'status': 'DOWN',
}
RESP_ITEM_3 = {
'id': '3',
'name': 'delta',
'status': 'UP',
}
LIST_RESP = [RESP_ITEM_1, RESP_ITEM_2]
LIST_BODY = {
'p1': 'xxx',
'p2': 'yyy',
}
class TestSession(utils.TestCase):
BASE_URL = 'https://api.example.com:1234/vX'
def setUp(self):
super(TestSession, self).setUp()
self.sess = session.Session()
self.requests_mock = self.useFixture(fixture.Fixture())
class TestKeystoneSession(TestSession):
def setUp(self):
super(TestKeystoneSession, self).setUp()
self.api = api.KeystoneSession(
session=self.sess,
endpoint=self.BASE_URL,
)
def test_session_request(self):
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz',
json=RESP_ITEM_1,
status_code=200,
)
ret = self.api._request('GET', '/qaz')
self.assertEqual(RESP_ITEM_1, ret.json())
class TestBaseAPI(TestSession):
def setUp(self):
super(TestBaseAPI, self).setUp()
self.api = api.BaseAPI(
session=self.sess,
endpoint=self.BASE_URL,
)
def test_create_post(self):
self.requests_mock.register_uri(
'POST',
self.BASE_URL + '/qaz',
json=RESP_ITEM_1,
status_code=202,
)
ret = self.api.create('qaz')
self.assertEqual(RESP_ITEM_1, ret)
def test_create_put(self):
self.requests_mock.register_uri(
'PUT',
self.BASE_URL + '/qaz',
json=RESP_ITEM_1,
status_code=202,
)
ret = self.api.create('qaz', method='PUT')
self.assertEqual(RESP_ITEM_1, ret)
def test_delete(self):
self.requests_mock.register_uri(
'DELETE',
self.BASE_URL + '/qaz',
status_code=204,
)
ret = self.api.delete('qaz')
self.assertEqual(204, ret.status_code)
# find tests
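    # As exercised below, find_attr queries the resource filtered by name and
    # falls back to filtering by id, while find_bulk fetches the whole list and
    # filters it client-side on the supplied keyword attributes.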
def test_find_attr_by_id(self):
# All first requests (by name) will fail in this test
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz?name=1',
json={'qaz': []},
status_code=200,
)
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz?id=1',
json={'qaz': [RESP_ITEM_1]},
status_code=200,
)
ret = self.api.find_attr('qaz', '1')
self.assertEqual(RESP_ITEM_1, ret)
# value not found
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz?name=0',
json={'qaz': []},
status_code=200,
)
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz?id=0',
json={'qaz': []},
status_code=200,
)
self.assertRaises(
exceptions.CommandError,
self.api.find_attr,
'qaz',
'0',
)
# Attribute other than 'name'
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz?status=UP',
json={'qaz': [RESP_ITEM_1]},
status_code=200,
)
ret = self.api.find_attr('qaz', 'UP', attr='status')
self.assertEqual(RESP_ITEM_1, ret)
ret = self.api.find_attr('qaz', value='UP', attr='status')
self.assertEqual(RESP_ITEM_1, ret)
def test_find_attr_by_name(self):
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz?name=alpha',
json={'qaz': [RESP_ITEM_1]},
status_code=200,
)
ret = self.api.find_attr('qaz', 'alpha')
self.assertEqual(RESP_ITEM_1, ret)
# value not found
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz?name=0',
json={'qaz': []},
status_code=200,
)
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz?id=0',
json={'qaz': []},
status_code=200,
)
self.assertRaises(
exceptions.CommandError,
self.api.find_attr,
'qaz',
'0',
)
# Attribute other than 'name'
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz?status=UP',
json={'qaz': [RESP_ITEM_1]},
status_code=200,
)
ret = self.api.find_attr('qaz', 'UP', attr='status')
self.assertEqual(RESP_ITEM_1, ret)
ret = self.api.find_attr('qaz', value='UP', attr='status')
self.assertEqual(RESP_ITEM_1, ret)
def test_find_attr_path_resource(self):
# Test resource different than path
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/wsx?name=1',
json={'qaz': []},
status_code=200,
)
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/wsx?id=1',
json={'qaz': [RESP_ITEM_1]},
status_code=200,
)
ret = self.api.find_attr('wsx', '1', resource='qaz')
self.assertEqual(RESP_ITEM_1, ret)
def test_find_bulk_none(self):
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz',
json=LIST_RESP,
status_code=200,
)
ret = self.api.find_bulk('qaz')
self.assertEqual(LIST_RESP, ret)
def test_find_bulk_one(self):
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz',
json=LIST_RESP,
status_code=200,
)
ret = self.api.find_bulk('qaz', id='1')
self.assertEqual([LIST_RESP[0]], ret)
ret = self.api.find_bulk('qaz', id='0')
self.assertEqual([], ret)
ret = self.api.find_bulk('qaz', name='beta')
self.assertEqual([LIST_RESP[1]], ret)
ret = self.api.find_bulk('qaz', error='bogus')
self.assertEqual([], ret)
def test_find_bulk_two(self):
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz',
json=LIST_RESP,
status_code=200,
)
ret = self.api.find_bulk('qaz', id='1', name='alpha')
self.assertEqual([LIST_RESP[0]], ret)
ret = self.api.find_bulk('qaz', id='1', name='beta')
self.assertEqual([], ret)
ret = self.api.find_bulk('qaz', id='1', error='beta')
self.assertEqual([], ret)
def test_find_bulk_dict(self):
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz',
json={'qaz': LIST_RESP},
status_code=200,
)
ret = self.api.find_bulk('qaz', id='1')
self.assertEqual([LIST_RESP[0]], ret)
# list tests
def test_list_no_body(self):
self.requests_mock.register_uri(
'GET',
self.BASE_URL,
json=LIST_RESP,
status_code=200,
)
ret = self.api.list('')
self.assertEqual(LIST_RESP, ret)
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz',
json=LIST_RESP,
status_code=200,
)
ret = self.api.list('qaz')
self.assertEqual(LIST_RESP, ret)
def test_list_params(self):
params = {'format': 'json'}
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '?format=json',
json=LIST_RESP,
status_code=200,
)
ret = self.api.list('', **params)
self.assertEqual(LIST_RESP, ret)
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz?format=json',
json=LIST_RESP,
status_code=200,
)
ret = self.api.list('qaz', **params)
self.assertEqual(LIST_RESP, ret)
def test_list_body(self):
self.requests_mock.register_uri(
'POST',
self.BASE_URL + '/qaz',
json=LIST_RESP,
status_code=200,
)
ret = self.api.list('qaz', body=LIST_BODY)
self.assertEqual(LIST_RESP, ret)
def test_list_detailed(self):
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz/details',
json=LIST_RESP,
status_code=200,
)
ret = self.api.list('qaz', detailed=True)
self.assertEqual(LIST_RESP, ret)
def test_list_filtered(self):
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz?attr=value',
json=LIST_RESP,
status_code=200,
)
ret = self.api.list('qaz', attr='value')
self.assertEqual(LIST_RESP, ret)
def test_list_wrapped(self):
self.requests_mock.register_uri(
'GET',
self.BASE_URL + '/qaz?attr=value',
json={'responses': LIST_RESP},
status_code=200,
)
ret = self.api.list('qaz', attr='value')
self.assertEqual({'responses': LIST_RESP}, ret)
|
|
# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from nose.tools import assert_raises
import gevent
from zerorpc import zmq
import zerorpc
from testutils import teardown, random_ipc_endpoint
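# The tests below all assemble the same channel stack on each side:
# Events (raw zmq socket) -> ChannelMultiplexer -> channel ->
# HeartBeatOnChannel(freq=2) -> BufferedChannel. The gevent.sleep(3) calls give
# the 2-second heartbeat time to notice that the peer has gone away.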
def test_close_server_bufchan():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.ROUTER)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=2)
client_bufchan = zerorpc.BufferedChannel(client_hbchan)
client_bufchan.emit('openthat', None)
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=2)
server_bufchan = zerorpc.BufferedChannel(server_hbchan)
server_bufchan.recv()
gevent.sleep(3)
print 'CLOSE SERVER SOCKET!!!'
server_bufchan.close()
with assert_raises(zerorpc.LostRemote):
client_bufchan.recv()
print 'CLIENT LOST SERVER :)'
client_bufchan.close()
server.close()
client.close()
def test_close_client_bufchan():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.ROUTER)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=2)
client_bufchan = zerorpc.BufferedChannel(client_hbchan)
client_bufchan.emit('openthat', None)
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=2)
server_bufchan = zerorpc.BufferedChannel(server_hbchan)
server_bufchan.recv()
gevent.sleep(3)
print 'CLOSE CLIENT SOCKET!!!'
client_bufchan.close()
with assert_raises(zerorpc.LostRemote):
server_bufchan.recv()
print 'SERVER LOST CLIENT :)'
server_bufchan.close()
server.close()
client.close()
def test_heartbeat_can_open_channel_server_close():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.ROUTER)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=2)
client_bufchan = zerorpc.BufferedChannel(client_hbchan)
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=2)
server_bufchan = zerorpc.BufferedChannel(server_hbchan)
gevent.sleep(3)
print 'CLOSE SERVER SOCKET!!!'
server_bufchan.close()
with assert_raises(zerorpc.LostRemote):
client_bufchan.recv()
print 'CLIENT LOST SERVER :)'
client_bufchan.close()
server.close()
client.close()
def test_heartbeat_can_open_channel_client_close():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.ROUTER)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=2)
client_bufchan = zerorpc.BufferedChannel(client_hbchan)
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=2)
server_bufchan = zerorpc.BufferedChannel(server_hbchan)
gevent.sleep(3)
print 'CLOSE CLIENT SOCKET!!!'
client_bufchan.close()
client.close()
with assert_raises(zerorpc.LostRemote):
server_bufchan.recv()
print 'SERVER LOST CLIENT :)'
server_bufchan.close()
server.close()
def test_do_some_req_rep():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.ROUTER)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=2)
client_bufchan = zerorpc.BufferedChannel(client_hbchan)
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=2)
server_bufchan = zerorpc.BufferedChannel(server_hbchan)
def client_do():
for x in xrange(20):
client_bufchan.emit('add', (x, x * x))
event = client_bufchan.recv()
assert event.name == 'OK'
assert event.args == (x + x * x,)
client_bufchan.close()
coro_pool = gevent.pool.Pool()
coro_pool.spawn(client_do)
def server_do():
for x in xrange(20):
event = server_bufchan.recv()
assert event.name == 'add'
server_bufchan.emit('OK', (sum(event.args),))
server_bufchan.close()
coro_pool.spawn(server_do)
coro_pool.join()
client.close()
server.close()
def test_do_some_req_rep_lost_server():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.ROUTER)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
def client_do():
print 'running'
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=2)
client_bufchan = zerorpc.BufferedChannel(client_hbchan)
for x in xrange(10):
client_bufchan.emit('add', (x, x * x))
event = client_bufchan.recv()
assert event.name == 'OK'
assert event.args == (x + x * x,)
client_bufchan.emit('add', (x, x * x))
with assert_raises(zerorpc.LostRemote):
event = client_bufchan.recv()
client_bufchan.close()
coro_pool = gevent.pool.Pool()
coro_pool.spawn(client_do)
def server_do():
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=2)
server_bufchan = zerorpc.BufferedChannel(server_hbchan)
for x in xrange(10):
event = server_bufchan.recv()
assert event.name == 'add'
server_bufchan.emit('OK', (sum(event.args),))
server_bufchan.close()
coro_pool.spawn(server_do)
coro_pool.join()
client.close()
server.close()
def test_do_some_req_rep_lost_client():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.ROUTER)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
def client_do():
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=2)
client_bufchan = zerorpc.BufferedChannel(client_hbchan)
for x in xrange(10):
client_bufchan.emit('add', (x, x * x))
event = client_bufchan.recv()
assert event.name == 'OK'
assert event.args == (x + x * x,)
client_bufchan.close()
coro_pool = gevent.pool.Pool()
coro_pool.spawn(client_do)
def server_do():
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=2)
server_bufchan = zerorpc.BufferedChannel(server_hbchan)
for x in xrange(10):
event = server_bufchan.recv()
assert event.name == 'add'
server_bufchan.emit('OK', (sum(event.args),))
with assert_raises(zerorpc.LostRemote):
event = server_bufchan.recv()
server_bufchan.close()
coro_pool.spawn(server_do)
coro_pool.join()
client.close()
server.close()
def test_do_some_req_rep_client_timeout():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.ROUTER)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
def client_do():
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=2)
client_bufchan = zerorpc.BufferedChannel(client_hbchan)
with assert_raises(zerorpc.TimeoutExpired):
for x in xrange(10):
client_bufchan.emit('sleep', (x,))
event = client_bufchan.recv(timeout=3)
assert event.name == 'OK'
assert event.args == (x,)
client_bufchan.close()
coro_pool = gevent.pool.Pool()
coro_pool.spawn(client_do)
def server_do():
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=2)
server_bufchan = zerorpc.BufferedChannel(server_hbchan)
with assert_raises(zerorpc.LostRemote):
for x in xrange(20):
event = server_bufchan.recv()
assert event.name == 'sleep'
gevent.sleep(event.args[0])
server_bufchan.emit('OK', event.args)
server_bufchan.close()
coro_pool.spawn(server_do)
coro_pool.join()
client.close()
server.close()
class CongestionError(Exception):
pass
def test_congestion_control_server_pushing():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.ROUTER)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=2)
client_bufchan = zerorpc.BufferedChannel(client_hbchan)
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=2)
server_bufchan = zerorpc.BufferedChannel(server_hbchan)
def client_do():
for x in xrange(200):
event = client_bufchan.recv()
assert event.name == 'coucou'
assert event.args == x
coro_pool = gevent.pool.Pool()
coro_pool.spawn(client_do)
def server_do():
with assert_raises(CongestionError):
for x in xrange(200):
if server_bufchan.emit('coucou', x, block=False) == False:
raise CongestionError() # will fail when x == 1
server_bufchan.emit('coucou', 1) # block until receiver is ready
with assert_raises(CongestionError):
for x in xrange(2, 200):
if server_bufchan.emit('coucou', x, block=False) == False:
raise CongestionError() # will fail when x == 100
for x in xrange(101, 200):
server_bufchan.emit('coucou', x) # block until receiver is ready
coro_pool.spawn(server_do)
coro_pool.join()
client_bufchan.close()
client.close()
server_bufchan.close()
server.close()
|
|
#!/usr/bin/env python
"""Linux client repackers."""
import io
import logging
import os
import shutil
import subprocess
import zipfile
from grr_response_client_builder import build
from grr_response_client_builder import build_helpers
from grr_response_core import config
from grr_response_core.lib import utils
class LinuxClientRepacker(build.ClientRepacker):
"""Repackage Linux templates."""
# TODO(user):pytype: incorrect shutil.move() definition in typeshed.
# pytype: disable=wrong-arg-types
def _GenerateDPKGFiles(self, template_path):
"""Generates the files needed by dpkg-buildpackage."""
fleetspeak_enabled = config.CONFIG.Get(
"Client.fleetspeak_enabled", context=self.context)
fleetspeak_bundled = config.CONFIG.Get(
"ClientBuilder.fleetspeak_bundled", context=self.context)
if fleetspeak_bundled and not fleetspeak_enabled:
raise build.BuildError("ClientBuilder.fleetspeak_bundled requires "
"Client.fleetspeak_enabled to be set.")
# Rename the generated binaries to the correct name.
template_binary_dir = os.path.join(template_path, "dist/debian/grr-client")
package_name = config.CONFIG.Get(
"ClientBuilder.package_name", context=self.context)
target_binary_dir = os.path.join(
template_path, "dist/debian/%s%s" %
(package_name,
config.CONFIG.Get("ClientBuilder.target_dir", context=self.context)))
if package_name == "grr-client":
# Need to rename the template path or the move will fail.
shutil.move(template_binary_dir, "%s-template" % template_binary_dir)
template_binary_dir = "%s-template" % template_binary_dir
utils.EnsureDirExists(os.path.dirname(target_binary_dir))
shutil.move(template_binary_dir, target_binary_dir)
shutil.move(
os.path.join(target_binary_dir, "grr-client"),
os.path.join(
target_binary_dir,
config.CONFIG.Get("Client.binary_name", context=self.context)))
deb_in_dir = os.path.join(template_path, "dist/debian/debian.in/")
if not os.path.isdir(deb_in_dir):
      # This is a universal (fleetspeak + legacy) template.
      # In prior versions, debian.in contained different files depending on
      # whether the template was fleetspeak-enabled or legacy.
if fleetspeak_enabled:
deb_in_dir = os.path.join(template_path,
"dist/debian/fleetspeak-debian.in/")
else:
deb_in_dir = os.path.join(template_path,
"dist/debian/legacy-debian.in/")
build_helpers.GenerateDirectory(
deb_in_dir,
os.path.join(template_path, "dist/debian"),
[("grr-client", package_name)],
context=self.context)
# Generate directories for the /usr/sbin link.
utils.EnsureDirExists(
os.path.join(template_path, "dist/debian/%s/usr/sbin" % package_name))
if os.path.exists(os.path.join(target_binary_dir, "wrapper.sh.in")):
build_helpers.GenerateFile(
os.path.join(target_binary_dir, "wrapper.sh.in"),
os.path.join(target_binary_dir, "wrapper.sh"),
context=self.context)
os.chmod(os.path.join(target_binary_dir, "wrapper.sh"), 0o755)
if fleetspeak_enabled:
if fleetspeak_bundled:
self._GenerateFleetspeakConfig(template_path,
"/etc/fleetspeak-client/textservices")
self._GenerateBundledFleetspeakFiles(
os.path.join(template_path, "dist/bundled-fleetspeak"),
os.path.join(template_path, "dist/debian", package_name))
shutil.copy(
config.CONFIG.Get(
"ClientBuilder.fleetspeak_client_config", context=self.context),
os.path.join(template_path, "dist", "debian", package_name,
"etc/fleetspeak-client/client.config"))
else:
fleetspeak_service_dir = config.CONFIG.Get(
"ClientBuilder.fleetspeak_service_dir", context=self.context)
self._GenerateFleetspeakConfig(template_path, fleetspeak_service_dir)
else:
# Generate the nanny template.
# This exists from client version 3.1.2.5 onwards.
build_helpers.GenerateFile(
os.path.join(target_binary_dir, "nanny.sh.in"),
os.path.join(target_binary_dir, "nanny.sh"),
context=self.context)
# Generate the upstart template.
build_helpers.GenerateFile(
os.path.join(template_path, "dist/debian/upstart.in/grr-client.conf"),
os.path.join(template_path, "dist/debian/%s.upstart" % package_name),
context=self.context)
# Generate the initd template. The init will not run if it detects upstart
# is present.
build_helpers.GenerateFile(
os.path.join(template_path, "dist/debian/initd.in/grr-client"),
os.path.join(template_path, "dist/debian/%s.init" % package_name),
context=self.context)
# Generate the systemd unit file.
build_helpers.GenerateFile(
os.path.join(template_path,
"dist/debian/systemd.in/grr-client.service"),
os.path.join(template_path, "dist/debian/%s.service" % package_name),
context=self.context)
# Clean up the template dirs.
# Some of the dirs might be missing in older template versions, so removing
# conditionally.
self._RmTreeIfExists(os.path.join(template_path, "dist/debian/debian.in"))
self._RmTreeIfExists(
os.path.join(template_path, "dist/debian/fleetspeak-debian.in"))
self._RmTreeIfExists(
os.path.join(template_path, "dist/debian/legacy-debian.in"))
self._RmTreeIfExists(os.path.join(template_path, "dist/debian/upstart.in"))
self._RmTreeIfExists(os.path.join(template_path, "dist/debian/initd.in"))
self._RmTreeIfExists(os.path.join(template_path, "dist/debian/systemd.in"))
self._RmTreeIfExists(os.path.join(template_path, "dist/fleetspeak"))
self._RmTreeIfExists(os.path.join(template_path, "dist/bundled-fleetspeak"))
def _RmTreeIfExists(self, path):
if os.path.exists(path):
shutil.rmtree(path)
# pytype: enable=wrong-arg-types
def _GenerateFleetspeakConfig(self, build_dir, dest_config_dir):
"""Generates a Fleetspeak config for GRR in the debian build dir."""
# We need to strip leading /'s or .join will ignore everything that comes
# before it.
dest_config_dir = dest_config_dir.lstrip("/")
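    # Illustrative note on the os.path.join behaviour guarded against above
    # (the paths in this example are hypothetical):
    #   os.path.join("/build", "/etc/fleetspeak")  -> "/etc/fleetspeak"
    #   os.path.join("/build", "etc/fleetspeak")   -> "/build/etc/fleetspeak"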
source_config = os.path.join(
build_dir, "dist", "fleetspeak",
os.path.basename(
config.CONFIG.Get(
"ClientBuilder.fleetspeak_config_path", context=self.context)))
dest_config = os.path.join(
build_dir, "dist", "debian",
config.CONFIG.Get("ClientBuilder.package_name", context=self.context),
dest_config_dir,
config.CONFIG.Get(
"Client.fleetspeak_unsigned_config_fname", context=self.context))
utils.EnsureDirExists(os.path.dirname(dest_config))
build_helpers.GenerateFile(
input_filename=source_config,
output_filename=dest_config,
context=self.context)
def _GenerateBundledFleetspeakFiles(self, src_dir, dst_dir):
files = [
"etc/fleetspeak-client/communicator.txt",
"lib/systemd/system/fleetspeak-client.service",
"usr/bin/fleetspeak-client",
]
for filename in files:
src = os.path.join(src_dir, filename)
dst = os.path.join(dst_dir, filename)
utils.EnsureDirExists(os.path.dirname(dst))
shutil.copy(src, dst)
def MakeDeployableBinary(self, template_path, output_path):
"""This will add the config to the client template and create a .deb."""
buildpackage_binary = "/usr/bin/dpkg-buildpackage"
if not os.path.exists(buildpackage_binary):
logging.error("dpkg-buildpackage not found, unable to repack client.")
return None
with utils.TempDirectory() as tmp_dir:
template_dir = os.path.join(tmp_dir, "dist")
utils.EnsureDirExists(template_dir)
zf = zipfile.ZipFile(template_path)
for name in zf.namelist():
dirname = os.path.dirname(name)
utils.EnsureDirExists(os.path.join(template_dir, dirname))
with io.open(os.path.join(template_dir, name), "wb") as fd:
fd.write(zf.read(name))
# Generate the dpkg files.
self._GenerateDPKGFiles(tmp_dir)
# Create a client config.
client_context = ["Client Context"] + self.context
client_config_content = build_helpers.GetClientConfig(client_context)
# We need to strip leading /'s or .join will ignore everything that comes
# before it.
target_dir = config.CONFIG.Get(
"ClientBuilder.target_dir", context=self.context).lstrip("/")
agent_dir = os.path.join(
template_dir, "debian",
config.CONFIG.Get("ClientBuilder.package_name", context=self.context),
target_dir)
with io.open(
os.path.join(
agent_dir,
config.CONFIG.Get(
"ClientBuilder.config_filename", context=self.context)),
"w",
encoding="utf-8") as fd:
fd.write(client_config_content)
# Set the daemon to executable.
os.chmod(
os.path.join(
agent_dir,
config.CONFIG.Get("Client.binary_name", context=self.context)),
0o755)
arch = config.CONFIG.Get("Template.arch", context=self.context)
try:
old_working_dir = os.getcwd()
except OSError:
old_working_dir = os.environ.get("HOME", "/tmp")
try:
os.chdir(template_dir)
command = [buildpackage_binary, "-uc", "-d", "-b", "-a%s" % arch]
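        # Note on the flags (as documented for dpkg-buildpackage): -uc skips
        # signing the .changes file, -d skips build-dependency checks, -b
        # builds binary packages only, and -a sets the target architecture.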
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if b"Failed to sign" not in e.output:
logging.error("Error calling %s.", command)
logging.error(e.output)
raise
filename_base = config.CONFIG.Get(
"ClientBuilder.debian_package_base", context=self.context)
output_base = config.CONFIG.Get(
"ClientRepacker.output_basename", context=self.context)
finally:
try:
os.chdir(old_working_dir)
except OSError:
pass
utils.EnsureDirExists(os.path.dirname(output_path))
for extension in [
".changes",
config.CONFIG.Get(
"ClientBuilder.output_extension", context=self.context)
]:
input_name = "%s%s" % (filename_base, extension)
output_name = "%s%s" % (output_base, extension)
# TODO(user):pytype: incorrect move() definition in typeshed.
# pytype: disable=wrong-arg-types
shutil.move(
os.path.join(tmp_dir, input_name),
os.path.join(os.path.dirname(output_path), output_name))
# pytype: enable=wrong-arg-types
logging.info("Created package %s", output_path)
return output_path
class CentosClientRepacker(LinuxClientRepacker):
"""Repackages Linux RPM templates."""
def _Sign(self, rpm_filename):
if self.signer:
return self.signer.AddSignatureToRPMs([rpm_filename])
def MakeDeployableBinary(self, template_path, output_path):
"""This will add the config to the client template and create a .rpm."""
rpmbuild_binary = "/usr/bin/rpmbuild"
if not os.path.exists(rpmbuild_binary):
logging.error("rpmbuild not found, unable to repack client.")
return None
with utils.TempDirectory() as tmp_dir:
template_dir = os.path.join(tmp_dir, "dist")
utils.EnsureDirExists(template_dir)
zf = zipfile.ZipFile(template_path)
for name in zf.namelist():
dirname = os.path.dirname(name)
utils.EnsureDirExists(os.path.join(template_dir, dirname))
with io.open(os.path.join(template_dir, name), "wb") as fd:
fd.write(zf.read(name))
self._ProcessUniversalTemplate(template_dir)
# Set up a RPM building environment.
rpm_root_dir = os.path.join(tmp_dir, "rpmbuild")
rpm_build_dir = os.path.join(rpm_root_dir, "BUILD")
utils.EnsureDirExists(rpm_build_dir)
rpm_buildroot_dir = os.path.join(rpm_root_dir, "BUILDROOT")
utils.EnsureDirExists(rpm_buildroot_dir)
rpm_rpms_dir = os.path.join(rpm_root_dir, "RPMS")
utils.EnsureDirExists(rpm_rpms_dir)
rpm_specs_dir = os.path.join(rpm_root_dir, "SPECS")
utils.EnsureDirExists(rpm_specs_dir)
template_binary_dir = os.path.join(tmp_dir, "dist/rpmbuild/grr-client")
target_binary_dir = "%s%s" % (
rpm_build_dir,
config.CONFIG.Get("ClientBuilder.target_dir", context=self.context))
utils.EnsureDirExists(os.path.dirname(target_binary_dir))
try:
shutil.rmtree(target_binary_dir)
except OSError:
pass
# TODO(user):pytype: incorrect move() definition in typeshed.
# pytype: disable=wrong-arg-types
shutil.move(template_binary_dir, target_binary_dir)
# pytype: enable=wrong-arg-types
client_name = config.CONFIG.Get("Client.name", context=self.context)
client_binary_name = config.CONFIG.Get(
"Client.binary_name", context=self.context)
if client_binary_name != "grr-client":
# TODO(user):pytype: incorrect move() definition in typeshed.
# pytype: disable=wrong-arg-types
shutil.move(
os.path.join(target_binary_dir, "grr-client"),
os.path.join(target_binary_dir, client_binary_name))
# pytype: enable=wrong-arg-types
if config.CONFIG.Get("Client.fleetspeak_enabled", context=self.context):
self._GenerateFleetspeakConfig(template_dir, rpm_build_dir)
if not config.CONFIG.Get(
"Client.fleetspeak_service_name", context=self.context):
# The Fleetspeak service name is required when generating the RPM
# spec file.
raise build.BuildError("Client.fleetspeak_service_name is not set.")
if config.CONFIG.Get(
"ClientBuilder.fleetspeak_bundled", context=self.context):
self._GenerateBundledFleetspeakFiles(
os.path.join(template_dir, "bundled-fleetspeak"), rpm_build_dir)
shutil.copy(
config.CONFIG.Get(
"ClientBuilder.fleetspeak_client_config",
context=self.context),
os.path.join(rpm_build_dir,
"etc/fleetspeak-client/client.config"))
else:
self._GenerateInitConfigs(template_dir, rpm_build_dir)
# Generate spec
spec_filename = os.path.join(rpm_specs_dir, "%s.spec" % client_name)
build_helpers.GenerateFile(
os.path.join(tmp_dir, "dist/rpmbuild/grr.spec.in"),
spec_filename,
context=self.context)
# Generate prelinking blacklist file
prelink_target_filename = os.path.join(rpm_build_dir,
"etc/prelink.conf.d",
"%s.conf" % client_name)
utils.EnsureDirExists(os.path.dirname(prelink_target_filename))
build_helpers.GenerateFile(
os.path.join(tmp_dir, "dist/rpmbuild/prelink_blacklist.conf.in"),
prelink_target_filename,
context=self.context)
# Create a client config.
client_context = ["Client Context"] + self.context
client_config_content = build_helpers.GetClientConfig(client_context)
with io.open(
os.path.join(
target_binary_dir,
config.CONFIG.Get(
"ClientBuilder.config_filename", context=self.context)),
"w",
encoding="utf-8") as fd:
fd.write(client_config_content)
# Set the daemon to executable.
os.chmod(os.path.join(target_binary_dir, client_binary_name), 0o755)
client_arch = config.CONFIG.Get("Template.arch", context=self.context)
if client_arch == "amd64":
client_arch = "x86_64"
# Create wrapper script
if os.path.exists(os.path.join(target_binary_dir, "wrapper.sh.in")):
build_helpers.GenerateFile(
os.path.join(target_binary_dir, "wrapper.sh.in"),
os.path.join(target_binary_dir, "wrapper.sh"),
context=self.context)
os.chmod(os.path.join(target_binary_dir, "wrapper.sh"), 0o755)
command = [
rpmbuild_binary, "--define", "_topdir " + rpm_root_dir, "--target",
client_arch, "--buildroot", rpm_buildroot_dir, "-bb", spec_filename
]
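      # Note on the flags (as documented for rpmbuild): --define overrides the
      # _topdir macro so the build stays inside tmp_dir, --target and
      # --buildroot pin the architecture and staging root, and -bb builds a
      # binary package from the spec without a source package.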
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logging.error("Error calling %s.", command)
logging.error(e.output)
raise
client_version = config.CONFIG.Get(
"Template.version_string", context=self.context)
rpm_filename = os.path.join(
rpm_rpms_dir, client_arch,
"%s-%s-1.%s.rpm" % (client_name, client_version, client_arch))
utils.EnsureDirExists(os.path.dirname(output_path))
shutil.move(rpm_filename, output_path)
logging.info("Created package %s", output_path)
self._Sign(output_path)
return output_path
def _GenerateFleetspeakConfig(self, template_dir, rpm_build_dir):
"""Generates a Fleetspeak config for GRR."""
source_config = os.path.join(
template_dir, "fleetspeak",
os.path.basename(
config.CONFIG.Get(
"ClientBuilder.fleetspeak_config_path", context=self.context)))
fleetspeak_service_dir = config.CONFIG.Get(
"ClientBuilder.fleetspeak_service_dir", context=self.context)
dest_config_dir = os.path.join(rpm_build_dir, fleetspeak_service_dir[1:])
utils.EnsureDirExists(dest_config_dir)
dest_config_path = os.path.join(
dest_config_dir,
config.CONFIG.Get(
"Client.fleetspeak_unsigned_config_fname", context=self.context))
build_helpers.GenerateFile(
input_filename=source_config,
output_filename=dest_config_path,
context=self.context)
def _GenerateInitConfigs(self, template_dir, rpm_build_dir):
"""Generates init-system configs."""
client_name = config.CONFIG.Get("Client.name", context=self.context)
initd_target_filename = os.path.join(rpm_build_dir, "etc/init.d",
client_name)
# Generate init.d
utils.EnsureDirExists(os.path.dirname(initd_target_filename))
build_helpers.GenerateFile(
os.path.join(template_dir, "rpmbuild/grr-client.initd.in"),
initd_target_filename,
context=self.context)
# Generate systemd unit
if config.CONFIG["Template.version_numeric"] >= 3125:
systemd_target_filename = os.path.join(rpm_build_dir,
"usr/lib/systemd/system/",
"%s.service" % client_name)
utils.EnsureDirExists(os.path.dirname(systemd_target_filename))
build_helpers.GenerateFile(
os.path.join(template_dir, "rpmbuild/grr-client.service.in"),
systemd_target_filename,
context=self.context)
def _ProcessUniversalTemplate(self, dist_dir):
    # A universal template contains both fleetspeak and legacy files.
    # If there is a legacy directory, then this is a universal template.
    # Depending on the config option, copy only one set of the files into
    # the tree.
if not os.path.exists(os.path.join(dist_dir, "legacy")):
return
if config.CONFIG.Get("Client.fleetspeak_enabled", context=self.context):
      # The top-level "fleetspeak" directory itself contains a "fleetspeak"
      # subdirectory, so rename the top-level one before merging to avoid a
      # collision.
shutil.move(
os.path.join(dist_dir, "fleetspeak"),
os.path.join(dist_dir, "_fleetspeak"))
utils.MergeDirectories(os.path.join(dist_dir, "_fleetspeak"), dist_dir)
else:
utils.MergeDirectories(os.path.join(dist_dir, "legacy"), dist_dir)
self._RmTreeIfExists(os.path.join(dist_dir, "legacy"))
self._RmTreeIfExists(os.path.join(dist_dir, "_fleetspeak"))
|
|
#!/usr/bin/env python
import logging
import tornado.auth
import tornado.escape
import tornado.ioloop
import tornado.options
import tornado.web
import os.path
import uuid
import json
import pprint
import math
import urllib
import urllib2
import bitly_api
from urllib2 import Request, urlopen, URLError
import os, shutil
from urllib import urlretrieve
import boto
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('/home/ubuntu/fz/conf/app.cfg')
AWS_ACCESS_KEY = config.get('aws', 'accesskey')
AWS_SECRET_KEY = config.get('aws', 'secretkey')
# ParsePy.APPLICATION_ID = config.get('parse', 'P_APP_ID')
# ParsePy.MASTER_KEY = config.get('parse', 'P_MASTER_KEY')
_convert = config.get('fz', 'convertQueueName')
_upload = config.get('fz', 'uploadQueueName')
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", LaunchHandler),
(r"/app", AppRedirect),
(r"/library", ApplicationHandler),
(r"/library/.*", ApplicationHandler),
(r"/complete/", UploadCompleteHandler),
(r"/approve/(.*)", ApproveConversionHandler),
(r"/roi/(.*)", EditFigureHandler),
(r"/ref/(.*)", EditRefFigureHandler),
(r"/edit/(.*)", ViewFigureHandler),
(r"/fz/(.*)", PublishHandler),
(r"/summary/(.*)", SummaryHandler),
(r"/api/(.*)", APIHandler),
(r"/doi/(.*)", DOIHandler),
(r"/json/(.*)", JSONHandler),
(r"/wall", WallHandler),
(r"/cut/", CUTHandler),
(r"/x/", X3Handler),
(r"/xtk/", XTKHandler),
(r"/fs/(.*)", FullScreenHandler),
(r"/b/", X3Handler),
(r"/c/", XTKEmbedHandler),
(r"/star/(.*)", StarHandler),
(r"/unstar/(.*)", UnStarHandler)
]
settings = dict(
debug=False,
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static")
)
tornado.web.Application.__init__(self, handlers, **settings)
class DemoHandler(tornado.web.RequestHandler):
def get(self):
self.render("demo.html", messages=None)
class AppRedirect(tornado.web.RequestHandler):
def get(self):
self.redirect('http://itunes.apple.com/us/app/figurezero/id566982640?mt=8')
# http://itunes.apple.com/us/app/figurezero/id566982640?mt=8
# This is the main application, available after the user signs in.
# From this app they can view their current figures and their status,
# and upload a figure.
class ApplicationHandler(tornado.web.RequestHandler):
def generate_post_form(self,bucket_name, key):
import hmac, datetime
from hashlib import sha1
expiration = datetime.datetime.utcnow() + datetime.timedelta(days=365)
policy = '''{"expiration": "%(expires)s","conditions": [{"bucket":"%(bucket)s"}, {"success_action_redirect": "http://figurezero.com/complete/"}, ["starts-with","$key","%(key)s"],{"acl":"private"}, ["starts-with","$x-amz-meta-tag",""], ["starts-with","$x-amz-meta-email",""], ["starts-with","$x-amz-meta-parse",""]]}'''
policy = policy%{
"expires": expiration.strftime("%Y-%m-%dT%H:%M:%SZ"), # This has to be formatted this way
"bucket": bucket_name,
"key": key,
}
encoded = policy.encode('utf-8').encode('base64').replace("\n","") # Here we base64 encode a UTF-8 version of our policy. Make sure there are no new lines, Amazon doesn't like them.
return ("https://%s.s3.amazonaws.com/"%(bucket_name),{
"policy":encoded,
"signature":hmac.new(AWS_SECRET_KEY,encoded,sha1).digest().encode("base64").replace("\n",""), # Generate the policy signature using our Amazon Secret Key
"key": key,
"AWSAccessKeyId": AWS_ACCESS_KEY, # Obviously the Amazon Access Key
"acl":"private",
"success_action_status":"200",
})
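    # Rough sketch of how the returned form is used (an assumption based on
    # the standard S3 browser-based POST upload flow, not something this
    # handler enforces): the page POSTs the file plus the hidden fields above
    # to the bucket URL; S3 recomputes the HMAC-SHA1 signature of the base64
    # policy with the account's secret key and accepts the upload only if the
    # signature matches and the policy conditions (bucket, key prefix, acl,
    # metadata) hold.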
def get(self):
import uuid
randomkey = uuid.uuid4()
_postdict = self.generate_post_form('figurezero', 'upload/%s'%randomkey)
self.render("app.html", postdict=_postdict)
################################################################################################################################
################################################################################################################################
################################################################################################################################
################################################################################################################################
################################################################################################################################
################################################################################################################################
class FZConverter(object):
"""docstring for FZConverter"""
def __init__(self):
super(FZConverter, self).__init__()
# self.arg = arg
def loadFromZoomifyURL(self, zoomify_url):
self.url_to_process = zoomify_url
self.url_type = 'zoomify'
self.tileSize = 256
print 'loaded URL: %s' % self.url_to_process
def getTileIndex(self, level, x, y):
"""
Get the zoomify index of a tile in a given level, at given co-ordinates
This is needed to get the tilegroup.
Keyword arguments:
level -- the zoomlevel of the tile
x,y -- the co-ordinates of the tile in that level
Returns -- the zoomify index
"""
index = x + y * int(math.ceil( math.floor(self.width/pow(2, self.number_of_zoom_levels - level - 1)) / self.tileSize ) )
for i in range(1, level+1):
index += int(math.ceil( math.floor(self.width /pow(2, self.number_of_zoom_levels - i)) / self.tileSize ) ) * \
int(math.ceil( math.floor(self.height/pow(2, self.number_of_zoom_levels - i)) / self.tileSize ) )
return index
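        # Note (based on how the index is used later in this class): Zoomify
        # stores at most 256 tiles per TileGroup directory, so callers divide
        # this index by 256 to pick the group, e.g. an index of 300 falls in
        # TileGroup1.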
def parseZoomify(self, verbose=False):
if self.url_type == 'local_zoomify':
image_property_path = '%sImageProperties.xml' % (self.path_to_process)
if verbose:
print 'loading properties from %s' % image_property_path
image_property_string = open(image_property_path, 'r').read()
splitline = image_property_string.split(' ')
if len(splitline) > 1:
for items in splitline:
if 'HEIGHT' in items:
self.height = int(items.split('"')[1])
if 'WIDTH' in items:
self.width = int(items.split('"')[1])
self.number_of_zoom_levels = int(max(math.ceil(math.log(self.width / 256, 2.0)), math.ceil(math.log(self.height / 256 , 2.0))) + 1)
if verbose:
print 'Found image with %d x %d pixels in size, expected maximum zoom: %d (not counting 0)' % (self.width, self.height, self.number_of_zoom_levels)
elif self.url_type == 'zoomify':
return self.parseZoomifyURL()
def parseZoomifyURL(self, verbose=True):
# assume we don't have height & width for the image, figure it out
print 'Processing zoomifyURL: %s' % (self.url_to_process)
zoomify_url = self.url_to_process
parse_successful = True
self.image_property_path = ''
# create image properties url
if 'ImageProperties' not in zoomify_url:
if verbose:
print "ImageProperties not in URL, adding"
self.image_property_path = '%sImageProperties.xml' % (zoomify_url)
else:
if verbose:
print "ImageProperties in URL, continue"
self.baseURL = zoomify_url.split('/ImageProperties')[0]
self.image_property_path = zoomify_url
print self.image_property_path
useOriginalParse = True
try:
from StringIO import StringIO
import gzip
remote_site_description = ''
request = urllib2.Request(self.image_property_path)
request.add_header('Accept-encoding', 'gzip')
response = urllib2.urlopen(request)
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO( response.read())
f = gzip.GzipFile(fileobj=buf)
remote_site_description = f.read()
print remote_site_description
if 'IMAGE_PROPERTIES' not in remote_site_description:
remote_site_description = urllib2.urlopen(self.image_property_path)
else:
useOriginalParse = False
        except URLError, e:
            print 'Unable to locate ImageProperties.xml at %s' % (self.image_property_path)
            print e.reason
            # Mark the parse as failed so the height/width parsing below is
            # skipped and the caller sees the failure.
            parse_successful = False
# print remote_site_description
if parse_successful:
# parse height and width from xml file of entire image
self.height = 0
self.width = 0
self.tileSize = 256
# print len(remote_site_description)
if useOriginalParse:
for line in remote_site_description:
splitline = line.split(' ')
if len(splitline) > 1:
for items in splitline:
if 'HEIGHT' in items:
self.height = int(items.split('"')[1])
if 'WIDTH' in items:
self.width = int(items.split('"')[1])
else:
# for line in remote_site_description:
splitline = remote_site_description.split(' ')
if len(splitline) > 1:
for items in splitline:
if 'HEIGHT' in items:
self.height = int(items.split('"')[1])
if 'WIDTH' in items:
self.width = int(items.split('"')[1])
self.number_of_zoom_levels = int(max(math.ceil(math.log(self.width / 256, 2.0)), math.ceil(math.log(self.height / 256 , 2.0))) + 1)
print 'Found image with %d x %d pixels in size, expected maximum zoom: %d (not counting 0)' % (self.width, self.height, self.number_of_zoom_levels)
return parse_successful
def extractRegionURL(self, region_array, output_size, annotate=True, verbose=False):
native_coordinate_array = region_array
output_coordinate_array = output_size
self.tile_extent = []
for i in range(0,int(self.number_of_zoom_levels)):
rc = {}
row_max = int( math.ceil( self.height / (256 * pow(2,i) )))+1
col_max = int( math.ceil( self.width / (256 * pow(2,i) )))+1
rc['row'] = row_max
rc['col'] = col_max
rc['tot'] = row_max * col_max
self.tile_extent.append(rc)
z_to_use = 0
for i in range(0,int(self.number_of_zoom_levels)):
i_min = int( math.floor( float(native_coordinate_array[0]) / (256 * pow(2,i) )))
i_max = int( math.floor( (float(native_coordinate_array[0]) + float(native_coordinate_array[2])) / (256 * pow(2,i) )))
j_min = int( ( math.floor( float(native_coordinate_array[1]) / (256 * pow(2,i) ))))
j_max = int( ( math.floor( (float(native_coordinate_array[1]) + float(native_coordinate_array[3])) / (256 * pow(2,i) ))))
if verbose:
print 'For at z: %d, there are %d cols and %d rows' % (self.number_of_zoom_levels - i, col_max, row_max)
print 'For at z: %d, there are %d pixels and %d pixels' % ( self.number_of_zoom_levels - i, col_max*256, row_max*256)
tilegroup = self.getTileIndex(self.number_of_zoom_levels - i - 1, i_min, j_min) / 256
print 'For at z: %d, the object starts at group %d, %d cols and %d rows,' % (self.number_of_zoom_levels - i, tilegroup, i_min, j_min)
print '\tTileGroup%d/%d-%d-%d.jpg' % (tilegroup, self.number_of_zoom_levels - i - 1, i_min, j_min)
tilegroup = self.getTileIndex(self.number_of_zoom_levels - i - 1, i_max, j_max) / 256
print 'For at z: %d, the object ends at group %d, %d cols and %d rows,' % (self.number_of_zoom_levels - i, tilegroup, i_max, j_max)
print '\tTileGroup%d/%d-%d-%d.jpg' % (tilegroup, self.number_of_zoom_levels - i - 1, i_max, j_max)
region_w = native_coordinate_array[2] / pow(2,i)
region_h = native_coordinate_array[3] / pow(2,i)
if verbose:
print 'At this zoom (%d), the capture region will be %f x %f' % (i, region_w, region_h)
if region_w < output_coordinate_array[0] or region_w < output_coordinate_array[1]:
if verbose:
print 'below output resolution'
pass
else:
z_to_use = i
if verbose:
print 'will use %d for capture' % (z_to_use)
i_min = int( math.floor( float(native_coordinate_array[0]) / (256 * pow(2,z_to_use) )))
i_max = int( math.floor( (float(native_coordinate_array[0]) + float(native_coordinate_array[2])) / (256 * pow(2,z_to_use) )))
j_min = int( ( math.floor( float(native_coordinate_array[1]) / (256 * pow(2,z_to_use) ))))
j_max = int( ( math.floor( (float(native_coordinate_array[1]) + float(native_coordinate_array[3])) / (256 * pow(2,z_to_use) ))))
if verbose:
tilegroup = self.getTileIndex(self.number_of_zoom_levels - z_to_use - 1, i_min, j_min) / 256
print 'For at z: %d, the object starts at group %d, %d cols and %d rows,' % (self.number_of_zoom_levels - i, tilegroup, i_min, j_min)
print '\tTileGroup%d/%d-%d-%d.jpg' % (tilegroup, self.number_of_zoom_levels - z_to_use - 1, i_min, j_min)
tilegroup = self.getTileIndex(self.number_of_zoom_levels - z_to_use - 1, i_max, j_max) / 256
print 'For at z: %d, the object ends at group %d, %d cols and %d rows,' % (self.number_of_zoom_levels - i, tilegroup, i_max, j_max)
print '\tTileGroup%d/%d-%d-%d.jpg' % (tilegroup, self.number_of_zoom_levels - z_to_use - 1, i_max, j_max)
region_w = native_coordinate_array[2] / pow(2,z_to_use)
region_h = native_coordinate_array[3] / pow(2,z_to_use)
if verbose:
print 'At this zoom, the capture region will be %f x %f' % (region_w, region_h)
tile_list_needed = []
# iterate across each col for a row
for i in range(i_min, i_max+1):
# list of tiles in this row
row_list = []
for j in range(j_min, j_max+1):
actual_offset = self.number_of_zoom_levels - z_to_use - 1
tilegroup = self.getTileIndex(self.number_of_zoom_levels - z_to_use - 1, i, j) / 256
# tileIndex = self.getTileIndex(int(actual_offset), i, j)
tile_url = '%sTileGroup%d/%d-%d-%d.jpg' % (self.url_to_process[0:-19], tilegroup, actual_offset, i, j)
tile_name = '%d-%d-%d.jpg' % (actual_offset, i, j)
row_list.append([tile_url, tile_name])
if os.path.exists(tile_url):
pass
else:
if verbose:
print 'FILE NOT FOUND -> this is good.'
pass
tile_list_needed.append(row_list)
# tmp_dir_name = '/tmp/' + str(uuid.uuid4())
tmp_dir_name = '/tmp/fztest/'
if os.path.exists(tmp_dir_name):
pass
else:
os.makedirs(tmp_dir_name)
# # os.mkdir(tmp_dir_name)
col_cmdstr = '/usr/bin/montage '
if verbose:
print col_cmdstr
row_ind = 0
for row_needed in tile_list_needed:
cmdstr = '/usr/bin/montage '
for col_needed in row_needed:
output_path = tmp_dir_name + '' + col_needed[1]
# if annotate:
# annotate_cmd = '''/usr/local/bin/convert /Users/stonerri/allenhack/figurezero/%s -pointsize 40 -draw "gravity center text 0,0 '%s'" %s ''' % (col_needed[0], col_needed[1], output_path)
# if verbose:
# print annotate_cmd
# pcmd = os.popen(annotate_cmd)
# for e in pcmd:
# if verbose:
# print e
# pass
# else:
# shutil.copy(col_needed[0], output_path)
print col_needed[0]
print output_path
urlretrieve(col_needed[0], output_path)
cmdstr += output_path + ' '
row_path = tmp_dir_name + 'row-%d.jpg' % (row_ind)
row_ind +=1
cmdstr += ' -geometry +0+0 -tile 1x%d %s' % (len(row_needed), row_path )
if verbose:
print cmdstr
pipe = os.popen(cmdstr)
for e in pipe:
if verbose:
print e
pass
col_cmdstr += row_path + ' '
prepath = tmp_dir_name + 'merge-'
final_path = prepath+'%d-%d_%d-%d_%d-%d.png' % (float(native_coordinate_array[0]), float(native_coordinate_array[1]), float(native_coordinate_array[2]), float(native_coordinate_array[3]), output_coordinate_array[0], output_coordinate_array[1])
# print final_path
col_cmdstr += ' -geometry +0+0 -tile %dx1 %s' % (len(tile_list_needed), final_path )
pipe = os.popen(col_cmdstr)
for e in pipe:
if verbose:
print e
pass
import glob
files_to_remove = glob.glob('%s/*.jpg' % tmp_dir_name)
for file_name in files_to_remove:
os.remove(file_name)
crop_origin_x = i_min*256
crop_origin_y = j_min*256
crop_bound_x = (i_max+1)*256
crop_bound_y = (j_max+1)*256
if verbose:
print 'base image origin: %d %d' % (crop_origin_x, crop_origin_y)
print 'base image bound: %d %d' % (crop_bound_x, crop_bound_y)
print 'base image dimensions: %d %d' % (crop_bound_x - crop_origin_x, crop_bound_y - crop_origin_y)
target_origin_x = native_coordinate_array[0] / pow(2,z_to_use)
target_origin_y = native_coordinate_array[1] / pow(2,z_to_use)
if verbose:
print 'target origin: %d %d' % (target_origin_x, target_origin_y)
print 'target size: %d %d' % (region_w, region_h)
print 'target bound: %d %d' % (target_origin_x + region_w, target_origin_y + region_h)
croppath = tmp_dir_name + 'crop-%d-%d_%d-%d_%d-%d.png' % (float(native_coordinate_array[0]), float(native_coordinate_array[1]), float(native_coordinate_array[2]), float(native_coordinate_array[3]), output_coordinate_array[0], output_coordinate_array[1])
crop_cmdstr = '/usr/bin/convert %s -crop %dx%d+%d+%d %s' % (final_path, region_w, region_h, target_origin_x - crop_origin_x, target_origin_y - crop_origin_y, croppath)
if verbose:
print crop_cmdstr
pipe = os.popen(crop_cmdstr)
for e in pipe:
pass
finalfinalpath = tmp_dir_name + 'final-%d-%d_%d-%d_%d-%d.jpg' % (float(native_coordinate_array[0]), float(native_coordinate_array[1]), float(native_coordinate_array[2]), float(native_coordinate_array[3]), output_coordinate_array[0], output_coordinate_array[1])
scale_cmdstr = '/usr/bin/convert %s -quality 80 -resize %dx%d %s' % (croppath, output_coordinate_array[0], output_coordinate_array[1], finalfinalpath)
if verbose:
print scale_cmdstr
pipe = os.popen(scale_cmdstr)
for e in pipe:
pass
return finalfinalpath
class CUTHandler(tornado.web.RequestHandler):
def prepare(self):
pass
def post(self):
jsondict = json.loads(self.request.body)
import pprint
pprint.pprint(jsondict)
fztest = FZConverter()
fztest.loadFromZoomifyURL(jsondict['ipxml'])
ipxarray = jsondict['ipxml'].split('/')
rootID = ipxarray[-3]
imageID = ipxarray[-2]
if fztest.parseZoomify():
s_array = jsondict['origin'] + jsondict['size']
in_array = []
out_array = jsondict['output']
for v in s_array:
in_array.append(int(v))
# print in_array
tmpfilename = fztest.extractRegionURL(in_array, out_array)
idstring = str(uuid.uuid4())
fulllocation= jsondict['path'] + idstring + '.jpg'
thumblocation= jsondict['path'] + 'th-' + idstring + '.jpg'
print tmpfilename
print fulllocation
cmdstr = '/usr/bin/convert %s -quality 80 %s' % (tmpfilename, fulllocation)
print cmdstr
pipe = os.popen(cmdstr)
for e in pipe:
print e
cmdstr = '/usr/bin/convert %s -quality 80 -resize 256x256 %s' % (tmpfilename, thumblocation)
print cmdstr
pipe = os.popen(cmdstr)
for e in pipe:
print e
# print 'success!'
viewurl = fulllocation[20:]
thumburl = thumblocation[20:]
s3path = 'tile/%s/%s/%s.jpg' % (rootID, imageID, idstring)
s3thumb = 'tile/%s/%s/th-%s.jpg' % (rootID, imageID, idstring)
from boto.s3.connection import S3Connection
from boto.s3.key import Key
self.s3conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
b = self.s3conn.create_bucket('fzero')
fk = Key(b)
fk.key = s3path
fk.set_contents_from_filename(fulllocation)
thk = Key(b)
thk.key = s3thumb
thk.set_contents_from_filename(thumblocation)
# viewURL= 'http://figurezero.com%s' % (viewurl)
viewURL = 'https://s3.amazonaws.com/fzero/%s' % (s3path)
thumbURL = 'https://s3.amazonaws.com/fzero/%s' % (s3thumb)
bitly = bitly_api.Connection('figurezero','BITLY_API')
shortenedURL = bitly.shorten(viewURL)
qrLinkURL = shortenedURL['url'] + '.qrcode?s=400'
json_dict = {}
json_dict['result'] = 'success'
# json_dict['viewfile'] = tmpfilename
# json_dict['viewthumb'] = thumblocation
json_dict['viewurl'] = viewURL
json_dict['thumburl'] = thumbURL
json_dict['bitlyurl'] = shortenedURL['url']
json_dict['qrURL'] = qrLinkURL
self.set_header("Content-Type", "text/json")
self.write(json.dumps(json_dict))
else:
print 'error of some sort'
################################################################################################################################
################################################################################################################################
################################################################################################################################
################################################################################################################################
class UploadCompleteHandler(tornado.web.RequestHandler):
def prepare(self):
from boto.sqs.connection import SQSConnection
self.conn = SQSConnection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
self._q = self.conn.create_queue('FZconvertQueue', 120)
from boto.s3.connection import S3Connection
self.s3conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
def get(self):
completeDebugDict = {}
# completeDebugDict['bucket'] = self.get_arguments('bucket')[0]
completeDebugDict['key'] = self.get_arguments('key')[0]
completeDebugDict['parseID'] = self.get_arguments('id')[0]
self._bucket = self.s3conn.create_bucket('figurezero')
k = self._bucket.get_key(completeDebugDict['key'])
rootID = completeDebugDict['key'].split('/')[1]
completeDebugDict['tag'] = k.get_metadata('tag')
completeDebugDict['rootID'] = rootID
# completeDebugDict['parseID'] = k.get_metadata('parse')
completeDebugDict['emailAddress'] = k.get_metadata('email')
completeDebugDict['size'] = k.size
from boto.sqs.message import Message
m = Message()
m.set_body(json.dumps(completeDebugDict))
status = self._q.write(m)
self.redirect('/approve/%s' % (completeDebugDict['parseID']))
class APIHandler(tornado.web.RequestHandler):
def get(self, input):
if input == 'tags.json':
json_array = ['#figurezero', '#sfn2012', '#sfn12']
self.set_header("Content-Type", "text/json")
self.write(json.dumps(json_array))
else:
self.set_header("Content-Type", "text/json")
self.write('[ ]')
class UnStarHandler(tornado.web.RequestHandler):
def get(self, input):
if len(input) > 0:
if len(input.split('/')) == 2:
import ParsePy
ParsePy.APPLICATION_ID = ""
ParsePy.MASTER_KEY = ""
print input
figid = input.split('/')[0]
userid = input.split('/')[1]
figureObject = ParsePy.ParseQuery("UploadObject").get(figid)
if figureObject:
try:
starArray = figureObject.starArray
if userid in starArray:
starArray.remove(userid)
figureObject.starArray = starArray
figureObject.save()
json_dict = {}
json_dict['result'] = 'unstarred figure'
json_dict['starcount'] = len(starArray)
self.set_header("Content-Type", "text/json")
self.write(json.dumps(json_dict))
else:
                            # figure was not starred by this user
json_dict = {}
json_dict['result'] = 'not starred'
json_dict['starcount'] = len(starArray)
self.set_header("Content-Type", "text/json")
self.write(json.dumps(json_dict))
except:
json_dict = {}
json_dict['result'] = 'star array not found'
json_dict['starcount'] = 0
self.set_header("Content-Type", "text/json")
self.write(json.dumps(json_dict))
else:
json_dict = {}
json_dict['result'] = 'figureObject not found.'
self.set_header("Content-Type", "text/json")
self.write(json.dumps(json_dict))
else:
json_dict = {}
json_dict['result'] = 'incorrect formatting'
self.set_header("Content-Type", "text/json")
self.write(json.dumps(json_dict))
else:
json_dict = {}
json_dict['result'] = 'Length cannot be zero'
self.set_header("Content-Type", "text/json")
self.write(json.dumps(json_dict))
class StarHandler(tornado.web.RequestHandler):
def get(self, input):
if len(input) > 0:
if len(input.split('/')) == 2:
import ParsePy
ParsePy.APPLICATION_ID = ""
ParsePy.MASTER_KEY = ""
print input
figid = input.split('/')[0]
userid = input.split('/')[1]
figureObject = ParsePy.ParseQuery("UploadObject").get(figid)
if figureObject:
try:
starArray = figureObject.starArray
if userid not in starArray:
starArray.append(userid)
figureObject.starArray = starArray
figureObject.save()
json_dict = {}
json_dict['result'] = 'starred figure'
json_dict['starcount'] = len(starArray)
self.set_header("Content-Type", "text/json")
self.write(json.dumps(json_dict))
else:
# figure already starred by this user
json_dict = {}
json_dict['result'] = 'already starred'
json_dict['starcount'] = len(starArray)
self.set_header("Content-Type", "text/json")
self.write(json.dumps(json_dict))
except:
starArray = []
starArray.append(userid)
figureObject.starArray = starArray
figureObject.save()
json_dict = {}
json_dict['result'] = 'created star array'
json_dict['starcount'] = len(starArray)
self.set_header("Content-Type", "text/json")
self.write(json.dumps(json_dict))
else:
json_dict = {}
json_dict['result'] = 'figureObject not found.'
self.set_header("Content-Type", "text/json")
self.write(json.dumps(json_dict))
else:
json_dict = {}
json_dict['result'] = 'incorrect formatting'
self.set_header("Content-Type", "text/json")
self.write(json.dumps(json_dict))
else:
json_dict = {}
json_dict['result'] = 'Length cannot be zero'
self.set_header("Content-Type", "text/json")
self.write(json.dumps(json_dict))
class DOIHandler(tornado.web.RequestHandler):
def get(self, input):
if len(input) > 0:
import urllib2
req = urllib2.Request(input)
req.add_header('Accept', 'application/citeproc+json')
resp = urllib2.urlopen(req)
self.write(resp.read())
class JSONHandler(tornado.web.RequestHandler):
def get(self, input):
if len(input) > 0:
import ParsePy
ParsePy.APPLICATION_ID = ""
ParsePy.MASTER_KEY = ""
print input
figureObject = ParsePy.ParseQuery("UploadObject").get(input)
if figureObject:
# print figureObject._getJSONProperties()
jsondict = json.loads(figureObject._getJSONProperties())
import pprint
pprint.pprint(jsondict)
if figureObject.published:
self.set_header("Content-Type", "text/json")
self.write(json.dumps(jsondict, indent=4))
# self.write(pprint.pprint(jsondict))
else:
returndict = {}
returndict['status'] = 'Figure not public.'
self.write(json.dumps(returndict))
else:
returndict = {}
returndict['status'] = 'Figure not found'
self.write(json.dumps(returndict))
else:
returndict = {}
returndict['status'] = 'Invalid ID'
self.write(json.dumps(returndict))
# import urllib2
# req = urllib2.Request(input)
# req.add_header('Accept', 'application/citeproc+json')
# resp = urllib2.urlopen(req)
# self.write(resp.read())
class WallHandler(tornado.web.RequestHandler):
def prepare(self):
pass
def get(self):
        self.render('wall.html')
class PublishHandler(tornado.web.RequestHandler):
# do anything that needs preparation behind scenes here
def prepare(self):
pass
def get(self, inputarg):
if len(inputarg) > 3:
self.render("publish.html", messages=inputarg.split('/')[0])
else:
self.redirect('/')
class SummaryHandler(tornado.web.RequestHandler):
# do anything that needs preparation behind scenes here
def prepare(self):
pass
def get(self, inputarg):
if len(inputarg) > 3:
self.render("summary.html", messages=inputarg)
else:
self.redirect('/')
class AllHandler(tornado.web.RequestHandler):
# do anything that needs preparation behind scenes here
def prepare(self):
pass
def get(self, inputarg):
if len(inputarg) > 3:
self.render("all.html", messages=inputarg)
else:
self.redirect('/')
class SearchHandler(tornado.web.RequestHandler):
def prepare(self):
pass
def get(self, inputarg):
if len(inputarg) > 3:
self.render("search.html", messages=inputarg)
else:
self.redirect('/')
class ApproveConversionHandler(tornado.web.RequestHandler):
# do anything that needs preparation behind scenes here
def prepare(self):
pass
def get(self, inputarg):
if len(inputarg) > 3:
self.render("approve.html", messages=inputarg)
else:
self.redirect('/')
class LaunchHandler(tornado.web.RequestHandler):
# do anything that needs preparation behind scenes here
def prepare(self):
pass
def get(self):
self.render("launch.html")
class X3Handler(tornado.web.RequestHandler):
# do anything that needs preparation behind scenes here
def prepare(self):
pass
def get(self):
self.render("x3dtest.html")
class XTKHandler(tornado.web.RequestHandler):
# do anything that needs preparation behind scenes here
def prepare(self):
pass
def get(self, inputarg):
if len(inputarg) > 3:
self.render("publish.html", messages=inputarg.split('/')[0])
else:
self.redirect('/')
class FullScreenHandler(tornado.web.RequestHandler):
# do anything that needs preparation behind scenes here
def prepare(self):
pass
def get(self, inputarg):
if len(inputarg) > 3:
self.render("fullscreen.html", messages=inputarg.split('/')[0])
else:
self.redirect('/')
class XTKEmbedHandler(tornado.web.RequestHandler):
# do anything that needs preparation behind scenes here
def prepare(self):
pass
def get(self):
self.render("xtkembed.html")
class EditFigureHandler(tornado.web.RequestHandler):
# do anything that needs preparation behind scenes here
def prepare(self):
pass
def get(self, inputarg):
if len(inputarg) > 3:
self.render("editView.html", messages=inputarg)
else:
self.redirect('/')
class EditRefFigureHandler(tornado.web.RequestHandler):
# do anything that needs preparation behind scenes here
def prepare(self):
pass
def get(self, inputarg):
if len(inputarg) > 3:
self.render("editRef.html", messages=inputarg)
else:
self.redirect('/')
class ViewFigureHandler(tornado.web.RequestHandler):
# do anything that needs preparation behind scenes here
def prepare(self):
pass
def get(self, inputarg):
if len(inputarg) > 3:
self.render("preview.html", messages=inputarg)
else:
self.redirect('/')
def main():
tornado.options.parse_command_line()
app = Application()
app.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for learning rate decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import learning_rate_decay
class LRDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testContinuous(self):
self.evaluate(variables.global_variables_initializer())
step = 5
decayed_lr = learning_rate_decay.exponential_decay(0.05, step, 10, 0.96)
expected = .05 * 0.96**(5.0 / 10.0)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testStaircase(self):
if context.executing_eagerly():
step = resource_variable_ops.ResourceVariable(0)
self.evaluate(variables.global_variables_initializer())
decayed_lr = learning_rate_decay.exponential_decay(
.1, step, 3, 0.96, staircase=True)
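      # With staircase=True the exponent uses integer division of the step by
      # the decay period (here step // 3), so the rate is constant within each
      # 3-step interval; the expectations below restate that.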
# No change to learning rate due to staircase
expected = .1
self.evaluate(step.assign(1))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
expected = .1
self.evaluate(step.assign(2))
self.assertAllClose(self.evaluate(decayed_lr), .1, 1e-6)
# Decayed learning rate
expected = .1 * 0.96 ** (100 // 3)
self.evaluate(step.assign(100))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testVariables(self):
with self.cached_session():
step = variables.VariableV1(1)
assign_1 = step.assign(1)
assign_2 = step.assign(2)
assign_100 = step.assign(100)
decayed_lr = learning_rate_decay.exponential_decay(.1, step, 3, 0.96,
staircase=True)
variables.global_variables_initializer().run()
# No change to learning rate
assign_1.op.run()
self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
assign_2.op.run()
self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
# Decayed learning rate
assign_100.op.run()
expected = .1 * 0.96 ** (100 // 3)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testPiecewiseConstant(self):
x = resource_variable_ops.ResourceVariable(-999)
decayed_lr = learning_rate_decay.piecewise_constant(
x, [100, 110, 120], [1.0, 0.1, 0.01, 0.001])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(decayed_lr), 1.0, 1e-6)
self.evaluate(x.assign(100))
self.assertAllClose(self.evaluate(decayed_lr), 1.0, 1e-6)
self.evaluate(x.assign(105))
self.assertAllClose(self.evaluate(decayed_lr), 0.1, 1e-6)
self.evaluate(x.assign(110))
self.assertAllClose(self.evaluate(decayed_lr), 0.1, 1e-6)
self.evaluate(x.assign(120))
self.assertAllClose(self.evaluate(decayed_lr), 0.01, 1e-6)
self.evaluate(x.assign(999))
self.assertAllClose(self.evaluate(decayed_lr), 0.001, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testPiecewiseConstantEdgeCases(self):
x_int = resource_variable_ops.ResourceVariable(
0, dtype=variables.dtypes.int32)
boundaries, values = [-1.0, 1.0], [1, 2, 3]
with self.assertRaises(ValueError):
decayed_lr = learning_rate_decay.piecewise_constant(
x_int, boundaries, values)
if context.executing_eagerly():
decayed_lr()
x = resource_variable_ops.ResourceVariable(0.0)
boundaries, values = [-1.0, 1.0], [1.0, 2, 3]
with self.assertRaises(ValueError):
decayed_lr = learning_rate_decay.piecewise_constant(
x, boundaries, values)
if context.executing_eagerly():
decayed_lr()
# Test that ref types are valid.
if not context.executing_eagerly():
x = variables.VariableV1(0.0)
x_ref = x.op.outputs[0] # float32_ref tensor should be accepted
boundaries, values = [1.0, 2.0], [1, 2, 3]
learning_rate_decay.piecewise_constant(x_ref, boundaries, values)
# Test casting boundaries from int32 to int64.
x_int64 = resource_variable_ops.ResourceVariable(
0, dtype=variables.dtypes.int64)
boundaries, values = [1, 2, 3], [0.4, 0.5, 0.6, 0.7]
decayed_lr = learning_rate_decay.piecewise_constant(
x_int64, boundaries, values)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(decayed_lr), 0.4, 1e-6)
self.evaluate(x_int64.assign(1))
self.assertAllClose(self.evaluate(decayed_lr), 0.4, 1e-6)
self.evaluate(x_int64.assign(2))
self.assertAllClose(self.evaluate(decayed_lr), 0.5, 1e-6)
self.evaluate(x_int64.assign(3))
self.assertAllClose(self.evaluate(decayed_lr), 0.6, 1e-6)
self.evaluate(x_int64.assign(4))
self.assertAllClose(self.evaluate(decayed_lr), 0.7, 1e-6)
class LinearDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testHalfWay(self):
step = 5
lr = 0.05
end_lr = 0.0
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = lr * 0.5
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testEnd(self):
step = 10
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testHalfWayWithEnd(self):
step = 5
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = (lr + end_lr) * 0.5
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testBeyondEnd(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testBeyondEndWithCycle(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, cycle=True)
expected = (lr - end_lr) * 0.25 + end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class SqrtDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testHalfWay(self):
step = 5
lr = 0.05
end_lr = 0.0
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power)
expected = lr * 0.5**power
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testEnd(self):
step = 10
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testHalfWayWithEnd(self):
step = 5
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power)
expected = (lr - end_lr) * 0.5**power + end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testBeyondEnd(self):
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testBeyondEndWithCycle(self):
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power, cycle=True)
expected = (lr - end_lr) * 0.25**power + end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class PolynomialDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testBeginWithCycle(self):
lr = 0.001
decay_steps = 10
step = 0
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, decay_steps, cycle=True)
expected = lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class ExponentialDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = resource_variable_ops.ResourceVariable(0)
decayed_lr = learning_rate_decay.natural_exp_decay(initial_lr, step, k,
decay_rate)
self.evaluate(variables.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr * math.exp(-i / k * decay_rate)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
@test_util.run_in_graph_and_eager_modes
def testStaircase(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = resource_variable_ops.ResourceVariable(0)
decayed_lr = learning_rate_decay.natural_exp_decay(
initial_lr, step, k, decay_rate, staircase=True)
self.evaluate(variables.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr * math.exp(-decay_rate * (i // k))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
class InverseDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = resource_variable_ops.ResourceVariable(0)
decayed_lr = learning_rate_decay.inverse_time_decay(initial_lr, step, k,
decay_rate)
self.evaluate(variables.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr / (1 + i / k * decay_rate)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
@test_util.run_in_graph_and_eager_modes
def testStaircase(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = resource_variable_ops.ResourceVariable(0)
decayed_lr = learning_rate_decay.inverse_time_decay(
initial_lr, step, k, decay_rate, staircase=True)
self.evaluate(variables.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr / (1 + decay_rate * (i // k))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
class CosineDecayTest(test_util.TensorFlowTestCase):
def np_cosine_decay(self, step, decay_steps, alpha=0.0):
step = min(step, decay_steps)
completed_fraction = step / decay_steps
decay = 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
return (1.0 - alpha) * decay + alpha
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay(initial_lr, step,
num_training_steps)
expected = self.np_cosine_decay(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testAlpha(self):
num_training_steps = 1000
initial_lr = 1.0
alpha = 0.1
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay(initial_lr, step,
num_training_steps, alpha)
expected = self.np_cosine_decay(step, num_training_steps, alpha)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class CosineDecayRestartsTest(test_util.TensorFlowTestCase):
def np_cosine_decay_restarts(self, step, decay_steps, t_mul=2.0, m_mul=1.0,
alpha=0.0):
fac = 1.0
while step >= decay_steps:
step -= decay_steps
decay_steps *= t_mul
fac *= m_mul
completed_fraction = step / decay_steps
decay = fac * 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
return (1.0 - alpha) * decay + alpha
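  # Worked example of the helper above (illustrative arithmetic only): with
  # decay_steps=1000 and the defaults t_mul=2.0, m_mul=1.0, a step of 1250
  # first completes one period (step -> 250, decay_steps -> 2000, fac stays
  # 1.0), so completed_fraction = 250 / 2000 = 0.125 and
  # decay = 0.5 * (1 + cos(pi * 0.125)) ~= 0.962.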
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay_restarts(
initial_lr, step, num_training_steps)
expected = self.np_cosine_decay_restarts(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testAlpha(self):
num_training_steps = 1000
initial_lr = 1.0
alpha = 0.1
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay_restarts(
initial_lr, step, num_training_steps, alpha=alpha)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, alpha=alpha)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testMMul(self):
num_training_steps = 1000
initial_lr = 1.0
m_mul = 0.9
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay_restarts(
initial_lr, step, num_training_steps, m_mul=m_mul)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, m_mul=m_mul)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testTMul(self):
num_training_steps = 1000
initial_lr = 1.0
t_mul = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay_restarts(
initial_lr, step, num_training_steps, t_mul=t_mul)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, t_mul=t_mul)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class LinearCosineDecayTest(test_util.TensorFlowTestCase):
def np_linear_cosine_decay(self,
step,
decay_steps,
alpha=0.0,
beta=0.001,
num_periods=0.5):
step = min(step, decay_steps)
linear_decayed = float(decay_steps - step) / decay_steps
fraction = 2.0 * num_periods * step / float(decay_steps)
cosine_decayed = 0.5 * (1.0 + math.cos(math.pi * fraction))
return (alpha + linear_decayed) * cosine_decayed + beta
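  # Worked example of the helper above (illustrative only): at step=500 with
  # decay_steps=1000 and the defaults alpha=0.0, beta=0.001, num_periods=0.5,
  # linear_decayed = 0.5, fraction = 2 * 0.5 * 500 / 1000 = 0.5,
  # cosine_decayed = 0.5 * (1 + cos(pi * 0.5)) = 0.5, and the result is
  # (0.0 + 0.5) * 0.5 + 0.001 = 0.251.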
@test_util.run_in_graph_and_eager_modes
def testDefaultDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.linear_cosine_decay(
initial_lr, step, num_training_steps)
expected = self.np_linear_cosine_decay(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testNonDefaultDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.linear_cosine_decay(
initial_lr,
step,
num_training_steps,
alpha=0.1,
beta=1e-4,
num_periods=5)
expected = self.np_linear_cosine_decay(
step, num_training_steps, alpha=0.1, beta=1e-4, num_periods=5)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class NoisyLinearCosineDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDefaultNoisyLinearCosine(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
# No numerical check because of noise
decayed_lr = learning_rate_decay.noisy_linear_cosine_decay(
initial_lr, step, num_training_steps)
# Cannot be deterministically tested
self.evaluate(decayed_lr)
@test_util.run_in_graph_and_eager_modes
def testNonDefaultNoisyLinearCosine(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
# No numerical check because of noise
decayed_lr = learning_rate_decay.noisy_linear_cosine_decay(
initial_lr,
step,
num_training_steps,
initial_variance=0.5,
variance_decay=0.1,
alpha=0.1,
beta=1e-4,
num_periods=5)
# Cannot be deterministically tested
self.evaluate(decayed_lr)
if __name__ == "__main__":
googletest.main()
|
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Service Manager
"""
import datetime
import time
from oslo.config import cfg
from oslo import messaging as oslo_messaging
from nova.cells import messaging
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova import context
from nova import exception
from nova import manager
from nova.objects import instance as instance_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import periodic_task
from nova.openstack.common import timeutils
cell_manager_opts = [
cfg.StrOpt('driver',
default='nova.cells.rpc_driver.CellsRPCDriver',
help='Cells communication driver to use'),
cfg.IntOpt("instance_updated_at_threshold",
default=3600,
help="Number of seconds after an instance was updated "
"or deleted to continue to update cells"),
cfg.IntOpt("instance_update_num_instances",
default=1,
help="Number of instances to update per periodic task run")
]
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_manager_opts, group='cells')
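# Illustrative nova.conf snippet for the options above (the cell name is an
# assumed value; the remaining values simply restate the defaults registered
# here):
#
#   [cells]
#   name = api-cell
#   driver = nova.cells.rpc_driver.CellsRPCDriver
#   instance_updated_at_threshold = 3600
#   instance_update_num_instances = 1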
LOG = logging.getLogger(__name__)
class CellsManager(manager.Manager):
"""The nova-cells manager class. This class defines RPC
methods that the local cell may call. This class is NOT used for
messages coming from other cells. That communication is
driver-specific.
Communication to other cells happens via the nova.cells.messaging module.
The MessageRunner from that module will handle routing the message to
the correct cell via the communications driver. Most methods below
create 'targeted' (where we want to route a message to a specific cell)
or 'broadcast' (where we want a message to go to multiple cells)
messages.
Scheduling requests get passed to the scheduler class.
"""
target = oslo_messaging.Target(version='1.27')
def __init__(self, *args, **kwargs):
LOG.warn(_('The cells feature of Nova is considered experimental '
'by the OpenStack project because it receives much '
'less testing than the rest of Nova. This may change '
'in the future, but current deployers should be aware '
'that the use of it in production right now may be '
'risky.'))
# Mostly for tests.
cell_state_manager = kwargs.pop('cell_state_manager', None)
super(CellsManager, self).__init__(service_name='cells',
*args, **kwargs)
if cell_state_manager is None:
cell_state_manager = cells_state.CellStateManager
self.state_manager = cell_state_manager()
self.msg_runner = messaging.MessageRunner(self.state_manager)
cells_driver_cls = importutils.import_class(
CONF.cells.driver)
self.driver = cells_driver_cls()
self.instances_to_heal = iter([])
def post_start_hook(self):
"""Have the driver start its servers for inter-cell communication.
Also ask our child cells for their capacities and capabilities so
we get them more quickly than just waiting for the next periodic
update. Receiving the updates from the children will cause us to
update our parents. If we don't have any children, just update
our parents immediately.
"""
# FIXME(comstud): There's currently no hooks when services are
# stopping, so we have no way to stop servers cleanly.
self.driver.start_servers(self.msg_runner)
ctxt = context.get_admin_context()
if self.state_manager.get_child_cells():
self.msg_runner.ask_children_for_capabilities(ctxt)
self.msg_runner.ask_children_for_capacities(ctxt)
else:
self._update_our_parents(ctxt)
@periodic_task.periodic_task
def _update_our_parents(self, ctxt):
"""Update our parent cells with our capabilities and capacity
if we're at the bottom of the tree.
"""
self.msg_runner.tell_parents_our_capabilities(ctxt)
self.msg_runner.tell_parents_our_capacities(ctxt)
@periodic_task.periodic_task
def _heal_instances(self, ctxt):
"""Periodic task to send updates for a number of instances to
parent cells.
On every run of the periodic task, we will attempt to sync
'CONF.cells.instance_update_num_instances' number of instances.
When we get the list of instances, we shuffle them so that multiple
nova-cells services aren't attempting to sync the same instances
in lockstep.
        If CONF.cells.instance_updated_at_threshold is set, only attempt
        to sync instances that have been updated recently. The CONF
        setting defines the maximum number of seconds old the updated_at
        can be. I.e., a threshold of 3600 means to only update instances
        that have been modified in the last hour.
"""
if not self.state_manager.get_parent_cells():
# No need to sync up if we have no parents.
return
info = {'updated_list': False}
def _next_instance():
try:
instance = self.instances_to_heal.next()
except StopIteration:
if info['updated_list']:
return
threshold = CONF.cells.instance_updated_at_threshold
updated_since = None
if threshold > 0:
updated_since = timeutils.utcnow() - datetime.timedelta(
seconds=threshold)
self.instances_to_heal = cells_utils.get_instances_to_sync(
ctxt, updated_since=updated_since, shuffle=True,
uuids_only=True)
info['updated_list'] = True
try:
instance = self.instances_to_heal.next()
except StopIteration:
return
return instance
rd_context = ctxt.elevated(read_deleted='yes')
for i in xrange(CONF.cells.instance_update_num_instances):
while True:
# Yield to other greenthreads
time.sleep(0)
instance_uuid = _next_instance()
if not instance_uuid:
return
try:
instance = self.db.instance_get_by_uuid(rd_context,
instance_uuid)
except exception.InstanceNotFound:
continue
self._sync_instance(ctxt, instance)
break
def _sync_instance(self, ctxt, instance):
"""Broadcast an instance_update or instance_destroy message up to
parent cells.
"""
if instance['deleted']:
self.instance_destroy_at_top(ctxt, instance)
else:
self.instance_update_at_top(ctxt, instance)
def schedule_run_instance(self, ctxt, host_sched_kwargs):
"""Pick a cell (possibly ourselves) to build new instance(s)
and forward the request accordingly.
"""
# Target is ourselves first.
our_cell = self.state_manager.get_my_state()
self.msg_runner.schedule_run_instance(ctxt, our_cell,
host_sched_kwargs)
def build_instances(self, ctxt, build_inst_kwargs):
"""Pick a cell (possibly ourselves) to build new instance(s) and
forward the request accordingly.
"""
# Target is ourselves first.
our_cell = self.state_manager.get_my_state()
self.msg_runner.build_instances(ctxt, our_cell, build_inst_kwargs)
def get_cell_info_for_neighbors(self, _ctxt):
"""Return cell information for our neighbor cells."""
return self.state_manager.get_cell_info_for_neighbors()
def run_compute_api_method(self, ctxt, cell_name, method_info, call):
"""Call a compute API method in a specific cell."""
response = self.msg_runner.run_compute_api_method(ctxt,
cell_name,
method_info,
call)
if call:
return response.value_or_raise()
def instance_update_at_top(self, ctxt, instance):
"""Update an instance at the top level cell."""
self.msg_runner.instance_update_at_top(ctxt, instance)
def instance_destroy_at_top(self, ctxt, instance):
"""Destroy an instance at the top level cell."""
self.msg_runner.instance_destroy_at_top(ctxt, instance)
def instance_delete_everywhere(self, ctxt, instance, delete_type):
"""This is used by API cell when it didn't know what cell
an instance was in, but the instance was requested to be
deleted or soft_deleted. So, we'll broadcast this everywhere.
"""
if isinstance(instance, dict):
instance = instance_obj.Instance._from_db_object(ctxt,
instance_obj.Instance(), instance)
self.msg_runner.instance_delete_everywhere(ctxt, instance,
delete_type)
def instance_fault_create_at_top(self, ctxt, instance_fault):
"""Create an instance fault at the top level cell."""
self.msg_runner.instance_fault_create_at_top(ctxt, instance_fault)
def bw_usage_update_at_top(self, ctxt, bw_update_info):
"""Update bandwidth usage at top level cell."""
self.msg_runner.bw_usage_update_at_top(ctxt, bw_update_info)
def sync_instances(self, ctxt, project_id, updated_since, deleted):
"""Force a sync of all instances, potentially by project_id,
and potentially since a certain date/time.
"""
self.msg_runner.sync_instances(ctxt, project_id, updated_since,
deleted)
def service_get_all(self, ctxt, filters):
"""Return services in this cell and in all child cells."""
responses = self.msg_runner.service_get_all(ctxt, filters)
ret_services = []
# 1 response per cell. Each response is a list of services.
for response in responses:
services = response.value_or_raise()
for service in services:
cells_utils.add_cell_to_service(service, response.cell_name)
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, ctxt, host_name):
"""Return a service entry for a compute host in a certain cell."""
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
response = self.msg_runner.service_get_by_compute_host(ctxt,
cell_name,
host_name)
service = response.value_or_raise()
cells_utils.add_cell_to_service(service, response.cell_name)
return service
def get_host_uptime(self, ctxt, host_name):
"""Return host uptime for a compute host in a certain cell
        :param host_name: fully qualified hostname. It should be in the
                          format 'parent!child@host_id'
"""
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
response = self.msg_runner.get_host_uptime(ctxt, cell_name,
host_name)
return response.value_or_raise()
def service_update(self, ctxt, host_name, binary, params_to_update):
"""Used to enable/disable a service. For compute services, setting to
disabled stops new builds arriving on that host.
:param host_name: the name of the host machine that the service is
                          running on
:param binary: The name of the executable that the service runs as
:param params_to_update: eg. {'disabled': True}
:returns: the service reference
"""
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
response = self.msg_runner.service_update(
ctxt, cell_name, host_name, binary, params_to_update)
service = response.value_or_raise()
cells_utils.add_cell_to_service(service, response.cell_name)
return service
def service_delete(self, ctxt, cell_service_id):
"""Deletes the specified service."""
cell_name, service_id = cells_utils.split_cell_and_item(
cell_service_id)
self.msg_runner.service_delete(ctxt, cell_name, service_id)
def proxy_rpc_to_manager(self, ctxt, topic, rpc_message, call, timeout):
"""Proxy an RPC message as-is to a manager."""
compute_topic = CONF.compute_topic
cell_and_host = topic[len(compute_topic) + 1:]
cell_name, host_name = cells_utils.split_cell_and_item(cell_and_host)
response = self.msg_runner.proxy_rpc_to_manager(ctxt, cell_name,
host_name, topic, rpc_message, call, timeout)
return response.value_or_raise()
def task_log_get_all(self, ctxt, task_name, period_beginning,
period_ending, host=None, state=None):
"""Get task logs from the DB from all cells or a particular
cell.
If 'host' is not None, host will be of the format 'cell!name@host',
with '@host' being optional. The query will be directed to the
appropriate cell and return all task logs, or task logs matching
the host if specified.
'state' also may be None. If it's not, filter by the state as well.
"""
if host is None:
cell_name = None
else:
cell_name, host = cells_utils.split_cell_and_item(host)
# If no cell name was given, assume that the host name is the
# cell_name and that the target is all hosts
if cell_name is None:
cell_name, host = host, cell_name
responses = self.msg_runner.task_log_get_all(ctxt, cell_name,
task_name, period_beginning, period_ending,
host=host, state=state)
# 1 response per cell. Each response is a list of task log
# entries.
ret_task_logs = []
for response in responses:
task_logs = response.value_or_raise()
for task_log in task_logs:
cells_utils.add_cell_to_task_log(task_log,
response.cell_name)
ret_task_logs.append(task_log)
return ret_task_logs
def compute_node_get(self, ctxt, compute_id):
"""Get a compute node by ID in a specific cell."""
cell_name, compute_id = cells_utils.split_cell_and_item(
compute_id)
response = self.msg_runner.compute_node_get(ctxt, cell_name,
compute_id)
node = response.value_or_raise()
cells_utils.add_cell_to_compute_node(node, cell_name)
return node
def compute_node_get_all(self, ctxt, hypervisor_match=None):
"""Return list of compute nodes in all cells."""
responses = self.msg_runner.compute_node_get_all(ctxt,
hypervisor_match=hypervisor_match)
# 1 response per cell. Each response is a list of compute_node
# entries.
ret_nodes = []
for response in responses:
nodes = response.value_or_raise()
for node in nodes:
cells_utils.add_cell_to_compute_node(node,
response.cell_name)
ret_nodes.append(node)
return ret_nodes
def compute_node_stats(self, ctxt):
"""Return compute node stats totals from all cells."""
responses = self.msg_runner.compute_node_stats(ctxt)
totals = {}
for response in responses:
data = response.value_or_raise()
for key, val in data.iteritems():
totals.setdefault(key, 0)
totals[key] += val
return totals
def actions_get(self, ctxt, cell_name, instance_uuid):
response = self.msg_runner.actions_get(ctxt, cell_name, instance_uuid)
return response.value_or_raise()
def action_get_by_request_id(self, ctxt, cell_name, instance_uuid,
request_id):
response = self.msg_runner.action_get_by_request_id(ctxt, cell_name,
instance_uuid,
request_id)
return response.value_or_raise()
def action_events_get(self, ctxt, cell_name, action_id):
response = self.msg_runner.action_events_get(ctxt, cell_name,
action_id)
return response.value_or_raise()
def consoleauth_delete_tokens(self, ctxt, instance_uuid):
"""Delete consoleauth tokens for an instance in API cells."""
self.msg_runner.consoleauth_delete_tokens(ctxt, instance_uuid)
def validate_console_port(self, ctxt, instance_uuid, console_port,
console_type):
"""Validate console port with child cell compute node."""
instance = self.db.instance_get_by_uuid(ctxt, instance_uuid)
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
response = self.msg_runner.validate_console_port(ctxt,
instance['cell_name'], instance_uuid, console_port,
console_type)
return response.value_or_raise()
def get_capacities(self, ctxt, cell_name):
return self.state_manager.get_capacities(cell_name)
def bdm_update_or_create_at_top(self, ctxt, bdm, create=None):
"""BDM was created/updated in this cell. Tell the API cells."""
self.msg_runner.bdm_update_or_create_at_top(ctxt, bdm, create=create)
def bdm_destroy_at_top(self, ctxt, instance_uuid, device_name=None,
volume_id=None):
"""BDM was destroyed for instance in this cell. Tell the API cells."""
self.msg_runner.bdm_destroy_at_top(ctxt, instance_uuid,
device_name=device_name,
volume_id=volume_id)
def get_migrations(self, ctxt, filters):
"""Fetch migrations applying the filters."""
target_cell = None
if "cell_name" in filters:
_path_cell_sep = cells_utils.PATH_CELL_SEP
target_cell = '%s%s%s' % (CONF.cells.name, _path_cell_sep,
filters['cell_name'])
responses = self.msg_runner.get_migrations(ctxt, target_cell,
False, filters)
migrations = []
for response in responses:
migrations += response.value_or_raise()
return migrations
def instance_update_from_api(self, ctxt, instance, expected_vm_state,
expected_task_state, admin_state_reset):
"""Update an instance in its cell."""
self.msg_runner.instance_update_from_api(ctxt, instance,
expected_vm_state,
expected_task_state,
admin_state_reset)
def start_instance(self, ctxt, instance):
"""Start an instance in its cell."""
self.msg_runner.start_instance(ctxt, instance)
def stop_instance(self, ctxt, instance, do_cast=True):
"""Stop an instance in its cell."""
response = self.msg_runner.stop_instance(ctxt, instance,
do_cast=do_cast)
if not do_cast:
return response.value_or_raise()
def cell_create(self, ctxt, values):
return self.state_manager.cell_create(ctxt, values)
def cell_update(self, ctxt, cell_name, values):
return self.state_manager.cell_update(ctxt, cell_name, values)
def cell_delete(self, ctxt, cell_name):
return self.state_manager.cell_delete(ctxt, cell_name)
def cell_get(self, ctxt, cell_name):
return self.state_manager.cell_get(ctxt, cell_name)
def reboot_instance(self, ctxt, instance, reboot_type):
"""Reboot an instance in its cell."""
self.msg_runner.reboot_instance(ctxt, instance, reboot_type)
def pause_instance(self, ctxt, instance):
"""Pause an instance in its cell."""
self.msg_runner.pause_instance(ctxt, instance)
def unpause_instance(self, ctxt, instance):
"""Unpause an instance in its cell."""
self.msg_runner.unpause_instance(ctxt, instance)
def suspend_instance(self, ctxt, instance):
"""Suspend an instance in its cell."""
self.msg_runner.suspend_instance(ctxt, instance)
def resume_instance(self, ctxt, instance):
"""Resume an instance in its cell."""
self.msg_runner.resume_instance(ctxt, instance)
def terminate_instance(self, ctxt, instance):
"""Delete an instance in its cell."""
self.msg_runner.terminate_instance(ctxt, instance)
def soft_delete_instance(self, ctxt, instance):
"""Soft-delete an instance in its cell."""
self.msg_runner.soft_delete_instance(ctxt, instance)
def resize_instance(self, ctxt, instance, flavor,
extra_instance_updates):
"""Resize an instance in its cell."""
self.msg_runner.resize_instance(ctxt, instance,
flavor, extra_instance_updates)
def live_migrate_instance(self, ctxt, instance, block_migration,
disk_over_commit, host_name):
"""Live migrate an instance in its cell."""
self.msg_runner.live_migrate_instance(ctxt, instance,
block_migration,
disk_over_commit,
host_name)
def revert_resize(self, ctxt, instance):
"""Revert a resize for an instance in its cell."""
self.msg_runner.revert_resize(ctxt, instance)
def confirm_resize(self, ctxt, instance):
"""Confirm a resize for an instance in its cell."""
self.msg_runner.confirm_resize(ctxt, instance)
def reset_network(self, ctxt, instance):
"""Reset networking for an instance in its cell."""
self.msg_runner.reset_network(ctxt, instance)
def inject_network_info(self, ctxt, instance):
"""Inject networking for an instance in its cell."""
self.msg_runner.inject_network_info(ctxt, instance)
def snapshot_instance(self, ctxt, instance, image_id):
"""Snapshot an instance in its cell."""
self.msg_runner.snapshot_instance(ctxt, instance, image_id)
def backup_instance(self, ctxt, instance, image_id, backup_type, rotation):
"""Backup an instance in its cell."""
self.msg_runner.backup_instance(ctxt, instance, image_id,
backup_type, rotation)
def rebuild_instance(self, ctxt, instance, image_href, admin_password,
files_to_inject, preserve_ephemeral, kwargs):
self.msg_runner.rebuild_instance(ctxt, instance, image_href,
admin_password, files_to_inject,
preserve_ephemeral, kwargs)
|
|
"""CILogon OAuthAuthenticator for JupyterHub
Uses OAuth 1.0a with cilogon.org
Setup:
1. generate rsa keypair:
openssl genrsa -out oauth-privkey.pem 2048
openssl rsa -in oauth-privkey.pem -pubout -out oauth-pubkey.pem
2. generate certificate request (interactive)
openssl req -new -key oauth-privkey.pem -out oauth-certreq.csr
3. register with CILogon: https://cilogon.org/oauth/register
4. save your client_id from the request.
It will be used as CILOGON_CLIENT_ID env
Caveats:
- For user whitelist/admin names,
usernames will be email addresses where '@' is replaced with '.'
"""
import errno
import os
import pwd
from urllib.parse import parse_qs
try:
from OpenSSL.crypto import load_certificate, FILETYPE_PEM
except ImportError:
raise ImportError("CILogon OAuth requires PyOpenSSL")
try:
from oauthlib.oauth1 import SIGNATURE_RSA, SIGNATURE_TYPE_QUERY, Client as OAuthClient
except ImportError:
raise ImportError("CILogon requires oauthlib")
from tornado import gen
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from tornado.httputil import url_concat
from traitlets.config import Configurable
from jupyterhub.auth import LocalAuthenticator
from jupyterhub.handlers.base import BaseHandler
from jupyterhub.utils import url_path_join as ujoin
from traitlets import Unicode, Instance
from .oauth2 import OAuthenticator
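# Illustrative jupyterhub_config.py sketch for the authenticator defined below
# (file locations and the dotted module path are assumptions; the trait names
# come from CILogonOAuthenticator, and CILOGON_CLIENT_ID must be set in the
# Hub's environment -- see client_id_env):
#
#   c.JupyterHub.authenticator_class = 'oauthenticator.cilogon.CILogonOAuthenticator'
#   c.CILogonOAuthenticator.rsa_key_path = '/srv/jupyterhub/oauth-privkey.pem'
#   c.CILogonOAuthenticator.certreq_path = '/srv/jupyterhub/oauth-certreq.csr'
#   c.CILogonOAuthenticator.user_cert_dir = '/srv/jupyterhub/user-certs'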
class CILogonHandler(BaseHandler):
"""OAuth handler for redirecting to CILogon delegator"""
@gen.coroutine
def get(self):
token = yield self.authenticator.get_oauth_token()
self.redirect(url_concat(self.authenticator.authorization_url,
{'oauth_token': token, 'cilogon_skin': self.authenticator.cilogon_skin}))
class CILogonOAuthenticator(OAuthenticator):
"""CILogon OAuthenticator
required env:
CILOGON_CLIENT_ID - the client ID for CILogon OAuth
CILOGON_RSA_KEY_PATH - path to file containing rsa private key
    CILOGON_CSR_PATH - path to the certificate request file (.csr)
"""
login_service = "CILogon"
authorization_url = "https://cilogon.org/delegate"
cilogon_skin = "xsede"
oauth_url = "https://cilogon.org/oauth"
login_handler = CILogonHandler
client_id_env = 'CILOGON_CLIENT_ID'
rsa_key_path = Unicode(config=True)
def _rsa_key_path_default(self):
return os.getenv('CILOGON_RSA_KEY_PATH') or 'oauth-privkey.pem'
rsa_key = Unicode()
def _rsa_key_default(self):
with open(self.rsa_key_path) as f:
return f.read()
certreq_path = Unicode(config=True)
def _certreq_path_default(self):
return os.getenv('CILOGON_CSR_PATH') or 'oauth-certreq.csr'
certreq = Unicode()
def _certreq_default(self):
# read certreq. CILogon API can't handle standard BEGIN/END lines, so strip them
lines = []
with open(self.certreq_path) as f:
for line in f:
if not line.isspace() and '----' not in line:
lines.append(line)
return ''.join(lines)
user_cert_dir = Unicode(config=True,
help="""Directory in which to store user credentials.
This directory will be made user-private.
If not specified, user credentials will not be stored.
"""
)
def _user_cert_dir_changed(self, name, old, new):
# ensure dir exists
if not new:
return
try:
os.mkdir(new)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# make it private
os.chmod(new, 0o700)
# double-check that it's private
mode = os.stat(new).st_mode
if mode & 0o077:
raise IOError("Bad permissions on user cert dir %r: %o" % (new, mode))
oauth_client = Instance(OAuthClient)
def _oauth_client_default(self):
return OAuthClient(
self.client_id,
rsa_key=self.rsa_key,
signature_method=SIGNATURE_RSA,
signature_type=SIGNATURE_TYPE_QUERY,
)
client = Instance(AsyncHTTPClient, args=())
@gen.coroutine
def get_oauth_token(self):
"""Get the temporary OAuth token"""
uri = url_concat(ujoin(self.oauth_url, "initiate"), {
'oauth_callback': self.oauth_callback_url,
'certreq': self.certreq,
})
uri, _, _ = self.oauth_client.sign(uri)
req = HTTPRequest(uri)
# FIXME: handle failure (CILogon replies with 200 on failure)
resp = yield self.client.fetch(req)
reply = resp.body.decode('utf8', 'replace')
credentials = parse_qs(reply)
return credentials['oauth_token'][0]
@gen.coroutine
def get_user_token(self, token, verifier):
"""Get a user token from an oauth callback parameters"""
uri = url_concat(ujoin(self.oauth_url, 'token'), {
'oauth_token': token,
'oauth_verifier': verifier,
})
uri, _, _ = self.oauth_client.sign(uri)
resp = yield self.client.fetch(uri)
# FIXME: handle failure
reply = resp.body.decode('utf8', 'replace')
return parse_qs(reply)['oauth_token'][0]
@gen.coroutine
def username_from_token(self, token):
"""Turn a user token into a username"""
uri = url_concat(ujoin(self.oauth_url, 'getcert'), {
'oauth_token': token,
})
uri, _, _ = self.oauth_client.sign(uri)
resp = yield self.client.fetch(uri)
# FIXME: handle failure
reply = resp.body.decode('utf8', 'replace')
_, cert_txt = reply.split('\n', 1)
cert = load_certificate(FILETYPE_PEM, cert_txt)
username = None
for i in range(cert.get_extension_count()):
ext = cert.get_extension(i)
if ext.get_short_name().decode('ascii', 'replace') == 'subjectAltName':
data = ext.get_data()
# cert starts with some weird bytes. Not sure why or if they are consistent
username = data[4:].decode('utf8').lower()
# workaround notebook bug not handling @
username = username.replace('@', '.')
break
if username is None:
            raise ValueError("Failed to get username from cert: %s" % cert_txt)
return username, cert_txt
def _user_cert_path(self, username):
return os.path.join(self.user_cert_dir, username + '.crt')
def save_user_cert(self, username, cert):
"""Save the certificate for a given user in self.user_cert_dir"""
if not self.user_cert_dir:
return
cert_path = self._user_cert_path(username)
self.log.info("Saving cert for %s in %s", username, cert_path)
with open(cert_path, 'w') as f:
f.write(cert)
def user_cert(self, username):
"""Get the certificate for a user by name"""
if not self.user_cert_dir:
# not storing certs
return
# FIXME: handle cert file missing?
with open(self._user_cert_path(username)) as f:
return f.read()
@gen.coroutine
def authenticate(self, handler, data=None):
"""Called on the OAuth callback"""
token = yield self.get_user_token(
handler.get_argument('oauth_token'),
handler.get_argument('oauth_verifier'),
)
username, cert = yield self.username_from_token(token)
if not username:
return
self.save_user_cert(username, cert)
return username
class LocalCILogonOAuthenticator(LocalAuthenticator, CILogonOAuthenticator):
"""A version that mixes in local system user creation"""
pass
class CILogonSpawnerMixin(Configurable):
"""Spawner Mixin for staging the CILogon cert file"""
cert_file_path = Unicode("cilogon.crt", config=True,
help="The path (relative to home) where the CILogon cert should be placed.")
def get_user_info(self):
"""Get the user's home dir, uid, gid, for resolving relative cert_file_path.
Returns a dict with 'home', 'uid', 'gid' keys.
        By default, populated from pwd.getpwnam(self.user.name).
"""
pw_struct = pwd.getpwnam(self.user.name)
return {
'home': pw_struct.pw_dir,
'uid': pw_struct.pw_uid,
'gid': pw_struct.pw_gid,
}
_cert = None
@property
def cert(self):
if self._cert is None:
self._cert = self.authenticator.user_cert(self.user.name)
return self._cert
def stage_cert_file(self):
"""Stage the CILogon user cert for the spawner.
Override for Spawners not on the local filesystem.
"""
        if not self.cert:
            self.log.info("No cert found for %s", self.user.name)
            return
uinfo = self.get_user_info()
dst = os.path.join(uinfo['home'], self.cert_file_path)
self.log.info("Staging cert for %s: %s", self.user.name, dst)
with open(dst, 'w') as f:
fd = f.fileno()
os.fchmod(fd, 0o600) # make private before writing content
f.write(self.cert)
# set user as owner
os.fchown(fd, uinfo['uid'], uinfo['gid'])
@gen.coroutine
def start(self):
yield gen.maybe_future(self.stage_cert_file())
result = yield gen.maybe_future(super().start())
return result
def unstage_cert_file(self):
"""Unstage user cert
called after stopping
"""
uinfo = self.get_user_info()
dst = os.path.join(uinfo['home'], self.cert_file_path)
if not os.path.exists(dst):
self.log.debug("No cert for %s: %s", self.user.name, dst)
return
self.log.info("Unstaging cert for %s: %s", self.user.name, dst)
try:
os.remove(dst)
except OSError as e:
            # ENOENT means the cert was already removed; anything else is a
            # real failure worth logging
            if e.errno != errno.ENOENT:
                self.log.error("Failed to unstage cert for %s (%s): %s",
                               self.user.name, dst, e)
@gen.coroutine
def stop(self):
result = yield gen.maybe_future(super().stop())
yield gen.maybe_future(self.unstage_cert_file())
return result
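# Illustrative composition of the mixin with a local spawner (a sketch; any
# Spawner whose files live on the Hub's filesystem should work the same way):
#
#   from jupyterhub.spawner import LocalProcessSpawner
#
#   class CILogonLocalProcessSpawner(CILogonSpawnerMixin, LocalProcessSpawner):
#       pass
#
#   c.JupyterHub.spawner_class = CILogonLocalProcessSpawner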
|
|
from __future__ import absolute_import
import cgi
import email.utils
import getpass
import json
import logging
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
try:
import ssl # noqa
HAS_TLS = True
except ImportError:
HAS_TLS = False
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
import pip
from pip.exceptions import InstallationError, HashMismatch
from pip.models import PyPI
from pip.utils import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
ARCHIVE_EXTENSIONS, consume, call_subprocess)
from pip.utils.encoding import auto_decode
from pip.utils.filesystem import check_path_owner
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip.utils.glibc import libc_ver
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
from pip.locations import write_delete_marker_file
from pip.vcs import vcs
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._vendor.requests.utils import get_netrc_auth
from pip._vendor.requests.structures import CaseInsensitiveDict
from pip._vendor import urllib3
from pip._vendor.cachecontrol import CacheControlAdapter
from pip._vendor.cachecontrol.caches import FileCache
from pip._vendor.lockfile import LockError
from pip._vendor.six.moves import xmlrpc_client
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url',
'unpack_http_url', 'unpack_url']
logger = logging.getLogger(__name__)
def user_agent():
"""
Return a string representing the user agent.
"""
data = {
"installer": {"name": "pip", "version": pip.__version__},
"python": platform.python_version(),
"implementation": {
"name": platform.python_implementation(),
},
}
if data["implementation"]["name"] == 'CPython':
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'PyPy':
if sys.pypy_version_info.releaselevel == 'final':
pypy_version_info = sys.pypy_version_info[:3]
else:
pypy_version_info = sys.pypy_version_info
data["implementation"]["version"] = ".".join(
[str(x) for x in pypy_version_info]
)
elif data["implementation"]["name"] == 'Jython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'IronPython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
if sys.platform.startswith("linux"):
from pip._vendor import distro
distro_infos = dict(filter(
lambda x: x[1],
zip(["name", "version", "id"], distro.linux_distribution()),
))
libc = dict(filter(
lambda x: x[1],
zip(["lib", "version"], libc_ver()),
))
if libc:
distro_infos["libc"] = libc
if distro_infos:
data["distro"] = distro_infos
if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}
if platform.system():
data.setdefault("system", {})["name"] = platform.system()
if platform.release():
data.setdefault("system", {})["release"] = platform.release()
if platform.machine():
data["cpu"] = platform.machine()
# Python 2.6 doesn't have ssl.OPENSSL_VERSION.
if HAS_TLS and sys.version_info[:2] > (2, 6):
data["openssl_version"] = ssl.OPENSSL_VERSION
return "{data[installer][name]}/{data[installer][version]} {json}".format(
data=data,
json=json.dumps(data, separators=(",", ":"), sort_keys=True),
)
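# The returned string has the shape "pip/<version> <json blob>", for example
# (values illustrative only):
#   pip/8.1.2 {"cpu":"x86_64","distro":{"name":"Ubuntu","version":"16.04"},...}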
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True):
self.prompting = prompting
self.passwords = {}
def __call__(self, req):
parsed = urllib_parse.urlparse(req.url)
# Get the netloc without any embedded credentials
netloc = parsed.netloc.rsplit("@", 1)[-1]
# Set the url of the request to the url without any credentials
req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
# Extract credentials embedded in the url if we have none stored
if username is None:
username, password = self.parse_credentials(parsed.netloc)
# Get creds from netrc if we still don't have them
if username is None and password is None:
netrc_auth = get_netrc_auth(req.url)
username, password = netrc_auth if netrc_auth else (None, None)
if username or password:
# Store the username and password
self.passwords[netloc] = (username, password)
# Send the basic auth with this request
req = HTTPBasicAuth(username or "", password or "")(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
def handle_401(self, resp, **kwargs):
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
# We are not able to prompt the user so simply return the response
if not self.prompting:
return resp
parsed = urllib_parse.urlparse(resp.url)
# Prompt the user for a new username and password
username = six.moves.input("User for %s: " % parsed.netloc)
password = getpass.getpass("Password: ")
# Store the new username and password to use for future requests
if username or password:
self.passwords[parsed.netloc] = (username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
pathname = url_to_path(request.url)
resp = Response()
resp.status_code = 200
resp.url = request.url
try:
stats = os.stat(pathname)
except OSError as exc:
resp.status_code = 404
resp.raw = exc
else:
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
resp.headers = CaseInsensitiveDict({
"Content-Type": content_type,
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = open(pathname, "rb")
resp.close = resp.raw.close
return resp
def close(self):
pass
class SafeFileCache(FileCache):
"""
A file based cache which is safe to use even when the target directory may
not be accessible or writable.
"""
def __init__(self, *args, **kwargs):
super(SafeFileCache, self).__init__(*args, **kwargs)
# Check to ensure that the directory containing our cache directory
        # is owned by the user currently executing pip. If it does not exist
# we will check the parent directory until we find one that does exist.
# If it is not owned by the user executing pip then we will disable
# the cache and log a warning.
if not check_path_owner(self.directory):
logger.warning(
"The directory '%s' or its parent directory is not owned by "
"the current user and the cache has been disabled. Please "
"check the permissions and owner of that directory. If "
"executing pip with sudo, you may want sudo's -H flag.",
self.directory,
)
# Set our directory to None to disable the Cache
self.directory = None
def get(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).get(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
def set(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).set(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
def delete(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).delete(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
class InsecureHTTPAdapter(HTTPAdapter):
def cert_verify(self, conn, url, verify, cert):
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
class PipSession(requests.Session):
timeout = None
def __init__(self, *args, **kwargs):
retries = kwargs.pop("retries", 0)
cache = kwargs.pop("cache", None)
insecure_hosts = kwargs.pop("insecure_hosts", [])
super(PipSession, self).__init__(*args, **kwargs)
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth()
# Create our urllib3.Retry instance which will allow us to customize
# how we handle retries.
retries = urllib3.Retry(
# Set the total number of retries that a particular request can
# have.
total=retries,
# A 503 error from PyPI typically means that the Fastly -> Origin
# connection got interrupted in some way. A 503 error in general
# is typically considered a transient error so we'll go ahead and
# retry it.
status_forcelist=[503],
# Add a small amount of back off between failed requests in
# order to prevent hammering the service.
backoff_factor=0.25,
)
# We want to _only_ cache responses on securely fetched origins. We do
# this because we can't validate the response of an insecurely fetched
# origin, and we don't want someone to be able to poison the cache and
# require manual eviction from the cache to fix it.
if cache:
secure_adapter = CacheControlAdapter(
cache=SafeFileCache(cache, use_dir_lock=True),
max_retries=retries,
)
else:
secure_adapter = HTTPAdapter(max_retries=retries)
# Our Insecure HTTPAdapter disables HTTPS validation. It does not
# support caching (see above) so we'll use it for all http:// URLs as
# well as any https:// host that we've marked as ignoring TLS errors
# for.
insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
self.mount("https://", secure_adapter)
self.mount("http://", insecure_adapter)
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
# We want to use a non-validating adapter for any requests which are
# deemed insecure.
for host in insecure_hosts:
self.mount("https://{0}/".format(host), insecure_adapter)
def request(self, method, url, *args, **kwargs):
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super(PipSession, self).request(method, url, *args, **kwargs)
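# Minimal usage sketch for PipSession (illustrative; the keyword arguments are
# the ones consumed in __init__ above, and the URL is only an example):
#
#   session = PipSession(retries=3, cache="/tmp/pip-http-cache",
#                        insecure_hosts=["pypi.internal.example.com"])
#   resp = session.get("https://pypi.python.org/simple/", timeout=15)
#   resp.raise_for_status()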
def get_file_content(url, comes_from=None, session=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
if session is None:
raise TypeError(
"get_file_content() missing 1 required keyword argument: 'session'"
)
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from and
comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib_parse.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
# FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
return resp.url, resp.text
try:
with open(url, 'rb') as f:
content = auto_decode(f.read())
except IOError as exc:
raise InstallationError(
'Could not open requirements file: %s' % str(exc)
)
return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
_, netloc, path, _, _ = urllib_parse.urlsplit(url)
# if we have a UNC path, prepend UNC share notation
if netloc:
netloc = '\\\\' + netloc
path = urllib_request.url2pathname(netloc + path)
return path
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
return url
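# Round-trip example for the two helpers above (illustrative, POSIX paths
# assumed):
#   path_to_url('/tmp/foo bar')          -> 'file:///tmp/foo%20bar'
#   url_to_path('file:///tmp/foo%20bar') -> '/tmp/foo bar'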
def is_archive_file(name):
    """Return True if `name` is considered an archive file."""
ext = splitext(name)[1].lower()
if ext in ARCHIVE_EXTENSIONS:
return True
return False
def unpack_vcs_link(link, location):
vcs_backend = _get_used_vcs_backend(link)
vcs_backend.unpack(location)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
return link.url.lower().startswith('file:')
def is_dir_url(link):
"""Return whether a file:// Link points to a directory.
``link`` must not have any other scheme but file://. Call is_file_url()
first.
"""
link_path = url_to_path(link.url_without_fragment)
return os.path.isdir(link_path)
def _progress_indicator(iterable, *args, **kwargs):
return iterable
def _download_url(resp, link, content_file, hashes):
try:
total_length = int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
cached_resp = getattr(resp, "from_cache", False)
if logger.getEffectiveLevel() > logging.INFO:
show_progress = False
elif cached_resp:
show_progress = False
elif total_length > (40 * 1000):
show_progress = True
elif not total_length:
show_progress = True
else:
show_progress = False
show_url = link.show_url
def resp_read(chunk_size):
try:
# Special case for urllib3.
for chunk in resp.raw.stream(
chunk_size,
# We use decode_content=False here because we don't
# want urllib3 to mess with the raw bytes we get
# from the server. If we decompress inside of
# urllib3 then we cannot verify the checksum
# because the checksum will be of the compressed
# file. This breakage will only occur if the
# server adds a Content-Encoding header, which
# depends on how the server was configured:
# - Some servers will notice that the file isn't a
# compressible file and will leave the file alone
# and with an empty Content-Encoding
# - Some servers will notice that the file is
# already compressed and will leave the file
# alone and will add a Content-Encoding: gzip
# header
# - Some servers won't notice anything at all and
# will take a file that's already been compressed
# and compress it again and set the
# Content-Encoding: gzip header
#
# By setting this not to decode automatically we
# hope to eliminate problems with the second case.
decode_content=False):
yield chunk
except AttributeError:
# Standard file-like object.
while True:
chunk = resp.raw.read(chunk_size)
if not chunk:
break
yield chunk
def written_chunks(chunks):
for chunk in chunks:
content_file.write(chunk)
yield chunk
progress_indicator = _progress_indicator
if link.netloc == PyPI.netloc:
url = show_url
else:
url = link.url_without_fragment
if show_progress: # We don't show progress on cached responses
if total_length:
logger.info("Downloading %s (%s)", url, format_size(total_length))
progress_indicator = DownloadProgressBar(max=total_length).iter
else:
logger.info("Downloading %s", url)
progress_indicator = DownloadProgressSpinner().iter
elif cached_resp:
logger.info("Using cached %s", url)
else:
logger.info("Downloading %s", url)
logger.debug('Downloading from URL %s', link)
downloaded_chunks = written_chunks(
progress_indicator(
resp_read(CONTENT_CHUNK_SIZE),
CONTENT_CHUNK_SIZE
)
)
if hashes:
hashes.check_against_chunks(downloaded_chunks)
else:
consume(downloaded_chunks)
def _copy_file(filename, location, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)bort' %
display_path(download_location), ('i', 'w', 'b', 'a'))
if response == 'i':
copy = False
elif response == 'w':
logger.warning('Deleting %s', display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warning(
'Backing up %s to %s',
display_path(download_location),
display_path(dest_file),
)
shutil.move(download_location, dest_file)
elif response == 'a':
sys.exit(-1)
if copy:
shutil.copy(filename, download_location)
logger.info('Saved %s', display_path(download_location))
def unpack_http_url(link, location, download_dir=None,
session=None, hashes=None):
if session is None:
raise TypeError(
"unpack_http_url() missing 1 required keyword argument: 'session'"
)
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
# If a download dir is specified, is the file already downloaded there?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link,
download_dir,
hashes)
if already_downloaded_path:
from_path = already_downloaded_path
content_type = mimetypes.guess_type(from_path)[0]
else:
# let's download to a tmp dir
from_path, content_type = _download_http_url(link,
session,
temp_dir,
hashes)
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified; let's copy the archive there
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, link)
if not already_downloaded_path:
os.unlink(from_path)
rmtree(temp_dir)
def unpack_file_url(link, location, download_dir=None, hashes=None):
"""Unpack link into location.
If download_dir is provided and link points to a file, make a copy
of the link file inside download_dir.
"""
link_path = url_to_path(link.url_without_fragment)
# If it's a url to a local directory
if is_dir_url(link):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
if download_dir:
logger.info('Link is a directory, ignoring download_dir')
return
# If --require-hashes is off, `hashes` is either empty, the
# link's embedded hash, or MissingHashes; it is required to
# match. If --require-hashes is on, we are satisfied by any
# hash in `hashes` matching: a URL-based or an option-based
# one; no internet-sourced hash will be in `hashes`.
if hashes:
hashes.check_against_path(link_path)
# If a download dir is specified, is the file already there and valid?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link,
download_dir,
hashes)
if already_downloaded_path:
from_path = already_downloaded_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified and not already downloaded
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, link)
def _copy_dist_from_dir(link_path, location):
"""Copy distribution files in `link_path` to `location`.
Invoked when user requests to install a local directory. E.g.:
pip install .
pip install ~/dev/git-repos/python-prompt-toolkit
"""
# Note: This is currently VERY SLOW if you have a lot of data in the
# directory, because it copies everything with `shutil.copytree`.
# What it should really do is build an sdist and install that.
# See https://github.com/pypa/pip/issues/2195
if os.path.isdir(location):
rmtree(location)
# build an sdist
setup_py = 'setup.py'
sdist_args = [sys.executable]
sdist_args.append('-c')
sdist_args.append(SETUPTOOLS_SHIM % setup_py)
sdist_args.append('sdist')
sdist_args += ['--dist-dir', location]
logger.info('Running setup.py sdist for %s', link_path)
with indent_log():
call_subprocess(sdist_args, cwd=link_path, show_stdout=False)
# unpack sdist into `location`
sdist = os.path.join(location, os.listdir(location)[0])
logger.info('Unpacking sdist %s into %s', sdist, location)
unpack_file(sdist, location, content_type=None, link=None)
class PipXmlrpcTransport(xmlrpc_client.Transport):
"""Provide a `xmlrpclib.Transport` implementation via a `PipSession`
object.
"""
def __init__(self, index_url, session, use_datetime=False):
xmlrpc_client.Transport.__init__(self, use_datetime)
index_parts = urllib_parse.urlparse(index_url)
self._scheme = index_parts.scheme
self._session = session
def request(self, host, handler, request_body, verbose=False):
parts = (self._scheme, host, handler, None, None, None)
url = urllib_parse.urlunparse(parts)
try:
headers = {'Content-Type': 'text/xml'}
response = self._session.post(url, data=request_body,
headers=headers, stream=True)
response.raise_for_status()
self.verbose = verbose
return self.parse_response(response.raw)
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s",
exc.response.status_code, url,
)
raise
def unpack_url(link, location, download_dir=None,
only_download=False, session=None, hashes=None):
"""Unpack link.
If link is a VCS link:
if only_download, export into download_dir and ignore location
else unpack into location
for other types of link:
- unpack into location
- if download_dir, copy the file into download_dir
- if only_download, mark location for deletion
:param hashes: A Hashes object, one of whose embedded hashes must match,
or HashMismatch will be raised. If the Hashes is empty, no matches are
required, and unhashable types of requirements (like VCS ones, which
would ordinarily raise HashUnsupported) are allowed.
"""
# non-editable vcs urls
if is_vcs_url(link):
unpack_vcs_link(link, location)
# file urls
elif is_file_url(link):
unpack_file_url(link, location, download_dir, hashes=hashes)
# http urls
else:
if session is None:
session = PipSession()
unpack_http_url(
link,
location,
download_dir,
session,
hashes=hashes
)
if only_download:
write_delete_marker_file(location)
def _download_http_url(link, session, temp_dir, hashes):
"""Download link url into temp_dir using provided session"""
target_url = link.url.split('#', 1)[0]
try:
resp = session.get(
target_url,
# We use Accept-Encoding: identity here because requests
# defaults to accepting compressed responses. This breaks in
# a variety of ways depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible
# file and will leave the file alone and with an empty
# Content-Encoding
# - Some servers will notice that the file is already
# compressed and will leave the file alone and will add a
# Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take
# a file that's already been compressed and compress it again
# and set the Content-Encoding: gzip header
            # By setting this to request only the identity encoding we're
# hoping to eliminate the third case. Hopefully there does not
# exist a server which when given a file will notice it is
# already compressed and that you're not asking for a
# compressed file and will then decompress it before sending
# because if that's the case I don't think it'll ever be
# possible to make this work.
headers={"Accept-Encoding": "identity"},
stream=True,
)
resp.raise_for_status()
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s", exc.response.status_code, link,
)
raise
content_type = resp.headers.get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
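        # For illustration, a typical (hypothetical) header parses as:
        #   cgi.parse_header('attachment; filename="pkg-1.0.tar.gz"')
        #   -> ('attachment', {'filename': 'pkg-1.0.tar.gz'})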
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
file_path = os.path.join(temp_dir, filename)
with open(file_path, 'wb') as content_file:
_download_url(resp, link, content_file, hashes)
return file_path, content_type
def _check_download_dir(link, download_dir, hashes):
""" Check download_dir for previously downloaded file with correct hash
    If a correct file is found, return its path; otherwise return None
"""
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
# If already downloaded, does its hash match?
logger.info('File was already downloaded %s', download_path)
if hashes:
try:
hashes.check_against_path(download_path)
except HashMismatch:
logger.warning(
'Previously-downloaded file %s has bad hash. '
'Re-downloading.',
download_path
)
os.unlink(download_path)
return None
return download_path
return None
|
|
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import os
import mimetypes
from nereid import route
from nereid.helpers import send_file, url_for
from nereid.globals import _request_ctx_stack
from werkzeug import abort
from trytond.model import ModelSQL, ModelView, fields, Unique
from trytond.config import config
from trytond.transaction import Transaction
from trytond.pyson import Eval, Bool
__all__ = ['NereidStaticFolder', 'NereidStaticFile']
READONLY_IF_FILES = {
'readonly': Bool(Eval('files'))
}
class NereidStaticFolder(ModelSQL, ModelView):
"Static folder for Nereid"
__name__ = "nereid.static.folder"
name = fields.Char(
'Name', required=True, select=True, states=READONLY_IF_FILES,
depends=['files']
)
description = fields.Char(
'Description', select=True, states=READONLY_IF_FILES,
depends=['files']
)
files = fields.One2Many('nereid.static.file', 'folder', 'Files')
type = fields.Selection([
('local', 'Local File'),
], 'File Type', states=READONLY_IF_FILES, depends=['files'])
@classmethod
def __setup__(cls):
super(NereidStaticFolder, cls).__setup__()
table = cls.__table__()
cls._sql_constraints += [
('unique_folder', Unique(table, table.name),
'Folder name needs to be unique')
]
cls._error_messages.update({
'invalid_name': """Invalid folder name:
(1) '.' in folder name (OR)
(2) folder name begins with '/'""",
})
@classmethod
def validate(cls, folders):
"""
Validates the records.
:param folders: active record list of folders
"""
super(NereidStaticFolder, cls).validate(folders)
for folder in folders:
folder.check_name()
@staticmethod
def default_type():
return 'local'
def check_name(self):
'''
Check the validity of folder name
        Allowing the use of '/' or '.' would be risky as it could
        eventually lead to privilege escalation
'''
if ('.' in self.name) or (self.name.startswith('/')):
self.raise_user_error('invalid_name')
class NereidStaticFile(ModelSQL, ModelView):
"Static files for Nereid"
__name__ = "nereid.static.file"
name = fields.Char('File Name', select=True, required=True)
folder = fields.Many2One(
'nereid.static.folder', 'Folder', select=True, required=True
)
#: This function field returns the field contents. This is useful if the
#: field is going to be displayed on the clients.
file_binary = fields.Function(
fields.Binary('File', filename='name'),
'get_file_binary', 'set_file_binary',
)
#: Full path to the file in the filesystem
file_path = fields.Function(fields.Char('File Path'), 'get_file_path')
    #: URL that can be used to identify the resource. Note that the value
#: of this field is available only when called within a request context.
#: In other words the URL is valid only when called in a nereid request.
url = fields.Function(fields.Char('URL'), 'get_url')
# Sequence
sequence = fields.Integer('Sequence', select=True)
# File mimetype
mimetype = fields.Function(fields.Char('Mimetype'), getter='get_mimetype')
@classmethod
def __setup__(cls):
super(NereidStaticFile, cls).__setup__()
table = cls.__table__()
cls._sql_constraints += [
('name_folder_uniq', Unique(table, table.name, table.folder),
             'The name of the static file must be unique in a folder!'),
]
cls._error_messages.update({
'invalid_file_name': """Invalid file name:
(1) '..' in file name (OR)
(2) file name contains '/'""",
'missing_extension': (
"File extension is missing from file name and it is required"
" to guess file type."
)
})
@staticmethod
def default_sequence():
return 10
def get_mimetype(self, name):
"""
This method detects and returns the mimetype for the static file.
The python mimetypes module returns a tuple of the form -:
>>> mimetypes.guess_type(file_name)
(file_mimetype, encoding)
which can then be used to fill the `mimetype` field. Some example types
are -:
* image/png
* application/pdf
etc.
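        A concrete (illustrative) call:
        >>> mimetypes.guess_type('logo.png')
        ('image/png', None)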
"""
return mimetypes.guess_type(self.name)[0]
def get_url(self, name):
"""Return the url if within an active request context or return
False values
"""
if _request_ctx_stack.top is None:
return None
return url_for(
'nereid.static.file.send_static_file',
folder=self.folder.name, name=self.name
)
@staticmethod
def get_nereid_base_path():
"""
Returns base path for nereid, where all the static files would be
stored.
By Default it is:
<Tryton Data Path>/<Database Name>/nereid
"""
return os.path.join(
config.get('database', 'path'),
Transaction().database.name,
"nereid"
)
def _set_file_binary(self, value):
"""
Setter for static file that stores file in file system
:param value: The value to set
"""
file_binary = fields.Binary.cast(bytes(value))
# If the folder does not exist, create it recursively
directory = os.path.dirname(self.file_path)
if not os.path.isdir(directory):
os.makedirs(directory)
with open(self.file_path, 'wb') as file_writer:
file_writer.write(file_binary)
@classmethod
def set_file_binary(cls, files, name, value):
"""
Setter for the functional binary field.
:param files: Records
:param name: Ignored
:param value: The file buffer
"""
for static_file in files:
static_file._set_file_binary(value)
def get_file_binary(self, name):
'''
        Getter for the file_binary field. This fetches the file from the
        file system, converts it to a buffer and returns it.
:param name: Field name
:return: Bytes
'''
location = self.file_path
with open(location, 'rb') as file_reader:
return fields.Binary.cast(file_reader.read())
def get_file_path(self, name):
"""
Returns the full path to the file in the file system
:param name: Field name
:return: File path
"""
return os.path.abspath(
os.path.join(
self.get_nereid_base_path(),
self.folder.name, self.name
))
@classmethod
def validate(cls, files):
"""
Validates the records.
:param files: active record list of static files
"""
super(NereidStaticFile, cls).validate(files)
for file in files:
file.check_file_name()
def check_file_name(self):
'''
        Check the validity of the file name
        Allowing the use of '/' or '..' would be risky as it could
        eventually lead to privilege escalation
'''
file_name, file_extension = os.path.splitext(self.name)
if (not file_extension) or (file_extension == "."):
self.raise_user_error("missing_extension")
elif (".." in self.name) or ("/" in file_name):
self.raise_user_error("invalid_file_name")
@classmethod
@route("/static-file/<folder>/<name>", methods=["GET"])
def send_static_file(cls, folder, name):
"""
Invokes the send_file method in nereid.helpers to send a file as the
        response to the request. The file is sent as efficiently as
        possible; for example, nereid will use the X-Sendfile
        header to make nginx send the file if possible.
:param folder: name of the folder
:param name: name of the file
"""
# TODO: Separate this search and find into separate cached method
files = cls.search([
('folder.name', '=', folder),
('name', '=', name)
])
if not files:
abort(404)
return send_file(files[0].file_path)
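    # Illustrative request handled by the route above (hypothetical names):
    #   GET /static-file/images/logo.png
    # serves <nereid base path>/images/logo.png via send_file.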
|
|
#! /usr/bin/env python
"""
***************************
Modified Numpy Matrix Class
***************************
"""
#todo: update 'close' to match numpy's implementation for all_close
import numpy
import itertools
import helpers
from decorator import decorator
@decorator
def expandCallable(fun, self, other):
"""
If the 'other' argument is a callable object call it with self.shape as the
only argument.
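    For example (illustrative), ``Matrix([[1, 1]]) == Matrix.ones`` ends up
    calling ``Matrix.ones((1, 2))`` before the comparison is made.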
"""
return (
fun(self, other(self.shape))
if callable(other) else
fun(self, other)
)
class Matrix(numpy.matrix):
"""
    'Improved' version of the matrix class.
    Mostly a collection of minor tweaks and convenience functions.
"""
rtol = 1e-5
"""
    Relative tolerance for :py:meth:`mvn.matrix.Matrix.__eq__`
passed as a parameter to :py:func:`numpy.allclose` to determine 'equality'
"""
atol = 1e-8
"""
    Absolute tolerance for :py:meth:`mvn.matrix.Matrix.__eq__`
passed as a parameter to :py:func:`numpy.allclose` to determine 'equality'
"""
sign = helpers.sign
unit = helpers.unit
def __new__(cls, data, dtype=None, copy=True):
"""
!!
"""
self=numpy.matrix(data, dtype, copy)
self.__class__=cls
return self
@expandCallable
def __eq__(self, other):
"""
Treats the matrix as a single object, and returns True or False.
uses class members :py:attr:`mvn.matrix.Matrix.atol` and
:py:attr:`mvn.matrix.Matrix.rtol` through :py:func:`numpy.allclose` to
determine 'equality'
        Throws :py:class:`ValueError` if there is a shape mismatch
uses the :py:func:`mvn.matrix.expandCallable` decorator so that
this works
>>> assert Matrix([[0,0],[0,0],[0,0]]) == numpy.zeros
>>> assert Matrix([[1,0],[0,1]]) == Matrix.eye
>>> assert Matrix([1,2,3])+Matrix.atol/2 == Matrix([1,2,3])
"""
return numpy.allclose(self,type(self)(other,copy=False))
@expandCallable
def __add__(self, other):
"""
:py:func:`numpy.matrix.__add__` with the
:py:func:`mvn.matrix.expandCallable` decorator applied
"""
return numpy.matrix.__add__(self, other)
def __ne__(self, other):
"""
inverse of __eq__
return not (self == other)
"""
return not (self == other)
def __div__(self, other):
"""
self/other == self*other**-1
"""
return self*other**(-1)
def __rdiv__(self, other):
"""
other/self == other*self**-1
"""
return other*self**(-1)
def __repr__(self):
return 'M'+numpy.matrix.__repr__(self)[1:]
__str__ = __repr__
def diagonal(self):
"""
return the diagonal of a matrix as a 1-D array
see: :py:func:`numpy.diagonal`
"""
return numpy.squeeze(numpy.array(numpy.matrix.diagonal(self)))
def flatten(self):
"""
copy the matrix to an array and flatten it
        see: :py:meth:`numpy.ndarray.flatten`
"""
return numpy.array(self).flatten()
def squeeze(self):
"""
copy the matrix to an array and squeeze it
see: :py:func:`numpy.squeeze`
"""
return numpy.array(self).squeeze()
def asarray(self):
"""
return the data as an array
see: :py:func:`numpy.asarray`
"""
return numpy.asarray(self)
def array(self):
"""
return a copy of the data in an array
see: :py:func:`numpy.array`
"""
return numpy.array(self)
def approx(self, other = 0.0):
"""
same function as :py:func:`numpy.allclose`, but elementwise
"""
other = type(self)(other,copy = False)
return helpers.approx(self,other, atol=self.atol, rtol=self.rtol)
@classmethod
def eye(cls, *args, **kwargs):
"""
improved version of numpy.eye
behaves the same but will accept a shape tuple as a first
argument.
>>> assert Matrix.eye((2,2)) == Matrix.eye(2,2) == Matrix.eye(2)
see: :py:func:`numpy.eye`
"""
#if isinstance(args[0],collections.Iterable):
if hasattr(args[0], '__iter__'):
args=itertools.chain(args[0], args[1:])
return cls(numpy.eye(*args, **kwargs))
@classmethod
def ones(cls, shape = (), **kwargs):
"""
return a matrix filled with ones
see: :py:func:`numpy.ones`
"""
return cls(numpy.ones(shape, **kwargs))
@classmethod
def zeros(cls, shape = (), **kwargs):
"""
return a matrix filled with zeros
see: :py:func:`numpy.zeros`
"""
return cls(numpy.zeros(shape, **kwargs))
@classmethod
def infs(cls, shape = (), **kwargs):
"""
return a matrix filled with infs
"""
return numpy.inf*Matrix.ones(shape, **kwargs)
@classmethod
def nans(cls, shape = (), **kwargs):
"""
        return a matrix filled with nans
"""
return numpy.nan*Matrix.ones(shape, **kwargs)
@classmethod
def rand(cls, shape = ()):
"""
return a matrix of uniformly distributed random numbers on [0,1]
see: :py:func:`numpy.random.rand`
"""
return cls(numpy.random.rand(*shape))
@classmethod
def randn(cls, shape = ()):
"""
return a matrix of normally distributed random numbers with unit variance
see: :py:func:`numpy.random.randn`
"""
return cls(numpy.random.randn(*shape))
@classmethod
def stack(cls, rows, default = 0):
"""
2d concatenation, expanding callables
>>> E3 = numpy.eye(3)
>>> Matrix.stack([
... [ E3,Matrix.zeros],
... [ Matrix.ones, 4],
... ])
Matrix([[ 1., 0., 0., 0.],
[ 0., 1., 0., 0.],
[ 0., 0., 1., 0.],
[ 1., 1., 1., 4.]])
"""
return cls(helpers.stack(rows, default))
def det(self):
"""
return the determinant of the matrix
see: :py:func:`numpy.linalg.det`
"""
return numpy.linalg.det(self)
def null(self):
"""
>>> R = Matrix.randn([5,10])
>>> assert R*R.null().T == Matrix.zeros
"""
(_, v, d) = numpy.linalg.svd(self, full_matrices = 1)
v = numpy.concatenate([v,numpy.zeros(len(d)-len(v))])
zeros = type(self)(v).approx().squeeze()
return d[zeros]
if __name__ == '__main__':
import mvn
A = mvn.A[1]
print A.vectors.null()
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance withv
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'SoftlayerLBDriver'
]
from libcloud.common.types import LibcloudError
from libcloud.common.softlayer import SoftLayerConnection
from libcloud.utils.misc import find, reverse_dict
from libcloud.loadbalancer.types import State
from libcloud.loadbalancer.base import Algorithm, Driver, LoadBalancer
from libcloud.loadbalancer.base import DEFAULT_ALGORITHM, Member
lb_service = 'SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_'\
'VirtualIpAddress'
class LBPackage(object):
"""
    Defines a single Softlayer package to be used when placing orders
    (e.g. via the ex_place_balancer_order method).
:param id: Package id.
:type id: ``int``
:param name: Package name.
:type name: ``str``
:param description: Package short description.
:type description: ``str``
:param price_id: Id of the price for this package.
:type price_id: ``int``
:param capacity: Provides a numerical representation of the capacity given
in the description of this package.
:type capacity: ``int``
"""
def __init__(self, id, name, description, price_id, capacity):
self.id = id
self.name = name
self.description = description
self.price_id = price_id
self.capacity = capacity
def __repr__(self):
return (
'<LBPackage: id=%s, name=%s, description=%s, price_id=%s, '
'capacity=%s>' % (self.id, self.name, self.description,
self.price_id, self.capacity))
class SoftlayerLBDriver(Driver):
name = 'Softlayer Load Balancing'
website = 'http://www.softlayer.com/'
connectionCls = SoftLayerConnection
_VALUE_TO_ALGORITHM_MAP = {
'ROUND_ROBIN': Algorithm.ROUND_ROBIN,
'LEAST_CONNECTIONS': Algorithm.LEAST_CONNECTIONS,
'SHORTEST_RESPONSE': Algorithm.SHORTEST_RESPONSE,
'PERSISTENT_IP': Algorithm.PERSISTENT_IP
}
_ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)
def list_balancers(self):
mask = {
'adcLoadBalancers': {
'ipAddress': '',
'loadBalancerHardware': {
'datacenter': ''
},
'virtualServers': {
'serviceGroups': {
'routingMethod': '',
'routingType': '',
'services': {
'ipAddress': ''
}
}
}
}
}
res = self.connection.request(
'SoftLayer_Account', 'getAdcLoadBalancers',
object_mask=mask).object
return [self._to_balancer(lb) for lb in res]
def get_balancer(self, balancer_id):
lb = self._get_balancer_model(balancer_id)
return self._to_balancer(lb)
def list_protocols(self):
"""
Return a list of supported protocols.
:rtype: ``list`` of ``str``
"""
return ['dns', 'ftp', 'http', 'https', 'tcp', 'udp']
def balancer_list_members(self, balancer):
lb = self._get_balancer_model(balancer.id)
members = []
vs = self._locate_service_group(lb, balancer.port)
if vs:
if vs['serviceGroups']:
srvgrp = vs['serviceGroups'][0]
members = [self._to_member(srv, balancer) for
srv in srvgrp['services']]
return members
def balancer_attach_member(self, balancer, member):
lb = self._get_balancer_model(balancer.id)
vs = self._locate_service_group(lb, balancer.port)
if not vs:
raise LibcloudError(value='No service_group found for balancer '
'port: %s' % balancer.port, driver=self)
if vs['serviceGroups']:
services = vs['serviceGroups'][0]['services']
services.append(self._to_service_template(member.ip,
member.port))
self.connection.request(lb_service, 'editObject', lb, id=balancer.id)
return [m for m in balancer.list_members() if m.ip == member.ip][0]
def balancer_detach_member(self, balancer, member):
svc_lbsrv = 'SoftLayer_Network_Application_Delivery_Controller_'\
'LoadBalancer_Service'
self.connection.request(svc_lbsrv, 'deleteObject', id=member.id)
return True
def destroy_balancer(self, balancer):
res_billing = self.connection.request(lb_service, 'getBillingItem',
id=balancer.id).object
self.connection.request('SoftLayer_Billing_Item', 'cancelService',
id=res_billing['id'])
return True
def ex_list_balancer_packages(self):
"""
Retrieves the available local load balancer packages.
:rtype: ``list`` of :class:`LBPackage`
"""
mask = {
'prices': ''
}
res = self.connection.request('SoftLayer_Product_Package', 'getItems',
id=0, object_mask=mask).object
res_lb_pkgs = [r for r in res if r['description'].find
('Load Balancer') != -1]
res_lb_pkgs = [r for r in res_lb_pkgs if not r['description'].
startswith('Global')]
return [self._to_lb_package(r) for r in res_lb_pkgs]
def ex_place_balancer_order(self, package, location):
"""
Places an order for a local loadbalancer in the specified
location.
:param package: The package to create the loadbalancer from.
:type package: :class:`LBPackage`
        :param location: The location (datacenter) in which to create the
loadbalancer.
:type location: :class:`NodeLocation`
:return: ``True`` if ex_place_balancer_order was successful.
:rtype: ``bool``
"""
data = {
'complexType': 'SoftLayer_Container_Product_Order_Network_'
'LoadBalancer',
'quantity': 1,
'packageId': 0,
'location': self._get_location(location.id),
'prices': [{'id': package.price_id}]
}
self.connection.request('SoftLayer_Product_Order', 'placeOrder',
data)
return True
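    # A minimal ordering sketch (illustrative; assumes `driver` is an
    # authenticated SoftlayerLBDriver and `location` is a NodeLocation whose
    # id is a SoftLayer datacenter name such as 'dal05'):
    #
    #     package = driver.ex_list_balancer_packages()[0]
    #     driver.ex_place_balancer_order(package, location)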
def ex_configure_load_balancer(self, balancer, port=80,
protocol='http',
algorithm=DEFAULT_ALGORITHM,
ex_allocation=100):
"""
        Configure the loadbalancer by adding a front-end port (aka
a service group in the Softlayer loadbalancer model).
Softlayer loadbalancer may be defined with multiple service
groups (front-end ports) each defined with a unique port number.
:param balancer: The loadbalancer.
:type balancer: :class:`LoadBalancer`
:param port: Port of the service group, defaults to 80.
:type port: ``int``
:param protocol: Loadbalancer protocol, defaults to http.
:type protocol: ``str``
:param algorithm: Load balancing algorithm, defaults to
Algorithm.ROUND_ROBIN
:type algorithm: :class:`Algorithm`
:param ex_allocation: The percentage of the total connection
allocations to allocate for this group.
:type ex_allocation: ``int``
:rtype: :class:`LoadBalancer`
"""
_types = self._get_routing_types()
_methods = self._get_routing_methods()
rt = find(_types, lambda t: t['keyname'] == protocol.upper())
if not rt:
raise LibcloudError(value='Invalid protocol %s' % protocol,
driver=self)
value = self._algorithm_to_value(algorithm)
meth = find(_methods, lambda m: m['keyname'] == value)
if not meth:
raise LibcloudError(value='Invalid algorithm %s' % algorithm,
driver=self)
service_group_template = {
'port': port,
'allocation': ex_allocation,
'serviceGroups': [{
'routingTypeId': rt['id'],
'routingMethodId': meth['id']
}]
}
lb = self._get_balancer_model(balancer.id)
if len(lb['virtualServers']) > 0:
port = lb['virtualServers'][0]['port']
raise LibcloudError(value='Loadbalancer already configured with '
                                'a service group (front-end port): %s' % port,
driver=self)
lb['virtualServers'].append(service_group_template)
self.connection.request(lb_service, 'editObject', lb, id=balancer.id)
return self.get_balancer(balancer.id)
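    # Continuing the sketch above (illustrative): once the new balancer shows
    # up in list_balancers(), give it a front-end port:
    #
    #     balancer = driver.list_balancers()[0]
    #     driver.ex_configure_load_balancer(balancer, port=80, protocol='http',
    #                                       algorithm=Algorithm.ROUND_ROBIN)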
def _get_balancer_model(self, balancer_id):
"""
Retrieve Softlayer loadbalancer model.
"""
lb_mask = {
'ipAddress': '',
'loadBalancerHardware': {
'datacenter': ''
},
'virtualServers': {
'serviceGroups': {
'routingMethod': '',
'routingType': '',
'services': {
'ipAddress': '',
'groupReferences': '',
}
},
'scaleLoadBalancers': {}
}
}
lb_res = self.connection.request(lb_service, 'getObject',
object_mask=lb_mask, id=balancer_id).\
object
return lb_res
def _locate_service_group(self, lb, port):
"""
Locate service group with given port.
Return virtualServers (vs) entry whose port matches the
supplied parameter port. For a negative port, just return
the first vs entry.
None is returned if no match found.
:param lb: Softlayer loadbalancer model.
:type lb: ``dict``
:param port: loadbalancer front-end port.
:type port: ``int``
:return: Matched entry in the virtualServers array of the supplied
model.
:rtype: ``dict``
"""
vs = None
if port < 0:
vs = lb['virtualServers'][0] if lb['virtualServers']\
else None
else:
vs = find(lb['virtualServers'], lambda v: v['port'] == port)
return vs
def _get_routing_types(self):
svc_rtype = 'SoftLayer_Network_Application_Delivery_Controller_'\
'LoadBalancer_Routing_Type'
return self.connection.request(svc_rtype, 'getAllObjects').object
def _get_routing_methods(self):
svc_rmeth = 'SoftLayer_Network_Application_Delivery_Controller_'\
'LoadBalancer_Routing_Method'
return self.connection.request(svc_rmeth, 'getAllObjects').object
def _get_location(self, location_id):
res = self.connection.request('SoftLayer_Location_Datacenter',
'getDatacenters').object
dcenter = find(res, lambda d: d['name'] == location_id)
if not dcenter:
raise LibcloudError(value='Invalid value %s' % location_id,
driver=self)
return dcenter['id']
def _get_ipaddress(self, ip):
svc_ipaddress = 'SoftLayer_Network_Subnet_IpAddress'
return self.connection.request(svc_ipaddress, 'getByIpAddress',
ip).object
def _to_lb_package(self, pkg):
try:
price_id = pkg['prices'][0]['id']
        except (KeyError, IndexError):
price_id = -1
capacity = int(pkg.get('capacity', 0))
return LBPackage(id=pkg['id'], name=pkg['keyName'],
description=pkg['description'],
price_id=price_id, capacity=capacity)
def _to_service_template(self, ip, port):
"""
Builds single member entry in Softlayer loadbalancer model
"""
template = {
'enabled': 1, # enable the service
'port': port, # back-end port
'ipAddressId': self._get_ipaddress(ip)['id'],
'healthChecks': [{
'healthCheckTypeId': 21 # default health check
}],
'groupReferences': [{
'weight': 1
}]
}
return template
def _to_balancer(self, lb):
ipaddress = lb['ipAddress']['ipAddress']
extra = {}
extra['connection_limit'] = lb['connectionLimit']
extra['ssl_active'] = lb['sslActiveFlag']
extra['ssl_enabled'] = lb['sslEnabledFlag']
extra['ha'] = lb['highAvailabilityFlag']
extra['datacenter'] = \
lb['loadBalancerHardware'][0]['datacenter']['name']
        # In Softlayer, there could be multiple groups of members (aka service
# groups), so retrieve the first one
vs = self._locate_service_group(lb, -1)
if vs:
port = vs['port']
if vs['serviceGroups']:
srvgrp = vs['serviceGroups'][0]
routing_method = srvgrp['routingMethod']['keyname']
routing_type = srvgrp['routingType']['keyname']
try:
extra['algorithm'] = self.\
_value_to_algorithm(routing_method)
                except Exception:
pass
extra['protocol'] = routing_type.lower()
if not vs:
port = -1
balancer = LoadBalancer(
id=lb['id'],
name='',
state=State.UNKNOWN,
ip=ipaddress,
port=port,
driver=self.connection.driver,
extra=extra
)
return balancer
def _to_member(self, srv, balancer=None):
svc_id = srv['id']
ip = srv['ipAddress']['ipAddress']
port = srv['port']
extra = {}
extra['status'] = srv['status']
extra['enabled'] = srv['enabled']
return Member(id=svc_id, ip=ip, port=port, balancer=balancer,
extra=extra)
|
|
#!/usr/bin/env python
import curses
import os
MAX_FILENAME_LENGTH = os.pathconf('.', 'PC_NAME_MAX')
def debug(text):
with open('/tmp/fileStuff.log', 'a') as f:
f.write(str(text) + '\n')
class Colors(object):
WHITE_ON_BLACK = 1
WHITE_ON_BLUE = 2
class TextScreen(object):
def __init__(self, screen):
self.screen = screen
self.screen.idlok(1)
self.screen.scrollok(True)
self.max_y, self.max_x = self.screen.getmaxyx()
self.max_y -= 1
self.current_buffer = []
self.current_view = [0, self.max_y]
self.reset_screen()
def add_line(self, text):
if self.cursor_y <= self.max_y:
self.cursor_y += 1
self.text_height += 1
self.screen.move(self.cursor_y, 0)
self.screen.addstr(text)
self.screen.move(self.cursor_y, 0)
else:
raise Exception("No more lines left!")
def replace_line_text(self, newText):
self.screen.move(self.cursor_y, 0)
self.screen.clrtoeol()
self.current_buffer[self.cursor_y] = newText
self.screen.attron(curses.color_pair(Colors.WHITE_ON_BLUE)) # Current line was already selected
self.screen.addstr(newText)
self.screen.attron(curses.color_pair(Colors.WHITE_ON_BLACK))
def get_line_text(self):
text = self.screen.instr().strip()
return text
def draw_buffer(self, buffer):
self.current_buffer = buffer
self.current_view = [0, self.max_y]
self.draw_view()
def draw_view(self):
self.reset_screen()
for line in self.current_buffer[self.current_view[0]:self.current_view[1]+1]:
self.add_line(line)
self.screen.refresh()
def select_line(self, to_y):
# First deselect the current text
from_y, _ = self.screen.getyx()
self.screen.move(from_y, 0)
self.screen.clrtoeol()
self.screen.attron(curses.color_pair(Colors.WHITE_ON_BLACK))
self.screen.addstr(self.current_buffer[self.current_view[0]+from_y])
# Set what our new line should be
self.cursor_y = to_y
# Now select current line
self.screen.move(to_y, 0)
self.screen.attron(curses.color_pair(Colors.WHITE_ON_BLUE))
self.screen.addstr(self.current_buffer[self.current_view[0]+to_y])
self.screen.move(to_y, 0)
# Set the color back for the next time we draw
self.screen.attron(curses.color_pair(Colors.WHITE_ON_BLACK))
self.screen.refresh()
def move_up(self):
# If we're at the top of the screen
if self.cursor_y > 0:
self.select_line(self.cursor_y - 1)
# If there's more buffer left above our current view
elif self.current_view[0] > 0:
self.current_view = [index-1 for index in self.current_view]
self.draw_view()
self.select_line(0)
def move_down(self):
# If we're within the current view and within the current buffer
if self.cursor_y < self.max_y and self.cursor_y < len(self.current_buffer)-1:
self.select_line(self.cursor_y + 1)
# If there's more left in our current buffer that's not shown in the view
elif self.current_view[1] < len(self.current_buffer)-1:
self.current_view = [index+1 for index in self.current_view]
self.draw_view()
self.select_line(self.cursor_y)
def reset_screen(self):
self.screen.erase()
self.cursor_y = -1
self.cursor_x = 0
self.text_height = -1
self.screen.move(0, 0)
self.screen.refresh()
class FileManager(object):
def __init__(self, directory=os.curdir, show_dirs=True, show_hidden=False):
self.files = self.generate_file_list(directory, show_dirs, show_hidden)
self.changes = {}
for entry in self.files:
self.changes[entry] = None
def generate_file_list(self, directory, show_dirs, show_hidden):
walker = os.walk(directory)
        current_dir, subdirs, files = next(walker)
file_list = []
# This is slightly awkward because we want dirs first in the list
if show_dirs:
if not directory == '/':
file_list.append('..')
for dir in sorted(subdirs):
if show_hidden or not dir.startswith('.'):
file_list.append('/{}'.format(dir))
for file_ in sorted(files):
if show_hidden or not file_.startswith('.'):
file_list.append(file_)
return file_list
def get_needed_digits(file_buffer):
return len(str(len(file_buffer)))
def next_number_generator(file_buffer, total_digits=None):
total_files = len(file_buffer)
if not total_digits:
total_digits = get_needed_digits(file_buffer)
for number in range(1, total_files+1):
# Probably a prettier way to do this...
yield ('{:0' + str(total_digits) + '}').format(number)
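        # One equivalent, arguably prettier, alternative (illustrative):
        #   yield '{:0{}d}'.format(number, total_digits)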
def create_command_bar(screen):
new_screen = curses.newwin(1, screen.max_x, screen.max_y-2, 0)
new_screen.addstr("hi")
new_screen.refresh()
def main(stdscr):
screen = TextScreen(stdscr)
    # Setup color and cursor
curses.start_color()
curses.init_pair(Colors.WHITE_ON_BLACK, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(Colors.WHITE_ON_BLUE, curses.COLOR_WHITE, curses.COLOR_BLUE)
screen.screen.attron(curses.color_pair(Colors.WHITE_ON_BLACK))
curses.curs_set(0)
dir = FileManager(os.getcwd(), show_dirs=False, show_hidden=False)
screen.draw_buffer(dir.files)
screen.select_line(0)
# Textbox?
# textbox = Textbox(screen.screen)
needed_digits = get_needed_digits(dir.files)
next_number = next_number_generator(dir.files, needed_digits)
while True:
c = stdscr.getch()
if c == ord('q'):
break
elif c == curses.KEY_UP:
screen.move_up()
elif c == curses.KEY_DOWN:
screen.move_down()
elif c == ord(' '):
old_text = screen.get_line_text()
            new_text = '{}_{}'.format(next(next_number), old_text)
dir.changes[old_text] = new_text
screen.replace_line_text(new_text)
elif c == ord('s'):
for old_name, new_name in dir.changes.items():
if new_name:
# Just letting exceptions bubble up for now...
os.rename(old_name, new_name)
break
# elif c == curses.KEY_NPAGE:
# screen.screen.scroll(1)
# elif c == curses.KEY_PPAGE:
# screen.screen.scroll(-1)
# else:
# textbox.do_command(c)
# if c == curses.KEY_UP:
# current_line -= 1
# screen.move(current_line, 0)
# elif c == curses.KEY_DOWN:
# current_line += 1
# screen.move(current_line, 0)
# text = textbox.gather()
# print(text)
debug('new instance')
curses.wrapper(main)
|
|
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APIClient
from openslides.agenda.models import Item, Speaker
from openslides.core.config import config
from openslides.core.models import CustomSlide, Projector
from openslides.utils.test import TestCase
class RetrieveItem(TestCase):
"""
Tests retrieving items.
"""
def setUp(self):
self.client = APIClient()
config['general_system_enable_anonymous'] = True
self.item = CustomSlide.objects.create(title='test_title_Idais2pheepeiz5uph1c').agenda_item
def test_normal_by_anonymous_without_perm_to_see_hidden_items(self):
group = get_user_model().groups.field.related_model.objects.get(pk=1) # Group with pk 1 is for anonymous users.
permission_string = 'agenda.can_see_hidden_items'
app_label, codename = permission_string.split('.')
permission = group.permissions.get(content_type__app_label=app_label, codename=codename)
group.permissions.remove(permission)
self.item.type = Item.AGENDA_ITEM
self.item.save()
response = self.client.get(reverse('item-detail', args=[self.item.pk]))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_hidden_by_anonymous_without_perm_to_see_hidden_items(self):
group = get_user_model().groups.field.related_model.objects.get(pk=1) # Group with pk 1 is for anonymous users.
permission_string = 'agenda.can_see_hidden_items'
app_label, codename = permission_string.split('.')
permission = group.permissions.get(content_type__app_label=app_label, codename=codename)
group.permissions.remove(permission)
response = self.client.get(reverse('item-detail', args=[self.item.pk]))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class ManageSpeaker(TestCase):
"""
Tests managing speakers.
"""
def setUp(self):
self.client = APIClient()
self.client.login(username='admin', password='admin')
self.item = CustomSlide.objects.create(title='test_title_aZaedij4gohn5eeQu8fe').agenda_item
self.user = get_user_model().objects.create_user(
username='test_user_jooSaex1bo5ooPhuphae',
password='test_password_e6paev4zeeh9n')
def test_add_oneself(self):
response = self.client.post(
reverse('item-manage-speaker', args=[self.item.pk]))
self.assertEqual(response.status_code, 200)
self.assertTrue(Speaker.objects.all().exists())
def test_add_oneself_twice(self):
Speaker.objects.add(get_user_model().objects.get(username='admin'), self.item)
response = self.client.post(
reverse('item-manage-speaker', args=[self.item.pk]))
self.assertEqual(response.status_code, 400)
def test_add_oneself_when_closed(self):
self.item.speaker_list_closed = True
self.item.save()
response = self.client.post(
reverse('item-manage-speaker', args=[self.item.pk]))
self.assertEqual(response.status_code, 400)
def test_remove_oneself(self):
Speaker.objects.add(get_user_model().objects.get(username='admin'), self.item)
response = self.client.delete(
reverse('item-manage-speaker', args=[self.item.pk]))
self.assertEqual(response.status_code, 200)
self.assertFalse(Speaker.objects.all().exists())
def test_remove_self_not_on_list(self):
response = self.client.delete(
reverse('item-manage-speaker', args=[self.item.pk]))
self.assertEqual(response.status_code, 400)
def test_add_someone_else(self):
response = self.client.post(
reverse('item-manage-speaker', args=[self.item.pk]),
{'user': self.user.pk})
self.assertEqual(response.status_code, 200)
self.assertTrue(Speaker.objects.filter(item=self.item, user=self.user).exists())
def test_invalid_data_string_instead_of_integer(self):
response = self.client.post(
reverse('item-manage-speaker', args=[self.item.pk]),
{'user': 'string_instead_of_integer'})
self.assertEqual(response.status_code, 400)
def test_invalid_data_user_does_not_exist(self):
# ID of a user that does not exist.
        # Be careful: we only assume that no such user exists; this is not verified here.
inexistent_user_pk = self.user.pk + 1000
response = self.client.post(
reverse('item-manage-speaker', args=[self.item.pk]),
{'user': inexistent_user_pk})
self.assertEqual(response.status_code, 400)
def test_add_someone_else_twice(self):
Speaker.objects.add(self.user, self.item)
response = self.client.post(
reverse('item-manage-speaker', args=[self.item.pk]),
{'user': self.user.pk})
self.assertEqual(response.status_code, 400)
def test_add_someone_else_non_admin(self):
admin = get_user_model().objects.get(username='admin')
group_staff = admin.groups.get(name='Staff')
group_delegates = type(group_staff).objects.get(name='Delegates')
admin.groups.add(group_delegates)
admin.groups.remove(group_staff)
response = self.client.post(
reverse('item-manage-speaker', args=[self.item.pk]),
{'user': self.user.pk})
self.assertEqual(response.status_code, 403)
def test_remove_someone_else(self):
speaker = Speaker.objects.add(self.user, self.item)
response = self.client.delete(
reverse('item-manage-speaker', args=[self.item.pk]),
{'speaker': speaker.pk})
self.assertEqual(response.status_code, 200)
self.assertFalse(Speaker.objects.filter(item=self.item, user=self.user).exists())
def test_remove_someone_else_not_on_list(self):
response = self.client.delete(
reverse('item-manage-speaker', args=[self.item.pk]),
{'speaker': '1'})
self.assertEqual(response.status_code, 400)
def test_remove_someone_else_invalid_data(self):
response = self.client.delete(
reverse('item-manage-speaker', args=[self.item.pk]),
{'speaker': 'invalid'})
self.assertEqual(response.status_code, 400)
def test_remove_someone_else_non_admin(self):
admin = get_user_model().objects.get(username='admin')
group_staff = admin.groups.get(name='Staff')
group_delegates = type(group_staff).objects.get(name='Delegates')
admin.groups.add(group_delegates)
admin.groups.remove(group_staff)
speaker = Speaker.objects.add(self.user, self.item)
response = self.client.delete(
reverse('item-manage-speaker', args=[self.item.pk]),
{'speaker': speaker.pk})
self.assertEqual(response.status_code, 403)
class Speak(TestCase):
"""
Tests view to begin or end speech.
"""
def setUp(self):
self.client = APIClient()
self.client.login(username='admin', password='admin')
self.item = CustomSlide.objects.create(title='test_title_KooDueco3zaiGhiraiho').agenda_item
self.user = get_user_model().objects.create_user(
username='test_user_Aigh4vohb3seecha4aa4',
password='test_password_eneupeeVo5deilixoo8j')
def test_begin_speech(self):
Speaker.objects.add(self.user, self.item)
speaker = Speaker.objects.add(get_user_model().objects.get(username='admin'), self.item)
self.assertTrue(Speaker.objects.get(pk=speaker.pk).begin_time is None)
response = self.client.put(
reverse('item-speak', args=[self.item.pk]),
{'speaker': speaker.pk})
self.assertEqual(response.status_code, 200)
self.assertFalse(Speaker.objects.get(pk=speaker.pk).begin_time is None)
def test_begin_speech_next_speaker(self):
speaker = Speaker.objects.add(self.user, self.item)
Speaker.objects.add(get_user_model().objects.get(username='admin'), self.item)
response = self.client.put(reverse('item-speak', args=[self.item.pk]))
self.assertEqual(response.status_code, 200)
self.assertFalse(Speaker.objects.get(pk=speaker.pk).begin_time is None)
def test_begin_speech_invalid_speaker_id(self):
response = self.client.put(
reverse('item-speak', args=[self.item.pk]),
{'speaker': '1'})
self.assertEqual(response.status_code, 400)
def test_begin_speech_invalid_data(self):
response = self.client.put(
reverse('item-speak', args=[self.item.pk]),
{'speaker': 'invalid'})
self.assertEqual(response.status_code, 400)
def test_end_speech(self):
speaker = Speaker.objects.add(get_user_model().objects.get(username='admin'), self.item)
speaker.begin_speech()
self.assertFalse(Speaker.objects.get(pk=speaker.pk).begin_time is None)
self.assertTrue(Speaker.objects.get(pk=speaker.pk).end_time is None)
response = self.client.delete(reverse('item-speak', args=[self.item.pk]))
self.assertEqual(response.status_code, 200)
self.assertFalse(Speaker.objects.get(pk=speaker.pk).end_time is None)
def test_end_speech_no_current_speaker(self):
response = self.client.delete(reverse('item-speak', args=[self.item.pk]))
self.assertEqual(response.status_code, 400)
def test_begin_speech_with_countdown(self):
config['agenda_couple_countdown_and_speakers'] = True
projector = Projector.objects.get(pk=1)
projector.config['03e87dea9c3f43c88b756c06a4c044fb'] = {
'name': 'core/countdown',
'status': 'stop',
'visible': True,
'default': 60,
'countdown_time': 60,
'stable': True,
'index': 0
}
projector.save()
Speaker.objects.add(self.user, self.item)
speaker = Speaker.objects.add(get_user_model().objects.get(username='admin'), self.item)
self.client.put(
reverse('item-speak', args=[self.item.pk]),
{'speaker': speaker.pk})
for key, value in Projector.objects.get().config.items():
if value['name'] == 'core/countdown':
self.assertEqual(value['status'], 'running')
success = True
break
else:
success = False
self.assertTrue(success)
def test_end_speech_with_countdown(self):
config['agenda_couple_countdown_and_speakers'] = True
projector = Projector.objects.get(pk=1)
projector.config['03e87dea9c3f43c88b756c06a4c044fb'] = {
'name': 'core/countdown',
'status': 'stop',
'visible': True,
'default': 60,
'countdown_time': 60,
'stable': True,
'index': 0
}
projector.save()
speaker = Speaker.objects.add(get_user_model().objects.get(username='admin'), self.item)
speaker.begin_speech()
self.client.delete(reverse('item-speak', args=[self.item.pk]))
for key, value in Projector.objects.get().config.items():
if value['name'] == 'core/countdown':
self.assertEqual(value['status'], 'stop')
success = True
break
else:
success = False
self.assertTrue(success)
class Numbering(TestCase):
"""
Tests view to number the agenda
"""
def setUp(self):
self.client = APIClient()
self.client.login(username='admin', password='admin')
self.item_1 = CustomSlide.objects.create(title='test_title_thuha8eef7ohXar3eech').agenda_item
self.item_1.type = Item.AGENDA_ITEM
self.item_1.weight = 1
self.item_1.save()
self.item_2 = CustomSlide.objects.create(title='test_title_eisah7thuxa1eingaeLo').agenda_item
self.item_2.type = Item.AGENDA_ITEM
self.item_2.weight = 2
self.item_2.save()
self.item_2_1 = CustomSlide.objects.create(title='test_title_Qui0audoaz5gie1phish').agenda_item
self.item_2_1.type = Item.AGENDA_ITEM
self.item_2_1.parent = self.item_2
self.item_2_1.save()
self.item_3 = CustomSlide.objects.create(title='test_title_ah7tphisheineisgaeLo').agenda_item
self.item_3.type = Item.AGENDA_ITEM
self.item_3.weight = 3
self.item_3.save()
def test_numbering(self):
response = self.client.post(reverse('item-numbering'))
self.assertEqual(response.status_code, 200)
self.assertEqual(Item.objects.get(pk=self.item_1.pk).item_number, '1')
self.assertEqual(Item.objects.get(pk=self.item_2.pk).item_number, '2')
self.assertEqual(Item.objects.get(pk=self.item_2_1.pk).item_number, '2.1')
self.assertEqual(Item.objects.get(pk=self.item_3.pk).item_number, '3')
def test_roman_numbering(self):
config['agenda_numeral_system'] = 'roman'
response = self.client.post(reverse('item-numbering'))
self.assertEqual(response.status_code, 200)
self.assertEqual(Item.objects.get(pk=self.item_1.pk).item_number, 'I')
self.assertEqual(Item.objects.get(pk=self.item_2.pk).item_number, 'II')
self.assertEqual(Item.objects.get(pk=self.item_2_1.pk).item_number, 'II.1')
self.assertEqual(Item.objects.get(pk=self.item_3.pk).item_number, 'III')
def test_with_hidden_item(self):
self.item_2.type = Item.HIDDEN_ITEM
self.item_2.save()
response = self.client.post(reverse('item-numbering'))
self.assertEqual(response.status_code, 200)
self.assertEqual(Item.objects.get(pk=self.item_1.pk).item_number, '1')
self.assertEqual(Item.objects.get(pk=self.item_2.pk).item_number, '')
self.assertEqual(Item.objects.get(pk=self.item_2_1.pk).item_number, '')
self.assertEqual(Item.objects.get(pk=self.item_3.pk).item_number, '2')
def test_reset_numbering_with_hidden_item(self):
self.item_2.item_number = 'test_number_Cieghae6ied5ool4hiem'
self.item_2.type = Item.HIDDEN_ITEM
self.item_2.save()
self.item_2_1.item_number = 'test_number_roQueTohg7fe1Is7aemu'
self.item_2_1.save()
response = self.client.post(reverse('item-numbering'))
self.assertEqual(response.status_code, 200)
self.assertEqual(Item.objects.get(pk=self.item_1.pk).item_number, '1')
self.assertEqual(Item.objects.get(pk=self.item_2.pk).item_number, '')
self.assertEqual(Item.objects.get(pk=self.item_2_1.pk).item_number, '')
self.assertEqual(Item.objects.get(pk=self.item_3.pk).item_number, '2')
|
|
import unittest
import os
import time
import pg8000
import datetime
from .connection_settings import db_connect
from sys import exc_info
from pg8000.six import b, IS_JYTHON
from distutils.version import LooseVersion
# DBAPI compatible interface tests
class Tests(unittest.TestCase):
def setUp(self):
self.db = pg8000.connect(**db_connect)
# Jython 2.5.3 doesn't have a time.tzset() so skip
if not IS_JYTHON:
os.environ['TZ'] = "UTC"
time.tzset()
try:
c = self.db.cursor()
try:
c = self.db.cursor()
c.execute("DROP TABLE t1")
except pg8000.DatabaseError:
e = exc_info()[1]
# the only acceptable error is:
self.assertEqual(e.args[1], '42P01') # table does not exist
self.db.rollback()
c.execute(
"CREATE TEMPORARY TABLE t1 "
"(f1 int primary key, f2 int not null, f3 varchar(50) null)")
c.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(1, 1, None))
c.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(2, 10, None))
c.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(3, 100, None))
c.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(4, 1000, None))
c.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(5, 10000, None))
self.db.commit()
finally:
c.close()
def tearDown(self):
self.db.close()
def testParallelQueries(self):
try:
c1 = self.db.cursor()
c2 = self.db.cursor()
c1.execute("SELECT f1, f2, f3 FROM t1")
while 1:
row = c1.fetchone()
if row is None:
break
f1, f2, f3 = row
c2.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > %s", (f1,))
while 1:
row = c2.fetchone()
if row is None:
break
f1, f2, f3 = row
finally:
c1.close()
c2.close()
self.db.rollback()
def testQmark(self):
orig_paramstyle = pg8000.paramstyle
try:
pg8000.paramstyle = "qmark"
c1 = self.db.cursor()
c1.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > ?", (3,))
while 1:
row = c1.fetchone()
if row is None:
break
f1, f2, f3 = row
self.db.rollback()
finally:
pg8000.paramstyle = orig_paramstyle
c1.close()
def testNumeric(self):
orig_paramstyle = pg8000.paramstyle
try:
pg8000.paramstyle = "numeric"
c1 = self.db.cursor()
c1.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > :1", (3,))
while 1:
row = c1.fetchone()
if row is None:
break
f1, f2, f3 = row
self.db.rollback()
finally:
pg8000.paramstyle = orig_paramstyle
c1.close()
def testNamed(self):
orig_paramstyle = pg8000.paramstyle
try:
pg8000.paramstyle = "named"
c1 = self.db.cursor()
c1.execute(
"SELECT f1, f2, f3 FROM t1 WHERE f1 > :f1", {"f1": 3})
while 1:
row = c1.fetchone()
if row is None:
break
f1, f2, f3 = row
self.db.rollback()
finally:
pg8000.paramstyle = orig_paramstyle
c1.close()
def testFormat(self):
orig_paramstyle = pg8000.paramstyle
try:
pg8000.paramstyle = "format"
c1 = self.db.cursor()
c1.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > %s", (3,))
while 1:
row = c1.fetchone()
if row is None:
break
f1, f2, f3 = row
self.db.commit()
finally:
pg8000.paramstyle = orig_paramstyle
c1.close()
def testPyformat(self):
orig_paramstyle = pg8000.paramstyle
try:
pg8000.paramstyle = "pyformat"
c1 = self.db.cursor()
c1.execute(
"SELECT f1, f2, f3 FROM t1 WHERE f1 > %(f1)s", {"f1": 3})
while 1:
row = c1.fetchone()
if row is None:
break
f1, f2, f3 = row
self.db.commit()
finally:
pg8000.paramstyle = orig_paramstyle
c1.close()
def testArraysize(self):
try:
c1 = self.db.cursor()
c1.arraysize = 3
c1.execute("SELECT * FROM t1")
retval = c1.fetchmany()
self.assertEqual(len(retval), c1.arraysize)
finally:
c1.close()
self.db.commit()
def testDate(self):
val = pg8000.Date(2001, 2, 3)
self.assertEqual(val, datetime.date(2001, 2, 3))
def testTime(self):
val = pg8000.Time(4, 5, 6)
self.assertEqual(val, datetime.time(4, 5, 6))
def testTimestamp(self):
val = pg8000.Timestamp(2001, 2, 3, 4, 5, 6)
self.assertEqual(val, datetime.datetime(2001, 2, 3, 4, 5, 6))
def testDateFromTicks(self):
if IS_JYTHON:
return
val = pg8000.DateFromTicks(1173804319)
self.assertEqual(val, datetime.date(2007, 3, 13))
def testTimeFromTicks(self):
if IS_JYTHON:
return
val = pg8000.TimeFromTicks(1173804319)
self.assertEqual(val, datetime.time(16, 45, 19))
def testTimestampFromTicks(self):
if IS_JYTHON:
return
val = pg8000.TimestampFromTicks(1173804319)
self.assertEqual(val, datetime.datetime(2007, 3, 13, 16, 45, 19))
def testBinary(self):
v = pg8000.Binary(b("\x00\x01\x02\x03\x02\x01\x00"))
self.assertEqual(v, b("\x00\x01\x02\x03\x02\x01\x00"))
self.assertTrue(isinstance(v, pg8000.BINARY))
def testRowCount(self):
try:
c1 = self.db.cursor()
c1.execute("SELECT * FROM t1")
# Before PostgreSQL 9 we don't know the row count for a select
if self.db._server_version > LooseVersion('8.0.0'):
self.assertEqual(5, c1.rowcount)
c1.execute("UPDATE t1 SET f3 = %s WHERE f2 > 101", ("Hello!",))
self.assertEqual(2, c1.rowcount)
c1.execute("DELETE FROM t1")
self.assertEqual(5, c1.rowcount)
finally:
c1.close()
self.db.commit()
def testFetchMany(self):
try:
cursor = self.db.cursor()
cursor.arraysize = 2
cursor.execute("SELECT * FROM t1")
self.assertEqual(2, len(cursor.fetchmany()))
self.assertEqual(2, len(cursor.fetchmany()))
self.assertEqual(1, len(cursor.fetchmany()))
self.assertEqual(0, len(cursor.fetchmany()))
finally:
cursor.close()
self.db.commit()
def testIterator(self):
from warnings import filterwarnings
filterwarnings("ignore", "DB-API extension cursor.next()")
filterwarnings("ignore", "DB-API extension cursor.__iter__()")
try:
cursor = self.db.cursor()
cursor.execute("SELECT * FROM t1 ORDER BY f1")
f1 = 0
for row in cursor:
next_f1 = row[0]
assert next_f1 > f1
f1 = next_f1
        finally:
cursor.close()
self.db.commit()
# Vacuum can't be run inside a transaction, so we need to turn
# autocommit on.
def testVacuum(self):
self.db.autocommit = True
try:
cursor = self.db.cursor()
cursor.execute("vacuum")
finally:
cursor.close()
# If autocommit is on and we do an operation that returns more rows than
    # the cache holds, make sure an exception is raised.
def testAutocommitMaxRows(self):
self.db.autocommit = True
try:
cursor = self.db.cursor()
self.assertRaises(
pg8000.InterfaceError, cursor.execute,
"select generate_series(1, " +
str(pg8000.core.Connection._row_cache_size + 1) + ")")
finally:
cursor.close()
if __name__ == "__main__":
unittest.main()
|
|
# -*- coding: utf-8 -*-
import json
from os import path
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from gluon.http import redirect
from s3 import FS, ICON, S3CustomController
from s3theme import formstyle_foundation_inline
THEME = "historic.RW"
# =============================================================================
class index(S3CustomController):
""" Custom Home Page """
def __call__(self):
output = {}
T = current.T
request = current.request
response = current.response
s3 = response.s3
# Check logged in and permissions
auth = current.auth
settings = current.deployment_settings
roles = current.session.s3.roles
system_roles = auth.get_system_roles()
AUTHENTICATED = system_roles.AUTHENTICATED
# Login/Registration forms
self_registration = settings.get_security_registration_visible()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
# Project Links
project_links = DIV(_class="title-links hide-for-small")
project_description = settings.get_frontpage("project_description")
if project_description:
project_links.append(A(ICON("link"), T("Project Description"),
_class = "action-lnk",
_href = project_description,
_target = "_blank",
))
project_links.append(A(ICON("link"), T("User Manual"),
_class = "action-lnk",
_href = URL(c="default", f="index",
args = ["docs"],
vars = {"name": "UserManual"},
),
_target = "_blank",
))
mailing_list = settings.get_frontpage("mailing_list")
if mailing_list:
project_links.append(A(ICON("link"), T("Mailing List"),
_class = "action-lnk",
_href = mailing_list,
_target = "_blank",
))
# Contact Form
request_email = settings.get_frontpage("request_email")
if request_email:
from gluon import IS_NOT_EMPTY, SQLFORM
from s3dal import Field
fields = [Field("name",
label="Your name",
requires=IS_NOT_EMPTY(),
),
Field("address",
label="Your e-mail address",
requires=IS_NOT_EMPTY(),
),
Field("subject",
label="Subject",
requires=IS_NOT_EMPTY(),
),
Field("message", "text",
label="Message",
requires=IS_NOT_EMPTY(),
),
]
from s3 import s3_mark_required
labels, required = s3_mark_required(fields)
s3.has_required = required
response.form_label_separator = ""
contact_form = SQLFORM.factory(formstyle = settings.get_ui_formstyle(),
submit_button = T("Submit"),
labels = labels,
separator = "",
table_name = "contact", # Dummy table name
_id = "mailform",
*fields
)
if contact_form.accepts(request.post_vars,
current.session,
formname="contact_form",
keepvalues=False,
hideerror=False):
                # Process Contact Form
form_vars = contact_form.vars
sender = "%s <%s>" % (form_vars.name, form_vars.address)
result = current.msg.send_email(to=request_email,
sender=sender,
subject=form_vars.subject,
message=form_vars.message,
reply_to=form_vars.address,
)
if result:
response.confirmation = "Thank you for your message - we'll be in touch shortly"
if s3.cdn:
if s3.debug:
s3.scripts.append("http://ajax.aspnetcdn.com/ajax/jquery.validate/1.9/jquery.validate.js")
else:
s3.scripts.append("http://ajax.aspnetcdn.com/ajax/jquery.validate/1.9/jquery.validate.min.js")
else:
if s3.debug:
s3.scripts.append("/%s/static/scripts/jquery.validate.js" % request.application)
else:
s3.scripts.append("/%s/static/scripts/jquery.validate.min.js" % request.application)
validation_script = '''
$('#mailform').validate({
errorClass:'req',
rules:{
name:{
required:true
},
address: {
required:true,
email:true
},
subject:{
required:true
},
message:{
required:true
}
},
messages:{
name:"Enter your name",
subject:"Enter a subject",
message:"Enter a message",
address:{
required:"Please enter a valid email address",
email:"Please enter a valid email address"
}
},
errorPlacement:function(error,element){
error.appendTo(element.parents('div.controls'))
},
submitHandler:function(form){
form.submit()
}
})'''
s3.jquery_ready.append(validation_script)
else:
contact_form = ""
if AUTHENTICATED not in roles:
login_buttons = DIV(A(T("Login"),
_id="show-login",
_class="tiny secondary button"),
_id="login-buttons"
)
script = '''
$('#show-mailform').click(function(e){
e.preventDefault()
$('#login_box').fadeOut(function(){$('#intro').fadeIn()})
})
$('#show-login').click(function(e){
e.preventDefault()
$('#login_form').show()
$('#register_form').hide()
$('#intro').fadeOut(function(){$('#login_box').fadeIn()})
})'''
s3.jquery_ready.append(script)
# This user isn't yet logged-in
if request.cookies.has_key("registered"):
# This browser has logged-in before
registered = True
if self_registration is True:
# Provide a Registration box on front page
login_buttons.append(A(T("Register"),
_id="show-register",
_class="tiny secondary button",
_style="margin-left:5px"))
script = '''
$('#show-register').click(function(e){
e.preventDefault()
$('#login_form').hide()
$('#register_form').show()
$('#intro').fadeOut(function(){$('#login_box').fadeIn()})
})'''
s3.jquery_ready.append(script)
register_form = auth.register()
register_div = DIV(H3(T("Register")),
P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
dict(sign_up_now=B(T("sign-up now"))))))
register_script = '''
$('#register-btn').click(function(e){
e.preventDefault()
$('#login_form').fadeOut(function(){$('#register_form').fadeIn()})
})
$('#login-btn').click(function(e){
e.preventDefault()
$('#register_form').fadeOut(function(){$('#login_form').fadeIn()})
})'''
s3.jquery_ready.append(register_script)
# Provide a login box on front page
auth.messages.submit_button = T("Login")
login_form = auth.login(inline=True)
login_div = DIV(H3(T("Login")),
P(XML(T("Registered users can %(login)s to access the system") % \
dict(login=B(T("login"))))))
else:
login_buttons = ""
# Create output dict
output = {"login_buttons": login_buttons,
"self_registration": self_registration,
"registered": registered,
"login_div": login_div,
"login_form": login_form,
"register_div": register_div,
"register_form": register_form,
"contact_form": contact_form,
"project_links": project_links,
}
# Count records (@todo: provide total/updated counters?)
s3db = current.s3db
db = current.db
# Organisations
table = s3db.org_organisation
query = (table.deleted != True)
count = table.id.count()
row = db(query).select(count).first()
output["total_organisations"] = row[count]
# Service Locations (@todo)
#table = s3db.org_service_location
#query = (table.deleted != True)
#count = table.id.count()
#row = db(query).select(count).first()
output["total_services"] = 0 #row[count]
# Needs lists
table = s3db.req_organisation_needs
query = (table.deleted != True)
count = table.id.count()
row = db(query).select(count).first()
output["total_needs"] = row[count]
# Frontpage Feed Control
if settings.frontpage.rss:
s3.external_stylesheets.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.css")
s3.scripts.append("http://www.google.com/jsapi?key=notsupplied-wizard")
s3.scripts.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.js")
counter = 0
feeds = ""
for feed in settings.frontpage.rss:
counter += 1
feeds = "".join((feeds,
"{title:'%s',\n" % feed["title"],
"url:'%s'}" % feed["url"]))
# Don't add a trailing comma for old IEs
if counter != len(settings.frontpage.rss):
feeds += ",\n"
# feedCycleTime: milliseconds before feed is reloaded (5 minutes)
feed_control = "".join(('''
function LoadDynamicFeedControl(){
var feeds=[
''', feeds, '''
]
var options={
feedCycleTime:300000,
numResults:5,
stacked:true,
horizontal:false,
title:"''', str(T("News")), '''"
}
new GFdynamicFeedControl(feeds,'feed-control',options)
}
google.load('feeds','1')
google.setOnLoadCallback(LoadDynamicFeedControl)'''))
s3.js_global.append(feed_control)
s3.stylesheets.append("../themes/RW/homepage.css")
self._view(THEME, "index.html")
return output
# =============================================================================
class docs(S3CustomController):
"""
Custom controller to display online documentation, accessible
to anonymous users (e.g. information on how to register/login)
"""
def __call__(self):
response = current.response
def prep(r):
default_url = URL(f="index", args=[], vars={})
return current.s3db.cms_documentation(r, "HELP", default_url)
response.s3.prep = prep
output = current.rest_controller("cms", "post")
# Custom view
self._view("RW", "docs.html")
current.menu.dashboard = None
return output
# END =========================================================================
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Gouthaman Balaraman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""
Support for SQLAlchemy. Provides SQLAlchemyTarget for storing in databases
supported by SQLAlchemy. The user is responsible for installing the
required database driver to connect using SQLAlchemy.
A minimal example of a job that copies data to a database using SQLAlchemy is
shown below:
.. code-block:: python
from sqlalchemy import String
import luigi
from luigi.contrib import sqla
class SQLATask(sqla.CopyToTable):
# columns defines the table schema, with each element corresponding
# to a column in the format (args, kwargs) which will be sent to
# the sqlalchemy.Column(*args, **kwargs)
columns = [
(["item", String(64)], {"primary_key": True}),
(["property", String(64)], {})
]
connection_string = "sqlite://" # in memory SQLite database
table = "item_property" # name of the table to store data
def rows(self):
for row in [("item1", "property1"), ("item2", "property2")]:
yield row
if __name__ == '__main__':
task = SQLATask()
luigi.build([task], local_scheduler=True)
If the target table to which the data needs to be copied already exists, then
the column schema definition can be skipped and the reflect flag
can instead be set to True. Here is a modified version of the above example:
.. code-block:: python
from sqlalchemy import String
import luigi
from luigi.contrib import sqla
class SQLATask(sqla.CopyToTable):
# If database table is already created, then the schema can be loaded
# by setting the reflect flag to True
reflect = True
connection_string = "sqlite://" # in memory SQLite database
table = "item_property" # name of the table to store data
def rows(self):
for row in [("item1", "property1"), ("item2", "property2")]:
yield row
if __name__ == '__main__':
task = SQLATask()
luigi.build([task], local_scheduler=True)
In the above examples, the data to be copied was provided directly by
overriding the rows method. Alternatively, if the data comes from another task,
the modified example would look as shown below:
.. code-block:: python
from sqlalchemy import String
import luigi
from luigi.contrib import sqla
from luigi.mock import MockFile
class BaseTask(luigi.Task):
def output(self):
return MockFile("BaseTask")
def run(self):
out = self.output().open("w")
TASK_LIST = ["item%d\\tproperty%d\\n" % (i, i) for i in range(10)]
for task in TASK_LIST:
out.write(task)
out.close()
class SQLATask(sqla.CopyToTable):
# columns defines the table schema, with each element corresponding
# to a column in the format (args, kwargs) which will be sent to
# the sqlalchemy.Column(*args, **kwargs)
columns = [
(["item", String(64)], {"primary_key": True}),
(["property", String(64)], {})
]
connection_string = "sqlite://" # in memory SQLite database
table = "item_property" # name of the table to store data
def requires(self):
return BaseTask()
if __name__ == '__main__':
task1, task2 = SQLATask(), BaseTask()
luigi.build([task1, task2], local_scheduler=True)
In the above example, the output from `BaseTask` is copied into the
database. Here we did not have to implement the `rows` method because
the default `rows` implementation assumes every line is a row with
column values separated by a tab. One can set the `column_separator`
option on the task if the values are, say, comma-separated instead of
tab-separated.
You can pass in database-specific connection arguments by setting the connect_args
dictionary. The options will be passed directly to the DBAPI's connect method as
keyword arguments.
The other option to `sqla.CopyToTable` that can help with performance is
`chunk_size`. The default is 5000; this is the number of rows inserted per
transaction. Depending on the size of the inserts, this value can be tuned
for performance, as illustrated in the example below.
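For illustration, a subclass might set these options as follows (the connect_args
value shown applies to the SQLite driver and is only an example):
.. code-block:: python
from sqlalchemy import String
import luigi
from luigi.contrib import sqla
class TunedSQLATask(sqla.CopyToTable):
columns = [
(["item", String(64)], {"primary_key": True}),
(["property", String(64)], {})
]
connection_string = "sqlite://"  # in memory SQLite database
table = "item_property"  # name of the table to store data
connect_args = {"timeout": 30}  # passed to the DBAPI connect()
chunk_size = 1000  # rows inserted per transaction
def rows(self):
for row in [("item1", "property1"), ("item2", "property2")]:
yield row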
See here for a `tutorial on building task pipelines using luigi
<http://gouthamanbalaraman.com/blog/building-luigi-task-pipeline.html>`_ and
using `SQLAlchemy in workflow pipelines <http://gouthamanbalaraman.com/blog/sqlalchemy-luigi-workflow-pipeline.html>`_.
Author: Gouthaman Balaraman
Date: 01/02/2015
"""
import abc
import collections
import datetime
import itertools
import logging
import luigi
import os
import sqlalchemy
class SQLAlchemyTarget(luigi.Target):
"""
Database target using SQLAlchemy.
This will rarely have to be directly instantiated by the user.
Typical usage would be to override `luigi.contrib.sqla.CopyToTable` class
to create a task to write to the database.
"""
marker_table = None
_engine_dict = {} # dict of sqlalchemy engine instances
Connection = collections.namedtuple("Connection", "engine pid")
def __init__(self, connection_string, target_table, update_id, echo=False, connect_args={}):
"""
Constructor for the SQLAlchemyTarget.
:param connection_string: SQLAlchemy connection string
:type connection_string: str
:param target_table: The table name for the data
:type target_table: str
:param update_id: An identifier for this data set
:type update_id: str
:param echo: Flag to setup SQLAlchemy logging
:type echo: bool
:param connect_args: A dictionary of connection arguments
:type connect_args: dict
:return:
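Example (illustrative values only)::
target = SQLAlchemyTarget(
connection_string="sqlite://",
target_table="item_property",
update_id="copy_to_item_property_2015_01_02")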
"""
self.target_table = target_table
self.update_id = update_id
self.connection_string = connection_string
self.echo = echo
self.connect_args = connect_args
self.marker_table_bound = None
@property
def engine(self):
"""
Return an engine instance, creating it if it doesn't exist.
Recreate the engine connection if it wasn't originally created
by the current process.
"""
pid = os.getpid()
conn = SQLAlchemyTarget._engine_dict.get(self.connection_string)
if not conn or conn.pid != pid:
# create and reset connection
engine = sqlalchemy.create_engine(
self.connection_string,
connect_args=self.connect_args,
echo=self.echo
)
SQLAlchemyTarget._engine_dict[self.connection_string] = self.Connection(engine, pid)
return SQLAlchemyTarget._engine_dict[self.connection_string].engine
def touch(self):
"""
Mark this update as complete.
"""
if self.marker_table_bound is None:
self.create_marker_table()
table = self.marker_table_bound
id_exists = self.exists()
with self.engine.begin() as conn:
if not id_exists:
ins = table.insert().values(update_id=self.update_id, target_table=self.target_table,
inserted=datetime.datetime.now())
else:
ins = table.update().where(sqlalchemy.and_(table.c.update_id == self.update_id,
table.c.target_table == self.target_table)).\
values(update_id=self.update_id, target_table=self.target_table,
inserted=datetime.datetime.now())
conn.execute(ins)
assert self.exists()
def exists(self):
row = None
if self.marker_table_bound is None:
self.create_marker_table()
with self.engine.begin() as conn:
table = self.marker_table_bound
s = sqlalchemy.select([table]).where(sqlalchemy.and_(table.c.update_id == self.update_id,
table.c.target_table == self.target_table)).limit(1)
row = conn.execute(s).fetchone()
return row is not None
def create_marker_table(self):
"""
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
"""
if self.marker_table is None:
self.marker_table = luigi.configuration.get_config().get('sqlalchemy', 'marker-table', 'table_updates')
engine = self.engine
with engine.begin() as con:
metadata = sqlalchemy.MetaData()
if not con.dialect.has_table(con, self.marker_table):
self.marker_table_bound = sqlalchemy.Table(
self.marker_table, metadata,
sqlalchemy.Column("update_id", sqlalchemy.String(128), primary_key=True),
sqlalchemy.Column("target_table", sqlalchemy.String(128)),
sqlalchemy.Column("inserted", sqlalchemy.DateTime, default=datetime.datetime.now()))
metadata.create_all(engine)
else:
metadata.reflect(bind=engine)
self.marker_table_bound = metadata.tables[self.marker_table]
def open(self, mode):
raise NotImplementedError("Cannot open() SQLAlchemyTarget")
class CopyToTable(luigi.Task):
"""
An abstract task for inserting a data set into an RDBMS via SQLAlchemy.
Usage:
* subclass and override the required `connection_string`, `table` and `columns` attributes.
"""
_logger = logging.getLogger('luigi-interface')
echo = False
connect_args = {}
@abc.abstractproperty
def connection_string(self):
return None
@abc.abstractproperty
def table(self):
return None
# specify the columns that define the schema. The format for the columns is a list
# of tuples. For example:
# columns = [
# (["id", sqlalchemy.Integer], dict(primary_key=True)),
# (["name", sqlalchemy.String(64)], {}),
# (["value", sqlalchemy.String(64)], {})
# ]
# The tuple (args_list, kwargs_dict) here is the args and kwargs
# that need to be passed to sqlalchemy.Column(*args, **kwargs).
# If the tables have already been set up by another process, then you can
# completely ignore the columns. Instead set the reflect flag to True below.
columns = []
# options
column_separator = "\t"  # how columns are separated in the input file copied into the database
chunk_size = 5000 # default chunk size for insert
reflect = False # Set this to true only if the table has already been created by alternate means
def create_table(self, engine):
"""
Override to provide code for creating the target table.
By default the table will be created using the types specified in columns.
If the table already exists, this method binds to the existing table.
If overridden, use the provided engine for setting up the table so that
the table creation and the data insertion use the same transaction
(see the example override below).
:param engine: The sqlalchemy engine instance
:type engine: object
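Example override (a sketch only; the column names and types shown are hypothetical)::
def create_table(self, engine):
metadata = sqlalchemy.MetaData()
self.table_bound = sqlalchemy.Table(
self.table, metadata,
sqlalchemy.Column("item", sqlalchemy.String(64), primary_key=True),
sqlalchemy.Column("property", sqlalchemy.String(64)))
metadata.create_all(engine)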
"""
def construct_sqla_columns(columns):
retval = [sqlalchemy.Column(*c[0], **c[1]) for c in columns]
return retval
needs_setup = (len(self.columns) == 0) or (False in [len(c) == 2 for c in self.columns]) if not self.reflect else False
if needs_setup:
# only names of columns specified, no types
raise NotImplementedError("create_table() not implemented for %r and column types not specified" % self.table)
else:
# if columns is specified as (name, type) tuples
with engine.begin() as con:
metadata = sqlalchemy.MetaData()
try:
if not con.dialect.has_table(con, self.table):
sqla_columns = construct_sqla_columns(self.columns)
self.table_bound = sqlalchemy.Table(self.table, metadata, *sqla_columns)
metadata.create_all(engine)
else:
metadata.reflect(bind=engine)
self.table_bound = metadata.tables[self.table]
except Exception as e:
self._logger.exception(self.table + str(e))
def update_id(self):
"""
This update id will be a unique identifier for this insert on this table.
"""
return self.task_id
def output(self):
return SQLAlchemyTarget(
connection_string=self.connection_string,
target_table=self.table,
update_id=self.update_id(),
connect_args=self.connect_args,
echo=self.echo)
def rows(self):
"""
Return/yield tuples or lists corresponding to each row to be inserted.
This method can be overridden for custom file types or formats.
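Example override yielding fixed rows (for illustration only)::
def rows(self):
yield ("item1", "property1")
yield ("item2", "property2")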
"""
with self.input().open('r') as fobj:
for line in fobj:
yield line.strip("\n").split(self.column_separator)
def run(self):
self._logger.info("Running task copy to table for update id %s for table %s" % (self.update_id(), self.table))
output = self.output()
engine = output.engine
self.create_table(engine)
with engine.begin() as conn:
rows = iter(self.rows())
ins_rows = [dict(zip(("_" + c.key for c in self.table_bound.c), row))
for row in itertools.islice(rows, self.chunk_size)]
while ins_rows:
self.copy(conn, ins_rows, self.table_bound)
ins_rows = [dict(zip(("_" + c.key for c in self.table_bound.c), row))
for row in itertools.islice(rows, self.chunk_size)]
self._logger.info("Finished inserting %d rows into SQLAlchemy target" % len(ins_rows))
output.touch()
self._logger.info("Finished inserting rows into SQLAlchemy target")
def copy(self, conn, ins_rows, table_bound):
"""
This method does the actual insertion of the rows of data given by ins_rows into the
database. A task that needs row updates instead of insertions should override this method.
:param conn: The sqlalchemy connection object
:param ins_rows: The rows to insert, as a list of dictionaries whose keys are in the
format _<column_name>. For example, if you have a table with a column named "property",
then the key in each dictionary would be "_property". This format is consistent with
the bindparam usage in sqlalchemy (see the example below).
:param table_bound: The object referring to the table
:return:
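For example, a row for a hypothetical table with columns "item" and "property"
would be represented as::
{"_item": "item1", "_property": "property1"}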
"""
bound_cols = dict((c, sqlalchemy.bindparam("_" + c.key)) for c in table_bound.columns)
ins = table_bound.insert().values(bound_cols)
conn.execute(ins, ins_rows)
#
# Copyright 2014-2015 University of Southern California
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
#
from . import np as numpylib
import re
import datetime
import sys
import os
import random
import numpy as np
from numpy import array, float32, int32, empty, newaxis, dot, cross, zeros, ones
from numpy.linalg import norm
import scipy
from scipy import ndimage
import json
import math
import csv
from volspy.util import bin_reduce, load_and_mangle_image
from .util import *
from functools import reduce
class BlockedAnalyzer (object):
"""Analyze image using block decomposition for scalability.
Conceptually we perform the sequence:
prepare_kernels (cheap, one-shot)
process_volume (expensive, scales with image size)
analyze (expensive, scales with image size)
This can be decomposed into blocks to operate on
spatial sub-problems:
process_volume_block (totally independent)
analyze_block (might be independent in practice?)
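A typical driver (cf. batch_analyze below) runs roughly:
analyzer = BlockedAnalyzer(image, synapse_diam_micron, vicinity_diam_micron, maskblur_micron, view_reduction)
view_image, centroids, centroid_measures = analyzer.volume_process()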
"""
def convNx1d(self, *args):
return numpylib.convNx1d(*args)
def convNd_sparse(self, *args):
return numpylib.convNd_sparse(*args)
def maxNx1d(self, *args):
return numpylib.maxNx1d(*args)
def array_mult(self, a1, a2):
return a1 * a2
def sum_labeled(self, src, labels, n):
return ndimage.sum(src, labels, list(range(n)))
def __init__(self, image, synapse_diam_micron, vicinity_diam_micron, maskblur_micron, view_reduction, desired_block_size=None):
if desired_block_size is None:
try:
desired_block_size = tuple(map(int, os.getenv('ZYX_BLOCK_SIZE').split(",")))
assert len(desired_block_size) == 3
except:
desired_block_size = (384,384,450)
print("Using %s voxel preferred sub-block size. Override with ZYX_BLOCK_SIZE='int,int,int'" % (desired_block_size,))
view_mode = os.getenv('VIEW_MODE', 'raw')
if view_mode.lower() not in ['raw', 'dog']:
raise ValueError('Unknown VIEW_MODE "%s"' % view_mode)
self.view_raw = view_mode.lower() == 'raw'
print("Using %s viewing mode. Override with VIEW_MODE=raw or VIEW_MODE=dog." % (self.view_raw and 'raw' or 'dog'))
self.image = image
self.view_reduction = view_reduction
self.kernels_3x1d, self.kernels_3d = prepare_kernels(image.micron_spacing, synapse_diam_micron, vicinity_diam_micron, maskblur_micron)
# maximum dependency chain of filters trims this much invalid border data
self.max_border_widths = (
# DoG is largest separated filter
radii_3x1d(self.kernels_3x1d[3])
# sparse measures consume DoG output
+ reduce(
lambda a, b: np.maximum(a, b),
[radii_3d(k) for k in self.kernels_3d]
)
# add some padding for peak detection at block borders
+ radii_3x1d(self.kernels_3x1d[3])
)
# round up to multiple of reduction size
self.max_border_widths += np.where(
(self.max_border_widths % np.array(self.view_reduction, dtype=np.int32)),
np.array(self.view_reduction, dtype=np.int32) - self.max_border_widths % np.array(self.view_reduction, dtype=np.int32),
np.zeros((3,), dtype=np.int32)
)
print("Kernel radii %s, %s implies max-border %s" % (
[tuple(radii_3x1d(k)) for k in self.kernels_3x1d],
[tuple(radii_3d(k)) for k in self.kernels_3d],
self.max_border_widths
))
self.block_size, self.num_blocks = self.find_blocking(desired_block_size)
for d in range(3):
if self.num_blocks[d] > 1:
# block has border trimmed from one edge
trim_factor = 1
else:
# single block has border trimmed from two edges
trim_factor = 2
if self.block_size[d] <= self.max_border_widths[d] * trim_factor:
raise ValueError("Block size %s too small for filter borders %s" % (self.block_size, self.max_border_widths))
self.dst_shape = tuple(
self.image.shape[d] - 2 * self.max_border_widths[d]
for d in range(3)
)
print("Using %s blocks of size %s to process %s into %s" % (
self.num_blocks,
self.block_size,
self.image.shape,
self.dst_shape
))
def block_slice_src(self, blockpos):
"""Return slice for source block.
This slice is used to extract a source sub-array from the
original input image channels.
"""
def slice1d(d):
if blockpos[d] == 0:
lower = 0
else:
lower = self.block_size[d] * blockpos[d] - self.max_border_widths[d]
if blockpos[d] == self.num_blocks[d] - 1:
upper = self.image.shape[d]
else:
upper = self.block_size[d] * (1 + blockpos[d]) + self.max_border_widths[d]
return slice(lower,upper)
slc = tuple([ slice1d(d) for d in range(3)] + [slice(None)])
return slc
def block_slice_dst(self, blockpos):
"""Return slice for dest. block.
This slice is used to store a destination sub-array into a
global result image channel, if reassembling a full image.
"""
def slice1d(d):
# invalid border gets trimmed from first and last blocks
if blockpos[d] == 0:
lower = 0
else:
lower = self.block_size[d] * blockpos[d] - self.max_border_widths[d]
if blockpos[d] == (self.num_blocks[d] - 1):
upper = self.block_size[d] * (1 + blockpos[d]) - self.max_border_widths[d] * 2
else:
upper = self.block_size[d] * (1 + blockpos[d]) - self.max_border_widths[d]
return slice(lower, upper)
slc = tuple([ slice1d(d) for d in range(3)] + [slice(None)])
return slc
def block_slice_viewdst(self, blockpos):
"""Return slice for view_image dest. block.
This slice is used to store a destination sub-array into a
global result image channel, if reassembling a full image.
"""
def slice1d(d):
# invalid border gets trimmed from first and last blocks
if blockpos[d] == 0:
lower = self.max_border_widths[d]
else:
lower = self.block_size[d] * blockpos[d]
if blockpos[d] == (self.num_blocks[d] - 1):
upper = self.block_size[d] * (1 + blockpos[d]) - self.max_border_widths[d]
else:
upper = self.block_size[d] * (1 + blockpos[d])
assert lower % self.view_reduction[d] == 0
assert upper % self.view_reduction[d] == 0
return slice(lower//self.view_reduction[d], upper//self.view_reduction[d])
slc = tuple([ slice1d(d) for d in range(3)] + [slice(None)])
return slc
def block_iter(self):
def helper(counts):
if counts[1:]:
for block in range(counts[0]):
for position in helper(counts[1:]):
yield (block,) + position
else:
for block in range(counts[0]):
yield (block,)
for position in helper(self.num_blocks):
yield position
def find_blocking(self, desired_block_size):
"""Return N-dimensional block_size, num_blocks plan for image.
The N-dimensional desired_block_size is the preferred size;
a somewhat smaller size (but at least half the desired size) or
a somewhat larger size (but at most twice the desired size) is
considered until an evenly divisible size is found. For example,
an axis of length 768 with desired block size 384 (and a view
reduction that divides 384) yields a block size of 384 and 2 blocks.
As a last-ditch effort, the source data may be trimmed along the
affected dimension to reach a divisible size, in case there is no
match. In this case, the image channels and shape of the object
are modified as side-effects.
This may all fail, raising a ValueError if no match is
possible.
"""
def find_blocking_1d(d):
if self.image.shape[d] < desired_block_size[d]:
if self.image.shape[d] % self.view_reduction[d] == 0:
return self.image.shape[d], 1
else:
raise ValueError("Dimension %d, length %d, smaller than desired block size %d but not divisible by reduction %d" % (d, self.image.shape[d], desired_block_size[d], self.view_reduction[d]))
# prefer desired_block_size or something a bit smaller
for w in range(desired_block_size[d], max(desired_block_size[d]//2, 2*self.max_border_widths[d]), -1):
if (self.image.shape[d] % w) == 0 and (w % self.view_reduction[d]) == 0:
return w, self.image.shape[d] // w
# also consider something larger
for w in range(max(desired_block_size[d], 2*self.max_border_widths[d]), desired_block_size[d]*2):
if (self.image.shape[d] % w) == 0 and (w % self.view_reduction[d]) == 0:
return w, self.image.shape[d] // w
raise ValueError("No blocking found for image dimension %d, length %d, desired block size %d, reduction %d"
% (d, self.image.shape[d], desired_block_size[d], self.view_reduction[d]))
block_size = []
num_blocks = []
for d in range(3):
try:
w, n = find_blocking_1d(d)
except ValueError:
# try trimming one voxel and repeating
print("WARNING: trimming image dimension %d to try to find divisible block size" % d)
axis_size = self.view_reduction[d]*(min(desired_block_size[d], self.image.shape[d])//self.view_reduction[d])
trimmed_shape = axis_size*(self.image.shape[d]//axis_size)
trim_slice = tuple(
[ slice(None) for i in range(d) ]
+ [ slice(0, trimmed_shape) ]
+ [ slice(None) for i in range(d+1, self.image.ndim) ]
)
self.image = self.image.lazyget(trim_slice)
w, n = find_blocking_1d(d)
block_size.append(w)
num_blocks.append(n)
return tuple(block_size), tuple(num_blocks)
def volume_process(self):
view_image = zeros(tuple(
list(map(lambda w, r: w//r, self.image.shape[0:3], self.view_reduction))
+ [self.image.shape[-1]]
), dtype=np.float32)
print("Allocated %s %s view_image with %s voxel size for %s reduction of %s source image with %s voxel size." % (view_image.shape, view_image.dtype, list(map(lambda a, b: a*b, self.image.micron_spacing, self.view_reduction)), self.view_reduction, self.image.shape, self.image.micron_spacing))
centroids = None
centroid_measures = None
perf_vector = None
total_blocks = reduce(lambda a, b: a*b, self.num_blocks, 1)
done_blocks = 0
last_progress = 0
sys.stderr.write("Progress processing %d blocks:\n" % total_blocks)
for blockpos in self.block_iter():
view, cent, meas, perf = self.block_process(blockpos)
view_image[self.block_slice_viewdst(blockpos)] = view
if centroids is None:
centroids = cent
centroid_measures = meas
perf_vector = perf
else:
centroids = np.concatenate((centroids, cent))
centroid_measures = np.concatenate((centroid_measures, meas))
perf_vector = list(map(lambda a, b: (a[0]+b[0], a[1]), perf_vector, perf))
done_blocks += 1
progress = int(100 * done_blocks // total_blocks)
for i in range(last_progress, progress, 2):
sys.stderr.write('%x' % (i//10))
last_progress = progress
sys.stderr.write(' DONE.\n')
#view_image -= view_image.min()
#view_image *= self.image.max() / view_image.max()
total = 0.
for elapsed, desc in perf_vector:
total += elapsed
print("%8.2fs %s task time" % (elapsed, desc))
print("%8.2fs TOTAL processing time" % total)
print("Found %d centroids" % len(centroids))
return view_image, centroids, centroid_measures
def block_process(self, blockpos):
"""Process block data to return convolved results.
Parameters:
blockpos: N-dimensional block numbers
Result is a 4-tuple:
(view_image, centroids, centroid_measures, perf_vector)
"""
splits = [(datetime.datetime.now(), None)]
image = self.image[self.block_slice_src(blockpos)].astype(np.float32, copy=False)
splits.append((datetime.datetime.now(), 'image load'))
low_channel = self.convNx1d(image[:,:,:,0], self.kernels_3x1d[0])
splits.append((datetime.datetime.now(), 'image*low'))
scale1_channel = self.convNx1d(image[:,:,:,0], self.kernels_3x1d[2])
splits.append((datetime.datetime.now(), 'image*syn'))
scale2_channel = self.convNx1d(image[:,:,:,0], self.kernels_3x1d[3])
dog = crop_centered(scale1_channel, scale2_channel.shape) - scale2_channel
splits.append((datetime.datetime.now(), 'image*vlow'))
# allow tinkering w/ multiple peak detection fields
max_inputs = [
low_channel,
# dog,
]
if len(max_inputs) > 1:
crop_shape = list(map(min, *[img.shape for img in max_inputs]))
else:
crop_shape = max_inputs[0].shape
max_inputs = [crop_centered(img, crop_shape) for img in max_inputs]
if self.view_raw:
view_image = crop_centered(
image,
list(map(lambda w, b: w-2*b, image.shape[0:3], self.max_border_widths)) + [image.shape[3]]
)
else:
# caller expects view_image to have same number of channels as raw image
view_image = zeros(
tuple(list(map(lambda w, b: w-2*b, image.shape[0:3], self.max_border_widths)) + [image.shape[3]]),
dtype=dog.dtype
)
view_image[:,:,:,0] = crop_centered(
dog,
list(map(lambda w, b: w-2*b, image.shape[0:3], self.max_border_widths))
)
splits.append((datetime.datetime.now(), 'view image DoG'))
view_image = bin_reduce(view_image, self.view_reduction + (1,))
splits.append((datetime.datetime.now(), 'view image reduce'))
max_kernel = self.kernels_3d[3].shape
max_channels = [self.maxNx1d(img, max_kernel) for img in max_inputs]
splits.append((datetime.datetime.now(), 'local maxima'))
# need to trim borders discarded by max_channel computation
max_inputs = [crop_centered(img, max_channels[0].shape) for img in max_inputs]
# find syn cores via local maxima test
peaks = np.zeros(max_channels[0].shape, dtype=bool)
for i in range(len(max_inputs)):
assert max_inputs[i].shape == max_channels[i].shape
peaks += max_inputs[i] >= (max_channels[i])
clipbox = tuple(
slice(peaks_border, peaks_width-peaks_border)
for peaks_width, peaks_border in map(
lambda iw, bw, pw: (pw, bw - (iw-pw)//2),
image.shape[0:3],
self.max_border_widths,
peaks.shape
)
)
splits.append((datetime.datetime.now(), 'mask peaks'))
label_im, nb_labels = ndimage.label(peaks)
splits.append((datetime.datetime.now(), 'label peaks'))
sizes = self.sum_labeled(
label_im > 0,
label_im,
nb_labels + 1
)[1:]
splits.append((datetime.datetime.now(), 'centroid sizes'))
centroid_components = [ ]
for d in range(3):
coords = self.array_mult(
array(
list(range(0, peaks.shape[d]))
).astype('float32')[
tuple([ None for i in range(d) ]) # add dims before axis
+ ( slice(None), ) # get axis
+ tuple([ None for i in range(peaks.ndim - 1 - d) ]) # add dims after axis
],
ones(peaks.shape, 'float32') # broadcast to full volume
)
centroid_components.append(
(self.sum_labeled(
coords,
label_im,
nb_labels + 1
)[1:] / sizes)#.astype(np.int32)
)
# centroids are in block peaks grid
centroids = list(zip(*centroid_components))
filtered_centroids = []
if centroids:
# discard centroids outside clipbox (we searched slightly
# larger to handle peaks at edges)
for i in range(len(centroids)):
clip = False
for d in range(3):
if int(centroids[i][d]) < clipbox[d].start or int(centroids[i][d]) >= clipbox[d].stop:
clip = True
if not clip:
filtered_centroids.append(centroids[i])
if filtered_centroids:
# centroids are in block core grid
centroids = array(filtered_centroids, int32) - array([slc.start for slc in clipbox], int32)
# image_centroids are in block image grid
image_centroids = centroids + array(self.max_border_widths, int32)
# dog_centroids are in difference-of-gaussians grid
dog_centroids = centroids + array(list(map(lambda iw, dw: (iw-dw)//2, image.shape[0:3], dog.shape)))
# global_centroids are in self.image grid
global_centroids = (
array([slc.start or 0 for slc in self.block_slice_src(blockpos)[0:3]], int32)
+ image_centroids
)
splits.append((datetime.datetime.now(), 'centroid coords'))
centroid_measures = [self.convNd_sparse(image[:,:,:,0], self.kernels_3d[0], image_centroids)]
splits.append((datetime.datetime.now(), 'raw corevals'))
centroid_measures.append(self.convNd_sparse(image[:,:,:,0], self.kernels_3d[1], image_centroids))
splits.append((datetime.datetime.now(), 'raw hollowvals'))
centroid_measures.append(self.convNd_sparse(dog, self.kernels_3d[0], dog_centroids))
splits.append((datetime.datetime.now(), 'DoG corevals'))
centroid_measures.append(self.convNd_sparse(dog, self.kernels_3d[1], dog_centroids))
splits.append((datetime.datetime.now(), 'DoG hollowvals'))
if image.shape[3] > 1:
centroid_measures.append(self.convNd_sparse(image[:,:,:,1], self.kernels_3d[2], image_centroids))
splits.append((datetime.datetime.now(), 'centroid redvals'))
else:
# defaults if we have no centroids in block...
image_centroids = zeros((0,3), int32)
global_centroids = zeros((0,3), int32)
centroid_measures = [
zeros((0,), float32), # raw coreval
zeros((0,), float32), # raw hollowval
zeros((0,), float32), # dog coreval
zeros((0,), float32), # dog hollowval
]
if image.shape[3] > 1:
centroid_measures.append(
zeros((0,), float32), # redvals
)
# need to keep same shape for splits list
splits.append((datetime.datetime.now(), 'centroid coords'))
splits.append((datetime.datetime.now(), 'raw corevals'))
splits.append((datetime.datetime.now(), 'raw hollowvals'))
splits.append((datetime.datetime.now(), 'DoG corevals'))
splits.append((datetime.datetime.now(), 'DoG hollowvals'))
centroid_measures = np.column_stack(tuple(centroid_measures))
splits.append((datetime.datetime.now(), 'stack centroid measures'))
perf_vector = list(map(lambda t0, t1: ((t1[0]-t0[0]).total_seconds(), t1[1]), splits[0:-1], splits[1:]))
return view_image, global_centroids, centroid_measures, perf_vector
def fwhm_estimate(self, synapse, centroids, syn_vals, vcn_vals, noise):
"""Estimate FWHM measures for synapse candidates."""
centroid_widths = []
for i in range(len(syn_vals)):
centroid = centroids[i]
# use synapse core value as proxy for maximum
# since we did peak detection
# treat vicinity measure as another local background estimate
# and give it a fudge-factor
floor_value = max(vcn_vals[i] * 1.5, noise)
fm = max(syn_vals[i] - floor_value, 0)
hm = fm / 2 + floor_value
widths = []
def slice_d(d, pos):
return tuple(
[ centroid[a] for a in range(d) ]
+ [ pos ]
+ [ centroid[a] for a in range(d+1, 3) ]
)
def interp_d(d, p0, p1, v):
v0 = synapse[slice_d(d, p0)]
if p1 >= 0 and p1 < synapse.shape[d]:
v1 = synapse[slice_d(d, p1)]
else:
v1 = v0
if v0 < v and v < v1 \
or v0 > v and v > v1:
return float(p0) + (v - v0) / (v1 - v0)
else:
return p0
for d in range(3):
# scan from center along axes in negative and positive
# directions until half-maximum is found
for pos in range(centroid[d], -1, -1):
lower = pos
if synapse[slice_d(d, pos)] <= hm:
break
# interpolate to find hm sub-pixel position
lower = interp_d(d, lower, lower+1, hm)
for pos in range(centroid[d], synapse.shape[d]):
upper = pos
if synapse[slice_d(d, pos)] <= hm:
break
# interpolate to find hm sub-pixel position
upper = interp_d(d, upper, upper-1, hm)
# accumulate N-d measurement for centroid
widths.append(
(upper - lower) * [
self.image_meta.z_microns,
self.image_meta.y_microns,
self.image_meta.x_microns
][d]
)
# accumulate measurements for all centroids
centroid_widths.append( tuple(widths) )
return centroid_widths
BlockedAnalyzerOpt = BlockedAnalyzer
assign_voxels_opt = numpylib.assign_voxels
try:
from . import nexpr as numerexprlib
class BlockedAnalyzerNumerexpr (BlockedAnalyzer):
def convNx1d(self, *args):
return numerexprlib.convNx1d(*args)
def array_mult(self, a1, a2):
return numerexprlib.array_mult(a1, a2)
BlockedAnalyzerOpt = BlockedAnalyzerNumerexpr
except:
pass
try:
from . import ocl as opencllib
class BlockedAnalyzerOpenCL (BlockedAnalyzerOpt):
def convNx1d(self, *args):
return opencllib.convNx1d(*args)
def maxNx1d(self, *args):
return opencllib.maxNx1d(*args)
def sum_labeled(self, src, labels, n, clq=None):
return opencllib.sum_labeled(src, labels, n, clq=clq)
def fwhm_estimate(self, synapse, centroids, syn_vals, vcn_vals, noise):
return opencllib.fwhm_estimate(
synapse, centroids, syn_vals, vcn_vals, noise,
(self.image_meta.z_microns, self.image_meta.y_microns, self.image_meta.x_microns)
)
def convNd_sparse(self, data, kernel, centroids, clq=None):
if clq is None:
# CL would actually be slower due to the data input bottleneck!
return BlockedAnalyzer.convNd_sparse(self, data, kernel, centroids)
else:
return opencllib.weighted_measure(data, centroids, kernel, clq=clq)
def block_process(self, blockpos):
"""Process block data to return convolved results.
"""
clq = opencllib.cl.CommandQueue(opencllib.ctx)
splits = [(datetime.datetime.now(), None)]
image = self.image[self.block_slice_src(blockpos)].astype(np.float32, copy=False)
splits.append((datetime.datetime.now(), 'image load'))
# PyOpenCL complains about discontiguous arrays when we project C dimension
if image.strides[3] == 0:
# but, a volspy.util TiffLazyNDArray slice repacks implicitly
image0_dev = opencllib.cl_array.to_device(clq, image[:,:,:,0])
else:
# while a regular ndarray needs repacking here
# this happens with the VOLSPY_ZNOISE_PERCENTILE pre-filtering hack
image0_dev = opencllib.cl_array.empty(clq, image.shape[0:3], image.dtype)
image0_tmp = image0_dev.map_to_host()
image0_tmp[...] = image[:,:,:,0]
del image0_tmp
clq.finish()
splits.append((datetime.datetime.now(), 'image to dev'))
low_channel = self.convNx1d(image0_dev, self.kernels_3x1d[0], clq).map_to_host()
splits.append((datetime.datetime.now(), 'image*low'))
scale1_channel = self.convNx1d(image0_dev, self.kernels_3x1d[2], clq).map_to_host()
splits.append((datetime.datetime.now(), 'image*syn'))
scale2_channel = self.convNx1d(image0_dev, self.kernels_3x1d[3], clq).map_to_host()
clq.finish()
dog = crop_centered(scale1_channel, scale2_channel.shape) - scale2_channel
splits.append((datetime.datetime.now(), 'image*vlow'))
# allow tinkering w/ multiple peak detection fields
max_inputs = [
low_channel,
# dog,
]
if len(max_inputs) > 1:
crop_shape = list(map(min, *[img.shape for img in max_inputs]))
else:
crop_shape = max_inputs[0].shape
max_inputs = [crop_centered(img, crop_shape) for img in max_inputs]
if self.view_raw:
view_image = crop_centered(
image,
list(map(lambda w, b: w-2*b, image.shape[0:3], self.max_border_widths)) + [image.shape[3]]
)
else:
view_image = crop_centered(
dog,
list(map(lambda w, b: w-2*b, image.shape[0:3], self.max_border_widths))
)
view_image = view_image[:,:,:,None]
splits.append((datetime.datetime.now(), 'view image DoG'))
view_image = bin_reduce(view_image, self.view_reduction + (1,))
splits.append((datetime.datetime.now(), 'view image reduce'))
max_kernel = self.kernels_3d[3].shape
max_channels = [self.maxNx1d(img, max_kernel) for img in max_inputs]
splits.append((datetime.datetime.now(), 'local maxima'))
# need to trim borders discarded by max_channel computation
max_inputs = [crop_centered(img, max_channels[0].shape) for img in max_inputs]
# find syn cores via local maxima test
peaks = np.zeros(max_channels[0].shape, dtype=bool)
for i in range(len(max_inputs)):
assert max_inputs[i].shape == max_channels[i].shape
peaks += max_inputs[i] >= (max_channels[i])
clipbox = tuple(
slice(peaks_border, peaks_width-peaks_border)
for peaks_width, peaks_border in map(
lambda iw, bw, pw: (pw, bw - (iw-pw)//2),
image.shape[0:3],
self.max_border_widths,
peaks.shape
)
)
splits.append((datetime.datetime.now(), 'mask peaks'))
label_im, nb_labels = ndimage.label(peaks)
islabel_im = (label_im > 0).astype(np.uint8)
label_im_dev = opencllib.cl_array.to_device(clq, label_im)
islabel_im_dev = opencllib.cl_array.to_device(clq, islabel_im)
splits.append((datetime.datetime.now(), 'label peaks'))
sizes = self.sum_labeled(
islabel_im_dev,
label_im_dev,
nb_labels + 1,
clq=clq
)[1:].map_to_host()
splits.append((datetime.datetime.now(), 'centroid sizes'))
centroid_components = [ ]
for d in range(3):
coords_dev = opencllib.nd_arange(peaks.shape, d, 0, 1, clq)
centroid_components.append(
(self.sum_labeled(
coords_dev,
label_im_dev,
nb_labels + 1,
clq=clq
)[1:].map_to_host()/sizes)#.astype(np.int32)
)
# centroids are in block peaks grid
centroids = list(zip(*centroid_components))
filtered_centroids = []
if centroids:
# discard centroids outside clipbox (we searched slightly
# larger to handle peaks at edges)
for i in range(len(centroids)):
clip = False
for d in range(3):
if int(centroids[i][d]) < clipbox[d].start or int(centroids[i][d]) >= clipbox[d].stop:
clip = True
if not clip:
filtered_centroids.append(centroids[i])
if filtered_centroids:
centroids = array(filtered_centroids, int32) - array([slc.start for slc in clipbox], int32)
# image_centroids are in block image grid
image_centroids = centroids + array(self.max_border_widths, int32)
# dog_centroids are in difference-of-gaussians grid
dog_centroids = centroids + array(list(map(lambda iw, dw: (iw-dw)//2, image.shape[0:3], dog.shape)))
# global_centroids are in self.image grid
global_centroids = (
array([slc.start or 0 for slc in self.block_slice_src(blockpos)[0:3]], int32)
+ image_centroids
)
splits.append((datetime.datetime.now(), 'centroid coords'))
image_centroids_dev = opencllib.cl_array.to_device(clq, image_centroids)
centroid_measures = [
self.convNd_sparse(
image0_dev,
opencllib.cl_array.to_device(clq, self.kernels_3d[0]),
image_centroids_dev,
clq=clq
).map_to_host()
]
splits.append((datetime.datetime.now(), 'raw corevals'))
centroid_measures.append(
self.convNd_sparse(
image0_dev,
opencllib.cl_array.to_device(clq, self.kernels_3d[1]),
image_centroids_dev,
clq=clq
).map_to_host()
)
del image0_dev
del image_centroids_dev
splits.append((datetime.datetime.now(), 'raw hollowvals'))
dog_dev = opencllib.cl_array.to_device(clq, dog)
dog_centroids_dev = opencllib.cl_array.to_device(clq, dog_centroids)
centroid_measures.append(
self.convNd_sparse(
dog_dev,
opencllib.cl_array.to_device(clq, self.kernels_3d[0]),
dog_centroids_dev,
clq=clq
).map_to_host()
)
splits.append((datetime.datetime.now(), 'DoG corevals'))
centroid_measures.append(
self.convNd_sparse(
dog_dev,
opencllib.cl_array.to_device(clq, self.kernels_3d[1]),
dog_centroids_dev,
clq=clq
).map_to_host()
)
del dog_dev
del dog_centroids_dev
splits.append((datetime.datetime.now(), 'DoG hollowvals'))
if image.shape[3] > 1:
centroid_measures.append(self.convNd_sparse(image[:,:,:,1], self.kernels_3d[2], image_centroids))
splits.append((datetime.datetime.now(), 'centroid redvals'))
else:
# defaults if we have no centroids in block...
image_centroids = zeros((0,3), int32)
global_centroids = zeros((0,3), int32)
centroid_measures = [
zeros((0,), float32), # raw coreval
zeros((0,), float32), # raw hollowval
zeros((0,), float32), # dog coreval
zeros((0,), float32), # dog hollowval
]
if image.shape[3] > 1:
centroid_measures.append(
zeros((0,), float32), # redvals
)
# need to keep same shape for splits list
splits.append((datetime.datetime.now(), 'centroid coords'))
splits.append((datetime.datetime.now(), 'raw corevals'))
splits.append((datetime.datetime.now(), 'raw hollowvals'))
splits.append((datetime.datetime.now(), 'DoG corevals'))
splits.append((datetime.datetime.now(), 'DoG hollowvals'))
centroid_measures = np.column_stack(tuple(centroid_measures))
splits.append((datetime.datetime.now(), 'stack centroid measures'))
perf_vector = list(map(lambda t0, t1: ((t1[0]-t0[0]).total_seconds(), t1[1]), splits[0:-1], splits[1:]))
return view_image, global_centroids, centroid_measures, perf_vector
BlockedAnalyzerOpt = BlockedAnalyzerOpenCL
assign_voxels_opt = opencllib.assign_voxels
except:
pass
def batch_analyze(image, cdiam_microns, vdiam_microns, rdiam_microns, view_reduction=(1,1,1)):
analyzer = BlockedAnalyzerOpt(image, cdiam_microns, vdiam_microns, rdiam_microns, view_reduction)
view_image, centroids, centroid_measures = analyzer.volume_process()
return analyzer, view_image, centroids, centroid_measures
synaptic_footprints = (
(2.75, 1.5, 1.5),
(4.0, 2.75, 2.75),
(3.0, 3.0, 3.0),
)
nucleic_footprints = (
(8., 8., 8.),
(16., 16., 16.),
(3.0, 3.0, 3.0),
)
def get_mode_and_footprints():
do_nuclei = os.getenv('SYNSPY_DETECT_NUCLEI', 'false').lower() == 'true'
footprints = nucleic_footprints if do_nuclei else synaptic_footprints
return do_nuclei, footprints
def batch_analyze_cli(fname):
"""Analyze file given as argument and write NPZ output file.
Arguments:
fname: OME-TIFF input file name
Environment parameters:
DUMP_PREFIX: defaults to './basename' where '.ome.tiff' suffix has been stripped
ZYX_SLICE: selects ROI within full image
ZYX_IMAGE_GRID: overrides image grid step metadata
SYNSPY_DETECT_NUCLEI: 'true' for nuclei mode, else synapse mode
OMIT_VOXELS: 'true' to omit voxel data from NPZ result
Output NPZ array keys:
'properties.json': various metadata as 1D uint8 array of UTF-8 JSON data
'voxels': 4D voxel data with axes (channel, z, y, x) unless OMIT_VOXELS is true
'centroids': 2D centroid list with axes (N, c) for coords [z y x]
'measures': 2D measure list with axes (N, m) for measures []
Output is written to a file named DUMP_PREFIX + '.npz'
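Illustrative call (file name and environment value are hypothetical):
os.environ['SYNSPY_DETECT_NUCLEI'] = 'true'
batch_analyze_cli('sample-image.ome.tiff')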
"""
dump_prefix = os.path.basename(fname)
try:
m = re.match('^(?P<accession>.*)(?P<ome>[.]ome)[.]tif+$', dump_prefix)
dump_prefix = m.groupdict()['accession']
except:
pass
dump_prefix = os.getenv('DUMP_PREFIX', dump_prefix)
omit_voxels = os.getenv('OMIT_VOXELS', 'false').lower() == 'true'
image, meta, slice_origin = load_and_mangle_image(fname)
do_nuclei, footprints = get_mode_and_footprints()
cdiam, vdiam, rdiam = footprints
analyzer, view_image, centroids, measures = batch_analyze(image, cdiam, vdiam, rdiam)
props = {
"image_grid": list(image.micron_spacing),
"shape": list(image.shape),
"slice_origin": list(slice_origin),
"core_diam_microns": list(footprints[0]),
"vicinity_diam_microns": list(footprints[1]),
"synspy_nuclei_mode": do_nuclei,
}
if image.shape[0] > 1:
props['redblur_diam_mirons'] = list(footprints[2])
if view_image.dtype == np.float32 and measures.dtype == np.float32:
maxval = max(view_image.max(), measures.max())
view_image = view_image * 1.0/maxval
view_image = view_image.astype(np.float16)
measures = measures * 1.0/maxval
measures = measures.astype(np.float16)
props['voxel_divisor'] = float(maxval)
props['measures_divisor'] = float(maxval)
if centroids.dtype == np.int32 and centroids.max() < 2**16-1 and centroids.min() >= 0:
centroids = centroids.astype(np.uint16)
dump_fname = '%s.npz' % dump_prefix
outf = open(dump_fname, 'wb')
np.savez(
outf,
properties=np.frombuffer(json.dumps(props).encode('utf8'), np.uint8),
voxels=view_image if not omit_voxels else np.zeros((0,), dtype=np.float16),
centroids=centroids,
measures=measures
)
outf.close()
print('Dumped ROI analysis data to %s' % dump_fname)
return 0
'''Test module for cssccc'''
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
from unittest import TestCase, main as unittest_main
from pocketlint.contrib.cssccc import (
CSSCodingConventionChecker, CSSAtRule, CSSRuleSet, CSSStatementMember)
class TestCSSCodingConventionChecker(TestCase):
'''Test for parsing the CSS text.'''
def test_getNextRule_start(self):
text = 'selector{}'
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertEqual('selector', rule.selector.text)
self.assertEqual(0, rule.selector.start_line)
self.assertEqual(0, rule.selector.start_character)
text = '\nselector{}'
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertEqual('\nselector', rule.selector.text)
self.assertEqual(0, rule.selector.start_line)
self.assertEqual(0, rule.selector.start_character)
text = '\n\nselector{}'
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertEqual('\n\nselector', rule.selector.text)
self.assertEqual(0, rule.selector.start_line)
self.assertEqual(0, rule.selector.start_character)
text = 'selector\n{}'
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertEqual('selector\n', rule.selector.text)
self.assertEqual(0, rule.selector.start_line)
self.assertEqual(0, rule.selector.start_character)
text = 'selector, {}'
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertEqual('selector, ', rule.selector.text)
self.assertEqual(0, rule.selector.start_line)
self.assertEqual(0, rule.selector.start_character)
def test_getNextRule_content(self):
text = 'selector { content; }'
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertEqual(' content; ', rule.declarations.text)
self.assertEqual(0, rule.declarations.start_line)
self.assertEqual(10, rule.declarations.start_character)
text = 'selector \n{\n content; }'
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertEqual('\n content; ', rule.declarations.text)
self.assertEqual(1, rule.declarations.start_line)
self.assertEqual(1, rule.declarations.start_character)
def test_getNextRule_continue(self):
text = 'selector1\n { content1; }\n\nselector2\n{content2}\n'
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertEqual('selector1\n ', rule.selector.text)
self.assertEqual(0, rule.selector.start_line)
self.assertEqual(0, rule.selector.start_character)
self.assertEqual(' content1; ', rule.declarations.text)
self.assertEqual(1, rule.declarations.start_line)
self.assertEqual(2, rule.declarations.start_character)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertEqual('\n\nselector2\n', rule.selector.text)
self.assertEqual(1, rule.selector.start_line)
self.assertEqual(14, rule.selector.start_character)
self.assertEqual('content2', rule.declarations.text)
self.assertEqual(4, rule.declarations.start_line)
self.assertEqual(1, rule.declarations.start_character)
def test_getNextRule_stop(self):
text = 'rule1{st1\n}\n@font-face {\n src: url("u\n u"); \n }\nr2{st2}'
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSAtRule.type)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertRaises(StopIteration, lint.getNextRule)
def test_getNextRule_comment_multiline(self):
text = (
'\n'
'\n'
'/* multi line\n'
' * comment \n'
' */\n'
'selector {\n'
'cont2;\n'
'}')
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertEqual('\n\nselector ', rule.selector.text)
self.assertEqual(0, rule.selector.start_line)
self.assertEqual(0, rule.selector.start_character)
def test_getNextRule_comment_inline(self):
text = (
'selector {\n'
'so/* inline comment*/me\n'
'}\n')
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertEqual('\nsome\n', rule.declarations.text)
self.assertEqual(0, rule.declarations.start_line)
self.assertEqual(10, rule.declarations.start_character)
def test_getNextRule_comment_end_of_line(self):
text = (
'selector {\n'
'cont1; /*end of line comment*/\n'
'}')
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertEqual('\ncont1; \n', rule.declarations.text)
self.assertEqual(0, rule.declarations.start_line)
self.assertEqual(10, rule.declarations.start_character)
def test_getNextRule_comment_single_line(self):
text = (
'\n'
'/* single line comment */\n'
'selector2 {\n'
'cont1;\n'
'}')
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertEqual('\nselector2 ', rule.selector.text)
self.assertEqual(0, rule.selector.start_line)
self.assertEqual(0, rule.selector.start_character)
def test_get_at_import_rule(self):
'''Test for @import url(/css/screen.css) screen, projection;'''
text = 'rule1{st1\n}\n@import url(somet) print, soment ;rule2{st2}'
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSAtRule.type)
self.assertTrue(rule.block is None)
self.assertEqual('import', rule.identifier)
self.assertEqual('\n@import ', rule.keyword.text)
self.assertEqual(1, rule.keyword.start_line)
self.assertEqual(1, rule.keyword.start_character)
self.assertEqual(' url(somet) print, soment ', rule.text.text)
self.assertEqual(2, rule.text.start_line)
self.assertEqual(8, rule.text.start_character)
def test_get_at_charset_rule(self):
'''Test for @charset "ISO-8859-15";'''
text = 'rule1{st1\n}\n@charset "utf" ;rule2{st2}'
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSAtRule.type)
self.assertTrue(rule.block is None)
self.assertEqual('charset', rule.identifier)
self.assertEqual('\n@charset ', rule.keyword.text)
self.assertEqual(1, rule.keyword.start_line)
self.assertEqual(1, rule.keyword.start_character)
self.assertEqual(' "utf" ', rule.text.text)
self.assertEqual(2, rule.text.start_line)
self.assertEqual(9, rule.text.start_character)
def test_get_at_namespace_rule(self):
'''Test for @namespace foo "http://foo" ;'''
text = 'rule1{st1\n}@namespace foo "http://foo" ;rule2{st2}'
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSAtRule.type)
self.assertTrue(rule.block is None)
self.assertEqual('namespace', rule.identifier)
self.assertEqual('@namespace ', rule.keyword.text)
self.assertEqual(1, rule.keyword.start_line)
self.assertEqual(1, rule.keyword.start_character)
self.assertEqual(' foo "http://foo" ', rule.text.text)
self.assertEqual(1, rule.text.start_line)
self.assertEqual(12, rule.text.start_character)
def test_get_at_page_rule(self):
'''Test for @page
@page :left {
margin-left: 5cm; /* left pages only */
}
'''
text = 'rule1{st1\n}\n@page :left {\n mar; /*com*/\n }\nrule2{st2}'
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSAtRule.type)
self.assertTrue(rule.text is None)
self.assertEqual('page', rule.identifier)
self.assertEqual('\n@page :left ', rule.keyword.text)
self.assertEqual(1, rule.keyword.start_line)
self.assertEqual(1, rule.keyword.start_character)
self.assertEqual('\n mar; \n ', rule.block.text)
self.assertEqual(2, rule.block.start_line)
self.assertEqual(13, rule.block.start_character)
def test_get_at_font_face_rule(self):
'''Test for @font-face
@font-face {
font-family: "Example Font";
src: url("http://www.example.com
/fonts/example");
}
'''
text = 'rule1{st1\n}\n@font-face {\n src: url("u\n u"); \n }\nr2{st2}'
lint = CSSCodingConventionChecker(text)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSAtRule.type)
self.assertTrue(rule.text is None)
self.assertEqual('font-face', rule.identifier)
self.assertEqual('\n@font-face ', rule.keyword.text)
self.assertEqual(1, rule.keyword.start_line)
self.assertEqual(1, rule.keyword.start_character)
self.assertEqual('\n src: url("u\n u"); \n ', rule.block.text)
self.assertEqual(2, rule.block.start_line)
self.assertEqual(12, rule.block.start_character)
rule = lint.getNextRule()
self.assertTrue(rule.type is CSSRuleSet.type)
self.assertRaises(StopIteration, lint.getNextRule)
class TestCSSStatementMember(TestCase):
'''Tests for CSSStatementMember.'''
def test_getStartLine(self):
statement = CSSStatementMember(0, 4, 'some')
self.assertEqual(1, statement.getStartLine())
statement = CSSStatementMember(3, 4, 'some')
self.assertEqual(4, statement.getStartLine())
statement = CSSStatementMember(3, 4, '\n\nsome')
self.assertEqual(6, statement.getStartLine())
def test_getStartLine_empty_selector(self):
statement = CSSStatementMember(0, 1, '')
self.assertEqual(1, statement.getStartLine())
def test_getStartLine_newlines_only(self):
statement = CSSStatementMember(0, 1, '\n')
self.assertEqual(2, statement.getStartLine())
def test_getStartLine_spaces_only(self):
statement = CSSStatementMember(0, 1, ' ')
self.assertEqual(1, statement.getStartLine())
class TestLog(object):
'''Container for a test log.'''
def __init__(self, line_number, code, message):
self.line_number = line_number
self.code = code
self.message = message
class RuleTesterBase(TestCase):
'''Base class for rule checkers.'''
ignored_messaged = []
def setUp(self):
self.logs = []
def log(self, line_number, code, message):
if code in self.ignored_messaged:
return
self.logs.append((line_number, code, message))
@property
def last_log(self):
(line_number, code, message) = self.logs.pop()
return TestLog(line_number, code, message)
class RuleTesterConventionA(RuleTesterBase):
'''Class for convention A.
selector1,
selecter2
{
property1: value1;
property2: value2;
}
'''
ignored_messaged = ['I013', 'I014']
class TestCSSRuleSetSelectorChecksA(RuleTesterConventionA):
'''Test coding conventions for selector from rule sets.'''
def test_valid_selector(self):
selector = CSSStatementMember(0, 0, 'something\n')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
self.assertEqual([], self.logs)
selector = CSSStatementMember(0, 0, '\nsomething\n')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
self.assertEqual([], self.logs)
selector = CSSStatementMember(1, 0, '\n\nsomething\n')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
self.assertEqual([], self.logs)
selector = CSSStatementMember(2, 0, '\n\nsomething,\nsomethi\n')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
self.assertEqual([], self.logs)
selector = CSSStatementMember(3, 0, '\n\nsom:some some,\n#somethi\n')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
self.assertEqual([], self.logs)
def test_I002(self):
selector = CSSStatementMember(2, 0, '\n\n\nsomething\n')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
last_log = self.last_log
self.assertEqual('I002', last_log.code)
self.assertEqual(6, last_log.line_number)
selector = CSSStatementMember(4, 0, '\n\n\n\nsomething\n')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
last_log = self.last_log
self.assertEqual('I002', last_log.code)
self.assertEqual(9, last_log.line_number)
def test_I003(self):
selector = CSSStatementMember(2, 0, '\nsomething\n')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
last_log = self.last_log
self.assertEqual('I003', last_log.code)
self.assertEqual(4, last_log.line_number)
selector = CSSStatementMember(2, 0, 'something\n')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
last_log = self.last_log
self.assertEqual('I003', last_log.code)
self.assertEqual(3, last_log.line_number)
def test_I004(self):
selector = CSSStatementMember(3, 0, '\n\nsomething, something\n')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
last_log = self.last_log
self.assertEqual('I004', last_log.code)
self.assertEqual(6, last_log.line_number)
def test_I005(self):
selector = CSSStatementMember(4, 0, '\nsomething,\nsomething')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
last_log = self.last_log
self.assertEqual('I005', last_log.code)
self.assertEqual(7, last_log.line_number)
class TestCSSRuleSetDeclarationsChecksA(RuleTesterConventionA):
'''Test coding conventions for declarations from rule sets.'''
def test_valid_declarations(self):
stmt = CSSStatementMember(
0, 0, '\n some: 3px;\n other:\n url();\n')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
self.assertEqual([], self.logs)
def test_I006(self):
stmt = CSSStatementMember(
4, 0, '\n some: 3px;\n other: url();')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
last_log = self.last_log
self.assertEqual('I006', last_log.code)
self.assertEqual(7, last_log.line_number)
stmt = CSSStatementMember(
4, 0, '\n some: 3px;\n other: url();\n ')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
last_log = self.last_log
self.assertEqual('I006', last_log.code)
self.assertEqual(8, last_log.line_number)
stmt = CSSStatementMember(
4, 0, '\n some: 3px;\n other: url();\n\n ')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
last_log = self.last_log
self.assertEqual('I006', last_log.code)
self.assertEqual(9, last_log.line_number)
def test_I007(self):
stmt = CSSStatementMember(
4, 0, '\n some: 3px; other: url();\n')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
last_log = self.last_log
self.assertEqual('I007', last_log.code)
self.assertEqual(6, last_log.line_number)
def test_I008(self):
stmt = CSSStatementMember(
0, 0, '\n some: 3px;\n other: url();\n')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
self.assertEqual('I008', self.last_log.code)
stmt = CSSStatementMember(
0, 0, '\n some: 3px;\n other: url();\n')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
self.assertEqual('I008', self.last_log.code)
def test_I009(self):
stmt = CSSStatementMember(
0, 0, '\n some 3px;\n other: url();\n')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
self.assertEqual('I009', self.last_log.code)
stmt = CSSStatementMember(
0, 0, '\n some: 3:px;\n other: url();\n')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
self.assertEqual('I009', self.last_log.code)
def test_I010(self):
stmt = CSSStatementMember(
0, 0, '\n some : 3px;\n')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
self.assertEqual('I010', self.last_log.code)
def test_I011(self):
stmt = CSSStatementMember(
0, 0, '\n some:3px;\n')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
self.assertEqual('I011', self.last_log.code)
def test_I012(self):
stmt = CSSStatementMember(
0, 0, '\n some: 3px;\n')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
self.assertEqual('I012', self.last_log.code)
class RuleTesterConventionB(RuleTesterBase):
'''Class for convention B.
selector1,
    selector2 {
property1: value1;
property2: value2;
}
'''
ignored_messaged = ['I005', 'I014']
class TestCSSRuleSetSelectorChecksB(RuleTesterConventionB):
'''Test coding conventions for selector from rule sets.'''
def test_valid_selector(self):
selector = CSSStatementMember(0, 0, 'something ')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
self.assertEqual([], self.logs)
selector = CSSStatementMember(0, 0, '\nsomething ')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
self.assertEqual([], self.logs)
selector = CSSStatementMember(1, 0, '\n\nsomething ')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
self.assertEqual([], self.logs)
selector = CSSStatementMember(2, 0, '\n\nsomething,\nsomethi ')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
self.assertEqual([], self.logs)
selector = CSSStatementMember(3, 0, '\n\nsom:some some,\n#somethi ')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
self.assertEqual([], self.logs)
def test_I013(self):
selector = CSSStatementMember(2, 0, '\n\nsomething\n')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.checkSelector()
last_log = self.last_log
self.assertEqual('I013', last_log.code)
self.assertEqual(5, last_log.line_number)
def test_I013_compressed_file(self):
selector = CSSStatementMember(0, 0, 'something')
rule = CSSRuleSet(selector=selector, declarations=None, log=self.log)
rule.selector.text = ''
rule.checkSelector()
last_log = self.last_log
self.assertEqual('I013', last_log.code)
self.assertEqual(1, last_log.line_number)
class RuleTesterConventionC(RuleTesterBase):
'''Class for convention C.
selector1,
    selector2 {
property1: value1;
property2: value2;
}
'''
ignored_messaged = ['I005', 'I006']
class TestCSSRuleSetDeclarationsChecksC(RuleTesterConventionC):
'''Test coding conventions for declarations from rule sets.'''
def test_valid_declarations(self):
stmt = CSSStatementMember(
0, 0, '\n some: 3px;\n other:\n url();\n ')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
self.assertEqual([], self.logs)
def test_I014(self):
stmt = CSSStatementMember(
0, 0, '\n some: 3px;\n')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
self.assertEqual('I014', self.last_log.code)
stmt = CSSStatementMember(
0, 0, '\n some: 3px;\n ')
rule = CSSRuleSet(selector=None, declarations=stmt, log=self.log)
rule.checkDeclarations()
self.assertEqual('I014', self.last_log.code)
if __name__ == '__main__':
unittest_main()
|
|
"""
audfprint_match.py
Fingerprint matching code for audfprint
2014-05-26 Dan Ellis dpwe@ee.columbia.edu
"""
import librosa
import numpy as np
import scipy.signal
import time
# for checking phys mem size
import resource
# for localtest and illustrate
import audfprint_analyze
import matplotlib.pyplot as plt
import audio_read
from scipy import stats
def log(message):
""" log info with stats """
print time.ctime(), \
"physmem=", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss, \
"utime=", resource.getrusage(resource.RUSAGE_SELF).ru_utime, \
message
def encpowerof2(val):
""" Return N s.t. 2^N >= val """
return int(np.ceil(np.log(max(1, val))/np.log(2)))
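# For example, encpowerof2(5) == 3 (since 2**3 = 8 >= 5), encpowerof2(1000) == 10,
# and encpowerof2(1) == 0.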
def locmax(vec, indices=False):
""" Return a boolean vector of which points in vec are local maxima.
End points are peaks if larger than single neighbors.
if indices=True, return the indices of the True values instead
of the boolean vector. (originally from audfprint.py)
"""
# x[-1]-1 means last value can be a peak
#nbr = np.greater_equal(np.r_[x, x[-1]-1], np.r_[x[0], x])
# the np.r_ was killing us, so try an optimization...
nbr = np.zeros(len(vec)+1, dtype=bool)
nbr[0] = True
nbr[1:-1] = np.greater_equal(vec[1:], vec[:-1])
maxmask = (nbr[:-1] & ~nbr[1:])
if indices:
return np.nonzero(maxmask)[0]
else:
return maxmask
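# For example, locmax(np.array([1, 3, 2, 5, 4])) returns
# [False, True, False, True, False], and locmax(..., indices=True) returns
# array([1, 3]); the last element would also count as a peak if it were
# greater than or equal to its left neighbor.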
def find_modes(data, threshold=5, window=0):
""" Find multiple modes in data, Report a list of (mode, count)
pairs for every mode greater than or equal to threshold.
Only local maxima in counts are returned.
"""
# TODO: Ignores window at present
datamin = np.amin(data)
fullvector = np.bincount(data - datamin)
# Find local maxima
localmaxes = np.nonzero(np.logical_and(locmax(fullvector),
np.greater_equal(fullvector,
threshold)))[0]
return localmaxes + datamin, fullvector[localmaxes]
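# Worked example: for data = np.array([3, 3, 3, 3, 3, 7, 7]) and threshold=5,
# np.bincount(data - 3) is [5, 0, 0, 0, 2]; the only local maximum whose count
# reaches the threshold is at offset 0, so find_modes(data) returns
# (array([3]), array([5])).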
class Matcher(object):
"""Provide matching for audfprint fingerprint queries to hash table"""
def __init__(self):
"""Set up default object values"""
# Tolerance window for time differences
self.window = 1
# Absolute minimum number of matching hashes to count as a match
self.threshcount = 5
# How many hits to return?
self.max_returns = 1
# How deep to search in return list?
self.search_depth = 100
# Sort those returns by time (instead of counts)?
self.sort_by_time = False
# Verbose reporting?
self.verbose = False
# Do illustration?
self.illustrate = False
# Careful counts?
self.exact_count = False
# Search for time range?
self.find_time_range = False
# Quantile of time range to report.
self.time_quantile = 0.02
# Display pre-emphasized spectrogram in illustrate_match?
self.illustrate_hpf = False
def _best_count_ids(self, hits, ht):
""" Return the indexes for the ids with the best counts.
hits is a matrix as returned by hash_table.get_hits()
            with rows consisting of [id dtime hash otime] """
allids = hits[:, 0]
ids = np.unique(allids)
#rawcounts = np.sum(np.equal.outer(ids, allids), axis=1)
# much faster, and doesn't explode memory
rawcounts = np.bincount(allids)[ids]
# Divide the raw counts by the total number of hashes stored
# for the ref track, to downweight large numbers of chance
# matches against longer reference tracks.
wtdcounts = rawcounts/(ht.hashesperid[ids].astype(float))
        # Find all the actual hits for the most popular ids
bestcountsixs = np.argsort(wtdcounts)[::-1]
# We will examine however many hits have rawcounts above threshold
# up to a maximum of search_depth.
maxdepth = np.minimum(np.count_nonzero(np.greater(rawcounts,
self.threshcount)),
self.search_depth)
# Return the ids to check
bestcountsixs = bestcountsixs[:maxdepth]
return ids[bestcountsixs], rawcounts[bestcountsixs]
def _unique_match_hashes(self, id, hits, mode):
""" Return the list of unique matching hashes. Split out so
we can recover the actual matching hashes for the best
match if required. """
allids = hits[:, 0]
alltimes = hits[:, 1]
allhashes = hits[:, 2].astype(np.int64)
allotimes = hits[:, 3]
timebits = max(1, encpowerof2(np.amax(allotimes)))
# matchhashes may include repeats because multiple
# ref hashes may match a single query hash under window.
# Uniqify:
#matchhashes = sorted(list(set(matchhashes)))
# much, much faster:
matchix = np.nonzero(
np.logical_and(allids == id, np.less_equal(np.abs(alltimes - mode),
self.window)))[0]
matchhasheshash = np.unique(allotimes[matchix]
+ (allhashes[matchix] << timebits))
timemask = (1 << timebits) - 1
matchhashes = np.c_[matchhasheshash & timemask,
matchhasheshash >> timebits]
return matchhashes
def _calculate_time_ranges(self, hits, id, mode):
"""Given the id and mode, return the actual time support."""
match_times = sorted(hits[row, 3]
for row in np.nonzero(hits[:, 0]==id)[0]
if mode - self.window <= hits[row, 1]
and hits[row, 1] <= mode + self.window)
min_time = match_times[int(len(match_times)*self.time_quantile)]
max_time = match_times[int(len(match_times)*(1.0 - self.time_quantile)) - 1]
return min_time, max_time
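    # For example, with the default time_quantile of 0.02 and 100 time-consistent
    # matches, min_time is match_times[2] and max_time is match_times[97], i.e.
    # roughly the outer 2% of the matched times is trimmed at each end.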
def _exact_match_counts(self, hits, ids, rawcounts, hashesfor=None):
"""Find the number of "filtered" (time-consistent) matching hashes
for each of the promising ids in <ids>. Return an
np.array whose rows are [id, filtered_count,
modal_time_skew, unfiltered_count, original_rank,
min_time, max_time]. Results are sorted by original rank
            (but will not in general include all of the original
IDs). There can be multiple rows for a single ID, if
there are several distinct time_skews giving good
matches.
"""
# Slower, old process for exact match counts
allids = hits[:, 0]
alltimes = hits[:, 1]
allhashes = hits[:, 2]
#allotimes = hits[:, 3]
# Allocate enough space initially for 4 modes per hit
maxnresults = len(ids) * 4
results = np.zeros((maxnresults, 7), np.int32)
nresults = 0
min_time = 0
max_time = 0
for urank, (id, rawcount) in enumerate(zip(ids, rawcounts)):
modes, counts = find_modes(alltimes[np.nonzero(allids==id)[0]],
window=self.window,
threshold=self.threshcount)
for mode in modes:
matchhashes = self._unique_match_hashes(id, hits, mode)
# Now we get the exact count
filtcount = len(matchhashes)
if filtcount >= self.threshcount:
if nresults == maxnresults:
# Extend array
maxnresults *= 2
results.resize((maxnresults, results.shape[1]))
if self.find_time_range:
min_time, max_time = self._calculate_time_ranges(
hits, id, mode)
results[nresults, :] = [id, filtcount, mode, rawcount,
urank, min_time, max_time]
nresults += 1
return results[:nresults, :]
def _approx_match_counts(self, hits, ids, rawcounts):
""" Quick and slightly inaccurate routine to count time-aligned hits.
Only considers largest mode for reference ID match.
Args:
hits: np.array of hash matches, each row consists of
<track_id, skew_time, ...>.
ids: list of the IDs to check, based on raw match count.
rawcounts: list giving the actual raw counts for each id to try.
Returns:
Rows of [id, filt_count, time_skew, raw_count, orig_rank,
min_time, max_time].
Ids occur in the same order as the input list, but ordering
of (potentially multiple) hits within each track may not be
sorted (they are sorted by the largest single count value, not
the total count integrated over -window:+window bins).
"""
# In fact, the counts should be the same as exact_match_counts
# *but* some matches may be pruned because we don't bother to
# apply the window (allowable drift in time alignment) unless
# there are more than threshcount matches at the single best time skew.
        # Note: now that we allow multiple matches per ID, this array may need
        # to grow, so it is extended inside the loop if necessary.
results = np.zeros((len(ids), 7), np.int32)
if not hits.size:
# No hits found, return empty results
return results
allids = hits[:, 0].astype(int)
alltimes = hits[:, 1].astype(int)
# Make sure every value in alltimes is >=0 for bincount
mintime = np.amin(alltimes)
alltimes -= mintime
nresults = 0
# Hash IDs and times together, so only a single bincount
timebits = max(1, encpowerof2(np.amax(alltimes)))
allbincounts = np.bincount((allids << timebits) + alltimes)
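        # For example, with timebits = 10 each track id owns a block of
        # 2**10 = 1024 time bins, so id 3 at (offset) skew time 17 is counted
        # in bin (3 << 10) + 17 == 3089 of allbincounts.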
min_time = 0
max_time = 0
for urank, (id, rawcount) in enumerate(zip(ids, rawcounts)):
# Make sure id is an int64 before shifting it up.
id = int(id)
# Select the subrange of bincounts corresponding to this id
bincounts = allbincounts[(id << timebits):(((id+1)<<timebits)-1)]
still_looking = True
while still_looking:
mode = np.argmax(bincounts)
if bincounts[mode] <= self.threshcount:
# Too few - skip to the next id
still_looking = False
continue
count = np.sum(bincounts[max(0, mode - self.window) :
(mode + self.window + 1)])
if self.find_time_range:
min_time, max_time = self._calculate_time_ranges(
hits, id, mode + mintime)
results[nresults, :] = [id, count, mode + mintime, rawcount,
urank, min_time, max_time]
nresults += 1
if nresults >= results.shape[0]:
results = np.vstack([results, np.zeros(results.shape,
np.int32)])
# Clear this hit to find next largest.
bincounts[max(0, mode - self.window):
(mode + self.window + 1)] = 0
return results[:nresults, :]
def match_hashes(self, ht, hashes, hashesfor=None):
""" Match audio against fingerprint hash table.
            Return top N matches as (id, filteredmatches, timeoffs, rawmatches,
origrank, mintime, maxtime)
If hashesfor specified, return the actual matching hashes for that
hit (0=top hit).
"""
# find the implicated id, time pairs from hash table
#log("nhashes=%d" % np.shape(hashes)[0])
hits = ht.get_hits(hashes)
bestids, rawcounts = self._best_count_ids(hits, ht)
#log("len(rawcounts)=%d max(bestcountsixs)=%d" %
# (len(rawcounts), max(bestcountsixs)))
if not self.exact_count:
results = self._approx_match_counts(hits, bestids, rawcounts)
else:
results = self._exact_match_counts(hits, bestids, rawcounts,
hashesfor)
# Sort results by filtered count, descending
results = results[(-results[:,1]).argsort(),]
# Where was our best hit in the unfiltered count ranking?
# (4th column is rank in original list; look at top hit)
#if np.shape(results)[0] > 0:
# bestpos = results[0, 4]
# print "bestpos =", bestpos
# Could use to collect stats on best search-depth to use...
# Now strip the final column (original raw-count-based rank)
#results = results[:, :4]
if hashesfor is None:
return results
else:
id = results[hashesfor, 0]
mode = results[hashesfor, 2]
hashesforhashes = self._unique_match_hashes(id, hits, mode)
return results, hashesforhashes
def match_file(self, analyzer, ht, filename, number=None):
""" Read in an audio file, calculate its landmarks, query against
            hash table. Return top N matches as (id, filteredmatchcount,
timeoffs, rawmatchcount), also length of input file in sec,
and count of raw query hashes extracted
"""
q_hashes = analyzer.wavfile2hashes(filename)
        # Fake the duration as the largest hash time
if len(q_hashes) == 0:
durd = 0.0
else:
durd = float(analyzer.n_hop * q_hashes[-1][0])/analyzer.target_sr
if self.verbose:
if number is not None:
numberstring = "#%d"%number
else:
numberstring = ""
print time.ctime(), "Analyzed", numberstring, filename, "of", \
('%.3f'%durd), "s " \
"to", len(q_hashes), "hashes"
# Run query
rslts = self.match_hashes(ht, q_hashes)
# Post filtering
if self.sort_by_time:
rslts = rslts[(-rslts[:, 2]).argsort(), :]
return (rslts[:self.max_returns, :], durd, len(q_hashes))
def file_match_to_msgs(self, analyzer, ht, qry, number=None):
""" Perform a match on a single input file, return list
of message strings """
rslts, dur, nhash = self.match_file(analyzer, ht, qry, number)
t_hop = analyzer.n_hop/float(analyzer.target_sr)
if self.verbose:
qrymsg = qry + (' %.1f '%dur) + "sec " + str(nhash) + " raw hashes"
else:
qrymsg = qry
msgrslt = []
if len(rslts) == 0:
# No matches returned at all
nhashaligned = 0
if self.verbose:
msgrslt.append("NOMATCH "+qrymsg)
else:
msgrslt.append(qrymsg+"\t")
else:
for (tophitid, nhashaligned, aligntime, nhashraw, rank,
min_time, max_time) in rslts:
# figure the number of raw and aligned matches for top hit
if self.verbose:
if self.find_time_range:
msg = ("Matched {:6.1f} s starting at {:6.1f} s in {:s}"
" to time {:6.1f} s in {:s}").format(
(max_time - min_time)*t_hop, min_time*t_hop, qry,
(min_time + aligntime)*t_hop, ht.names[tophitid])
else:
msg = "Matched {:s} as {:s} at {:6.1f} s".format(
qrymsg, ht.names[tophitid], aligntime*t_hop)
msg += (" with {:5d} of {:5d} common hashes"
" at rank {:2d}").format(
nhashaligned, nhashraw, rank)
msgrslt.append(msg)
else:
msgrslt.append(qrymsg + "\t" + ht.names[tophitid])
if self.illustrate:
self.illustrate_match(analyzer, ht, qry)
return msgrslt
def illustrate_match(self, analyzer, ht, filename):
""" Show the query fingerprints and the matching ones
plotted over a spectrogram """
# Make the spectrogram
#d, sr = librosa.load(filename, sr=analyzer.target_sr)
d, sr = audio_read.audio_read(filename, sr=analyzer.target_sr, channels=1)
sgram = np.abs(librosa.stft(d, n_fft=analyzer.n_fft,
hop_length=analyzer.n_hop,
window=np.hanning(analyzer.n_fft+2)[1:-1]))
sgram = 20.0*np.log10(np.maximum(sgram, np.max(sgram)/1e6))
sgram = sgram - np.mean(sgram)
# High-pass filter onset emphasis
# [:-1,] discards top bin (nyquist) of sgram so bins fit in 8 bits
# spectrogram enhancement
if self.illustrate_hpf:
HPF_POLE = 0.98
sgram = np.array([scipy.signal.lfilter([1, -1],
[1, -HPF_POLE], s_row)
for s_row in sgram])[:-1,]
sgram = sgram - np.max(sgram)
librosa.display.specshow(sgram, sr=sr, hop_length=analyzer.n_hop,
y_axis='linear', x_axis='time',
cmap='gray_r', vmin=-80.0, vmax=0)
# Do the match?
q_hashes = analyzer.wavfile2hashes(filename)
# Run query, get back the hashes for match zero
results, matchhashes = self.match_hashes(ht, q_hashes, hashesfor=0)
if self.sort_by_time:
results = sorted(results, key=lambda x: -x[2])
# Convert the hashes to landmarks
lms = audfprint_analyze.hashes2landmarks(q_hashes)
mlms = audfprint_analyze.hashes2landmarks(matchhashes)
# Overplot on the spectrogram
plt.plot(np.array([[x[0], x[0]+x[3]] for x in lms]).T,
np.array([[x[1], x[2]] for x in lms]).T,
'.-g')
plt.plot(np.array([[x[0], x[0]+x[3]] for x in mlms]).T,
np.array([[x[1], x[2]] for x in mlms]).T,
'.-r')
# Add title
plt.title(filename + " : Matched as " + ht.names[results[0][0]]
+ (" with %d of %d hashes" % (len(matchhashes),
len(q_hashes))))
# Display
plt.show()
# Return
return results
def localtest():
"""Function to provide quick test"""
pat = '/Users/dpwe/projects/shazam/Nine_Lives/*mp3'
qry = 'query.mp3'
hash_tab = audfprint_analyze.glob2hashtable(pat)
matcher = Matcher()
rslts, dur, nhash = matcher.match_file(audfprint_analyze.g2h_analyzer,
hash_tab, qry)
t_hop = 0.02322
print "Matched", qry, "(", dur, "s,", nhash, "hashes)", \
"as", hash_tab.names[rslts[0][0]], \
"at", t_hop*float(rslts[0][2]), "with", rslts[0][1], \
"of", rslts[0][3], "hashes"
# Run the local test if called from the command line
if __name__ == "__main__":
localtest()
|
|
from sympy.simplify import simplify, trigsimp
from sympy import pi, sqrt, symbols, ImmutableMatrix as Matrix, \
sin, cos, Function, Integral, Derivative, diff, integrate
from sympy.vector.vector import Vector, BaseVector, VectorAdd, \
VectorMul, VectorZero
from sympy.vector.coordsysrect import CoordSysCartesian
C = CoordSysCartesian('C')
i, j, k = C.base_vectors()
a, b, c = symbols('a b c')
def test_vector_sympy():
"""
    Test whether the Vector framework conforms to the hashing
and equality testing properties of SymPy.
"""
v1 = 3*j
assert v1 == j*3
assert v1.components == {j: 3}
v2 = 3*i + 4*j + 5*k
v3 = 2*i + 4*j + i + 4*k + k
assert v3 == v2
assert v3.__hash__() == v2.__hash__()
def test_vector():
assert isinstance(i, BaseVector)
assert i != j
assert j != k
assert k != i
assert i - i == Vector.zero
assert i + Vector.zero == i
assert i - Vector.zero == i
assert Vector.zero != 0
assert -Vector.zero == Vector.zero
v1 = a*i + b*j + c*k
v2 = a**2*i + b**2*j + c**2*k
v3 = v1 + v2
v4 = 2 * v1
v5 = a * i
assert isinstance(v1, VectorAdd)
assert v1 - v1 == Vector.zero
assert v1 + Vector.zero == v1
assert v1.dot(i) == a
assert v1.dot(j) == b
assert v1.dot(k) == c
assert i.dot(v2) == a**2
assert j.dot(v2) == b**2
assert k.dot(v2) == c**2
assert v3.dot(i) == a**2 + a
assert v3.dot(j) == b**2 + b
assert v3.dot(k) == c**2 + c
assert v1 + v2 == v2 + v1
assert v1 - v2 == -1 * (v2 - v1)
assert a * v1 == v1 * a
assert isinstance(v5, VectorMul)
assert v5.base_vector == i
assert v5.measure_number == a
assert isinstance(v4, Vector)
assert isinstance(v4, VectorAdd)
assert isinstance(v4, Vector)
assert isinstance(Vector.zero, VectorZero)
assert isinstance(Vector.zero, Vector)
assert isinstance(v1 * 0, VectorZero)
assert v1.to_matrix(C) == Matrix([[a], [b], [c]])
assert i.components == {i: 1}
assert v5.components == {i: a}
assert v1.components == {i: a, j: b, k: c}
assert VectorAdd(v1, Vector.zero) == v1
assert VectorMul(a, v1) == v1*a
assert VectorMul(1, i) == i
assert VectorAdd(v1, Vector.zero) == v1
assert VectorMul(0, Vector.zero) == Vector.zero
def test_vector_magnitude_normalize():
assert Vector.zero.magnitude() == 0
assert Vector.zero.normalize() == Vector.zero
assert i.magnitude() == 1
assert j.magnitude() == 1
assert k.magnitude() == 1
assert i.normalize() == i
assert j.normalize() == j
assert k.normalize() == k
v1 = a * i
assert v1.normalize() == (a/sqrt(a**2))*i
assert v1.magnitude() == sqrt(a**2)
v2 = a*i + b*j + c*k
assert v2.magnitude() == sqrt(a**2 + b**2 + c**2)
assert v2.normalize() == v2 / v2.magnitude()
v3 = i + j
assert v3.normalize() == (sqrt(2)/2)*C.i + (sqrt(2)/2)*C.j
def test_vector_simplify():
A, s, k, m = symbols('A, s, k, m')
test1 = (1 / a + 1 / b) * i
assert (test1 & i) != (a + b) / (a * b)
test1 = simplify(test1)
assert (test1 & i) == (a + b) / (a * b)
assert test1.simplify() == simplify(test1)
test2 = (A**2 * s**4 / (4 * pi * k * m**3)) * i
test2 = simplify(test2)
assert (test2 & i) == (A**2 * s**4 / (4 * pi * k * m**3))
test3 = ((4 + 4 * a - 2 * (2 + 2 * a)) / (2 + 2 * a)) * i
test3 = simplify(test3)
assert (test3 & i) == 0
test4 = ((-4 * a * b**2 - 2 * b**3 - 2 * a**2 * b) / (a + b)**2) * i
test4 = simplify(test4)
assert (test4 & i) == -2 * b
v = (sin(a)+cos(a))**2*i - j
assert trigsimp(v) == (2*sin(a + pi/4)**2)*i + (-1)*j
assert trigsimp(v) == v.trigsimp()
assert simplify(Vector.zero) == Vector.zero
def test_vector_dot():
assert i.dot(Vector.zero) == 0
assert Vector.zero.dot(i) == 0
assert i & Vector.zero == 0
assert i.dot(i) == 1
assert i.dot(j) == 0
assert i.dot(k) == 0
assert i & i == 1
assert i & j == 0
assert i & k == 0
assert j.dot(i) == 0
assert j.dot(j) == 1
assert j.dot(k) == 0
assert j & i == 0
assert j & j == 1
assert j & k == 0
assert k.dot(i) == 0
assert k.dot(j) == 0
assert k.dot(k) == 1
assert k & i == 0
assert k & j == 0
assert k & k == 1
def test_vector_cross():
assert i.cross(Vector.zero) == Vector.zero
assert Vector.zero.cross(i) == Vector.zero
assert i.cross(i) == Vector.zero
assert i.cross(j) == k
assert i.cross(k) == -j
assert i ^ i == Vector.zero
assert i ^ j == k
assert i ^ k == -j
assert j.cross(i) == -k
assert j.cross(j) == Vector.zero
assert j.cross(k) == i
assert j ^ i == -k
assert j ^ j == Vector.zero
assert j ^ k == i
assert k.cross(i) == j
assert k.cross(j) == -i
assert k.cross(k) == Vector.zero
assert k ^ i == j
assert k ^ j == -i
assert k ^ k == Vector.zero
def test_projection():
v1 = i + j + k
v2 = 3*i + 4*j
v3 = 0*i + 0*j
assert v1.projection(v1) == i + j + k
assert v1.projection(v2) == 7/3*C.i + 7/3*C.j + 7/3*C.k
assert v1.projection(v1, scalar=True) == 1
assert v1.projection(v2, scalar=True) == 7/3
assert v3.projection(v1) == Vector.zero
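# The expected values above are consistent with the usual projection formula:
# projecting v2 onto v1 gives (v1.dot(v2) / v1.magnitude()**2) * v1, which for
# v1 = i + j + k and v2 = 3*i + 4*j is (7/3)*(i + j + k); with scalar=True only
# the coefficient 7/3 is returned, and projecting onto the zero vector yields
# Vector.zero.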
def test_vector_diff_integrate():
f = Function('f')
v = f(a)*C.i + a**2*C.j - C.k
assert Derivative(v, a) == Derivative((f(a))*C.i +
a**2*C.j + (-1)*C.k, a)
assert (diff(v, a) == v.diff(a) == Derivative(v, a).doit() ==
(Derivative(f(a), a))*C.i + 2*a*C.j)
assert (Integral(v, a) == (Integral(f(a), a))*C.i +
(Integral(a**2, a))*C.j + (Integral(-1, a))*C.k)
|
|
from twisted.trial import unittest
from twisted.internet import defer
from coherence.dispatcher import Dispatcher, UnknownSignal, Receiver, \
SignalingProperty, ChangedSignalingProperty, CustomSignalingProperty
class TestDispatcher(Dispatcher):
__signals__ = {'test': 'Test signal'}
class SimpleTarget(object):
def __init__(self):
self.called = 0
self.called_a = 0
self.called_b = 0
self.called_c = 0
self.called_d = 0
def callback(self):
self.called += 1
def updater(self, arg1, arg2, value, arg4, key_a='p', variable=None):
setattr(self, variable, value)
setattr(self, "%s_%s" % (variable, arg2), key_a)
def plus(self, plus, variable=False):
setattr(self, variable, getattr(self, variable) + plus)
    def fail_before(self, plus, variable=False):
        raise TypeError(':(')
        # Intentionally unreachable: this receiver always fails before updating.
        self.plus(plus, variable=variable)
class TestDispatching(unittest.TestCase):
def setUp(self):
self.called_counter = 0
self.dispatcher = TestDispatcher()
self.target = SimpleTarget()
def test_simple_emit(self):
receiver = self.dispatcher.connect('test', self.target.callback)
self.dispatcher.emit('test')
self.assertEquals(self.target.called, 1)
self.dispatcher.emit('test')
self.assertEquals(self.target.called, 2)
self.dispatcher.disconnect(receiver)
self.dispatcher.emit('test')
self.assertEquals(self.target.called, 2)
def test_simple_deferred_emit(self):
receiver = self.dispatcher.connect('test', self.target.callback)
self.dispatcher.deferred_emit('test')
self.assertEquals(self.target.called, 1)
self.dispatcher.deferred_emit('test')
self.assertEquals(self.target.called, 2)
self.dispatcher.disconnect(receiver)
self.dispatcher.deferred_emit('test')
self.assertEquals(self.target.called, 2)
def test_simple_save_emit(self):
def call(res):
return self.dispatcher.save_emit('test')
def test(res, val):
self.assertEquals(self.target.called, val)
receiver = self.dispatcher.connect('test', self.target.callback)
dfr = defer.succeed(None)
dfr.addCallback(call)
dfr.addCallback(test, 1)
dfr.addCallback(call)
dfr.addCallback(test, 2)
dfr.addCallback(lambda x: self.dispatcher.disconnect(receiver))
dfr.addCallback(call)
dfr.addCallback(test, 2)
return dfr
def test_connect_typo(self):
self.assertRaises(UnknownSignal, self.dispatcher.connect, 'Test', None)
def test_disconnect_none_receiver(self):
"""
        Trying to disconnect with None shouldn't fail; it is a valid use case.
"""
self.dispatcher.disconnect(None)
def test_disconnect_false_receiver(self):
"""
this receiver isn't coming from this dispatcher
"""
        # This is a really contrived case: you are *not* supposed to instantiate a Receiver yourself anyway.
rec = Receiver('test', None, None, None)
self.dispatcher.disconnect(rec)
def test_disconnect_wrong_signal_receiver(self):
rec = Receiver('Test', None, None, None)
self.assertRaises(UnknownSignal, self.dispatcher.disconnect, rec)
def test_disconnect_not_receiver(self):
self.assertRaises(TypeError, self.dispatcher.disconnect, 'test')
def test_emit_false_signal(self):
self.assertRaises(UnknownSignal, self.dispatcher.emit, False)
def test_emit_without_receivers(self):
self.dispatcher.emit('test')
self.assertEquals(self.target.called, 0)
def test_emit_with_multiple_receiver(self):
rc1 = self.dispatcher.connect('test', self.target.updater,
1, 2, variable='va1')
rc2 = self.dispatcher.connect('test', self.target.updater,
'value', 2, variable='variable')
rc3 = self.dispatcher.connect('test', self.target.updater,
'other', 2, variable='one')
self.dispatcher.emit('test', self, 'other', key_a='q')
# check rc1
self.assertEquals(self.target.va1, 1)
self.assertEquals(self.target.va1_other, 'q')
#check rc2
self.assertEquals(self.target.variable, 'value')
self.assertEquals(self.target.variable_other, 'q')
# check rc3
self.assertEquals(self.target.one, 'other')
self.assertEquals(self.target.one_other, 'q')
        # now removing the one in the middle
self.dispatcher.disconnect(rc2)
# and try again with other data
self.dispatcher.emit('test', self, 'other', key_a='thistime')
# check rc1
self.assertEquals(self.target.va1, 1)
self.assertEquals(self.target.va1_other, 'thistime')
#check rc2
self.assertEquals(self.target.variable, 'value')
self.assertEquals(self.target.variable_other, 'q')
# check rc3
self.assertEquals(self.target.one, 'other')
self.assertEquals(self.target.one_other, 'thistime')
# no keyword
self.dispatcher.emit('test', self, 'a')
# worked for rc1 and rc3 with the default value
self.assertEquals(self.target.va1_a, 'p')
self.assertEquals(self.target.one_a, 'p')
# but not on rc2
self.assertFalse(hasattr(self.target, 'variable_a'))
self.dispatcher.disconnect(rc1)
self.dispatcher.disconnect(rc3)
def test_emit_multiple_with_failing_in_between(self):
rc1 = self.dispatcher.connect('test', self.target.plus,
1, variable='called_a')
rc2 = self.dispatcher.connect('test', self.target.plus,
2, variable='called_b')
rc3 = self.dispatcher.connect('test', self.target.fail_before,
3, variable='called_c')
rc4 = self.dispatcher.connect('test', self.target.plus,
4, variable='called_d')
self.dispatcher.emit('test')
self.assertEquals(self.target.called_a, 1)
self.assertEquals(self.target.called_b, 2)
self.assertEquals(self.target.called_c, 0)
self.assertEquals(self.target.called_d, 4)
self.dispatcher.emit('test')
self.assertEquals(self.target.called_a, 2)
self.assertEquals(self.target.called_b, 4)
self.assertEquals(self.target.called_c, 0)
self.assertEquals(self.target.called_d, 8)
self.dispatcher.disconnect(rc1)
self.dispatcher.disconnect(rc2)
self.dispatcher.disconnect(rc3)
self.dispatcher.disconnect(rc4)
# Receiver tests
class TestReceiver(unittest.TestCase):
def setUp(self):
self.called = 0
def _callback(self, *args, **kw):
self.called += 1
self.args = args
self.kw = kw
def test_simple_calling(self):
rec = Receiver('test', self._callback, (), {})
self.assertEquals(rec.signal, 'test')
rec()
self.assertEquals(self.called, 1)
self.assertEquals(self.args, ())
self.assertEquals(self.kw, {})
rec()
self.assertEquals(self.called, 2)
self.assertEquals(self.args, ())
self.assertEquals(self.kw, {})
rec()
self.assertEquals(self.called, 3)
self.assertEquals(self.args, ())
self.assertEquals(self.kw, {})
def test_calling_with_args(self):
rec = Receiver('test', self._callback, (1, 2, 3), {'test': 'a'})
self.assertEquals(rec.signal, 'test')
rec(0)
self.assertEquals(self.called, 1)
self.assertEquals(self.args, (0, 1, 2, 3))
self.assertEquals(self.kw, {'test': 'a'})
rec(-1)
self.assertEquals(self.called, 2)
self.assertEquals(self.args, (-1, 1, 2, 3))
self.assertEquals(self.kw, {'test': 'a'})
rec(-2)
self.assertEquals(self.called, 3)
self.assertEquals(self.args, (-2, 1, 2, 3))
self.assertEquals(self.kw, {'test': 'a'})
def test_calling_with_kw(self):
rec = Receiver('test', self._callback, (1, 2, 3), {'test': 'a'})
self.assertEquals(rec.signal, 'test')
rec(p='q')
self.assertEquals(self.called, 1)
self.assertEquals(self.args, (1, 2, 3))
self.assertEquals(self.kw, {'test': 'a', 'p': 'q'})
rec(other='wise')
self.assertEquals(self.called, 2)
self.assertEquals(self.args, (1, 2, 3))
self.assertEquals(self.kw, {'test': 'a', 'other': 'wise'})
rec(and_one='more')
self.assertEquals(self.called, 3)
self.assertEquals(self.args, (1, 2, 3))
self.assertEquals(self.kw, {'test': 'a', 'and_one': 'more'})
def test_calling_with_clashing_kw(self):
rec = Receiver('test', self._callback, (1, 2, 3), {'test': 'a', 'p': 'a'})
self.assertEquals(rec.signal, 'test')
rec(p='q')
self.assertEquals(self.called, 1)
self.assertEquals(self.args, (1, 2, 3))
self.assertEquals(self.kw, {'test': 'a', 'p': 'q'})
rec(other='wise')
self.assertEquals(self.called, 2)
self.assertEquals(self.args, (1, 2, 3))
self.assertEquals(self.kw, {'test': 'a', 'other': 'wise', 'p': 'a'})
def test_calling_with_clashing_kw_and_args(self):
rec = Receiver('test', self._callback, (1, 2, 3), {'test': 'a', 'p': 'a'})
self.assertEquals(rec.signal, 'test')
# without
rec()
self.assertEquals(self.called, 1)
self.assertEquals(self.args, (1, 2, 3))
self.assertEquals(self.kw, {'test': 'a', 'p': 'a'})
rec(1, 2, 7, test='True', o='p')
self.assertEquals(self.called, 2)
self.assertEquals(self.args, (1, 2, 7, 1, 2, 3))
self.assertEquals(self.kw, {'test': 'True', 'o': 'p', 'p': 'a'})
def test_repr(self):
rec = Receiver('test', 'callback', (0, 1, 2), {})
self.assertIn('%s' % id(rec), '%r' % rec)
self.assertIn('test', '%r' % rec)
self.assertIn('callback', '%r' % rec)
self.assertIn('0, 1, 2', '%r' % rec)
# Signal Descriptor test
class SimpleSignaler(object):
simple = SignalingProperty('simple')
def __init__(self):
self.emitted = []
def emit(self, signal, *values, **kw):
self.emitted.append((signal, values, kw))
class DummySignaler(SimpleSignaler):
simple_with_default = SignalingProperty('simple2', default=0)
double_a = SignalingProperty('same-signal')
double_b = SignalingProperty('same-signal')
double_c = SignalingProperty('dif-var', var_name='_a')
double_d = SignalingProperty('dif-var', var_name='_b')
changer = ChangedSignalingProperty('state')
changer_with_default = ChangedSignalingProperty('state2', default='off')
def __init__(self):
self.emitted = []
self._x = 0
self.x_get = 0
self.x_set = 0
def xget(self):
self.x_get += 1
return self._x
def xset(self, value):
self.x_set += 1
self._x = value
def xsq(self, value):
self.x_set += 1
self._x = value * value
x = CustomSignalingProperty('x-changed', xget, xset)
x_square = CustomSignalingProperty('x-square', xget, xsq)
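# The tests below exercise the descriptor classes imported from
# coherence.dispatcher: a SignalingProperty stores its value on the instance
# and calls emit() only when the assigned value actually changes,
# ChangedSignalingProperty also passes the previous value along, and
# CustomSignalingProperty delegates storage to the supplied getter/setter pair.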
class TestSignalingDescriptors(unittest.TestCase):
def setUp(self):
self.signaler = DummySignaler()
def test_simple(self):
self.signaler.simple = 'A'
self._check(values=[('simple', ('A',), {})])
# empty
self.signaler.emitted = []
self.signaler.simple = 'A'
# stays empty
self._check()
def test_simple_with_default(self):
self.signaler.simple_with_default = 'B'
self._check(values=[('simple2', ('B',), {})])
# empty
self.signaler.emitted = []
self.signaler.simple_with_default = 'B'
# stays empty
self._check()
def test_changer(self):
self.signaler.changer = 'Yes'
self._check(values=[('state', ('Yes', None), {})])
# empty
self.signaler.emitted = []
self.signaler.changer = 'Yes'
# stays empty
self._check()
def test_changer_with_default(self):
self.signaler.changer_with_default = 'another'
self._check(values=[('state2', ('another', 'off'), {})])
# empty
self.signaler.emitted = []
self.signaler.changer_with_default = 'another'
# stays empty
self._check()
def test_double_same_var(self):
self.signaler.double_a = 'A1'
self.signaler.double_b = 'B2'
self._check(values=[('same-signal', ('A1',), {}),
('same-signal', ('B2',), {})])
# empty
self.signaler.emitted = []
        # sending B2 over double_a even though it was changed by double_b
self.signaler.double_a = 'B2'
self.signaler.double_b = 'B2'
# stays empty
self._check()
# but changing them different works
self.signaler.double_a = 'B1'
self.signaler.double_b = 'A2'
self._check(values=[('same-signal', ('B1',), {}),
('same-signal', ('A2',), {})])
    def test_double_different_var(self):
self.signaler.double_c = 'A1'
self.signaler.double_d = 'B2'
self._check(values=[('dif-var', ('A1',), {}),
('dif-var', ('B2',), {})])
# empty
self.signaler.emitted = []
self.signaler.double_c = 'A1'
self.signaler.double_d = 'B2'
# stays empty
self._check()
# but they still allow changes
self.signaler.double_c = 'B1'
self.signaler.double_d = 'A2'
self._check(values=[('dif-var', ('B1',), {}),
('dif-var', ('A2',), {})])
def test_custom(self):
self.signaler.x = 'Pocahontas'
self._check(values=[('x-changed', ('Pocahontas',), {})],
x='Pocahontas', x_get=2, x_set=1)
self.assertEquals(self.signaler.x, 'Pocahontas')
        # setting it again to the same value is boring: nothing is emitted
self.signaler.emitted = []
self.signaler.x_get = 0
self.signaler.x_set = 0
self.signaler.x = 'Pocahontas'
self.assertEquals(self.signaler.emitted, [])
self.assertEquals(self.signaler.x, 'Pocahontas')
def test_custom_square(self):
self.signaler.x_square = 10
self._check(values=[('x-square', (100,), {})],
x=100, x_get=2, x_set=1)
self.assertEquals(self.signaler.x, 100)
def test_custom_square_nearly_the_same(self):
self.signaler._x = 10
self.signaler.x_square = 10
self._check(values=[('x-square', (100,), {})],
x=100, x_get=2, x_set=1)
self.assertEquals(self.signaler.x, 100)
def _check(self, values=[], x=0, x_set=0, x_get=0):
self.assertEquals(self.signaler._x, x)
self.assertEquals(self.signaler.x_set, x_set)
self.assertEquals(self.signaler.x_get, x_get)
self.assertEquals(self.signaler.emitted, values)
class TestStayInObjectSignaling(unittest.TestCase):
def setUp(self):
self.foo = SimpleSignaler()
self.bar = SimpleSignaler()
def test_double_different_values(self):
self.foo.simple = 'A'
self.bar.simple = 'B'
self.assertEquals(self.foo.simple, 'A')
self.assertEquals(self.bar.simple, 'B')
self.assertEquals(len(self.foo.emitted), 1)
self.assertEquals(len(self.bar.emitted), 1)
self.assertEquals(self.foo.emitted[0][1][0], 'A')
self.assertEquals(self.bar.emitted[0][1][0], 'B')
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class netbridge(base_resource) :
""" Configuration for network bridge resource. """
def __init__(self) :
self._name = ""
self.___count = 0
@property
def name(self) :
"""The name of the network bridge.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""The name of the network bridge.
"""
try :
self._name = name
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(netbridge_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.netbridge
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add netbridge.
"""
try :
if type(resource) is not list :
addresource = netbridge()
addresource.name = resource.name
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ netbridge() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete netbridge.
"""
try :
if type(resource) is not list :
deleteresource = netbridge()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ netbridge() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ netbridge() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the netbridge resources that are configured on netscaler.
"""
try :
if not name :
obj = netbridge()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = netbridge()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [netbridge() for _ in range(len(name))]
obj = [netbridge() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = netbridge()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of netbridge resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = netbridge()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the netbridge resources configured on NetScaler.
"""
try :
obj = netbridge()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of netbridge resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = netbridge()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class netbridge_response(base_response) :
def __init__(self, length=1) :
self.netbridge = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.netbridge = [netbridge() for _ in range(length)]
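# Illustrative usage sketch (assumes an authenticated nitro_service instance
# named client; not part of the generated module):
#     nb = netbridge()
#     nb.name = "bridge1"
#     netbridge.add(client, nb)            # create the bridge
#     bridges = netbridge.get(client)      # fetch every configured bridge
#     netbridge.delete(client, "bridge1")  # delete it again by name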
|
|
from __future__ import absolute_import
from typing import Any, Dict, List, Optional, Text
import logging
import re
from email.header import decode_header
import email.message as message
from django.conf import settings
from zerver.lib.actions import decode_email_address, get_email_gateway_message_string_from_address, \
internal_send_message
from zerver.lib.notifications import convert_html_to_markdown
from zerver.lib.queue import queue_json_publish
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.upload import upload_message_image
from zerver.lib.utils import generate_random_token
from zerver.lib.str_utils import force_text
from zerver.models import Stream, Recipient, get_user_profile_by_email, \
get_user_profile_by_id, get_display_recipient, get_recipient, \
Message, Realm, UserProfile
from six import binary_type
import six
import talon
from talon import quotations
talon.init()
logger = logging.getLogger(__name__)
def redact_stream(error_message):
# type: (Text) -> Text
domain = settings.EMAIL_GATEWAY_PATTERN.rsplit('@')[-1]
stream_match = re.search(u'\\b(.*?)@' + domain, error_message)
if stream_match:
stream_name = stream_match.groups()[0]
return error_message.replace(stream_name, "X" * len(stream_name))
return error_message
def report_to_zulip(error_message):
# type: (Text) -> None
if settings.ERROR_BOT is None:
return
error_bot = get_user_profile_by_email(settings.ERROR_BOT)
error_stream = Stream.objects.get(name="errors", realm=error_bot.realm)
send_zulip(settings.ERROR_BOT, error_stream, u"email mirror error",
u"""~~~\n%s\n~~~""" % (error_message,))
def log_and_report(email_message, error_message, debug_info):
# type: (message.Message, Text, Dict[str, Any]) -> None
scrubbed_error = u"Sender: %s\n%s" % (email_message.get("From"),
redact_stream(error_message))
if "to" in debug_info:
scrubbed_error = u"Stream: %s\n%s" % (redact_stream(debug_info["to"]),
scrubbed_error)
if "stream" in debug_info:
scrubbed_error = u"Realm: %s\n%s" % (debug_info["stream"].realm.string_id,
scrubbed_error)
logger.error(scrubbed_error)
report_to_zulip(scrubbed_error)
# Temporary missed message addresses
redis_client = get_redis_client()
def missed_message_redis_key(token):
# type: (Text) -> Text
return 'missed_message:' + token
def is_missed_message_address(address):
# type: (Text) -> bool
msg_string = get_email_gateway_message_string_from_address(address)
return is_mm_32_format(msg_string)
def is_mm_32_format(msg_string):
# type: (Text) -> bool
'''
Missed message strings are formatted with a little "mm" prefix
followed by a randomly generated 32-character string.
'''
return msg_string.startswith('mm') and len(msg_string) == 34
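# For example, u'mm' followed by a 32-character token (34 characters in total)
# is treated as a missed-message address, while an ordinary stream address
# string of any other shape is not.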
def get_missed_message_token_from_address(address):
# type: (Text) -> Text
msg_string = get_email_gateway_message_string_from_address(address)
if msg_string is None:
raise ZulipEmailForwardError('Address not recognized by gateway.')
if not is_mm_32_format(msg_string):
raise ZulipEmailForwardError('Could not parse missed message address')
# strip off the 'mm' before returning the redis key
return msg_string[2:]
def create_missed_message_address(user_profile, message):
# type: (UserProfile, Message) -> Text
if settings.EMAIL_GATEWAY_PATTERN == '':
logging.warning("EMAIL_GATEWAY_PATTERN is an empty string, using "
"NOREPLY_EMAIL_ADDRESS in the 'from' field.")
return settings.NOREPLY_EMAIL_ADDRESS
if message.recipient.type == Recipient.PERSONAL:
# We need to reply to the sender so look up their personal recipient_id
recipient_id = get_recipient(Recipient.PERSONAL, message.sender_id).id
else:
recipient_id = message.recipient_id
data = {
'user_profile_id': user_profile.id,
'recipient_id': recipient_id,
'subject': message.subject,
}
while True:
token = generate_random_token(32)
key = missed_message_redis_key(token)
if redis_client.hsetnx(key, 'uses_left', 1):
break
with redis_client.pipeline() as pipeline:
pipeline.hmset(key, data)
pipeline.expire(key, 60 * 60 * 24 * 5)
pipeline.execute()
address = u'mm' + token
return settings.EMAIL_GATEWAY_PATTERN % (address,)
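# If EMAIL_GATEWAY_PATTERN were, say, u'mirror+%s@example.com' (a hypothetical
# value, not taken from this file), the address returned above would be
# u'mirror+mm<token>@example.com', which is exactly the shape that
# is_missed_message_address() recognizes.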
def mark_missed_message_address_as_used(address):
# type: (Text) -> None
token = get_missed_message_token_from_address(address)
key = missed_message_redis_key(token)
with redis_client.pipeline() as pipeline:
pipeline.hincrby(key, 'uses_left', -1)
pipeline.expire(key, 60 * 60 * 24 * 5)
new_value = pipeline.execute()[0]
if new_value < 0:
redis_client.delete(key)
raise ZulipEmailForwardError('Missed message address has already been used')
def send_to_missed_message_address(address, message):
# type: (Text, message.Message) -> None
token = get_missed_message_token_from_address(address)
key = missed_message_redis_key(token)
result = redis_client.hmget(key, 'user_profile_id', 'recipient_id', 'subject')
if not all(val is not None for val in result):
raise ZulipEmailForwardError('Missing missed message address data')
user_profile_id, recipient_id, subject = result
user_profile = get_user_profile_by_id(user_profile_id)
recipient = Recipient.objects.get(id=recipient_id)
display_recipient = get_display_recipient(recipient)
    # Check against string types so we don't depend on the list return type from
    # get_display_recipient
if not isinstance(display_recipient, six.string_types):
recipient_str = u','.join([user['email'] for user in display_recipient])
else:
recipient_str = display_recipient
body = filter_footer(extract_body(message))
body += extract_and_upload_attachments(message, user_profile.realm)
if not body:
body = '(No email body)'
if recipient.type == Recipient.STREAM:
recipient_type_name = 'stream'
else:
recipient_type_name = 'private'
internal_send_message(user_profile.realm, user_profile.email,
recipient_type_name, recipient_str, subject, body)
logging.info("Successfully processed email from %s to %s" % (
user_profile.email, recipient_str))
## Sending the Zulip ##
class ZulipEmailForwardError(Exception):
pass
def send_zulip(sender, stream, topic, content):
# type: (Text, Stream, Text, Text) -> None
internal_send_message(
stream.realm,
sender,
"stream",
stream.name,
topic[:60],
content[:2000])
def valid_stream(stream_name, token):
# type: (Text, Text) -> bool
try:
stream = Stream.objects.get(email_token=token)
return stream.name.lower() == stream_name.lower()
except Stream.DoesNotExist:
return False
def get_message_part_by_type(message, content_type):
# type: (message.Message, Text) -> Optional[Text]
charsets = message.get_charsets()
for idx, part in enumerate(message.walk()):
if part.get_content_type() == content_type:
content = part.get_payload(decode=True)
assert isinstance(content, binary_type)
if charsets[idx]:
text = content.decode(charsets[idx], errors="ignore")
return text
return None
def extract_body(message):
# type: (message.Message) -> Text
# If the message contains a plaintext version of the body, use
# that.
plaintext_content = get_message_part_by_type(message, "text/plain")
if plaintext_content:
return quotations.extract_from_plain(plaintext_content)
# If we only have an HTML version, try to make that look nice.
html_content = get_message_part_by_type(message, "text/html")
if html_content:
html_content = quotations.extract_from_html(html_content)
return convert_html_to_markdown(html_content)
raise ZulipEmailForwardError("Unable to find plaintext or HTML message body")
def filter_footer(text):
# type: (Text) -> Text
# Try to filter out obvious footers.
possible_footers = [line for line in text.split("\n") if line.strip().startswith("--")]
if len(possible_footers) != 1:
# Be conservative and don't try to scrub content if there
# isn't a trivial footer structure.
return text
return text.partition("--")[0].strip()
def extract_and_upload_attachments(message, realm):
# type: (message.Message, Realm) -> Text
user_profile = get_user_profile_by_email(settings.EMAIL_GATEWAY_BOT)
attachment_links = []
payload = message.get_payload()
if not isinstance(payload, list):
# This is not a multipart message, so it can't contain attachments.
return ""
for part in payload:
content_type = part.get_content_type()
filename = part.get_filename()
if filename:
attachment = part.get_payload(decode=True)
if isinstance(attachment, binary_type):
s3_url = upload_message_image(filename, len(attachment), content_type,
attachment,
user_profile,
target_realm=realm)
formatted_link = u"[%s](%s)" % (filename, s3_url)
attachment_links.append(formatted_link)
else:
logger.warning("Payload is not bytes (invalid attachment %s in message from %s)." %
(filename, message.get("From")))
return u"\n".join(attachment_links)
def extract_and_validate(email):
# type: (Text) -> Stream
try:
stream_name, token = decode_email_address(email)
except (TypeError, ValueError):
raise ZulipEmailForwardError("Malformed email recipient " + email)
if not valid_stream(stream_name, token):
raise ZulipEmailForwardError("Bad stream token from email recipient " + email)
return Stream.objects.get(email_token=token)
def find_emailgateway_recipient(message):
# type: (message.Message) -> Text
    # We can't use Delivered-To; if there is an X-Gm-Original-To
# it is more accurate, so try to find the most-accurate
# recipient list in descending priority order
recipient_headers = ["X-Gm-Original-To", "Delivered-To", "To"]
recipients = [] # type: List[Text]
for recipient_header in recipient_headers:
r = message.get_all(recipient_header, None)
if r:
recipients = r
break
pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')]
match_email_re = re.compile(".*?".join(pattern_parts))
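    # With a hypothetical EMAIL_GATEWAY_PATTERN of u'mirror+%s@example.com',
    # pattern_parts holds the two escaped literals around the %s placeholder and
    # match_email_re matches any recipient of the form mirror+<anything>@example.com.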
for recipient_email in recipients:
if match_email_re.match(recipient_email):
return recipient_email
raise ZulipEmailForwardError("Missing recipient in mirror email")
def process_stream_message(to, subject, message, debug_info):
# type: (Text, Text, message.Message, Dict[str, Any]) -> None
stream = extract_and_validate(to)
body = filter_footer(extract_body(message))
body += extract_and_upload_attachments(message, stream.realm)
debug_info["stream"] = stream
send_zulip(settings.EMAIL_GATEWAY_BOT, stream, subject, body)
logging.info("Successfully processed email to %s (%s)" % (
stream.name, stream.realm.string_id))
def process_missed_message(to, message, pre_checked):
# type: (Text, message.Message, bool) -> None
if not pre_checked:
mark_missed_message_address_as_used(to)
send_to_missed_message_address(to, message)
def process_message(message, rcpt_to=None, pre_checked=False):
# type: (message.Message, Optional[Text], bool) -> None
subject_header = message.get("Subject", "(no subject)")
encoded_subject, encoding = decode_header(subject_header)[0]
if encoding is None:
subject = force_text(encoded_subject) # encoded_subject has type str when encoding is None
else:
try:
subject = encoded_subject.decode(encoding)
except (UnicodeDecodeError, LookupError):
subject = u"(unreadable subject)"
debug_info = {}
try:
if rcpt_to is not None:
to = rcpt_to
else:
to = find_emailgateway_recipient(message)
debug_info["to"] = to
if is_missed_message_address(to):
process_missed_message(to, message, pre_checked)
else:
process_stream_message(to, subject, message, debug_info)
except ZulipEmailForwardError as e:
# TODO: notify sender of error, retry if appropriate.
log_and_report(message, str(e), debug_info)
def mirror_email_message(data):
# type: (Dict[Text, Text]) -> Dict[str, str]
rcpt_to = data['recipient']
if is_missed_message_address(rcpt_to):
try:
mark_missed_message_address_as_used(rcpt_to)
except ZulipEmailForwardError:
return {
"status": "error",
"msg": "5.1.1 Bad destination mailbox address: "
"Bad or expired missed message address."
}
else:
try:
extract_and_validate(rcpt_to)
except ZulipEmailForwardError:
return {
"status": "error",
"msg": "5.1.1 Bad destination mailbox address: "
"Please use the address specified in your Streams page."
}
queue_json_publish(
"email_mirror",
{
"message": data['msg_text'],
"rcpt_to": rcpt_to
},
lambda x: None
)
return {"status": "success"}
|
|
import base64
import cProfile
import cStringIO
import collections
import gzip
import hmac
import inspect
import itertools
import logging
import math
import os
import socket
import struct
import time
from urlparse import urljoin
from django.conf import settings
from django.db.models import Model, FloatField
from django.db.models.query import QuerySet
from django.db.models.sql.compiler import SQLInsertCompiler
from django.http import Http404
from django.template import Context, Template
from django.utils.encoding import force_unicode
from django.utils.functional import Promise
from django.utils.html import escape, strip_tags
from django.utils.safestring import mark_safe
import facebook
from jinja2 import Markup
from canvas.exceptions import NotLoggedIntoFacebookError
from canvas.json import loads, dumps, client_dumps, backend_dumps, JSONDecodeError
from configuration import Config
from services import Services
logger = logging.getLogger()
unique = lambda iterable: list(set(iterable))
clamp = lambda lower, value, upper: min(upper, max(lower, value))
#TODO this is deprecated because of functools.wraps, unless someone knows an advantage to this method. --alex
def simple_decorator(decorator):
"""
This decorator can be used to turn simple functions
into well-behaved decorators, so long as the decorators
are fairly simple. If a decorator expects a function and
returns a function (no descriptors), and if it doesn't
modify function attributes or docstring, then it is
eligible to use this. Simply apply @simple_decorator to
your decorator and it will automatically preserve the
docstring and function attributes of functions to which
it is applied.
"""
def new_decorator(f):
g = decorator(f)
g.__name__ = f.__name__
g.__doc__ = f.__doc__
g.__dict__.update(f.__dict__)
return g
# Now a few lines needed to make simple_decorator itself
# be a well-behaved decorator.
new_decorator.__name__ = decorator.__name__
new_decorator.__doc__ = decorator.__doc__
new_decorator.__dict__.update(decorator.__dict__)
return new_decorator
def iterlist(fun):
def wrapper(*args, **kwargs):
return list(fun(*args, **kwargs))
return wrapper
def ip_to_int(ip):
try:
return struct.unpack('I', socket.inet_aton(ip))[0]
except (socket.error, struct.error, TypeError):
return 0
def int_to_ip(integer):
return socket.inet_ntoa(struct.pack('I', integer))
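# Quick illustration: ip_to_int and int_to_ip are inverses for well-formed
# dotted-quad addresses, and malformed input falls back to 0 instead of raising.
def _ip_conversion_example():
    assert int_to_ip(ip_to_int('127.0.0.1')) == '127.0.0.1'
    assert ip_to_int('not-an-ip') == 0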
def flatten(list_of_lists):
""" Flatten one level of nesting. """
return itertools.chain.from_iterable(list_of_lists)
def js_safety(thing, django=True, escape_html=False):
    # Escape angle brackets as \u literals so the string can be embedded in
    # inline <script> blocks without prematurely closing the tag.
    thing = thing.replace('<', '\\u003c').replace('>', '\\u003e')
    if django:
        return mark_safe(thing)
    else:
        if escape_html:
            # Returned as a plain string (not Markup), so it is not marked
            # safe for the template engine.
            return thing
        return Markup(thing)
def get_or_create(cls, **kwargs):
inst = cls.objects.get_or_none(**kwargs)
if inst is None:
inst = cls(**kwargs)
inst.save()
return inst
class GetSlice(object):
def __getitem__(self, item):
return item
get_slice = GetSlice()
# Modified, originally from http://en.wikipedia.org/wiki/Base_36
def _raw_base36encode(number):
"""
Convert positive integer to a base36 string.
JS: canvas.base36encode
"""
if not isinstance(number, (int, long)):
raise TypeError('number must be an integer')
if number <= 0:
raise ValueError('number must be a positive integer')
alphabet='0123456789abcdefghijklmnopqrstuvwxyz'
checksum = 0
base36 = ''
while number != 0:
number, i = divmod(number, 36)
checksum += i * 19
base36 = alphabet[i] + base36
return base36, alphabet[checksum % 36]
def base36encode(number):
base36, check = _raw_base36encode(number)
return base36 + check
class Base36DecodeException(Exception): pass
def base36decode(string):
if not string:
raise Base36DecodeException("Empty string")
base36, check = string[:-1], string[-1]
try:
number = int(base36, 36)
except ValueError:
raise Base36DecodeException("Invalid base36 characters.")
try:
_, expected_check = _raw_base36encode(number)
except ValueError:
raise Base36DecodeException("Invalid base36 number.")
if expected_check != check:
raise Base36DecodeException("base36 check character does not match.")
return number
def base36decode_or_404(string):
try:
return base36decode(string)
except Base36DecodeException:
raise Http404
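# Round-trip illustration: base36encode appends a check character derived from
# the digits, and base36decode verifies it before returning the original integer.
def _base36_roundtrip_example():
    encoded = base36encode(12345)
    assert base36decode(encoded) == 12345
    # Tampering with the check character ('?' is never in the alphabet) fails.
    try:
        base36decode(encoded[:-1] + '?')
        raise AssertionError('tampered value should not decode')
    except Base36DecodeException:
        pass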
def random_token(length=40):
assert length % 2 == 0
return base64.b16encode(os.urandom(length//2))
def placeholder(self, conn, field, value):
if isinstance(value, Now):
return value.as_sql(None, conn)[0]
else:
return SQLInsertCompiler.placeholder(self, field, value)
# EVIL HAX
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
result = ['INSERT INTO %s' % qn(opts.db_table)]
result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
values = [placeholder(self, self.connection, *v) for v in self.query.values]
result.append('VALUES (%s)' % ', '.join(values))
params = [param for param in self.query.params if not isinstance(param, Now)]
if self.return_id and self.connection.features.can_return_id_from_insert:
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
r_fmt, r_params = self.connection.ops.return_insert_id()
result.append(r_fmt % col)
params = params + r_params
return ' '.join(result), params
SQLInsertCompiler.as_sql = as_sql
class UnixTimestampField(FloatField):
def get_prep_value(self, value):
if isinstance(value, Now):
return value
return FloatField.get_prep_value(self, value)
class Now(object):
def prepare_database_save(self, field):
return self
def _sql(self, executable_name):
return Services.time.sql_now(executable_name)
def as_sql(self, qn, conn):
return self._sql(conn.client.executable_name), []
def get_fb_api(request):
fb_user = facebook.get_user_from_cookie(request.COOKIES,
Config['facebook']['app_id'],
Config['facebook']['secret'])
access_token = fb_user and fb_user.get('access_token')
if not access_token:
raise NotLoggedIntoFacebookError()
return fb_user, facebook.GraphAPI(access_token)
class ArgSpec(object):
"""
Convenience wrapper around `inspect.ArgSpec`.
Properties:
`args`:
The list of positional arg names. Unlike `inspect.ArgSpec.args`, this excludes the kwarg names.
`kwargs`:
A dictionary of kwarg names mapped to their default values.
Note that if the given function has an attribute named `_original_function`, that function is
inspected instead.
"""
def __init__(self, func):
func = getattr(func, '_original_function', func)
spec = inspect.getargspec(func)
defaults = spec.defaults or []
self.args = spec.args[:len(spec.args) - len(defaults)]
self.kwargs = dict(zip(spec.args[-len(defaults):], defaults))
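# Illustrative sketch: ArgSpec splits plain positional names from keyword
# defaults. The sample function is hypothetical and used only for this example.
def _arg_spec_example():
    def sample(a, b, c=1, d=2):
        pass
    spec = ArgSpec(sample)
    assert spec.args == ['a', 'b']
    assert spec.kwargs == {'c': 1, 'd': 2}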
def page_divide(x, y):
return max(1, int(math.ceil(1.0 * x / y)))
def paginate(iterable, page=1, per_page=50):
count = len(iterable)
page_last = page_divide(count, per_page)
# Handle 'current'.
if page == 'current':
start, stop = max(0, count-per_page), count
page = page_last
else:
# Handle p=9999
page = min(int(page), page_last)
start, stop = per_page * (page-1), per_page * (page)
# page_next is None when there aren't any more pages.
page_next = page+1 if page < page_last else None
return iterable[start:stop], page, page_next, page_last
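# Illustrative sketch: 120 items at 50 per page yields three pages; requesting a
# page past the end clamps to the last page.
def _paginate_example():
    items = range(120)
    page_items, page, page_next, page_last = paginate(items, page=3, per_page=50)
    assert (page, page_next, page_last) == (3, None, 3)
    assert len(list(page_items)) == 20
    _, page, _, _ = paginate(items, page=9999, per_page=50)
    assert page == 3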
def profile(fun):
if settings.PROFILE:
def wrap(request, *args, **kwargs):
profiler = cProfile.Profile()
result = profiler.runcall(fun, request, *args, **kwargs)
profiler.dump_stats('/var/canvas/website/run/profile-%s-%s.pstats'
% (request.path.replace('/', '_'), int(time.time() * 1000)))
return result
return wrap
else:
return fun
def generate_email_links():
"""
Feel free to rewrite me; I'm just an example from the last time this was used. Just change 'visitor' and 'data'.
"""
def visitor(item):
from canvas.models import User
username, groups = [x.strip() for x in item.split(':')]
user = User.objects.get(username=username)
subject = '%s, Canvas needs you!' % username
body = """Hey %s!\n\nWe've noticed you're one of the top posters in our Canvas-owned groups (%s), and would love to have you as a referee if you are interested. Referees are able to mark posts in appointed groups as off-topic, collapsing them and helping to keep discussion and posts relevant to the group."""
body += """\n\nIf you would be interested in helping us out, let us know, we'd greatly appreciate it!"""
body += """\n\nThanks for being awesome,\n- The Canvas Team"""
body %= (username, groups)
body = body.replace('\n', '%0A')
return {'to': user.email, 'subject': subject, 'body': body}
data = """blblnk: cute, pop_culture, canvas
nicepunk: cute, the_horror, stamps
powerfuldragon: cute, stamps, girls
cybertaco: games
tobacco: games
straitjacketfun: photography
slack_jack: photography
oliveoodle: pop_culture
ryoshi: pop_culture
oliveiralmeida: nerdy
AquilesBaeza: nerdy, the_horror
nebetsu: nerdy
Laban: food
ROPED: food
MuttonChops: canvas
Degu: stamps
sparknineone: girls"""
for item in data.split('\n'):
print """<a href="mailto:%(to)s?subject=%(subject)s&body=%(body)s">%(to)s</a><br/>""" % visitor(item)
def has_flagged_words(text):
"""
Returns True if @text has flagged words.
"""
return any((flag_word in text) for flag_word in Config.get('autoflag_words', []))
def make_absolute_url(relative_url, protocol=None):
"""
Takes a relative URL and makes it absolute by prepending the Canvas domain.
"Relative" here does not mean "foo" resolving to "/bar/foo" when you're already on "/bar";
it means an absolute path without the host portion of the URL.
`protocol` should be the scheme name without the "://", e.g. "http" or "https".
"""
# Is it already absolute?
if relative_url.split('//')[-1].startswith(settings.DOMAIN) and relative_url.startswith(protocol or '//'):
return relative_url
if protocol:
protocol = protocol + '://'
else:
protocol = '//'
base = protocol + settings.DOMAIN
return urljoin(base, relative_url)
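# Illustrative sketch (assumes settings.DOMAIN == 'example.com' purely for this
# example): paths become protocol-relative by default, and an explicit protocol
# is prepended when one is given.
#
#   make_absolute_url('/about')                   -> '//example.com/about'
#   make_absolute_url('/about', protocol='https') -> 'https://example.com/about'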
_template_tag_cache = {}
def render_template_tag(tag_name, args=None, module=None, context_instance=None):
"""
`args` may be either a list of tuples or any other iterable. If it contains tuples,
a context object is built from it with the first element of each tuple as the key and
the second as the value, and the keys are passed to the template tag. (This simulates
an ordered dict.) Otherwise, the items in `args` are passed to the template tag as
quoted strings.
It caches templates, but only if `args` contains tuples.
This renders to a string. To use it as a view response, wrap it in HttpResponse.
"""
def make_cache_key(module, tag_name, arg_cars):
return u'-'.join(e for e in [module, tag_name, arg_cars] if e is not None)
prefix, _args = '', ''
context = {}
cache_key = None # Doesn't cache if this doesn't get set.
if module:
prefix = u'{{% load {0} %}}'.format(module)
if args:
args = list(args)
if isinstance(args[0], tuple):
context.update(dict((arg[0], arg[1]) for arg in args))
_args = u' '.join(arg[0] for arg in args)
cache_key = make_cache_key(module, tag_name, _args)
else:
_args = u' '.join(u'"{0}"'.format(arg) for arg in args)
if cache_key and cache_key in _template_tag_cache:
template = _template_tag_cache[cache_key]
else:
template = Template(u'{0}{{% {1} {2} %}}'.format(prefix, tag_name, _args))
if cache_key:
_template_tag_cache[cache_key] = template
if context_instance is None:
context_instance = Context(context)
else:
for key, val in context.iteritems():
context_instance[key] = val
return template.render(context_instance)
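# Illustrative sketch (the `canvas_tags` module and `inline_image` tag names are
# hypothetical, used only for this example): tuple args populate the template
# context and enable caching, while plain string args are passed through quoted.
def _render_template_tag_example(comment):
    return render_template_tag('inline_image', args=[('comment', comment)],
                               module='canvas_tags')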
def get_arg_names(func):
""" Returns a list with function argument names. """
return inspect.getargspec(func)[0]
def token(msg):
""" Returns a Canvas "signed" hash of a token. This is used in unsubscribe links. """
return hmac.new(settings.SECRET_KEY, msg=str(msg)).hexdigest()
class paramaterized_defaultdict(collections.defaultdict):
""" Defaultdict where the default_factory takes key as an argument. """
def __missing__(self, key):
return self.default_factory(key)
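# Illustrative sketch: unlike a plain collections.defaultdict, the factory
# receives the missing key, so the default can depend on it.
def _paramaterized_defaultdict_example():
    squares = paramaterized_defaultdict(lambda key: key * key)
    assert squares[4] == 16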
def gzip_string(data):
str_file = cStringIO.StringIO()
gzip_file = gzip.GzipFile(fileobj=str_file, mode='wb')
gzip_file.write(data)
gzip_file.close()
return str_file.getvalue()
def strip_template_chars(text):
text = text.replace('{{', '{' * 2)
text = text.replace('}}', '}' * 2)
text = text.replace('{%', '{%')
text = text.replace('%}', '%}')
text = text.replace('{#', '{#')
text = text.replace('#}', '#}')
return text
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import random
import string
import textwrap
import unittest
from io import StringIO
from typing import Optional
from unittest import mock
import paramiko
import pytest
from parameterized import parameterized
from airflow import settings
from airflow.models import Connection
from airflow.providers.ssh.hooks.ssh import SSHHook
from airflow.utils import db
from airflow.utils.session import create_session
HELLO_SERVER_CMD = """
import socket, sys
listener = socket.socket()
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(('localhost', 2134))
listener.listen(1)
sys.stdout.write('ready')
sys.stdout.flush()
conn = listener.accept()[0]
conn.sendall(b'hello')
"""
def generate_key_string(pkey: paramiko.PKey, passphrase: Optional[str] = None):
key_fh = StringIO()
pkey.write_private_key(key_fh, password=passphrase)
key_fh.seek(0)
key_str = key_fh.read()
return key_str
def generate_host_key(pkey: paramiko.PKey):
key_fh = StringIO()
pkey.write_private_key(key_fh)
key_fh.seek(0)
key_obj = paramiko.RSAKey(file_obj=key_fh)
return key_obj.get_base64()
TEST_PKEY = paramiko.RSAKey.generate(4096)
TEST_PRIVATE_KEY = generate_key_string(pkey=TEST_PKEY)
TEST_HOST_KEY = generate_host_key(pkey=TEST_PKEY)
TEST_PKEY_ECDSA = paramiko.ECDSAKey.generate()
TEST_PRIVATE_KEY_ECDSA = generate_key_string(pkey=TEST_PKEY_ECDSA)
TEST_TIMEOUT = 20
TEST_CONN_TIMEOUT = 30
PASSPHRASE = ''.join(random.choice(string.ascii_letters) for i in range(10))
TEST_ENCRYPTED_PRIVATE_KEY = generate_key_string(pkey=TEST_PKEY, passphrase=PASSPHRASE)
class TestSSHHook(unittest.TestCase):
CONN_SSH_WITH_NO_EXTRA = 'ssh_with_no_extra'
CONN_SSH_WITH_PRIVATE_KEY_EXTRA = 'ssh_with_private_key_extra'
CONN_SSH_WITH_PRIVATE_KEY_ECDSA_EXTRA = 'ssh_with_private_key_ecdsa_extra'
CONN_SSH_WITH_PRIVATE_KEY_PASSPHRASE_EXTRA = 'ssh_with_private_key_passphrase_extra'
CONN_SSH_WITH_TIMEOUT_EXTRA = 'ssh_with_timeout_extra'
CONN_SSH_WITH_CONN_TIMEOUT_EXTRA = 'ssh_with_conn_timeout_extra'
CONN_SSH_WITH_TIMEOUT_AND_CONN_TIMEOUT_EXTRA = 'ssh_with_timeout_and_conn_timeout_extra'
CONN_SSH_WITH_EXTRA = 'ssh_with_extra'
CONN_SSH_WITH_EXTRA_FALSE_LOOK_FOR_KEYS = 'ssh_with_extra_false_look_for_keys'
CONN_SSH_WITH_HOST_KEY_EXTRA = 'ssh_with_host_key_extra'
CONN_SSH_WITH_HOST_KEY_EXTRA_WITH_TYPE = 'ssh_with_host_key_extra_with_type'
CONN_SSH_WITH_HOST_KEY_AND_NO_HOST_KEY_CHECK_FALSE = 'ssh_with_host_key_and_no_host_key_check_false'
CONN_SSH_WITH_HOST_KEY_AND_NO_HOST_KEY_CHECK_TRUE = 'ssh_with_host_key_and_no_host_key_check_true'
CONN_SSH_WITH_NO_HOST_KEY_AND_NO_HOST_KEY_CHECK_FALSE = 'ssh_with_no_host_key_and_no_host_key_check_false'
@classmethod
def tearDownClass(cls) -> None:
with create_session() as session:
conns_to_reset = [
cls.CONN_SSH_WITH_NO_EXTRA,
cls.CONN_SSH_WITH_PRIVATE_KEY_EXTRA,
cls.CONN_SSH_WITH_PRIVATE_KEY_PASSPHRASE_EXTRA,
cls.CONN_SSH_WITH_PRIVATE_KEY_ECDSA_EXTRA,
cls.CONN_SSH_WITH_TIMEOUT_EXTRA,
cls.CONN_SSH_WITH_CONN_TIMEOUT_EXTRA,
cls.CONN_SSH_WITH_TIMEOUT_AND_CONN_TIMEOUT_EXTRA,
cls.CONN_SSH_WITH_EXTRA,
cls.CONN_SSH_WITH_HOST_KEY_EXTRA,
cls.CONN_SSH_WITH_HOST_KEY_EXTRA_WITH_TYPE,
cls.CONN_SSH_WITH_HOST_KEY_AND_NO_HOST_KEY_CHECK_FALSE,
cls.CONN_SSH_WITH_HOST_KEY_AND_NO_HOST_KEY_CHECK_TRUE,
cls.CONN_SSH_WITH_NO_HOST_KEY_AND_NO_HOST_KEY_CHECK_FALSE,
]
connections = session.query(Connection).filter(Connection.conn_id.in_(conns_to_reset))
connections.delete(synchronize_session=False)
session.commit()
@classmethod
def setUpClass(cls) -> None:
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_NO_EXTRA,
host='localhost',
conn_type='ssh',
extra=None,
)
)
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_EXTRA,
host='localhost',
conn_type='ssh',
extra='{"compress" : true, "no_host_key_check" : "true", "allow_host_key_change": false}',
)
)
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_EXTRA_FALSE_LOOK_FOR_KEYS,
host='localhost',
conn_type='ssh',
extra='{"compress" : true, "no_host_key_check" : "true", '
'"allow_host_key_change": false, "look_for_keys": false}',
)
)
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_PRIVATE_KEY_EXTRA,
host='localhost',
conn_type='ssh',
extra=json.dumps({"private_key": TEST_PRIVATE_KEY}),
)
)
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_PRIVATE_KEY_PASSPHRASE_EXTRA,
host='localhost',
conn_type='ssh',
extra=json.dumps(
{"private_key": TEST_ENCRYPTED_PRIVATE_KEY, "private_key_passphrase": PASSPHRASE}
),
)
)
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_PRIVATE_KEY_ECDSA_EXTRA,
host='localhost',
conn_type='ssh',
extra=json.dumps({"private_key": TEST_PRIVATE_KEY_ECDSA}),
)
)
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_TIMEOUT_EXTRA,
host='localhost',
conn_type='ssh',
extra=json.dumps({"timeout": TEST_TIMEOUT}),
)
)
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_CONN_TIMEOUT_EXTRA,
host='localhost',
conn_type='ssh',
extra=json.dumps({"conn_timeout": TEST_CONN_TIMEOUT}),
)
)
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_TIMEOUT_AND_CONN_TIMEOUT_EXTRA,
host='localhost',
conn_type='ssh',
extra=json.dumps({"conn_timeout": TEST_CONN_TIMEOUT, 'timeout': TEST_TIMEOUT}),
)
)
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_HOST_KEY_EXTRA,
host='localhost',
conn_type='ssh',
extra=json.dumps({"private_key": TEST_PRIVATE_KEY, "host_key": TEST_HOST_KEY}),
)
)
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_HOST_KEY_EXTRA_WITH_TYPE,
host='localhost',
conn_type='ssh',
extra=json.dumps({"private_key": TEST_PRIVATE_KEY, "host_key": "ssh-rsa " + TEST_HOST_KEY}),
)
)
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_HOST_KEY_AND_NO_HOST_KEY_CHECK_FALSE,
host='remote_host',
conn_type='ssh',
extra=json.dumps(
{"private_key": TEST_PRIVATE_KEY, "host_key": TEST_HOST_KEY, "no_host_key_check": False}
),
)
)
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_HOST_KEY_AND_NO_HOST_KEY_CHECK_TRUE,
host='remote_host',
conn_type='ssh',
extra=json.dumps(
{"private_key": TEST_PRIVATE_KEY, "host_key": TEST_HOST_KEY, "no_host_key_check": True}
),
)
)
db.merge_conn(
Connection(
conn_id=cls.CONN_SSH_WITH_NO_HOST_KEY_AND_NO_HOST_KEY_CHECK_FALSE,
host='remote_host',
conn_type='ssh',
extra=json.dumps({"private_key": TEST_PRIVATE_KEY, "no_host_key_check": False}),
)
)
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_with_password(self, ssh_mock):
hook = SSHHook(
remote_host='remote_host',
port='port',
username='username',
password='password',
timeout=10,
key_file='fake.file',
)
with hook.get_conn():
ssh_mock.return_value.connect.assert_called_once_with(
banner_timeout=30.0,
hostname='remote_host',
username='username',
password='password',
key_filename='fake.file',
timeout=10,
compress=True,
port='port',
sock=None,
look_for_keys=True,
)
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_without_password(self, ssh_mock):
hook = SSHHook(
remote_host='remote_host', port='port', username='username', timeout=10, key_file='fake.file'
)
with hook.get_conn():
ssh_mock.return_value.connect.assert_called_once_with(
banner_timeout=30.0,
hostname='remote_host',
username='username',
key_filename='fake.file',
timeout=10,
compress=True,
port='port',
sock=None,
look_for_keys=True,
)
@mock.patch('airflow.providers.ssh.hooks.ssh.SSHTunnelForwarder')
def test_tunnel_with_password(self, ssh_mock):
hook = SSHHook(
remote_host='remote_host',
port='port',
username='username',
password='password',
timeout=10,
key_file='fake.file',
)
with hook.get_tunnel(1234):
ssh_mock.assert_called_once_with(
'remote_host',
ssh_port='port',
ssh_username='username',
ssh_password='password',
ssh_pkey='fake.file',
ssh_proxy=None,
local_bind_address=('localhost',),
remote_bind_address=('localhost', 1234),
logger=hook.log,
)
@mock.patch('airflow.providers.ssh.hooks.ssh.SSHTunnelForwarder')
def test_tunnel_without_password(self, ssh_mock):
hook = SSHHook(
remote_host='remote_host', port='port', username='username', timeout=10, key_file='fake.file'
)
with hook.get_tunnel(1234):
ssh_mock.assert_called_once_with(
'remote_host',
ssh_port='port',
ssh_username='username',
ssh_pkey='fake.file',
ssh_proxy=None,
local_bind_address=('localhost',),
remote_bind_address=('localhost', 1234),
host_pkey_directories=None,
logger=hook.log,
)
def test_conn_with_extra_parameters(self):
ssh_hook = SSHHook(ssh_conn_id=self.CONN_SSH_WITH_EXTRA)
assert ssh_hook.compress is True
assert ssh_hook.no_host_key_check is True
assert ssh_hook.allow_host_key_change is False
assert ssh_hook.look_for_keys is True
def test_conn_with_extra_parameters_false_look_for_keys(self):
ssh_hook = SSHHook(ssh_conn_id=self.CONN_SSH_WITH_EXTRA_FALSE_LOOK_FOR_KEYS)
assert ssh_hook.look_for_keys is False
@mock.patch('airflow.providers.ssh.hooks.ssh.SSHTunnelForwarder')
def test_tunnel_with_private_key(self, ssh_mock):
hook = SSHHook(
ssh_conn_id=self.CONN_SSH_WITH_PRIVATE_KEY_EXTRA,
remote_host='remote_host',
port='port',
username='username',
timeout=10,
)
with hook.get_tunnel(1234):
ssh_mock.assert_called_once_with(
'remote_host',
ssh_port='port',
ssh_username='username',
ssh_pkey=TEST_PKEY,
ssh_proxy=None,
local_bind_address=('localhost',),
remote_bind_address=('localhost', 1234),
host_pkey_directories=None,
logger=hook.log,
)
@mock.patch('airflow.providers.ssh.hooks.ssh.SSHTunnelForwarder')
def test_tunnel_with_private_key_passphrase(self, ssh_mock):
hook = SSHHook(
ssh_conn_id=self.CONN_SSH_WITH_PRIVATE_KEY_PASSPHRASE_EXTRA,
remote_host='remote_host',
port='port',
username='username',
timeout=10,
)
with hook.get_tunnel(1234):
ssh_mock.assert_called_once_with(
'remote_host',
ssh_port='port',
ssh_username='username',
ssh_pkey=TEST_PKEY,
ssh_proxy=None,
local_bind_address=('localhost',),
remote_bind_address=('localhost', 1234),
host_pkey_directories=None,
logger=hook.log,
)
@mock.patch('airflow.providers.ssh.hooks.ssh.SSHTunnelForwarder')
def test_tunnel_with_private_key_ecdsa(self, ssh_mock):
hook = SSHHook(
ssh_conn_id=self.CONN_SSH_WITH_PRIVATE_KEY_ECDSA_EXTRA,
remote_host='remote_host',
port='port',
username='username',
timeout=10,
)
with hook.get_tunnel(1234):
ssh_mock.assert_called_once_with(
'remote_host',
ssh_port='port',
ssh_username='username',
ssh_pkey=TEST_PKEY_ECDSA,
ssh_proxy=None,
local_bind_address=('localhost',),
remote_bind_address=('localhost', 1234),
host_pkey_directories=None,
logger=hook.log,
)
def test_ssh_connection(self):
hook = SSHHook(ssh_conn_id='ssh_default')
with hook.get_conn() as client:
(_, stdout, _) = client.exec_command('ls')
assert stdout.read() is not None
def test_ssh_connection_no_connection_id(self):
hook = SSHHook(remote_host='localhost')
assert hook.ssh_conn_id is None
with hook.get_conn() as client:
(_, stdout, _) = client.exec_command('ls')
assert stdout.read() is not None
def test_ssh_connection_old_cm(self):
with SSHHook(ssh_conn_id='ssh_default') as hook:
client = hook.get_conn()
(_, stdout, _) = client.exec_command('ls')
assert stdout.read() is not None
def test_tunnel(self):
hook = SSHHook(ssh_conn_id='ssh_default')
import socket
import subprocess
subprocess_kwargs = dict(
args=["python", "-c", HELLO_SERVER_CMD],
stdout=subprocess.PIPE,
)
with subprocess.Popen(**subprocess_kwargs) as server_handle, hook.get_tunnel(
local_port=2135, remote_port=2134
):
server_output = server_handle.stdout.read(5)
assert b"ready" == server_output
socket = socket.socket()
socket.connect(("localhost", 2135))
response = socket.recv(5)
assert response == b"hello"
socket.close()
server_handle.communicate()
assert server_handle.returncode == 0
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_with_private_key_extra(self, ssh_mock):
hook = SSHHook(
ssh_conn_id=self.CONN_SSH_WITH_PRIVATE_KEY_EXTRA,
remote_host='remote_host',
port='port',
username='username',
timeout=10,
)
with hook.get_conn():
ssh_mock.return_value.connect.assert_called_once_with(
banner_timeout=30.0,
hostname='remote_host',
username='username',
pkey=TEST_PKEY,
timeout=10,
compress=True,
port='port',
sock=None,
look_for_keys=True,
)
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_with_private_key_passphrase_extra(self, ssh_mock):
hook = SSHHook(
ssh_conn_id=self.CONN_SSH_WITH_PRIVATE_KEY_PASSPHRASE_EXTRA,
remote_host='remote_host',
port='port',
username='username',
timeout=10,
)
with hook.get_conn():
ssh_mock.return_value.connect.assert_called_once_with(
banner_timeout=30.0,
hostname='remote_host',
username='username',
pkey=TEST_PKEY,
timeout=10,
compress=True,
port='port',
sock=None,
look_for_keys=True,
)
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_with_host_key_extra(self, ssh_client):
hook = SSHHook(ssh_conn_id=self.CONN_SSH_WITH_HOST_KEY_EXTRA)
assert hook.host_key is not None
with hook.get_conn():
assert ssh_client.return_value.connect.called is True
assert ssh_client.return_value.get_host_keys.return_value.add.called
assert ssh_client.return_value.get_host_keys.return_value.add.call_args == mock.call(
hook.remote_host, 'ssh-rsa', hook.host_key
)
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_with_host_key_extra_with_type(self, ssh_client):
hook = SSHHook(ssh_conn_id=self.CONN_SSH_WITH_HOST_KEY_EXTRA_WITH_TYPE)
assert hook.host_key is not None
with hook.get_conn():
assert ssh_client.return_value.connect.called is True
assert ssh_client.return_value.get_host_keys.return_value.add.called
assert ssh_client.return_value.get_host_keys.return_value.add.call_args == mock.call(
hook.remote_host, 'ssh-rsa', hook.host_key
)
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_with_no_host_key_where_no_host_key_check_is_false(self, ssh_client):
hook = SSHHook(ssh_conn_id=self.CONN_SSH_WITH_NO_HOST_KEY_AND_NO_HOST_KEY_CHECK_FALSE)
assert hook.host_key is None
with hook.get_conn():
assert ssh_client.return_value.connect.called is True
assert ssh_client.return_value.get_host_keys.return_value.add.called is False
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_with_conn_timeout(self, ssh_mock):
hook = SSHHook(
remote_host='remote_host',
port='port',
username='username',
password='password',
conn_timeout=20,
key_file='fake.file',
)
with hook.get_conn():
ssh_mock.return_value.connect.assert_called_once_with(
banner_timeout=30.0,
hostname='remote_host',
username='username',
password='password',
key_filename='fake.file',
timeout=20,
compress=True,
port='port',
sock=None,
look_for_keys=True,
)
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_with_conn_timeout_and_timeout(self, ssh_mock):
hook = SSHHook(
remote_host='remote_host',
port='port',
username='username',
password='password',
timeout=10,
conn_timeout=20,
key_file='fake.file',
)
with hook.get_conn():
ssh_mock.return_value.connect.assert_called_once_with(
banner_timeout=30.0,
hostname='remote_host',
username='username',
password='password',
key_filename='fake.file',
timeout=20,
compress=True,
port='port',
sock=None,
look_for_keys=True,
)
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_with_timeout_extra(self, ssh_mock):
hook = SSHHook(
ssh_conn_id=self.CONN_SSH_WITH_TIMEOUT_EXTRA,
remote_host='remote_host',
port='port',
username='username',
timeout=10,
)
with hook.get_conn():
ssh_mock.return_value.connect.assert_called_once_with(
banner_timeout=30.0,
hostname='remote_host',
username='username',
timeout=20,
compress=True,
port='port',
sock=None,
look_for_keys=True,
)
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_with_conn_timeout_extra(self, ssh_mock):
hook = SSHHook(
ssh_conn_id=self.CONN_SSH_WITH_CONN_TIMEOUT_EXTRA,
remote_host='remote_host',
port='port',
username='username',
timeout=10,
conn_timeout=15,
)
# conn_timeout parameter wins over extra options
with hook.get_conn():
ssh_mock.return_value.connect.assert_called_once_with(
banner_timeout=30.0,
hostname='remote_host',
username='username',
timeout=15,
compress=True,
port='port',
sock=None,
look_for_keys=True,
)
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_with_timeout_extra_and_conn_timeout_extra(self, ssh_mock):
hook = SSHHook(
ssh_conn_id=self.CONN_SSH_WITH_TIMEOUT_AND_CONN_TIMEOUT_EXTRA,
remote_host='remote_host',
port='port',
username='username',
timeout=10,
conn_timeout=15,
)
# conn_timeout parameter wins over extra options
with hook.get_conn():
ssh_mock.return_value.connect.assert_called_once_with(
banner_timeout=30.0,
hostname='remote_host',
username='username',
timeout=15,
compress=True,
port='port',
sock=None,
look_for_keys=True,
)
@parameterized.expand(
[
(TEST_TIMEOUT, TEST_CONN_TIMEOUT, True, True, TEST_CONN_TIMEOUT),
(TEST_TIMEOUT, TEST_CONN_TIMEOUT, True, False, TEST_CONN_TIMEOUT),
(TEST_TIMEOUT, TEST_CONN_TIMEOUT, False, True, TEST_CONN_TIMEOUT),
(TEST_TIMEOUT, TEST_CONN_TIMEOUT, False, False, TEST_CONN_TIMEOUT),
(TEST_TIMEOUT, None, True, True, TEST_CONN_TIMEOUT),
(TEST_TIMEOUT, None, True, False, TEST_TIMEOUT),
(TEST_TIMEOUT, None, False, True, TEST_CONN_TIMEOUT),
(TEST_TIMEOUT, None, False, False, TEST_TIMEOUT),
(None, TEST_CONN_TIMEOUT, True, True, TEST_CONN_TIMEOUT),
(None, TEST_CONN_TIMEOUT, True, False, TEST_CONN_TIMEOUT),
(None, TEST_CONN_TIMEOUT, False, True, TEST_CONN_TIMEOUT),
(None, TEST_CONN_TIMEOUT, False, False, TEST_CONN_TIMEOUT),
(None, None, True, True, TEST_CONN_TIMEOUT),
(None, None, True, False, TEST_TIMEOUT),
(None, None, False, True, TEST_CONN_TIMEOUT),
(None, None, False, False, 10),
]
)
@mock.patch('airflow.providers.ssh.hooks.ssh.paramiko.SSHClient')
def test_ssh_connection_with_all_timeout_param_and_extra_combinations(
self, timeout, conn_timeout, timeoutextra, conn_timeoutextra, expected_value, ssh_mock
):
if timeoutextra and conn_timeoutextra:
ssh_conn_id = self.CONN_SSH_WITH_TIMEOUT_AND_CONN_TIMEOUT_EXTRA
elif timeoutextra and not conn_timeoutextra:
ssh_conn_id = self.CONN_SSH_WITH_TIMEOUT_EXTRA
elif not timeoutextra and conn_timeoutextra:
ssh_conn_id = self.CONN_SSH_WITH_CONN_TIMEOUT_EXTRA
else:
ssh_conn_id = self.CONN_SSH_WITH_NO_EXTRA
hook = SSHHook(
ssh_conn_id=ssh_conn_id,
remote_host='remote_host',
port='port',
username='username',
timeout=timeout,
conn_timeout=conn_timeout,
)
# conn_timeout parameter wins over extra options
with hook.get_conn():
ssh_mock.return_value.connect.assert_called_once_with(
banner_timeout=30.0,
hostname='remote_host',
username='username',
timeout=expected_value,
compress=True,
port='port',
sock=None,
look_for_keys=True,
)
def test_openssh_private_key(self):
# Paramiko handles OpenSSH-generated keys differently from paramiko-generated
# keys, so we need a test key in that format.
# This key was generated specifically for this test and is not otherwise in use.
TEST_OPENSSH_PRIVATE_KEY = "-----BEGIN OPENSSH " + textwrap.dedent(
"""\
PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAlwAAAAdzc2gtcn
NhAAAAAwEAAQAAAIEAuPKIGPWtIpMDrXwMAvNKQlhQ1gXV/tKyufElw/n6hrr6lvtfGhwX
DihHMsAF+8+KKWQjWgh0fttbIF3+3C56Ns8hgvgMQJT2nyWd7egwqn+LQa08uCEBEka3MO
arKzj39P66EZ/KQDD29VErlVOd97dPhaR8pOZvzcHxtLbU6rMAAAIA3uBiZd7gYmUAAAAH
c3NoLXJzYQAAAIEAuPKIGPWtIpMDrXwMAvNKQlhQ1gXV/tKyufElw/n6hrr6lvtfGhwXDi
hHMsAF+8+KKWQjWgh0fttbIF3+3C56Ns8hgvgMQJT2nyWd7egwqn+LQa08uCEBEka3MOar
Kzj39P66EZ/KQDD29VErlVOd97dPhaR8pOZvzcHxtLbU6rMAAAADAQABAAAAgA2QC5b4/T
dZ3J0uSZs1yC5RV6w6RVUokl68Zm6WuF6E+7dyu6iogrBRF9eK6WVr9M/QPh9uG0zqPSaE
fhobdm7KeycXmtDtrJnXE2ZSk4oU29++TvYZBrAqAli9aHlSArwiLnOIMzY/kIHoSJLJmd
jwXykdQ7QAd93KPEnkaMzBAAAAQGTyp6/wWqtqpMmYJ5prCGNtpVOGthW5upeiuQUytE/K
5pyPoq6dUCUxQpkprtkuNAv/ff9nW6yy1v2DWohKfaEAAABBAO3y+erRXmiMreMOAd1S84
RK2E/LUHOvClQqf6GnVavmIgkxIYEgjcFiWv4xIkTc1/FN6aX5aT4MB3srvuM7sxEAAABB
AMb6QAkvxo4hT/xKY0E0nG7zCUMXeBV35MEXQK0/InFC7aZ0tjzFsQJzLe/7q7ljIf+9/O
rCqNhxgOrv7XrRuYMAAAAKYXNoQHNpbm9wZQE=
-----END OPENSSH PRIVATE KEY-----
"""
)
session = settings.Session()
try:
conn = Connection(
conn_id='openssh_pkey',
host='localhost',
conn_type='ssh',
extra={"private_key": TEST_OPENSSH_PRIVATE_KEY},
)
session.add(conn)
session.flush()
hook = SSHHook(ssh_conn_id=conn.conn_id)
assert isinstance(hook.pkey, paramiko.RSAKey)
finally:
session.delete(conn)
session.commit()
@pytest.mark.flaky(max_runs=5, min_passes=1)
def test_exec_ssh_client_command(self):
hook = SSHHook(
ssh_conn_id='ssh_default',
conn_timeout=30,
banner_timeout=100,
)
with hook.get_conn() as client:
ret = hook.exec_ssh_client_command(
client,
'echo airflow',
False,
None,
30,
)
assert ret == (0, b'airflow\n', b'')
|
|
# coding: utf-8
"""
KubeVirt API
This is the KubeVirt API, an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1KubeVirt(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'K8sIoApimachineryPkgApisMetaV1ObjectMeta',
'spec': 'V1KubeVirtSpec',
'status': 'V1KubeVirtStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
"""
V1KubeVirt - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""
Gets the api_version of this V1KubeVirt.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:return: The api_version of this V1KubeVirt.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1KubeVirt.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param api_version: The api_version of this V1KubeVirt.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1KubeVirt.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:return: The kind of this V1KubeVirt.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1KubeVirt.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param kind: The kind of this V1KubeVirt.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1KubeVirt.
:return: The metadata of this V1KubeVirt.
:rtype: K8sIoApimachineryPkgApisMetaV1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1KubeVirt.
:param metadata: The metadata of this V1KubeVirt.
:type: K8sIoApimachineryPkgApisMetaV1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""
Gets the spec of this V1KubeVirt.
:return: The spec of this V1KubeVirt.
:rtype: V1KubeVirtSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""
Sets the spec of this V1KubeVirt.
:param spec: The spec of this V1KubeVirt.
:type: V1KubeVirtSpec
"""
if spec is None:
raise ValueError("Invalid value for `spec`, must not be `None`")
self._spec = spec
@property
def status(self):
"""
Gets the status of this V1KubeVirt.
:return: The status of this V1KubeVirt.
:rtype: V1KubeVirtStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this V1KubeVirt.
:param status: The status of this V1KubeVirt.
:type: V1KubeVirtStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1KubeVirt):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
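# Minimal usage sketch (illustrative only; the apiVersion value is an assumption
# for the example): `spec` is the only required field, so omitting it raises.
#
#   kv = V1KubeVirt(api_version='kubevirt.io/v1', kind='KubeVirt', spec=spec)
#   kv.to_dict()   # nested models are recursively converted via their to_dict()
#   V1KubeVirt()   # raises ValueError because spec must not be None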
|
|
# Copyright (c) 2013 dotCloud, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A Docker Hypervisor which allows running Linux Containers instead of VMs.
"""
import os
import random
import socket
import time
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova import exception
from nova.image import glance
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log
from nova.openstack.common import units
from nova import utils
import nova.virt.docker.client
from nova.virt.docker import hostinfo
from nova.virt.docker import network
from nova.virt import driver
docker_opts = [
cfg.IntOpt('registry_default_port',
default=5042,
help=_('Default TCP port to find the '
'docker-registry container'),
deprecated_group='DEFAULT',
deprecated_name='docker_registry_default_port'),
]
CONF = cfg.CONF
CONF.register_opts(docker_opts, 'docker')
CONF.import_opt('my_ip', 'nova.netconf')
LOG = log.getLogger(__name__)
class DockerDriver(driver.ComputeDriver):
"""Docker hypervisor driver."""
def __init__(self, virtapi):
super(DockerDriver, self).__init__(virtapi)
self._docker = None
@property
def docker(self):
if self._docker is None:
self._docker = nova.virt.docker.client.DockerHTTPClient()
return self._docker
def init_host(self, host):
LOG.warning(_('The docker driver does not meet the Nova project\'s '
'requirements for quality verification and is planned '
'for removal. This may change, but users should plan '
'accordingly. Additional details here: '
'https://wiki.openstack.org/wiki/HypervisorSupportMatrix'
'/DeprecationPlan'))
if self.is_daemon_running() is False:
raise exception.NovaException(_('Docker daemon is not running or '
'is not reachable (check the rights on /var/run/docker.sock)'))
def is_daemon_running(self):
try:
self.docker.list_containers()
return True
except socket.error:
# NOTE(samalba): If the daemon is not running, we'll get a socket
# error. The list_containers call is safe to call often; docker
# enforces an internal hard limit even when the number of containers
# is huge.
return False
def list_instances(self, inspect=False):
res = []
for container in self.docker.list_containers():
info = self.docker.inspect_container(container['id'])
if inspect:
res.append(info)
else:
res.append(info['Config'].get('Hostname'))
return res
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
msg = _("VIF plugging is not supported by the Docker driver.")
raise NotImplementedError(msg)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
msg = _("VIF unplugging is not supported by the Docker driver.")
raise NotImplementedError(msg)
def find_container_by_name(self, name):
for info in self.list_instances(inspect=True):
if info['Config'].get('Hostname') == name:
return info
return {}
def get_info(self, instance):
container = self.find_container_by_name(instance['name'])
if not container:
raise exception.InstanceNotFound(instance_id=instance['name'])
running = container['State'].get('Running')
info = {
'max_mem': 0,
'mem': 0,
'num_cpu': 1,
'cpu_time': 0
}
info['state'] = power_state.RUNNING if running \
else power_state.SHUTDOWN
return info
def get_host_stats(self, refresh=False):
hostname = socket.gethostname()
stats = self.get_available_resource(hostname)
stats['host_hostname'] = stats['hypervisor_hostname']
stats['host_name_label'] = stats['hypervisor_hostname']
return stats
def get_available_resource(self, nodename):
if not hasattr(self, '_nodename'):
self._nodename = nodename
if nodename != self._nodename:
LOG.error(_('Hostname has changed from %(old)s to %(new)s. '
'A restart is required to take effect.'
) % {'old': self._nodename,
'new': nodename})
memory = hostinfo.get_memory_usage()
disk = hostinfo.get_disk_usage()
stats = {
'vcpus': 1,
'vcpus_used': 0,
'memory_mb': memory['total'] / units.Mi,
'memory_mb_used': memory['used'] / units.Mi,
'local_gb': disk['total'] / units.Gi,
'local_gb_used': disk['used'] / units.Gi,
'disk_available_least': disk['available'] / units.Gi,
'hypervisor_type': 'docker',
'hypervisor_version': utils.convert_version_to_int('1.0'),
'hypervisor_hostname': self._nodename,
'cpu_info': '?',
'supported_instances': jsonutils.dumps([
('i686', 'docker', 'lxc'),
('x86_64', 'docker', 'lxc')
])
}
return stats
def _find_container_pid(self, container_id):
cgroup_path = hostinfo.get_cgroup_devices_path()
lxc_path = os.path.join(cgroup_path, 'lxc')
tasks_path = os.path.join(lxc_path, container_id, 'tasks')
n = 0
while True:
# NOTE(samalba): We wait for the process to be spawned inside the
# container in order to get the "container pid". This is
# usually really fast. To avoid race conditions on a slow
# machine, we allow 10 seconds as a hard limit.
if n > 20:
return
try:
with open(tasks_path) as f:
pids = f.readlines()
if pids:
return int(pids[0].strip())
except IOError:
pass
time.sleep(0.5)
n += 1
def _setup_network(self, instance, network_info):
if not network_info:
return
container_id = self.find_container_by_name(instance['name']).get('id')
if not container_id:
return
network_info = network_info[0]['network']
netns_path = '/var/run/netns'
if not os.path.exists(netns_path):
utils.execute(
'mkdir', '-p', netns_path, run_as_root=True)
nspid = self._find_container_pid(container_id)
if not nspid:
msg = _('Cannot find any PID under container "{0}"')
raise RuntimeError(msg.format(container_id))
netns_path = os.path.join(netns_path, container_id)
utils.execute(
'ln', '-sf', '/proc/{0}/ns/net'.format(nspid),
'/var/run/netns/{0}'.format(container_id),
run_as_root=True)
rand = random.randint(0, 100000)
if_local_name = 'pvnetl{0}'.format(rand)
if_remote_name = 'pvnetr{0}'.format(rand)
bridge = network_info['bridge']
gateway = network.find_gateway(instance, network_info)
ip = network.find_fixed_ip(instance, network_info)
undo_mgr = utils.UndoManager()
try:
utils.execute(
'ip', 'link', 'add', 'name', if_local_name, 'type',
'veth', 'peer', 'name', if_remote_name,
run_as_root=True)
undo_mgr.undo_with(lambda: utils.execute(
'ip', 'link', 'delete', if_local_name, run_as_root=True))
# NOTE(samalba): Deleting the interface will delete all associated
# resources (remove from the bridge, its pair, etc...)
utils.execute(
'brctl', 'addif', bridge, if_local_name,
run_as_root=True)
utils.execute(
'ip', 'link', 'set', if_local_name, 'up',
run_as_root=True)
utils.execute(
'ip', 'link', 'set', if_remote_name, 'netns', nspid,
run_as_root=True)
utils.execute(
'ip', 'netns', 'exec', container_id, 'ifconfig',
if_remote_name, ip,
run_as_root=True)
utils.execute(
'ip', 'netns', 'exec', container_id,
'ip', 'route', 'replace', 'default', 'via', gateway, 'dev',
if_remote_name, run_as_root=True)
except Exception:
msg = _('Failed to setup the network, rolling back')
undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def _get_memory_limit_bytes(self, instance):
system_meta = utils.instance_sys_meta(instance)
return int(system_meta.get('instance_type_memory_mb', 0)) * units.Mi
def _get_image_name(self, context, instance, image):
fmt = image['container_format']
if fmt != 'docker':
msg = _('Image container format not supported ({0})')
raise exception.InstanceDeployFailure(msg.format(fmt),
instance_id=instance['name'])
registry_port = self._get_registry_port()
return '{0}:{1}/{2}'.format(CONF.my_ip,
registry_port,
image['name'])
def _get_default_cmd(self, image_name):
default_cmd = ['sh']
info = self.docker.inspect_image(image_name)
if not info:
return default_cmd
if not info['container_config']['Cmd']:
    return default_cmd
# Fall through (returning None) when the image defines its own Cmd, so the
# image's command is used as-is.
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
image_name = self._get_image_name(context, instance, image_meta)
args = {
'Hostname': instance['name'],
'Image': image_name,
'Memory': self._get_memory_limit_bytes(instance),
'CpuShares': self._get_cpu_shares(instance)
}
default_cmd = self._get_default_cmd(image_name)
if default_cmd:
args['Cmd'] = default_cmd
container_id = self._create_container(instance, args)
if not container_id:
msg = _('Image name "{0}" does not exist, fetching it...')
LOG.info(msg.format(image_name))
res = self.docker.pull_repository(image_name)
if res is False:
raise exception.InstanceDeployFailure(
_('Cannot pull missing image'),
instance_id=instance['name'])
container_id = self._create_container(instance, args)
if not container_id:
raise exception.InstanceDeployFailure(
_('Cannot create container'),
instance_id=instance['name'])
self.docker.start_container(container_id)
try:
self._setup_network(instance, network_info)
except Exception as e:
msg = _('Cannot setup network: {0}')
self.docker.kill_container(container_id)
self.docker.destroy_container(container_id)
raise exception.InstanceDeployFailure(msg.format(e),
instance_id=instance['name'])
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
container_id = self.find_container_by_name(instance['name']).get('id')
if not container_id:
return
self.docker.stop_container(container_id)
self.docker.destroy_container(container_id)
network.teardown_network(container_id)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
container_id = self.find_container_by_name(instance['name']).get('id')
if not container_id:
return
if not self.docker.stop_container(container_id):
LOG.warning(_('Cannot stop the container, '
'please check docker logs'))
if not self.docker.start_container(container_id):
LOG.warning(_('Cannot restart the container, '
'please check docker logs'))
def power_on(self, context, instance, network_info, block_device_info):
container_id = self.find_container_by_name(instance['name']).get('id')
if not container_id:
return
self.docker.start_container(container_id)
def power_off(self, instance):
container_id = self.find_container_by_name(instance['name']).get('id')
if not container_id:
return
self.docker.stop_container(container_id)
def get_console_output(self, context, instance):
container_id = self.find_container_by_name(instance.name).get('id')
if not container_id:
return
return self.docker.get_container_logs(container_id)
def _get_registry_port(self):
default_port = CONF.docker.registry_default_port
registry = None
for container in self.docker.list_containers(_all=False):
container = self.docker.inspect_container(container['id'])
if 'docker-registry' in container['Path']:
registry = container
break
if not registry:
return default_port
# NOTE(samalba): The registry service always binds on port 5000 in the
# container
try:
return container['NetworkSettings']['PortMapping']['Tcp']['5000']
except (KeyError, TypeError):
# NOTE(samalba): Falling back to a default port allows more
# flexibility (run docker-registry outside a container)
return default_port
def snapshot(self, context, instance, image_href, update_task_state):
container_id = self.find_container_by_name(instance['name']).get('id')
if not container_id:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
(image_service, image_id) = glance.get_remote_image_service(
context, image_href)
image = image_service.show(context, image_id)
registry_port = self._get_registry_port()
name = image['name']
default_tag = (':' not in name)
name = '{0}:{1}/{2}'.format(CONF.my_ip,
registry_port,
name)
commit_name = name if not default_tag else name + ':latest'
self.docker.commit_container(container_id, commit_name)
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
headers = {'X-Meta-Glance-Image-Id': image_href}
self.docker.push_repository(name, headers=headers)
def _get_cpu_shares(self, instance):
"""Get allocated CPUs from configured flavor.
Docker/lxc supports relative CPU allocation.
cgroups specifies the following:
/sys/fs/cgroup/lxc/cpu.shares = 1024
/sys/fs/cgroup/cpu.shares = 1024
For that reason we use 1024 as the multiplier.
This lets CPU resources be divided fairly with
containers started by the user (e.g. the docker
registry), which have the default CpuShares
value of zero.
"""
flavor = flavors.extract_flavor(instance)
return int(flavor['vcpus']) * 1024
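# Worked example (illustrative): a flavor with vcpus=2 yields CpuShares of
# 2 * 1024 = 2048, i.e. twice the relative weight of a container left at the
# cgroup default of 1024.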
def _create_container(self, instance, args):
name = "nova-" + instance['uuid']
return self.docker.create_container(args, name)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Filters
=======
Filter bank construction
------------------------
.. autosummary::
:toctree: generated/
dct
mel
chroma
constant_q
Miscellaneous
-------------
.. autosummary::
:toctree: generated/
constant_q_lengths
cq_to_chroma
window_bandwidth
"""
import warnings
import numpy as np
import scipy
import scipy.signal
from . import cache
from . import util
from .util.exceptions import ParameterError
from .core.time_frequency import note_to_hz, hz_to_midi, hz_to_octs
from .core.time_frequency import fft_frequencies, mel_frequencies
# Dictionary of window function bandwidths
WINDOW_BANDWIDTHS = dict(hann=0.725)
__all__ = ['dct',
'mel',
'chroma',
'constant_q',
'constant_q_lengths',
'cq_to_chroma',
'window_bandwidth']
@cache
def dct(n_filters, n_input):
"""Discrete cosine transform (DCT type-III) basis.
.. [1] http://en.wikipedia.org/wiki/Discrete_cosine_transform
Parameters
----------
n_filters : int > 0 [scalar]
number of output components (DCT filters)
n_input : int > 0 [scalar]
number of input components (frequency bins)
Returns
-------
dct_basis: np.ndarray [shape=(n_filters, n_input)]
DCT (type-III) basis vectors [1]_
Examples
--------
>>> n_fft = 2048
>>> dct_filters = librosa.filters.dct(13, 1 + n_fft // 2)
>>> dct_filters
array([[ 0.031, 0.031, ..., 0.031, 0.031],
[ 0.044, 0.044, ..., -0.044, -0.044],
...,
[ 0.044, 0.044, ..., -0.044, -0.044],
[ 0.044, 0.044, ..., 0.044, 0.044]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(dct_filters, x_axis='linear')
>>> plt.ylabel('DCT function')
>>> plt.title('DCT filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
basis = np.empty((n_filters, n_input))
basis[0, :] = 1.0 / np.sqrt(n_input)
samples = np.arange(1, 2*n_input, 2) * np.pi / (2.0 * n_input)
for i in range(1, n_filters):
basis[i, :] = np.cos(i*samples) * np.sqrt(2.0/n_input)
return basis
@cache
def mel(sr, n_fft, n_mels=128, fmin=0.0, fmax=None, htk=False):
"""Create a Filterbank matrix to combine FFT bins into Mel-frequency bins
Parameters
----------
sr : number > 0 [scalar]
sampling rate of the incoming signal
n_fft : int > 0 [scalar]
number of FFT components
n_mels : int > 0 [scalar]
number of Mel bands to generate
fmin : float >= 0 [scalar]
lowest frequency (in Hz)
fmax : float >= 0 [scalar]
highest frequency (in Hz).
If `None`, use `fmax = sr / 2.0`
htk : bool [scalar]
use HTK formula instead of Slaney
Returns
-------
M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
Mel transform matrix
Examples
--------
>>> melfb = librosa.filters.mel(22050, 2048)
>>> melfb
array([[ 0. , 0.016, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
Clip the maximum frequency to 8KHz
>>> librosa.filters.mel(22050, 2048, fmax=8000)
array([[ 0. , 0.02, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(melfb, x_axis='linear')
>>> plt.ylabel('Mel filter')
>>> plt.title('Mel filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
if fmax is None:
fmax = float(sr) / 2
# Initialize the weights
n_mels = int(n_mels)
weights = np.zeros((n_mels, int(1 + n_fft // 2)))
# Center freqs of each FFT bin
fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft)
# 'Center freqs' of mel bands - uniformly spaced between limits
freqs = mel_frequencies(n_mels + 2,
fmin=fmin,
fmax=fmax,
htk=htk)
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (freqs[2:n_mels+2] - freqs[:n_mels])
for i in range(n_mels):
# lower and upper slopes for all bins
lower = (fftfreqs - freqs[i]) / (freqs[i+1] - freqs[i])
upper = (freqs[i+2] - fftfreqs) / (freqs[i+2] - freqs[i+1])
# .. then intersect them with each other and zero
weights[i] = np.maximum(0, np.minimum(lower, upper)) * enorm[i]
return weights
@cache
def chroma(sr, n_fft, n_chroma=12, A440=440.0, ctroct=5.0,
octwidth=2, norm=2, base_c=True):
"""Create a Filterbank matrix to convert STFT to chroma
Parameters
----------
sr : number > 0 [scalar]
audio sampling rate
n_fft : int > 0 [scalar]
number of FFT bins
n_chroma : int > 0 [scalar]
number of chroma bins
A440 : float > 0 [scalar]
Reference frequency for A440
ctroct : float > 0 [scalar]
octwidth : float > 0 or None [scalar]
`ctroct` and `octwidth` specify a dominance window -
a Gaussian weighting centered on `ctroct` (in octs, A0 = 27.5Hz)
and with a gaussian half-width of `octwidth`.
Set `octwidth` to `None` to use a flat weighting.
norm : float > 0 or np.inf
Normalization factor for each filter
base_c : bool
If True, the filter bank will start at 'C'.
If False, the filter bank will start at 'A'.
Returns
-------
wts : ndarray [shape=(n_chroma, 1 + n_fft / 2)]
Chroma filter matrix
See Also
--------
util.normalize
feature.chroma_stft
Examples
--------
Build a simple chroma filter bank
>>> chromafb = librosa.filters.chroma(22050, 4096)
array([[ 1.689e-05, 3.024e-04, ..., 4.639e-17, 5.327e-17],
[ 1.716e-05, 2.652e-04, ..., 2.674e-25, 3.176e-25],
...,
[ 1.578e-05, 3.619e-04, ..., 8.577e-06, 9.205e-06],
[ 1.643e-05, 3.355e-04, ..., 1.474e-10, 1.636e-10]])
Use quarter-tones instead of semitones
>>> librosa.filters.chroma(22050, 4096, n_chroma=24)
array([[ 1.194e-05, 2.138e-04, ..., 6.297e-64, 1.115e-63],
[ 1.206e-05, 2.009e-04, ..., 1.546e-79, 2.929e-79],
...,
[ 1.162e-05, 2.372e-04, ..., 6.417e-38, 9.923e-38],
[ 1.180e-05, 2.260e-04, ..., 4.697e-50, 7.772e-50]])
Equally weight all octaves
>>> librosa.filters.chroma(22050, 4096, octwidth=None)
array([[ 3.036e-01, 2.604e-01, ..., 2.445e-16, 2.809e-16],
[ 3.084e-01, 2.283e-01, ..., 1.409e-24, 1.675e-24],
...,
[ 2.836e-01, 3.116e-01, ..., 4.520e-05, 4.854e-05],
[ 2.953e-01, 2.888e-01, ..., 7.768e-10, 8.629e-10]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(chromafb, x_axis='linear')
>>> plt.ylabel('Chroma filter')
>>> plt.title('Chroma filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
wts = np.zeros((n_chroma, n_fft))
# Get the FFT bins, not counting the DC component
frequencies = np.linspace(0, sr, n_fft, endpoint=False)[1:]
frqbins = n_chroma * hz_to_octs(frequencies, A440)
# make up a value for the 0 Hz bin = 1.5 octaves below bin 1
# (so chroma is 50% rotated from bin 1, and bin width is broad)
frqbins = np.concatenate(([frqbins[0] - 1.5 * n_chroma], frqbins))
binwidthbins = np.concatenate((np.maximum(frqbins[1:] - frqbins[:-1],
1.0), [1]))
D = np.subtract.outer(frqbins, np.arange(0, n_chroma, dtype='d')).T
n_chroma2 = np.round(float(n_chroma) / 2)
# Project into range -n_chroma/2 .. n_chroma/2
# add on fixed offset of 10*n_chroma to ensure all values passed to
# rem are positive
D = np.remainder(D + n_chroma2 + 10*n_chroma, n_chroma) - n_chroma2
# Gaussian bumps - 2*D to make them narrower
wts = np.exp(-0.5 * (2*D / np.tile(binwidthbins, (n_chroma, 1)))**2)
# normalize each column
wts = util.normalize(wts, norm=norm, axis=0)
# Maybe apply scaling for fft bins
if octwidth is not None:
wts *= np.tile(
np.exp(-0.5 * (((frqbins/n_chroma - ctroct)/octwidth)**2)),
(n_chroma, 1))
if base_c:
wts = np.roll(wts, -3, axis=0)
# remove aliasing columns, copy to ensure row-contiguity
return np.ascontiguousarray(wts[:, :int(1 + n_fft/2)])
def __float_window(window_function):
'''Decorator function for windows with fractional input.
This function guarantees that for fractional `x`, the following hold:
1. `__float_window(window_function)(x)` has length `np.ceil(x)`
    2. all values at index `np.floor(x)` and above are set to 0.
For integer-valued `x`, there should be no change in behavior.
'''
def _wrap(n, *args, **kwargs):
'''The wrapped window'''
n_min, n_max = int(np.floor(n)), int(np.ceil(n))
window = window_function(n, *args, **kwargs)
if len(window) < n_max:
window = np.pad(window, [(0, n_max - len(window))],
mode='constant')
window[n_min:] = 0.0
return window
return _wrap
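# Illustrative check (added for exposition, not part of the original module):
# demonstrates the two guarantees documented above for a fractional length,
# using `np.hanning` because it accepts non-integer sizes.
def _example_float_window(n=5.5):
    '''Wrapped window has ceil(n) samples; samples at floor(n) and beyond are 0.'''
    import numpy as np
    win = __float_window(np.hanning)(n)
    assert len(win) == int(np.ceil(n))            # guarantee 1: length ceil(n)
    assert np.all(win[int(np.floor(n)):] == 0.0)  # guarantee 2: zeroed tail
    return win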
@cache
def constant_q(sr, fmin=None, n_bins=84, bins_per_octave=12, tuning=0.0,
window=None, filter_scale=1, pad_fft=True, norm=1,
**kwargs):
r'''Construct a constant-Q basis.
This uses the filter bank described by [1]_.
.. [1] McVicar, Matthew.
"A machine learning approach to automatic chord extraction."
Dissertation, University of Bristol. 2013.
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin. Defaults to `C1 ~= 32.70`
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin
window : function or `None`
Windowing function to apply to filters.
Default: `scipy.signal.hann`
filter_scale : float > 0 [scalar]
Scale of filter windows.
Small values (<1) use shorter windows for higher temporal resolution.
pad_fft : boolean
Center-pad all filters up to the nearest integral power of 2.
By default, padding is done with zeros, but this can be overridden
by setting the `mode=` field in *kwargs*.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See librosa.util.normalize
kwargs : additional keyword arguments
        Arguments to `np.pad()` when `pad_fft=True`.
Returns
-------
filters : np.ndarray, `len(filters) == n_bins`
        `filters[i]` is the `i`\ th time-domain CQT basis filter
lengths : np.ndarray, `len(lengths) == n_bins`
The (fractional) length of each filter
See Also
--------
constant_q_lengths
librosa.core.cqt
librosa.util.normalize
Examples
--------
Use a shorter window for each filter
>>> basis, lengths = librosa.filters.constant_q(22050, filter_scale=0.5)
Plot one octave of filters in time and frequency
>>> import matplotlib.pyplot as plt
>>> basis, lengths = librosa.filters.constant_q(22050)
>>> plt.figure(figsize=(10, 6))
>>> plt.subplot(2, 1, 1)
>>> notes = librosa.midi_to_note(np.arange(24, 24 + len(basis)))
>>> for i, (f, n) in enumerate(zip(basis, notes[:12])):
... f_scale = librosa.util.normalize(f) / 2
... plt.plot(i + f_scale.real)
... plt.plot(i + f_scale.imag, linestyle=':')
>>> plt.axis('tight')
>>> plt.yticks(np.arange(len(notes[:12])), notes[:12])
>>> plt.ylabel('CQ filters')
>>> plt.title('CQ filters (one octave, time domain)')
>>> plt.xlabel('Time (samples at 22050 Hz)')
>>> plt.legend(['Real', 'Imaginary'], frameon=True, framealpha=0.8)
>>> plt.subplot(2, 1, 2)
>>> F = np.abs(np.fft.fftn(basis, axes=[-1]))
>>> # Keep only the positive frequencies
>>> F = F[:, :(1 + F.shape[1] // 2)]
>>> librosa.display.specshow(F, x_axis='linear')
>>> plt.yticks(np.arange(len(notes))[::12], notes[::12])
>>> plt.ylabel('CQ filters')
>>> plt.title('CQ filter magnitudes (frequency domain)')
>>> plt.tight_layout()
'''
if fmin is None:
fmin = note_to_hz('C1')
if window is None:
window = scipy.signal.hann
# Pass-through parameters to get the filter lengths
lengths = constant_q_lengths(sr, fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
window=window,
filter_scale=filter_scale)
# Apply tuning correction
correction = 2.0**(float(tuning) / bins_per_octave)
fmin = correction * fmin
# Q should be capitalized here, so we suppress the name warning
# pylint: disable=invalid-name
Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)
# Convert lengths back to frequencies
freqs = Q * sr / lengths
# Build the filters
filters = []
for ilen, freq in zip(lengths, freqs):
# Build the filter: note, length will be ceil(ilen)
sig = np.exp(np.arange(ilen, dtype=float) * 1j * 2 * np.pi * freq / sr)
# Apply the windowing function
sig = sig * __float_window(window)(ilen)
# Normalize
sig = util.normalize(sig, norm=norm)
filters.append(sig)
# Pad and stack
max_len = max(lengths)
if pad_fft:
max_len = int(2.0**(np.ceil(np.log2(max_len))))
else:
max_len = int(np.ceil(max_len))
filters = np.asarray([util.pad_center(filt, max_len, **kwargs)
for filt in filters])
return filters, np.asarray(lengths)
@cache
def constant_q_lengths(sr, fmin, n_bins=84, bins_per_octave=12,
tuning=0.0, window='hann', filter_scale=1):
r'''Return length of each filter in a constant-Q basis.
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin.
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin
window : str or callable
Window function to use on filters
filter_scale : float > 0 [scalar]
Resolution of filter windows. Larger values use longer windows.
Returns
-------
lengths : np.ndarray
The length of each filter.
See Also
--------
constant_q
librosa.core.cqt
'''
if fmin <= 0:
raise ParameterError('fmin must be positive')
if bins_per_octave <= 0:
raise ParameterError('bins_per_octave must be positive')
if filter_scale <= 0:
raise ParameterError('filter_scale must be positive')
if n_bins <= 0 or not isinstance(n_bins, int):
raise ParameterError('n_bins must be a positive integer')
correction = 2.0**(float(tuning) / bins_per_octave)
fmin = correction * fmin
# Q should be capitalized here, so we suppress the name warning
# pylint: disable=invalid-name
Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)
# Compute the frequencies
freq = fmin * (2.0 ** (np.arange(n_bins, dtype=float) / bins_per_octave))
if np.any(freq * (1 + window_bandwidth(window) / Q) > sr / 2.0):
raise ParameterError('Filter pass-band lies beyond Nyquist')
# Convert frequencies to filter lengths
lengths = Q * sr / freq
return lengths
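# Worked example (added for exposition, not part of the original module):
# with the defaults filter_scale=1 and bins_per_octave=12, the Q factor is
# 1 / (2**(1/12) - 1) ~= 16.82, so the lowest filter (C1 ~= 32.7 Hz at
# sr=22050) spans roughly Q * sr / fmin ~= 11000 samples, about half a second.
def _example_cq_length():
    '''Recompute the longest constant-Q filter length from the Q definition above.'''
    import numpy as np
    sr, fmin = 22050, note_to_hz('C1')
    bins_per_octave, filter_scale = 12, 1
    Q = float(filter_scale) / (2.0**(1.0 / bins_per_octave) - 1)
    lengths = constant_q_lengths(sr, fmin, n_bins=84,
                                 bins_per_octave=bins_per_octave,
                                 filter_scale=filter_scale)
    assert np.isclose(lengths[0], Q * sr / fmin)
    return lengths[0]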
@cache
def cq_to_chroma(n_input, bins_per_octave=12, n_chroma=12,
fmin=None, window=None, base_c=True):
'''Convert a Constant-Q basis to Chroma.
Parameters
----------
n_input : int > 0 [scalar]
Number of input components (CQT bins)
bins_per_octave : int > 0 [scalar]
How many bins per octave in the CQT
n_chroma : int > 0 [scalar]
Number of output bins (per octave) in the chroma
fmin : None or float > 0
Center frequency of the first constant-Q channel.
Default: 'C1' ~= 32.7 Hz
window : None or np.ndarray
If provided, the cq_to_chroma filter bank will be
convolved with `window`.
base_c : bool
If True, the first chroma bin will start at 'C'
If False, the first chroma bin will start at 'A'
Returns
-------
cq_to_chroma : np.ndarray [shape=(n_chroma, n_input)]
Transformation matrix: `Chroma = np.dot(cq_to_chroma, CQT)`
Raises
------
ParameterError
If `n_input` is not an integer multiple of `n_chroma`
Examples
--------
Get a CQT, and wrap bins to chroma
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> CQT = librosa.cqt(y, sr=sr)
>>> chroma_map = librosa.filters.cq_to_chroma(CQT.shape[0])
>>> chromagram = chroma_map.dot(CQT)
>>> # Max-normalize each time step
>>> chromagram = librosa.util.normalize(chromagram, axis=0)
>>> import matplotlib.pyplot as plt
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.logamplitude(CQT**2,
... ref_power=np.max),
... y_axis='cqt_note', x_axis='time')
>>> plt.title('CQT Power')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(chromagram, y_axis='chroma', x_axis='time')
>>> plt.title('Chroma (wrapped CQT)')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> chroma = librosa.feature.chroma_stft(y=y, sr=sr)
>>> librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')
>>> plt.title('librosa.feature.chroma_stft')
>>> plt.colorbar()
>>> plt.tight_layout()
'''
# How many fractional bins are we merging?
n_merge = float(bins_per_octave) / n_chroma
if fmin is None:
fmin = note_to_hz('C1')
if np.mod(n_merge, 1) != 0:
raise ParameterError('Incompatible CQ merge: '
'input bins must be an '
'integer multiple of output bins.')
# Tile the identity to merge fractional bins
cq_to_ch = np.repeat(np.eye(n_chroma), n_merge, axis=1)
# Roll it left to center on the target bin
cq_to_ch = np.roll(cq_to_ch, - int(n_merge // 2), axis=1)
# How many octaves are we repeating?
n_octaves = np.ceil(np.float(n_input) / bins_per_octave)
# Repeat and trim
cq_to_ch = np.tile(cq_to_ch, int(n_octaves))[:, :n_input]
# What's the note number of the first bin in the CQT?
# midi uses 12 bins per octave here
midi_0 = np.mod(hz_to_midi(fmin), 12)
if base_c:
# rotate to C
roll = midi_0
else:
# rotate to A
roll = midi_0 - 9
# Adjust the roll in terms of how many chroma we want out
# We need to be careful with rounding here
roll = int(np.round(roll * (n_chroma / 12.)))
# Apply the roll
cq_to_ch = np.roll(cq_to_ch, roll, axis=0).astype(float)
if window is not None:
cq_to_ch = scipy.signal.convolve(cq_to_ch,
np.atleast_2d(window),
mode='same')
return cq_to_ch
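# Worked example (added for exposition, not part of the original module):
# folding a 36-bins-per-octave CQT down to 12 chroma bins means each chroma
# bin aggregates n_merge = 36 / 12 = 3 adjacent CQT bins in every octave, and
# the resulting map has shape (n_chroma, n_input).
def _example_cq_to_chroma_shape():
    '''Check the fold-down matrix dimensions for a 7-octave, 36-bin CQT.'''
    n_input, bins_per_octave, n_chroma = 7 * 36, 36, 12
    fold = cq_to_chroma(n_input, bins_per_octave=bins_per_octave,
                        n_chroma=n_chroma)
    assert fold.shape == (n_chroma, n_input)
    # every CQT bin is routed to exactly one chroma bin
    assert (fold.sum(axis=0) == 1).all()
    return fold.shape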
def window_bandwidth(window, default=1.0):
'''Get the bandwidth of a window function.
If the window function is unknown, return a default value.
Parameters
----------
window : callable or string
A window function, or the name of a window function.
Examples:
- scipy.signal.hann
- 'boxcar'
default : float >= 0
The default value, if `window` is unknown.
Returns
-------
bandwidth : float
The bandwidth of the given window function
See Also
--------
scipy.signal.get_window
'''
if hasattr(window, '__name__'):
key = window.__name__
else:
key = window
if key not in WINDOW_BANDWIDTHS:
warnings.warn("Unknown window function '{:s}'.".format(key))
return WINDOW_BANDWIDTHS.get(key, default)
|
|
# Copyright 2012 Calvin Rien
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A pbxproj file is an OpenStep format plist
# {} represents dictionary of key=value pairs delimited by ;
# () represents list of values delimited by ,
# file starts with a comment specifying the character type
# // !$*UTF8*$!
# when adding a file to a project, create the PBXFileReference
# add the PBXFileReference's guid to a group
# create a PBXBuildFile with the PBXFileReference's guid
# add the PBXBuildFile to the appropriate build phase
# when adding a header search path add
# HEADER_SEARCH_PATHS = "path/**";
# to each XCBuildConfiguration object
# Xcode4 will read either a OpenStep or XML plist.
# this script uses `plutil` to validate, read and write
# the pbxproj file. Plutil is available in OS X 10.2 and higher
# Plutil can't write OpenStep plists, so I save as XML
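# As a rough illustration of the steps above, using the classes defined below
# (paths and names here are hypothetical; the high-level helper
# XcodeProject.add_file() performs the same sequence automatically):
#
#   project = XcodeProject.Load('MyApp.xcodeproj/project.pbxproj')
#   file_ref = PBXFileReference.Create('Classes/Foo.m')        # 1. file reference
#   project.root_group.add_child(file_ref)                     # 2. add its guid to a group
#   build_file = PBXBuildFile.Create(file_ref)                 # 3. build file for the reference
#   for phase in project.get_build_phases('PBXSourcesBuildPhase'):
#       phase.add_build_file(build_file)                       # 4. attach to the build phase
#   project.objects[file_ref.id] = file_ref
#   project.objects[build_file.id] = build_file
#   project.save()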
import datetime
import json
import ntpath
import os
import plistlib
import re
import shutil
import subprocess
import uuid
from UserDict import IterableUserDict
from UserList import UserList
regex = '[a-zA-Z0-9\\._/-]*'
class PBXEncoder(json.JSONEncoder):
def default(self, obj):
"""Tests the input object, obj, to encode as JSON."""
if isinstance(obj, (PBXList, PBXDict)):
return obj.data
return json.JSONEncoder.default(self, obj)
class PBXDict(IterableUserDict):
def __init__(self, d=None):
if d:
d = dict([(PBXType.Convert(k), PBXType.Convert(v)) for k, v in d.items()])
IterableUserDict.__init__(self, d)
def __setitem__(self, key, value):
IterableUserDict.__setitem__(self, PBXType.Convert(key), PBXType.Convert(value))
def remove(self, key):
self.data.pop(PBXType.Convert(key), None)
class PBXList(UserList):
def __init__(self, l=None):
if isinstance(l, basestring):
UserList.__init__(self)
self.add(l)
return
elif l:
l = [PBXType.Convert(v) for v in l]
UserList.__init__(self, l)
def add(self, value):
value = PBXType.Convert(value)
if value in self.data:
return False
self.data.append(value)
return True
def remove(self, value):
value = PBXType.Convert(value)
if value in self.data:
self.data.remove(value)
return True
return False
def __setitem__(self, key, value):
UserList.__setitem__(self, PBXType.Convert(key), PBXType.Convert(value))
class PBXType(PBXDict):
def __init__(self, d=None):
PBXDict.__init__(self, d)
if 'isa' not in self:
self['isa'] = self.__class__.__name__
self.id = None
@staticmethod
def Convert(o):
if isinstance(o, list):
return PBXList(o)
elif isinstance(o, dict):
isa = o.get('isa')
if not isa:
return PBXDict(o)
cls = globals().get(isa)
if cls and issubclass(cls, PBXType):
return cls(o)
print 'warning: unknown PBX type: %s' % isa
return PBXDict(o)
else:
return o
@staticmethod
def IsGuid(o):
return re.match('^[A-F0-9]{24}$', str(o))
@classmethod
def GenerateId(cls):
return ''.join(str(uuid.uuid4()).upper().split('-')[1:])
@classmethod
def Create(cls, *args, **kwargs):
return cls(*args, **kwargs)
class PBXFileReference(PBXType):
def __init__(self, d=None):
PBXType.__init__(self, d)
self.build_phase = None
    # Map file extension -> (Xcode file type, default build phase)
    types = {
        '.a': ('archive.ar', 'PBXFrameworksBuildPhase'),
        '.app': ('wrapper.application', None),
        '.s': ('sourcecode.asm', 'PBXSourcesBuildPhase'),
        '.c': ('sourcecode.c.c', 'PBXSourcesBuildPhase'),
        '.cpp': ('sourcecode.cpp.cpp', 'PBXSourcesBuildPhase'),
        '.framework': ('wrapper.framework', 'PBXFrameworksBuildPhase'),
        '.h': ('sourcecode.c.h', None),
        '.hpp': ('sourcecode.c.h', None),
        '.swift': ('sourcecode.swift', 'PBXSourcesBuildPhase'),
        '.icns': ('image.icns', 'PBXResourcesBuildPhase'),
        '.m': ('sourcecode.c.objc', 'PBXSourcesBuildPhase'),
        '.j': ('sourcecode.c.objc', 'PBXSourcesBuildPhase'),
        '.mm': ('sourcecode.cpp.objcpp', 'PBXSourcesBuildPhase'),
        '.nib': ('wrapper.nib', 'PBXResourcesBuildPhase'),
        '.plist': ('text.plist.xml', 'PBXResourcesBuildPhase'),
        '.json': ('text.json', 'PBXResourcesBuildPhase'),
        '.png': ('image.png', 'PBXResourcesBuildPhase'),
        '.rtf': ('text.rtf', 'PBXResourcesBuildPhase'),
        '.tiff': ('image.tiff', 'PBXResourcesBuildPhase'),
        '.txt': ('text', 'PBXResourcesBuildPhase'),
        '.xcodeproj': ('wrapper.pb-project', None),
        '.xib': ('file.xib', 'PBXResourcesBuildPhase'),
        '.strings': ('text.plist.strings', 'PBXResourcesBuildPhase'),
        '.bundle': ('wrapper.plug-in', 'PBXResourcesBuildPhase'),
        '.dylib': ('compiled.mach-o.dylib', 'PBXFrameworksBuildPhase'),
        '.xcdatamodeld': ('wrapper.xcdatamodel', 'PBXSourcesBuildPhase'),
        '.xcassets': ('folder.assetcatalog', 'PBXResourcesBuildPhase'),
    }
trees = ['<absolute>', '<group>', 'BUILT_PRODUCTS_DIR', 'DEVELOPER_DIR', 'SDKROOT', 'SOURCE_ROOT', ]
def guess_file_type(self, ignore_unknown_type=False):
self.remove('explicitFileType')
self.remove('lastKnownFileType')
ext = os.path.splitext(self.get('name', ''))[1]
if os.path.isdir(self.get('path')) and ext not in XcodeProject.special_folders:
f_type = 'folder'
build_phase = None
ext = ''
else:
f_type, build_phase = PBXFileReference.types.get(ext, ('?', 'PBXResourcesBuildPhase'))
self['lastKnownFileType'] = f_type
self.build_phase = build_phase
if f_type == '?' and not ignore_unknown_type:
print 'unknown file extension: %s' % ext
print 'please add extension and Xcode type to PBXFileReference.types'
return f_type
def set_file_type(self, ft):
self.remove('explicitFileType')
self.remove('lastKnownFileType')
self['explicitFileType'] = ft
@classmethod
def Create(cls, os_path, tree='SOURCE_ROOT', ignore_unknown_type=False):
if tree not in cls.trees:
print 'Not a valid sourceTree type: %s' % tree
return None
fr = cls()
fr.id = cls.GenerateId()
fr['path'] = os_path
fr['name'] = os.path.split(os_path)[1]
fr['sourceTree'] = '<absolute>' if os.path.isabs(os_path) else tree
fr.guess_file_type(ignore_unknown_type=ignore_unknown_type)
return fr
class PBXBuildFile(PBXType):
def set_weak_link(self, weak=False):
k_settings = 'settings'
k_attributes = 'ATTRIBUTES'
s = self.get(k_settings)
if not s:
if weak:
self[k_settings] = PBXDict({k_attributes: PBXList(['Weak'])})
return True
atr = s.get(k_attributes)
if not atr:
if weak:
atr = PBXList()
else:
return False
if weak:
atr.add('Weak')
else:
atr.remove('Weak')
self[k_settings][k_attributes] = atr
return True
def add_compiler_flag(self, flag):
k_settings = 'settings'
k_attributes = 'COMPILER_FLAGS'
if k_settings not in self:
self[k_settings] = PBXDict()
if k_attributes not in self[k_settings]:
self[k_settings][k_attributes] = flag
return True
flags = self[k_settings][k_attributes].split(' ')
if flag in flags:
return False
flags.append(flag)
self[k_settings][k_attributes] = ' '.join(flags)
@classmethod
def Create(cls, file_ref, weak=False):
if isinstance(file_ref, PBXFileReference):
file_ref = file_ref.id
bf = cls()
bf.id = cls.GenerateId()
bf['fileRef'] = file_ref
if weak:
bf.set_weak_link(True)
return bf
class PBXGroup(PBXType):
def add_child(self, ref):
if not isinstance(ref, PBXDict):
return None
isa = ref.get('isa')
if isa != 'PBXFileReference' and isa != 'PBXGroup':
return None
if 'children' not in self:
self['children'] = PBXList()
self['children'].add(ref.id)
return ref.id
def remove_child(self, id):
if 'children' not in self:
self['children'] = PBXList()
return
if not PBXType.IsGuid(id):
id = id.id
self['children'].remove(id)
def has_child(self, id):
if 'children' not in self:
self['children'] = PBXList()
return False
if not PBXType.IsGuid(id):
id = id.id
return id in self['children']
def get_name(self):
path_name = os.path.split(self.get('path', ''))[1]
return self.get('name', path_name)
@classmethod
def Create(cls, name, path=None, tree='SOURCE_ROOT'):
grp = cls()
grp.id = cls.GenerateId()
grp['name'] = name
grp['children'] = PBXList()
if path:
grp['path'] = path
grp['sourceTree'] = tree
else:
grp['sourceTree'] = '<group>'
return grp
class PBXNativeTarget(PBXType):
pass
class PBXProject(PBXType):
pass
class PBXContainerItemProxy(PBXType):
pass
class PBXReferenceProxy(PBXType):
pass
class PBXVariantGroup(PBXType):
pass
class PBXTargetDependency(PBXType):
pass
class PBXAggregateTarget(PBXType):
pass
class PBXHeadersBuildPhase(PBXType):
pass
class XCVersionGroup(PBXType):
pass
class PBXBuildPhase(PBXType):
def add_build_file(self, bf):
if bf.get('isa') != 'PBXBuildFile':
return False
if 'files' not in self:
self['files'] = PBXList()
self['files'].add(bf.id)
return True
def remove_build_file(self, id):
if 'files' not in self:
self['files'] = PBXList()
return
self['files'].remove(id)
def has_build_file(self, id):
if 'files' not in self:
self['files'] = PBXList()
return False
if not PBXType.IsGuid(id):
id = id.id
return id in self['files']
class PBXFrameworksBuildPhase(PBXBuildPhase):
pass
class PBXResourcesBuildPhase(PBXBuildPhase):
pass
class PBXShellScriptBuildPhase(PBXBuildPhase):
@classmethod
def Create(cls, script, shell="/bin/sh", files=[], input_paths=[], output_paths=[], show_in_log='0', name=None):
bf = cls()
bf.id = cls.GenerateId()
bf['files'] = files
bf['name'] = "Run Script" if name is None else name
bf['inputPaths'] = input_paths
bf['outputPaths'] = output_paths
        bf['runOnlyForDeploymentPostprocessing'] = '0'
bf['shellPath'] = shell
bf['shellScript'] = script
bf['showEnvVarsInLog'] = show_in_log
return bf
class PBXSourcesBuildPhase(PBXBuildPhase):
pass
class PBXCopyFilesBuildPhase(PBXBuildPhase):
pass
class XCBuildConfiguration(PBXType):
def add_search_paths(self, paths, base, key, recursive=True, escape=True):
modified = False
if not isinstance(paths, list):
paths = [paths]
if base not in self:
self[base] = PBXDict()
for path in paths:
if recursive and not path.endswith('/**'):
path = os.path.join(path, '**')
if key not in self[base]:
self[base][key] = PBXList()
elif isinstance(self[base][key], basestring):
self[base][key] = PBXList(self[base][key])
if path == '$(inherited)':
escape = False
if escape:
if self[base][key].add('"%s"' % path): # '\\"%s\\"' % path
modified = True
else:
if self[base][key].add(path): # '\\"%s\\"' % path
modified = True
return modified
def add_header_search_paths(self, paths, recursive=True):
return self.add_search_paths(paths, 'buildSettings', 'HEADER_SEARCH_PATHS', recursive=recursive)
def add_library_search_paths(self, paths, recursive=True):
return self.add_search_paths(paths, 'buildSettings', 'LIBRARY_SEARCH_PATHS', recursive=recursive)
def add_framework_search_paths(self, paths, recursive=True):
return self.add_search_paths(paths, 'buildSettings', 'FRAMEWORK_SEARCH_PATHS', recursive=recursive)
def add_other_cflags(self, flags):
return self.add_flag('OTHER_CFLAGS', flags)
def add_other_ldflags(self, flags):
return self.add_flag('OTHER_LDFLAGS', flags)
def add_flag(self, key, flags):
modified = False
base = 'buildSettings'
if isinstance(flags, basestring):
flags = PBXList(flags)
if base not in self:
self[base] = PBXDict()
for flag in flags:
if key not in self[base]:
self[base][key] = PBXList()
elif isinstance(self[base][key], basestring):
self[base][key] = PBXList(self[base][key])
if self[base][key].add(flag):
self[base][key] = [e for e in self[base][key] if e]
modified = True
return modified
def remove_flag(self, key, flags):
modified = False
base = 'buildSettings'
if isinstance(flags, basestring):
flags = PBXList(flags)
if base in self: # there are flags, so we can "remove" something
for flag in flags:
if key not in self[base]:
return False
elif isinstance(self[base][key], basestring):
self[base][key] = PBXList(self[base][key])
if self[base][key].remove(flag):
self[base][key] = [e for e in self[base][key] if e]
modified = True
if len(self[base][key]) == 0:
self[base].pop(key, None)
return modified
def remove_other_ldflags(self, flags):
        return self.remove_flag('OTHER_LDFLAGS', flags)
class XCConfigurationList(PBXType):
pass
class XcodeProject(PBXDict):
plutil_path = 'plutil'
special_folders = ['.bundle', '.framework', '.xcodeproj', '.xcassets', '.xcdatamodeld']
def __init__(self, d=None, path=None):
if not path:
path = os.path.join(os.getcwd(), 'project.pbxproj')
self.pbxproj_path = os.path.abspath(path)
self.source_root = os.path.abspath(os.path.join(os.path.split(path)[0], '..'))
IterableUserDict.__init__(self, d)
self.data = PBXDict(self.data)
self.objects = self.get('objects')
self.modified = False
root_id = self.get('rootObject')
if root_id:
self.root_object = self.objects[root_id]
root_group_id = self.root_object.get('mainGroup')
self.root_group = self.objects[root_group_id]
else:
print "error: project has no root object"
self.root_object = None
self.root_group = None
for k, v in self.objects.iteritems():
v.id = k
def add_other_cflags(self, flags):
build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration']
for b in build_configs:
if b.add_other_cflags(flags):
self.modified = True
def add_other_ldflags(self, flags):
build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration']
for b in build_configs:
if b.add_other_ldflags(flags):
self.modified = True
def remove_other_ldflags(self, flags):
build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration']
for b in build_configs:
if b.remove_other_ldflags(flags):
self.modified = True
def add_header_search_paths(self, paths, recursive=True):
build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration']
for b in build_configs:
if b.add_header_search_paths(paths, recursive):
self.modified = True
def add_framework_search_paths(self, paths, recursive=True):
build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration']
for b in build_configs:
if b.add_framework_search_paths(paths, recursive):
self.modified = True
def add_library_search_paths(self, paths, recursive=True):
build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration']
for b in build_configs:
if b.add_library_search_paths(paths, recursive):
self.modified = True
def add_flags(self, pairs, configuration='All'):
build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration']
# iterate over all the pairs of configurations
for b in build_configs:
if configuration != "All" and b.get('name') != configuration:
continue
for k in pairs:
if b.add_flag(k, pairs[k]):
self.modified = True
def remove_flags(self, pairs, configuration='All'):
build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration']
# iterate over all the pairs of configurations
for b in build_configs:
if configuration != "All" and b.get('name') != configuration:
continue
for k in pairs:
if b.remove_flag(k, pairs[k]):
self.modified = True
def get_obj(self, id):
return self.objects.get(id)
def get_ids(self):
return self.objects.keys()
def get_files_by_os_path(self, os_path, tree='SOURCE_ROOT'):
files = [f for f in self.objects.values() if
f.get('isa') == 'PBXFileReference' and f.get('path') == os_path and f.get('sourceTree') == tree]
return files
def get_files_by_name(self, name, parent=None):
if parent:
files = [f for f in self.objects.values() if
f.get('isa') == 'PBXFileReference' and f.get('name') == name and parent.has_child(f)]
else:
files = [f for f in self.objects.values() if f.get('isa') == 'PBXFileReference' and f.get('name') == name]
return files
def get_build_files(self, id):
files = [f for f in self.objects.values() if f.get('isa') == 'PBXBuildFile' and f.get('fileRef') == id]
return files
def get_groups_by_name(self, name, parent=None):
if parent:
groups = [g for g in self.objects.values() if
g.get('isa') == 'PBXGroup' and g.get_name() == name and parent.has_child(g)]
else:
groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup' and g.get_name() == name]
return groups
def get_or_create_group(self, name, path=None, parent=None):
if not name:
return None
if not parent:
parent = self.root_group
elif not isinstance(parent, PBXGroup):
# assume it's an id
parent = self.objects.get(parent, self.root_group)
groups = self.get_groups_by_name(name)
for grp in groups:
if parent.has_child(grp.id):
return grp
grp = PBXGroup.Create(name, path)
parent.add_child(grp)
self.objects[grp.id] = grp
self.modified = True
return grp
def get_groups_by_os_path(self, path):
path = os.path.abspath(path)
groups = [g for g in self.objects.values() if
g.get('isa') == 'PBXGroup' and os.path.abspath(g.get('path', '/dev/null')) == path]
return groups
def get_build_phases(self, phase_name):
phases = [p for p in self.objects.values() if p.get('isa') == phase_name]
return phases
def get_relative_path(self, os_path):
return os.path.relpath(os_path, self.source_root)
def verify_files(self, file_list, parent=None):
# returns list of files not in the current project.
if not file_list:
return []
if parent:
exists_list = [f.get('name') for f in self.objects.values() if
f.get('isa') == 'PBXFileReference' and f.get('name') in file_list and parent.has_child(f)]
else:
exists_list = [f.get('name') for f in self.objects.values() if
f.get('isa') == 'PBXFileReference' and f.get('name') in file_list]
return set(file_list).difference(exists_list)
def add_run_script(self, target, script=None, insert_before_compile=False, name=None):
result = []
targets = [t for t in self.get_build_phases('PBXNativeTarget') + self.get_build_phases('PBXAggregateTarget') if
t.get('name') == target]
if len(targets) != 0:
script_phase = PBXShellScriptBuildPhase.Create(script, name=name)
for t in targets:
skip = False
for buildPhase in t['buildPhases']:
if self.objects[buildPhase].get('isa') == 'PBXShellScriptBuildPhase' and self.objects[
buildPhase].get('shellScript') == script:
skip = True
if not skip:
if insert_before_compile:
t['buildPhases'].insert(0, script_phase.id)
else:
t['buildPhases'].add(script_phase.id)
self.objects[script_phase.id] = script_phase
result.append(script_phase)
return result
def add_run_script_all_targets(self, script=None):
result = []
targets = self.get_build_phases('PBXNativeTarget') + self.get_build_phases('PBXAggregateTarget')
if len(targets) != 0:
script_phase = PBXShellScriptBuildPhase.Create(script)
for t in targets:
skip = False
for buildPhase in t['buildPhases']:
if self.objects[buildPhase].get('isa') == 'PBXShellScriptBuildPhase' and self.objects[
buildPhase].get('shellScript') == script:
skip = True
if not skip:
t['buildPhases'].add(script_phase.id)
self.objects[script_phase.id] = script_phase
result.append(script_phase)
return result
def add_folder(self, os_path, parent=None, excludes=None, recursive=True, create_build_files=True):
if not os.path.isdir(os_path):
return []
if not excludes:
excludes = []
results = []
if not parent:
parent = self.root_group
elif not isinstance(parent, PBXGroup):
# assume it's an id
parent = self.objects.get(parent, self.root_group)
path_dict = {os.path.split(os_path)[0]: parent}
special_list = []
for (grp_path, subdirs, files) in os.walk(os_path):
parent_folder, folder_name = os.path.split(grp_path)
parent = path_dict.get(parent_folder, parent)
if [sp for sp in special_list if parent_folder.startswith(sp)]:
continue
if folder_name.startswith('.'):
special_list.append(grp_path)
continue
if os.path.splitext(grp_path)[1] in XcodeProject.special_folders:
# if this file has a special extension (bundle or framework mainly) treat it as a file
special_list.append(grp_path)
new_files = self.verify_files([folder_name], parent=parent)
# Ignore this file if it is in excludes
if new_files and not [m for m in excludes if re.match(m, grp_path)]:
results.extend(self.add_file(grp_path, parent, create_build_files=create_build_files))
continue
# create group
grp = self.get_or_create_group(folder_name, path=self.get_relative_path(grp_path), parent=parent)
path_dict[grp_path] = grp
results.append(grp)
file_dict = {}
for f in files:
if f[0] == '.' or [m for m in excludes if re.match(m, f)]:
continue
kwds = {'create_build_files': create_build_files, 'parent': grp, 'name': f}
f_path = os.path.join(grp_path, f)
file_dict[f_path] = kwds
new_files = self.verify_files([n.get('name') for n in file_dict.values()], parent=grp)
add_files = [(k, v) for k, v in file_dict.items() if v.get('name') in new_files]
for path, kwds in add_files:
kwds.pop('name', None)
self.add_file(path, **kwds)
if not recursive:
break
for r in results:
self.objects[r.id] = r
return results
def path_leaf(self, path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def add_file_if_doesnt_exist(self, f_path, parent=None, tree='SOURCE_ROOT', create_build_files=True, weak=False,
ignore_unknown_type=False):
for obj in self.objects.values():
if 'path' in obj:
if self.path_leaf(f_path) == self.path_leaf(obj.get('path')):
return []
return self.add_file(f_path, parent, tree, create_build_files, weak, ignore_unknown_type=ignore_unknown_type)
def add_file(self, f_path, parent=None, tree='SOURCE_ROOT', create_build_files=True, weak=False,
ignore_unknown_type=False):
results = []
abs_path = ''
if os.path.isabs(f_path):
abs_path = f_path
if not os.path.exists(f_path):
return results
elif tree == 'SOURCE_ROOT':
f_path = os.path.relpath(f_path, self.source_root)
else:
tree = '<absolute>'
if not parent:
parent = self.root_group
elif not isinstance(parent, PBXGroup):
# assume it's an id
parent = self.objects.get(parent, self.root_group)
file_ref = PBXFileReference.Create(f_path, tree, ignore_unknown_type=ignore_unknown_type)
parent.add_child(file_ref)
results.append(file_ref)
# create a build file for the file ref
if file_ref.build_phase and create_build_files:
phases = self.get_build_phases(file_ref.build_phase)
for phase in phases:
build_file = PBXBuildFile.Create(file_ref, weak=weak)
phase.add_build_file(build_file)
results.append(build_file)
if abs_path and tree == 'SOURCE_ROOT' and os.path.isfile(
abs_path) and file_ref.build_phase == 'PBXFrameworksBuildPhase':
library_path = os.path.join('$(SRCROOT)', os.path.split(f_path)[0])
self.add_library_search_paths([library_path], recursive=False)
if abs_path and tree == 'SOURCE_ROOT' and not os.path.isfile(
abs_path) and file_ref.build_phase == 'PBXFrameworksBuildPhase':
framework_path = os.path.join('$(SRCROOT)', os.path.split(f_path)[0])
self.add_framework_search_paths([framework_path, '$(inherited)'], recursive=False)
for r in results:
self.objects[r.id] = r
if results:
self.modified = True
return results
def check_and_repair_framework(self, base):
name = os.path.basename(base)
if ".framework" in name:
basename = name[:-len(".framework")]
finalHeaders = os.path.join(base, "Headers")
finalCurrent = os.path.join(base, "Versions/Current")
finalLib = os.path.join(base, basename)
srcHeaders = "Versions/A/Headers"
srcCurrent = "A"
srcLib = "Versions/A/" + basename
if not os.path.exists(finalHeaders):
os.symlink(srcHeaders, finalHeaders)
if not os.path.exists(finalCurrent):
os.symlink(srcCurrent, finalCurrent)
if not os.path.exists(finalLib):
os.symlink(srcLib, finalLib)
def get_file_id_by_path(self, f_path):
for k, v in self.objects.iteritems():
if str(v.get('path')) == f_path:
return k
return 0
def remove_file_by_path(self, f_path, recursive=True):
id = self.get_file_id_by_path(f_path)
if id != 0:
self.remove_file(id, recursive=recursive)
return
def remove_file(self, id, recursive=True):
if not PBXType.IsGuid(id):
id = id.id
if id in self.objects:
self.objects.remove(id)
# Remove from PBXResourcesBuildPhase and PBXSourcesBuildPhase if necessary
buildFiles = [f for f in self.objects.values() if f.get('isa') == 'PBXBuildFile']
for buildFile in buildFiles:
if id == buildFile.get('fileRef'):
key = buildFile.id
PBXRBP = [f for f in self.objects.values() if f.get('isa') == 'PBXResourcesBuildPhase']
PBXSBP = [f for f in self.objects.values() if f.get('isa') == 'PBXSourcesBuildPhase']
self.objects.remove(key)
if len(PBXSBP) and PBXSBP[0].has_build_file(key):
PBXSBP[0].remove_build_file(key)
if len(PBXRBP) and PBXRBP[0].has_build_file(key):
PBXRBP[0].remove_build_file(key)
if recursive:
groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup']
for group in groups:
if id in group['children']:
group.remove_child(id)
self.modified = True
def remove_group(self, id, recursive=True):
if not PBXType.IsGuid(id):
id = id.id
name = self.objects.get(id).get('path')
children = self.objects.get(id).get('children')
if name is None:
name = id
if id in self.objects:
if recursive:
for childKey in children:
childValue = self.objects.get(childKey)
if childValue.get('isa') == 'PBXGroup':
self.remove_group(childKey, True)
else:
self.remove_file(childKey, False)
            self.objects.remove(id)
def remove_group_by_name(self, name, recursive=True):
groups = self.get_groups_by_name(name)
if len(groups):
for group in groups:
self.remove_group(group.id, recursive)
def move_file(self, id, dest_grp=None):
pass
def apply_patch(self, patch_path, xcode_path):
if not os.path.isfile(patch_path) or not os.path.isdir(xcode_path):
print 'ERROR: couldn\'t apply "%s" to "%s"' % (patch_path, xcode_path)
return
print 'applying "%s" to "%s"' % (patch_path, xcode_path)
return subprocess.call(['patch', '-p1', '--forward', '--directory=%s' % xcode_path, '--input=%s' % patch_path])
def apply_mods(self, mod_dict, default_path=None):
if not default_path:
default_path = os.getcwd()
keys = mod_dict.keys()
for k in keys:
v = mod_dict.pop(k)
mod_dict[k.lower()] = v
parent = mod_dict.pop('group', None)
if parent:
parent = self.get_or_create_group(parent)
excludes = mod_dict.pop('excludes', [])
if excludes:
excludes = [re.compile(e) for e in excludes]
compiler_flags = mod_dict.pop('compiler_flags', {})
for k, v in mod_dict.items():
if k == 'patches':
for p in v:
if not os.path.isabs(p):
p = os.path.join(default_path, p)
self.apply_patch(p, self.source_root)
elif k == 'folders':
# get and compile excludes list
# do each folder individually
for folder in v:
kwds = {}
# if path contains ':' remove it and set recursive to False
if ':' in folder:
args = folder.split(':')
kwds['recursive'] = False
folder = args.pop(0)
if os.path.isabs(folder) and os.path.isdir(folder):
pass
else:
folder = os.path.join(default_path, folder)
if not os.path.isdir(folder):
continue
if parent:
kwds['parent'] = parent
if excludes:
kwds['excludes'] = excludes
self.add_folder(folder, **kwds)
elif k == 'headerpaths' or k == 'librarypaths':
paths = []
for p in v:
if p.endswith('/**'):
p = os.path.split(p)[0]
if not os.path.isabs(p):
p = os.path.join(default_path, p)
if not os.path.exists(p):
continue
p = self.get_relative_path(p)
paths.append(os.path.join('$(SRCROOT)', p, "**"))
if k == 'headerpaths':
self.add_header_search_paths(paths)
else:
self.add_library_search_paths(paths)
elif k == 'other_cflags':
self.add_other_cflags(v)
elif k == 'other_ldflags':
self.add_other_ldflags(v)
elif k == 'libs' or k == 'frameworks' or k == 'files':
paths = {}
for p in v:
kwds = {}
if ':' in p:
args = p.split(':')
p = args.pop(0)
if 'weak' in args:
kwds['weak'] = True
file_path = os.path.join(default_path, p)
search_path, file_name = os.path.split(file_path)
if [m for m in excludes if re.match(m, file_name)]:
continue
try:
expr = re.compile(file_name)
except re.error:
expr = None
if expr and os.path.isdir(search_path):
file_list = os.listdir(search_path)
for f in file_list:
if [m for m in excludes if re.match(m, f)]:
continue
if re.search(expr, f):
kwds['name'] = f
paths[os.path.join(search_path, f)] = kwds
p = None
if k == 'libs':
kwds['parent'] = self.get_or_create_group('Libraries', parent=parent)
elif k == 'frameworks':
kwds['parent'] = self.get_or_create_group('Frameworks', parent=parent)
if p:
kwds['name'] = file_name
if k == 'libs':
p = os.path.join('usr', 'lib', p)
kwds['tree'] = 'SDKROOT'
elif k == 'frameworks':
p = os.path.join('System', 'Library', 'Frameworks', p)
kwds['tree'] = 'SDKROOT'
elif k == 'files' and not os.path.exists(file_path):
# don't add non-existent files to the project.
continue
paths[p] = kwds
new_files = self.verify_files([n.get('name') for n in paths.values()])
add_files = [(k, v) for k, v in paths.items() if v.get('name') in new_files]
for path, kwds in add_files:
kwds.pop('name', None)
if 'parent' not in kwds and parent:
kwds['parent'] = parent
self.add_file(path, **kwds)
if compiler_flags:
for k, v in compiler_flags.items():
filerefs = []
for f in v:
filerefs.extend([fr.id for fr in self.objects.values() if
fr.get('isa') == 'PBXFileReference' and fr.get('name') == f])
buildfiles = [bf for bf in self.objects.values() if
bf.get('isa') == 'PBXBuildFile' and bf.get('fileRef') in filerefs]
for bf in buildfiles:
if bf.add_compiler_flag(k):
self.modified = True
def backup(self, file_name=None, backup_name=None):
if not file_name:
file_name = self.pbxproj_path
if not backup_name:
backup_name = "%s.%s.backup" % (file_name, datetime.datetime.now().strftime('%d%m%y-%H%M%S'))
shutil.copy2(file_name, backup_name)
return backup_name
def save(self, file_name=None, old_format=False, sort=False):
if old_format:
self.save_format_xml(file_name)
else:
self.save_new_format(file_name, sort)
def save_format_xml(self, file_name=None):
"""Saves in old (xml) format"""
if not file_name:
file_name = self.pbxproj_path
# This code is adapted from plistlib.writePlist
with open(file_name, "w") as f:
writer = PBXWriter(f)
writer.writeln("<plist version=\"1.0\">")
writer.writeValue(self.data)
writer.writeln("</plist>")
def save_new_format(self, file_name=None, sort=False):
"""Save in Xcode 3.2 compatible (new) format"""
if not file_name:
file_name = self.pbxproj_path
# process to get the section's info and names
objs = self.data.get('objects')
sections = dict()
uuids = dict()
for key in objs:
l = list()
if objs.get(key).get('isa') in sections:
l = sections.get(objs.get(key).get('isa'))
l.append(tuple([key, objs.get(key)]))
sections[objs.get(key).get('isa')] = l
if 'name' in objs.get(key):
uuids[key] = objs.get(key).get('name')
elif 'path' in objs.get(key):
uuids[key] = objs.get(key).get('path')
else:
if objs.get(key).get('isa') == 'PBXProject':
uuids[objs.get(key).get(
'buildConfigurationList')] = 'Build configuration list for PBXProject "Unity-iPhone"'
elif objs.get(key).get('isa')[0:3] == 'PBX':
uuids[key] = objs.get(key).get('isa')[3:-10]
else:
uuids[key] = 'Build configuration list for PBXNativeTarget "TARGET_NAME"'
ro = self.data.get('rootObject')
uuids[ro] = 'Project object'
for key in objs:
# transitive references (used in the BuildFile section)
if 'fileRef' in objs.get(key) and objs.get(key).get('fileRef') in uuids:
uuids[key] = uuids[objs.get(key).get('fileRef')]
# transitive reference to the target name (used in the Native target section)
if objs.get(key).get('isa') == 'PBXNativeTarget':
uuids[objs.get(key).get('buildConfigurationList')] = uuids[
objs.get(key).get('buildConfigurationList')].replace('TARGET_NAME', uuids[key])
self.uuids = uuids
self.sections = sections
out = open(file_name, 'w')
out.write('// !$*UTF8*$!\n')
self._printNewXCodeFormat(out, self.data, '', enters=True, sort=sort)
out.close()
@classmethod
def addslashes(cls, s):
d = {'"': '\\"', "'": "\\'", "\0": "\\\0", "\\": "\\\\", "\n": "\\n"}
return ''.join(d.get(c, c) for c in s)
def _printNewXCodeFormat(self, out, root, deep, enters=True, sort=False):
if isinstance(root, IterableUserDict):
out.write('{')
if enters:
out.write('\n')
isa = root.pop('isa', '')
if isa != '': # keep the isa in the first spot
if enters:
out.write('\t' + deep)
out.write('isa = ')
self._printNewXCodeFormat(out, isa, '\t' + deep, enters=enters)
out.write(';')
if enters:
out.write('\n')
else:
out.write(' ')
for key in sorted(root.iterkeys()): # keep the same order as Apple.
if enters:
out.write('\t' + deep)
if re.match(regex, key).group(0) == key:
out.write(key.encode("utf-8") + ' = ')
else:
out.write('"' + key.encode("utf-8") + '" = ')
if key == 'objects':
out.write('{') # open the objects section
if enters:
out.write('\n')
# root.remove('objects') # remove it to avoid problems
sections = [('PBXBuildFile', False), ('PBXCopyFilesBuildPhase', True), ('PBXFileReference', False),
('PBXFrameworksBuildPhase', True), ('PBXGroup', True), ('PBXAggregateTarget', True),
('PBXNativeTarget', True), ('PBXProject', True), ('PBXResourcesBuildPhase', True),
('PBXShellScriptBuildPhase', True), ('PBXSourcesBuildPhase', True),
('XCBuildConfiguration', True), ('XCConfigurationList', True), ('PBXTargetDependency', True),
('PBXVariantGroup', True), ('PBXReferenceProxy', True), ('PBXContainerItemProxy', True),
('XCVersionGroup', True)]
for section in sections: # iterate over the sections
if self.sections.get(section[0]) is None:
continue
out.write('\n/* Begin %s section */' % section[0].encode("utf-8"))
self.sections.get(section[0]).sort(cmp=lambda x, y: cmp(x[0], y[0]))
if sort and section[0] == 'PBXGroup':
for entry in self.sections.get(section[0]):
entry[1]['children'] = sorted(entry[1]['children'],
key=lambda x: self.uuids[x].encode("utf-8"))
for pair in self.sections.get(section[0]):
key = pair[0]
value = pair[1]
out.write('\n')
if enters:
out.write('\t\t' + deep)
out.write(key.encode("utf-8"))
if key in self.uuids:
out.write(" /* " + self.uuids[key].encode("utf-8") + " */")
out.write(" = ")
self._printNewXCodeFormat(out, value, '\t\t' + deep, enters=section[1])
out.write(';')
out.write('\n/* End %s section */\n' % section[0].encode("utf-8"))
out.write(deep + '\t}') # close of the objects section
else:
self._printNewXCodeFormat(out, root[key], '\t' + deep, enters=enters)
out.write(';')
if enters:
out.write('\n')
else:
out.write(' ')
root['isa'] = isa # restore the isa for further calls
if enters:
out.write(deep)
out.write('}')
elif isinstance(root, UserList):
out.write('(')
if enters:
out.write('\n')
for value in root:
if enters:
out.write('\t' + deep)
self._printNewXCodeFormat(out, value, '\t' + deep, enters=enters)
out.write(',')
if enters:
out.write('\n')
if enters:
out.write(deep)
out.write(')')
else:
if len(root) > 0 and re.match(regex, root).group(0) == root:
out.write(root.encode("utf-8"))
else:
out.write('"' + XcodeProject.addslashes(root.encode("utf-8")) + '"')
if root in self.uuids:
out.write(" /* " + self.uuids[root].encode("utf-8") + " */")
@classmethod
def Load(cls, path, pure_python=False):
if pure_python:
import openstep_parser as osp
tree = osp.OpenStepDecoder.ParseFromFile(open(path, 'r'))
else:
cls.plutil_path = os.path.join(os.path.split(__file__)[0], 'plutil')
if not os.path.isfile(XcodeProject.plutil_path):
cls.plutil_path = 'plutil'
# load project by converting to xml and then convert that using plistlib
p = subprocess.Popen([XcodeProject.plutil_path, '-convert', 'xml1', '-o', '-', path],
stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
# If the plist was malformed, return code will be non-zero
if p.returncode != 0:
print stdout
return None
tree = plistlib.readPlistFromString(stdout)
return XcodeProject(tree, path)
@classmethod
def LoadFromXML(cls, path):
tree = plistlib.readPlist(path)
return XcodeProject(tree, path)
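# Typical high-level usage of this class (added for exposition; the project
# path and file names are hypothetical):
#
#   project = XcodeProject.Load('Unity-iPhone.xcodeproj/project.pbxproj')
#   project.backup()                               # timestamped copy of the pbxproj
#   project.add_file('/path/to/Libraries/libFoo.a')  # file reference plus build-file entries
#   project.add_header_search_paths(['Vendor/include'])
#   if project.modified:
#       project.save()                             # writes the Xcode plain-text format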
# The code below was adapted from plistlib.py.
class PBXWriter(plistlib.PlistWriter):
def writeValue(self, value):
if isinstance(value, (PBXList, PBXDict)):
plistlib.PlistWriter.writeValue(self, value.data)
else:
plistlib.PlistWriter.writeValue(self, value)
def simpleElement(self, element, value=None):
"""
We have to override this method to deal with Unicode text correctly.
Non-ascii characters have to get encoded as character references.
"""
if value is not None:
value = _escapeAndEncode(value)
self.writeln("<%s>%s</%s>" % (element, value, element))
else:
self.writeln("<%s/>" % element)
# Regex to find any control chars, except for \t \n and \r
_controlCharPat = re.compile(r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")
def _escapeAndEncode(text):
m = _controlCharPat.search(text)
if m is not None:
raise ValueError("strings can't contains control characters; "
"use plistlib.Data instead")
text = text.replace("\r\n", "\n") # convert DOS line endings
text = text.replace("\r", "\n") # convert Mac line endings
text = text.replace("&", "&") # escape '&'
text = text.replace("<", "<") # escape '<'
text = text.replace(">", ">") # escape '>'
return text.encode("ascii", "xmlcharrefreplace") # encode as ascii with xml character references
|
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import collections
import re
from traceback import format_exc
from django.conf import settings
from graphite.future import Future
from graphite.logger import log
from graphite.readers.utils import wait_for_result
from graphite.storage import STORE
from graphite.util import timebounds, logtime
from graphite.render.utils import extractPathExpressions
class TimeSeries(list):
def __init__(self, name, start, end, step, values, consolidate='average', tags=None):
list.__init__(self, values)
self.name = name
self.start = start
self.end = end
self.step = step
self.consolidationFunc = consolidate
self.valuesPerPoint = 1
self.options = {}
self.pathExpression = name
if tags:
self.tags = tags
else:
self.tags = {'name': name}
# parse for tags if a tagdb is configured and name doesn't look like a function-wrapped name
if STORE.tagdb and not re.match('^[a-z]+[(].+[)]$', name, re.IGNORECASE):
try:
self.tags = STORE.tagdb.parse(name).tags
except Exception as err:
# tags couldn't be parsed, just use "name" tag
log.debug("Couldn't parse tags for %s: %s" % (name, err))
def __eq__(self, other):
if isinstance(other, TimeSeries):
color_check = True
if hasattr(self, 'color'):
if hasattr(other, 'color'):
color_check = (self.color == other.color)
else:
color_check = False
elif hasattr(other, 'color'):
color_check = False
return ((self.name, self.start, self.end, self.step, self.consolidationFunc, self.valuesPerPoint, self.options) ==
(other.name, other.start, other.end, other.step, other.consolidationFunc, other.valuesPerPoint, other.options)) and list.__eq__(self, other) and color_check
return False
def __iter__(self):
if self.valuesPerPoint > 1:
return self.__consolidatingGenerator( list.__iter__(self) )
else:
return list.__iter__(self)
def consolidate(self, valuesPerPoint):
self.valuesPerPoint = int(valuesPerPoint)
def __consolidatingGenerator(self, gen):
buf = []
for x in gen:
buf.append(x)
if len(buf) == self.valuesPerPoint:
while None in buf: buf.remove(None)
if buf:
yield self.__consolidate(buf)
buf = []
else:
yield None
while None in buf: buf.remove(None)
if buf: yield self.__consolidate(buf)
else: yield None
        return  # ending the generator raises StopIteration implicitly
def __consolidate(self, values):
usable = [v for v in values if v is not None]
if not usable: return None
if self.consolidationFunc == 'sum':
return sum(usable)
if self.consolidationFunc == 'average':
return float(sum(usable)) / len(usable)
if self.consolidationFunc == 'max':
return max(usable)
if self.consolidationFunc == 'min':
return min(usable)
if self.consolidationFunc == 'first':
return usable[0]
if self.consolidationFunc == 'last':
return usable[-1]
raise Exception("Invalid consolidation function: '%s'" % self.consolidationFunc)
def __repr__(self):
return 'TimeSeries(name=%s, start=%s, end=%s, step=%s)' % (self.name, self.start, self.end, self.step)
def getInfo(self):
"""Pickle-friendly representation of the series"""
return {
'name' : self.name,
'start' : self.start,
'end' : self.end,
'step' : self.step,
'values' : list(self),
'pathExpression' : self.pathExpression,
}
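# Illustrative sketch (added for exposition, not part of the original module;
# constructing a TimeSeries consults STORE.tagdb, so this assumes a configured
# graphite-web environment). consolidate(n) does not resample the stored
# values; it only sets valuesPerPoint, and iteration then folds every n raw
# points through the configured consolidation function ('average' by default).
def _example_consolidation():
    '''Average pairs of points: five raw values become three consolidated ones.'''
    series = TimeSeries('demo.metric', 0, 50, 10, [1, 2, 3, 4, 5])
    series.consolidate(2)
    return list(series)  # -> [1.5, 3.5, 5.0]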
@logtime()
def _fetchData(pathExpr, startTime, endTime, now, requestContext, seriesList):
result_queue = []
remote_done = False
if settings.REMOTE_PREFETCH_DATA:
prefetched = requestContext['prefetched'].get((startTime, endTime, now), None)
if prefetched is not None:
for result in prefetched[pathExpr]:
result_queue.append(result)
            # Remote data was already prefetched above, so only local data still needs to be fetched.
remote_done = True
local = remote_done or requestContext['localOnly']
matching_nodes = STORE.find(
pathExpr, startTime, endTime,
local=local,
headers=requestContext['forwardHeaders'],
leaves_only=True,
)
for node in matching_nodes:
result_queue.append(
(node.path, node.fetch(startTime, endTime, now, requestContext)))
return _merge_results(pathExpr, startTime, endTime, result_queue, seriesList)
def _merge_results(pathExpr, startTime, endTime, result_queue, seriesList):
log.debug("render.datalib.fetchData :: starting to merge")
for path, results in result_queue:
results = wait_for_result(results)
if not results:
log.debug("render.datalib.fetchData :: no results for %s.fetch(%s, %s)" % (path, startTime, endTime))
continue
try:
(timeInfo, values) = results
except ValueError as e:
raise Exception("could not parse timeInfo/values from metric '%s': %s" % (path, e))
(start, end, step) = timeInfo
series = TimeSeries(path, start, end, step, values)
# hack to pass expressions through to render functions
series.pathExpression = pathExpr
# Used as a cache to avoid recounting series None values below.
series_best_nones = {}
if series.name in seriesList:
# This counts the Nones in each series, and is unfortunately O(n) for each
# series, which may be worth further optimization. The value of doing this
# at all is to avoid the "flipping" effect of loading a graph multiple times
# and having inconsistent data returned if one of the backing stores has
# inconsistent data. This is imperfect as a validity test, but in practice
# nicely keeps us using the "most complete" dataset available. Think of it
# as a very weak CRDT resolver.
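            # Worked example of this rule (added for exposition, with
            # hypothetical values): if one backend returned [1, None, 3, None]
            # for a series and another later returns [1, 2, 3, None] for the
            # same name, the later one has fewer Nones; with
            # REMOTE_STORE_MERGE_RESULTS disabled it simply replaces the
            # earlier one in seriesList, and with it enabled the earlier one
            # is patched in place to [1, 2, 3, None].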
candidate_nones = 0
if not settings.REMOTE_STORE_MERGE_RESULTS:
candidate_nones = len(
[val for val in values if val is None])
known = seriesList[series.name]
# To avoid repeatedly recounting the 'Nones' in series we've already seen,
# cache the best known count so far in a dict.
if known.name in series_best_nones:
known_nones = series_best_nones[known.name]
else:
known_nones = len([val for val in known if val is None])
if known_nones > candidate_nones and len(series):
if settings.REMOTE_STORE_MERGE_RESULTS:
# This series has potential data that might be missing from
# earlier series. Attempt to merge in useful data and update
# the cache count.
log.debug("Merging multiple TimeSeries for %s" % known.name)
for i, j in enumerate(known):
if j is None and series[i] is not None:
known[i] = series[i]
known_nones -= 1
# Store known_nones in our cache
series_best_nones[known.name] = known_nones
else:
# Not merging data -
# we've found a series better than what we've already seen. Update
# the count cache and replace the given series in the array.
series_best_nones[known.name] = candidate_nones
seriesList[known.name] = series
else:
if settings.REMOTE_PREFETCH_DATA:
# if we're using REMOTE_PREFETCH_DATA we can save some time by skipping
# find, but that means we don't know how many nodes to expect so we
# have to iterate over all returned results
continue
            # If we are merging data, the existing series has no gaps and
            # there is nothing to merge in. Save ourselves some work here.
            #
            # OR - if we are picking the best series:
            #
            # We already have this series in seriesList, and the candidate is
            # 'worse' than what we already have, so there is nothing else to
            # compare. Save ourselves some work here.
break
else:
# If we looked at this series above, and it matched a 'known'
# series already, then it's already in the series list (or ignored).
# If not, append it here.
seriesList[series.name] = series
# Stabilize the order of the results by ordering the resulting series by name.
# This returns the result ordering to the behavior observed pre PR#1010.
return [seriesList[k] for k in sorted(seriesList)]
# Data retrieval API
@logtime()
def fetchData(requestContext, pathExpr):
seriesList = {}
(startTime, endTime, now) = timebounds(requestContext)
retries = 1 # start counting at one to make log output and settings more readable
while True:
try:
seriesList = _fetchData(pathExpr, startTime, endTime, now, requestContext, seriesList)
break
except Exception:
if retries >= settings.MAX_FETCH_RETRIES:
log.exception("Failed after %s retry! Root cause:\n%s" %
(settings.MAX_FETCH_RETRIES, format_exc()))
raise
else:
log.exception("Got an exception when fetching data! Try: %i of %i. Root cause:\n%s" %
(retries, settings.MAX_FETCH_RETRIES, format_exc()))
retries += 1
return seriesList
def nonempty(series):
for value in series:
if value is not None:
return True
return False
class PrefetchedData(Future):
def __init__(self, results):
self._results = results
self._prefetched = None
def _data(self):
if self._prefetched is None:
self._fetch_data()
return self._prefetched
def _fetch_data(self):
prefetched = collections.defaultdict(list)
for result in self._results:
fetched = wait_for_result(result)
if fetched is None:
continue
for result in fetched:
prefetched[result['pathExpression']].append((
result['name'],
(
result['time_info'],
result['values'],
),
))
self._prefetched = prefetched
def prefetchRemoteData(requestContext, targets):
"""Prefetch a bunch of path expressions and stores them in the context.
The idea is that this will allow more batching that doing a query
each time evaluateTarget() needs to fetch a path. All the prefetched
data is stored in the requestContext, to be accessed later by datalib.
"""
pathExpressions = extractPathExpressions(targets)
log.rendering("Prefetching remote data for [%s]" % (', '.join(pathExpressions)))
(startTime, endTime, now) = timebounds(requestContext)
results = STORE.fetch_remote(pathExpressions, startTime, endTime, now, requestContext)
requestContext['prefetched'][(startTime, endTime, now)] = PrefetchedData(results)
|
|
# This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the command-line interface.
"""
import os
import shutil
import textwrap
import re
import yaml
import _common
from _common import unittest
from beets import library
from beets import ui
from beets.ui import commands
from beets import autotag
from beets.autotag.match import distance
from beets import importer
from beets.mediafile import MediaFile
from beets import config
from beets.util import confit
class ListTest(_common.TestCase):
def setUp(self):
super(ListTest, self).setUp()
self.io.install()
self.lib = library.Library(':memory:')
i = _common.item()
i.path = 'xxx/yyy'
self.lib.add(i)
self.lib.add_album([i])
self.item = i
def _run_list(self, query='', album=False, path=False, fmt=None):
commands.list_items(self.lib, query, album, fmt)
def test_list_outputs_item(self):
self._run_list()
out = self.io.getoutput()
self.assertTrue(u'the title' in out)
def test_list_unicode_query(self):
self.item.title = u'na\xefve'
self.item.store()
self.lib._connection().commit()
self._run_list([u'na\xefve'])
out = self.io.getoutput()
self.assertTrue(u'na\xefve' in out.decode(self.io.stdout.encoding))
def test_list_item_path(self):
self._run_list(fmt='$path')
out = self.io.getoutput()
self.assertEqual(out.strip(), u'xxx/yyy')
def test_list_album_outputs_something(self):
self._run_list(album=True)
out = self.io.getoutput()
self.assertGreater(len(out), 0)
def test_list_album_path(self):
self._run_list(album=True, fmt='$path')
out = self.io.getoutput()
self.assertEqual(out.strip(), u'xxx')
def test_list_album_omits_title(self):
self._run_list(album=True)
out = self.io.getoutput()
self.assertTrue(u'the title' not in out)
def test_list_uses_track_artist(self):
self._run_list()
out = self.io.getoutput()
self.assertTrue(u'the artist' in out)
self.assertTrue(u'the album artist' not in out)
def test_list_album_uses_album_artist(self):
self._run_list(album=True)
out = self.io.getoutput()
self.assertTrue(u'the artist' not in out)
self.assertTrue(u'the album artist' in out)
def test_list_item_format_artist(self):
self._run_list(fmt='$artist')
out = self.io.getoutput()
self.assertTrue(u'the artist' in out)
def test_list_item_format_multiple(self):
self._run_list(fmt='$artist - $album - $year')
out = self.io.getoutput()
self.assertTrue(u'1' in out)
self.assertTrue(u'the album' in out)
self.assertTrue(u'the artist' in out)
self.assertEqual(u'the artist - the album - 1', out.strip())
def test_list_album_format(self):
self._run_list(album=True, fmt='$genre')
out = self.io.getoutput()
self.assertTrue(u'the genre' in out)
self.assertTrue(u'the album' not in out)
class RemoveTest(_common.TestCase):
def setUp(self):
super(RemoveTest, self).setUp()
self.io.install()
self.libdir = os.path.join(self.temp_dir, 'testlibdir')
os.mkdir(self.libdir)
# Copy a file into the library.
self.lib = library.Library(':memory:', self.libdir)
self.i = library.Item.from_path(os.path.join(_common.RSRC, 'full.mp3'))
self.lib.add(self.i)
self.i.move(True)
def test_remove_items_no_delete(self):
self.io.addinput('y')
commands.remove_items(self.lib, '', False, False)
items = self.lib.items()
self.assertEqual(len(list(items)), 0)
self.assertTrue(os.path.exists(self.i.path))
def test_remove_items_with_delete(self):
self.io.addinput('y')
commands.remove_items(self.lib, '', False, True)
items = self.lib.items()
self.assertEqual(len(list(items)), 0)
self.assertFalse(os.path.exists(self.i.path))
class ModifyTest(_common.TestCase):
def setUp(self):
super(ModifyTest, self).setUp()
self.io.install()
self.libdir = os.path.join(self.temp_dir, 'testlibdir')
# Copy a file into the library.
self.lib = library.Library(':memory:', self.libdir)
self.i = library.Item.from_path(os.path.join(_common.RSRC, 'full.mp3'))
self.lib.add(self.i)
self.i.move(True)
self.album = self.lib.add_album([self.i])
def _modify(self, mods, query=(), write=False, move=False, album=False):
self.io.addinput('y')
commands.modify_items(self.lib, mods, query,
write, move, album, True)
def test_modify_item_dbdata(self):
self._modify(["title=newTitle"])
item = self.lib.items().get()
self.assertEqual(item.title, 'newTitle')
def test_modify_album_dbdata(self):
self._modify(["album=newAlbum"], album=True)
album = self.lib.albums()[0]
self.assertEqual(album.album, 'newAlbum')
def test_modify_item_tag_unmodified(self):
self._modify(["title=newTitle"], write=False)
item = self.lib.items().get()
item.read()
self.assertEqual(item.title, 'full')
def test_modify_album_tag_unmodified(self):
self._modify(["album=newAlbum"], write=False, album=True)
item = self.lib.items().get()
item.read()
self.assertEqual(item.album, 'the album')
def test_modify_item_tag(self):
self._modify(["title=newTitle"], write=True)
item = self.lib.items().get()
item.read()
self.assertEqual(item.title, 'newTitle')
def test_modify_album_tag(self):
self._modify(["album=newAlbum"], write=True, album=True)
item = self.lib.items().get()
item.read()
self.assertEqual(item.album, 'newAlbum')
def test_item_move(self):
self._modify(["title=newTitle"], move=True)
item = self.lib.items().get()
self.assertTrue('newTitle' in item.path)
def test_album_move(self):
self._modify(["album=newAlbum"], move=True, album=True)
item = self.lib.items().get()
item.read()
self.assertTrue('newAlbum' in item.path)
def test_item_not_move(self):
self._modify(["title=newTitle"], move=False)
item = self.lib.items().get()
self.assertFalse('newTitle' in item.path)
def test_album_not_move(self):
self._modify(["album=newAlbum"], move=False, album=True)
item = self.lib.items().get()
item.read()
self.assertFalse('newAlbum' in item.path)
class MoveTest(_common.TestCase):
def setUp(self):
super(MoveTest, self).setUp()
self.io.install()
self.libdir = os.path.join(self.temp_dir, 'testlibdir')
os.mkdir(self.libdir)
self.itempath = os.path.join(self.libdir, 'srcfile')
shutil.copy(os.path.join(_common.RSRC, 'full.mp3'), self.itempath)
# Add a file to the library but don't copy it in yet.
self.lib = library.Library(':memory:', self.libdir)
self.i = library.Item.from_path(self.itempath)
self.lib.add(self.i)
self.album = self.lib.add_album([self.i])
# Alternate destination directory.
self.otherdir = os.path.join(self.temp_dir, 'testotherdir')
def _move(self, query=(), dest=None, copy=False, album=False):
commands.move_items(self.lib, dest, query, copy, album)
def test_move_item(self):
self._move()
self.i.load()
self.assertTrue('testlibdir' in self.i.path)
self.assertExists(self.i.path)
self.assertNotExists(self.itempath)
def test_copy_item(self):
self._move(copy=True)
self.i.load()
self.assertTrue('testlibdir' in self.i.path)
self.assertExists(self.i.path)
self.assertExists(self.itempath)
def test_move_album(self):
self._move(album=True)
self.i.load()
self.assertTrue('testlibdir' in self.i.path)
self.assertExists(self.i.path)
self.assertNotExists(self.itempath)
def test_copy_album(self):
self._move(copy=True, album=True)
self.i.load()
self.assertTrue('testlibdir' in self.i.path)
self.assertExists(self.i.path)
self.assertExists(self.itempath)
def test_move_item_custom_dir(self):
self._move(dest=self.otherdir)
self.i.load()
self.assertTrue('testotherdir' in self.i.path)
self.assertExists(self.i.path)
self.assertNotExists(self.itempath)
def test_move_album_custom_dir(self):
self._move(dest=self.otherdir, album=True)
self.i.load()
self.assertTrue('testotherdir' in self.i.path)
self.assertExists(self.i.path)
self.assertNotExists(self.itempath)
class UpdateTest(_common.TestCase):
def setUp(self):
super(UpdateTest, self).setUp()
self.io.install()
self.libdir = os.path.join(self.temp_dir, 'testlibdir')
# Copy a file into the library.
self.lib = library.Library(':memory:', self.libdir)
self.i = library.Item.from_path(os.path.join(_common.RSRC, 'full.mp3'))
self.lib.add(self.i)
self.i.move(True)
self.album = self.lib.add_album([self.i])
# Album art.
artfile = os.path.join(_common.RSRC, 'testart.jpg')
_common.touch(artfile)
self.album.set_art(artfile)
self.album.store()
os.remove(artfile)
def _update(self, query=(), album=False, move=False, reset_mtime=True):
self.io.addinput('y')
if reset_mtime:
self.i.mtime = 0
self.i.store()
commands.update_items(self.lib, query, album, move, False)
def test_delete_removes_item(self):
self.assertTrue(list(self.lib.items()))
os.remove(self.i.path)
self._update()
self.assertFalse(list(self.lib.items()))
def test_delete_removes_album(self):
self.assertTrue(self.lib.albums())
os.remove(self.i.path)
self._update()
self.assertFalse(self.lib.albums())
def test_delete_removes_album_art(self):
artpath = self.album.artpath
self.assertExists(artpath)
os.remove(self.i.path)
self._update()
self.assertNotExists(artpath)
def test_modified_metadata_detected(self):
mf = MediaFile(self.i.path)
mf.title = 'differentTitle'
mf.save()
self._update()
item = self.lib.items().get()
self.assertEqual(item.title, 'differentTitle')
def test_modified_metadata_moved(self):
mf = MediaFile(self.i.path)
mf.title = 'differentTitle'
mf.save()
self._update(move=True)
item = self.lib.items().get()
self.assertTrue('differentTitle' in item.path)
def test_modified_metadata_not_moved(self):
mf = MediaFile(self.i.path)
mf.title = 'differentTitle'
mf.save()
self._update(move=False)
item = self.lib.items().get()
self.assertTrue('differentTitle' not in item.path)
def test_modified_album_metadata_moved(self):
mf = MediaFile(self.i.path)
mf.album = 'differentAlbum'
mf.save()
self._update(move=True)
item = self.lib.items().get()
self.assertTrue('differentAlbum' in item.path)
def test_modified_album_metadata_art_moved(self):
artpath = self.album.artpath
mf = MediaFile(self.i.path)
mf.album = 'differentAlbum'
mf.save()
self._update(move=True)
album = self.lib.albums()[0]
self.assertNotEqual(artpath, album.artpath)
def test_mtime_match_skips_update(self):
mf = MediaFile(self.i.path)
mf.title = 'differentTitle'
mf.save()
# Make in-memory mtime match on-disk mtime.
self.i.mtime = os.path.getmtime(self.i.path)
self.i.store()
self._update(reset_mtime=False)
item = self.lib.items().get()
self.assertEqual(item.title, 'full')
class PrintTest(_common.TestCase):
def setUp(self):
super(PrintTest, self).setUp()
self.io.install()
def test_print_without_locale(self):
lang = os.environ.get('LANG')
if lang:
del os.environ['LANG']
try:
ui.print_(u'something')
except TypeError:
self.fail('TypeError during print')
finally:
if lang:
os.environ['LANG'] = lang
def test_print_with_invalid_locale(self):
old_lang = os.environ.get('LANG')
os.environ['LANG'] = ''
old_ctype = os.environ.get('LC_CTYPE')
os.environ['LC_CTYPE'] = 'UTF-8'
try:
ui.print_(u'something')
except ValueError:
self.fail('ValueError during print')
finally:
if old_lang:
os.environ['LANG'] = old_lang
else:
del os.environ['LANG']
if old_ctype:
os.environ['LC_CTYPE'] = old_ctype
else:
del os.environ['LC_CTYPE']
class AutotagTest(_common.TestCase):
def setUp(self):
super(AutotagTest, self).setUp()
self.io.install()
def _no_candidates_test(self, result):
task = importer.ImportTask(
'toppath',
'path',
[_common.item()],
)
task.set_candidates('artist', 'album', [], autotag.recommendation.none)
session = _common.import_session(cli=True)
res = session.choose_match(task)
self.assertEqual(res, result)
self.assertTrue('No match' in self.io.getoutput())
def test_choose_match_with_no_candidates_skip(self):
self.io.addinput('s')
self._no_candidates_test(importer.action.SKIP)
def test_choose_match_with_no_candidates_asis(self):
self.io.addinput('u')
self._no_candidates_test(importer.action.ASIS)
class ImportTest(_common.TestCase):
def test_quiet_timid_disallowed(self):
config['import']['quiet'] = True
config['import']['timid'] = True
self.assertRaises(ui.UserError, commands.import_files, None, [],
None)
class InputTest(_common.TestCase):
def setUp(self):
super(InputTest, self).setUp()
self.io.install()
def test_manual_search_gets_unicode(self):
self.io.addinput('\xc3\x82me')
self.io.addinput('\xc3\x82me')
artist, album = commands.manual_search(False)
self.assertEqual(artist, u'\xc2me')
self.assertEqual(album, u'\xc2me')
class ConfigTest(_common.TestCase):
def setUp(self):
super(ConfigTest, self).setUp()
self.io.install()
self.test_cmd = ui.Subcommand('test', help='test')
commands.default_commands.append(self.test_cmd)
def tearDown(self):
super(ConfigTest, self).tearDown()
commands.default_commands.pop()
def _run_main(self, args, config_yaml, func):
self.test_cmd.func = func
config_yaml = textwrap.dedent(config_yaml).strip()
if config_yaml:
config_data = yaml.load(config_yaml, Loader=confit.Loader)
config.set(config_data)
ui._raw_main(args + ['test'])
def test_paths_section_respected(self):
def func(lib, opts, args):
key, template = lib.path_formats[0]
self.assertEqual(key, 'x')
self.assertEqual(template.original, 'y')
self._run_main([], """
paths:
x: y
""", func)
def test_default_paths_preserved(self):
default_formats = ui.get_path_formats()
def func(lib, opts, args):
self.assertEqual(lib.path_formats[1:],
default_formats)
self._run_main([], """
paths:
x: y
""", func)
def test_nonexistent_config_file(self):
os.environ['BEETSCONFIG'] = '/xxxxx'
ui.main(['version'])
def test_nonexistant_db(self):
def func(lib, opts, args):
pass
with self.assertRaises(ui.UserError):
self._run_main([], """
library: /xxx/yyy/not/a/real/path
""", func)
def test_replacements_parsed(self):
def func(lib, opts, args):
replacements = lib.replacements
self.assertEqual(replacements, [(re.compile(ur'[xy]'), u'z')])
self._run_main([], """
replace:
'[xy]': z
""", func)
def test_multiple_replacements_parsed(self):
def func(lib, opts, args):
replacements = lib.replacements
self.assertEqual(replacements, [
(re.compile(ur'[xy]'), u'z'),
(re.compile(ur'foo'), u'bar'),
])
self._run_main([], """
replace:
'[xy]': z
foo: bar
""", func)
class ShowdiffTest(_common.TestCase):
def setUp(self):
super(ShowdiffTest, self).setUp()
self.io.install()
def test_showdiff_strings(self):
commands._showdiff('field', 'old', 'new')
out = self.io.getoutput()
self.assertTrue('field' in out)
def test_showdiff_identical(self):
commands._showdiff('field', 'old', 'old')
out = self.io.getoutput()
self.assertFalse('field' in out)
def test_showdiff_ints(self):
commands._showdiff('field', 2, 3)
out = self.io.getoutput()
self.assertTrue('field' in out)
def test_showdiff_ints_no_color(self):
config['color'] = False
commands._showdiff('field', 2, 3)
out = self.io.getoutput()
self.assertTrue('field' in out)
def test_showdiff_shows_both(self):
commands._showdiff('field', 'old', 'new')
out = self.io.getoutput()
self.assertTrue('old' in out)
self.assertTrue('new' in out)
def test_showdiff_floats_close_to_identical(self):
commands._showdiff('field', 1.999, 2.001)
out = self.io.getoutput()
self.assertFalse('field' in out)
def test_showdiff_floats_different(self):
commands._showdiff('field', 1.999, 4.001)
out = self.io.getoutput()
self.assertTrue('field' in out)
def test_showdiff_ints_colorizing_is_not_stringwise(self):
commands._showdiff('field', 222, 333)
complete_diff = self.io.getoutput().split()[1]
commands._showdiff('field', 222, 232)
partial_diff = self.io.getoutput().split()[1]
self.assertEqual(complete_diff, partial_diff)
class ShowChangeTest(_common.TestCase):
def setUp(self):
super(ShowChangeTest, self).setUp()
self.io.install()
self.items = [_common.item()]
self.items[0].track = 1
self.items[0].path = '/path/to/file.mp3'
self.info = autotag.AlbumInfo(
u'the album', u'album id', u'the artist', u'artist id', [
autotag.TrackInfo(u'the title', u'track id', index=1)
])
def _show_change(self, items=None, info=None,
cur_artist=u'the artist', cur_album=u'the album',
dist=0.1):
items = items or self.items
info = info or self.info
mapping = dict(zip(items, info.tracks))
config['color'] = False
album_dist = distance(items, info, mapping)
album_dist._penalties = {'album': [dist]}
commands.show_change(
cur_artist,
cur_album,
autotag.AlbumMatch(album_dist, info, mapping, set(), set()),
)
return self.io.getoutput().lower()
def test_null_change(self):
msg = self._show_change()
self.assertTrue('similarity: 90' in msg)
self.assertTrue('tagging:' in msg)
def test_album_data_change(self):
msg = self._show_change(cur_artist='another artist',
cur_album='another album')
self.assertTrue('correcting tags from:' in msg)
def test_item_data_change(self):
self.items[0].title = u'different'
msg = self._show_change()
self.assertTrue('different -> the title' in msg)
def test_item_data_change_with_unicode(self):
self.items[0].title = u'caf\xe9'
msg = self._show_change()
self.assertTrue(u'caf\xe9 -> the title' in msg.decode('utf8'))
def test_album_data_change_with_unicode(self):
msg = self._show_change(cur_artist=u'caf\xe9',
cur_album=u'another album')
self.assertTrue('correcting tags from:' in msg)
def test_item_data_change_title_missing(self):
self.items[0].title = u''
msg = re.sub(r' +', ' ', self._show_change())
self.assertTrue('file.mp3 -> the title' in msg)
def test_item_data_change_title_missing_with_unicode_filename(self):
self.items[0].title = u''
self.items[0].path = u'/path/to/caf\xe9.mp3'.encode('utf8')
msg = re.sub(r' +', ' ', self._show_change().decode('utf8'))
self.assertTrue(u'caf\xe9.mp3 -> the title' in msg
or u'caf.mp3 ->' in msg)
class PathFormatTest(_common.TestCase):
def test_custom_paths_prepend(self):
default_formats = ui.get_path_formats()
config['paths'] = {u'foo': u'bar'}
pf = ui.get_path_formats()
key, tmpl = pf[0]
self.assertEqual(key, 'foo')
self.assertEqual(tmpl.original, 'bar')
self.assertEqual(pf[1:], default_formats)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Subclass for httplib.HTTPSConnection with optional certificate name
verification, depending on libcloud.security settings.
"""
import os
import sys
import socket
import ssl
import base64
import warnings
import libcloud.security
from libcloud.utils.py3 import b
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import urlunquote
from libcloud.utils.py3 import match_hostname
from libcloud.utils.py3 import CertificateError
__all__ = [
'LibcloudBaseConnection',
'LibcloudHTTPConnection',
'LibcloudHTTPSConnection'
]
HTTP_PROXY_ENV_VARIABLE_NAME = 'http_proxy'
class LibcloudBaseConnection(object):
"""
Base connection class to inherit from.
Note: This class should not be instantiated directly.
"""
proxy_scheme = None
proxy_host = None
proxy_port = None
proxy_username = None
proxy_password = None
http_proxy_used = False
def set_http_proxy(self, proxy_url):
"""
Set an HTTP proxy which will be used with this connection.
:param proxy_url: Proxy URL, e.g. ``http://<hostname>:<port>`` without
authentication, or ``http://<username>:<password>@<hostname>:<port>``
for basic auth.
:type proxy_url: ``str``
"""
result = self._parse_proxy_url(proxy_url=proxy_url)
scheme = result[0]
host = result[1]
port = result[2]
username = result[3]
password = result[4]
self.proxy_scheme = scheme
self.proxy_host = host
self.proxy_port = port
self.proxy_username = username
self.proxy_password = password
self.http_proxy_used = True
self._setup_http_proxy()
def _parse_proxy_url(self, proxy_url):
"""
Parse and validate a proxy URL.
:param proxy_url: Proxy URL (e.g. http://hostname:3128)
:type proxy_url: ``str``
:rtype: ``tuple`` (``scheme``, ``hostname``, ``port``, ``username``, ``password``)
"""
parsed = urlparse.urlparse(proxy_url)
if parsed.scheme != 'http':
raise ValueError('Only http proxies are supported')
if not parsed.hostname or not parsed.port:
raise ValueError('proxy_url must be in the following format: '
'http://<proxy host>:<proxy port>')
proxy_scheme = parsed.scheme
proxy_host, proxy_port = parsed.hostname, parsed.port
netloc = parsed.netloc
if '@' in netloc:
username_password = netloc.split('@', 1)[0]
split = username_password.split(':', 1)
if len(split) < 2:
raise ValueError('URL is in an invalid format')
proxy_username, proxy_password = split[0], split[1]
else:
proxy_username = None
proxy_password = None
return (proxy_scheme, proxy_host, proxy_port, proxy_username,
proxy_password)
def _setup_http_proxy(self):
"""
Set up an HTTP proxy tunnel using the proxy attributes previously stored
on this connection by set_http_proxy().
"""
headers = {}
if self.proxy_username and self.proxy_password:
# Include authentication header
user_pass = '%s:%s' % (self.proxy_username, self.proxy_password)
encoded = base64.encodestring(b(urlunquote(user_pass))).strip()
auth_header = 'Basic %s' % (encoded.decode('utf-8'))
headers['Proxy-Authorization'] = auth_header
if hasattr(self, 'set_tunnel'):
# Python 2.7 and higher
# pylint: disable=no-member
self.set_tunnel(host=self.host, port=self.port, headers=headers)
elif hasattr(self, '_set_tunnel'):
# Python 2.6
# pylint: disable=no-member
self._set_tunnel(host=self.host, port=self.port, headers=headers)
else:
raise ValueError('Unsupported Python version')
self._set_hostport(host=self.proxy_host, port=self.proxy_port)
def _activate_http_proxy(self, sock):
self.sock = sock
self._tunnel() # pylint: disable=no-member
def _set_hostport(self, host, port):
"""
Backported from Python stdlib so Proxy support also works with
Python 3.4.
"""
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i + 1:])
except ValueError:
msg = "nonnumeric port: '%s'" % (host[i + 1:])
raise httplib.InvalidURL(msg)
host = host[:i]
else:
port = self.default_port # pylint: disable=no-member
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
self.host = host
self.port = port
class LibcloudHTTPConnection(httplib.HTTPConnection, LibcloudBaseConnection):
def __init__(self, *args, **kwargs):
# Support for HTTP proxy
proxy_url_env = os.environ.get(HTTP_PROXY_ENV_VARIABLE_NAME, None)
proxy_url = kwargs.pop('proxy_url', proxy_url_env)
super(LibcloudHTTPConnection, self).__init__(*args, **kwargs)
if proxy_url:
self.set_http_proxy(proxy_url=proxy_url)
class LibcloudHTTPSConnection(httplib.HTTPSConnection, LibcloudBaseConnection):
"""
LibcloudHTTPSConnection
Subclass of HTTPSConnection which verifies certificate names
if and only if CA certificates are available.
"""
verify = True # verify by default
ca_cert = None # no default CA Certificate
def __init__(self, *args, **kwargs):
"""
Constructor
"""
self._setup_verify()
# Support for HTTP proxy
proxy_url_env = os.environ.get(HTTP_PROXY_ENV_VARIABLE_NAME, None)
proxy_url = kwargs.pop('proxy_url', proxy_url_env)
super(LibcloudHTTPSConnection, self).__init__(*args, **kwargs)
if proxy_url:
self.set_http_proxy(proxy_url=proxy_url)
def _setup_verify(self):
"""
Set up whether SSL certificates should be verified.
Reads the security module's VERIFY_SSL_CERT setting, which determines
whether connect() performs certificate verification or simply runs the
inherited httplib.HTTPSConnection.connect().
"""
self.verify = libcloud.security.VERIFY_SSL_CERT
if self.verify:
self._setup_ca_cert()
else:
warnings.warn(libcloud.security.VERIFY_SSL_DISABLED_MSG)
def _setup_ca_cert(self):
"""
Set up CA certificates.
Searches CA_CERTS_PATH for existing certificate files and uses the
first match. Otherwise, raises an error because certificates are not
available.
"""
if not self.verify:
return
ca_certs_available = [cert
for cert in libcloud.security.CA_CERTS_PATH
if os.path.exists(cert) and os.path.isfile(cert)]
if ca_certs_available:
# use first available certificate
self.ca_cert = ca_certs_available[0]
else:
raise RuntimeError(
libcloud.security.CA_CERTS_UNAVAILABLE_ERROR_MSG)
def connect(self):
"""
Connect
Checks if verification is toggled; if not, just call
httplib.HTTPSConnection's connect
"""
if not self.verify:
return httplib.HTTPSConnection.connect(self)
# otherwise, create a connection and verify the hostname
# use socket.create_connection (in 2.6+) if possible
if getattr(socket, 'create_connection', None):
sock = socket.create_connection((self.host, self.port),
self.timeout)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
# Activate the HTTP proxy
if self.http_proxy_used:
self._activate_http_proxy(sock=sock)
self.sock = ssl.wrap_socket(sock,
self.key_file,
self.cert_file,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_cert,
ssl_version=libcloud.security.SSL_VERSION)
cert = self.sock.getpeercert()
try:
match_hostname(cert, self.host)
except CertificateError:
e = sys.exc_info()[1]
raise ssl.SSLError('Failed to verify hostname: %s' % (str(e)))
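# A minimal usage sketch (hypothetical values, not part of the original module).
# _parse_proxy_url() performs no network I/O, so this only exercises URL parsing:
if __name__ == '__main__':
    conn = LibcloudHTTPConnection('example.com', 80)
    parts = conn._parse_proxy_url('http://user:secret@proxy.local:3128')
    # parts -> ('http', 'proxy.local', 3128, 'user', 'secret')
    print(parts)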
|
|
import unittest
from mock import Mock, patch, call
from pumphouse import exceptions
from pumphouse import task
from pumphouse.tasks.network.nova import floating_ip
class TestFloatingIP(unittest.TestCase):
def setUp(self):
self.test_address = "10.0.0.1"
self.test_pool = "test-pool"
self.test_instance_uuid = "123"
self.test_net_id = "234"
self.floating_ip_info = {
"address": self.test_address,
"instance_uuid": self.test_instance_uuid,
"pool": self.test_pool
}
self.fixed_ip_info = {
"addr": self.test_address
}
self.fixed_ip_nic = {
"net-id": self.test_net_id,
"v4-fixed-ip": self.test_address
}
self.server_info = {
"id": self.test_instance_uuid
}
self.floating_ip = Mock()
self.floating_ip.instance_uuid = self.test_instance_uuid
self.floating_ip.to_dict.return_value = self.floating_ip_info
self.floating_ip_info_unassigned = self.floating_ip_info.copy()
self.floating_ip_info_unassigned.update(instance_uuid=None)
self.floating_ip_unassigned = Mock()
self.floating_ip_unassigned.instance_uuid = None
self.floating_ip_unassigned.to_dict.return_value = \
self.floating_ip_info_unassigned
self.cloud = Mock()
self.cloud.nova.floating_ips_bulk.find.return_value = self.floating_ip
self.cloud.nova.servers.add_floating_ip.return_value = None
self.src = Mock()
self.dst = Mock()
self.context = Mock()
self.context.dst_cloud = self.dst
self.context.src_cloud = self.src
self.context.store = {}
def side_effect(self, *args, **kwargs):
result = self.returns.pop(0)
if isinstance(result, Exception):
raise result
return result
class TestRetrieveFloatingIP(TestFloatingIP):
def test_execute(self):
retrieve_floating_ip = floating_ip.RetrieveFloatingIP(self.cloud)
self.assertIsInstance(retrieve_floating_ip, task.BaseCloudTask)
retrieve_floating_ip.execute(self.test_address)
self.cloud.nova.floating_ips_bulk.find.assert_called_once_with(
address=self.test_address)
class TestEnsureFloatingIPBulk(TestFloatingIP):
def test_execute(self):
ensure_fip_bulk = floating_ip.EnsureFloatingIPBulk(
self.cloud)
self.assertIsInstance(ensure_fip_bulk, task.BaseCloudTask)
fip = ensure_fip_bulk.execute(self.floating_ip_info)
self.cloud.nova.floating_ips_bulk.find.assert_called_once_with(
address=self.test_address)
self.assertEquals(fip, self.floating_ip_info)
def test_execute_not_found(self):
ensure_fip_bulk = floating_ip.EnsureFloatingIPBulk(
self.cloud)
self.cloud.nova.floating_ips_bulk.find.side_effect = \
[exceptions.nova_excs.NotFound(
"404 Not Found"),
self.floating_ip]
ensure_fip_bulk.execute(self.floating_ip_info)
self.assertEquals(
self.cloud.nova.floating_ips_bulk.find.call_count, 2)
self.cloud.nova.floating_ips_bulk.create.assert_called_once_with(
self.test_address, pool=self.test_pool)
def test_execute_not_created(self):
ensure_fip_bulk = floating_ip.EnsureFloatingIPBulk(
self.cloud)
self.cloud.nova.floating_ips_bulk.find.side_effect = \
exceptions.nova_excs.NotFound("404 Not Found")
with self.assertRaises(exceptions.nova_excs.NotFound):
ensure_fip_bulk.execute(self.floating_ip_info)
self.assertEquals(
self.cloud.nova.floating_ips_bulk.find.call_count, 2)
class TestEnsureFloatingIP(TestFloatingIP):
def test_execute(self):
ensure_floating_ip = floating_ip.EnsureFloatingIP(self.cloud)
self.assertIsInstance(ensure_floating_ip, task.BaseCloudTask)
fip = ensure_floating_ip.execute(self.server_info,
self.floating_ip_info,
self.fixed_ip_nic)
self.cloud.nova.floating_ips_bulk.find.assert_called_once_with(
address=self.test_address)
self.assertEquals(
self.floating_ip_info, fip)
def test_execute_not_found(self):
ensure_floating_ip = floating_ip.EnsureFloatingIP(self.cloud)
self.cloud.nova.floating_ips_bulk.find.side_effect = \
exceptions.nova_excs.NotFound("404 Not Found")
with self.assertRaises(exceptions.nova_excs.NotFound):
ensure_floating_ip.execute(self.server_info,
self.floating_ip_info,
self.fixed_ip_nic)
def test_execute_add_floating_ip(self):
ensure_floating_ip = floating_ip.EnsureFloatingIP(self.cloud)
self.returns = [self.floating_ip_unassigned,
self.floating_ip]
self.cloud.nova.floating_ips_bulk.find.side_effect = \
self.side_effect
ensure_floating_ip.execute(self.server_info,
self.floating_ip_info,
self.fixed_ip_nic)
self.cloud.nova.servers.add_floating_ip.assert_called_once_with(
self.test_instance_uuid, self.test_address, None)
def test_execute_bad_request(self):
ensure_floating_ip = floating_ip.EnsureFloatingIP(self.cloud)
self.cloud.nova.floating_ips_bulk.find.return_value = \
self.floating_ip_unassigned
self.cloud.nova.servers.add_floating_ip.side_effect = \
exceptions.nova_excs.BadRequest("400 Bad Request")
ensure_floating_ip.assigning_error_event = Mock()
with self.assertRaises(exceptions.TimeoutException):
ensure_floating_ip.execute(self.server_info,
self.floating_ip_info,
self.fixed_ip_nic)
ensure_floating_ip.assigning_error_event.assert_called_once_with(
self.test_address, self.test_instance_uuid)
def test_execute_duplicate_association(self):
"""Test duplicated association of single floating ip address
Simulate attempt to assign a server floating ip address that already
has value in "instance_uuid" field, i.e. already assigned to some
other virtual server.
"""
ensure_floating_ip = floating_ip.EnsureFloatingIP(self.cloud)
self.floating_ip_unassigned.instance_uuid = "999"
self.cloud.nova.floating_ips_bulk.find.return_value = \
self.floating_ip_unassigned
with self.assertRaises(exceptions.Conflict):
ensure_floating_ip.execute(self.server_info,
self.floating_ip_info,
self.fixed_ip_nic)
class TestMigrateFloatingIP(TestFloatingIP):
@patch.object(floating_ip, "EnsureFloatingIPBulk")
@patch.object(floating_ip, "RetrieveFloatingIP")
@patch("taskflow.patterns.linear_flow.Flow")
def test_migrate_floating_ip(self, flow_mock,
retrieve_floating_ip_mock,
ensure_floating_ip_bulk_mock):
floating_ip_retrieve = "floating-ip-{}-retrieve".format(
self.test_address)
flow = floating_ip.migrate_floating_ip(
self.context,
self.test_address)
self.assertEqual({floating_ip_retrieve: self.test_address},
self.context.store)
flow_mock.assert_called_once_with("migrate-floating-ip-{}"
.format(self.test_address))
self.assertEqual(flow.add.call_args_list,
[call(retrieve_floating_ip_mock()),
call(ensure_floating_ip_bulk_mock())])
class TestAssociateFloatingIPServer(TestFloatingIP):
@patch.object(floating_ip, "EnsureFloatingIP")
@patch("pumphouse.tasks.utils.SyncPoint")
@patch("taskflow.patterns.linear_flow.Flow")
def test_associate_floating_ip_server(self, flow_mock,
sync_point_mock,
ensure_floating_ip_mock):
fixed_ip_binding = "fixed-ip-{}".format(self.test_instance_uuid)
flow = floating_ip.associate_floating_ip_server(
self.context,
self.test_address,
self.fixed_ip_info,
self.test_instance_uuid)
flow_mock.assert_called_once_with("associate-floating-ip-{}-server-{}"
.format(self.test_address,
self.test_instance_uuid))
self.assertEqual(flow.add.call_args_list,
[call(sync_point_mock()),
call(ensure_floating_ip_mock())])
if __name__ == '__main__':
unittest.main()
|
|
'''
This script creates carbon pools in the year of loss (emitted-year carbon) and in 2000.
For the year 2000, it creates aboveground, belowground, deadwood, litter, and total carbon
pools (soil is created in a separate script but is brought in to create total carbon).
All but total carbon are to the extent of WHRC and mangrove biomass 2000, while total carbon
is to the extent of WHRC AGB, mangrove AGB, and soil C.
It also creates carbon pools for the year of loss/emissions, but only for pixels that had loss
and are within the model extent.
To do this, it adds the carbon (CO2) accumulated since 2000 to the carbon (biomass) 2000 stock,
so that the carbon emitted is the 2000 stock plus the gains until loss. (For Hansen loss+gain
pixels, only the portion of carbon accumulated before loss (lossyr-1) is included in the lost
carbon, not the entire carbon gain of the pixel.) Because the emissions-year carbon pools depend
on carbon removals, any time the removals model changes, the emissions-year carbon pools need to
be regenerated.
The carbon pools in 2000 are not used for the flux model at all; they are purely for illustrative
purposes. Only the emissions-year pools are used for the model.
Hence, if the flux model is updated to a new year, the carbon pools in loss years need to be
updated, but the carbon pools in 2000 only need to be updated if mangrove AGB, WHRC AGB, or
soil C are updated.
Which carbon pools are generated (2000 and/or loss pixels) is controlled through the command line
argument --carbon_pool_extent (-ce). This extent argument determines which AGC function is used
and how the outputs of the other pools' scripts are named.
Carbon pools in both 2000 and in the year of loss can be created in a single run by using
'2000,loss' or 'loss,2000'.
'''
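# Example invocation (illustrative only; the flag names follow the argparse definitions
# at the bottom of this script):
#   python mp_create_carbon_pools.py --model-type std --tile_id_list 00N_110E,00N_120E \
#       --carbon_pool_extent loss --no-upload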
import multiprocessing
import pandas as pd
from subprocess import Popen, PIPE, STDOUT, check_call
import datetime
import glob
import os
import argparse
from functools import partial
import sys
sys.path.append('../')
import constants_and_names as cn
import universal_util as uu
sys.path.append(os.path.join(cn.docker_app,'carbon_pools'))
import create_carbon_pools
def mp_create_carbon_pools(sensit_type, tile_id_list, carbon_pool_extent, run_date = None, no_upload = None,
save_intermediates = None):
os.chdir(cn.docker_base_dir)
if (sensit_type != 'std') & (carbon_pool_extent != 'loss'):
uu.exception_log(no_upload, "Sensitivity analysis run must use 'loss' extent")
# Checks the validity of the carbon_pool_extent argument
if (carbon_pool_extent not in ['loss', '2000', 'loss,2000', '2000,loss']):
uu.exception_log(no_upload, "Invalid carbon_pool_extent input. Please choose loss, 2000, loss,2000 or 2000,loss.")
# If a full model run is specified, the correct set of tiles for the particular script is listed.
# For runs generating carbon pools in emissions year, only tiles with model extent and loss are relevant
# because there must be loss pixels for emissions-year carbon pools to exist.
if (tile_id_list == 'all') & (carbon_pool_extent == 'loss'):
# Lists the tiles that have both model extent and loss pixels, both being necessary precursors for emissions
model_extent_tile_id_list = uu.tile_list_s3(cn.model_extent_dir, sensit_type=sensit_type)
loss_tile_id_list = uu.tile_list_s3(cn.loss_dir, sensit_type=sensit_type)
uu.print_log("Carbon pool at emissions year is combination of model_extent and loss tiles:")
tile_id_list = list(set(model_extent_tile_id_list).intersection(loss_tile_id_list))
# For runs generating carbon pools in 2000, all model extent tiles are relevant.
if (tile_id_list == 'all') & (carbon_pool_extent != 'loss'):
tile_id_list = uu.tile_list_s3(cn.model_extent_dir, sensit_type=sensit_type)
uu.print_log(tile_id_list)
uu.print_log("There are {} tiles to process".format(str(len(tile_id_list))) + "\n")
output_dir_list = []
output_pattern_list = []
# Output files and patterns and files to download if carbon pools for 2000 are being generated
if '2000' in carbon_pool_extent:
# List of output directories and output file name patterns
output_dir_list = output_dir_list + [cn.AGC_2000_dir, cn.BGC_2000_dir, cn.deadwood_2000_dir,
cn.litter_2000_dir, cn.soil_C_full_extent_2000_dir, cn.total_C_2000_dir]
output_pattern_list = output_pattern_list + [cn.pattern_AGC_2000, cn.pattern_BGC_2000, cn.pattern_deadwood_2000,
cn.pattern_litter_2000, cn.pattern_soil_C_full_extent_2000, cn.pattern_total_C_2000]
# Files to download for this script
download_dict = {
cn.removal_forest_type_dir: [cn.pattern_removal_forest_type],
cn.mangrove_biomass_2000_dir: [cn.pattern_mangrove_biomass_2000],
cn.cont_eco_dir: [cn.pattern_cont_eco_processed],
cn.bor_tem_trop_processed_dir: [cn.pattern_bor_tem_trop_processed],
cn.precip_processed_dir: [cn.pattern_precip],
cn.elevation_processed_dir: [cn.pattern_elevation],
cn.soil_C_full_extent_2000_dir: [cn.pattern_soil_C_full_extent_2000],
cn.gain_dir: [cn.pattern_gain],
}
# Adds the correct AGB tiles to the download dictionary depending on the model run
if sensit_type == 'biomass_swap':
download_dict[cn.JPL_processed_dir] = [cn.pattern_JPL_unmasked_processed]
else:
download_dict[cn.WHRC_biomass_2000_unmasked_dir] = [cn.pattern_WHRC_biomass_2000_unmasked]
# Adds the correct loss tile to the download dictionary depending on the model run
if sensit_type == 'legal_Amazon_loss':
download_dict[cn.Brazil_annual_loss_processed_dir] = [cn.pattern_Brazil_annual_loss_processed]
elif sensit_type == 'Mekong_loss':
download_dict[cn.Mekong_loss_processed_dir] = [cn.pattern_Mekong_loss_processed]
else:
download_dict[cn.loss_dir] = [cn.pattern_loss]
# Output files and patterns and files to download if carbon pools for the loss year are being generated
if 'loss' in carbon_pool_extent:
# List of output directories and output file name patterns
output_dir_list = output_dir_list + [cn.AGC_emis_year_dir, cn.BGC_emis_year_dir, cn.deadwood_emis_year_2000_dir,
cn.litter_emis_year_2000_dir, cn.soil_C_emis_year_2000_dir, cn.total_C_emis_year_dir]
output_pattern_list = output_pattern_list + [cn.pattern_AGC_emis_year, cn.pattern_BGC_emis_year, cn.pattern_deadwood_emis_year_2000,
cn.pattern_litter_emis_year_2000, cn.pattern_soil_C_emis_year_2000, cn.pattern_total_C_emis_year]
# Files to download for this script. This has the same items as the download_dict for 2000 pools plus
# other tiles.
download_dict = {
cn.removal_forest_type_dir: [cn.pattern_removal_forest_type],
cn.mangrove_biomass_2000_dir: [cn.pattern_mangrove_biomass_2000],
cn.cont_eco_dir: [cn.pattern_cont_eco_processed],
cn.bor_tem_trop_processed_dir: [cn.pattern_bor_tem_trop_processed],
cn.precip_processed_dir: [cn.pattern_precip],
cn.elevation_processed_dir: [cn.pattern_elevation],
cn.soil_C_full_extent_2000_dir: [cn.pattern_soil_C_full_extent_2000],
cn.gain_dir: [cn.pattern_gain],
cn.annual_gain_AGC_all_types_dir: [cn.pattern_annual_gain_AGC_all_types],
cn.cumul_gain_AGCO2_all_types_dir: [cn.pattern_cumul_gain_AGCO2_all_types]
}
# Adds the correct AGB tiles to the download dictionary depending on the model run
if sensit_type == 'biomass_swap':
download_dict[cn.JPL_processed_dir] = [cn.pattern_JPL_unmasked_processed]
else:
download_dict[cn.WHRC_biomass_2000_unmasked_dir] = [cn.pattern_WHRC_biomass_2000_unmasked]
# Adds the correct loss tile to the download dictionary depending on the model run
if sensit_type == 'legal_Amazon_loss':
download_dict[cn.Brazil_annual_loss_processed_dir] = [cn.pattern_Brazil_annual_loss_processed]
elif sensit_type == 'Mekong_loss':
download_dict[cn.Mekong_loss_processed_dir] = [cn.pattern_Mekong_loss_processed]
else:
download_dict[cn.loss_dir] = [cn.pattern_loss]
# Downloads input files or entire directories, depending on how many tiles are in the tile_id_list
for key, values in download_dict.items():
dir = key
pattern = values[0]
uu.s3_flexible_download(dir, pattern, cn.docker_base_dir, sensit_type, tile_id_list)
# If the model run isn't the standard one, the output directory and file names are changed
if sensit_type != 'std':
uu.print_log("Changing output directory and file name pattern based on sensitivity analysis")
output_dir_list = uu.alter_dirs(sensit_type, output_dir_list)
output_pattern_list = uu.alter_patterns(sensit_type, output_pattern_list)
else:
uu.print_log("Output directory list for standard model:", output_dir_list)
# A date can optionally be provided by the full model script or a run of this script.
# This replaces the date in constants_and_names.
# Only done if output upload is enabled.
if run_date is not None and no_upload is not None:
output_dir_list = uu.replace_output_dir_date(output_dir_list, run_date)
# Table with IPCC Wetland Supplement Table 4.4 default mangrove gain rates
cmd = ['aws', 's3', 'cp', os.path.join(cn.gain_spreadsheet_dir, cn.gain_spreadsheet), cn.docker_base_dir, '--no-sign-request']
uu.log_subprocess_output_full(cmd)
pd.options.mode.chained_assignment = None
# Imports the table with the ecozone-continent codes and the carbon gain rates
gain_table = pd.read_excel("{}".format(cn.gain_spreadsheet),
sheet_name="mangrove gain, for model")
# Removes rows with duplicate codes (N. and S. America for the same ecozone)
gain_table_simplified = gain_table.drop_duplicates(subset='gainEcoCon', keep='first')
mang_BGB_AGB_ratio = create_carbon_pools.mangrove_pool_ratio_dict(gain_table_simplified,
cn.below_to_above_trop_dry_mang,
cn.below_to_above_trop_wet_mang,
cn.below_to_above_subtrop_mang)
mang_deadwood_AGB_ratio = create_carbon_pools.mangrove_pool_ratio_dict(gain_table_simplified,
cn.deadwood_to_above_trop_dry_mang,
cn.deadwood_to_above_trop_wet_mang,
cn.deadwood_to_above_subtrop_mang)
mang_litter_AGB_ratio = create_carbon_pools.mangrove_pool_ratio_dict(gain_table_simplified,
cn.litter_to_above_trop_dry_mang,
cn.litter_to_above_trop_wet_mang,
cn.litter_to_above_subtrop_mang)
uu.print_log("Creating tiles of aboveground carbon in {}".format(carbon_pool_extent))
if cn.count == 96:
# More processors can be used for loss carbon pools than for 2000 carbon pools
if carbon_pool_extent == 'loss':
if sensit_type == 'biomass_swap':
processes = 16 # 16 processors = XXX GB peak
else:
processes = 20 # 25 processors > 750 GB peak; 16 = 560 GB peak;
# 18 = 570 GB peak; 19 = 620 GB peak; 20 = 690 GB peak (stops at 600, then increases slowly); 21 > 750 GB peak
else: # For 2000, or loss & 2000
processes = 15 # 12 processors = 490 GB peak (stops around 455, then increases slowly); 15 = XXX GB peak
else:
processes = 2
uu.print_log('AGC loss year max processors=', processes)
pool = multiprocessing.Pool(processes)
pool.map(partial(create_carbon_pools.create_AGC,
sensit_type=sensit_type, carbon_pool_extent=carbon_pool_extent, no_upload=no_upload), tile_id_list)
pool.close()
pool.join()
# # For single processor use
# for tile_id in tile_id_list:
# create_carbon_pools.create_AGC(tile_id, sensit_type, carbon_pool_extent, no_upload)
# If no_upload flag is not activated (by choice or by lack of AWS credentials), output is uploaded
if not no_upload:
if carbon_pool_extent in ['loss', '2000']:
uu.upload_final_set(output_dir_list[0], output_pattern_list[0])
else:
uu.upload_final_set(output_dir_list[0], output_pattern_list[0])
uu.upload_final_set(output_dir_list[6], output_pattern_list[6])
uu.check_storage()
if not save_intermediates:
uu.print_log(":::::Freeing up memory for belowground carbon creation; deleting unneeded tiles")
tiles_to_delete = glob.glob('*{}*tif'.format(cn.pattern_annual_gain_AGC_all_types))
tiles_to_delete.extend(glob.glob('*{}*tif'.format(cn.pattern_cumul_gain_AGCO2_all_types)))
uu.print_log(" Deleting", len(tiles_to_delete), "tiles...")
for tile_to_delete in tiles_to_delete:
os.remove(tile_to_delete)
uu.print_log(":::::Deleted unneeded tiles")
uu.check_storage()
uu.print_log("Creating tiles of belowground carbon in {}".format(carbon_pool_extent))
# Creates a single filename pattern to pass to the multiprocessor call
if cn.count == 96:
# More processors can be used for loss carbon pools than for 2000 carbon pools
if carbon_pool_extent == 'loss':
if sensit_type == 'biomass_swap':
processes = 30 # 30 processors = XXX GB peak
else:
processes = 39 # 20 processors = 370 GB peak; 32 = 590 GB peak; 36 = 670 GB peak; 38 = 690 GB peak; 39 = XXX GB peak
else: # For 2000, or loss & 2000
processes = 30 # 20 processors = 370 GB peak; 25 = 460 GB peak; 30 = XXX GB peak
else:
processes = 2
uu.print_log('BGC max processors=', processes)
pool = multiprocessing.Pool(processes)
pool.map(partial(create_carbon_pools.create_BGC, mang_BGB_AGB_ratio=mang_BGB_AGB_ratio,
carbon_pool_extent=carbon_pool_extent,
sensit_type=sensit_type, no_upload=no_upload), tile_id_list)
pool.close()
pool.join()
# # For single processor use
# for tile_id in tile_id_list:
# create_carbon_pools.create_BGC(tile_id, mang_BGB_AGB_ratio, carbon_pool_extent, sensit_type, no_upload)
# If no_upload flag is not activated (by choice or by lack of AWS credentials), output is uploaded
if not no_upload:
if carbon_pool_extent in ['loss', '2000']:
uu.upload_final_set(output_dir_list[1], output_pattern_list[1])
else:
uu.upload_final_set(output_dir_list[1], output_pattern_list[1])
uu.upload_final_set(output_dir_list[7], output_pattern_list[7])
uu.check_storage()
# 825 GB isn't enough space to create deadwood and litter 2000 while having AGC and BGC 2000 on.
# Thus must delete AGC, BGC, and soil C 2000 for creation of deadwood and litter, then copy them back to spot machine
# for total C 2000 calculation.
if '2000' in carbon_pool_extent:
uu.print_log(":::::Freeing up memory for deadwood and litter carbon 2000 creation; deleting unneeded tiles")
tiles_to_delete = []
tiles_to_delete.extend(glob.glob('*{}*tif'.format(cn.pattern_BGC_2000)))
tiles_to_delete.extend(glob.glob('*{}*tif'.format(cn.pattern_removal_forest_type)))
tiles_to_delete.extend(glob.glob('*{}*tif'.format(cn.pattern_gain)))
tiles_to_delete.extend(glob.glob('*{}*tif'.format(cn.pattern_soil_C_full_extent_2000)))
uu.print_log(" Deleting", len(tiles_to_delete), "tiles...")
for tile_to_delete in tiles_to_delete:
os.remove(tile_to_delete)
uu.print_log(":::::Deleted unneeded tiles")
uu.check_storage()
uu.print_log("Creating tiles of deadwood and litter carbon in {}".format(carbon_pool_extent))
if cn.count == 96:
# More processors can be used for loss carbon pools than for 2000 carbon pools
if carbon_pool_extent == 'loss':
if sensit_type == 'biomass_swap':
processes = 10 # 10 processors = XXX GB peak
else:
processes = 15 # 32 processors = >750 GB peak; 24 > 750 GB peak; 14 = 685 GB peak (stops around 600, then increases very very slowly); 15 = 700 GB peak
else: # For 2000, or loss & 2000
### Note: deleted precip, elevation, and WHRC AGB tiles at equatorial latitudes as deadwood and litter were produced.
### There wouldn't have been enough room for all deadwood and litter otherwise.
### For example, when deadwood and litter generation started getting up to around 50N, I deleted
### 00N precip, elevation, and WHRC AGB. I deleted all of those from 30N to 20S.
processes = 16 # 7 processors = 320 GB peak; 14 = 620 GB peak; 16 = XXX GB peak
else:
processes = 2
uu.print_log('Deadwood and litter max processors=', processes)
pool = multiprocessing.Pool(processes)
pool.map(
partial(create_carbon_pools.create_deadwood_litter, mang_deadwood_AGB_ratio=mang_deadwood_AGB_ratio,
mang_litter_AGB_ratio=mang_litter_AGB_ratio,
carbon_pool_extent=carbon_pool_extent,
sensit_type=sensit_type, no_upload=no_upload), tile_id_list)
pool.close()
pool.join()
# # For single processor use
# for tile_id in tile_id_list:
# create_carbon_pools.create_deadwood_litter(tile_id, mang_deadwood_AGB_ratio, mang_litter_AGB_ratio, carbon_pool_extent, sensit_type, no_upload)
# If no_upload flag is not activated (by choice or by lack of AWS credentials), output is uploaded
if not no_upload:
if carbon_pool_extent in ['loss', '2000']:
uu.upload_final_set(output_dir_list[2], output_pattern_list[2]) # deadwood
uu.upload_final_set(output_dir_list[3], output_pattern_list[3]) # litter
else:
uu.upload_final_set(output_dir_list[2], output_pattern_list[2]) # deadwood
uu.upload_final_set(output_dir_list[3], output_pattern_list[3]) # litter
uu.upload_final_set(output_dir_list[8], output_pattern_list[8]) # deadwood
uu.upload_final_set(output_dir_list[9], output_pattern_list[9]) # litter
uu.check_storage()
if not save_intermediates:
uu.print_log(":::::Freeing up memory for soil and total carbon creation; deleting unneeded tiles")
tiles_to_delete = []
tiles_to_delete.extend(glob.glob('*{}*tif'.format(cn.pattern_elevation)))
tiles_to_delete.extend(glob.glob('*{}*tif'.format(cn.pattern_precip)))
tiles_to_delete.extend(glob.glob('*{}*tif'.format(cn.pattern_WHRC_biomass_2000_unmasked)))
tiles_to_delete.extend(glob.glob('*{}*tif'.format(cn.pattern_JPL_unmasked_processed)))
tiles_to_delete.extend(glob.glob('*{}*tif'.format(cn.pattern_cont_eco_processed)))
uu.print_log(" Deleting", len(tiles_to_delete), "tiles...")
for tile_to_delete in tiles_to_delete:
os.remove(tile_to_delete)
uu.print_log(":::::Deleted unneeded tiles")
uu.check_storage()
if 'loss' in carbon_pool_extent:
uu.print_log("Creating tiles of soil carbon in loss extent")
# If pools in 2000 weren't generated, soil carbon in emissions extent is 4.
# If pools in 2000 were generated, soil carbon in emissions extent is 10.
if '2000' not in carbon_pool_extent:
pattern = output_pattern_list[4]
else:
pattern = output_pattern_list[10]
if cn.count == 96:
# More processors can be used for loss carbon pools than for 2000 carbon pools
if carbon_pool_extent == 'loss':
if sensit_type == 'biomass_swap':
processes = 36 # 36 processors = XXX GB peak
else:
processes = 44 # 24 processors = 360 GB peak; 32 = 490 GB peak; 38 = 580 GB peak; 42 = 640 GB peak; 44 = XXX GB peak
else: # For 2000, or loss & 2000
processes = 12 # 12 processors = XXX GB peak
else:
processes = 2
uu.print_log('Soil carbon loss year max processors=', processes)
pool = multiprocessing.Pool(processes)
pool.map(partial(create_carbon_pools.create_soil_emis_extent, pattern=pattern,
sensit_type=sensit_type, no_upload=no_upload), tile_id_list)
pool.close()
pool.join()
# # For single processor use
# for tile_id in tile_id_list:
# create_carbon_pools.create_soil_emis_extent(tile_id, pattern, sensit_type, no_upload)
# If no_upload flag is not activated (by choice or by lack of AWS credentials), output is uploaded
if not no_upload:
# If pools in 2000 weren't generated, soil carbon in emissions extent is 4.
# If pools in 2000 were generated, soil carbon in emissions extent is 10.
if '2000' not in carbon_pool_extent:
uu.upload_final_set(output_dir_list[4], output_pattern_list[4])
else:
uu.upload_final_set(output_dir_list[10], output_pattern_list[10])
uu.check_storage()
if '2000' in carbon_pool_extent:
uu.print_log("Skipping soil for 2000 carbon pool calculation. Soil carbon in 2000 already created.")
uu.check_storage()
# 825 GB isn't enough space to create deadwood and litter 2000 while having AGC and BGC 2000 on.
# Thus must delete BGC and soil C 2000 for creation of deadwood and litter, then copy them back to spot machine
# for total C 2000 calculation.
if '2000' in carbon_pool_extent:
# Files to download for total C 2000. Previously deleted to save space
download_dict = {
cn.BGC_2000_dir: [cn.pattern_BGC_2000],
cn.soil_C_full_extent_2000_dir: [cn.pattern_soil_C_full_extent_2000]
}
for key, values in download_dict.items():
dir = key
pattern = values[0]
uu.s3_flexible_download(dir, pattern, cn.docker_base_dir, sensit_type, tile_id_list)
uu.print_log("Creating tiles of total carbon")
if cn.count == 96:
# More processors can be used for loss carbon pools than for 2000 carbon pools
if carbon_pool_extent == 'loss':
if sensit_type == 'biomass_swap':
processes = 14 # 14 processors = XXX GB peak
else:
processes = 19 # 20 processors > 750 GB peak (by just a bit, I think); 15 = 550 GB peak; 18 = 660 GB peak; 19 = XXX GB peak
else: # For 2000, or loss & 2000
processes = 12 # 12 processors = XXX GB peak
else:
processes = 2
uu.print_log('Total carbon loss year max processors=', processes)
pool = multiprocessing.Pool(processes)
pool.map(partial(create_carbon_pools.create_total_C, carbon_pool_extent=carbon_pool_extent,
sensit_type=sensit_type, no_upload=no_upload), tile_id_list)
pool.close()
pool.join()
# # For single processor use
# for tile_id in tile_id_list:
# create_carbon_pools.create_total_C(tile_id, carbon_pool_extent, sensit_type, no_upload)
# If no_upload flag is not activated (by choice or by lack of AWS credentials), output is uploaded
if not no_upload:
if carbon_pool_extent in ['loss', '2000']:
uu.upload_final_set(output_dir_list[5], output_pattern_list[5])
else:
uu.upload_final_set(output_dir_list[5], output_pattern_list[5])
uu.upload_final_set(output_dir_list[11], output_pattern_list[11])
uu.check_storage()
if __name__ == '__main__':
# The argument for what kind of model run is being done: standard conditions or a sensitivity analysis run
parser = argparse.ArgumentParser(
description='Creates tiles of carbon pool densities in the year of loss or in 2000')
parser.add_argument('--model-type', '-t', required=True,
help='{}'.format(cn.model_type_arg_help))
parser.add_argument('--tile_id_list', '-l', required=True,
help='List of tile ids to use in the model. Should be of form 00N_110E or 00N_110E,00N_120E or all.')
parser.add_argument('--carbon_pool_extent', '-ce', required=True,
help='Extent over which carbon pools should be calculated: loss, 2000, loss,2000, or 2000,loss')
parser.add_argument('--run-date', '-d', required=False,
help='Date of run. Must be format YYYYMMDD.')
parser.add_argument('--no-upload', '-nu', action='store_true',
help='Disables uploading of outputs to s3')
parser.add_argument('--save-intermediates', '-si', action='store_true',
help='Saves intermediate model outputs rather than deleting them to save storage')
args = parser.parse_args()
sensit_type = args.model_type
tile_id_list = args.tile_id_list
carbon_pool_extent = args.carbon_pool_extent  # Tells the pool creation functions whether to calculate carbon pools as of 2000, as of the year of loss (loss pixels only), or both
run_date = args.run_date
no_upload = args.no_upload
save_intermediates = args.save_intermediates
# Disables upload to s3 if no AWS credentials are found in environment
if not uu.check_aws_creds():
no_upload = True
# Create the output log
uu.initiate_log(tile_id_list=tile_id_list, sensit_type=sensit_type, run_date=run_date,
carbon_pool_extent=carbon_pool_extent, no_upload=no_upload, save_intermediates=save_intermediates)
# Checks whether the sensitivity analysis and tile_id_list arguments are valid
uu.check_sensit_type(sensit_type)
tile_id_list = uu.tile_id_list_check(tile_id_list)
mp_create_carbon_pools(sensit_type=sensit_type, tile_id_list=tile_id_list,
carbon_pool_extent=carbon_pool_extent, run_date=run_date, no_upload=no_upload,
save_intermediates=save_intermediates)
|
|
#!/usr/bin/env python
from __future__ import print_function
from builtins import str
from builtins import range
import tables
import os
import sys
def splitH5all(basename1,basename2,size,start,finaltime,stride):
print("==================")
print(" Extracting")
print("==================")
for proc in range(0,size):
print("Processor", proc)
splitH5single(basename1,basename2,proc,start,finaltime,stride)
print("==================")
print(" Composing")
print("==================")
H5toXMF("solution",size,start,finaltime,stride)
def splitH5single(basename1,basename2,proc,start,finaltime,stride):
# Loop over entries and put in appropriate file
filename=basename1+str(proc)+".h5"
print(" Open:",filename)
f1 = tables.open_file(filename)
filename=basename2+str(proc)+".h5"
print(" Open:",filename)
f2 = tables.open_file(filename)
print(" Step:", end=' ')
for step in range(start,finaltime+1,stride):
print(step, end=' ')
sys.stdout.flush()
filename="sol.p"+str(proc)+"."+str(step)+".h5"
hdfFile= tables.open_file(filename,
mode = "w",
title = filename+" Data")
name = "elementsSpatial_Domain"+str(step)
hdfFile.createArray("/","elements",f1.get_node("/",name)[:])
name = "nodesSpatial_Domain"+str(step)
hdfFile.createArray("/","nodes",f1.get_node("/",name)[:])
name = "u"+str(step)
hdfFile.createArray("/","u",f1.get_node("/",name)[:])
name = "v"+str(step)
hdfFile.createArray("/","v",f1.get_node("/",name)[:])
name = "w"+str(step)
hdfFile.createArray("/","w",f1.get_node("/",name)[:])
name = "p"+str(step)
hdfFile.createArray("/","p",f1.get_node("/",name)[:])
name = "phid"+str(step)
hdfFile.createArray("/","phid",f2.get_node("/",name)[:])
hdfFile.close()
f1.close()
f2.close()
print("finished")
def H5toXMF(basename,size,start,finaltime,stride):
# Open XMF files
t1=" "
t2=t1+t1
t3=t2+t1
t4=t3+t1
t5=t4+t1
XMFfile1 = open(basename+".xmf","w")
XMFfile1.write('<?xml version="1.0" ?>'+"\n")
XMFfile1.write('<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>'+"\n")
XMFfile1.write('<Xdmf Version="2.0" xmlns:xi="http://www.w3.org/2001/XInclude">'+"\n")
XMFfile1.write(t1 + '<Domain>'+"\n")
XMFfile1.write(t1 + '<Grid GridType="Collection" CollectionType="Temporal">'+"\n")
string=""
print(" Step:", end=' ')
for step in range(start,finaltime+1,stride):
print(step, end=' ')
sys.stdout.flush()
filename = basename+"."+str(step)+".h5"
hdfFile= tables.open_file(filename,
mode = "w",
title = filename+" Data")
XMFfile2 = open(basename+"."+str(step)+".xmf","w")
XMFfile2.write('<?xml version="1.0" ?>'+"\n")
XMFfile2.write('<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>'+"\n")
XMFfile2.write('<Xdmf Version="2.0" xmlns:xi="http://www.w3.org/2001/XInclude">'+"\n")
XMFfile2.write(t1 + '<Domain>'+"\n")
string = t2 + '<Grid GridType="Collection" CollectionType="Spatial">'+"\n"
string = string + t3 + '<Time Value="'+str(step)+'" />'+"\n"
for proc in range(0,size):
            group = hdfFile.create_group(hdfFile.root, 'p'+str(proc))
solname="sol.p"+str(proc)+"."+str(step)+".h5"
f1 = tables.open_file(solname)
string = string + t3+'<Grid GridType="Uniform">'+"\n"
data=f1.get_node("/","elements")[:]
hdfFile.createArray(group,"elements",data)
string = string + t4 + '<Topology NumberOfElements="' +str(len(data))+ '" Type="Tetrahedron">'+"\n"
string = string + t5 + '<DataItem DataType="Int" Dimensions="' +str(len(data))+ ' 4" Format="HDF">'+"\n"
string = string + t5 + filename + ':/p'+str(proc)+'/elements'+"\n"
string = string + t5 +'</DataItem>'+"\n"
string = string + t4 + '</Topology>'+"\n"
data=f1.get_node("/","nodes")[:]
hdfFile.createArray(group,"nodes",data)
string = string + t4 + '<Geometry Type="XYZ">'+"\n"
string = string + t5 + '<DataItem DataType="Float" Dimensions="' +str(len(data))+ ' 3" Format="HDF" Precision="8">' + "\n"
string = string + t5 + filename + ':/p'+str(proc)+'/nodes'+"\n"
string = string + t5 + '</DataItem>'+"\n"
string = string + t4 + '</Geometry>'+"\n"
data=f1.get_node("/","u")[:]
hdfFile.createArray(group,"u",data)
string = string + t4 + '<Attribute AttributeType="Scalar" Center="Node" Name="u">'+"\n"
string = string + t5 + '<DataItem DataType="Float" Dimensions="' +str(len(data))+ '" Format="HDF" Precision="8">' + "\n"
string = string + t5 + filename + ':/p'+str(proc)+'/u'+"\n"
string = string + t5 + '</DataItem>'+"\n"
string = string + t4 + '</Attribute>'+"\n"
data=f1.get_node("/","v")[:]
hdfFile.createArray(group,"v",data)
string = string + t4 + '<Attribute AttributeType="Scalar" Center="Node" Name="v">'+"\n"
string = string + t5 +'<DataItem DataType="Float" Dimensions="' +str(len(data))+ '" Format="HDF" Precision="8">' + "\n"
string = string + t5 + filename + ':/p'+str(proc)+'/v'+"\n"
string = string + t5 + '</DataItem>'+"\n"
string = string + t4 + '</Attribute>'+"\n"
data=f1.get_node("/","w")[:]
hdfFile.createArray(group,"w",data)
string = string + t4 + '<Attribute AttributeType="Scalar" Center="Node" Name="w">'+"\n"
string = string + t5 + '<DataItem DataType="Float" Dimensions="' +str(len(data))+ '" Format="HDF" Precision="8">' + "\n"
string = string + t5 + filename + ':/p'+str(proc)+'/w'+"\n"
string = string + t5 + '</DataItem>'+"\n"
string = string + t4 + '</Attribute>'+"\n"
data=f1.get_node("/","p")[:]
hdfFile.createArray(group,"p",data)
string = string + t4 + '<Attribute AttributeType="Scalar" Center="Node" Name="p">'+"\n"
string = string + t5 + '<DataItem DataType="Float" Dimensions="' +str(len(data))+ '" Format="HDF" Precision="8">' + "\n"
string = string + t5 + filename + ':/p'+str(proc)+'/p'+"\n"
string = string + t5 + '</DataItem>'+"\n"
string = string + t4 + '</Attribute>'+"\n"
data=f1.get_node("/","phid")[:]
hdfFile.createArray(group,"phid",data)
string = string + t4 + '<Attribute AttributeType="Scalar" Center="Node" Name="phid">'+"\n"
string = string + t5 + '<DataItem DataType="Float" Dimensions="' +str(len(data))+ '" Format="HDF" Precision="8">' + "\n"
string = string + t5 + filename + ':/p'+str(proc)+'/phid'+"\n"
string = string + t5 + '</DataItem>'+"\n"
string = string + t4 + '</Attribute>'+"\n"
string = string + t3+'</Grid>'+"\n"
f1.close()
os.remove(solname)
string = string + t2 + '</Grid>'+"\n"
XMFfile1.write(string)
XMFfile2.write(string)
XMFfile2.write(t1 + '</Domain>'+"\n")
XMFfile2.write('</Xdmf>'+"\n")
XMFfile2.close()
hdfFile.close()
XMFfile1.write(t1 + '</Grid>'+"\n")
XMFfile1.write(t1 + '</Domain>'+"\n")
XMFfile1.write('</Xdmf>'+"\n")
XMFfile1.close()
if __name__ == '__main__':
from optparse import OptionParser
usage = ""
parser = OptionParser(usage=usage)
parser.add_option("-n","--size",
help="number of processors for run",
action="store",
type="int",
dest="size",
default=1)
parser.add_option("-s","--stride",
help="stride for solution output",
action="store",
type="int",
dest="stride",
default=0)
parser.add_option("-t","--finaltime",
help="finaltime",
action="store",
type="int",
dest="finaltime",
default=1000)
parser.add_option("-f","--filebase_flow",
help="base name for storage files",
action="store",
type="string",
dest="filebase1",
default="twp_navier_stokes_p")
parser.add_option("-p","--filebase_phi",
help="base name for storage files",
action="store",
type="string",
dest="filebase2",
default="redist_p")
(opts,args) = parser.parse_args()
start = 0
if opts.stride == 0 :
start = opts.finaltime
opts.stride = 1
if (opts.size >0) :
splitH5all(opts.filebase1,opts.filebase2,opts.size,start,opts.finaltime,opts.stride)
else :
splitH5single(opts.filebase1,opts.filebase2,-opts.size,start,opts.finaltime,opts.stride)
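# Example invocation (a sketch, assuming this script is saved as splitH5.py and the
# per-processor HDF5 files follow the default base names above): extract the output of
# a 4-processor run for steps 0, 10, 20, ... up to 100 and compose the matching XDMF
# metadata:
#
#     python splitH5.py -n 4 -t 100 -s 10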
|
|
try:
from matplotlib.widgets import RectangleSelector
except ImportError:
RectangleSelector = object
print("Could not import matplotlib -- skimage.viewer not available.")
from skimage.viewer.canvastools.base import CanvasToolBase
from skimage.viewer.canvastools.base import ToolHandles
__all__ = ['RectangleTool']
class RectangleTool(CanvasToolBase, RectangleSelector):
"""Widget for selecting a rectangular region in a plot.
After making the desired selection, press "Enter" to accept the selection
and call the `on_enter` callback function.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Matplotlib axes where tool is displayed.
on_move : function
Function called whenever a control handle is moved.
This function must accept the rectangle extents as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
rect_props : dict
Properties for :class:`matplotlib.patches.Rectangle`. This class
redefines defaults in :class:`matplotlib.widgets.RectangleSelector`.
Attributes
----------
extents : tuple
Rectangle extents: (xmin, xmax, ymin, ymax).
"""
def __init__(self, ax, on_move=None, on_release=None, on_enter=None,
maxdist=10, rect_props=None):
CanvasToolBase.__init__(self, ax, on_move=on_move,
on_enter=on_enter, on_release=on_release)
props = dict(edgecolor=None, facecolor='r', alpha=0.15)
props.update(rect_props if rect_props is not None else {})
if props['edgecolor'] is None:
props['edgecolor'] = props['facecolor']
RectangleSelector.__init__(self, ax, lambda *args: None,
rectprops=props,
useblit=self.useblit)
# Alias rectangle attribute, which is initialized in RectangleSelector.
self._rect = self.to_draw
self._rect.set_animated(True)
self.maxdist = maxdist
self.active_handle = None
self._extents_on_press = None
if on_enter is None:
def on_enter(extents):
print("(xmin=%.3g, xmax=%.3g, ymin=%.3g, ymax=%.3g)" % extents)
self.callback_on_enter = on_enter
props = dict(mec=props['edgecolor'])
self._corner_order = ['NW', 'NE', 'SE', 'SW']
xc, yc = self.corners
self._corner_handles = ToolHandles(ax, xc, yc, marker_props=props)
self._edge_order = ['W', 'N', 'E', 'S']
xe, ye = self.edge_centers
self._edge_handles = ToolHandles(ax, xe, ye, marker='s',
marker_props=props)
self._artists = [self._rect,
self._corner_handles.artist,
self._edge_handles.artist]
@property
def _rect_bbox(self):
x0 = self._rect.get_x()
y0 = self._rect.get_y()
width = self._rect.get_width()
height = self._rect.get_height()
return x0, y0, width, height
@property
def corners(self):
"""Corners of rectangle from lower left, moving clockwise."""
x0, y0, width, height = self._rect_bbox
xc = x0, x0 + width, x0 + width, x0
yc = y0, y0, y0 + height, y0 + height
return xc, yc
@property
def edge_centers(self):
"""Midpoint of rectangle edges from left, moving clockwise."""
x0, y0, width, height = self._rect_bbox
w = width / 2.
h = height / 2.
xe = x0, x0 + w, x0 + width, x0 + w
ye = y0 + h, y0, y0 + h, y0 + height
return xe, ye
@property
def extents(self):
"""Return (xmin, xmax, ymin, ymax)."""
x0, y0, width, height = self._rect_bbox
xmin, xmax = sorted([x0, x0 + width])
ymin, ymax = sorted([y0, y0 + height])
return xmin, xmax, ymin, ymax
@extents.setter
def extents(self, extents):
x1, x2, y1, y2 = extents
xmin, xmax = sorted([x1, x2])
ymin, ymax = sorted([y1, y2])
# Update displayed rectangle
self._rect.set_x(xmin)
self._rect.set_y(ymin)
self._rect.set_width(xmax - xmin)
self._rect.set_height(ymax - ymin)
# Update displayed handles
self._corner_handles.set_data(*self.corners)
self._edge_handles.set_data(*self.edge_centers)
self.set_visible(True)
self.redraw()
def release(self, event):
if event.button != 1:
return
if not self.ax.in_axes(event):
self.eventpress = None
return
RectangleSelector.release(self, event)
self._extents_on_press = None
# Undo hiding of rectangle and redraw.
self.set_visible(True)
self.redraw()
self.callback_on_release(self.geometry)
def press(self, event):
if event.button != 1 or not self.ax.in_axes(event):
return
self._set_active_handle(event)
if self.active_handle is None:
# Clear previous rectangle before drawing new rectangle.
self.set_visible(False)
self.redraw()
self.set_visible(True)
RectangleSelector.press(self, event)
def _set_active_handle(self, event):
"""Set active handle based on the location of the mouse event"""
# Note: event.xdata/ydata in data coordinates, event.x/y in pixels
c_idx, c_dist = self._corner_handles.closest(event.x, event.y)
e_idx, e_dist = self._edge_handles.closest(event.x, event.y)
# Set active handle as closest handle, if mouse click is close enough.
if c_dist > self.maxdist and e_dist > self.maxdist:
self.active_handle = None
return
elif c_dist < e_dist:
self.active_handle = self._corner_order[c_idx]
else:
self.active_handle = self._edge_order[e_idx]
# Save coordinates of rectangle at the start of handle movement.
x1, x2, y1, y2 = self.extents
# Switch variables so that only x2 and/or y2 are updated on move.
if self.active_handle in ['W', 'SW', 'NW']:
x1, x2 = x2, event.xdata
if self.active_handle in ['N', 'NW', 'NE']:
y1, y2 = y2, event.ydata
self._extents_on_press = x1, x2, y1, y2
def onmove(self, event):
if self.eventpress is None or not self.ax.in_axes(event):
return
if self.active_handle is None:
# New rectangle
x1 = self.eventpress.xdata
y1 = self.eventpress.ydata
x2, y2 = event.xdata, event.ydata
else:
x1, x2, y1, y2 = self._extents_on_press
if self.active_handle in ['E', 'W'] + self._corner_order:
x2 = event.xdata
if self.active_handle in ['N', 'S'] + self._corner_order:
y2 = event.ydata
self.extents = (x1, x2, y1, y2)
self.callback_on_move(self.geometry)
@property
def geometry(self):
return self.extents
if __name__ == '__main__':
import matplotlib.pyplot as plt
from skimage import data
f, ax = plt.subplots()
ax.imshow(data.camera(), interpolation='nearest')
rect_tool = RectangleTool(ax)
plt.show()
print("Final selection:")
rect_tool.callback_on_enter(rect_tool.extents)
|
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Fields: basic data structures that make up parts of packets.
"""
import struct,copy,socket
from .config import conf
from .volatile import *
from .data import *
from .utils import *
from .base_classes import BasePacket,Gen,Net
############
## Fields ##
############
class Field:
"""For more informations on how this work, please refer to
http://www.secdev.org/projects/scapy/files/scapydoc.pdf
chapter ``Adding a New Field''"""
islist=0
holds_packets=0
def __init__(self, name, default, fmt="H"):
self.name = name
if fmt[0] in "@=<>!":
self.fmt = fmt
else:
self.fmt = "!"+fmt
self.default = self.any2i(None,default)
self.sz = struct.calcsize(self.fmt)
self.owners = []
        self.offset = 0
    def get_size_bytes(self):
        if hasattr(self, 'size'):
            return 0  # bitfield
else:
return self.sz
def register_owner(self, cls):
self.owners.append(cls)
def i2len(self, pkt, x):
"""Convert internal value to a length usable by a FieldLenField"""
return self.sz
def i2count(self, pkt, x):
"""Convert internal value to a number of elements usable by a FieldLenField.
Always 1 except for list fields"""
return 1
def i2b(self, pkt, x):
"""Convert internal value to internal value"""
if type(x) is str:
x = bytes([ ord(i) for i in x ])
return x
def h2i(self, pkt, x):
"""Convert human value to internal value"""
if type(x) is str:
x = bytes([ ord(i) for i in x ])
return x
def i2h(self, pkt, x):
"""Convert internal value to human value"""
return x
def m2i(self, pkt, x):
"""Convert machine value to internal value"""
return x
def i2m(self, pkt, x):
"""Convert internal value to machine value"""
if x is None:
x = 0
return x
def any2i(self, pkt, x):
"""Try to understand the most input values possible and make an internal value from them"""
return self.h2i(pkt, x)
def i2repr(self, pkt, x):
"""Convert internal value to a nice representation"""
return repr(self.i2h(pkt,x))
def addfield(self, pkt, s, val):
"""Add an internal value to a string"""
return s+struct.pack(self.fmt, self.i2m(pkt,val))
def getfield(self, pkt, s):
"""Extract an internal value from a string"""
return s[self.sz:], self.m2i(pkt, struct.unpack(self.fmt, s[:self.sz])[0])
def do_copy(self, x):
if hasattr(x, "copy"):
return x.copy()
if type(x) is list:
x = x[:]
for i in range(len(x)):
if isinstance(x[i], BasePacket):
x[i] = x[i].copy()
return x
def __repr__(self):
return "<Field (%s).%s>" % (",".join(x.__name__ for x in self.owners),self.name)
def copy(self):
return copy.deepcopy(self)
def randval(self):
"""Return a volatile object whose value is both random and suitable for this field"""
fmtt = self.fmt[-1]
if fmtt in "BHIQ":
return {"B":RandByte,"H":RandShort,"I":RandInt, "Q":RandLong}[fmtt]()
elif fmtt == "s":
if self.fmt[0] in "0123456789":
l = int(self.fmt[:-1])
else:
l = int(self.fmt[1:-1])
return RandBin(l)
else:
warning("no random class for [%s] (fmt=%s)." % (self.name, self.fmt))
class Emph:
fld = b""
def __init__(self, fld):
self.fld = fld
def __getattr__(self, attr):
return getattr(self.fld,attr)
def __hash__(self):
return hash(self.fld)
def __eq__(self, other):
return self.fld == other
class ActionField:
_fld = None
def __init__(self, fld, action_method, **kargs):
self._fld = fld
self._action_method = action_method
self._privdata = kargs
def any2i(self, pkt, val):
getattr(pkt, self._action_method)(val, self._fld, **self._privdata)
return getattr(self._fld, "any2i")(pkt, val)
def __getattr__(self, attr):
return getattr(self._fld,attr)
class ConditionalField:
fld = None
def __init__(self, fld, cond):
self.fld = fld
self.cond = cond
def _evalcond(self,pkt):
return self.cond(pkt)
def getfield(self, pkt, s):
if self._evalcond(pkt):
return self.fld.getfield(pkt,s)
else:
return s,None
def addfield(self, pkt, s, val):
if self._evalcond(pkt):
return self.fld.addfield(pkt,s,val)
else:
return s
def __getattr__(self, attr):
return getattr(self.fld,attr)
class PadField:
"""Add bytes after the proxified field so that it ends at the specified
alignment from its begining"""
_fld = None
def __init__(self, fld, align, padwith=None):
self._fld = fld
self._align = align
self._padwith = padwith or b""
def padlen(self, flen):
return -flen%self._align
def getfield(self, pkt, s):
remain,val = self._fld.getfield(pkt,s)
padlen = self.padlen(len(s)-len(remain))
return remain[padlen:], val
def addfield(self, pkt, s, val):
sval = self._fld.addfield(pkt, b"", val)
return s+sval+struct.pack("%is" % (self.padlen(len(sval))), self._padwith)
def __getattr__(self, attr):
return getattr(self._fld,attr)
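# Note on PadField.padlen above: Python's modulo result takes the sign of the divisor,
# so for a 5-byte field aligned to 4 the padding is (-5) % 4 == 3 bytes, while a field
# that already ends on the alignment boundary gets (-8) % 4 == 0 bytes.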
class MACField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "6s")
def i2m(self, pkt, x):
if x is None:
return b"\0\0\0\0\0\0"
return mac2str(x)
def m2i(self, pkt, x):
return str2mac(x)
def any2i(self, pkt, x):
        if type(x) is bytes and len(x) == 6:
x = self.m2i(pkt, x)
return x
def i2repr(self, pkt, x):
x = self.i2h(pkt, x)
if self in conf.resolve:
x = conf.manufdb._resolve_MAC(x)
return x
def randval(self):
return RandMAC()
class IPField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "4s")
def h2i(self, pkt, x):
if type(x) is str:
try:
inet_aton(x)
except socket.error:
x = Net(x)
elif type(x) is list:
x = [self.h2i(pkt, n) for n in x]
return x
def resolve(self, x):
if self in conf.resolve:
try:
ret = socket.gethostbyaddr(x)[0]
except:
pass
else:
if ret:
return ret
return x
def i2m(self, pkt, x):
return inet_aton(x)
def m2i(self, pkt, x):
return inet_ntoa(x)
def any2i(self, pkt, x):
return self.h2i(pkt,x)
def i2repr(self, pkt, x):
return self.resolve(self.i2h(pkt, x))
def randval(self):
return RandIP()
class SourceIPField(IPField):
def __init__(self, name, dstname):
IPField.__init__(self, name, None)
self.dstname = dstname
def i2m(self, pkt, x):
if x is None:
iff,x,gw = pkt.route()
if x is None:
x = "0.0.0.0"
return IPField.i2m(self, pkt, x)
def i2h(self, pkt, x):
if x is None:
dst=getattr(pkt,self.dstname)
if isinstance(dst,Gen):
#r = map(conf.route.route, dst)
r = [ conf.route.route(i) for i in dst ]
r.sort()
if r[0] != r[-1]:
warning("More than one possible route for %s"%repr(dst))
iff,x,gw = r[0]
else:
iff,x,gw = conf.route.route(dst)
return IPField.i2h(self, pkt, x)
class ByteField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "B")
class XByteField(ByteField):
def i2repr(self, pkt, x):
return lhex(self.i2h(pkt, x))
class OByteField(ByteField):
def i2repr(self, pkt, x):
return "%03o"%self.i2h(pkt, x)
class X3BytesField(XByteField):
def __init__(self, name, default):
Field.__init__(self, name, default, "!I")
def addfield(self, pkt, s, val):
return s+struct.pack(self.fmt, self.i2m(pkt,val))[1:4]
def getfield(self, pkt, s):
return s[3:], self.m2i(pkt, struct.unpack(self.fmt, b"\x00"+s[:3])[0])
class ThreeBytesField(X3BytesField, ByteField):
def i2repr(self, pkt, x):
return ByteField.i2repr(self, pkt, x)
class ShortField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "H")
class LEShortField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "<H")
class XShortField(ShortField):
def i2repr(self, pkt, x):
return lhex(self.i2h(pkt, x))
class IntField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "I")
class SignedIntField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "i")
def randval(self):
return RandSInt()
class LEIntField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "<I")
class LESignedIntField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "<i")
def randval(self):
return RandSInt()
class XIntField(IntField):
def i2repr(self, pkt, x):
return lhex(self.i2h(pkt, x))
class LongField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "Q")
class XLongField(LongField):
def i2repr(self, pkt, x):
return lhex(self.i2h(pkt, x))
class IEEEFloatField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "f")
class IEEEDoubleField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "d")
class StrField(Field):
def __init__(self, name, default, fmt="H", remain=0):
Field.__init__(self,name,default,fmt)
self.remain = remain
#def i2h(self, pkt, x):
def i2repr(self, pkt, x):
try:
if type(x) is bytes:
x = x.decode('ascii')
except UnicodeDecodeError:
pass
return repr(x)
#def i2repr(self, pkt, x):
# return repr(self.i2h(pkt,x))
def i2len(self, pkt, i):
return len(i)
def i2m(self, pkt, x):
if x is None:
x = b""
elif type(x) is not bytes:
x=str(x).encode('ascii')
return x
def addfield(self, pkt, s, val):
return s+self.i2m(pkt, val)
def getfield(self, pkt, s):
if self.remain == 0:
return b"",self.m2i(pkt, s)
else:
return s[-self.remain:],self.m2i(pkt, s[:-self.remain])
def randval(self):
return RandBin(RandNum(0,1200))
class PacketField(StrField):
holds_packets=1
def __init__(self, name, default, cls, remain=0): #is remain used somewhere?
StrField.__init__(self, name, default, remain=remain)
self.cls = cls
def i2m(self, pkt, i):
return bytes(i)
def m2i(self, pkt, m):
return self.cls(m)
def getfield(self, pkt, s):
i = self.m2i(pkt, s)
remain = b""
if conf.padding_layer in i:
r = i[conf.padding_layer]
del(r.underlayer.payload)
remain = r.load
return remain,i
class PacketLenField(PacketField):
holds_packets=1
def __init__(self, name, default, cls, length_from=None):
PacketField.__init__(self, name, default, cls)
self.length_from = length_from
def getfield(self, pkt, s):
l = self.length_from(pkt)
try:
i = self.m2i(pkt, s[:l])
except Exception:
if conf.debug_dissector:
raise
i = conf.raw_layer(load=s[:l])
return s[l:],i
class PacketListField(PacketField):
islist = 1
holds_packets=1
def __init__(self, name, default, cls, count_from=None, length_from=None):
if default is None:
default = [] # Create a new list for each instance
PacketField.__init__(self, name, default, cls)
self.count_from = count_from
self.length_from = length_from
def any2i(self, pkt, x):
if type(x) is not list:
return [x]
else:
return x
def i2count(self, pkt, val):
if type(val) is list:
return len(val)
return 1
def i2len(self, pkt, val):
return sum( len(p) for p in val )
def do_copy(self, x):
#return map(lambda p:p.copy(), x)
return [ i.copy() for i in x ]
def getfield(self, pkt, s):
c = l = None
if self.length_from is not None:
l = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
lst = []
ret = b""
remain = s
if l is not None:
remain,ret = s[:l],s[l:]
while remain:
if c is not None:
if c <= 0:
break
c -= 1
try:
p = self.m2i(pkt,remain)
except Exception:
if conf.debug_dissector:
raise
p = conf.raw_layer(load=remain)
remain = b""
else:
if conf.padding_layer in p:
pad = p[conf.padding_layer]
remain = pad.load
del(pad.underlayer.payload)
else:
remain = b""
lst.append(p)
return remain+ret,lst
def addfield(self, pkt, s, val):
return s+b"".join([ bytes(i) for i in val ])
class StrFixedLenField(StrField):
def __init__(self, name, default, length=None, length_from=None):
StrField.__init__(self, name, default)
self.length_from = length_from
if length is not None:
self.length_from = lambda pkt,length=length: length
def i2repr(self, pkt, v):
if type(v) is bytes:
v = v.rstrip(b"\0")
return repr(v)
def getfield(self, pkt, s):
l = self.length_from(pkt)
return s[l:], self.m2i(pkt,s[:l])
def addfield(self, pkt, s, val):
l = self.length_from(pkt)
return s+struct.pack("%is"%l,self.i2m(pkt, val))
def randval(self):
try:
l = self.length_from(None)
except:
l = RandNum(0,200)
return RandBin(l)
class StrFixedLenEnumField(StrFixedLenField):
def __init__(self, name, default, length=None, enum=None, length_from=None):
StrFixedLenField.__init__(self, name, default, length=length, length_from=length_from)
self.enum = enum
def i2repr(self, pkt, v):
r = v.rstrip("\0")
rr = repr(r)
if v in self.enum:
rr = "%s (%s)" % (rr, self.enum[v])
elif r in self.enum:
rr = "%s (%s)" % (rr, self.enum[r])
return rr
class NetBIOSNameField(StrFixedLenField):
def __init__(self, name, default, length=31):
StrFixedLenField.__init__(self, name, default, length)
def i2m(self, pkt, x):
l = self.length_from(pkt)//2
if x is None:
x = b""
x += b" "*(l)
x = x[:l]
#x = b"".join(map(lambda x: chr(0x41+(ord(x)>>4))+chr(0x41+(ord(x)&0xf)), x))
x = b"".join([ bytes([0x41+(i>>4),0x41+(i&0xf)]) for i in x ])
x = b" "+x
return x
def m2i(self, pkt, x):
x = x.strip(b"\x00").strip(b" ")
#return b"".join(map(lambda x,y: chr((((ord(x)-1)&0xf)<<4)+((ord(y)-1)&0xf)), x[::2],x[1::2]))
return b"".join(map(lambda x,y: bytes([(((x-1)&0xf)<<4)+((y-1)&0xf)]), x[::2],x[1::2]))
class StrLenField(StrField):
def __init__(self, name, default, fld=None, length_from=None):
StrField.__init__(self, name, default)
self.length_from = length_from
def getfield(self, pkt, s):
l = self.length_from(pkt)
return s[l:], self.m2i(pkt,s[:l])
class FieldListField(Field):
islist=1
def __init__(self, name, default, field, length_from=None, count_from=None):
if default is None:
default = [] # Create a new list for each instance
Field.__init__(self, name, default)
self.count_from = count_from
self.length_from = length_from
self.field = field
def i2count(self, pkt, val):
if type(val) is list:
return len(val)
return 1
def i2len(self, pkt, val):
return sum( self.field.i2len(pkt,v) for v in val )
def i2m(self, pkt, val):
if val is None:
val = []
return val
def any2i(self, pkt, x):
if type(x) is not list:
return [x]
else:
return x
def addfield(self, pkt, s, val):
val = self.i2m(pkt, val)
for v in val:
s = self.field.addfield(pkt, s, v)
return s
def getfield(self, pkt, s):
c = l = None
if self.length_from is not None:
l = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
val = []
ret=b""
if l is not None:
s,ret = s[:l],s[l:]
while s:
if c is not None:
if c <= 0:
break
c -= 1
s,v = self.field.getfield(pkt, s)
val.append(v)
return s+ret, val
class FieldLenField(Field):
def __init__(self, name, default, length_of=None, fmt = "H", count_of=None, adjust=lambda pkt,x:x, fld=None):
Field.__init__(self, name, default, fmt)
self.length_of=length_of
self.count_of=count_of
self.adjust=adjust
if fld is not None:
FIELD_LENGTH_MANAGEMENT_DEPRECATION(self.__class__.__name__)
self.length_of = fld
def i2m(self, pkt, x):
if x is None:
if self.length_of is not None:
fld,fval = pkt.getfield_and_val(self.length_of)
f = fld.i2len(pkt, fval)
else:
fld,fval = pkt.getfield_and_val(self.count_of)
f = fld.i2count(pkt, fval)
x = self.adjust(pkt,f)
return x
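# Illustrative pairing of FieldLenField with StrLenField (a sketch only -- Packet itself
# is defined elsewhere in Scapy, so the snippet is left as a comment): the length field
# derives its value from the data field when the packet is built, and the data field
# reads back exactly that many bytes when the packet is dissected.
#
#     class _Example(Packet):
#         fields_desc = [FieldLenField("len", None, length_of="data"),
#                        StrLenField("data", b"", length_from=lambda pkt: pkt.len)]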
class StrNullField(StrField):
def addfield(self, pkt, s, val):
return s+self.i2m(pkt, val)+b"\x00"
def getfield(self, pkt, s):
l = s.find(b"\x00")
if l < 0:
#XXX \x00 not found
return "",s
return s[l+1:],self.m2i(pkt, s[:l])
def randval(self):
return RandTermString(RandNum(0,1200),b"\x00")
class StrStopField(StrField):
def __init__(self, name, default, stop, additionnal=0):
Field.__init__(self, name, default)
self.stop=stop
self.additionnal=additionnal
def getfield(self, pkt, s):
l = s.find(self.stop)
if l < 0:
return b"",s
# raise Scapy_Exception,"StrStopField: stop value [%s] not found" %stop
l += len(self.stop)+self.additionnal
return s[l:],s[:l]
def randval(self):
return RandTermString(RandNum(0,1200),self.stop)
class LenField(Field):
def i2m(self, pkt, x):
if x is None:
x = len(pkt.payload)
return x
class BCDFloatField(Field):
def i2m(self, pkt, x):
return int(256*x)
def m2i(self, pkt, x):
return x/256.0
class BitField(Field):
def __init__(self, name, default, size):
Field.__init__(self, name, default)
self.rev = size < 0
self.size = abs(size)
def reverse(self, val):
if self.size == 16:
val = socket.ntohs(val)
elif self.size == 32:
val = socket.ntohl(val)
return val
def addfield(self, pkt, s, val):
val = self.i2m(pkt, val)
if type(s) is tuple:
s,bitsdone,v = s
else:
bitsdone = 0
v = 0
if self.rev:
val = self.reverse(val)
v <<= self.size
v |= val & ((1<<self.size) - 1)
bitsdone += self.size
while bitsdone >= 8:
bitsdone -= 8
s = s+struct.pack("!B", v >> bitsdone)
v &= (1<<bitsdone)-1
if bitsdone:
return s,bitsdone,v
else:
return s
def getfield(self, pkt, s):
if type(s) is tuple:
s,bn = s
else:
bn = 0
# we don't want to process all the string
nb_bytes = (self.size+bn-1)//8 + 1
w = s[:nb_bytes]
# split the substring byte by byte
bs = struct.unpack('!%dB' % nb_bytes , w)
b = 0
for c in range(nb_bytes):
b |= int(bs[c]) << (nb_bytes-c-1)*8
# get rid of high order bits
b &= (1 << (nb_bytes*8-bn)) - 1
# remove low order bits
b = b >> (nb_bytes*8 - self.size - bn)
if self.rev:
b = self.reverse(b)
bn += self.size
s = s[bn//8:]
bn = bn%8
b = self.m2i(pkt, b)
if bn:
return (s,bn),b
else:
return s,b
def randval(self):
return RandNum(0,2**self.size-1)
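# Worked example of the bit packing in BitField.addfield above (a sketch): packing a
# 4-bit field holding 0xA followed by a 12-bit field holding 0xBCD. The first call has
# only 4 bits done, so it returns the carry tuple (b"", 4, 0xA); the second call reaches
# 16 bits, flushes two whole bytes and returns b"\xab\xcd".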
class BitFieldLenField(BitField):
def __init__(self, name, default, size, length_of=None, count_of=None, adjust=lambda pkt,x:x):
BitField.__init__(self, name, default, size)
self.length_of=length_of
self.count_of=count_of
self.adjust=adjust
def i2m(self, pkt, x):
#return FieldLenField.i2m.im_func(self, pkt, x)
return FieldLenField.i2m(self, pkt, x)
class XBitField(BitField):
def i2repr(self, pkt, x):
return lhex(self.i2h(pkt,x))
class EnumField(Field):
def __init__(self, name, default, enum, fmt = "H"):
i2s = self.i2s = {}
s2i = self.s2i = {}
if type(enum) is list:
keys = range(len(enum))
else:
keys = enum.keys()
if list(filter(lambda x: type(x) is str, keys)):
i2s,s2i = s2i,i2s
for k in keys:
i2s[k] = enum[k]
s2i[enum[k]] = k
Field.__init__(self, name, default, fmt)
def any2i_one(self, pkt, x):
if type(x) is str:
x = self.s2i[x]
return x
def i2repr_one(self, pkt, x):
if self not in conf.noenum and not isinstance(x,VolatileValue) and x in self.i2s:
return self.i2s[x]
return repr(x)
def any2i(self, pkt, x):
if type(x) is list:
return list(map(lambda z,pkt=pkt:self.any2i_one(pkt,z), x))
else:
return self.any2i_one(pkt,x)
def i2repr(self, pkt, x):
if type(x) is list:
return list(map(lambda z,pkt=pkt:self.i2repr_one(pkt,z), x))
else:
return self.i2repr_one(pkt,x)
class CharEnumField(EnumField):
def __init__(self, name, default, enum, fmt = "1s"):
EnumField.__init__(self, name, default, enum, fmt)
        k = list(self.i2s.keys())
if k and len(k[0]) != 1:
self.i2s,self.s2i = self.s2i,self.i2s
def any2i_one(self, pkt, x):
if len(x) != 1:
x = self.s2i[x]
return x
class BitEnumField(BitField,EnumField):
def __init__(self, name, default, size, enum):
EnumField.__init__(self, name, default, enum)
self.rev = size < 0
self.size = abs(size)
def any2i(self, pkt, x):
return EnumField.any2i(self, pkt, x)
def i2repr(self, pkt, x):
return EnumField.i2repr(self, pkt, x)
class ShortEnumField(EnumField):
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "H")
class LEShortEnumField(EnumField):
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "<H")
class ByteEnumField(EnumField):
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "B")
class IntEnumField(EnumField):
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "I")
class SignedIntEnumField(EnumField):
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "i")
def randval(self):
return RandSInt()
class LEIntEnumField(EnumField):
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "<I")
class XShortEnumField(ShortEnumField):
def i2repr_one(self, pkt, x):
if self not in conf.noenum and not isinstance(x,VolatileValue) and x in self.i2s:
return self.i2s[x]
return lhex(x)
class MultiEnumField(EnumField):
def __init__(self, name, default, enum, depends_on, fmt = "H"):
self.depends_on = depends_on
self.i2s_multi = enum
self.s2i_multi = {}
self.s2i_all = {}
for m in enum:
self.s2i_multi[m] = s2i = {}
for k,v in enum[m].items():
s2i[v] = k
self.s2i_all[v] = k
Field.__init__(self, name, default, fmt)
def any2i_one(self, pkt, x):
if type (x) is str:
v = self.depends_on(pkt)
if v in self.s2i_multi:
s2i = self.s2i_multi[v]
if x in s2i:
return s2i[x]
return self.s2i_all[x]
return x
def i2repr_one(self, pkt, x):
v = self.depends_on(pkt)
if v in self.i2s_multi:
return self.i2s_multi[v].get(x,x)
return x
class BitMultiEnumField(BitField,MultiEnumField):
def __init__(self, name, default, size, enum, depends_on):
MultiEnumField.__init__(self, name, default, enum)
self.rev = size < 0
self.size = abs(size)
def any2i(self, pkt, x):
return MultiEnumField.any2i(self, pkt, x)
def i2repr(self, pkt, x):
return MultiEnumField.i2repr(self, pkt, x)
# Little endian long field
class LELongField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "<Q")
# Little endian FieldLenField
class LEFieldLenField(FieldLenField):
def __init__(self, name, default, length_of=None, fmt = "<H", count_of=None, adjust=lambda pkt,x:x, fld=None):
        FieldLenField.__init__(self, name, default, length_of=length_of, fmt=fmt, count_of=count_of, fld=fld, adjust=adjust)
class FlagsField(BitField):
def __init__(self, name, default, size, names):
self.multi = type(names) is list
if self.multi:
#self.names = map(lambda x:[x], names)
self.names = [ [x] for x in names ]
else:
self.names = names
BitField.__init__(self, name, default, size)
def any2i(self, pkt, x):
if type(x) is str:
if self.multi:
#x = map(lambda y:[y], x.split("+"))
x = [ [y] for y in x.split("+") ]
y = 0
for i in x:
y |= 1 << self.names.index(i)
x = y
return x
def i2repr(self, pkt, x):
if type(x) is list or type(x) is tuple:
return repr(x)
if self.multi:
r = []
else:
r = ""
i=0
while x:
if x & 1:
r += self.names[i]
i += 1
x >>= 1
if self.multi:
r = "+".join(r)
return r
class FixedPointField(BitField):
def __init__(self, name, default, size, frac_bits=16):
self.frac_bits = frac_bits
BitField.__init__(self, name, default, size)
def any2i(self, pkt, val):
if val is None:
return val
ival = int(val)
fract = int( (val-ival) * 2**self.frac_bits )
return (ival << self.frac_bits) | fract
def i2h(self, pkt, val):
int_part = val >> self.frac_bits
frac_part = val & (1 << self.frac_bits) - 1
frac_part /= 2.0**self.frac_bits
return int_part+frac_part
def i2repr(self, pkt, val):
return self.i2h(pkt, val)
|
|
#!/usr/bin/env python
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Tool to tuck/untuck Baxter's arms to/from the shipping pose
"""
import argparse
from copy import deepcopy
import rospy
from std_msgs.msg import (
Empty,
Bool,
)
import baxter_interface
from baxter_core_msgs.msg import (
CollisionAvoidanceState,
)
from baxter_interface import CHECK_VERSION
class Tuck(object):
def __init__(self, tuck_cmd):
self._done = False
self._limbs = ('left', 'right')
self._arms = {
'left': baxter_interface.Limb('left'),
'right': baxter_interface.Limb('right'),
}
self._tuck = tuck_cmd
self._tuck_rate = rospy.Rate(20.0) # Hz
self._tuck_threshold = 0.2 # radians
self._peak_angle = -1.6 # radians
self._arm_state = {
'tuck': {'left': 'none', 'right': 'none'},
'collide': {'left': False, 'right': False},
'flipped': {'left': False, 'right': False}
}
self._joint_moves = {
'tuck': {
'left': [-1.0, -2.07, 3.0, 2.55, 0.0, 0.01, 0.0],
'right': [1.0, -2.07, -3.0, 2.55, -0.0, 0.01, 0.0]
},
'untuck': {
'left': [-0.08, -1.0, -1.19, 1.94, 0.67, 1.03, -0.50],
'right': [0.08, -1.0, 1.19, 1.94, -0.67, 1.03, 0.50]
}
}
self._collide_lsub = rospy.Subscriber(
'robot/limb/left/collision_avoidance_state',
CollisionAvoidanceState,
self._update_collision, 'left')
self._collide_rsub = rospy.Subscriber(
'robot/limb/right/collision_avoidance_state',
CollisionAvoidanceState,
self._update_collision, 'right')
self._disable_pub = {
'left': rospy.Publisher(
'robot/limb/left/suppress_collision_avoidance',
Empty, queue_size=10),
'right': rospy.Publisher(
'robot/limb/right/suppress_collision_avoidance',
Empty, queue_size=10)
}
self._rs = baxter_interface.RobotEnable(CHECK_VERSION)
self._enable_pub = rospy.Publisher('robot/set_super_enable',
Bool, queue_size=10)
def _update_collision(self, data, limb):
self._arm_state['collide'][limb] = len(data.collision_object) > 0
self._check_arm_state()
def _check_arm_state(self):
"""
        Check whether each arm is at a tuck/untuck goal and whether it is
        behind the head collision force-field.
        If the s1 joint is over the peak, collision avoidance will need to be
        disabled to get the arm around the head-arm collision force-field.
"""
diff_check = lambda a, b: abs(a - b) <= self._tuck_threshold
for limb in self._limbs:
angles = [self._arms[limb].joint_angle(joint)
for joint in self._arms[limb].joint_names()]
# Check if in a goal position
untuck_goal = map(diff_check, angles,
self._joint_moves['untuck'][limb])
tuck_goal = map(diff_check, angles[0:2],
self._joint_moves['tuck'][limb][0:2])
if all(untuck_goal):
self._arm_state['tuck'][limb] = 'untuck'
elif all(tuck_goal):
self._arm_state['tuck'][limb] = 'tuck'
else:
self._arm_state['tuck'][limb] = 'none'
# Check if shoulder is flipped over peak
self._arm_state['flipped'][limb] = (
self._arms[limb].joint_angle(limb + '_s1') <= self._peak_angle)
def _prepare_to_tuck(self):
# If arms are in "tucked" state, disable collision avoidance
# before enabling robot, to avoid arm jerking from "force-field".
head = baxter_interface.Head()
start_disabled = not self._rs.state().enabled
at_goal = lambda: (abs(head.pan()) <=
baxter_interface.settings.HEAD_PAN_ANGLE_TOLERANCE)
rospy.loginfo("Moving head to neutral position")
while not at_goal() and not rospy.is_shutdown():
if start_disabled:
[pub.publish(Empty()) for pub in self._disable_pub.values()]
if not self._rs.state().enabled:
self._enable_pub.publish(True)
head.set_pan(0.0, 0.5, timeout=0)
self._tuck_rate.sleep()
if start_disabled:
while self._rs.state().enabled == True and not rospy.is_shutdown():
[pub.publish(Empty()) for pub in self._disable_pub.values()]
self._enable_pub.publish(False)
self._tuck_rate.sleep()
def _move_to(self, tuck, disabled):
if any(disabled.values()):
[pub.publish(Empty()) for pub in self._disable_pub.values()]
while (any(self._arm_state['tuck'][limb] != goal
for limb, goal in tuck.viewitems())
and not rospy.is_shutdown()):
if self._rs.state().enabled == False:
self._enable_pub.publish(True)
for limb in self._limbs:
if disabled[limb]:
self._disable_pub[limb].publish(Empty())
if limb in tuck:
self._arms[limb].set_joint_positions(dict(zip(
self._arms[limb].joint_names(),
self._joint_moves[tuck[limb]][limb])))
self._check_arm_state()
self._tuck_rate.sleep()
if any(self._arm_state['collide'].values()):
self._rs.disable()
return
def supervised_tuck(self):
# Update our starting state to check if arms are tucked
self._prepare_to_tuck()
self._check_arm_state()
# Tuck Arms
if self._tuck == True:
# If arms are already tucked, report this to user and exit.
if all(self._arm_state['tuck'][limb] == 'tuck'
for limb in self._limbs):
rospy.loginfo("Tucking: Arms already in 'Tucked' position.")
self._done = True
return
else:
rospy.loginfo("Tucking: One or more arms not Tucked.")
any_flipped = not all(self._arm_state['flipped'].values())
if any_flipped:
rospy.loginfo(
"Moving to neutral start position with collision %s.",
"on" if any_flipped else "off")
# Move to neutral pose before tucking arms to avoid damage
self._check_arm_state()
actions = dict()
disabled = {'left': True, 'right': True}
for limb in self._limbs:
if not self._arm_state['flipped'][limb]:
actions[limb] = 'untuck'
disabled[limb] = False
self._move_to(actions, disabled)
# Disable collision and Tuck Arms
rospy.loginfo("Tucking: Tucking with collision avoidance off.")
actions = {'left': 'tuck', 'right': 'tuck'}
disabled = {'left': True, 'right': True}
self._move_to(actions, disabled)
self._done = True
return
# Untuck Arms
else:
# If arms are tucked disable collision and untuck arms
if any(self._arm_state['flipped'].values()):
rospy.loginfo("Untucking: One or more arms Tucked;"
" Disabling Collision Avoidance and untucking.")
self._check_arm_state()
suppress = deepcopy(self._arm_state['flipped'])
actions = {'left': 'untuck', 'right': 'untuck'}
self._move_to(actions, suppress)
self._done = True
return
# If arms already untucked, move to neutral location
else:
rospy.loginfo("Untucking: Arms already Untucked;"
" Moving to neutral position.")
self._check_arm_state()
suppress = deepcopy(self._arm_state['flipped'])
actions = {'left': 'untuck', 'right': 'untuck'}
self._move_to(actions, suppress)
self._done = True
return
def clean_shutdown(self):
"""Handles ROS shutdown (Ctrl-C) safely."""
if not self._done:
rospy.logwarn('Aborting: Shutting down safely...')
if any(self._arm_state['collide'].values()):
while self._rs.state().enabled != False:
[pub.publish(Empty()) for pub in self._disable_pub.values()]
self._enable_pub.publish(False)
self._tuck_rate.sleep()
def main():
parser = argparse.ArgumentParser()
tuck_group = parser.add_mutually_exclusive_group(required=True)
tuck_group.add_argument("-t", "--tuck", dest="tuck",
action='store_true', default=False, help="tuck arms")
tuck_group.add_argument("-u", "--untuck", dest="untuck",
action='store_true', default=False, help="untuck arms")
args = parser.parse_args(rospy.myargv()[1:])
tuck = args.tuck
rospy.loginfo("Initializing node... ")
rospy.init_node("rsdk_tuck_arms")
rospy.loginfo("%sucking arms" % ("T" if tuck else "Unt",))
tucker = Tuck(tuck)
rospy.on_shutdown(tucker.clean_shutdown)
tucker.supervised_tuck()
rospy.loginfo("Finished tuck")
if __name__ == "__main__":
main()
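# Example invocations (a sketch, assuming this script is saved as tuck_arms.py in a ROS
# package on the path; the package name below is illustrative):
#
#     rosrun baxter_tools tuck_arms.py -t    # tuck both arms into the shipping pose
#     rosrun baxter_tools tuck_arms.py -u    # untuck both arms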
|
|
from collections import defaultdict
from email.Utils import formatdate
import re
from string import Template
import sys
from time import time
from urlparse import parse_qsl
import commonware.log
import jinja2
from utils import log_configure
import settings_local as settings
# This has to be imported after the settings so statsd knows where to log to.
from django_statsd.clients import statsd
# Go configure the log.
log_configure()
error_log = commonware.log.getLogger('z.pfs')
xml_template = """\
<?xml version="1.0"?>
<RDF:RDF xmlns:RDF="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:pfs="http://www.mozilla.org/2004/pfs-rdf#">
<RDF:Description about="urn:mozilla:plugin-results:$mimetype">
<pfs:plugins><RDF:Seq>
<RDF:li resource="urn:mozilla:plugin:$guid"/>
</RDF:Seq></pfs:plugins>
</RDF:Description>
<RDF:Description about="urn:mozilla:plugin:$guid">
<pfs:updates><RDF:Seq>
<RDF:li resource="urn:mozilla:plugin:$guid:$version"/>
</RDF:Seq></pfs:updates>
</RDF:Description>
<RDF:Description about="urn:mozilla:plugin:$guid:$version">
<pfs:name>$name</pfs:name>
<pfs:requestedMimetype>$mimetype</pfs:requestedMimetype>
<pfs:guid>$guid</pfs:guid>
<pfs:version>$version</pfs:version>
<pfs:IconUrl>$iconUrl</pfs:IconUrl>
<pfs:InstallerLocation>$InstallerLocation</pfs:InstallerLocation>
<pfs:InstallerHash>$InstallerHash</pfs:InstallerHash>
<pfs:XPILocation>$XPILocation</pfs:XPILocation>
<pfs:InstallerShowsUI>$InstallerShowsUI</pfs:InstallerShowsUI>
<pfs:manualInstallationURL>$manualInstallationURL</pfs:manualInstallationURL>
<pfs:licenseURL>$licenseURL</pfs:licenseURL>
<pfs:needsRestart>$needsRestart</pfs:needsRestart>
</RDF:Description>
</RDF:RDF>
"""
flash_re = re.compile(r'^(Win|(PPC|Intel) Mac OS X|Linux.+(x86_64|i\d86))|SunOs', re.IGNORECASE)
quicktime_re = re.compile(r'^(application/(sdp|x-(mpeg|rtsp|sdp))|audio/(3gpp(2)?|AMR|aiff|basic|mid(i)?|mp4|mpeg|vnd\.qcelp|wav|x-(aiff|m4(a|b|p)|midi|mpeg|wav))|image/(pict|png|tiff|x-(macpaint|pict|png|quicktime|sgi|targa|tiff))|video/(3gpp(2)?|flc|mp4|mpeg|quicktime|sd-video|x-mpeg))$')
java_re = re.compile(r'^application/x-java-((applet|bean)(;jpi-version=1\.5|;version=(1\.(1(\.[1-3])?|(2|4)(\.[1-2])?|3(\.1)?|5)))?|vm)$')
wmp_re = re.compile(r'^(application/(asx|x-(mplayer2|ms-wmp))|video/x-ms-(asf(-plugin)?|wm(p|v|x)?|wvx)|audio/x-ms-w(ax|ma))$')
def get_output(data):
g = defaultdict(str, [(k, jinja2.escape(v)) for k, v in data.iteritems()])
required = ['mimetype', 'appID', 'appVersion', 'clientOS', 'chromeLocale']
# Some defaults we override depending on what we find below.
plugin = dict(mimetype='-1', name='-1', guid='-1', version='',
iconUrl='', XPILocation='', InstallerLocation='',
InstallerHash='', InstallerShowsUI='',
manualInstallationURL='', licenseURL='',
needsRestart='true')
# Special case for mimetype if they are provided.
plugin['mimetype'] = g['mimetype'] or '-1'
output = Template(xml_template)
for s in required:
if s not in data:
# A sort of 404, matching what was returned in the original PHP.
return output.substitute(plugin)
# Figure out what plugins we've got, and what plugins we know where
# to get.
# Begin our huge and embarrassing if-else statement.
if (g['mimetype'] in ['application/x-shockwave-flash',
'application/futuresplash'] and
re.match(flash_re, g['clientOS'])):
# Tell the user where they can go to get the installer.
plugin.update(
name='Adobe Flash Player',
manualInstallationURL='http://www.adobe.com/go/getflashplayer')
# Offer Windows users a specific flash plugin installer instead.
# Don't use a https URL for the license here, per request from
# Macromedia.
if g['clientOS'].startswith('Win'):
plugin.update(
guid='{4cfaef8a-a6c9-41a0-8e6f-967eb8f49143}',
XPILocation='',
iconUrl='http://fpdownload2.macromedia.com/pub/flashplayer/current/fp_win_installer.ico',
needsRestart='false',
InstallerShowsUI='true',
version='14.0.0.179',
InstallerHash='sha256:17c1e0b09abd4dc6919abd6405ffd9c6f29460f3c1c6f44409585351b2ab1c68',
InstallerLocation='http://fpdownload2.macromedia.com/pub/flashplayer/pdc/fp_pl_pfs_installer.exe')
elif (g['mimetype'] == 'application/x-director' and
g['clientOS'].startswith('Win')):
plugin.update(
name='Adobe Shockwave Player',
manualInstallationURL='http://get.adobe.com/shockwave/otherversions')
# Even though the shockwave installer is not a silent installer, we
# need to show its EULA here since we've got a slimmed down
# installer that doesn't do that itself.
if g['chromeLocale'] != 'ja-JP':
plugin.update(
licenseURL='http://www.adobe.com/go/eula_shockwaveplayer')
else:
plugin.update(
licenseURL='http://www.adobe.com/go/eula_shockwaveplayer_jp')
plugin.update(
guid='{45f2a22c-4029-4209-8b3d-1421b989633f}',
XPILocation='',
version='12.1.3.153',
InstallerHash='sha256:db20def10dbf3bda2ca061ad3fff89033b200a894f115eb5eab3eadc59c72d9c',
InstallerLocation='http://fpdownload.macromedia.com/pub/shockwave/default/english/win95nt/latest/Shockwave_Installer_FF.exe',
manualInstallationURL='http://get.adobe.com/shockwave/otherversions',
needsRestart='false',
InstallerShowsUI='false')
elif (g['mimetype'] in ['audio/x-pn-realaudio-plugin',
'audio/x-pn-realaudio'] and
re.match(r'^(Win|Linux|PPC Mac OS X)', g['clientOS'])):
plugin.update(
name='Real Player',
version='10.5',
manualInstallationURL='http://www.real.com')
if g['clientOS'].startswith('Win'):
plugin.update(
XPILocation='http://forms.real.com/real/player/download.html?type=firefox',
guid='{d586351c-cb55-41a7-8e7b-4aaac5172d39}')
else:
plugin.update(
guid='{269eb771-59de-4702-9209-ca97ce522f6d}')
elif (re.match(quicktime_re, g['mimetype']) and
re.match(r'^(Win|PPC Mac OS X)', g['clientOS'])):
# Well, we don't have a plugin that can handle any of those
# mimetypes, but the Apple Quicktime plugin can. Point the user to
# the Quicktime download page.
plugin.update(
name='Apple Quicktime',
guid='{a42bb825-7eee-420f-8ee7-834062b6fefd}',
InstallerShowsUI='true',
manualInstallationURL='http://www.apple.com/quicktime/download/')
elif (re.match(java_re, g['mimetype']) and
re.match(r'^(Win|Linux|PPC Mac OS X)', g['clientOS'])):
# We serve up the Java plugin for the following mimetypes:
#
# application/x-java-vm
# application/x-java-applet;jpi-version=1.5
# application/x-java-bean;jpi-version=1.5
# application/x-java-applet;version=1.3
# application/x-java-bean;version=1.3
# application/x-java-applet;version=1.2.2
# application/x-java-bean;version=1.2.2
# application/x-java-applet;version=1.2.1
# application/x-java-bean;version=1.2.1
# application/x-java-applet;version=1.4.2
# application/x-java-bean;version=1.4.2
# application/x-java-applet;version=1.5
# application/x-java-bean;version=1.5
# application/x-java-applet;version=1.3.1
# application/x-java-bean;version=1.3.1
# application/x-java-applet;version=1.4
# application/x-java-bean;version=1.4
# application/x-java-applet;version=1.4.1
# application/x-java-bean;version=1.4.1
# application/x-java-applet;version=1.2
# application/x-java-bean;version=1.2
# application/x-java-applet;version=1.1.3
# application/x-java-bean;version=1.1.3
# application/x-java-applet;version=1.1.2
# application/x-java-bean;version=1.1.2
# application/x-java-applet;version=1.1.1
# application/x-java-bean;version=1.1.1
# application/x-java-applet;version=1.1
# application/x-java-bean;version=1.1
# application/x-java-applet
# application/x-java-bean
#
#
# We don't want to link users directly to the Java plugin because
# we want to warn them about ongoing security problems first. Link
# to SUMO.
plugin.update(
name='Java Runtime Environment',
manualInstallationURL='https://support.mozilla.org/kb/use-java-plugin-to-view-interactive-content',
needsRestart='false',
guid='{fbe640ef-4375-4f45-8d79-767d60bf75b8}')
elif (g['mimetype'] in ['application/pdf', 'application/vnd.fdf',
'application/vnd.adobe.xfdf',
'application/vnd.adobe.xdp+xml',
'application/vnd.adobe.xfd+xml'] and
re.match(r'^(Win|PPC Mac OS X|Linux(?! x86_64))', g['clientOS'])):
plugin.update(
name='Adobe Acrobat Plug-In',
guid='{d87cd824-67cb-4547-8587-616c70318095}',
manualInstallationURL='http://www.adobe.com/products/acrobat/readstep.html')
elif (g['mimetype'] == 'application/x-mtx' and
re.match(r'^(Win|PPC Mac OS X)', g['clientOS'])):
plugin.update(
name='Viewpoint Media Player',
guid='{03f998b2-0e00-11d3-a498-00104b6eb52e}',
manualInstallationURL='http://www.viewpoint.com/pub/products/vmp.html')
elif re.match(wmp_re, g['mimetype']):
# We serve up the Windows Media Player plugin for the following
# mimetypes:
#
# application/asx
# application/x-mplayer2
# audio/x-ms-wax
# audio/x-ms-wma
# video/x-ms-asf
# video/x-ms-asf-plugin
# video/x-ms-wm
# video/x-ms-wmp
# video/x-ms-wmv
# video/x-ms-wmx
# video/x-ms-wvx
#
# For all windows users who don't have the WMP 11 plugin, give them
# a link for it.
if g['clientOS'].startswith('Win'):
plugin.update(
name='Windows Media Player',
version='11',
guid='{cff1240a-fd24-4b9f-8183-ccd96e5300d0}',
manualInstallationURL='http://port25.technet.com/pages/windows-media-player-firefox-plugin-download.aspx')
# For OSX users -- added Intel to this since flip4mac is a UB.
# Contact at MS was okay w/ this, plus MS points to this anyway.
elif re.match(r'^(PPC|Intel) Mac OS X', g['clientOS']):
plugin.update(
name='Flip4Mac',
version='2.1',
guid='{cff0240a-fd24-4b9f-8183-ccd96e5300d0}',
manualInstallationURL='http://www.flip4mac.com/wmv_download.htm')
elif (g['mimetype'] == 'application/x-xstandard' and
re.match(r'^(Win|PPC Mac OS X)', g['clientOS'])):
plugin.update(
name='XStandard XHTML WYSIWYG Editor',
guid='{3563d917-2f44-4e05-8769-47e655e92361}',
iconUrl='http://xstandard.com/images/xicon32x32.gif',
XPILocation='http://xstandard.com/download/xstandard.xpi',
InstallerShowsUI='false',
manualInstallationURL='http://xstandard.com/download/',
licenseURL='http://xstandard.com/license/')
elif (g['mimetype'] == 'application/x-dnl' and
g['clientOS'].startswith('Win')):
plugin.update(
name='DNL Reader',
guid='{ce9317a3-e2f8-49b9-9b3b-a7fb5ec55161}',
version='5.5',
iconUrl='http://digitalwebbooks.com/reader/dwb16.gif',
XPILocation='http://digitalwebbooks.com/reader/xpinst.xpi',
InstallerShowsUI='false',
manualInstallationURL='http://digitalwebbooks.com/reader/')
elif (g['mimetype'] == 'application/x-videoegg-loader' and
g['clientOS'].startswith('Win')):
plugin.update(
name='VideoEgg Publisher',
guid='{b8b881f0-2e07-11db-a98b-0800200c9a66}',
iconUrl='http://videoegg.com/favicon.ico',
XPILocation='http://update.videoegg.com/Install/Windows/Initial/VideoEggPublisher.xpi',
InstallerShowsUI='true',
manualInstallationURL='http://www.videoegg.com/')
elif (g['mimetype'] == 'video/vnd.divx' and
g['clientOS'].startswith('Win')):
plugin.update(
name='DivX Web Player',
guid='{a8b771f0-2e07-11db-a98b-0800200c9a66}',
iconUrl='http://images.divx.com/divx/player/webplayer.png',
XPILocation='http://download.divx.com/player/DivXWebPlayer.xpi',
InstallerShowsUI='false',
licenseURL='http://go.divx.com/plugin/license/',
manualInstallationURL='http://go.divx.com/plugin/download/')
elif (g['mimetype'] == 'video/vnd.divx' and
re.match(r'^(PPC|Intel) Mac OS X', g['clientOS'])):
plugin.update(
name='DivX Web Player',
guid='{a8b771f0-2e07-11db-a98b-0800200c9a66}',
iconUrl='http://images.divx.com/divx/player/webplayer.png',
XPILocation='http://download.divx.com/player/DivXWebPlayerMac.xpi',
InstallerShowsUI='false',
licenseURL='http://go.divx.com/plugin/license/',
manualInstallationURL='http://go.divx.com/plugin/download/')
# End ridiculously huge and embarrassing if-else block.
return output.substitute(plugin)
def format_date(secs):
return '%s GMT' % formatdate(time() + secs)[:25]
def get_headers(length):
return [('Content-Type', 'text/xml; charset=utf-8'),
('Cache-Control', 'public, max-age=3600'),
('Last-Modified', format_date(0)),
('Expires', format_date(3600)),
('Content-Length', str(length))]
def log_exception(data):
(typ, value, traceback) = sys.exc_info()
error_log.error(u'Type: %s, %s. Query: %s' % (typ, value, data))
def application(environ, start_response):
status = '200 OK'
with statsd.timer('services.pfs'):
data = dict(parse_qsl(environ['QUERY_STRING']))
try:
output = get_output(data).encode('utf-8')
start_response(status, get_headers(len(output)))
except:
log_exception(data)
raise
return [output]
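# Example of the kind of query this WSGI service answers (a sketch; the parameter values
# are illustrative). Feeding the parsed query string to get_output() yields the RDF/XML
# document built from xml_template above:
#
#     data = {'mimetype': 'application/x-shockwave-flash',
#             'appID': '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
#             'appVersion': '2008052906', 'clientOS': 'Windows NT 6.1',
#             'chromeLocale': 'en-US'}
#     print(get_output(data))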
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import heapq
from datetime import datetime
from sqlalchemy import Column, Integer, String, Text, DateTime, Boolean
from sqlalchemy.sql import functions, expression
from sqlalchemy.exc import DataError, IntegrityError, ProgrammingError
from tzlocal import get_localzone
from app.base.roles import Roles
from app.models.base import Model
from app.models.user import User
from app.libs.db import db_session
from app.libs.sorts import hot
def _datetime_convert(date):
return datetime(date.year, date.month, date.day, date.hour,
date.minute, date.second, date.microsecond,
get_localzone())
class Topic(Model):
name = Column('name', String(30), unique=True, nullable=False)
admin_id = Column('admin_id', Integer(), default=1)
avatar = Column('avatar', Text(), nullable=False)
description = Column('description', String(420), nullable=False)
rules = Column('rules', Text(), nullable=False)
why = Column('why', Text(), nullable=False)
# 0 vote, 1 accept, -1 reject.
state = Column('state', Integer(), default=0)
date = Column('update_date', DateTime(timezone=True),
default=functions.now(), onupdate=functions.now())
@classmethod
def can_post(cls, topic_id):
r = cls.query.with_entities(cls.state).filter(cls.id==topic_id)
return r.first().state == 1
@classmethod
def page_list_all_accepted(cls, page, per_page):
r = cls.query.filter(cls.state==1).order_by(expression.desc(cls.date))
return r.paginate(page, per_page)
@classmethod
def page_list_all(cls, page, per_page):
r = cls.query.order_by(expression.desc(cls.date))
return r.paginate(page, per_page)
@classmethod
def page_list_by_user(cls, username, page, per_page):
user = User.get_by_name(username)
r = cls.query.filter(
expression.and_(cls.admin_id==user.id, cls.state==1)
).order_by(expression.desc(cls.date))
return r.paginate(page, per_page)
@classmethod
def get_by_name(cls, name):
return cls.query.filter(cls.name==name).first()
@classmethod
def create(cls, name, created_name, avatar,
description, rules, why, state=0):
user = User.get_by_name(created_name)
t = Topic(name=name, admin_id=user.id, avatar=avatar,
description=description, rules=rules,
why=why, state=state)
try:
db_session.add(t)
db_session.commit()
user.update_permission(Roles.TopicEdit.format(t.id))
except (DataError, IntegrityError, ProgrammingError):
db_session.rollback()
raise
return t
def update(self, description=None, rules=None, avatar=None, state=None):
if description:
self.description = description
if rules:
self.rules = rules
if avatar:
self.avatar = avatar
if state:
self.state = state
try:
db_session.add(self)
db_session.commit()
except (DataError, IntegrityError, ProgrammingError):
db_session.rollback()
raise
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'avatar': self.avatar,
'administer': self.administer.username,
'description': self.description,
'rules': self.rules,
'state': self.state,
'why': self.why,
'date': self.date,
}
@property
def administer(self):
return User.get(self.admin_id)
class Post(Model):
topic_id = Column('topic_id', Integer(), index=True, nullable=False)
author_id = Column('author_id', Integer(), index=True, nullable=False)
title = Column('title', String(120), unique=True, index=True, nullable=False)
created_date = Column('created_date', DateTime(timezone=True),
default=functions.now())
update_date = Column('update_date', DateTime(timezone=True),
default=functions.now(), onupdate=functions.now())
comment_date = Column('comment_date', DateTime(timezone=True),
default=functions.now())
keywords = Column('keywords', String(120), nullable=False)
content = Column('content', Text(), default='')
keep_silent = Column('keep_silent', Boolean(), default=False)
is_draft = Column('is_draft', Boolean(), default=False)
@classmethod
def get_by_title(cls, title):
return cls.query.filter(cls.title==title).first()
@classmethod
def hot_list(cls, num=30):
# Imported here to avoid a circular import.
from app.models.action import PostUpVote, PostDownVote
hots, count = list(), 0
# Use a bounded heap and the query iterator to cut down memory usage; see:
# http://stackoverflow.com/questions/1078383/sqlalchemy-difference-between-query-and-query-all-in-for-loops
for post in cls.query:
post_id, post_date = post.id, _datetime_convert(post.update_date)
ups = PostUpVote.count_by_post(post_id)
downs = PostDownVote.count_by_post(post_id)
score = hot(ups, downs, post_date)
if count < num:
heapq.heappush(hots, (score, post))
count += 1
elif score > hots[0][0]:
heapq.heappushpop(hots, (score, post))
posts = list()
while hots:
item = heapq.heappop(hots)
posts.append(item[1])
return posts[::-1]
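# --- Illustrative sketch (standalone, not part of the Post model) -----------
# The bounded-heap selection used by hot_list above, shown on plain tuples.
# An explicit tie-breaker (the item's id) is included in the heap entries so
# equal scores never fall through to comparing the payload objects directly.
import heapq
def _top_n(scored_items, n):
    """Return the items of the n highest (score, item_id, item) entries, best first."""
    heap = []
    for score, item_id, item in scored_items:
        entry = (score, item_id, item)
        if len(heap) < n:
            heapq.heappush(heap, entry)
        elif entry > heap[0]:
            heapq.heappushpop(heap, entry)
    return [e[2] for e in sorted(heap, reverse=True)]
assert _top_n([(0.5, 1, 'a'), (0.9, 2, 'b'), (0.7, 3, 'c'), (0.9, 4, 'd')], 2) == ['d', 'b']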
@classmethod
def page_list(cls, username, page, per_page):
# Imported here to avoid a circular import.
from app.models.action import Subscription
q = cls.query
# If the user is logged in, restrict the query to subscribed topics.
if username:
subs = Subscription.list_by_user(username)
topics = set([s.topic_id for s in subs])
# Only filter when the user has subscribed to at least one topic.
if topics:
q = q.filter(cls.topic_id.in_(topics))
q = q.order_by(expression.desc(cls.comment_date))
return q.paginate(page, per_page)
@classmethod
def page_list_by_user(cls, username, page, per_page):
user = User.get_by_name(username)
q = cls.query.filter(cls.author_id==user.id).order_by(
expression.desc(cls.comment_date))
return q.paginate(page, per_page)
@classmethod
def page_list_by_topic(cls, topic_id, page, per_page):
q = cls.query.filter(cls.topic_id==topic_id).order_by(
expression.desc(cls.comment_date))
return q.paginate(page, per_page)
@classmethod
def create(cls, author_name, topic_id, title, keywords,
content='', keep_silent=False, is_draft=False):
user = User.get_by_name(author_name)
p = cls(
topic_id=topic_id,
author_id=user.id,
title=title,
keywords=keywords,
content=content,
keep_silent=keep_silent,
is_draft=is_draft,
)
try:
db_session.add(p)
db_session.commit()
user.update_permission(Roles.PostEdit.format(p.id))
except (DataError, IntegrityError, ProgrammingError):
db_session.rollback()
raise
return p
def update(self, keywords=None, content=None,
keep_silent=False, is_draft=None):
if keywords:
self.keywords = keywords
self.content = content
self.keep_silent = keep_silent
if is_draft:
self.is_draft = is_draft
try:
db_session.add(self)
db_session.commit()
except (DataError, IntegrityError, ProgrammingError):
db_session.rollback()
raise
def to_dict(self):
return {
'id': self.id,
'author_name': self.author.username,
'author_avatar': self.author.profile.avatar,
'topic_id': self.topic_id,
'topic_name': self.topic.name,
'title': self.title,
'keywords': self.keywords,
'content': self.content,
'keep_silent': self.keep_silent,
'created_date': self.created_date,
'update_date': self.update_date,
}
def _new_comment(self, now):
self.comment_date = now
db_session.add(self)
@property
def author(self):
return User.get(self.author_id)
@property
def topic(self):
return Topic.get(self.topic_id)
class Comment(Model):
post_id = Column('post_id', Integer(), index=True, nullable=False)
author_id = Column('author_id', Integer(), index=True, nullable=False)
date = Column('date', DateTime(timezone=True), default=functions.now())
content = Column('content', Text(), nullable=False)
@classmethod
def latest_by_post(cls, post_id):
cs = cls.query.filter(cls.post_id==post_id).order_by(
expression.desc(cls.date)).limit(1)
return cs.first()
@classmethod
def page_list(cls, page, per_page):
p = cls.query.order_by(expression.asc(cls.date))
return p.paginate(page, per_page)
@classmethod
def count_by_user(cls, username):
user = User.get_by_name(username)
return cls.query.filter(cls.author_id==user.id).count()
@classmethod
def count_by_post(cls, post_id):
return cls.query.filter(cls.post_id==post_id).count()
@classmethod
def last_with_count(cls, post_id):
q = cls.query.filter(cls.post_id==post_id)
return q.count(), q.order_by(expression.desc(cls.id)).first()
@classmethod
def page_list_by_user(cls, username, page, per_page):
user = User.get_by_name(username)
p = cls.query.order_by(
expression.asc(cls.date)).filter(cls.author_id==user.id)
return p.paginate(page, per_page)
@classmethod
def page_list_by_post(cls, post_id, page, per_page):
p = cls.query.order_by(
expression.asc(cls.date)).filter(cls.post_id==post_id)
return p.paginate(page, per_page)
@classmethod
def create(cls, author_name, post_id, content):
user = User.get_by_name(author_name)
now = functions.now()
Post.get(post_id)._new_comment(now)
c = cls(author_id=user.id, post_id=post_id, content=content, date=now)
try:
db_session.add(c)
db_session.commit()
except (DataError, IntegrityError, ProgrammingError):
db_session.rollback()
raise
return c
def update(self, content):
self.content = content
try:
db_session.add(self)
db_session.commit()
except (DataError, IntegrityError, ProgrammingError):
db_session.rollback()
raise
def to_dict(self):
return {
'id': self.id,
'author': self.author.username,
'avatar': self.author.profile.avatar,
'date': self.date,
'content': self.content,
}
@property
def author(self):
if not hasattr(self, '_author'):
self._author = User.get(self.author_id)
return self._author
@property
def post(self):
return Post.get(self.post_id)
class TopicComment(Model):
topic_id = Column('topic_id', Integer(), index=True, nullable=False)
author_id = Column('author_id', Integer(), index=True, nullable=False)
date = Column('date', DateTime(timezone=True), default=functions.now())
content = Column('content', Text(), nullable=False)
@classmethod
def latest_by_topic(cls, topic_id):
cs = cls.query.filter(cls.topic_id==topic_id).order_by(
expression.desc(cls.date)).limit(1)
return cs.first()
@classmethod
def page_list(cls, page, per_page):
p = cls.query.order_by(expression.asc(cls.date))
return p.paginate(page, per_page)
@classmethod
def count_by_user(cls, username):
user = User.get_by_name(username)
return cls.query.filter(cls.author_id==user.id).count()
@classmethod
def count_by_topic(cls, topic_id):
return cls.query.filter(cls.topic_id==topic_id).count()
@classmethod
def page_list_by_user(cls, username, page, per_page):
user = User.get_by_name(username)
p = cls.query.order_by(
expression.asc(cls.date)).filter(cls.author_id==user.id)
return p.paginate(page, per_page)
@classmethod
def page_list_by_topic(cls, topic_id, page, per_page):
p = cls.query.order_by(
expression.asc(cls.date)).filter(cls.topic_id==topic_id)
return p.paginate(page, per_page)
@classmethod
def create(cls, author_name, topic_id, content):
user = User.get_by_name(author_name)
now = functions.now()
Topic.get(topic_id)._new_comment(now)
c = cls(author_id=user.id, topic_id=topic_id, content=content, date=now)
try:
db_session.add(c)
db_session.commit()
except (DataError, IntegrityError, ProgrammingError):
db_session.rollback()
raise
return c
def update(self, content):
self.content = content
try:
db_session.add(self)
db_session.commit()
except (DataError, IntegrityError, ProgrammingError):
db_session.rollback()
raise
def to_dict(self):
return {
'id': self.id,
'author': self.author.username,
'avatar': self.author.profile.avatar,
'date': self.date,
'content': self.content,
}
@property
def author(self):
if not hasattr(self, '_author'):
self._author = User.get(self.author_id)
return self._author
@property
def topic(self):
return Topic.get(self.topic_id)
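# --- Illustrative sketch (standalone) ----------------------------------------
# Every create/update method above repeats the same add/commit/rollback-and-
# re-raise dance. A context manager like the one below could factor that out.
# This is only a sketch assuming a SQLAlchemy-style session object; the models
# above do not actually use it.
from contextlib import contextmanager
@contextmanager
def committing(session):
    """Commit on success, roll back and re-raise on failure."""
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
# Hypothetical usage with the db_session imported at the top of this module:
#     with committing(db_session) as s:
#         s.add(Topic(name='python', ...))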
|
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from enum import IntEnum
from PyQt5.QtCore import Qt, QPersistentModelIndex, QModelIndex
from PyQt5.QtGui import QStandardItemModel, QStandardItem, QFont
from PyQt5.QtWidgets import QAbstractItemView, QComboBox, QLabel, QMenu
from electrum.i18n import _
from electrum.util import block_explorer_URL, profiler
from electrum.plugin import run_hook
from electrum.bitcoin import is_address
from electrum.wallet import InternalAddressCorruption
from .util import MyTreeView, MONOSPACE_FONT, ColorScheme, webopen
class AddressList(MyTreeView):
class Columns(IntEnum):
TYPE = 0
ADDRESS = 1
LABEL = 2
COIN_BALANCE = 3
FIAT_BALANCE = 4
NUM_TXS = 5
filter_columns = [Columns.TYPE, Columns.ADDRESS, Columns.LABEL, Columns.COIN_BALANCE]
def __init__(self, parent=None):
super().__init__(parent, self.create_menu, stretch_column=self.Columns.LABEL)
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setSortingEnabled(True)
self.show_change = 0
self.show_used = 0
self.change_button = QComboBox(self)
self.change_button.currentIndexChanged.connect(self.toggle_change)
for t in [_('All'), _('Receiving'), _('Change')]:
self.change_button.addItem(t)
self.used_button = QComboBox(self)
self.used_button.currentIndexChanged.connect(self.toggle_used)
for t in [_('All'), _('Unused'), _('Funded'), _('Used')]:
self.used_button.addItem(t)
self.setModel(QStandardItemModel(self))
self.update()
def get_toolbar_buttons(self):
return QLabel(_("Filter:")), self.change_button, self.used_button
def on_hide_toolbar(self):
self.show_change = 0
self.show_used = 0
self.update()
def save_toolbar_state(self, state, config):
config.set_key('show_toolbar_addresses', state)
def refresh_headers(self):
fx = self.parent.fx
if fx and fx.get_fiat_address_config():
ccy = fx.get_currency()
else:
ccy = _('Fiat')
headers = {
self.Columns.TYPE: _('Type'),
self.Columns.ADDRESS: _('Address'),
self.Columns.LABEL: _('Label'),
self.Columns.COIN_BALANCE: _('Balance'),
self.Columns.FIAT_BALANCE: ccy + ' ' + _('Balance'),
self.Columns.NUM_TXS: _('Tx'),
}
self.update_headers(headers)
def toggle_change(self, state):
if state == self.show_change:
return
self.show_change = state
self.update()
def toggle_used(self, state):
if state == self.show_used:
return
self.show_used = state
self.update()
@profiler
def update(self):
self.wallet = self.parent.wallet
current_address = self.current_item_user_role(col=self.Columns.LABEL)
if self.show_change == 1:
addr_list = self.wallet.get_receiving_addresses()
elif self.show_change == 2:
addr_list = self.wallet.get_change_addresses()
else:
addr_list = self.wallet.get_addresses()
self.model().clear()
self.refresh_headers()
fx = self.parent.fx
set_address = None
for address in addr_list:
num = self.wallet.get_address_history_len(address)
label = self.wallet.labels.get(address, '')
c, u, x = self.wallet.get_addr_balance(address)
balance = c + u + x
is_used_and_empty = self.wallet.is_used(address) and balance == 0
if self.show_used == 1 and (balance or is_used_and_empty):
continue
if self.show_used == 2 and balance == 0:
continue
if self.show_used == 3 and not is_used_and_empty:
continue
balance_text = self.parent.format_amount(balance, whitespaces=True)
# create item
if fx and fx.get_fiat_address_config():
rate = fx.exchange_rate()
fiat_balance = fx.value_str(balance, rate)
else:
fiat_balance = ''
labels = ['', address, label, balance_text, fiat_balance, "%d"%num]
address_item = [QStandardItem(e) for e in labels]
# align text and set fonts
for i, item in enumerate(address_item):
item.setTextAlignment(Qt.AlignVCenter)
if i not in (self.Columns.TYPE, self.Columns.LABEL):
item.setFont(QFont(MONOSPACE_FONT))
item.setEditable(i in self.editable_columns)
address_item[self.Columns.FIAT_BALANCE].setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
# setup column 0
if self.wallet.is_change(address):
address_item[self.Columns.TYPE].setText(_('change'))
address_item[self.Columns.TYPE].setBackground(ColorScheme.YELLOW.as_color(True))
else:
address_item[self.Columns.TYPE].setText(_('receiving'))
address_item[self.Columns.TYPE].setBackground(ColorScheme.GREEN.as_color(True))
address_item[self.Columns.LABEL].setData(address, Qt.UserRole)
# setup column 1
if self.wallet.is_frozen_address(address):
address_item[self.Columns.ADDRESS].setBackground(ColorScheme.BLUE.as_color(True))
if self.wallet.is_beyond_limit(address):
address_item[self.Columns.ADDRESS].setBackground(ColorScheme.RED.as_color(True))
# add item
count = self.model().rowCount()
self.model().insertRow(count, address_item)
address_idx = self.model().index(count, self.Columns.LABEL)
if address == current_address:
set_address = QPersistentModelIndex(address_idx)
self.set_current_idx(set_address)
# show/hide columns
if fx and fx.get_fiat_address_config():
self.showColumn(self.Columns.FIAT_BALANCE)
else:
self.hideColumn(self.Columns.FIAT_BALANCE)
self.filter()
def create_menu(self, position):
from electrum.wallet import Multisig_Wallet
is_multisig = isinstance(self.wallet, Multisig_Wallet)
can_delete = self.wallet.can_delete_address()
selected = self.selected_in_column(self.Columns.ADDRESS)
if not selected:
return
multi_select = len(selected) > 1
addrs = [self.model().itemFromIndex(item).text() for item in selected]
menu = QMenu()
if not multi_select:
idx = self.indexAt(position)
if not idx.isValid():
return
col = idx.column()
item = self.model().itemFromIndex(idx)
if not item:
return
addr = addrs[0]
addr_column_title = self.model().horizontalHeaderItem(self.Columns.LABEL).text()
addr_idx = idx.sibling(idx.row(), self.Columns.LABEL)
column_title = self.model().horizontalHeaderItem(col).text()
copy_text = self.model().itemFromIndex(idx).text()
if col == self.Columns.COIN_BALANCE or col == self.Columns.FIAT_BALANCE:
copy_text = copy_text.strip()
menu.addAction(_("Copy {}").format(column_title), lambda: self.place_text_on_clipboard(copy_text))
menu.addAction(_('Details'), lambda: self.parent.show_address(addr))
persistent = QPersistentModelIndex(addr_idx)
menu.addAction(_("Edit {}").format(addr_column_title), lambda p=persistent: self.edit(QModelIndex(p)))
menu.addAction(_("Request payment"), lambda: self.parent.receive_at(addr))
if self.wallet.can_export():
menu.addAction(_("Private key"), lambda: self.parent.show_private_key(addr))
if not is_multisig and not self.wallet.is_watching_only():
menu.addAction(_("Sign/verify message"), lambda: self.parent.sign_verify_message(addr))
menu.addAction(_("Encrypt/decrypt message"), lambda: self.parent.encrypt_message(addr))
if can_delete:
menu.addAction(_("Remove from wallet"), lambda: self.parent.remove_address(addr))
addr_URL = block_explorer_URL(self.config, 'addr', addr)
if addr_URL:
menu.addAction(_("View on block explorer"), lambda: webopen(addr_URL))
if not self.wallet.is_frozen_address(addr):
menu.addAction(_("Freeze"), lambda: self.parent.set_frozen_state_of_addresses([addr], True))
else:
menu.addAction(_("Unfreeze"), lambda: self.parent.set_frozen_state_of_addresses([addr], False))
coins = self.wallet.get_spendable_coins(addrs, config=self.config)
if coins:
menu.addAction(_("Spend from"), lambda: self.parent.spend_coins(coins))
run_hook('receive_menu', menu, addrs, self.wallet)
menu.exec_(self.viewport().mapToGlobal(position))
def place_text_on_clipboard(self, text):
if is_address(text):
try:
self.wallet.check_address(text)
except InternalAddressCorruption as e:
self.parent.show_error(str(e))
raise
self.parent.app.clipboard().setText(text)
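# --- Illustrative sketch (standalone, not Electrum code) ---------------------
# A pure-function restatement of the `show_used` filtering in update() above,
# mirroring the combo-box order: 0 = All, 1 = Unused, 2 = Funded, 3 = Used.
# The function name is hypothetical; it only mirrors the three `continue`
# conditions in the loop.
def _address_passes_filter(show_used, balance, is_used):
    """Return True if an address with this balance / used flag should be listed."""
    is_used_and_empty = is_used and balance == 0
    if show_used == 1:   # Unused: hide funded or previously used addresses
        return not (balance or is_used_and_empty)
    if show_used == 2:   # Funded: hide addresses with zero balance
        return balance != 0
    if show_used == 3:   # Used: show only addresses that were used and are now empty
        return is_used_and_empty
    return True          # All
assert _address_passes_filter(1, 0, False) is True     # fresh, unused address
assert _address_passes_filter(2, 5000, True) is True   # still funded
assert _address_passes_filter(3, 0, True) is True      # used and emptied
assert _address_passes_filter(3, 5000, True) is False  # funded, so not "used and empty"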
|
|
"""Arcam media player."""
import logging
from arcam.fmj import SourceCodes
from arcam.fmj.state import State
from homeassistant.components.media_player import BrowseMedia, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_MUSIC,
MEDIA_TYPE_MUSIC,
SUPPORT_BROWSE_MEDIA,
SUPPORT_PLAY_MEDIA,
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.components.media_player.errors import BrowseError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .config_flow import get_entry_client
from .const import (
DOMAIN,
EVENT_TURN_ON,
SIGNAL_CLIENT_DATA,
SIGNAL_CLIENT_STARTED,
SIGNAL_CLIENT_STOPPED,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the configuration entry."""
client = get_entry_client(hass, config_entry)
async_add_entities(
[
ArcamFmj(
config_entry.title,
State(client, zone),
config_entry.unique_id or config_entry.entry_id,
)
for zone in (1, 2)
],
True,
)
class ArcamFmj(MediaPlayerEntity):
"""Representation of a media device."""
_attr_should_poll = False
def __init__(
self,
device_name,
state: State,
uuid: str,
):
"""Initialize device."""
self._state = state
self._device_name = device_name
self._attr_name = f"{device_name} - Zone: {state.zn}"
self._uuid = uuid
self._attr_supported_features = (
SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY_MEDIA
| SUPPORT_BROWSE_MEDIA
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_STEP
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
)
if state.zn == 1:
self._attr_supported_features |= SUPPORT_SELECT_SOUND_MODE
self._attr_unique_id = f"{uuid}-{state.zn}"
self._attr_entity_registry_enabled_default = state.zn == 1
@property
def state(self):
"""Return the state of the device."""
if self._state.get_power():
return STATE_ON
return STATE_OFF
@property
def device_info(self):
"""Return a device description for device registry."""
return DeviceInfo(
identifiers={
(DOMAIN, self._uuid),
(DOMAIN, self._state.client.host, self._state.client.port),
},
manufacturer="Arcam",
model="Arcam FMJ AVR",
name=self._device_name,
)
async def async_added_to_hass(self):
"""Once registered, add listener for events."""
await self._state.start()
await self._state.update()
@callback
def _data(host):
if host == self._state.client.host:
self.async_write_ha_state()
@callback
def _started(host):
if host == self._state.client.host:
self.async_schedule_update_ha_state(force_refresh=True)
@callback
def _stopped(host):
if host == self._state.client.host:
self.async_schedule_update_ha_state(force_refresh=True)
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_CLIENT_DATA, _data
)
)
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_CLIENT_STARTED, _started
)
)
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_CLIENT_STOPPED, _stopped
)
)
async def async_update(self):
"""Force update of state."""
_LOGGER.debug("Update state %s", self.name)
await self._state.update()
async def async_mute_volume(self, mute):
"""Send mute command."""
await self._state.set_mute(mute)
self.async_write_ha_state()
async def async_select_source(self, source):
"""Select a specific source."""
try:
value = SourceCodes[source]
except KeyError:
_LOGGER.error("Unsupported source %s", source)
return
await self._state.set_source(value)
self.async_write_ha_state()
async def async_select_sound_mode(self, sound_mode):
"""Select a specific source."""
try:
await self._state.set_decode_mode(sound_mode)
except (KeyError, ValueError):
_LOGGER.error("Unsupported sound_mode %s", sound_mode)
return
self.async_write_ha_state()
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self._state.set_volume(round(volume * 99.0))
self.async_write_ha_state()
async def async_volume_up(self):
"""Turn volume up for media player."""
await self._state.inc_volume()
self.async_write_ha_state()
async def async_volume_down(self):
"""Turn volume up for media player."""
await self._state.dec_volume()
self.async_write_ha_state()
async def async_turn_on(self):
"""Turn the media player on."""
if self._state.get_power() is not None:
_LOGGER.debug("Turning on device using connection")
await self._state.set_power(True)
else:
_LOGGER.debug("Firing event to turn on device")
self.hass.bus.async_fire(EVENT_TURN_ON, {ATTR_ENTITY_ID: self.entity_id})
async def async_turn_off(self):
"""Turn the media player off."""
await self._state.set_power(False)
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
if media_content_id not in (None, "root"):
raise BrowseError(
f"Media not found: {media_content_type} / {media_content_id}"
)
presets = self._state.get_preset_details()
radio = [
BrowseMedia(
title=preset.name,
media_class=MEDIA_CLASS_MUSIC,
media_content_id=f"preset:{preset.index}",
media_content_type=MEDIA_TYPE_MUSIC,
can_play=True,
can_expand=False,
)
for preset in presets.values()
]
root = BrowseMedia(
title="Root",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id="root",
media_content_type="library",
can_play=False,
can_expand=True,
children=radio,
)
return root
async def async_play_media(self, media_type: str, media_id: str, **kwargs) -> None:
"""Play media."""
if media_id.startswith("preset:"):
preset = int(media_id[7:])
await self._state.set_tuner_preset(preset)
else:
_LOGGER.error("Media %s is not supported", media_id)
return
@property
def source(self):
"""Return the current input source."""
if (value := self._state.get_source()) is None:
return None
return value.name
@property
def source_list(self):
"""List of available input sources."""
return [x.name for x in self._state.get_source_list()]
@property
def sound_mode(self):
"""Name of the current sound mode."""
if (value := self._state.get_decode_mode()) is None:
return None
return value.name
@property
def sound_mode_list(self):
"""List of available sound modes."""
if (values := self._state.get_decode_modes()) is None:
return None
return [x.name for x in values]
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
if (value := self._state.get_mute()) is None:
return None
return value
@property
def volume_level(self):
"""Volume level of device."""
if (value := self._state.get_volume()) is None:
return None
return value / 99.0
@property
def media_content_type(self):
"""Content type of current playing media."""
source = self._state.get_source()
if source == SourceCodes.DAB:
value = MEDIA_TYPE_MUSIC
elif source == SourceCodes.FM:
value = MEDIA_TYPE_MUSIC
else:
value = None
return value
@property
def media_content_id(self):
"""Content type of current playing media."""
source = self._state.get_source()
if source in (SourceCodes.DAB, SourceCodes.FM):
if preset := self._state.get_tuner_preset():
value = f"preset:{preset}"
else:
value = None
else:
value = None
return value
@property
def media_channel(self):
"""Channel currently playing."""
source = self._state.get_source()
if source == SourceCodes.DAB:
value = self._state.get_dab_station()
elif source == SourceCodes.FM:
value = self._state.get_rds_information()
else:
value = None
return value
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
if self._state.get_source() == SourceCodes.DAB:
value = self._state.get_dls_pdt()
else:
value = None
return value
@property
def media_title(self):
"""Title of current playing media."""
if (source := self._state.get_source()) is None:
return None
if channel := self.media_channel:
value = f"{source.name} - {channel}"
else:
value = source.name
return value
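# --- Illustrative sketch (standalone, not Home Assistant code) ---------------
# async_browse_media and async_play_media above share a tiny ID scheme: browse
# results carry media_content_id = f"preset:{index}" and play parses it back.
# The helper pair below makes that round trip explicit; the names are
# hypothetical and not part of the integration.
def _encode_preset_id(index: int) -> str:
    return f"preset:{index}"
def _decode_preset_id(media_id: str):
    """Return the preset number, or None if the ID is not a preset."""
    if media_id.startswith("preset:"):
        return int(media_id[len("preset:"):])
    return None
assert _decode_preset_id(_encode_preset_id(7)) == 7
assert _decode_preset_id("root") is None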
|
|
from __future__ import absolute_import, unicode_literals
import datetime
from django.contrib.admin import (site, ModelAdmin, SimpleListFilter,
BooleanFieldListFilter)
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.utils import six
from django.utils.encoding import force_text
from .models import Book, Department, Employee
def select_by(dictlist, key, value):
return [x for x in dictlist if x[key] == value][0]
class DecadeListFilter(SimpleListFilter):
def lookups(self, request, model_admin):
return (
('the 80s', "the 1980's"),
('the 90s', "the 1990's"),
('the 00s', "the 2000's"),
('other', "other decades"),
)
def queryset(self, request, queryset):
decade = self.value()
if decade == 'the 80s':
return queryset.filter(year__gte=1980, year__lte=1989)
if decade == 'the 90s':
return queryset.filter(year__gte=1990, year__lte=1999)
if decade == 'the 00s':
return queryset.filter(year__gte=2000, year__lte=2009)
class DecadeListFilterWithTitleAndParameter(DecadeListFilter):
title = 'publication decade'
parameter_name = 'publication-decade'
class DecadeListFilterWithoutTitle(DecadeListFilter):
parameter_name = 'publication-decade'
class DecadeListFilterWithoutParameter(DecadeListFilter):
title = 'publication decade'
class DecadeListFilterWithNoneReturningLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
pass
class DecadeListFilterWithFailingQueryset(DecadeListFilterWithTitleAndParameter):
def queryset(self, request, queryset):
raise 1/0
class DecadeListFilterWithQuerysetBasedLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
qs = model_admin.queryset(request)
if qs.filter(year__gte=1980, year__lte=1989).exists():
yield ('the 80s', "the 1980's")
if qs.filter(year__gte=1990, year__lte=1999).exists():
yield ('the 90s', "the 1990's")
if qs.filter(year__gte=2000, year__lte=2009).exists():
yield ('the 00s', "the 2000's")
class DecadeListFilterParameterEndsWith__In(DecadeListFilter):
title = 'publication decade'
parameter_name = 'decade__in'  # Ends with '__in'
class DecadeListFilterParameterEndsWith__Isnull(DecadeListFilter):
title = 'publication decade'
parameter_name = 'decade__isnull'  # Ends with '__isnull'
class CustomUserAdmin(UserAdmin):
list_filter = ('books_authored', 'books_contributed')
class BookAdmin(ModelAdmin):
list_filter = ('year', 'author', 'contributors', 'is_best_seller', 'date_registered', 'no')
ordering = ('-id',)
class BookAdminWithTupleBooleanFilter(BookAdmin):
list_filter = ('year', 'author', 'contributors', ('is_best_seller', BooleanFieldListFilter), 'date_registered', 'no')
class DecadeFilterBookAdmin(ModelAdmin):
list_filter = ('author', DecadeListFilterWithTitleAndParameter)
ordering = ('-id',)
class DecadeFilterBookAdminWithoutTitle(ModelAdmin):
list_filter = (DecadeListFilterWithoutTitle,)
class DecadeFilterBookAdminWithoutParameter(ModelAdmin):
list_filter = (DecadeListFilterWithoutParameter,)
class DecadeFilterBookAdminWithNoneReturningLookups(ModelAdmin):
list_filter = (DecadeListFilterWithNoneReturningLookups,)
class DecadeFilterBookAdminWithFailingQueryset(ModelAdmin):
list_filter = (DecadeListFilterWithFailingQueryset,)
class DecadeFilterBookAdminWithQuerysetBasedLookups(ModelAdmin):
list_filter = (DecadeListFilterWithQuerysetBasedLookups,)
class DecadeFilterBookAdminParameterEndsWith__In(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__In,)
class DecadeFilterBookAdminParameterEndsWith__Isnull(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__Isnull,)
class EmployeeAdmin(ModelAdmin):
list_display = ['name', 'department']
list_filter = ['department']
class ListFiltersTests(TestCase):
def setUp(self):
self.today = datetime.date.today()
self.tomorrow = self.today + datetime.timedelta(days=1)
self.one_week_ago = self.today - datetime.timedelta(days=7)
self.request_factory = RequestFactory()
# Users
self.alfred = User.objects.create_user('alfred', 'alfred@example.com')
self.bob = User.objects.create_user('bob', 'bob@example.com')
self.lisa = User.objects.create_user('lisa', 'lisa@example.com')
# Books
self.djangonaut_book = Book.objects.create(title='Djangonaut: an art of living', year=2009, author=self.alfred, is_best_seller=True, date_registered=self.today)
self.bio_book = Book.objects.create(title='Django: a biography', year=1999, author=self.alfred, is_best_seller=False, no=207)
self.django_book = Book.objects.create(title='The Django Book', year=None, author=self.bob, is_best_seller=None, date_registered=self.today, no=103)
self.gipsy_book = Book.objects.create(title='Gipsy guitar for dummies', year=2002, is_best_seller=True, date_registered=self.one_week_ago)
self.gipsy_book.contributors = [self.bob, self.lisa]
self.gipsy_book.save()
def get_changelist(self, request, model, modeladmin):
return ChangeList(request, model, modeladmin.list_display, modeladmin.list_display_links,
modeladmin.list_filter, modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page, modeladmin.list_max_show_all, modeladmin.list_editable, modeladmin)
def test_datefieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'date_registered__gte': self.today,
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Today")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today, self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(day=1),
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
if (self.today.year, self.today.month) == (self.one_week_ago.year, self.one_week_ago.month):
# In case one week ago is in the same month.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This month")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today.replace(day=1), self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(month=1, day=1),
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
if self.today.year == self.one_week_ago.year:
# In case one week ago is in the same year.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This year")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today.replace(month=1, day=1), self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': str(self.one_week_ago),
'date_registered__lt': str(self.tomorrow)})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Past 7 days")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (str(self.one_week_ago), str(self.tomorrow)))
@override_settings(USE_TZ=True)
def test_datefieldlistfilter_with_time_zone_support(self):
# Regression for #17830
self.test_datefieldlistfilter()
def test_allvaluesfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'year__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')
request = self.request_factory.get('/', {'year': '2002'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?year=2002')
def test_relatedfieldlistfilter_foreignkey(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'author__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')
request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
# order of choices depends on User model, which has no order
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)
def test_relatedfieldlistfilter_manytomany(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'contributors__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')
request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
choice = select_by(filterspec.choices(changelist), "display", "bob")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)
def test_relatedfieldlistfilter_reverse_relationships(self):
modeladmin = CustomUserAdmin(User, site)
# FK relationship -----
request = self.request_factory.get('/', {'books_authored__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.lisa])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True')
request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'book')
choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk)
# M2M relationship -----
request = self.request_factory.get('/', {'books_contributed__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.alfred])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True')
request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'book')
choice = select_by(filterspec.choices(changelist), "display", self.django_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk)
def test_booleanfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def test_booleanfieldlistfilter_tuple(self):
modeladmin = BookAdminWithTupleBooleanFilter(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def verify_booleanfieldlistfilter(self, modeladmin):
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'is_best_seller__exact': 0})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "No")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=0')
request = self.request_factory.get('/', {'is_best_seller__exact': 1})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Yes")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=1')
request = self.request_factory.get('/', {'is_best_seller__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Unknown")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__isnull=True')
def test_simplelistfilter(self):
modeladmin = DecadeFilterBookAdmin(Book, site)
# Make sure that the first option is 'All' ---------------------------
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), list(Book.objects.all().order_by('-id')))
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
# Look for books in the 1980s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 80s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'the 1980\'s')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+80s')
# Look for books in the 1990s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+90s')
# Look for books in the 2000s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], 'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s')
# Combine multiple filters -------------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s', 'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.djangonaut_book])
# Make sure the correct choices are selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], 'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s&author__id__exact=%s' % self.alfred.pk)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?publication-decade=the+00s&author__id__exact=%s' % self.alfred.pk)
def test_listfilter_without_title(self):
"""
Any filter must define a title.
"""
modeladmin = DecadeFilterBookAdminWithoutTitle(Book, site)
request = self.request_factory.get('/', {})
six.assertRaisesRegex(self, ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_without_parameter(self):
"""
Any SimpleListFilter must define a parameter_name.
"""
modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
request = self.request_factory.get('/', {})
six.assertRaisesRegex(self, ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_none_returning_lookups(self):
"""
A SimpleListFilter lookups method can return None but disables the
filter completely.
"""
modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0]
self.assertEqual(len(filterspec), 0)
def test_filter_with_failing_queryset(self):
"""
Ensure that when a filter's queryset method fails, it fails loudly and
the corresponding exception doesn't get swallowed.
Refs #17828.
"""
modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
request = self.request_factory.get('/', {})
self.assertRaises(ZeroDivisionError, self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_queryset_based_lookups(self):
modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(len(choices), 3)
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'the 1990\'s')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+90s')
self.assertEqual(choices[2]['display'], 'the 2000\'s')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+00s')
def test_two_characters_long_field(self):
"""
Ensure that list_filter works with two-characters long field names.
Refs #16080.
"""
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'no': '207'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'number')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?no=207')
def test_parameter_ends_with__in__or__isnull(self):
"""
Ensure that a SimpleListFilter's parameter name is not mistaken for a
model field if it ends with '__isnull' or '__in'.
Refs #17091.
"""
# When it ends with '__in' -----------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
request = self.request_factory.get('/', {'decade__in': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__in=the+90s')
# When it ends with '__isnull' ---------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)
request = self.request_factory.get('/', {'decade__isnull': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__isnull=the+90s')
def test_fk_with_to_field(self):
"""
Ensure that a filter on a FK respects the FK's to_field attribute.
Refs #17972.
"""
modeladmin = EmployeeAdmin(Employee, site)
dev = Department.objects.create(code='DEV', description='Development')
design = Department.objects.create(code='DSN', description='Design')
john = Employee.objects.create(name='John Blue', department=dev)
jack = Employee.objects.create(name='Jack Red', department=design)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [jack, john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'Development')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], 'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
# Filter by Department=='Development' --------------------------------
request = self.request_factory.get('/', {'department__code__exact': 'DEV'})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], False)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'Development')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], 'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes used to enumerate surface sites
and to find adsorption sites on slabs.
"""
from __future__ import division, unicode_literals
from __future__ import absolute_import, print_function
import numpy as np
from six.moves import range
from pymatgen import Structure, Lattice, vis
import tempfile
import sys
import subprocess
import itertools
import os
from monty.serialization import loadfn
from scipy.spatial import Delaunay
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.analyzer import generate_full_symmops
from pymatgen.util.coord_utils import in_coord_list, in_coord_list_pbc
from pymatgen.core.sites import PeriodicSite
from pymatgen.analysis.structure_analyzer import VoronoiCoordFinder
from pymatgen.core.surface import generate_all_slabs
from matplotlib import patches
from matplotlib.path import Path
__author__ = "Joseph Montoya"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Joseph Montoya"
__credits__ = "Richard Tran"
__email__ = "montoyjh@lbl.gov"
__status__ = "Development"
__date__ = "December 2, 2015"
class AdsorbateSiteFinder(object):
"""
This class finds adsorbate sites on slabs and generates
adsorbate structures according to user-defined criteria.
The algorithm for finding sites is essentially as follows:
1. Determine "surface sites" by finding those within
a height threshold along the miller index of the
highest site
2. Create a network of surface sites using the Delaunay
triangulation of the surface sites
3. Assign on-top, bridge, and hollow adsorption sites
at the nodes, edges, and face centers of the Del.
Triangulation
4. Generate structures from a molecule positioned at
these sites
"""
def __init__(self, slab, selective_dynamics=False,
height=0.9, mi_vec=None):
"""
Create an AdsorbateSiteFinder object.
Args:
slab (Slab): slab object for which to find adsorbate sites
selective_dynamics (bool): flag for whether to assign
non-surface sites as fixed for selective dynamics
            height (float): height criterion for selection of surface sites
            mi_vec (3-D array-like): vector concurrent with the miller
                index; this enables use with slabs that have been
                reoriented, but the miller vector must then be supplied
                manually
"""
self.mi_string = ''.join([str(i) for i in slab.miller_index])
# get surface normal from miller index
if mi_vec:
self.mvec = mi_vec
else:
self.mvec = get_mi_vec(slab)
slab = self.assign_site_properties(slab, height)
if selective_dynamics:
slab = self.assign_selective_dynamics(slab)
self.slab = slab
@classmethod
def from_bulk_and_miller(cls, structure, miller_index, min_slab_size=8.0,
min_vacuum_size=10.0, max_normal_search=None,
center_slab = True, selective_dynamics=False,
undercoord_threshold = 0.09):
"""
This method constructs the adsorbate site finder from a bulk
structure and a miller index, which allows the surface sites
to be determined from the difference in bulk and slab coordination,
as opposed to the height threshold.
Args:
structure (Structure): structure from which slab
input to the ASF is constructed
miller_index (3-tuple or list): miller index to be used
min_slab_size (float): min slab size for slab generation
min_vacuum_size (float): min vacuum size for slab generation
max_normal_search (int): max normal search for slab generation
center_slab (bool): whether to center slab in slab generation
            selective_dynamics (bool): whether to assign non-surface
                sites as fixed for selective dynamics
            undercoord_threshold (float): threshold of "undercoordination"
                to use for the assignment of surface sites. Default is
                0.09, for which surface sites will be designated if they
                are 9% less coordinated than their bulk counterpart
"""
# TODO: for some reason this works poorly with primitive cells
# may want to switch the coordination algorithm eventually
vcf_bulk = VoronoiCoordFinder(structure)
bulk_coords = [len(vcf_bulk.get_coordinated_sites(n, tol=0.05))
for n in range(len(structure))]
struct = structure.copy(site_properties = {'bulk_coordinations':bulk_coords})
slabs = generate_all_slabs(struct, max_index=max(miller_index),
min_slab_size=min_slab_size,
min_vacuum_size=min_vacuum_size,
max_normal_search = max_normal_search,
center_slab = center_slab)
slab_dict = {slab.miller_index:slab for slab in slabs}
if miller_index not in slab_dict:
raise ValueError("Miller index not in slab dict")
this_slab = slab_dict[miller_index]
vcf_surface = VoronoiCoordFinder(this_slab, allow_pathological=True)
surf_props, undercoords = [], []
this_mi_vec = get_mi_vec(this_slab)
mi_mags = [np.dot(this_mi_vec, site.coords) for site in this_slab]
average_mi_mag = np.average(mi_mags)
for n, site in enumerate(this_slab):
bulk_coord = this_slab.site_properties['bulk_coordinations'][n]
slab_coord = len(vcf_surface.get_coordinated_sites(n, tol=0.05))
mi_mag = np.dot(this_mi_vec, site.coords)
undercoord = (bulk_coord - slab_coord)/bulk_coord
undercoords += [undercoord]
if undercoord > undercoord_threshold and mi_mag > average_mi_mag:
surf_props += ['surface']
else:
surf_props += ['subsurface']
new_site_properties = {'surface_properties':surf_props,
'undercoords':undercoords}
new_slab = this_slab.copy(site_properties=new_site_properties)
return cls(new_slab, selective_dynamics)
def find_surface_sites_by_height(self, slab, height=0.9, xy_tol=0.05):
"""
This method finds surface sites by determining which sites are within
a threshold value in height from the topmost site in a list of sites
Args:
            slab (Slab): slab from which to select surface sites
height (float): threshold in angstroms of distance from topmost
site in slab along the slab c-vector to include in surface
site determination
            xy_tol (float): if supplied, removes any site that is within
                this distance of another site in the miller plane.
Returns:
list of sites selected to be within a threshold of the highest
"""
# Get projection of coordinates along the miller index
m_projs = np.array([np.dot(site.coords, self.mvec)
for site in slab.sites])
# Mask based on window threshold along the miller index
mask = (m_projs - np.amax(m_projs)) >= -height
surf_sites = [slab.sites[n] for n in np.where(mask)[0]]
if xy_tol:
# sort surface sites by height
            surf_sites = [s for (h, s) in sorted(
                zip(m_projs[mask], surf_sites), key=lambda t: t[0])]
            surf_sites.reverse()
unique_sites, unique_perp_fracs = [], []
for site in surf_sites:
                this_perp = site.coords - np.dot(site.coords, self.mvec) * self.mvec
this_perp_frac = cart_to_frac(slab.lattice, this_perp)
if not in_coord_list_pbc(unique_perp_fracs, this_perp_frac):
unique_sites.append(site)
unique_perp_fracs.append(this_perp_frac)
surf_sites = unique_sites
return surf_sites
def assign_site_properties(self, slab, height=0.9):
"""
Assigns site properties.
"""
if 'surface_properties' in slab.site_properties.keys():
return slab
else:
surf_sites = self.find_surface_sites_by_height(slab, height)
surf_props = ['surface' if site in surf_sites
else 'subsurface' for site in slab.sites]
return slab.copy(
site_properties = {'surface_properties': surf_props})
def get_extended_surface_mesh(self, repeat=(5, 5, 1)):
"""
        Gets an extended surface mesh to use for adsorption site
        finding by constructing a supercell of the surface sites
Args:
repeat (3-tuple): repeat for getting extended surface mesh
"""
surf_str = Structure.from_sites(self.surface_sites)
surf_str.make_supercell(repeat)
return surf_str
@property
def surface_sites(self):
"""
convenience method to return a list of surface sites
"""
return [site for site in self.slab.sites
if site.properties['surface_properties']=='surface']
def subsurface_sites(self):
"""
convenience method to return list of subsurface sites
"""
return [site for site in self.slab.sites
if site.properties['surface_properties']=='subsurface']
def find_adsorption_sites(self, distance = 2.0, put_inside = True,
symm_reduce = 1e-2, near_reduce = 1e-2,
positions = ['ontop', 'bridge', 'hollow'],
no_obtuse_hollow = True):
"""
        Finds adsorption sites according to the above algorithm. Returns
        a dict of cartesian coordinates keyed by site type.
        Args:
            distance (float): distance from the coordinating ensemble
                of atoms along the miller index for the site (i.e.
the distance from the slab itself)
put_inside (bool): whether to put the site inside the cell
symm_reduce (float): symm reduction threshold
near_reduce (float): near reduction threshold
positions (list): which positions to include in the site finding
"ontop": sites on top of surface sites
"bridge": sites at edges between surface sites in Delaunay
triangulation of surface sites in the miller plane
"hollow": sites at centers of Delaunay triangulation faces
"subsurface": subsurface positions projected into miller plane
no_obtuse_hollow (bool): flag to indicate whether to include
obtuse triangular ensembles in hollow sites
"""
ads_sites = {k:[] for k in positions}
if 'ontop' in positions:
ads_sites['ontop'] = [s.coords for s in self.surface_sites]
if 'subsurface' in positions:
# Get highest site
ref = self.slab.sites[np.argmax(self.slab.cart_coords[:, 2])]
# Project diff between highest site and subs site into miller
ss_sites = [self.mvec*np.dot(ref.coords-s.coords, self.mvec)
+ s.coords for s in self.subsurface_sites()]
ads_sites['subsurface'] = ss_sites
if 'bridge' in positions or 'hollow' in positions:
mesh = self.get_extended_surface_mesh()
sop = get_rot(self.slab)
dt = Delaunay([sop.operate(m.coords)[:2] for m in mesh])
# TODO: refactor below to properly account for >3-fold
for v in dt.simplices:
if -1 not in v:
dots = []
for i_corner, i_opp in zip(range(3), ((1,2), (0,2), (0,1))):
corner, opp = v[i_corner], [v[o] for o in i_opp]
vecs = [mesh[d].coords - mesh[corner].coords for d in opp]
vecs = [vec / np.linalg.norm(vec) for vec in vecs]
dots.append(np.dot(*vecs))
# Add bridge sites at midpoints of edges of D. Tri
if 'bridge' in positions:
ads_sites["bridge"].append(
self.ensemble_center(mesh, opp))
# Prevent addition of hollow sites in obtuse triangles
obtuse = no_obtuse_hollow and (np.array(dots) < 1e-5).any()
# Add hollow sites at centers of D. Tri faces
if 'hollow' in positions and not obtuse:
ads_sites['hollow'].append(
self.ensemble_center(mesh, v))
ads_sites['all'] = sum(ads_sites.values(), [])
for key, sites in ads_sites.items():
# Pare off outer sites for bridge/hollow
if key in ['bridge', 'hollow']:
frac_coords = [cart_to_frac(self.slab.lattice, ads_site)
for ads_site in sites]
frac_coords = [frac_coord for frac_coord in frac_coords
if (frac_coord[0]>1 and frac_coord[0]<4
and frac_coord[1]>1 and frac_coord[1]<4)]
sites = [frac_to_cart(self.slab.lattice, frac_coord)
for frac_coord in frac_coords]
if near_reduce:
sites = self.near_reduce(sites, threshold=near_reduce)
if put_inside:
sites = [put_coord_inside(self.slab.lattice, coord)
for coord in sites]
if symm_reduce:
sites = self.symm_reduce(sites, threshold=symm_reduce)
sites = [site + distance*self.mvec for site in sites]
ads_sites[key] = sites
return ads_sites
def symm_reduce(self, coords_set, threshold = 1e-6):
"""
        Reduces the set of adsorbate sites by finding and removing
        symmetrically equivalent duplicates
Args:
coords_set: coordinate set in cartesian coordinates
threshold: tolerance for distance equivalence, used
as input to in_coord_list_pbc for dupl. checking
"""
surf_sg = SpacegroupAnalyzer(self.slab, 0.1)
symm_ops = surf_sg.get_symmetry_operations()
unique_coords = []
# Convert to fractional
coords_set = [cart_to_frac(self.slab.lattice, coords)
for coords in coords_set]
for coords in coords_set:
incoord = False
for op in symm_ops:
if in_coord_list_pbc(unique_coords, op.operate(coords),
atol = threshold):
incoord = True
break
if not incoord:
unique_coords += [coords]
# convert back to cartesian
return [frac_to_cart(self.slab.lattice, coords)
for coords in unique_coords]
def near_reduce(self, coords_set, threshold = 1e-4):
"""
        Prunes a coordinate set, removing coordinates that are within
        threshold of one another
Args:
coords_set (Nx3 array-like): list or array of coordinates
threshold (float): threshold value for distance
"""
unique_coords = []
coords_set = [cart_to_frac(self.slab.lattice, coords)
for coords in coords_set]
for coord in coords_set:
if not in_coord_list_pbc(unique_coords, coord, threshold):
unique_coords += [coord]
return [frac_to_cart(self.slab.lattice, coords)
for coords in unique_coords]
def ensemble_center(self, site_list, indices, cartesian = True):
"""
Finds the center of an ensemble of sites selected from
a list of sites. Helper method for the find_adsorption_sites
algorithm.
Args:
site_list (list of sites): list of sites
            indices (list of ints): indices of the sites in site_list
                to include in the ensemble
            cartesian (bool): whether to return the average as a
                cartesian (True) or fractional (False) coordinate
"""
if cartesian:
return np.average([site_list[i].coords for i in indices],
axis = 0)
else:
return np.average([site_list[i].frac_coords for i in indices],
axis = 0)
def add_adsorbate(self, molecule, ads_coord, repeat=None, reorient=True):
"""
        Adds an adsorbate at a particular coordinate. The adsorbate is
        represented by a Molecule object and is positioned relative
        to the input adsorbate coordinate.
Args:
molecule (Molecule): molecule object representing the adsorbate
ads_coord (array): coordinate of adsorbate position
repeat (3-tuple or list): input for making a supercell of slab
prior to placing the adsorbate
reorient (bool): flag on whether to reorient the molecule to
have its z-axis concurrent with miller index
"""
if reorient:
# Reorient the molecule along slab m_index
sop = get_rot(self.slab)
molecule.apply_operation(sop.inverse)
struct = self.slab.copy()
if repeat:
struct.make_supercell(repeat)
if 'surface_properties' in struct.site_properties.keys():
molecule.add_site_property("surface_properties",
["adsorbate"] * molecule.num_sites)
if 'selective_dynamics' in struct.site_properties.keys():
molecule.add_site_property("selective_dynamics",
[[True, True, True]] * molecule.num_sites)
for site in molecule:
struct.append(site.specie, ads_coord + site.coords, coords_are_cartesian = True,
properties = site.properties)
return struct
def assign_selective_dynamics(self, slab):
"""
Helper function to assign selective dynamics site_properties
based on surface, subsurface site properties
Args:
slab (Slab): slab for which to assign selective dynamics
"""
        sd_list = [[False, False, False] if site.properties['surface_properties']=='subsurface'
                   else [True, True, True] for site in slab.sites]
new_sp = slab.site_properties
new_sp['selective_dynamics'] = sd_list
return slab.copy(site_properties = new_sp)
def generate_adsorption_structures(self, molecule, repeat=None, min_lw=5.0,
reorient = True, find_args={}):
"""
Function that generates all adsorption structures for a given
molecular adsorbate. Can take repeat argument or minimum
length/width of precursor slab as an input
Args:
molecule (Molecule): molecule corresponding to adsorbate
repeat (3-tuple or list): repeat argument for supercell generation
min_lw (float): minimum length and width of the slab, only used
if repeat is None
reorient (bool): flag on whether or not to reorient adsorbate
along the miller index
find_args (dict): dictionary of arguments to be passed to the
call to self.find_adsorption_sites, e.g. {"distance":2.0}
"""
if repeat is None:
xrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[0]))
yrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[1]))
repeat = [xrep, yrep, 1]
structs = []
for coords in self.find_adsorption_sites(**find_args)['all']:
structs.append(self.add_adsorbate(
molecule, coords, repeat=repeat, reorient=reorient))
return structs
def get_mi_vec(slab):
"""
Convenience function which returns the unit vector aligned
with the miller index.
"""
mvec = np.cross(slab.lattice.matrix[0], slab.lattice.matrix[1])
return mvec / np.linalg.norm(mvec)
def get_rot(slab):
"""
Gets the transformation to rotate the z axis into the miller index
"""
new_z = get_mi_vec(slab)
a, b, c = slab.lattice.matrix
new_x = a / np.linalg.norm(a)
new_y = np.cross(new_z, new_x)
x, y, z = np.eye(3)
rot_matrix = np.array([np.dot(*el) for el in
itertools.product([x, y, z],
[new_x, new_y, new_z])]).reshape(3,3)
rot_matrix = np.transpose(rot_matrix)
sop = SymmOp.from_rotation_and_translation(rot_matrix)
return sop
def put_coord_inside(lattice, cart_coordinate):
"""
converts a cartesian coordinate such that it is inside the unit cell.
"""
fc = cart_to_frac(lattice, cart_coordinate)
return frac_to_cart(lattice, [c - np.floor(c) for c in fc])
def reorient_z(structure):
"""
reorients a structure such that the z axis is concurrent with the
normal to the A-B plane
"""
struct = structure.copy()
sop = get_rot(struct)
struct.apply_operation(sop)
return struct
def frac_to_cart(lattice, frac_coord):
"""
converts fractional coordinates to cartesian
"""
return np.dot(np.transpose(lattice.matrix), frac_coord)
def cart_to_frac(lattice, cart_coord):
"""
converts cartesian coordinates to fractional
"""
return np.dot(np.linalg.inv(np.transpose(lattice.matrix)), cart_coord)
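# Worked example (illustrative, not part of the original module): the rows of
# lattice.matrix are the lattice vectors, so cart = matrix^T . frac, hence the
# transposes above. For a simple cubic lattice of parameter 4.0:
#
#   lattice = Lattice.cubic(4.0)
#   frac_to_cart(lattice, [0.5, 0.5, 0.0])  # -> [2.0, 2.0, 0.0]
#   cart_to_frac(lattice, [2.0, 2.0, 0.0])  # -> [0.5, 0.5, 0.0]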
# Get color dictionary
colors = loadfn(os.path.join(os.path.dirname(vis.__file__),
"ElementColorSchemes.yaml"))
color_dict = {el:[j / 256.001 for j in colors["Jmol"][el]]
for el in colors["Jmol"].keys()}
def plot_slab(slab, ax, scale=0.8, repeat=5, window=1.5,
draw_unit_cell=True, decay = 0.2, adsorption_sites=True):
"""
Function that helps visualize the slab in a 2-D plot, for
convenient viewing of output of AdsorbateSiteFinder.
Args:
slab (slab): Slab object to be visualized
ax (axes): matplotlib axes with which to visualize
scale (float): radius scaling for sites
repeat (int): number of repeating unit cells to visualize
window (float): window for setting the axes limits, is essentially
a fraction of the unit cell limits
draw_unit_cell (bool): flag indicating whether or not to draw cell
        decay (float): how the alpha-value decays along the z-axis
        adsorption_sites (bool): whether to plot the adsorption sites
            found by AdsorbateSiteFinder on top of the slab
    """
orig_slab = slab.copy()
slab = reorient_z(slab)
orig_cell = slab.lattice.matrix.copy()
if repeat:
slab.make_supercell([repeat, repeat, 1])
coords = np.array(sorted(slab.cart_coords, key=lambda x: x[2]))
sites = sorted(slab.sites, key = lambda x: x.coords[2])
alphas = 1 - decay*(np.max(coords[:, 2]) - coords[:, 2])
alphas = alphas.clip(min=0)
corner = [0, 0, cart_to_frac(slab.lattice, coords[-1])[-1]]
corner = frac_to_cart(slab.lattice, corner)[:2]
verts = orig_cell[:2, :2]
lattsum = verts[0]+verts[1]
# Draw circles at sites and stack them accordingly
for n, coord in enumerate(coords):
r = sites[n].specie.atomic_radius*scale
ax.add_patch(patches.Circle(coord[:2]-lattsum*(repeat//2),
r, color='w', zorder=2*n))
color = color_dict[sites[n].species_string]
ax.add_patch(patches.Circle(coord[:2]-lattsum*(repeat//2), r,
facecolor=color, alpha=alphas[n],
edgecolor='k', lw=0.3, zorder=2*n+1))
# Adsorption sites
if adsorption_sites:
asf = AdsorbateSiteFinder(orig_slab)
ads_sites = asf.find_adsorption_sites()['all']
sop = get_rot(orig_slab)
ads_sites = [sop.operate(ads_site)[:2].tolist()
for ads_site in ads_sites]
ax.plot(*zip(*ads_sites), color='k', marker='x',
markersize=10, mew=1, linestyle='', zorder=10000)
# Draw unit cell
if draw_unit_cell:
verts = np.insert(verts, 1, lattsum, axis=0).tolist()
verts += [[0., 0.]]
verts = [[0., 0.]] + verts
codes = [Path.MOVETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
verts = [(np.array(vert) + corner).tolist() for vert in verts]
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor='none',lw=2,
alpha = 0.5, zorder=2*n+2)
ax.add_patch(patch)
ax.set_aspect("equal")
center = corner + lattsum / 2.
extent = np.max(lattsum)
lim_array = [center-extent*window, center+extent*window]
x_lim = [ele[0] for ele in lim_array]
y_lim = [ele[1] for ele in lim_array]
ax.set_xlim(x_lim)
ax.set_ylim(y_lim)
return ax
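# Minimal usage sketch (illustrative only; the Ni lattice parameter, miller
# index and adsorbate are assumptions, not part of the original module):
if __name__ == "__main__":
    from pymatgen import Molecule
    import matplotlib.pyplot as plt
    # Build an fcc Ni bulk cell and construct the site finder from it
    bulk = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.52), ["Ni"],
                                     [[0, 0, 0]])
    asf = AdsorbateSiteFinder.from_bulk_and_miller(bulk, (1, 1, 1))
    sites = asf.find_adsorption_sites()
    print("Found %d candidate adsorption sites" % len(sites['all']))
    # Generate adsorbed-H structures and visualize the sites on the slab
    h_atom = Molecule(["H"], [[0, 0, 0]])
    ads_structs = asf.generate_adsorption_structures(h_atom, repeat=[2, 2, 1])
    fig, ax = plt.subplots()
    plot_slab(asf.slab, ax, adsorption_sites=True)
    fig.savefig("slab_sites.png")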
|
|
# Copyright (c) 2014 The mitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-mitcoinrpc to module search path:
import os
import sys
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
mitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "mitcoin.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
mitcoind and mitcoin-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run mitcoinds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("mitcoinD", "mitcoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
mitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: mitcoind started, calling mitcoin-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("mitcoinCLI", "mitcoin-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: mitcoin-cli -rpcwait getblockcount completed"
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_mitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in mitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
    match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
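# Example (illustrative): _rpchost_to_args("[::1]:19001") returns
# ['-rpcconnect=::1', '-rpcport=19001'], _rpchost_to_args("127.0.0.1") returns
# ['-rpcconnect=127.0.0.1'], and _rpchost_to_args(None) returns [].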
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a mitcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("mitcoinD", "mitcoind")
args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
mitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: mitcoind started, calling mitcoin-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("mitcoinCLI", "mitcoin-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: calling mitcoin-cli -rpcwait getblockcount returned"
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
if timewait is not None:
proxy = AuthServiceProxy(url, timeout=timewait)
else:
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple mitcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
if binary is None: binary = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
mitcoind_processes[i].wait()
del mitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_mitcoinds():
# Wait for all mitcoinds to cleanly exit
for mitcoind in mitcoind_processes.values():
mitcoind.wait()
mitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
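# Worked example (illustrative): with amount_in=50, amount_out=10 and
# fee=0.001, change is 50 - 10.001 = 39.999, which exceeds 2*10.001, so an
# extra change output of 19.9995 is created and the remaining 19.9995 goes to
# a second change address.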
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import textwrap
import mock
import pep8
from cinder.hacking import checks
from cinder import test
@ddt.ddt
class HackingTestCase(test.TestCase):
"""This class tests the hacking checks in cinder.hacking.checks
This class ensures that Cinder's hacking checks are working by passing
strings to the check methods like the pep8/flake8 parser would. The parser
loops over each line in the file and then passes the parameters to the
check method. The parameter names in the check method dictate what type of
object is passed to the check method. The parameter types are::
logical_line: A processed line with the following modifications:
- Multi-line statements converted to a single line.
- Stripped left and right.
- Contents of strings replaced with "xxx" of same length.
- Comments removed.
physical_line: Raw line of text from the input file.
lines: a list of the raw lines from the input file
tokens: the tokens that contribute to this logical line
line_number: line number in the input file
total_lines: number of lines in the input file
blank_lines: blank lines before this one
indent_char: indentation character in this file (" " or "\t")
indent_level: indentation (with tabs expanded to multiples of 8)
previous_indent_level: indentation on previous line
previous_logical: previous logical line
filename: Path of the file being run through pep8
    When running a test on a check method, the return will be False/None if
    there is no violation in the sample input. If there is an error, a tuple is
    returned with a position in the line and a message. So to check the result,
    just assertTrue if the check is expected to fail and assertFalse if it
    should pass.
"""
def test_no_vi_headers(self):
lines = ['Line 1\n', 'Line 2\n', 'Line 3\n', 'Line 4\n', 'Line 5\n',
'Line 6\n', 'Line 7\n', 'Line 8\n', 'Line 9\n', 'Line 10\n',
'Line 11\n']
self.assertEqual(None, checks.no_vi_headers(
"Test string foo", 1, lines))
self.assertEqual(2, len(list(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
2, lines))))
self.assertEqual(2, len(list(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
8, lines))))
self.assertEqual(None, checks.no_vi_headers(
"Test end string for vi",
9, lines))
# vim header outside of boundary (first/last 5 lines)
self.assertEqual(None, checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
6, lines))
def test_no_translate_debug_logs(self):
self.assertEqual(1, len(list(checks.no_translate_debug_logs(
"LOG.debug(_('foo'))", "cinder/scheduler/foo.py"))))
self.assertEqual(0, len(list(checks.no_translate_debug_logs(
"LOG.debug('foo')", "cinder/scheduler/foo.py"))))
self.assertEqual(0, len(list(checks.no_translate_debug_logs(
"LOG.info(_('foo'))", "cinder/scheduler/foo.py"))))
def test_check_explicit_underscore_import(self):
self.assertEqual(1, len(list(checks.check_explicit_underscore_import(
"LOG.info(_('My info message'))",
"cinder.tests.unit/other_files.py"))))
self.assertEqual(1, len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder.tests.unit/other_files.py"))))
self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
"from cinder.i18n import _",
"cinder.tests.unit/other_files.py"))))
self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
"LOG.info(_('My info message'))",
"cinder.tests.unit/other_files.py"))))
self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder.tests.unit/other_files.py"))))
self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
"from cinder.i18n import _, _LW",
"cinder.tests.unit/other_files2.py"))))
self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder.tests.unit/other_files2.py"))))
self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
"_ = translations.ugettext",
"cinder.tests.unit/other_files3.py"))))
self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder.tests.unit/other_files3.py"))))
# Complete code coverage by falling through all checks
self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
"LOG.info('My info message')",
"cinder.tests.unit/other_files4.py"))))
# We are patching pep8 so that only the check under test is actually
# installed.
@mock.patch('pep8._checks',
{'physical_line': {}, 'logical_line': {}, 'tree': {}})
def _run_check(self, code, checker, filename=None):
pep8.register_check(checker)
lines = textwrap.dedent(code).strip().splitlines(True)
checker = pep8.Checker(filename=filename, lines=lines)
checker.check_all()
checker.report._deferred_print.sort()
return checker.report._deferred_print
def _assert_has_errors(self, code, checker, expected_errors=None,
filename=None):
actual_errors = [e[:3] for e in
self._run_check(code, checker, filename)]
self.assertEqual(expected_errors or [], actual_errors)
def _assert_has_no_errors(self, code, checker, filename=None):
self._assert_has_errors(code, checker, filename=filename)
def test_str_unicode_exception(self):
checker = checks.CheckForStrUnicodeExc
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
p = str(e)
return p
"""
errors = [(5, 16, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
try:
p = unicode(a) + str(b)
except ValueError as e:
p = e
return p
"""
self._assert_has_no_errors(code, checker)
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
p = unicode(e)
return p
"""
errors = [(5, 20, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
try:
p = unicode(a) + unicode(b)
except ValueError as ve:
p = str(e) + str(ve)
p = e
return p
"""
errors = [(8, 20, 'N325'), (8, 29, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
try:
p = unicode(a) + unicode(b)
except ValueError as ve:
p = str(e) + unicode(ve)
p = str(e)
return p
"""
errors = [(8, 20, 'N325'), (8, 33, 'N325'), (9, 16, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
def test_check_no_log_audit(self):
self.assertEqual(1, len(list(checks.check_no_log_audit(
"LOG.audit('My test audit log')"))))
self.assertEqual(0, len(list(checks.check_no_log_audit(
"LOG.info('My info test log.')"))))
def test_no_mutable_default_args(self):
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"def foo (bar):"))))
self.assertEqual(1, len(list(checks.no_mutable_default_args(
"def foo (bar=[]):"))))
self.assertEqual(1, len(list(checks.no_mutable_default_args(
"def foo (bar={}):"))))
def test_oslo_namespace_imports_check(self):
self.assertEqual(1, len(list(checks.check_oslo_namespace_imports(
"from oslo.concurrency import foo"))))
self.assertEqual(0, len(list(checks.check_oslo_namespace_imports(
"from oslo_concurrency import bar"))))
self.assertEqual(1, len(list(checks.check_oslo_namespace_imports(
"from oslo.db import foo"))))
self.assertEqual(0, len(list(checks.check_oslo_namespace_imports(
"from oslo_db import bar"))))
self.assertEqual(1, len(list(checks.check_oslo_namespace_imports(
"from oslo.config import foo"))))
self.assertEqual(0, len(list(checks.check_oslo_namespace_imports(
"from oslo_config import bar"))))
self.assertEqual(1, len(list(checks.check_oslo_namespace_imports(
"from oslo.utils import foo"))))
self.assertEqual(0, len(list(checks.check_oslo_namespace_imports(
"from oslo_utils import bar"))))
self.assertEqual(1, len(list(checks.check_oslo_namespace_imports(
"from oslo.serialization import foo"))))
self.assertEqual(0, len(list(checks.check_oslo_namespace_imports(
"from oslo_serialization import bar"))))
self.assertEqual(1, len(list(checks.check_oslo_namespace_imports(
"from oslo.log import foo"))))
self.assertEqual(0, len(list(checks.check_oslo_namespace_imports(
"from oslo_log import bar"))))
def test_no_contextlib_nested(self):
self.assertEqual(1, len(list(checks.check_no_contextlib_nested(
"with contextlib.nested("))))
self.assertEqual(1, len(list(checks.check_no_contextlib_nested(
" with nested("))))
self.assertEqual(0, len(list(checks.check_no_contextlib_nested(
"with my.nested("))))
self.assertEqual(0, len(list(checks.check_no_contextlib_nested(
"with foo as bar"))))
def test_check_datetime_now(self):
self.assertEqual(1, len(list(checks.check_datetime_now(
"datetime.now", False))))
self.assertEqual(0, len(list(checks.check_datetime_now(
"timeutils.utcnow", False))))
def test_check_datetime_now_noqa(self):
self.assertEqual(0, len(list(checks.check_datetime_now(
"datetime.now() # noqa", True))))
def test_check_timeutils_strtime(self):
self.assertEqual(1, len(list(checks.check_timeutils_strtime(
"timeutils.strtime"))))
self.assertEqual(0, len(list(checks.check_timeutils_strtime(
"strftime"))))
def test_validate_log_translations(self):
self.assertEqual(1, len(list(checks.validate_log_translations(
"LOG.info('foo')", "foo.py"))))
self.assertEqual(1, len(list(checks.validate_log_translations(
"LOG.warning('foo')", "foo.py"))))
self.assertEqual(1, len(list(checks.validate_log_translations(
"LOG.error('foo')", "foo.py"))))
self.assertEqual(1, len(list(checks.validate_log_translations(
"LOG.exception('foo')", "foo.py"))))
self.assertEqual(0, len(list(checks.validate_log_translations(
"LOG.info('foo')", "cinder/tests/foo.py"))))
self.assertEqual(0, len(list(checks.validate_log_translations(
"LOG.info(_LI('foo')", "foo.py"))))
self.assertEqual(0, len(list(checks.validate_log_translations(
"LOG.warning(_LW('foo')", "foo.py"))))
self.assertEqual(0, len(list(checks.validate_log_translations(
"LOG.error(_LE('foo')", "foo.py"))))
self.assertEqual(0, len(list(checks.validate_log_translations(
"LOG.exception(_LE('foo')", "foo.py"))))
def test_check_unicode_usage(self):
self.assertEqual(1, len(list(checks.check_unicode_usage(
"unicode(msg)", False))))
self.assertEqual(0, len(list(checks.check_unicode_usage(
"unicode(msg) # noqa", True))))
def test_no_print_statements(self):
self.assertEqual(0, len(list(checks.check_no_print_statements(
"a line with no print statement",
"cinder/file.py", False))))
self.assertEqual(1, len(list(checks.check_no_print_statements(
"print('My print statement')",
"cinder/file.py", False))))
self.assertEqual(0, len(list(checks.check_no_print_statements(
"print('My print statement in cinder/cmd, which is ok.')",
"cinder/cmd/file.py", False))))
self.assertEqual(0, len(list(checks.check_no_print_statements(
"print('My print statement that I just must have.')",
"cinder.tests.unit/file.py", True))))
self.assertEqual(1, len(list(checks.check_no_print_statements(
"print ('My print with space')",
"cinder/volume/anotherFile.py", False))))
def test_dict_constructor_with_list_copy(self):
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dict([(i, connect_info[i])"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" attrs = dict([(k, _from_json(v))"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" type_names = dict((value, key) for key, value in"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dict((value, key) for key, value in"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
"foo(param=dict((k, v) for k, v in bar.items()))"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dict([[i,i] for i in range(3)])"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dd = dict([i,i] for i in range(3))"))))
self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
" dict()"))))
self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
" create_kwargs = dict(snapshot=snapshot,"))))
self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
" self._render_dict(xml, data_el, data.__dict__)"))))
@ddt.unpack
@ddt.data(
(1, 'LOG.info', "cinder/tests/unit/fake.py", False),
(1, 'LOG.warning', "cinder/tests/fake.py", False),
(1, 'LOG.error', "cinder/tests/fake.py", False),
(1, 'LOG.exception', "cinder/tests/fake.py", False),
(1, 'LOG.debug', "cinder/tests/fake.py", False),
(0, 'LOG.info.assert_called_once_with', "cinder/tests/fake.py", False),
(0, 'some.LOG.error.call', "cinder/tests/fake.py", False),
(0, 'LOG.warning', "cinder/tests/unit/fake.py", True),
(0, 'LOG.warning', "cinder/tests/unit/integrated/fake.py", False))
def test_no_test_log(self, first, second, third, fourth):
self.assertEqual(first, len(list(checks.no_test_log(
"%s('arg')" % second, third, fourth))))
|
|
# Status: being ported by Steven Watanabe
# Base revision: 47077
# TODO: common.jam needs to be ported
# TODO: generators.jam needs to have register_c_compiler.
#
# Copyright 2001 David Abrahams.
# Copyright 2002-2006 Rene Rivera.
# Copyright 2002-2003 Vladimir Prus.
# Copyright (c) 2005 Reece H. Dunn.
# Copyright 2006 Ilya Sokolov.
# Copyright 2007 Roland Schwarz
# Copyright 2007 Boris Gubenko.
# Copyright 2008 Steven Watanabe
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
import subprocess
import re
import bjam
from b2.tools import unix, common, rc, pch, builtin
from b2.build import feature, type, toolset, generators, property_set
from b2.util.utility import os_name, on_windows
from b2.manager import get_manager
from b2.build.generators import Generator
from b2.build.toolset import flags
from b2.util.utility import to_seq
__debug = None
def debug():
global __debug
if __debug is None:
__debug = "--debug-configuration" in bjam.variable("ARGV")
return __debug
feature.extend('toolset', ['gcc'])
toolset.inherit_generators('gcc', [], 'unix', ['unix.link', 'unix.link.dll'])
toolset.inherit_flags('gcc', 'unix')
toolset.inherit_rules('gcc', 'unix')
generators.override('gcc.prebuilt', 'builtin.prebuilt')
generators.override('gcc.searched-lib-generator', 'searched-lib-generator')
# Target naming is determined by types/lib.jam and the settings below this
# comment.
#
# On *nix:
# libxxx.a static library
# libxxx.so shared library
#
# On windows (mingw):
# libxxx.lib static library
# xxx.dll DLL
# xxx.lib import library
#
# On windows (cygwin) i.e. <target-os>cygwin
# libxxx.a static library
# xxx.dll DLL
# libxxx.dll.a import library
#
# Note: user can always override by using the <tag>@rule
# These settings have been chosen so that mingw
# is in line with msvc naming conventions. For
# cygwin the cygwin naming convention has been chosen.
# Make the "o" suffix used for gcc toolset on all
# platforms
type.set_generated_target_suffix('OBJ', ['<toolset>gcc'], 'o')
type.set_generated_target_suffix('STATIC_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'a')
type.set_generated_target_suffix('IMPORT_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'dll.a')
type.set_generated_target_prefix('IMPORT_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'lib')
__machine_match = re.compile('^([^ ]+)')
__version_match = re.compile('^([0-9.]+)')
def init(version = None, command = None, options = None):
"""
Initializes the gcc toolset for the given version. If necessary, command may
be used to specify where the compiler is located. The parameter 'options' is a
space-delimited list of options, each one specified as
<option-name>option-value. Valid option names are: cxxflags, linkflags and
linker-type. Accepted linker-type values are gnu, darwin, osf, hpux or sun
and the default value will be selected based on the current OS.
Example:
using gcc : 3.4 : : <cxxflags>foo <linkflags>bar <linker-type>sun ;
"""
options = to_seq(options)
command = to_seq(command)
# Information about the gcc command...
# The command.
command = to_seq(common.get_invocation_command('gcc', 'g++', command))
# The root directory of the tool install.
    root = feature.get_values('<root>', options)
# The bin directory where to find the command to execute.
bin = None
# The flavor of compiler.
flavor = feature.get_values('<flavor>', options)
# Autodetect the root and bin dir if not given.
if command:
if not bin:
bin = common.get_absolute_tool_path(command[-1])
if not root:
root = os.path.dirname(bin)
# Autodetect the version and flavor if not given.
if command:
machine_info = subprocess.Popen(command + ['-dumpmachine'], stdout=subprocess.PIPE).communicate()[0]
machine = __machine_match.search(machine_info).group(1)
version_info = subprocess.Popen(command + ['-dumpversion'], stdout=subprocess.PIPE).communicate()[0]
version = __version_match.search(version_info).group(1)
if not flavor and machine.find('mingw') != -1:
flavor = 'mingw'
condition = None
if flavor:
condition = common.check_init_parameters('gcc', None,
('version', version),
('flavor', flavor))
else:
condition = common.check_init_parameters('gcc', None,
('version', version))
if command:
command = command[0]
common.handle_options('gcc', condition, command, options)
linker = feature.get_values('<linker-type>', options)
if not linker:
if os_name() == 'OSF':
linker = 'osf'
elif os_name() == 'HPUX':
            linker = 'hpux'
else:
linker = 'gnu'
init_link_flags('gcc', linker, condition)
# If gcc is installed in non-standard location, we'd need to add
# LD_LIBRARY_PATH when running programs created with it (for unit-test/run
# rules).
if command:
# On multilib 64-bit boxes, there are both 32-bit and 64-bit libraries
# and all must be added to LD_LIBRARY_PATH. The linker will pick the
        # right ones. Note that we don't provide a clean way to build 32-bit
# binary with 64-bit compiler, but user can always pass -m32 manually.
lib_path = [os.path.join(root, 'bin'),
os.path.join(root, 'lib'),
os.path.join(root, 'lib32'),
os.path.join(root, 'lib64')]
if debug():
print 'notice: using gcc libraries ::', condition, '::', lib_path
toolset.flags('gcc.link', 'RUN_PATH', condition, lib_path)
# If it's not a system gcc install we should adjust the various programs as
# needed to prefer using the install specific versions. This is essential
# for correct use of MinGW and for cross-compiling.
# - The archive builder.
archiver = common.get_invocation_command('gcc',
'ar', feature.get_values('<archiver>', options), [bin], path_last=True)
toolset.flags('gcc.archive', '.AR', condition, [archiver])
if debug():
print 'notice: using gcc archiver ::', condition, '::', archiver
# - The resource compiler.
rc_command = common.get_invocation_command_nodefault('gcc',
'windres', feature.get_values('<rc>', options), [bin], path_last=True)
rc_type = feature.get_values('<rc-type>', options)
if not rc_type:
rc_type = 'windres'
if not rc_command:
# If we can't find an RC compiler we fallback to a null RC compiler that
# creates empty object files. This allows the same Jamfiles to work
# across the board. The null RC uses the assembler to create the empty
# objects, so configure that.
rc_command = common.get_invocation_command('gcc', 'as', [], [bin], path_last=True)
rc_type = 'null'
rc.configure(rc_command, condition, '<rc-type>' + rc_type)
###if [ os.name ] = NT
###{
### # This causes single-line command invocation to not go through .bat files,
### # thus avoiding command-line length limitations.
### JAMSHELL = % ;
###}
#FIXME: when register_c_compiler is moved to
# generators, these should be updated
builtin.register_c_compiler('gcc.compile.c++', ['CPP'], ['OBJ'], ['<toolset>gcc'])
builtin.register_c_compiler('gcc.compile.c', ['C'], ['OBJ'], ['<toolset>gcc'])
builtin.register_c_compiler('gcc.compile.asm', ['ASM'], ['OBJ'], ['<toolset>gcc'])
# pch support
# The compiler looks for a precompiled header in each directory just before it
# looks for the include file in that directory. The name searched for is the
# name specified in the #include directive with ".gch" suffix appended. The
# logic in gcc-pch-generator will make sure that BASE_PCH suffix is appended to
# full name of the header.
type.set_generated_target_suffix('PCH', ['<toolset>gcc'], 'gch')
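# For example (illustrative), with pch enabled a translation unit containing
# '#include "all.hpp"' will make gcc look for "all.hpp.gch" next to the header
# before falling back to the plain "all.hpp".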
# GCC-specific pch generator.
class GccPchGenerator(pch.PchGenerator):
# Inherit the __init__ method
def run_pch(self, project, name, prop_set, sources):
# Find the header in sources. Ignore any CPP sources.
header = None
for s in sources:
if type.is_derived(s.type, 'H'):
header = s
# Error handling: Base header file name should be the same as the base
# precompiled header name.
header_name = header.name
header_basename = os.path.basename(header_name).rsplit('.', 1)[0]
if header_basename != name:
location = project.project_module
###FIXME:
raise Exception()
### errors.user-error "in" $(location)": pch target name `"$(name)"' should be the same as the base name of header file `"$(header-name)"'" ;
pch_file = Generator.run(self, project, name, prop_set, [header])
# return result of base class and pch-file property as usage-requirements
# FIXME: what about multiple results from generator.run?
return (property_set.create('<pch-file>' + pch_file[0], '<cflags>-Winvalid-pch'),
pch_file)
# Calls the base version specifying source's name as the name of the created
# target. As result, the PCH will be named whatever.hpp.gch, and not
# whatever.gch.
def generated_targets(self, sources, prop_set, project, name = None):
name = sources[0].name
return Generator.generated_targets(self, sources,
prop_set, project, name)
# Note: the 'H' source type will catch both '.h' header and '.hpp' header. The
# latter have HPP type, but HPP type is derived from H. The type of compilation
# is determined entirely by the destination type.
generators.register(GccPchGenerator('gcc.compile.c.pch', False, ['H'], ['C_PCH'], ['<pch>on', '<toolset>gcc' ]))
generators.register(GccPchGenerator('gcc.compile.c++.pch', False, ['H'], ['CPP_PCH'], ['<pch>on', '<toolset>gcc' ]))
# Override default do-nothing generators.
generators.override('gcc.compile.c.pch', 'pch.default-c-pch-generator')
generators.override('gcc.compile.c++.pch', 'pch.default-cpp-pch-generator')
flags('gcc.compile', 'PCH_FILE', ['<pch>on'], ['<pch-file>'])
# Declare flags and action for compilation
flags('gcc.compile', 'OPTIONS', ['<optimization>off'], ['-O0'])
flags('gcc.compile', 'OPTIONS', ['<optimization>speed'], ['-O3'])
flags('gcc.compile', 'OPTIONS', ['<optimization>space'], ['-Os'])
flags('gcc.compile', 'OPTIONS', ['<inlining>off'], ['-fno-inline'])
flags('gcc.compile', 'OPTIONS', ['<inlining>on'], ['-Wno-inline'])
flags('gcc.compile', 'OPTIONS', ['<inlining>full'], ['-finline-functions', '-Wno-inline'])
flags('gcc.compile', 'OPTIONS', ['<warnings>off'], ['-w'])
flags('gcc.compile', 'OPTIONS', ['<warnings>on'], ['-Wall'])
flags('gcc.compile', 'OPTIONS', ['<warnings>all'], ['-Wall', '-pedantic'])
flags('gcc.compile', 'OPTIONS', ['<warnings-as-errors>on'], ['-Werror'])
flags('gcc.compile', 'OPTIONS', ['<debug-symbols>on'], ['-g'])
flags('gcc.compile', 'OPTIONS', ['<profiling>on'], ['-pg'])
flags('gcc.compile', 'OPTIONS', ['<rtti>off'], ['-fno-rtti'])
# On cygwin and mingw, gcc generates position independent code by default, and
# warns if -fPIC is specified. This might not be the right way of checking if
# we're using cygwin. For example, it's possible to run cygwin gcc from NT
# shell, or using crosscompiling. But we'll solve that problem when it's time.
# In that case we'll just add another parameter to 'init' and move this logic
# inside 'init'.
if os_name() not in ['CYGWIN', 'NT']:
# This logic will add -fPIC for all compilations:
#
# lib a : a.cpp b ;
# obj b : b.cpp ;
# exe c : c.cpp a d ;
# obj d : d.cpp ;
#
# This all is fine, except that 'd' will be compiled with -fPIC even though
# it's not needed, as 'd' is used only in exe. However, it's hard to detect
    # where a target is going to be used. Alternatively, we can set -fPIC only
    # when main target type is LIB but then 'b' will be compiled without -fPIC.
# In x86-64 that will lead to link errors. So, compile everything with
# -fPIC.
#
# Yet another alternative would be to create propagated <sharedable>
# feature, and set it when building shared libraries, but that's hard to
# implement and will increase target path length even more.
flags('gcc.compile', 'OPTIONS', ['<link>shared'], ['-fPIC'])
if os_name() != 'NT' and os_name() != 'OSF' and os_name() != 'HPUX':
# OSF does have an option called -soname but it doesn't seem to work as
# expected, therefore it has been disabled.
HAVE_SONAME = ''
SONAME_OPTION = '-h'
flags('gcc.compile', 'USER_OPTIONS', [], ['<cflags>'])
flags('gcc.compile.c++', 'USER_OPTIONS',[], ['<cxxflags>'])
flags('gcc.compile', 'DEFINES', [], ['<define>'])
flags('gcc.compile', 'INCLUDES', [], ['<include>'])
engine = get_manager().engine()
engine.register_action('gcc.compile.c++.pch',
'"$(CONFIG_COMMAND)" -x c++-header $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"')
engine.register_action('gcc.compile.c.pch',
'"$(CONFIG_COMMAND)" -x c-header $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"')
def gcc_compile_cpp(targets, sources, properties):
# Some extensions are compiled as C++ by default. For others, we need to
# pass -x c++. We could always pass -x c++ but distcc does not work with it.
extension = os.path.splitext (sources [0]) [1]
lang = ''
if not extension in ['.cc', '.cp', '.cxx', '.cpp', '.c++', '.C']:
lang = '-x c++'
get_manager().engine().set_target_variable (targets, 'LANG', lang)
engine.add_dependency(targets, bjam.call('get-target-variable', targets, 'PCH_FILE'))
def gcc_compile_c(targets, sources, properties):
engine = get_manager().engine()
# If we use the name g++ then default file suffix -> language mapping does
# not work. So have to pass -x option. Maybe, we can work around this by
# allowing the user to specify both C and C++ compiler names.
#if $(>:S) != .c
#{
engine.set_target_variable (targets, 'LANG', '-x c')
#}
engine.add_dependency(targets, bjam.call('get-target-variable', targets, 'PCH_FILE'))
engine.register_action(
'gcc.compile.c++',
'"$(CONFIG_COMMAND)" $(LANG) -ftemplate-depth-128 $(OPTIONS) ' +
'$(USER_OPTIONS) -D$(DEFINES) -I"$(PCH_FILE:D)" -I"$(INCLUDES)" ' +
'-c -o "$(<:W)" "$(>:W)"',
function=gcc_compile_cpp,
bound_list=['PCH_FILE'])
engine.register_action(
'gcc.compile.c',
'"$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) ' +
'-I"$(PCH_FILE:D)" -I"$(INCLUDES)" -c -o "$(<)" "$(>)"',
function=gcc_compile_c,
bound_list=['PCH_FILE'])
def gcc_compile_asm(targets, sources, properties):
get_manager().engine().set_target_variable(targets, 'LANG', '-x assembler-with-cpp')
engine.register_action(
'gcc.compile.asm',
'"$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"',
function=gcc_compile_asm)
class GccLinkingGenerator(unix.UnixLinkingGenerator):
"""
    The class which checks that we don't try to use the <runtime-link>static
property while creating or using shared library, since it's not supported by
gcc/libc.
"""
def run(self, project, name, ps, sources):
# TODO: Replace this with the use of a target-os property.
no_static_link = False
if bjam.variable('UNIX'):
            no_static_link = True
##FIXME: what does this mean?
## {
## switch [ modules.peek : JAMUNAME ]
## {
## case * : no-static-link = true ;
## }
## }
reason = None
if no_static_link and ps.get('runtime-link') == 'static':
if ps.get('link') == 'shared':
reason = "On gcc, DLL can't be build with '<runtime-link>static'."
elif type.is_derived(self.target_types[0], 'EXE'):
for s in sources:
source_type = s.type()
if source_type and type.is_derived(source_type, 'SHARED_LIB'):
reason = "On gcc, using DLLS together with the " +\
"<runtime-link>static options is not possible "
if reason:
print 'warning:', reason
print 'warning:',\
"It is suggested to use '<runtime-link>static' together",\
"with '<link>static'." ;
return
else:
generated_targets = unix.UnixLinkingGenerator.run(self, project,
name, ps, sources)
return generated_targets
if on_windows():
flags('gcc.link.dll', '.IMPLIB-COMMAND', [], ['-Wl,--out-implib,'])
generators.register(
GccLinkingGenerator('gcc.link', True,
['OBJ', 'SEARCHED_LIB', 'STATIC_LIB', 'IMPORT_LIB'],
[ 'EXE' ],
[ '<toolset>gcc' ]))
generators.register(
GccLinkingGenerator('gcc.link.dll', True,
['OBJ', 'SEARCHED_LIB', 'STATIC_LIB', 'IMPORT_LIB'],
['IMPORT_LIB', 'SHARED_LIB'],
['<toolset>gcc']))
else:
generators.register(
GccLinkingGenerator('gcc.link', True,
['LIB', 'OBJ'],
['EXE'],
['<toolset>gcc']))
generators.register(
GccLinkingGenerator('gcc.link.dll', True,
['LIB', 'OBJ'],
['SHARED_LIB'],
['<toolset>gcc']))
# Declare flags for linking.
# First, the common flags.
flags('gcc.link', 'OPTIONS', ['<debug-symbols>on'], ['-g'])
flags('gcc.link', 'OPTIONS', ['<profiling>on'], ['-pg'])
flags('gcc.link', 'USER_OPTIONS', [], ['<linkflags>'])
flags('gcc.link', 'LINKPATH', [], ['<library-path>'])
flags('gcc.link', 'FINDLIBS-ST', [], ['<find-static-library>'])
flags('gcc.link', 'FINDLIBS-SA', [], ['<find-shared-library>'])
flags('gcc.link', 'LIBRARIES', [], ['<library-file>'])
# For <runtime-link>static we made sure there are no dynamic libraries in the
# link. On HP-UX not all system libraries exist as archived libraries (for
# example, there is no libunwind.a), so, on this platform, the -static option
# cannot be specified.
if os_name() != 'HPUX':
flags('gcc.link', 'OPTIONS', ['<runtime-link>static'], ['-static'])
# Now, the vendor specific flags.
# The parameter linker can be either gnu, darwin, osf, hpux or sun.
def init_link_flags(toolset, linker, condition):
"""
Now, the vendor specific flags.
The parameter linker can be either gnu, darwin, osf, hpux or sun.
"""
toolset_link = toolset + '.link'
if linker == 'gnu':
# Strip the binary when no debugging is needed. We use --strip-all flag
# as opposed to -s since icc (intel's compiler) is generally
# option-compatible with and inherits from the gcc toolset, but does not
# support -s.
# FIXME: what does unchecked translate to?
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,--strip-all']) # : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ;
flags(toolset_link, 'START-GROUP', condition, ['-Wl,--start-group'])# : unchecked ;
flags(toolset_link, 'END-GROUP', condition, ['-Wl,--end-group']) # : unchecked ;
# GNU ld can change the search behaviour for libraries referenced by the
# -l switch: the -Bstatic and -Bdynamic modifiers affect the -l switches
# that follow them. The list below shows the name variants that are tried;
# the search stops at the first variant that matches.
# *nix: -Bstatic -lxxx
# libxxx.a
#
# *nix: -Bdynamic -lxxx
# libxxx.so
# libxxx.a
#
# windows (mingw,cygwin) -Bstatic -lxxx
# libxxx.a
# xxx.lib
#
# windows (mingw,cygwin) -Bdynamic -lxxx
# libxxx.dll.a
# xxx.dll.a
# libxxx.a
# xxx.lib
# cygxxx.dll (*)
# libxxx.dll
# xxx.dll
# libxxx.a
#
# (*) This is for cygwin
# Please note that -Bstatic and -Bdynamic are not a guarantee that a
# static or dynamic lib indeed gets linked in. The switches only change
# search patterns!
# On *nix mixing shared libs with static runtime is not a good idea.
flags(toolset_link, 'FINDLIBS-ST-PFX',
map(lambda x: x + '/<runtime-link>shared', condition),
['-Wl,-Bstatic']) # : unchecked ;
flags(toolset_link, 'FINDLIBS-SA-PFX',
map(lambda x: x + '/<runtime-link>shared', condition),
['-Wl,-Bdynamic']) # : unchecked ;
# On windows allow mixing of static and dynamic libs with static
# runtime.
flags(toolset_link, 'FINDLIBS-ST-PFX',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bstatic']) # : unchecked ;
flags(toolset_link, 'FINDLIBS-SA-PFX',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bdynamic']) # : unchecked ;
flags(toolset_link, 'OPTIONS',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bstatic']) # : unchecked ;
elif linker == 'darwin':
# On Darwin, the -s option to ld does not work unless we pass -static,
# and passing -static unconditionally is a bad idea. So don't pass -s
# at all; darwin.jam will use a separate 'strip' invocation.
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ;
elif linker == 'osf':
# No --strip-all, just -s.
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s'])
# : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
# The OSF linker does not support -R.
flags(toolset_link, 'RPATH_OPTION', condition, ['-rpath']) # : unchecked ;
# -rpath-link is not supported at all.
elif linker == 'sun':
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s'])
# : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
# The Solaris linker does not have a separate -rpath-link, but allows
# using -L for the same purpose.
flags(toolset_link, 'LINKPATH', condition, ['<xdll-path>']) # : unchecked ;
# This permits shared libraries with non-PIC code on Solaris.
# VP, 2004/09/07: Now that we have -fPIC hardcoded in link.dll, the
# following is not needed. Whether -fPIC should be hardcoded is a
# separate question.
# AH, 2004/10/16: it is still necessary because some tests link against
# static libraries that were compiled without PIC.
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition), ['-mimpure-text'])
# : unchecked ;
elif linker == 'hpux':
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition),
['-Wl,-s']) # : unchecked ;
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition),
['-fPIC']) # : unchecked ;
else:
errors.user_error(
    "%s initialization: the value '%s' specified for <linker> is not "
    "recognized. Possible values are 'gnu', 'darwin', 'osf', 'hpux' or "
    "'sun'." % (toolset, linker))
# Declare actions for linking.
def gcc_link(targets, sources, properties):
engine = get_manager().engine()
engine.set_target_variable(targets, 'SPACE', ' ')
# Serialize execution of the 'link' action, since running N links in
# parallel is just slower. For now, serialize only gcc links, it might be a
# good idea to serialize all links.
engine.set_target_variable(targets, 'JAM_SEMAPHORE', '<s>gcc-link-semaphore')
engine.register_action(
'gcc.link',
'"$(CONFIG_COMMAND)" -L"$(LINKPATH)" ' +
'-Wl,$(RPATH_OPTION:E=-R)$(SPACE)-Wl,"$(RPATH)" ' +
'-Wl,-rpath-link$(SPACE)-Wl,"$(RPATH_LINK)" -o "$(<)" ' +
'$(START-GROUP) "$(>)" "$(LIBRARIES)" $(FINDLIBS-ST-PFX) ' +
'-l$(FINDLIBS-ST) $(FINDLIBS-SA-PFX) -l$(FINDLIBS-SA) $(END-GROUP) ' +
'$(OPTIONS) $(USER_OPTIONS)',
function=gcc_link,
bound_list=['LIBRARIES'])
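# Illustrative expansion of the gcc.link action above (a sketch with
# hypothetical paths; bjam performs the real variable expansion):
#
#   "g++" -L"lib" -Wl,-R -Wl,"lib" -Wl,-rpath-link -Wl,"lib" -o "bin/app" \
#       -Wl,--start-group "main.o" "libutil.a" -Wl,-Bstatic -lfoo \
#       -Wl,-Bdynamic -lbar -Wl,--end-group -g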
# Default value. Mostly for the sake of intel-linux that inherits from gcc, but
# does not have the same logic to set the .AR variable. We can put the same
# logic in intel-linux, but that's hardly worth the trouble as on Linux, 'ar' is
# always available.
__AR = 'ar'
flags('gcc.archive', 'AROPTIONS', [], ['<archiveflags>'])
def gcc_archive(targets, sources, properties):
# Always remove archive and start again. Here's rationale from
#
# Andre Hentz:
#
# I had a file, say a1.c, that was included into liba.a. I moved a1.c to
# a2.c, updated my Jamfiles and rebuilt. My program was crashing with absurd
# errors. After some debugging I traced it back to the fact that a1.o was
# *still* in liba.a
#
# Rene Rivera:
#
# Originally removing the archive was done by splicing an RM onto the
# archive action. That makes archives fail to build on NT when they have
# many files because it will no longer execute the action directly and blow
# the line length limit. Instead we remove the file in a different action,
# just before building the archive.
clean = targets[0] + '(clean)'
bjam.call('TEMPORARY', clean)
bjam.call('NOCARE', clean)
engine = get_manager().engine()
engine.set_target_variable('LOCATE', clean, bjam.call('get-target-variable', targets, 'LOCATE'))
engine.add_dependency(clean, sources)
engine.add_dependency(targets, clean)
engine.set_update_action('common.RmTemps', clean, targets)
# Declare action for creating static libraries.
# The letter 'r' means to add files to the archive with replacement. Since we
# remove the archive first, we don't care about replacement, but there's no
# option "add without replacement".
# The letter 'c' suppresses the warning in case the archive does not exist yet.
# That warning is produced only on some platforms, for whatever reason.
engine.register_action('gcc.archive',
'"$(.AR)" $(AROPTIONS) rc "$(<)" "$(>)"',
function=gcc_archive,
flags=['piecemeal'])
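# Illustrative expansion (sketch, with hypothetical file names): for a static
# library the archive action above runs roughly
#
#   "ar" rc "libfoo.a" "a.o" "b.o"
#
# after the '(clean)' pseudo-target has removed any stale libfoo.a.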
def gcc_link_dll(targets, sources, properties):
engine = get_manager().engine()
engine.set_target_variable(targets, 'SPACE', ' ')
engine.set_target_variable(targets, 'JAM_SEMAPHORE', '<s>gcc-link-semaphore')
engine.set_target_variable(targets, "HAVE_SONAME", HAVE_SONAME)
engine.set_target_variable(targets, "SONAME_OPTION", SONAME_OPTION)
engine.register_action(
'gcc.link.dll',
# Differ from 'link' above only by -shared.
'"$(CONFIG_COMMAND)" -L"$(LINKPATH)" ' +
'-Wl,$(RPATH_OPTION:E=-R)$(SPACE)-Wl,"$(RPATH)" ' +
'"$(.IMPLIB-COMMAND)$(<[1])" -o "$(<[-1])" ' +
'$(HAVE_SONAME)-Wl,$(SONAME_OPTION)$(SPACE)-Wl,$(<[-1]:D=) ' +
'-shared $(START-GROUP) "$(>)" "$(LIBRARIES)" $(FINDLIBS-ST-PFX) ' +
'-l$(FINDLIBS-ST) $(FINDLIBS-SA-PFX) -l$(FINDLIBS-SA) $(END-GROUP) ' +
'$(OPTIONS) $(USER_OPTIONS)',
function = gcc_link_dll,
bound_list=['LIBRARIES'])
# Set up threading support. It's somewhat contrived, so perform it at the end,
# to avoid cluttering other code.
if on_windows():
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-mthreads'])
elif bjam.variable('UNIX'):
jamuname = bjam.variable('JAMUNAME')
host_os_name = jamuname[0]
if host_os_name.startswith('SunOS'):
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthreads'])
flags('gcc', 'FINDLIBS-SA', [], ['rt'])
elif host_os_name == 'BeOS':
# BeOS has no threading options, don't set anything here.
pass
elif host_os_name.endswith('BSD'):
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread'])
# there is no -lrt on BSD
elif host_os_name == 'DragonFly':
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread'])
# there is no -lrt on BSD - DragonFly is a FreeBSD variant,
# which annoyingly doesn't say it's a *BSD.
elif host_os_name == 'IRIX':
# gcc on IRIX does not support multi-threading, don't set anything here.
pass
elif host_os_name == 'Darwin':
# Darwin has no threading options, don't set anything here.
pass
else:
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread'])
flags('gcc', 'FINDLIBS-SA', [], ['rt'])
def cpu_flags(toolset, variable, architecture, instruction_set, values, default=None):
#FIXME: for some reason this fails. Probably out of date feature code
## if default:
## flags(toolset, variable,
## ['<architecture>' + architecture + '/<instruction-set>'],
## values)
flags(toolset, variable,
#FIXME: same as above
[##'<architecture>/<instruction-set>' + instruction_set,
'<architecture>' + architecture + '/<instruction-set>' + instruction_set],
values)
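# In other words, a call such as
#
#   cpu_flags('gcc', 'OPTIONS', 'x86', 'i686', ['-march=i686'])
#
# is (with the commented-out default handling above disabled) just shorthand for
#
#   flags('gcc', 'OPTIONS',
#         ['<architecture>x86/<instruction-set>i686'], ['-march=i686'])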
# Set architecture/instruction-set options.
#
# x86 and compatible
flags('gcc', 'OPTIONS', ['<architecture>x86/<address-model>32'], ['-m32'])
flags('gcc', 'OPTIONS', ['<architecture>x86/<address-model>64'], ['-m64'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'i386', ['-march=i386'], default=True)
cpu_flags('gcc', 'OPTIONS', 'x86', 'i486', ['-march=i486'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'i586', ['-march=i586'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'i686', ['-march=i686'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium', ['-march=pentium'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium-mmx', ['-march=pentium-mmx'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentiumpro', ['-march=pentiumpro'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium2', ['-march=pentium2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium3', ['-march=pentium3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium3m', ['-march=pentium3m'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium-m', ['-march=pentium-m'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium4', ['-march=pentium4'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium4m', ['-march=pentium4m'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'prescott', ['-march=prescott'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'nocona', ['-march=nocona'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k6', ['-march=k6'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k6-2', ['-march=k6-2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k6-3', ['-march=k6-3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon', ['-march=athlon'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-tbird', ['-march=athlon-tbird'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-4', ['-march=athlon-4'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-xp', ['-march=athlon-xp'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-mp', ['-march=athlon-mp'])
##
cpu_flags('gcc', 'OPTIONS', 'x86', 'k8', ['-march=k8'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'opteron', ['-march=opteron'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon64', ['-march=athlon64'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-fx', ['-march=athlon-fx'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'winchip-c6', ['-march=winchip-c6'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'winchip2', ['-march=winchip2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'c3', ['-march=c3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'c3-2', ['-march=c3-2'])
# Sparc
flags('gcc', 'OPTIONS', ['<architecture>sparc/<address-model>32'], ['-m32'])
flags('gcc', 'OPTIONS', ['<architecture>sparc/<address-model>64'], ['-m64'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'c3', ['-mcpu=c3'], default=True)
cpu_flags('gcc', 'OPTIONS', 'sparc', 'v7', ['-mcpu=v7'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'cypress', ['-mcpu=cypress'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'v8', ['-mcpu=v8'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'supersparc', ['-mcpu=supersparc'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclite', ['-mcpu=sparclite'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'hypersparc', ['-mcpu=hypersparc'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclite86x', ['-mcpu=sparclite86x'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'f930', ['-mcpu=f930'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'f934', ['-mcpu=f934'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclet', ['-mcpu=sparclet'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'tsc701', ['-mcpu=tsc701'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'v9', ['-mcpu=v9'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'ultrasparc', ['-mcpu=ultrasparc'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'ultrasparc3', ['-mcpu=ultrasparc3'])
# RS/6000 & PowerPC
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>32'], ['-m32'])
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>64'], ['-m64'])
cpu_flags('gcc', 'OPTIONS', 'power', '403', ['-mcpu=403'])
cpu_flags('gcc', 'OPTIONS', 'power', '505', ['-mcpu=505'])
cpu_flags('gcc', 'OPTIONS', 'power', '601', ['-mcpu=601'])
cpu_flags('gcc', 'OPTIONS', 'power', '602', ['-mcpu=602'])
cpu_flags('gcc', 'OPTIONS', 'power', '603', ['-mcpu=603'])
cpu_flags('gcc', 'OPTIONS', 'power', '603e', ['-mcpu=603e'])
cpu_flags('gcc', 'OPTIONS', 'power', '604', ['-mcpu=604'])
cpu_flags('gcc', 'OPTIONS', 'power', '604e', ['-mcpu=604e'])
cpu_flags('gcc', 'OPTIONS', 'power', '620', ['-mcpu=620'])
cpu_flags('gcc', 'OPTIONS', 'power', '630', ['-mcpu=630'])
cpu_flags('gcc', 'OPTIONS', 'power', '740', ['-mcpu=740'])
cpu_flags('gcc', 'OPTIONS', 'power', '7400', ['-mcpu=7400'])
cpu_flags('gcc', 'OPTIONS', 'power', '7450', ['-mcpu=7450'])
cpu_flags('gcc', 'OPTIONS', 'power', '750', ['-mcpu=750'])
cpu_flags('gcc', 'OPTIONS', 'power', '801', ['-mcpu=801'])
cpu_flags('gcc', 'OPTIONS', 'power', '821', ['-mcpu=821'])
cpu_flags('gcc', 'OPTIONS', 'power', '823', ['-mcpu=823'])
cpu_flags('gcc', 'OPTIONS', 'power', '860', ['-mcpu=860'])
cpu_flags('gcc', 'OPTIONS', 'power', '970', ['-mcpu=970'])
cpu_flags('gcc', 'OPTIONS', 'power', '8540', ['-mcpu=8540'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power', ['-mcpu=power'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power2', ['-mcpu=power2'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power3', ['-mcpu=power3'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power4', ['-mcpu=power4'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power5', ['-mcpu=power5'])
cpu_flags('gcc', 'OPTIONS', 'power', 'powerpc', ['-mcpu=powerpc'])
cpu_flags('gcc', 'OPTIONS', 'power', 'powerpc64', ['-mcpu=powerpc64'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rios', ['-mcpu=rios'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rios1', ['-mcpu=rios1'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rios2', ['-mcpu=rios2'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rsc', ['-mcpu=rsc'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rs64a', ['-mcpu=rs64'])
# AIX variant of RS/6000 & PowerPC
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>32/<target-os>aix'], ['-maix32'])
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>64/<target-os>aix'], ['-maix64'])
flags('gcc', 'AROPTIONS', ['<architecture>power/<address-model>64/<target-os>aix'], ['-X 64'])
|
|
# Note that the dataset must already be downloaded for this script to work; do:
# $ cd data/
# $ python download_dataset.py
# quoc_trinh
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import sys
import datetime
# get the current file name (without extension) from sys.argv[0]
file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print(" File Name:")
print(file_name)
print("")
# FLAG indicating whether this is the training process or not.
FLAG = 'train'
POOL_X = 16
POOL_Y = 18
N_HIDDEN_CONFIG = 32
save_path_name = file_name + "/model.ckpt"
print(datetime.datetime.now())
# Write to file: time to start, type, time to end
f = open(file_name + '/time.txt', 'a+')
f.write("------------- \n")
f.write("This is time \n")
f.write("Started at \n")
f.write(str(datetime.datetime.now())+'\n')
if __name__ == "__main__":
# -----------------------------
# step1: load and prepare data
# -----------------------------
# Those are separate normalised input features for the neural network
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATA_PATH = "../data/"
DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
# Preparing data set:
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
row.replace('  ', ' ').strip().split(' ') for row in file
]]
)
file.close()
"""Examples
--------
>> > x = np.arange(4).reshape((2, 2))
>> > x
array([[0, 1],
[2, 3]])
>> > np.transpose(x)
array([[0, 2],
[1, 3]])
>> > x = np.ones((1, 2, 3))
>> > np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
X_train = load_X(X_train_signals_paths) # [7352, 128, 9]
X_test = load_X(X_test_signals_paths) # [2947, 128, 9]
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 128
print(len(X_train[0][0])) # 9
print(type(X_train))
X_train = np.reshape(X_train, [-1, 32, 36])
X_test = np.reshape(X_test, [-1, 32, 36])
print("-----------------X_train---------------")
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 32
print(len(X_train[0][0])) # 36
print(type(X_train))
# exit()
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
def one_hot(label):
"""convert label from dense to one hot
argument:
label: ndarray dense label ,shape: [sample_num,1]
return:
one_hot_label: ndarray one hot, shape: [sample_num,n_class]
"""
label_num = len(label)
new_label = label.reshape(label_num) # shape : [sample_num]
# because max is 5, and we will create 6 columns
n_values = np.max(new_label) + 1
return np.eye(n_values)[np.array(new_label, dtype=np.int32)]
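# Example (sketch): one_hot(np.array([[0], [2], [5]])) returns a [3, 6] array
#   [[1, 0, 0, 0, 0, 0],
#    [0, 0, 1, 0, 0, 0],
#    [0, 0, 0, 0, 0, 1]]
# since n_values = max(label) + 1 = 6 here.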
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
row.replace('  ', ' ').strip().split(' ') for row in file
]],
dtype=np.int32
)
file.close()
# Subtract 1 from each output class for friendly 0-based indexing
return y_ - 1
y_train = one_hot(load_y(y_train_path))
y_test = one_hot(load_y(y_test_path))
print("---------y_train----------")
# print(y_train)
print(len(y_train)) # 7352
print(len(y_train[0])) # 6
# -----------------------------------
# step2: define parameters for model
# -----------------------------------
class Config(object):
"""
define a class to store parameters,
the input should be feature mat of training and testing
"""
def __init__(self, X_train, X_test):
# Input data
self.train_count = len(X_train) # 7352 training series
self.test_data_count = len(X_test) # 2947 testing series
self.n_steps = len(X_train[0]) # 32 time steps per series (after the reshape above; 128 in the raw data)
# Training
self.learning_rate = 0.0025
self.lambda_loss_amount = 0.0015
self.training_epochs = 300
self.batch_size = 1000
# LSTM structure
self.n_inputs = len(X_train[0][0]) # features per time step (36 after the reshape above; 9 in the raw data: three 3-axis sensors)
self.n_hidden = N_HIDDEN_CONFIG # nb of neurons inside the neural network
self.n_classes = 6 # Final output classes
self.W = {
'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])), # [9, 32]
'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes])) # [32, 6]
}
self.biases = {
'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)), # [32]
'output': tf.Variable(tf.random_normal([self.n_classes])) # [6]
}
config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
# np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
# [none, 128, 9]
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
# return a random value drawn from a truncated normal distribution
initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape, name='Bias')
return tf.Variable(initial)
# Convolution and Pooling
def conv2d(x, W):
# Must have `strides[0] = strides[3] = 1 `.
# For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1] `.
return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')
def max_pool_2x2(x):
return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME', name='max_pool')
def LSTM_Network(feature_mat, config):
"""model a LSTM Network,
it stacks 2 LSTM layers, each layer has n_hidden=32 cells
and 1 output layer, it is a full connet layer
argument:
feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs]
config: class containing config of network
return:
: matrix output shape [batch_size,n_classes]
"""
W_conv1 = weight_variable([3, 3, 1, 16])
b_conv1 = bias_variable([16])
# x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
print("----feature_mat_image-----")
print(feature_mat_image.get_shape())
h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
h_pool1 = h_conv1
# Second Convolutional Layer
W_conv2 = weight_variable([3, 3, 16, 16])
b_conv2 = weight_variable([16])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# Third Convolutional Layer
W_conv3 = weight_variable([3, 3, 16, 16])
b_conv3 = weight_variable([16])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
h_pool3 = (h_conv3)
# Fourth Convolutional Layer
W_conv4 = weight_variable([3, 3, 16, 64])
b_conv4 = weight_variable([64])
h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
h_pool4 = h_conv4
# Fifth Convolutional Layer
W_conv5 = weight_variable([3, 3, 64, 1])
b_conv5 = weight_variable([1])
h_conv5 = tf.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5)
h_pool5 = h_conv5
h_pool5 = tf.reshape(h_pool5, shape=[-1, POOL_X, POOL_Y])
feature_mat = h_pool5
print("----feature_mat-----")
print(feature_mat)
# exit()
# W_fc1 = weight_variable([8 * 9 * 1, 1024])
# b_fc1 = bias_variable([1024])
# h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
# h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# print("----h_fc1_drop-----")
# print(h_fc1)
# exit()
#
# # keep_prob = tf.placeholder(tf.float32)
# keep_prob = tf.placeholder(1.0)
# h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
# print("----h_fc1_drop-----")
# print(h_fc1_drop)
# exit()
#
# W_fc2 = weight_variable([1024, 10])
# b_fc2 = bias_variable([10])
#
# y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# print("----y_conv-----")
# print(y_conv)
# exit()
# Exchange dim 1 and dim 0
# Start at: [0,1,2] = [batch_size, 128, 9] => [batch_size, 32, 36]
feature_mat = tf.transpose(feature_mat, [1, 0, 2])
# New feature_mat's shape: [time_steps, batch_size, n_inputs] [128, batch_size, 9]
print("----feature_mat-----")
print(feature_mat)
# exit()
# Temporarily crush the feature_mat's dimensions
feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs]) # 9
# New feature_mat's shape: [time_steps*batch_size, n_inputs] # 128 * batch_size, 9
# Linear activation, reshaping inputs to the LSTM's number of hidden:
hidden = tf.nn.relu(tf.matmul(
feature_mat, config.W['hidden']
) + config.biases['hidden'])
# New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] [128*batch_size, 32]
print("--n_steps--")
print(config.n_steps)
print("--hidden--")
print(hidden)
# Split the series because the rnn cell needs time_steps features, each of shape:
hidden = tf.split(0, config.n_steps/4, hidden) # (0, 128, [128*batch_size, 32])
# New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
# Define LSTM cell of first hidden layer:
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
# Stack two LSTM layers; both layers have the same shape
lstm_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
# Get LSTM outputs; the states are internal to the LSTM cells and are not of interest here
outputs, _ = tf.nn.rnn(lstm_layers, hidden, dtype=tf.float32)
# outputs' shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
print("------------------list-------------------")
print(outputs)
# Get last time step's output feature for a "many to one" style classifier,
# as in the image describing RNNs at the top of this page
lstm_last_output = outputs[-1] # Get the last element of the array: [?, 32]
print("------------------last outputs-------------------")
print (lstm_last_output)
# Linear activation
return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
pred_Y = LSTM_Network(X, config) # shape[?,6]
print("------------------pred_Y-------------------")
print(pred_Y)
# Loss,train_step,evaluation
l2 = config.lambda_loss_amount * \
sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
# Initializing the variables
init = tf.initialize_all_variables()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
best_accuracy = 0.0
# sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
if (FLAG == 'train') : # If it is the training mode
with tf.Session() as sess:
# tf.initialize_all_variables().run()
sess.run(init) # .run()
f.write("---Save model \n")
# Start training for each batch and loop epochs
for i in range(config.training_epochs):
for start, end in zip(range(0, config.train_count, config.batch_size), # (0, 7352, 1000)
                      range(config.batch_size, config.train_count + 1,
                            config.batch_size)): # (1000, 7353, 1000)
print(start)
print(end)
sess.run(train_step, feed_dict={X: X_train[start:end],
Y: y_train[start:end]})
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
print("traing iter: {},".format(i) + \
" test accuracy : {},".format(accuracy_out) + \
" loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
# Save the model in this session
save_path = saver.save(sess, file_name + "/model.ckpt")
print("Model saved in file: %s" % save_path)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
else :
# Running a new session
print("Starting 2nd session...")
with tf.Session() as sess:
# Initialize variables
sess.run(init)
f.write("---Restore model \n")
# Restore model weights from previously saved model
saver.restore(sess, file_name+ "/model.ckpt")
print("Model restored from file: %s" % save_path_name)
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
# print("traing iter: {}," + \
# " test accuracy : {},".format(accuracy_out) + \
# " loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
#
# #------------------------------------------------------------------
# # step5: Training is good, but having visual insight is even better
# #------------------------------------------------------------------
# # The code is in the .ipynb
#
# #------------------------------------------------------------------
# # step6: And finally, the multi-class confusion matrix and metrics!
# #------------------------------------------------------------------
# # The code is in the .ipynb
f.write("Ended at \n")
f.write(str(datetime.datetime.now())+'\n')
f.write("------------- \n")
f.close()
|
|
# Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ctypes as cp
import numpy as np
import numpy.ctypeslib as npct
from ndlib import rgbColor
from operator import sub, div
#
# Cube Locations using ctypes
#
# Load the shared C library using the ctypes mechanism; the directory path is always local
BASE_PATH = os.path.dirname(__file__)
ndlib = npct.load_library("ndlib", BASE_PATH+"/c_version")
# Load the shared CPP library using the ctypes mechanism; the directory path is always local
#ndlib = npct.load_library("ndlib", "cpp_version")
array_1d_uint8 = npct.ndpointer(dtype=np.uint8, ndim=1, flags='C_CONTIGUOUS')
array_2d_uint8 = npct.ndpointer(dtype=np.uint8, ndim=2, flags='C_CONTIGUOUS')
array_1d_uint16 = npct.ndpointer(dtype=np.uint16, ndim=1, flags='C_CONTIGUOUS')
array_2d_uint16 = npct.ndpointer(dtype=np.uint16, ndim=2, flags='C_CONTIGUOUS')
array_3d_uint16 = npct.ndpointer(dtype=np.uint16, ndim=3, flags='C_CONTIGUOUS')
array_1d_uint32 = npct.ndpointer(dtype=np.uint32, ndim=1, flags='C_CONTIGUOUS')
array_2d_uint32 = npct.ndpointer(dtype=np.uint32, ndim=2, flags='C_CONTIGUOUS')
array_3d_uint32 = npct.ndpointer(dtype=np.uint32, ndim=3, flags='C_CONTIGUOUS')
array_4d_uint32 = npct.ndpointer(dtype=np.uint32, ndim=4, flags='C_CONTIGUOUS')
array_1d_uint64 = npct.ndpointer(dtype=np.uint64, ndim=1, flags='C_CONTIGUOUS')
array_2d_uint64 = npct.ndpointer(dtype=np.uint64, ndim=2, flags='C_CONTIGUOUS')
array_2d_float32 = npct.ndpointer(dtype=np.float32, ndim=2, flags='C_CONTIGUOUS')
array_3d_float32 = npct.ndpointer(dtype=np.float32, ndim=3, flags='C_CONTIGUOUS')
# defining the parameter types of the functions in C
# FORMAT: <library_name>.<function_name>.argtypes = [ ctypes.<argtype>, ctypes.<argtype>, ... ]
ndlib.filterCutout.argtypes = [array_1d_uint32, cp.c_int, array_1d_uint32, cp.c_int]
ndlib.filterCutoutOMP32.argtypes = [array_1d_uint32, cp.c_int, array_1d_uint32, cp.c_int]
ndlib.filterCutoutOMP64.argtypes = [array_1d_uint64, cp.c_int, array_1d_uint64, cp.c_int]
ndlib.locateCube.argtypes = [ array_2d_uint64, cp.c_int, array_2d_uint32, cp.c_int, cp.POINTER(cp.c_int) ]
ndlib.annotateCube.argtypes = [ array_1d_uint32, cp.c_int, cp.POINTER(cp.c_int), cp.c_int, array_1d_uint32, array_2d_uint32, cp.c_int, cp.c_char, array_2d_uint32 ]
ndlib.XYZMorton.argtypes = [ array_1d_uint64 ]
ndlib.MortonXYZ.argtypes = [ npct.ctypes.c_int64 , array_1d_uint64 ]
ndlib.recolorCubeOMP32.argtypes = [ array_2d_uint32, cp.c_int, cp.c_int, array_2d_uint32, array_1d_uint32 ]
ndlib.recolorCubeOMP64.argtypes = [ array_2d_uint64, cp.c_int, cp.c_int, array_2d_uint64, array_1d_uint64 ]
ndlib.quicksort.argtypes = [ array_2d_uint64, cp.c_int ]
ndlib.shaveCube.argtypes = [ array_1d_uint32, cp.c_int, cp.POINTER(cp.c_int), cp.c_int, array_1d_uint32, array_2d_uint32, cp.c_int, array_2d_uint32, cp.c_int, array_2d_uint32 ]
ndlib.annotateEntityDense.argtypes = [ array_3d_uint32, cp.POINTER(cp.c_int), cp.c_int ]
ndlib.shaveDense.argtypes = [ array_3d_uint32, array_3d_uint32, cp.POINTER(cp.c_int) ]
ndlib.exceptionDense.argtypes = [ array_3d_uint32, array_3d_uint32, cp.POINTER(cp.c_int) ]
ndlib.overwriteDense32.argtypes = [ array_3d_uint32, array_3d_uint32, cp.POINTER(cp.c_int) ]
ndlib.overwriteDenseF32.argtypes = [ array_3d_float32, array_3d_float32, cp.POINTER(cp.c_int) ]
ndlib.overwriteMerge.argtypes = [ array_4d_uint32, array_4d_uint32, cp.c_int ]
ndlib.zoomOutData.argtypes = [ array_3d_uint32, array_3d_uint32, cp.POINTER(cp.c_int), cp.c_int ]
ndlib.zoomOutDataOMP.argtypes = [ array_3d_uint32, array_3d_uint32, cp.POINTER(cp.c_int), cp.c_int ]
ndlib.zoomInData.argtypes = [ array_3d_uint32, array_3d_uint32, cp.POINTER(cp.c_int), cp.c_int ]
ndlib.zoomInDataOMP16.argtypes = [ array_3d_uint16, array_3d_uint16, cp.POINTER(cp.c_int), cp.c_int ]
ndlib.zoomInDataOMP32.argtypes = [ array_3d_uint32, array_3d_uint32, cp.POINTER(cp.c_int), cp.c_int ]
ndlib.mergeCube.argtypes = [ array_3d_uint32, cp.POINTER(cp.c_int), cp.c_int, cp.c_int ]
ndlib.isotropicBuild8.argtypes = [ array_2d_uint8, array_2d_uint8, array_2d_uint8, cp.POINTER(cp.c_int) ]
ndlib.isotropicBuild16.argtypes = [ array_2d_uint16, array_2d_uint16, array_2d_uint16, cp.POINTER(cp.c_int) ]
ndlib.isotropicBuild32.argtypes = [ array_2d_uint32, array_2d_uint32, array_2d_uint32, cp.POINTER(cp.c_int) ]
ndlib.isotropicBuildF32.argtypes = [ array_2d_float32, array_2d_float32, array_2d_float32, cp.POINTER(cp.c_int) ]
ndlib.addDataZSlice.argtypes = [ array_3d_uint32, array_3d_uint32, cp.POINTER(cp.c_int), cp.POINTER(cp.c_int) ]
ndlib.addDataIsotropic.argtypes = [ array_3d_uint32, array_3d_uint32, cp.POINTER(cp.c_int), cp.POINTER(cp.c_int) ]
ndlib.unique.argtypes = [ array_1d_uint32, array_1d_uint32, cp.c_int ]
ndlib.ZSliceStackCube.argtypes = [ array_3d_uint32, array_3d_uint32 ]
ndlib.IsotropicStackCube.argtypes = [ array_3d_uint32, array_3d_uint32 ]
# setting the return type of the function in C
# FORMAT: <library_name>.<function_name>.restype = ctypes.<type>
ndlib.filterCutout.restype = None
ndlib.filterCutoutOMP32.restype = None
ndlib.filterCutoutOMP64.restype = None
ndlib.locateCube.restype = None
ndlib.annotateCube.restype = cp.c_int
ndlib.XYZMorton.restype = npct.ctypes.c_uint64
ndlib.MortonXYZ.restype = None
ndlib.recolorCubeOMP32.restype = None
ndlib.recolorCubeOMP64.restype = None
ndlib.quicksort.restype = None
ndlib.shaveCube.restype = None
ndlib.annotateEntityDense.restype = None
ndlib.shaveDense.restype = None
ndlib.exceptionDense.restype = None
ndlib.overwriteDense32.restype = None
ndlib.overwriteDenseF32.restype = None
ndlib.overwriteMerge.restype = None
ndlib.zoomOutData.restype = None
ndlib.zoomOutDataOMP.restype = None
ndlib.zoomInData.restype = None
ndlib.zoomInDataOMP16.restype = None
ndlib.zoomInDataOMP32.restype = None
ndlib.mergeCube.restype = None
ndlib.isotropicBuild8.restype = None
ndlib.isotropicBuild16.restype = None
ndlib.isotropicBuild32.restype = None
ndlib.isotropicBuildF32.restype = None
ndlib.addDataZSlice.restype = None
ndlib.addDataIsotropic.restype = None
ndlib.ZSliceStackCube.restype = None
ndlib.IsotropicStackCube.restype = None
ndlib.unique.restype = cp.c_int
def filter_ctype_OMP ( cutout, filterlist ):
"""Remove all annotations in a cutout that do not match the filterlist using OpenMP"""
cutout_shape = cutout.shape
# Temp Fix
if cutout.dtype == np.uint32:
# get a copy of the iterator as a 1-D array
cutout = np.asarray(cutout, dtype=np.uint32)
cutout = cutout.ravel()
filterlist = np.asarray(filterlist, dtype=np.uint32)
# Calling the C OpenMP function
ndlib.filterCutoutOMP32( cutout, cp.c_int(len(cutout)), np.sort(filterlist), cp.c_int(len(filterlist)) )
elif cutout.dtype == np.uint64:
# get a copy of the iterator as a 1-D array
cutout = np.asarray(cutout, dtype=np.uint64)
cutout = cutout.ravel()
filterlist = np.asarray(filterlist, dtype=np.uint64)
# Calling the C OpenMP function
ndlib.filterCutoutOMP64( cutout, cp.c_int(len(cutout)), np.sort(filterlist), cp.c_int(len(filterlist)) )
else:
raise ValueError("filter_ctype_OMP: unsupported dtype {}".format(cutout.dtype))
return cutout.reshape( cutout_shape )
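# Usage sketch (hypothetical ids): keep only annotations 7 and 12 in a uint32
# cutout; voxels whose id is not in the filter list are filtered out by the
# C routine:
#
#   cutout = filter_ctype_OMP(cutout, np.array([7, 12], dtype=np.uint32))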
def filter_ctype ( cutout, filterlist ):
"""Remove all annotations in a cutout that do not match the filterlist"""
# get a copy of the iterator as a 1-D array
flatcutout = cutout.flat.copy()
# Calling the C naive function
ndlib.filterCutout(flatcutout,cp.c_int(len(flatcutout)),filterlist,cp.c_int(len(filterlist)))
return flatcutout.reshape(cutout.shape[0],cutout.shape[1],cutout.shape[2])
def annotate_ctype ( data, annid, offset, locations, conflictopt ):
""" Remove all annotations in a cutout that do not match the filterlist """
# get a copy of the iterator as a 1-D array
datashape = data.shape
dims = [i for i in data.shape]
data = data.ravel()
exceptions = np.zeros ( (len(locations),3), dtype=np.uint32 )
# Calling the C native function
exceptionIndex = ndlib.annotateCube ( data, cp.c_int(len(data)), (cp.c_int * len(dims))(*dims), cp.c_int(annid), offset, locations, cp.c_int(len(locations)), cp.c_char(conflictopt), exceptions )
if exceptionIndex > 0:
exceptions = exceptions[:(exceptionIndex+1)]
else:
exceptions = np.zeros ( (0), dtype=np.uint32 )
return ( data.reshape(datashape) , exceptions )
def locate_ctype ( locations, dims ):
""" Remove all annotations in a cutout that do not match the filterlist """
# get a copy of the iterator as a 1-D array
cubeLocs = np.zeros ( [len(locations),4], dtype=np.uint64 )
# Calling the C native function
ndlib.locateCube ( cubeLocs, cp.c_int(len(cubeLocs)), locations, cp.c_int(len(locations)), (cp.c_int * len(dims))(*dims) )
return cubeLocs
def XYZMorton ( xyz ):
""" Get morton order from XYZ coordinates """
# Calling the C native function
xyz = np.uint64( xyz )
morton = ndlib.XYZMorton ( xyz )
return morton
def MortonXYZ ( morton ):
""" Get morton order from XYZ coordinates """
# Calling the C native function
morton = np.uint64(morton)
cubeoff = np.zeros((3), dtype=np.uint64)
ndlib.MortonXYZ ( morton, cubeoff )
cubeoff = np.uint32(cubeoff)
return [i for i in cubeoff]
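# Round-trip sketch (assuming the usual x/y/z bit interleaving):
#
#   zindex = XYZMorton([1, 1, 1])   # interleaved bits 0b111 == 7
#   MortonXYZ(zindex)               # -> [1, 1, 1]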
def recolor_ctype ( cutout, imagemap ):
""" Annotation recoloring function """
xdim, ydim = cutout.shape
if not cutout.flags['C_CONTIGUOUS']:
cutout = np.ascontiguousarray(cutout,dtype=cutout.dtype)
# Calling the c native function
if cutout.dtype == np.uint32:
ndlib.recolorCubeOMP32 ( cutout, cp.c_int(xdim), cp.c_int(ydim), imagemap, np.asarray( rgbColor.rgbcolor,dtype=np.uint32) )
else:
ndlib.recolorCubeOMP64 ( cutout, cp.c_int(xdim), cp.c_int(ydim), imagemap, np.asarray( rgbColor.rgbcolor,dtype=np.uint64) )
return imagemap
def quicksort ( locs ):
""" Sort the cube on Morton Id """
# Calling the C native function
ndlib.quicksort ( locs, len(locs) )
return locs
def shave_ctype ( data, annid, offset, locations ):
""" Remove annotations by a list of locations """
# get a copy of the iterator as a 1-D array
datashape = data.shape
dims = [i for i in data.shape]
data = data.ravel()
exceptions = np.zeros ( (len(locations),3), dtype=np.uint32 )
zeroed = np.zeros ( (len(locations),3), dtype=np.uint32 )
exceptionIndex = -1
zeroedIndex = -1
# Calling the C native function
ndlib.shaveCube ( data, cp.c_int(len(data)), (cp.c_int * len(dims))(*dims), cp.c_int(annid), offset, locations, cp.c_int(len(locations)), exceptions, cp.c_int(exceptionIndex), zeroed, cp.c_int(zeroedIndex) )
if exceptionIndex > 0:
exceptions = exceptions[:(exceptionIndex+1)]
else:
exceptions = np.zeros ( (0), dtype=np.uint32 )
if zeroedIndex > 0:
zeroed = zeroed[:(zeroedIndex+1)]
else:
zeroed = np.zeros ( (0), dtype=np.uint32 )
return ( data.reshape(datashape) , exceptions, zeroed )
def annotateEntityDense_ctype ( data, entityid ):
""" Relabel all non zero pixels to annotation id """
dims = [ i for i in data.shape ]
ndlib.annotateEntityDense ( data, (cp.c_int * len(dims))(*dims), cp.c_int(entityid) )
return ( data )
def shaveDense_ctype ( data, shavedata ):
""" Remove the specified voxels from the annotation """
dims = [ i for i in data.shape ]
ndlib.shaveDense ( data, shavedata, (cp.c_int * len(dims))(*dims) )
return ( data )
def exceptionDense_ctype ( data, annodata ):
""" Get a dense voxel region and overwrite all the non-zero values """
data = np.uint32(data)
annodata = np.uint32(annodata)
if not annodata.flags['C_CONTIGUOUS']:
annodata = np.ascontiguousarray(annodata,np.uint32)
dims = [ i for i in data.shape ]
ndlib.exceptionDense ( data, annodata, (cp.c_int * len(dims))(*dims) )
return ( data )
def overwriteDense_ctype ( data, annodata ):
""" Get a dense voxel region and overwrite all the non-zero values """
original_dtype = data.dtype
dims = [ i for i in data.shape ]
# use the float32 path for float data and the uint32 path otherwise
if data.dtype != np.float32:
data = np.uint32(data)
annodata = np.uint32(annodata)
if not annodata.flags['C_CONTIGUOUS']:
annodata = np.ascontiguousarray(annodata, dtype=np.uint32)
ndlib.overwriteDense32 ( data, annodata, (cp.c_int * len(dims))(*dims) )
else:
if not annodata.flags['C_CONTIGUOUS']:
annodata = np.ascontiguousarray(annodata, dtype=np.float32)
ndlib.overwriteDenseF32 ( data, annodata, (cp.c_int * len(dims))(*dims) )
return ( data.astype(original_dtype, copy=False) )
def overwriteMerge_ctype( data1, data2 ):
"""Blaze Overwrite"""
from functools import reduce
from operator import mul
dim = cp.c_int(reduce(mul, data1.shape))
ndlib.overwriteMerge(data1, data2, dim)
return data1
def zoomOutData_ctype ( olddata, newdata, factor ):
""" Add the contribution of the input data to the next level at the given offset in the output cube """
dims = [ i for i in newdata.shape ]
ndlib.zoomOutData ( olddata, newdata, (cp.c_int * len(dims))(*dims), cp.c_int(factor) )
return ( newdata )
def zoomOutData64_ctype ( olddata, newdata, factor ):
""" Add the contribution of the input data to the next level at the given offset in the output cube """
dims = [ i for i in newdata.shape ]
ndlib.zoomOutData64 ( olddata, newdata, (cp.c_int * len(dims))(*dims), cp.c_int(factor) )
return ( newdata )
def zoomOutData_ctype_OMP ( olddata, newdata, factor ):
""" Add the contribution of the input data to the next level at the given offset in the output cube """
dims = [ i for i in newdata.shape ]
ndlib.zoomOutDataOMP ( olddata, newdata, (cp.c_int * len(dims))(*dims), cp.c_int(factor) )
return ( newdata )
def zoomInData_ctype ( olddata, newdata, factor ):
""" Add the contribution of the input data to the next level at the given offset in the output cube """
dims = [ i for i in newdata.shape ]
ndlib.zoomInData ( olddata, newdata, (cp.c_int * len(dims))(*dims), cp.c_int(factor) )
return ( newdata )
def zoomInData_ctype_OMP ( olddata, newdata, factor ):
""" Add the contribution of the input data to the next level at the given offset in the output cube """
dims = [ i for i in newdata.shape ]
if olddata.dtype == np.uint16:
ndlib.zoomInDataOMP16 ( olddata, newdata, (cp.c_int * len(dims))(*dims), cp.c_int(factor) )
else:
ndlib.zoomInDataOMP32 ( olddata, newdata, (cp.c_int * len(dims))(*dims), cp.c_int(factor) )
return ( newdata )
def mergeCube_ctype ( data, newid, oldid ):
""" Relabel voxels in cube from oldid to newid """
dims = [ i for i in data.shape ]
ndlib.mergeCube ( data, (cp.c_int * len(dims))(*dims), cp.c_int(newid), cp.c_int(oldid) )
return ( data )
def isotropicBuild_ctype ( data1, data2 ):
""" Merging Data """
dims = [ i for i in data1.shape ]
newdata = np.zeros(data1.shape,dtype=data1.dtype)
if data1.dtype == np.uint32:
ndlib.isotropicBuild32 ( data1, data2, newdata, (cp.c_int * len(dims))(*dims) )
elif data1.dtype == np.uint8:
ndlib.isotropicBuild8 ( data1, data2, newdata, (cp.c_int * len(dims))(*dims) )
elif data1.dtype == np.uint16:
ndlib.isotropicBuild16 ( data1, data2, newdata, (cp.c_int * len(dims))(*dims) )
elif data1.dtype == np.float32:
ndlib.isotropicBuildF32 ( data1, data2, newdata, (cp.c_int * len(dims))(*dims) )
else:
raise ValueError("isotropicBuild_ctype: unsupported dtype {}".format(data1.dtype))
return ( newdata )
def IsotropicStackCube_ctype ( olddata, newdata ):
"""Convert the old cube to new cube of annotations reducing by 2x2x2"""
dims = [ i for i in newdata.shape ]
ndlib.IsotropicStackCube ( olddata, newdata, (cp.c_int * len(dims))(*dims) )
def ZSliceStackCube_ctype ( olddata, newdata ):
"""Convert the old cube to new cube of annotations reducing by 2x2x1"""
dims = [ i for i in newdata.shape ]
ndlib.ZSliceStackCube ( olddata, newdata, (cp.c_int * len(dims))(*dims) )
def addDataToIsotropicStack_ctype ( cube, output, offset ):
"""Add the contribution of the input data to the next level at the given offset in the output cube"""
dims = [ i for i in cube.data.shape ]
ndlib.addDataIsotropic ( cube.data, output, (cp.c_int * len(offset))(*offset), (cp.c_int * len(dims))(*dims) )
def addDataToZSliceStack_ctype ( cube, output, offset ):
"""Add the contribution of the input data to the next level at the given offset in the output cube"""
dims = [ i for i in cube.data.shape ]
ndlib.addDataZSlice ( cube.data, output, (cp.c_int * len(offset))(*offset), (cp.c_int * len(dims))(*dims) )
def unique ( data ):
"""Return the unqiue elements in the array"""
data = data.ravel()
unique_array = np.zeros(len(data), dtype=data.dtype)
unique_length = ndlib.unique ( data, unique_array, cp.c_int(len(data)) )
return unique_array[:unique_length]
def boundary_morton(start_xyz, stop_xyz, cube_dim, offset=[0,0,0]):
"""Returns a list of morton indexs inside the region and one on the boundary"""
boundary_list = []
interior_list = []
start_value = map(div, map(sub, start_xyz, offset), cube_dim)
stop_value = map(div, map(sub, stop_xyz, offset), cube_dim)
for z in range(start_value[2], stop_value[2], 1):
for y in range(start_value[1], stop_value[1], 1):
for x in range(start_value[0], stop_value[0], 1):
if x in [start_value[0], stop_value[0]-1] or y in [start_value[1], stop_value[1]-1] or z in [start_value[2], stop_value[2]-1]:
boundary_list.append(XYZMorton([x, y, z]))
# boundary_list.append([x, y, z])
else:
interior_list.append(XYZMorton([x, y, z]))
# interior_list.append([x, y, z])
return boundary_list, interior_list
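# Usage sketch (hypothetical sizes): tiling a (256, 256, 64) region into
# 128x128x16 cubes gives a 2x2x4 grid, and every cube of that grid touches
# the region boundary, so interior_list comes back empty:
#
#   boundary, interior = boundary_morton([0, 0, 0], [256, 256, 64], [128, 128, 16])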
#def annoidIntersect_ctype_OMP(cutout, annoid_list):
#"""Remove all annotations in a cutout that do not match the filterlist using OpenMP"""
## get a copy of the iterator as a 1-D array
#cutout = cutout.ravel()
#annoid_list = np.asarray(annoid_list, dtype=np.uint32)
## Calling the C openmp funtion
#ndlib.annoidIntersectOMP(cutout, cp.c_int(len(cutout)), np.sort(annoid_list), cp.c_int(len(annoid_list)))
#return cutout.reshape( cutout_shape )
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of utilities supporting export to SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.learn.python.learn import export_strategy as export_strategy_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator as core_estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.util import compat
class TestEstimator(core_estimator.Estimator):
def __init__(self, *args, **kwargs):
super(TestEstimator, self).__init__(*args, **kwargs)
self.last_exported_checkpoint = ""
self.last_exported_dir = ""
# @Override
def export_savedmodel(self,
export_dir,
serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None):
if not os.path.exists(export_dir):
os.makedirs(export_dir)
open(os.path.join(export_dir, "placeholder.txt"), "a").close()
self.last_exported_checkpoint = checkpoint_path
self.last_exported_dir = export_dir
return export_dir
class SavedModelExportUtilsTest(test.TestCase):
def test_build_standardized_signature_def_regression(self):
input_tensors = {
"input-1":
array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
}
output_tensors = {
"output-1":
array_ops.placeholder(dtypes.float32, 1, name="output-tensor-1")
}
problem_type = constants.ProblemType.LINEAR_REGRESSION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[signature_constants.REGRESS_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.REGRESS_OUTPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(name="output-tensor-1:0",
dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = signature_constants.REGRESS_METHOD_NAME
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification(self):
"""Tests classification with one output tensor."""
input_tensors = {
"input-1":
array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
}
output_tensors = {
"output-1":
array_ops.placeholder(dtypes.string, 1, name="output-tensor-1")
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-1:0",
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification2(self):
"""Tests multiple output tensors that include classes and probabilities."""
input_tensors = {
"input-1":
array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
}
output_tensors = {
"classes":
array_ops.placeholder(
dtypes.string, 1, name="output-tensor-classes"),
# Will be used for CLASSIFY_OUTPUT_SCORES.
"probabilities":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-proba"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits-unused"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-classes:0",
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-proba:0",
dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification3(self):
"""Tests multiple output tensors that include classes and scores."""
input_tensors = {
"input-1":
array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
}
output_tensors = {
"classes":
array_ops.placeholder(
dtypes.string, 1, name="output-tensor-classes"),
"scores":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-scores"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits-unused"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-classes:0",
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-scores:0",
dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification4(self):
"""Tests classification without classes tensor."""
input_tensors = {
"input-1":
array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
}
output_tensors = {
"probabilities":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-proba"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits-unused"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-proba:0",
dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification5(self):
"""Tests multiple output tensors that include integer classes and scores.
    Integer classes are dropped because Servo classification can only serve
    string classes, so only the scores are present in the signature.
"""
input_tensors = {
"input-1":
array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
}
output_tensors = {
"classes":
array_ops.placeholder(
dtypes.int64, 1, name="output-tensor-classes"),
"scores":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-scores"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits-unused"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-scores:0",
dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification6(self):
"""Tests multiple output tensors that with integer classes and no scores.
Servo classification cannot serve integer classes, but no scores are
available. So, we fall back to predict signature.
"""
input_tensors = {
"input-1":
array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
}
output_tensors = {
"classes":
array_ops.placeholder(
dtypes.int64, 1, name="output-tensor-classes"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_int64 = types_pb2.DataType.Value("DT_INT64")
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs["input-1"].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
expected_signature_def.outputs["classes"].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-classes:0",
dtype=dtype_int64,
tensor_shape=shape))
expected_signature_def.outputs["logits"].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-logits:0",
dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.PREDICT_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_get_input_alternatives(self):
input_ops = input_fn_utils.InputFnOps("bogus features dict", None,
"bogus default input dict")
input_alternatives, _ = saved_model_export_utils.get_input_alternatives(
input_ops)
self.assertEqual(input_alternatives[
saved_model_export_utils.DEFAULT_INPUT_ALTERNATIVE_KEY],
"bogus default input dict")
# self.assertEqual(input_alternatives[
# saved_model_export_utils.FEATURES_INPUT_ALTERNATIVE_KEY],
# "bogus features dict")
def test_get_output_alternatives_explicit_default(self):
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION,
"bogus output dict"),
"head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"),
"head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": "bogus_tensor"},
output_alternatives=provided_output_alternatives)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops, "head-1")
self.assertEqual(provided_output_alternatives, output_alternatives)
def test_get_output_alternatives_wrong_default(self):
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION,
"bogus output dict"),
"head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"),
"head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": "bogus_tensor"},
output_alternatives=provided_output_alternatives)
with self.assertRaises(ValueError) as e:
saved_model_export_utils.get_output_alternatives(model_fn_ops, "WRONG")
self.assertEqual("Requested default_output_alternative: WRONG, but "
"available output_alternatives are: ['head-1', 'head-2', "
"'head-3']", str(e.exception))
def test_get_output_alternatives_single_no_default(self):
prediction_tensor = constant_op.constant(["bogus"])
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION, {
"output": prediction_tensor
}),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions=prediction_tensor,
output_alternatives=provided_output_alternatives)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops)
self.assertEqual({
"head-1": (constants.ProblemType.LINEAR_REGRESSION, {
"output": prediction_tensor
})
}, output_alternatives)
def test_get_output_alternatives_multi_no_default(self):
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION,
"bogus output dict"),
"head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"),
"head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": "bogus_tensor"},
output_alternatives=provided_output_alternatives)
with self.assertRaises(ValueError) as e:
saved_model_export_utils.get_output_alternatives(model_fn_ops)
self.assertEqual("Please specify a default_output_alternative. Available "
"output_alternatives are: ['head-1', 'head-2', 'head-3']",
str(e.exception))
def test_get_output_alternatives_none_provided(self):
prediction_tensor = constant_op.constant(["bogus"])
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": prediction_tensor},
output_alternatives=None)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops)
self.assertEqual({
"default_output_alternative": (constants.ProblemType.UNSPECIFIED, {
"some_output": prediction_tensor
})
}, output_alternatives)
def test_get_output_alternatives_empty_provided_with_default(self):
prediction_tensor = constant_op.constant(["bogus"])
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": prediction_tensor},
output_alternatives={})
with self.assertRaises(ValueError) as e:
saved_model_export_utils.get_output_alternatives(model_fn_ops, "WRONG")
self.assertEqual("Requested default_output_alternative: WRONG, but "
"available output_alternatives are: []", str(e.exception))
def test_get_output_alternatives_empty_provided_no_default(self):
prediction_tensor = constant_op.constant(["bogus"])
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": prediction_tensor},
output_alternatives={})
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops)
self.assertEqual({
"default_output_alternative": (constants.ProblemType.UNSPECIFIED, {
"some_output": prediction_tensor
})
}, output_alternatives)
def test_get_output_alternatives_implicit_single(self):
prediction_tensor = constant_op.constant(["bogus"])
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions=prediction_tensor,
output_alternatives=None)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops)
self.assertEqual({
"default_output_alternative": (constants.ProblemType.UNSPECIFIED, {
"output": prediction_tensor
})
}, output_alternatives)
def test_build_all_signature_defs(self):
input_features = constant_op.constant(["10"])
input_example = constant_op.constant(["input string"])
input_ops = input_fn_utils.InputFnOps({
"features": input_features
}, None, {"default input": input_example})
input_alternatives, _ = (
saved_model_export_utils.get_input_alternatives(input_ops))
output_1 = constant_op.constant([1.0])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION, {
"some_output_1": output_1
}),
"head-2": (constants.ProblemType.CLASSIFICATION, {
"some_output_2": output_2
}),
"head-3": (constants.ProblemType.UNSPECIFIED, {
"some_output_3": output_3
}),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": constant_op.constant(["4"])},
output_alternatives=provided_output_alternatives)
output_alternatives, _ = (saved_model_export_utils.get_output_alternatives(
model_fn_ops, "head-1"))
signature_defs = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives, "head-1")
expected_signature_defs = {
"serving_default":
signature_def_utils.regression_signature_def(
input_example, output_1),
"default_input_alternative:head-1":
signature_def_utils.regression_signature_def(
input_example, output_1),
"default_input_alternative:head-2":
signature_def_utils.classification_signature_def(
input_example, output_2, None),
"default_input_alternative:head-3":
signature_def_utils.predict_signature_def({
"default input": input_example
}, {"some_output_3": output_3}),
# "features_input_alternative:head-1":
# signature_def_utils.regression_signature_def(input_features,
# output_1),
# "features_input_alternative:head-2":
# signature_def_utils.classification_signature_def(input_features,
# output_2, None),
# "features_input_alternative:head-3":
# signature_def_utils.predict_signature_def({
# "input": input_features
# }, {"output": output_3}),
}
self.assertDictEqual(expected_signature_defs, signature_defs)
def test_build_all_signature_defs_legacy_input_fn_not_supported(self):
"""Tests that legacy input_fn returning (features, labels) raises error.
serving_input_fn must return InputFnOps including a default input
alternative.
"""
input_features = constant_op.constant(["10"])
input_ops = ({"features": input_features}, None)
input_alternatives, _ = (
saved_model_export_utils.get_input_alternatives(input_ops))
output_1 = constant_op.constant(["1"])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION, {
"some_output_1": output_1
}),
"head-2": (constants.ProblemType.CLASSIFICATION, {
"some_output_2": output_2
}),
"head-3": (constants.ProblemType.UNSPECIFIED, {
"some_output_3": output_3
}),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": constant_op.constant(["4"])},
output_alternatives=provided_output_alternatives)
output_alternatives, _ = (saved_model_export_utils.get_output_alternatives(
model_fn_ops, "head-1"))
with self.assertRaisesRegexp(
ValueError, "A default input_alternative must be provided"):
saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives, "head-1")
def test_get_timestamped_export_dir(self):
export_dir_base = tempfile.mkdtemp() + "export/"
export_dir_1 = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
time.sleep(2)
export_dir_2 = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
time.sleep(2)
export_dir_3 = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
# Export directories should be named using a timestamp that is seconds
# since epoch. Such a timestamp is 10 digits long.
time_1 = os.path.basename(export_dir_1)
self.assertEqual(10, len(time_1))
time_2 = os.path.basename(export_dir_2)
self.assertEqual(10, len(time_2))
time_3 = os.path.basename(export_dir_3)
self.assertEqual(10, len(time_3))
self.assertTrue(int(time_1) < int(time_2))
self.assertTrue(int(time_2) < int(time_3))
def test_garbage_collect_exports(self):
export_dir_base = tempfile.mkdtemp() + "export/"
gfile.MkDir(export_dir_base)
export_dir_1 = _create_test_export_dir(export_dir_base)
export_dir_2 = _create_test_export_dir(export_dir_base)
export_dir_3 = _create_test_export_dir(export_dir_base)
export_dir_4 = _create_test_export_dir(export_dir_base)
self.assertTrue(gfile.Exists(export_dir_1))
self.assertTrue(gfile.Exists(export_dir_2))
self.assertTrue(gfile.Exists(export_dir_3))
self.assertTrue(gfile.Exists(export_dir_4))
# Garbage collect all but the most recent 2 exports,
# where recency is determined based on the timestamp directory names.
saved_model_export_utils.garbage_collect_exports(export_dir_base, 2)
self.assertFalse(gfile.Exists(export_dir_1))
self.assertFalse(gfile.Exists(export_dir_2))
self.assertTrue(gfile.Exists(export_dir_3))
self.assertTrue(gfile.Exists(export_dir_4))
def test_get_most_recent_export(self):
export_dir_base = tempfile.mkdtemp() + "export/"
gfile.MkDir(export_dir_base)
_create_test_export_dir(export_dir_base)
_create_test_export_dir(export_dir_base)
_create_test_export_dir(export_dir_base)
export_dir_4 = _create_test_export_dir(export_dir_base)
(most_recent_export_dir, most_recent_export_version) = (
saved_model_export_utils.get_most_recent_export(export_dir_base))
self.assertEqual(
compat.as_bytes(export_dir_4), compat.as_bytes(most_recent_export_dir))
self.assertEqual(
compat.as_bytes(export_dir_4),
os.path.join(
compat.as_bytes(export_dir_base),
compat.as_bytes(str(most_recent_export_version))))
def test_make_export_strategy(self):
"""Only tests that an ExportStrategy instance is created."""
def _serving_input_fn():
return array_ops.constant([1]), None
export_strategy = saved_model_export_utils.make_export_strategy(
serving_input_fn=_serving_input_fn,
default_output_alternative_key="default",
assets_extra={"from/path": "to/path"},
as_text=False,
exports_to_keep=5)
self.assertTrue(
isinstance(export_strategy, export_strategy_lib.ExportStrategy))
def test_make_parsing_export_strategy(self):
"""Only tests that an ExportStrategy instance is created."""
sparse_col = fc.sparse_column_with_hash_bucket(
"sparse_column", hash_bucket_size=100)
embedding_col = fc.embedding_column(
fc.sparse_column_with_hash_bucket(
"sparse_column_for_embedding", hash_bucket_size=10),
dimension=4)
real_valued_col1 = fc.real_valued_column("real_valued_column1")
bucketized_col1 = fc.bucketized_column(
fc.real_valued_column("real_valued_column_for_bucketization1"), [0, 4])
feature_columns = [
sparse_col, embedding_col, real_valued_col1, bucketized_col1
]
export_strategy = saved_model_export_utils.make_parsing_export_strategy(
feature_columns=feature_columns)
self.assertTrue(
isinstance(export_strategy, export_strategy_lib.ExportStrategy))
def test_make_best_model_export_strategy(self):
export_dir_base = tempfile.mkdtemp() + "export/"
gfile.MkDir(export_dir_base)
test_estimator = TestEstimator()
export_strategy = saved_model_export_utils.make_best_model_export_strategy(
serving_input_fn=None, exports_to_keep=3, compare_fn=None)
self.assertNotEqual("",
export_strategy.export(test_estimator, export_dir_base,
"fake_ckpt_0", {"loss": 100}))
self.assertNotEqual("", test_estimator.last_exported_dir)
self.assertNotEqual("", test_estimator.last_exported_checkpoint)
self.assertEqual("",
export_strategy.export(test_estimator, export_dir_base,
"fake_ckpt_1", {"loss": 101}))
self.assertEqual(test_estimator.last_exported_dir,
os.path.join(export_dir_base, "fake_ckpt_0"))
self.assertNotEqual("",
export_strategy.export(test_estimator, export_dir_base,
"fake_ckpt_2", {"loss": 10}))
self.assertEqual(test_estimator.last_exported_dir,
os.path.join(export_dir_base, "fake_ckpt_2"))
self.assertEqual("",
export_strategy.export(test_estimator, export_dir_base,
"fake_ckpt_3", {"loss": 20}))
self.assertEqual(test_estimator.last_exported_dir,
os.path.join(export_dir_base, "fake_ckpt_2"))
def test_make_best_model_export_strategy_exceptions(self):
export_dir_base = tempfile.mkdtemp() + "export/"
test_estimator = TestEstimator()
export_strategy = saved_model_export_utils.make_best_model_export_strategy(
serving_input_fn=None, exports_to_keep=3, compare_fn=None)
with self.assertRaises(ValueError):
export_strategy.export(test_estimator, export_dir_base, "", {"loss": 200})
with self.assertRaises(ValueError):
export_strategy.export(test_estimator, export_dir_base, "fake_ckpt_1",
None)
def test_extend_export_strategy(self):
def _base_export_fn(unused_estimator,
export_dir_base,
unused_checkpoint_path=None):
base_path = os.path.join(export_dir_base, "e1")
gfile.MkDir(base_path)
return base_path
def _post_export_fn(orig_path, new_path):
assert orig_path.endswith("/e1")
post_export_path = os.path.join(new_path, "rewrite")
gfile.MkDir(post_export_path)
return post_export_path
base_export_strategy = export_strategy_lib.ExportStrategy(
"Servo", _base_export_fn)
final_export_strategy = saved_model_export_utils.extend_export_strategy(
base_export_strategy, _post_export_fn, "Servo2")
self.assertEqual(final_export_strategy.name, "Servo2")
test_estimator = TestEstimator()
tmpdir = tempfile.mkdtemp()
final_path = final_export_strategy.export(test_estimator, tmpdir,
os.path.join(
tmpdir, "checkpoint"))
self.assertEqual(os.path.join(tmpdir, "rewrite"), final_path)
def test_extend_export_strategy_same_name(self):
def _base_export_fn(unused_estimator,
export_dir_base,
unused_checkpoint_path=None):
base_path = os.path.join(export_dir_base, "e1")
gfile.MkDir(base_path)
return base_path
def _post_export_fn(orig_path, new_path):
assert orig_path.endswith("/e1")
post_export_path = os.path.join(new_path, "rewrite")
gfile.MkDir(post_export_path)
return post_export_path
base_export_strategy = export_strategy_lib.ExportStrategy(
"Servo", _base_export_fn)
final_export_strategy = saved_model_export_utils.extend_export_strategy(
base_export_strategy, _post_export_fn)
self.assertEqual(final_export_strategy.name, "Servo")
test_estimator = TestEstimator()
tmpdir = tempfile.mkdtemp()
final_path = final_export_strategy.export(test_estimator, tmpdir,
os.path.join(
tmpdir, "checkpoint"))
self.assertEqual(os.path.join(tmpdir, "rewrite"), final_path)
def test_extend_export_strategy_raises_error(self):
def _base_export_fn(unused_estimator,
export_dir_base,
unused_checkpoint_path=None):
base_path = os.path.join(export_dir_base, "e1")
gfile.MkDir(base_path)
return base_path
def _post_export_fn(unused_orig_path, unused_new_path):
return tempfile.mkdtemp()
base_export_strategy = export_strategy_lib.ExportStrategy(
"Servo", _base_export_fn)
final_export_strategy = saved_model_export_utils.extend_export_strategy(
base_export_strategy, _post_export_fn)
test_estimator = TestEstimator()
tmpdir = tempfile.mkdtemp()
with self.assertRaises(ValueError) as ve:
final_export_strategy.export(test_estimator, tmpdir,
os.path.join(tmpdir, "checkpoint"))
self.assertTrue(
"post_export_fn must return a sub-directory" in str(ve.exception))
def _create_test_export_dir(export_dir_base):
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
gfile.MkDir(export_dir)
time.sleep(2)
return export_dir
if __name__ == "__main__":
test.main()
|
|
"""A module to provide a copy of all the default data within the IATI SSOT.
This includes Codelists, Schemas and Rulesets at various versions of the Standard.
Todo:
Handle multiple versions of the Standard rather than limiting to the latest.
Implement more than Codelists.
"""
import json
import os
from collections import defaultdict
from copy import deepcopy
import iati.codelists
import iati.constants
import iati.resources
import iati.utilities
import iati.version
_CODELISTS = defaultdict(dict)
"""A cache of loaded Codelists.
This removes the need to repeatedly load a Codelist from disk each time it is accessed.
The dictionary is structured as:
{
"version_number_a": {
"codelist_name_1": iati.Codelist(codelist_1),
"codelist_name_2": iati.Codelist(codelist_2)
[...]
},
"version_number_b": {
[...]
},
[...]
}
Warning:
Modifying values directly obtained from this cache can potentially cause unexpected behavior. As such, it is highly recommended to perform a `deepcopy()` on any accessed Codelist before it is modified in any way.
"""
def codelist(name, version):
"""Return the default Codelist with the specified name for the specified version of the Standard.
Args:
name (str): The name of the Codelist to return.
version (str / Decimal / iati.Version): The Integer or Decimal version of the Standard to return the Codelist for. If an Integer Version is specified, uses the most recent Decimal Version within the Integer Version.
Raises:
ValueError: When a specified name is not a Codelist at the specified version of the Standard.
ValueError: When a specified version is not a valid version of the Standard.
Returns:
iati.Codelist: A Codelist with the specified name from the specified version of the Standard. It is populated with all the Codes on the Codelist.
Warning:
A name may not be sufficient to act as a UID.
    Further exploration needs to be undertaken into how to handle multiple versions of the Standard.
Todo:
Better distinguish the types of ValueError.
Better distinguish TypeErrors from KeyErrors - sometimes the latter is raised when the former should have been.
"""
try:
codelist_found = _codelists(version, True)[name]
return deepcopy(codelist_found)
except (KeyError, TypeError):
msg = "There is no default Codelist in version {0} of the Standard with the name {1}.".format(version, name)
iati.utilities.log_warning(msg)
raise ValueError(msg)
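# Example usage (a minimal sketch; the Codelist name, the version string and
# the `.codes` attribute are illustrative assumptions rather than values
# guaranteed by this module):
#
#     import iati.default
#
#     sector = iati.default.codelist('Sector', '2.03')
#     print(len(sector.codes))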
@iati.version.decimalise_integer
@iati.version.normalise_decimals
@iati.version.allow_fully_supported_version
def _codelists(version, use_cache=False):
"""Locate the default Codelists for the specified version of the Standard.
Args:
version (str / Decimal / iati.Version): The Integer or Decimal version of the Standard to return the Codelists for. If an Integer Version is specified, uses the most recent Decimal Version within the Integer Version.
use_cache (bool): Whether the cache should be used rather than loading the Codelists from disk again. If used, a `deepcopy()` should be performed on any returned Codelist before it is modified.
Raises:
ValueError: When a specified version is not a valid version of the IATI Standard.
Returns:
dict: A dictionary containing all the Codelists at the specified version of the Standard. All Non-Embedded Codelists are included. Keys are Codelist names. Values are iati.Codelist() instances.
Warning:
Setting `use_cache` to `True` is dangerous since it does not return a deep copy of the Codelists. This means that modification of a returned Codelist will modify the Codelist everywhere.
A `deepcopy()` should be performed on any returned value before it is modified.
Note:
This is a private function so as to prevent the (dangerous) `use_cache` parameter being part of the public API.
"""
paths = iati.resources.get_codelist_paths(version)
for path in paths:
_, filename = os.path.split(path)
name = filename[:-len(iati.resources.FILE_CODELIST_EXTENSION)] # Get the name of the codelist, without the '.xml' file extension
if (name not in _CODELISTS[version].keys()) or not use_cache:
xml_str = iati.utilities.load_as_string(path)
codelist_found = iati.Codelist(name, xml=xml_str)
_CODELISTS[version][name] = codelist_found
return _CODELISTS[version]
def codelists(version):
"""Return the default Codelists for the specified version of the Standard.
Args:
version (str / Decimal / iati.Version): The Integer or Decimal version of the Standard to return the Codelists for. If an Integer Version is specified, uses the most recent Decimal Version within the Integer Version.
Raises:
ValueError: When a specified version is not a valid version of the IATI Standard.
Returns:
dict: A dictionary containing all the Codelists at the specified version of the Standard. All Non-Embedded Codelists are included. Keys are Codelist names. Values are iati.Codelist() instances, populated with the relevant Codes.
"""
return _codelists(version)
@iati.version.decimalise_integer
@iati.version.normalise_decimals
@iati.version.allow_fully_supported_version
def codelist_mapping(version):
"""Define the mapping process which states where in a Dataset you should find values on a given Codelist.
Args:
version (str / Decimal / iati.Version): The Integer or Decimal version of the Standard to return the Codelist Mapping File for. If an Integer Version is specified, uses the most recent Decimal Version within the Integer Version.
Raises:
ValueError: When a specified version is not a valid version of the IATI Standard.
Returns:
dict of dict: A dictionary containing mapping information. Keys in the first dictionary are Codelist names. Keys in the second dictionary are `xpath` and `condition`. The condition is `None` if there is no condition.
Todo:
Make use of the `version` parameter.
"""
path = iati.resources.create_codelist_mapping_path(version)
mapping_tree = iati.utilities.load_as_tree(path)
mappings = defaultdict(list)
for mapping in mapping_tree.getroot().xpath('//mapping'):
codelist_name = mapping.find('codelist').attrib['ref']
codelist_location = mapping.find('path').text
try:
condition = mapping.find('condition').text
except AttributeError: # there is no condition
condition = None
mappings[codelist_name].append({
'xpath': codelist_location,
'condition': condition
})
return mappings
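# The returned structure, sketched with illustrative (assumed) values:
#
#     {
#         'Sector': [
#             {'xpath': '//sector/@code', 'condition': None},
#         ],
#         'Currency': [
#             {'xpath': '//value/@currency', 'condition': None},
#         ],
#     }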
@iati.version.decimalise_integer
@iati.version.normalise_decimals
@iati.version.allow_fully_supported_version
def ruleset(version):
"""Return the Standard Ruleset for the specified version of the Standard.
Args:
version (str / Decimal / iati.Version): The Integer or Decimal version of the Standard to return the Standard Ruleset for. If an Integer Version is specified, uses the most recent Decimal Version within the Integer Version.
Raises:
ValueError: When a specified version is not a valid version of the IATI Standard.
Returns:
iati.Ruleset: The default Ruleset for the specified version of the Standard.
"""
path = iati.resources.get_ruleset_paths(version)[0]
ruleset_str = iati.utilities.load_as_string(path)
return iati.Ruleset(ruleset_str)
def ruleset_schema():
"""Return the Ruleset schema for the specified version of the Standard.
Raises:
ValueError: When a specified version is not a valid version of the IATI Standard.
Returns:
dict: A dictionary representing the Ruleset schema for the specified version of the Standard.
Todo:
Determine whether a version should be provided. This is worth considering if the content of the IATI Ruleset Schema varies between versions.
"""
path = iati.resources.create_ruleset_path(iati.resources.FILE_RULESET_SCHEMA_NAME, iati.version.STANDARD_VERSION_ANY)
schema_str = iati.utilities.load_as_string(path)
return json.loads(schema_str)
_SCHEMAS = defaultdict(lambda: defaultdict(dict))
"""A cache of loaded Schemas.
This removes the need to repeatedly load a Schema from disk each time it is accessed.
{
"version_number_a": {
"populated": {
"iati-activities": iati.ActivitySchema
"iati-organisations": iati.OrganisationSchema
},
"unpopulated": {
"iati-activities": iati.ActivitySchema
"iati-organisations": iati.OrganisationSchema
},
},
"version_number_b": {
[...]
},
[...]
}
Warning:
Modifying values directly obtained from this cache can potentially cause unexpected behavior. As such, it is highly recommended to perform a `deepcopy()` on any accessed Schema before it is modified in any way.
"""
def _populate_schema(schema, version):
"""Populate a Schema with all its extras.
The extras include Codelists and Rulesets.
Args:
schema (iati.Schema): The Schema to populate.
version (iati.Version): The Decimal version of the Standard to populate the Schema from.
Returns:
iati.Schema: The provided Schema, populated with additional information.
Warning:
Does not create a copy of the provided Schema, instead adding to it directly.
"""
codelists_to_add = codelists(version)
for codelist_to_add in codelists_to_add.values():
schema.codelists.add(codelist_to_add)
schema.rulesets.add(ruleset(version))
return schema
def _schema(path_func, schema_class, version, populate=True, use_cache=False):
"""Return the default Schema of the specified type for the specified version of the Standard.
Args:
path_func (func): A function to return the paths at which the relevant Schema can be found.
schema_class (type): A class definition for the Schema of interest.
version (iati.Version): The Decimal version of the Standard to return the Schema for.
        populate (bool): Whether the Schema should be populated with auxiliary information such as Codelists and Rulesets.
use_cache (bool): Whether the cache should be used rather than loading the Schema from disk again. If used, a `deepcopy()` should be performed on any returned Schema before it is modified.
Raises:
ValueError: When a specified version is not a valid version of the IATI Standard.
Returns:
iati.Schema: An instantiated IATI Schema for the specified version.
"""
population_key = 'populated' if populate else 'unpopulated'
schema_paths = path_func(version)
if (schema_class.ROOT_ELEMENT_NAME not in _SCHEMAS[version][population_key].keys()) or not use_cache:
schema = schema_class(schema_paths[0])
if populate:
schema = _populate_schema(schema, version)
_SCHEMAS[version][population_key][schema_class.ROOT_ELEMENT_NAME] = schema
return _SCHEMAS[version][population_key][schema_class.ROOT_ELEMENT_NAME]
@iati.version.decimalise_integer
@iati.version.normalise_decimals
@iati.version.allow_known_version
def activity_schema(version, populate=True):
"""Return the default Activity Schema for the specified version of the Standard.
Args:
version (str / Decimal / iati.Version): The Integer or Decimal version of the Standard to return the Activity Schema for. If an Integer Version is specified, uses the most recent Decimal Version within the Integer Version.
        populate (bool): Whether the Schema should be populated with auxiliary information such as Codelists and Rulesets.
Raises:
ValueError: When a specified version is not a valid version of the IATI Standard.
Returns:
iati.ActivitySchema: An instantiated IATI Schema for the specified version of the Standard.
"""
return _schema(iati.resources.get_activity_schema_paths, iati.ActivitySchema, version, populate)
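# Example usage (a minimal sketch; the version string is an illustrative
# assumption): fetch a fully populated Activity Schema, or skip the Codelists
# and Ruleset when they are not needed.
#
#     schema = iati.default.activity_schema('2.03')
#     bare_schema = iati.default.activity_schema('2.03', populate=False)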
@iati.version.decimalise_integer
@iati.version.normalise_decimals
@iati.version.allow_known_version
def organisation_schema(version, populate=True):
"""Return the default Organisation Schema for the specified version of the Standard.
Args:
version (str / Decimal / iati.Version): The Integer or Decimal version of the Standard to return the Organisation Schema for. If an Integer Version is specified, uses the most recent Decimal Version within the Integer Version.
        populate (bool): Whether the Schema should be populated with auxiliary information such as Codelists and Rulesets.
Raises:
ValueError: When a specified version is not a valid version of the IATI Standard.
Returns:
iati.OrganisationSchema: An instantiated IATI Schema for the specified version of the Standard.
"""
return _schema(iati.resources.get_organisation_schema_paths, iati.OrganisationSchema, version, populate)
|
|
# -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
from collections import Mapping
from datetime import datetime
from .auth import _basic_auth_str
from .compat import cookielib, OrderedDict, urljoin, urlparse
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from .utils import to_key_val_list, default_headers, to_native_string
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .packages.urllib3._collections import RecentlyUsedContainer
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
REDIRECT_CACHE_SIZE = 1000
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
"""Determines appropriate setting for a given request, taking into account
the explicit setting on that request, and the setting in the session. If a
setting is a dictionary, they will be merged together using `dict_class`
"""
if session_setting is None:
return request_setting
if request_setting is None:
return session_setting
# Bypass if not a dictionary (e.g. verify)
if not (
isinstance(session_setting, Mapping) and
isinstance(request_setting, Mapping)
):
return request_setting
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
# Remove keys that are set to None. Extract keys first to avoid altering
# the dictionary during iteration.
none_keys = [k for (k, v) in merged_setting.items() if v is None]
for key in none_keys:
del merged_setting[key]
return merged_setting
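# A quick sketch of the merge behaviour (values are illustrative):
#
#     session_headers = {'Accept': 'application/json', 'X-Trace': 'abc'}
#     request_headers = {'Accept': 'text/html', 'X-Trace': None}
#     merge_setting(request_headers, session_headers)
#     # -> {'Accept': 'text/html'}  (request wins; a None value removes the key)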
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
if session_hooks is None or session_hooks.get('response') == []:
return request_hooks
if request_hooks is None or request_hooks.get('response') == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses."""
i = 0
hist = [] # keep track of history
while resp.is_redirect:
prepared_request = req.copy()
if i > 0:
# Update history and keep track of redirects.
hist.append(resp)
new_hist = list(hist)
resp.history = new_hist
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if i >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)
# Release the connection back into the pool.
resp.close()
url = resp.headers['location']
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (parsed_rurl.scheme, url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
# Cache the url, unless it redirects to itself.
if resp.is_permanent_redirect and req.url != prepared_request.url:
self.redirect_cache[req.url] = prepared_request.url
self.rebuild_method(prepared_request, resp)
# https://github.com/kennethreitz/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
if 'Content-Length' in prepared_request.headers:
del prepared_request.headers['Content-Length']
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
prepared_request._cookies.update(self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# Override the original request.
req = prepared_request
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
i += 1
yield resp
def rebuild_auth(self, prepared_request, response):
"""When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers:
# If we get redirected to a new host, we should strip out any
# authentication headers.
original_parsed = urlparse(response.request.url)
redirect_parsed = urlparse(url)
if (original_parsed.hostname != redirect_parsed.hostname):
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
return
def rebuild_proxies(self, prepared_request, proxies):
"""This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
"""
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy() if proxies is not None else {}
if self.trust_env and not should_bypass_proxies(url):
environ_proxies = get_environ_proxies(url)
proxy = environ_proxies.get('all', environ_proxies.get(scheme))
if proxy:
new_proxies.setdefault(scheme, proxy)
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
username, password = get_auth_from_url(new_proxies[scheme])
except KeyError:
username, password = None, None
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
return new_proxies
def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
class Session(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('http://httpbin.org/get')
<Response [200]>
Or as a context manager::
>>> with requests.Session() as s:
>>> s.get('http://httpbin.org/get')
<Response [200]>
"""
__attrs__ = [
'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
'max_redirects',
]
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol or protocol and host to the URL of the proxy
#: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
#: be used on each :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
self.verify = True
#: SSL certificate default.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
#: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
#: 30.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Trust environment settings for proxy configuration, default
#: authentication and similar.
self.trust_env = True
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = OrderedDict()
self.mount('https://', HTTPAdapter())
self.mount('http://', HTTPAdapter())
# Only store 1000 redirects to prevent using infinite memory
self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
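    # Typical prepared-request flow (a minimal sketch):
    #
    #     s = Session()
    #     req = Request('GET', 'http://httpbin.org/get', params={'q': 'requests'})
    #     prepped = s.prepare_request(req)  # session headers/cookies/auth merged in
    #     resp = s.send(prepped)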
def request(self, method, url,
params=None,
data=None,
headers=None,
cookies=None,
files=None,
auth=None,
timeout=None,
allow_redirects=True,
proxies=None,
hooks=None,
stream=None,
verify=None,
cert=None,
json=None):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send
in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) whether the SSL cert will be verified.
A CA_BUNDLE path can also be provided. Defaults to ``True``.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response
"""
# Create the Request.
req = Request(
method = method.upper(),
url = url,
headers = headers,
files = files,
data = data or {},
json = json,
params = params or {},
auth = auth,
cookies = cookies,
hooks = hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Send the request.
send_kwargs = {
'timeout': timeout,
'allow_redirects': allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
return resp
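    # Example (a minimal sketch): per-request options are layered on top of the
    # session-level defaults configured in __init__.
    #
    #     s = Session()
    #     s.headers.update({'x-test': 'true'})
    #     r = s.request('GET', 'http://httpbin.org/headers',
    #                   params={'key': 'value'}, timeout=5)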
def get(self, url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, **kwargs)
def options(self, url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('POST', url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest."""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if isinstance(request, Request):
raise ValueError('You can only send PreparedRequests.')
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
hooks = request.hooks
# Resolve URL in redirect cache, if available.
if allow_redirects:
checked_urls = set()
while request.url in self.redirect_cache:
checked_urls.add(request.url)
new_url = self.redirect_cache.get(request.url)
if new_url in checked_urls:
break
request.url = new_url
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = datetime.utcnow()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
r.elapsed = datetime.utcnow() - start
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Redirect resolving generator.
gen = self.resolve_redirects(r, request, **kwargs)
# Resolve redirects if allowed.
history = [resp for resp in gen] if allow_redirects else []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
if not stream:
r.content
return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""Check the environment and merge it with some settings."""
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
env_proxies = get_environ_proxies(url) or {}
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration and be compatible
# with cURL.
if verify is True or verify is None:
verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
os.environ.get('CURL_CA_BUNDLE'))
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream,
'cert': cert}
def get_adapter(self, url):
"""Returns the appropriate connection adapter for the given URL."""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for '%s'" % url)
def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by key length.
"""
self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
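    # Example (a minimal sketch): mounting an adapter with retries for a single
    # host. The most specific (longest) matching prefix wins in get_adapter().
    #
    #     s = Session()
    #     s.mount('https://api.example.com/', HTTPAdapter(max_retries=3))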
def __getstate__(self):
state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
state['redirect_cache'] = dict(self.redirect_cache)
return state
def __setstate__(self, state):
redirect_cache = state.pop('redirect_cache', {})
for attr, value in state.items():
setattr(self, attr, value)
self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
for redirect, to in redirect_cache.items():
self.redirect_cache[redirect] = to
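    # Because __getstate__/__setstate__ carry exactly the attributes listed in
    # __attrs__ plus the redirect cache, a Session round-trips through pickle
    # with its configuration intact (a minimal sketch):
    #
    #     import pickle
    #     s = Session()
    #     s.headers['x-test'] = 'true'
    #     s2 = pickle.loads(pickle.dumps(s))
    #     assert s2.headers['x-test'] == 'true'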
def session():
"""Returns a :class:`Session` for context-management."""
return Session()
|
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_flobn_cm_subcon
@fid marine-integrations/mi/dataset/parser/test/test_flobn_cm_subcon.py
@author Rachel Manoni
@brief Test code for FLOBN-CM data parser
"""
from mi.dataset.parser.flobn_cm_subcon import FlobnMSubconTemperatureParser, FlobnCSubconParser, FlobnMSubconParser
__author__ = 'Rachel Manoni'
import os
from mi.core.log import get_logger
log = get_logger()
from nose.plugins.attrib import attr
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.test.test_parser import BASE_RESOURCE_PATH
RESOURCE_PATH = os.path.join(BASE_RESOURCE_PATH, 'flobn', 'resource')
TEMPERATURE_LOG_FILE = 'FLOBN-M_Temp_Record_ver_0-05.csv'
TEMPERATURE_YAML_FILE = 'FLOBN-M_Temp_Record_ver_0-05.yml'
INVALID_TEMPERATURE_DATA_FILE = 'FLOBN-M_Temp_Record_bad.csv'
TEMPERATURE_RECORDS = 242
C_LOG_FILE = 'FLOBN-C_Sample_Record_ver_0-05.csv'
C_YAML_FILE = 'FLOBN-C_Sample_Record_ver_0-05.yml'
INVALID_C_DATA_FILE = 'FLOBN-C_Sample_Record_bad.csv'
C_RECORDS = 168
M_LOG_FILE = 'FLOBN-M_Sample_Record_ver_0-05.csv'
M_YAML_FILE = 'FLOBN-M_Sample_Record_ver_0-05.yml'
INVALID_M_DATA_FILE = 'FLOBN-M_Sample_Record_bad.csv'
M_RECORDS = 1008
@attr('UNIT', group='mi')
class FlobnCmSubconParserUnitTestCase(ParserUnitTestCase):
"""
flobn_cm_subcon Parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self.rec_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.flobn_cm_subcon',
DataSetDriverConfigKeys.PARTICLE_CLASS: None
}
def open_file(self, filename):
return open(os.path.join(RESOURCE_PATH, filename), mode='r')
def open_file_write(self, filename):
return open(os.path.join(RESOURCE_PATH, filename), mode='w')
def create_temp_rec_parser(self, file_handle):
return FlobnMSubconTemperatureParser(self.rec_config, file_handle, self.exception_callback)
def create_c_parser(self, file_handle):
return FlobnCSubconParser(self.rec_config, file_handle, self.exception_callback)
def create_m_parser(self, file_handle):
return FlobnMSubconParser(self.rec_config, file_handle, self.exception_callback)
def create_yml_file(self, input_file, output_file, number_samples):
"""
Create a yml file corresponding to an actual recovered dataset. This is not an actual test - it allows
us to create what we need for integration testing, i.e. a yml file.
"""
in_file = self.open_file(input_file)
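        # NOTE: this helper always builds the C-record parser; use
        # create_m_parser or create_temp_rec_parser instead when regenerating
        # yml files for those record types.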
parser = self.create_c_parser(in_file)
log.debug("Getting records...")
result = parser.get_records(number_samples)
log.debug("Done.")
self.particle_to_yml(result, output_file)
log.debug("File written")
def particle_to_yml(self, particles, filename):
"""
        This is a testing helper, not part of the parser tests themselves. Since the same particles
        are used by the driver test, it is helpful to write them to a .yml file in the same form the
        driver test expects in its results.yml.
"""
fid = self.open_file_write(filename)
fid.write('header:\n')
fid.write(" particle_object: 'MULTIPLE'\n")
fid.write(" particle_type: 'MULTIPLE'\n")
fid.write('data:\n')
for i in range(0, len(particles)):
particle_dict = particles[i].generate_dict()
fid.write(' - _index: %d\n' % (i + 1))
fid.write(' particle_object: %s\n' % particles[i].__class__.__name__)
fid.write(' particle_type: %s\n' % particle_dict.get('stream_name'))
fid.write(' internal_timestamp: %f\n' % particle_dict.get('internal_timestamp'))
for val in particle_dict.get('values'):
if isinstance(val.get('value'), float):
fid.write(' %s: %f\n' % (val.get('value_id'), val.get('value')))
elif isinstance(val.get('value'), str):
fid.write(" %s: '%s'\n" % (val.get('value_id'), val.get('value')))
else:
fid.write(' %s: %s\n' % (val.get('value_id'), val.get('value')))
fid.close()
def test_subcon_m_record_invalid_data(self):
"""
Read data from a file containing invalid data.
Verify that no particles are created and the correct number of exceptions are detected.
"""
log.debug('===== START TEST INVALID SENSOR DATA =====')
in_file = self.open_file(INVALID_M_DATA_FILE)
parser = self.create_m_parser(in_file)
# Try to get records and verify that none are returned.
# Input file's records contain all invalid samples
result = parser.get_records(1)
self.assertEqual(result, [])
in_file.close()
log.debug('===== END TEST INVALID SENSOR DATA =====')
def test_verify_subcon_m_record_against_yaml(self):
"""
Read data from a file and pull out data particles
one at a time. Verify that the results are those we expected.
"""
log.debug('===== START YAML TEST =====')
in_file = self.open_file(M_LOG_FILE)
parser = self.create_m_parser(in_file)
#uncomment to create yml results file
#self.create_yml_file(M_LOG_FILE, M_YAML_FILE, M_RECORDS)
result = parser.get_records(M_RECORDS)
self.assert_particles(result, M_YAML_FILE, RESOURCE_PATH)
in_file.close()
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== END YAML TEST =====')
def test_subcon_c_record_invalid_data(self):
"""
Read data from a file containing invalid data.
Verify that no particles are created and the correct number of exceptions are detected.
"""
log.debug('===== START TEST INVALID SENSOR DATA =====')
in_file = self.open_file(INVALID_C_DATA_FILE)
parser = self.create_c_parser(in_file)
# Try to get records and verify that none are returned.
# Input file's records contain all invalid samples
result = parser.get_records(1)
self.assertEqual(result, [])
in_file.close()
log.debug('===== END TEST INVALID SENSOR DATA =====')
def test_verify_subcon_c_record_against_yaml(self):
"""
Read data from a file and pull out data particles
one at a time. Verify that the results are those we expected.
"""
log.debug('===== START YAML TEST =====')
in_file = self.open_file(C_LOG_FILE)
parser = self.create_c_parser(in_file)
#uncomment to create yml results file
#self.create_yml_file(C_LOG_FILE, C_YAML_FILE, C_RECORDS)
result = parser.get_records(C_RECORDS)
self.assert_particles(result, C_YAML_FILE, RESOURCE_PATH)
in_file.close()
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== END YAML TEST =====')
def test_temp_record_invalid_data(self):
"""
Read data from a file containing invalid data.
Verify that no particles are created and the correct number of exceptions are detected.
"""
log.debug('===== START TEST INVALID SENSOR DATA =====')
in_file = self.open_file(INVALID_TEMPERATURE_DATA_FILE)
parser = self.create_temp_rec_parser(in_file)
# Try to get records and verify that none are returned.
# Input file's records contain all invalid samples
result = parser.get_records(1)
self.assertEqual(result, [])
in_file.close()
log.debug('===== END TEST INVALID SENSOR DATA =====')
def test_verify_temp_record_against_yaml(self):
"""
Read data from a file and pull out data particles
one at a time. Verify that the results are those we expected.
"""
log.debug('===== START YAML TEST =====')
in_file = self.open_file(TEMPERATURE_LOG_FILE)
parser = self.create_temp_rec_parser(in_file)
#uncomment to create yml results file
#self.create_yml_file(TEMPERATURE_LOG_FILE, TEMPERATURE_YAML_FILE, TEMPERATURE_RECORDS)
result = parser.get_records(TEMPERATURE_RECORDS)
self.assert_particles(result, TEMPERATURE_YAML_FILE, RESOURCE_PATH)
in_file.close()
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== END YAML TEST =====')
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a Householder transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorHouseholder",]
@tf_export("linalg.LinearOperatorHouseholder")
class LinearOperatorHouseholder(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] of Householder transformations.
This operator acts like a [batch] of householder reflections with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
`LinearOperatorHouseholder` is initialized with a (batch) vector.
  A Householder reflection, defined via a vector `v`, reflects points
  in `R^n` about the hyperplane orthogonal to `v` and through the origin.
```python
# Create a 2 x 2 householder transform.
vec = [1 / np.sqrt(2), 1. / np.sqrt(2)]
operator = LinearOperatorHouseholder(vec)
operator.to_dense()
==> [[0., -1.]
[-1., -0.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
```
#### Shape compatibility
  This operator acts on [batch] matrices with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
reflection_axis,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorHouseholder"):
r"""Initialize a `LinearOperatorHouseholder`.
Args:
reflection_axis: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
The vector defining the hyperplane to reflect about.
Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
`complex128`.
is_non_singular: Expect that this operator is non-singular.
      is_self_adjoint: Expect that this operator is equal to its Hermitian
        transpose. This is autoset to true.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
This is autoset to false.
is_square: Expect that this operator acts like square [batch] matrices.
This is autoset to true.
name: A name for this `LinearOperator`.
Raises:
ValueError: `is_self_adjoint` is not `True`, `is_positive_definite` is
not `False` or `is_square` is not `True`.
"""
with ops.name_scope(name, values=[reflection_axis]):
self._reflection_axis = linear_operator_util.convert_nonref_to_tensor(
reflection_axis, name="reflection_axis")
self._check_reflection_axis(self._reflection_axis)
# Check and auto-set hints.
if is_self_adjoint is False: # pylint:disable=g-bool-id-comparison
raise ValueError("A Householder operator is always self adjoint.")
else:
is_self_adjoint = True
if is_positive_definite is True: # pylint:disable=g-bool-id-comparison
raise ValueError(
"A Householder operator is always non-positive definite.")
else:
is_positive_definite = False
if is_square is False: # pylint:disable=g-bool-id-comparison
raise ValueError("A Householder operator is always square.")
is_square = True
super(LinearOperatorHouseholder, self).__init__(
dtype=self._reflection_axis.dtype,
graph_parents=None,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
# TODO(b/143910018) Remove graph_parents in V3.
self._set_graph_parents([self._reflection_axis])
def _check_reflection_axis(self, reflection_axis):
"""Static check of reflection_axis."""
if (reflection_axis.shape.ndims is not None and
reflection_axis.shape.ndims < 1):
raise ValueError(
"Argument reflection_axis must have at least 1 dimension. "
"Found: %s" % reflection_axis)
def _shape(self):
# If d_shape = [5, 3], we return [5, 3, 3].
d_shape = self._reflection_axis.shape
return d_shape.concatenate(d_shape[-1:])
def _shape_tensor(self):
d_shape = array_ops.shape(self._reflection_axis)
k = d_shape[-1]
return array_ops.concat((d_shape, [k]), 0)
def _assert_non_singular(self):
return control_flow_ops.no_op("assert_non_singular")
def _assert_positive_definite(self):
raise errors.InvalidArgumentError(
        node_def=None, op=None, message="Householder operators are never "
        "positive definite.")
def _assert_self_adjoint(self):
return control_flow_ops.no_op("assert_self_adjoint")
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# Given a vector `v`, we would like to reflect `x` about the hyperplane
    # orthogonal to `v` going through the origin. We first project `x` onto `v`
    # to get v * dot(v, x) / dot(v, v). After we project, we can reflect the
# projection about the hyperplane by flipping sign to get
# -v * dot(v, x) / dot(v, v). Finally, we can add back the component
# that is orthogonal to v. This is invariant under reflection, since the
# whole hyperplane is invariant. This component is equal to x - v * dot(v,
# x) / dot(v, v), giving the formula x - 2 * v * dot(v, x) / dot(v, v)
# for the reflection.
# Note that because this is a reflection, it lies in O(n) (for real vector
# spaces) or U(n) (for complex vector spaces), and thus is its own adjoint.
reflection_axis = ops.convert_to_tensor(self.reflection_axis)
x = linalg.adjoint(x) if adjoint_arg else x
normalized_axis = reflection_axis / linalg.norm(
reflection_axis, axis=-1, keepdims=True)
mat = normalized_axis[..., array_ops.newaxis]
x_dot_normalized_v = math_ops.matmul(mat, x, adjoint_a=True)
return x - 2 * mat * x_dot_normalized_v
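  # A standalone NumPy sketch of the same formula (illustration only, not used
  # by this class; `v` and `x` below are assumed example values):
  #
  #   import numpy as np
  #   v = np.array([1., 1.]) / np.sqrt(2.)
  #   x = np.array([[1.], [0.]])
  #   x - 2. * np.outer(v, v).dot(x)   # -> [[0.], [-1.]]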
def _trace(self):
# We have (n - 1) +1 eigenvalues and a single -1 eigenvalue.
shape = self.shape_tensor()
return math_ops.cast(
self._domain_dimension_tensor(shape=shape) - 2,
self.dtype) * array_ops.ones(
shape=self._batch_shape_tensor(shape=shape), dtype=self.dtype)
def _determinant(self):
# For householder transformations, the determinant is -1.
return -array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _log_abs_determinant(self):
# Orthogonal matrix -> log|Q| = 0.
return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    # A Householder reflection is an involution (it is its own inverse), so
    # solving reduces to applying the same matmul.
return self._matmul(rhs, adjoint, adjoint_arg)
def _to_dense(self):
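    # For a unit-norm axis v the dense matrix is I - 2 * v v^H: build
    # -2 * v v^H below, then add 1 to its diagonal via matrix_set_diag.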
reflection_axis = ops.convert_to_tensor(self.reflection_axis)
normalized_axis = reflection_axis / linalg.norm(
reflection_axis, axis=-1, keepdims=True)
mat = normalized_axis[..., array_ops.newaxis]
matrix = -2 * math_ops.matmul(mat, mat, adjoint_b=True)
return array_ops.matrix_set_diag(
matrix, 1. + array_ops.matrix_diag_part(matrix))
def _diag_part(self):
reflection_axis = ops.convert_to_tensor(self.reflection_axis)
normalized_axis = reflection_axis / linalg.norm(
reflection_axis, axis=-1, keepdims=True)
return 1. - 2 * normalized_axis * math_ops.conj(normalized_axis)
def _eigvals(self):
# We have (n - 1) +1 eigenvalues and a single -1 eigenvalue.
result_shape = array_ops.shape(self.reflection_axis)
n = result_shape[-1]
ones_shape = array_ops.concat([result_shape[:-1], [n - 1]], axis=-1)
neg_shape = array_ops.concat([result_shape[:-1], [1]], axis=-1)
eigvals = array_ops.ones(shape=ones_shape, dtype=self.dtype)
eigvals = array_ops.concat(
[-array_ops.ones(shape=neg_shape, dtype=self.dtype), eigvals], axis=-1)
return eigvals
def _cond(self):
    # Householder matrices are orthogonal (unitary) reflections, so their
    # condition number is 1.
return array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
@property
def reflection_axis(self):
return self._reflection_axis
|
|
import os
import sys
import logging, logging.handlers
import environment
import logconfig
############################
# Relative Filepaths
############################
path = lambda root,*a: os.path.join(root, *a)
ROOT = os.path.dirname(os.path.abspath(__file__))
############################
# Administrators
############################
ADMINS = ()
MANAGERS = ADMINS
############################
# Deployment Configuration
############################
class DeploymentType:
PRODUCTION = "PRODUCTION"
DEV = "DEV"
SOLO = "SOLO"
STAGING = "STAGING"
dict = {
SOLO: 1,
PRODUCTION: 2,
DEV: 3,
STAGING: 4
}
if 'DEPLOYMENT_TYPE' in os.environ:
DEPLOYMENT = os.environ['DEPLOYMENT_TYPE'].upper()
else:
DEPLOYMENT = DeploymentType.SOLO
def is_solo():
return DEPLOYMENT == DeploymentType.SOLO
def is_dev():
return DEPLOYMENT == DeploymentType.DEV
def is_prod():
return DEPLOYMENT == DeploymentType.PRODUCTION
############################
# Site ID and Debugging
############################
SITE_ID = DeploymentType.dict[DEPLOYMENT]
DEBUG = DEPLOYMENT != DeploymentType.PRODUCTION
STATIC_MEDIA_SERVER = is_solo() or is_dev()
TEMPLATE_DEBUG = DEBUG
SSL_ENABLED = not DEBUG
INTERNAL_IPS = ('127.0.0.1',)
############################
# Logging
############################
if DEBUG:
LOG_LEVEL = logging.DEBUG
else:
LOG_LEVEL = logging.INFO
############################
# Cache Backend
############################
if is_solo() or is_dev():
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'notifications': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': '127.0.0.1:6379',
'OPTIONS': {
'DB': 1,
'PARSER_CLASS': 'redis.connection.HiredisParser'
}
}
}
else:
CACHES = {
'default': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': '209.61.142.151:6379',
'OPTIONS': {
'DB': 0,
'PASSWORD': '&Hunt3RK!ll3r$',
'PARSER_CLASS': 'redis.connection.HiredisParser'
}
},
'notifications': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': '209.61.142.151:6379',
'OPTIONS': {
'DB': 1,
'PASSWORD': '&Hunt3RK!ll3r$',
'PARSER_CLASS': 'redis.connection.HiredisParser'
}
}
}
############################
# E-mail Server
############################
if is_solo() or is_dev():
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
else:
SENDGRID_EMAIL_HOST = 'smtp.sendgrid.net'
SENDGRID_EMAIL_PORT = 587
SENDGRID_EMAIL_USERNAME = 'mavenize'
SENDGRID_EMAIL_PASSWORD = '$0l$tic3919'
DEFAULT_FROM_EMAIL = "Mavenize Support <admin@mavenize.me>"
CONTACT_EMAIL = 'admin@mavenize.me'
############################
# Internationalization
############################
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = False
############################
# Testing & Coverage
############################
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
COVERAGE_REPORT_HTML_OUTPUT_DIR = 'coverage'
COVERAGE_MODULE_EXCLUDES = ['tests$', 'settings$', 'urls$', 'vendor$',
'__init__', 'migrations', 'templates', 'django', 'debug_toolbar',
'core\.fixtures', 'users\.fixtures',]
try:
import multiprocessing
cpu_count = multiprocessing.cpu_count()
except ImportError:
cpu_count = 1
NOSE_ARGS = ['--logging-clear-handlers', '--processes=%s' % cpu_count]
if is_solo():
try:
os.mkdir(COVERAGE_REPORT_HTML_OUTPUT_DIR)
except OSError:
pass
############################
# Media and Static Files
############################
MEDIA_ROOT = path(ROOT, 'media')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
ROOT_URLCONF = 'mavenize.urls'
STATIC_ROOT = path(ROOT, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (path(ROOT, 'assets'),)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc {infile} {outfile}'),
)
COMPRESS_ENABLED = True
if is_prod():
CUMULUS = {
'USERNAME': 'mavenize',
'API_KEY': '7d93ad331ce171acccca10068da233dc',
'CONTAINER': 'media',
'STATIC_CONTAINER': 'static'
}
DEFAULT_FILE_STORAGE = 'cumulus.storage.CloudFilesStorage'
STATIC_URL = COMPRESS_URL = 'http://c352884.r84.cf1.rackcdn.com/'
STATICFILES_STORAGE = COMPRESS_STORAGE = \
'cumulus.storage.CloudFilesStaticStorage'
CUMULUS_TIMEOUT = 30
############################
# Version Information
# Grab the current commit SHA from git - handy for confirming the version deployed on a remote server is the one you think it is.
############################
import subprocess
GIT_COMMIT = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'],
stdout=subprocess.PIPE).communicate()[0].strip()
del subprocess
############################
# Database Configuration
############################
DATABASES = {}
if 'test' in sys.argv:
DATABASES['default'] = {
'name': 'testdb',
'ENGINE': 'django.db.backends.sqlite3'
}
elif DEPLOYMENT == DeploymentType.PRODUCTION:
DATABASES['default'] = {
'NAME': 'mavenize_production',
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': '198.101.193.156',
'PORT': '5432',
'USER': 'mavenize',
'PASSWORD': '@u$tr@l1aN912'
}
elif DEPLOYMENT == DeploymentType.DEV:
DATABASES['default'] = {
'NAME': 'mavenize_development',
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': 'localhost',
'PORT': '5432',
'USER': 'django',
'PASSWORD': 'PyDjR0ck$'
}
elif DEPLOYMENT == DeploymentType.STAGING:
DATABASES['default'] = {
'NAME': 'mavenize_staging',
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': '198.101.193.156',
'PORT': '5432',
'USER': 'mavenize',
'PASSWORD': '@u$tr@l1aN912'
}
else:
DATABASES['default'] = {
'NAME': 'db',
'ENGINE': 'django.db.backends.sqlite3',
'HOST': '',
'PORT': '',
'USER': '',
'PASSWORD': ''
}
############################
# Message Broker for Celery
############################
import djcelery
djcelery.setup_loader()
BROKER_URL = "redis://mavenize:&Hunt3RK!ll3r$@209.61.142.151:6379/3"
CELERY_RESULT_BACKEND = "redis"
CELERY_REDIS_HOST = "209.61.142.151"
CELERY_REDIS_PORT = "6379"
CELERY_REDIS_DB = 4
CELERY_REDIS_PASSWORD = "&Hunt3RK!ll3r$"
CELERY_IMPORTS = ('process_thumbnails',)
CELERY_ALWAYS_EAGER = is_solo() or is_dev()
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
############################
# South
############################
SOUTH_TESTS_MIGRATE = False
SKIP_SOUTH_TESTS = True
############################
# Logging
############################
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'log_file':{
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': path(ROOT, 'logs/django.log'),
            'maxBytes': 16777216,
'formatter': 'verbose'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'apps': {
'handlers': ['log_file'],
'level': 'INFO',
'propagate': True,
},
},
'root': {
'handlers': ['console', 'mail_admins'],
'level': 'INFO'
},
}
############################
# Debug Toolbar
############################
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'EXTRA_SIGNALS': ['social_auth.signals.pre_update',
'social_auth.signals.socialauth_registered']
}
############################
# Application Settings
############################
SECRET_KEY = '8^q6o4zyxy%p!ltd^#t)hqmb_))e5zy^nxg151f7tf)y_@%!9-'
############################
# Sessions
############################
if is_solo() or is_dev():
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
else:
SESSION_ENGINE = 'redis_sessions.session'
SESSION_REDIS_HOST = '209.61.142.151'
SESSION_REDIS_PORT = 6379
SESSION_REDIS_DB = 2
SESSION_REDIS_PASSWORD = '&Hunt3RK!ll3r$'
SESSION_REDIS_PREFIX = 'session'
############################
# Middleware
############################
middleware_list = [
'django.middleware.common.CommonMiddleware',
'announce.middleware.AnnounceCookieMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
if is_solo():
middleware_list += [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
elif is_dev():
middleware_list += [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.transaction.TransactionMiddleware',
]
else:
middleware_list += [
'django.middleware.transaction.TransactionMiddleware',
]
MIDDLEWARE_CLASSES = tuple(middleware_list)
############################
# Templates
############################
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
if not is_solo():
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', TEMPLATE_LOADERS),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social_auth.context_processors.social_auth_by_name_backends',
)
TEMPLATE_DIRS = (
    path(ROOT, 'templates'),
)
############################
# Applications
############################
apps_list = [
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.markup',
'django.contrib.messages',
'django.contrib.staticfiles',
'activity_feed',
'bookmark',
'item',
'leaderboard',
'notification',
'request',
'review',
'social_graph',
'user_profile',
'movie',
'nexus',
'social_auth',
'south',
'sorl.thumbnail',
'haystack',
'announce',
'djcelery',
'compressor',
]
if is_solo() or is_dev():
apps_list += [
'debug_toolbar',
'django_nose',
'django_coverage',
]
if is_prod():
apps_list += [
'cumulus',
'nexus_redis',
'raven.contrib.django',
'sendgrid',
]
INSTALLED_APPS = tuple(apps_list)
############################
# Nexus Configuration
############################
NEXUS_REDIS_CONNECTIONS = [
{ 'host': '209.61.142.151',
'password': '&Hunt3RK!ll3r$',
'db': 0 },
{ 'host': '209.61.142.151',
'password': '&Hunt3RK!ll3r$',
'db': 1 },
{ 'host': '209.61.142.151',
'password': '&Hunt3RK!ll3r$',
'db': 2 },
{ 'host': '209.61.142.151',
'password': '&Hunt3RK!ll3r$',
'db': 3 },
{ 'host': '209.61.142.151',
'password': '&Hunt3RK!ll3r$',
'db': 4 },
{ 'host': '209.61.142.151',
'password': '&Hunt3RK!ll3r$',
'db': 5 }
]
############################
# Social Authentication
############################
AUTHENTICATION_BACKENDS = (
'social_auth.backends.facebook.FacebookBackend',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'
SOCIAL_AUTH_ASSOCIATE_URL_NAME = 'socialauth_associate_complete'
SOCIAL_AUTH_EXTRA_DATA = True
SOCIAL_AUTH_EXPIRATION = 'expires'
SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/signup'
LOGIN_URL = '/login'
LOGIN_REDIRECT_URL = '/'
############################
# Facebook
############################
if is_solo() or is_dev():
FACEBOOK_APP_ID = '319245824782103'
FACEBOOK_API_SECRET = 'ce2645caabfeb6e234e00d3769ce1793'
else:
FACEBOOK_APP_ID = '184293225012617'
FACEBOOK_API_SECRET = '122e7c7f4489c1e55c6c2589ae8e283d'
FACEBOOK_EXTENDED_PERMISSIONS = ['email', 'create_event', 'publish_stream']
############################
# User Profiles
############################
AUTH_PROFILE_MODULE = 'user_profile.UserProfile'
############################
# Haystack
############################
HAYSTACK_CONNECTIONS = {}
if is_solo() or is_dev():
HAYSTACK_CONNECTIONS['default'] = {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
}
else:
HAYSTACK_CONNECTIONS['default'] = {
'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
'URL': 'http://198.101.195.82:8983/solr',
}
############################
# Announce
############################
if is_prod():
ANNOUNCE_CLIENT_ADDR = '198.101.193.28:5500'
ANNOUNCE_API_ADDR = '198.101.193.28:6600'
############################
# Raven
############################
SENTRY_DSN = 'https://d53e20807c2e45ee8a586e1bc3423c46:ee1ead175980421aadb57bf229841239@app.getsentry.com/910'
############################
# Sorl Thumbnail
############################
if is_prod():
THUMBNAIL_KVSTORE = 'sorl.thumbnail.kvstores.redis_kvstore.KVStore'
THUMBNAIL_REDIS_HOST = '209.61.142.151'
THUMBNAIL_REDIS_PORT = 6379
THUMBNAIL_REDIS_DB = 5
THUMBNAIL_REDIS_PASSWORD = '&Hunt3RK!ll3r$'
|
|
#!/usr/bin/env python
"""List, determine, and modify experiment status.
This can be used either as a module or standalone.
"""
import csv
import hashlib
import logging
import optparse
import os
import re
import sys
from . import gmacpyutil
from . import defaults
from . import systemconfig
import yaml
# knobs for experiments
MANUAL_ON_KNOB = 'ManuallyEnabledExperiments'
MANUAL_OFF_KNOB = 'ManuallyDisabledExperiments'
EXPERIMENTS_KNOB = 'EnableExperiments'
DEFAULT_USEFUL_KNOBS = (EXPERIMENTS_KNOB,
MANUAL_ON_KNOB,
MANUAL_OFF_KNOB)
EXPERIMENT_KEY = 'experiments'
PERCENT_KEY = 'percent'
START_KEY = 'begin_date'
OBSOLETE_KEY = 'obsolete_after'
OWNER_KEY = 'owner'
DESCRIPTION_KEY = 'description'
ENABLE_UNSTABLE = 'enable_unstable'
ENABLE_TESTING = 'enable_testing'
REQUIRED_FIELDS = (OWNER_KEY, PERCENT_KEY, START_KEY)
OPTIONAL_FIELDS = (OBSOLETE_KEY, DESCRIPTION_KEY, ENABLE_UNSTABLE,
ENABLE_TESTING)
ALL_FIELDS = REQUIRED_FIELDS + OPTIONAL_FIELDS
# Where we store experiments
EXP_FILENAME = defaults.EXPERIMENTS_YAML
# Experiment status values
ENABLED, DISABLED = ('enabled', 'disabled')
# Experiment source values (or why a given experiment has a given status).
ALWAYS, NEVER, MANUAL, AUTO = 'always', 'never', 'manually', 'automatically'
# Experiment source values continued:
RECOMMENDED = 'recommended'
# How many pieces do we want to divide the fleet into?
MOD_VALUE = 10000 # seems to work for now (gives us .01% granularity)
class ExperimentsError(Exception):
pass
class InvalidData(ExperimentsError):
pass
class InvalidExperiment(ExperimentsError):
pass
class MissingUUID(ExperimentsError):
pass
class PlistError(ExperimentsError):
pass
class ExperimentListFetcher(object):
"""Wrapper around fetching experiment data.
  Errors generally do not surface until the GetData phase, in which case an
  InvalidData exception will be raised.
data.valid == True implies that data.parsed exists.
data.valid == False implies that the data is bad and you should not use it.
"""
def __init__(self, path):
self.data = None
self.path = path
def _Fetch(self):
self.data = type('obj', (object,), dict(valid=False, parsed=None))
try:
self.data.data = open(self.path, 'rb').read()
except IOError, e:
logging.debug('Failed to read experiment file: %s', self.path)
self.data = None
raise ExperimentsError(e.message)
def _Parse(self):
"""Ensure the class data is valid."""
if self.data is not None:
try:
logging.debug('yaml.safe_load(...)')
self.data.parsed = yaml.safe_load(self.data.data)
except yaml.YAMLError:
logging.warning('Error parsing YAML.')
self.data.parsed = None
if self.data.parsed is not None:
try:
self.data.serial = self.data.parsed.get('serial', None)
self.data.experiments = self.data.parsed.get('experiments', {})
except (AttributeError, ExperimentsError):
logging.warning('Caught exception while parsing self.data.')
self.data.valid = False
return
logging.debug('Parsed YAML data is valid')
self.data.valid = True
else:
logging.debug('Problem parsing YAML data')
self.data.valid = False
else:
logging.error('No data to parse!')
def GetData(self):
"""Return parsed and valid experiments data."""
try:
logging.debug('Fetching data from YAML')
self._Fetch()
except ExperimentsError, e:
raise InvalidData(e.message)
logging.debug('Parsing data')
self._Parse()
if not self.data or not self.data.valid:
logging.error('Data not valid after parsing')
raise InvalidData
return self.data
class Knobs(object):
"""Caching class for reading knobs.
Usage:
k = Knobs()
k.Knobs()
"""
def __init__(self, knobs_list=DEFAULT_USEFUL_KNOBS):
self._knobs_list = knobs_list
self._knobs = {}
self._valid = False
def Knobs(self):
"""Get knobs from plist if they haven't been fetched already.
Returns:
a dictionary containing the knobs data.
"""
if not self._valid:
logging.debug('Getting knobs')
self._knobs = self._GetKnobs()
self._valid = True
return self._knobs
def _GetKnobs(self):
"""Gets values of specific knobs.
Returns:
a dict of knobs.
"""
knobs = {}
for knob in self._knobs_list:
data = gmacpyutil.MachineInfoForKey(knob)
if data:
if knob in (MANUAL_ON_KNOB, MANUAL_OFF_KNOB):
knobs[knob] = ConvertCSVStringToList(data)
else:
knobs[knob] = data
return knobs
# Global instance of Knobs class
KNOBS = Knobs(DEFAULT_USEFUL_KNOBS)
def ExperimentIsBucket(experiment, exp_data, mach_uuid):
"""Determine if a given experiment is enabled for a certname.
Args:
experiment: a string identifier for the experiment.
exp_data: a dict containing experiment data (yaml.load(...))
mach_uuid: A machine UUID provided by sendsysinfo.
Returns:
an instance with three attributes, status, source, and rollout_percent
Raises:
InvalidExperiment: an invalid experiment
ExperimentsError: couldn't coerce hash to integer
"""
ret = type('obj', (object,), dict(status=None, source=AUTO))
data = exp_data.get(experiment)
if not data:
raise InvalidExperiment('%s is not in %s.' % (experiment, exp_data))
try:
rollout_percent = float(data.get(PERCENT_KEY, 0))
logging.debug('Got rollout_percent: %s.', rollout_percent)
except ValueError:
logging.warning('Could not parse rollout_percent, using 0.')
rollout_percent = 0
ret.rollout_percent = rollout_percent
try:
exp_hash = int(hashlib.sha256(experiment + mach_uuid).hexdigest(), 16)
except ValueError:
logging.warning('Could not coerce hash to integer.')
raise ExperimentsError('Could not determine bucket for host.')
bucket = exp_hash % MOD_VALUE
logging.debug('Bucket is %s, rollout_percent is %s.', bucket, rollout_percent)
if bucket * 100 / float(MOD_VALUE) < rollout_percent:
ret.status = ENABLED
else:
ret.status = DISABLED
return ret
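# A minimal, standalone sketch of the bucketing math above (the experiment name
# and UUID are made up for illustration):
#
#   import hashlib
#   bucket = int(hashlib.sha256('some_experiment' + 'UUID-1234').hexdigest(), 16) % MOD_VALUE
#   enabled = bucket * 100 / float(MOD_VALUE) < 25.0  # i.e. a 25% rollout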
def GetExperimentStatus(experiment, knobs, exp_data, track='stable'):
"""Determine the status and source of a given experiment.
Take into account all ways that a given experiment may be enabled and allow
the client to determine why a given experiment has a particular status.
Experiments at 100% are always on.
If the machine is set to ignore experiments, it will ignore any experiments
not at 100%.
If the machine is set to always apply experiments, the experiment will be on.
If the machine is in an explicitly enabled track, the experiment will be on.
If the machine is manually opted in or out, that option is applied.
Otherwise the bucket algorithm is applied.
Args:
experiment: a string identifier for a given experiment.
knobs: knobs for a host (in dict form)
exp_data: a dict containing experiment data (yaml.load(...))
track: a string of the machine's release track
Returns:
an object with three attributes, status, source, and rollout_percent
"""
ReturnEarly = lambda ret: ret.source is not None # pylint: disable=g-bad-name
ret = type('obj', (object,), {})
ret.status = DISABLED
ret.source = None
ret.rollout_percent = float(exp_data.get(experiment, {}).get(PERCENT_KEY, -1))
if ret.rollout_percent == 100:
logging.debug('Experiment %s is at 100%%, enabling', experiment)
ret.status = ENABLED
ret.source = ALWAYS
return ret
auto_knob = knobs.get(EXPERIMENTS_KNOB, 'recommended')
if auto_knob == ALWAYS:
ret.status = ENABLED
ret.source = ALWAYS
elif auto_knob == NEVER:
ret.status = DISABLED
ret.source = ALWAYS
if ReturnEarly(ret): return ret
manual_on_knob = knobs.get(MANUAL_ON_KNOB, [])
manual_off_knob = knobs.get(MANUAL_OFF_KNOB, [])
if experiment in manual_on_knob:
ret.status = ENABLED
ret.source = MANUAL
elif experiment in manual_off_knob:
ret.status = DISABLED
ret.source = MANUAL
if ReturnEarly(ret): return ret
enable_unstable = exp_data.get(experiment, {}).get(ENABLE_UNSTABLE, False)
enable_testing = exp_data.get(experiment, {}).get(ENABLE_TESTING, False)
if ((track == 'testing' and enable_testing) or
(track == 'unstable' and (enable_unstable or enable_testing))):
ret.status = ENABLED
ret.source = ALWAYS
if ReturnEarly(ret): return ret
try:
mach_uuid = FetchUUID()
except ExperimentsError, e:
raise MissingUUID(e)
logging.debug('Found uuid %s', mach_uuid)
return ExperimentIsBucket(experiment, exp_data, mach_uuid)
def InExperiment(exp_name, experiments):
"""Check if we are in a given experiment.
Args:
exp_name: str, name of experiment
experiments: dict, dictionary of experiments
Returns:
tuple of bool, if host in exp_name, and str, source of experiment status
"""
in_experiment = False
source = 'unknown'
knobs = KNOBS.Knobs()
track = gmacpyutil.GetTrack()
if experiments:
try:
retval = GetExperimentStatus(exp_name, knobs, experiments, track=track)
if retval.status == ENABLED:
in_experiment = True
source = retval.source
except (InvalidExperiment, MissingUUID):
pass
return in_experiment, source
def GetExperiments():
"""Try to fetch a new set of experiment data. Perform verification.
Returns:
An object containing experiments or None.
"""
fetcher = ExperimentListFetcher(EXP_FILENAME)
try:
return fetcher.GetData().experiments
except InvalidData:
return None
def FetchUUID():
"""Return our UUID.
Returns:
a string UUID
Raises:
ExperimentsError: machine UUID is malformed
"""
uuid_regex = r'^[A-F0-9]{8}-([A-F0-9]{4}-){3}[A-F0-9]{12}$'
uuid = gmacpyutil.MachineInfoForKey('MachineUUID')
if not uuid:
logging.info('No MachineUUID found, trying platform UUID')
sp = systemconfig.SystemProfiler()
uuid = sp.GetHWUUID()
if isinstance(uuid, basestring) and re.match(uuid_regex, uuid):
return uuid
else:
raise ExperimentsError('Malformed UUID: %s' % uuid)
def AddExperimentToManualList(experiment, knob):
"""Adds an experiment to the ManuallyEnabledExperiments knob.
Args:
experiment: str, the experiment name to add.
knob: str, the manual knob to modify
Raises:
PlistError: if the plist can't be modified.
"""
knobs = KNOBS.Knobs()
current_value = knobs.get(knob, [])
if knob in knobs and experiment in current_value:
Output('%s is already in %s.' % (experiment, knob))
else:
current_value.append(experiment)
Output('New value of %s is %s' % (knob, ','.join(current_value)))
if not gmacpyutil.SetMachineInfoForKey(knob, ','.join(current_value)):
raise PlistError('Problem writing to plist.')
def RemoveExperimentFromManualList(experiment, knob):
"""Removes an experiment from the ManuallyDisabledExperiments knob.
Args:
experiment: str, the experiment name to remove.
knob: str, the manual knob to modify
Raises:
PlistError: if the plist can't be modified.
"""
knobs = KNOBS.Knobs()
if knob not in knobs:
Output('%s list is empty, nothing to remove.' % knob)
else:
current_value = knobs.get(knob, [])
if experiment in current_value:
current_value.remove(experiment)
Output('New value of %s is %s' % (knob, ','.join(current_value)))
if not gmacpyutil.SetMachineInfoForKey(knob, ','.join(current_value)):
raise PlistError('Problem writing to plist.')
else:
Output('%s is not in %s.' % (experiment, knob))
def ConvertCSVStringToList(csv_string):
"""Helper to convert a csv string to a list."""
reader = csv.reader([csv_string])
return list(reader)[0]
def ConvertListToCSVString(csv_list):
"""Helper to convert a list to a csv string."""
return ','.join(str(s) for s in csv_list)
def ModifyManualList(action, knob_list, experiments):
"""Modify the manually enabled/disabled lists.
Args:
action: string, action to take, either add or remove.
knob_list: list of manual setting knobs to modify
experiments: string, comma-delimited string of experiments to add or remove
Raises:
ValueError: an invalid action was requested.
"""
experiments = '' if experiments is None else experiments
exp_list = ConvertCSVStringToList(experiments)
for experiment in exp_list:
for knob in knob_list:
if action == 'add':
AddExperimentToManualList(experiment, knob)
elif action == 'remove':
RemoveExperimentFromManualList(experiment, knob)
else:
        raise ValueError('%s is not a valid action.' % action)
def ParseOptions(argv):
"""Parse command-line options."""
parser = optparse.OptionParser(usage='%prog [options]')
parser.add_option('-D', '--debug', action='store_true', default=False)
parser.add_option('-F', '--formatted', action='store_true', default=False,
help=('Output experiments as one "experiment,status" '
'per line'))
parser.add_option(
'-e', '--enable', action='store', dest='manually_enable',
help='Comma-delimited list of experiments to manually enable.')
parser.add_option(
'-d', '--disable', action='store', dest='manually_disable',
      help='Comma-delimited list of experiments to manually disable.')
parser.add_option(
'-r', '--recommended', action='store', dest='recommended',
help='Comma-delimited list of experiments to no longer manually manage.')
opts, args = parser.parse_args(argv)
return opts, args
def Output(text):
"""Wrap print so it's mockable for testing."""
print text
def main(argv):
opts, _ = ParseOptions(argv)
if opts.debug:
gmacpyutil.ConfigureLogging(debug_level=logging.DEBUG, stderr=opts.debug)
else:
gmacpyutil.ConfigureLogging()
if opts.formatted and (
opts.manually_enable or opts.manually_disable or opts.recommended):
Output('--formatted and --enable/--disable cannot be used together.')
raise SystemExit(1)
if opts.manually_enable or opts.manually_disable or opts.recommended:
if os.geteuid() != 0:
Output('Need to be root to change knobs, try again with sudo')
raise SystemExit(2)
try:
# Manually enabled experiments should be added to MANUAL_ON_KNOB and
# removed from MANUAL_OFF_KNOB
ModifyManualList('add', [MANUAL_ON_KNOB], opts.manually_enable)
ModifyManualList('remove', [MANUAL_OFF_KNOB], opts.manually_enable)
# Manually disabled experiments should be added to MANUAL_OFF_KNOB and
# removed from MANUAL_ON_KNOB
ModifyManualList('add', [MANUAL_OFF_KNOB], opts.manually_disable)
ModifyManualList('remove', [MANUAL_ON_KNOB], opts.manually_disable)
# Experiments reset to recommended should be removed from both
# MANUAL_ON_KNOB and MANUAL_OFF_KNOB
ModifyManualList('remove', [MANUAL_ON_KNOB, MANUAL_OFF_KNOB],
opts.recommended)
except PlistError, e:
Output(e.message)
raise SystemExit(3)
else:
experiments = GetExperiments()
if experiments:
for experiment in experiments:
status, source = InExperiment(experiment, experiments)
if opts.formatted:
Output('%s,%s' % (experiment,
str(status).lower()))
else:
percent = experiments[experiment]['percent']
description = experiments[experiment].get('description',
'No description')
status_string = 'enabled' if status else 'disabled'
Output('%s: %s, at %.1f%% rollout, %s on this host %s' % (
experiment, description, percent, status_string, source))
else:
if not opts.formatted:
Output('No experiments are currently running.')
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
from iptest.assert_util import skiptest
from iptest.cominterop_util import *
from System import *
from clr import StrongBox
###############################################################################
##GLOBALS######################################################################
com_type_name = "DlrComLibrary.DispEvents"
com_obj = getRCWFromProgID(com_type_name)
N = 3 #number of events to send
HANDLER_CALL_COUNT = 0 #number of events received
###############################################################################
##HELPERS######################################################################
def handler_helper(e_trigger, com_event, expected_retval, event_handlers):
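    """For each handler: verify nothing fires with no handler attached, attach
    it, trigger the event N times checking the return value and the global
    HANDLER_CALL_COUNT, then detach it and confirm deliveries stop."""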
global HANDLER_CALL_COUNT
for handler in event_handlers:
#send an event with no handlers to ensure the test is OK
HANDLER_CALL_COUNT = 0
e_trigger()
AreEqual(HANDLER_CALL_COUNT, 0)
#add the handler and verify events are received
try:
com_event += handler
for i in xrange(N):
retval = e_trigger()
AreEqual(retval, expected_retval)
AreEqual(HANDLER_CALL_COUNT, i+1)
except Exception, e:
print "handler_helper(", e_trigger, com_event, expected_retval, handler, ")"
raise e
finally:
#remove the handler
com_event -= handler
#send an event with no handlers to ensure removal
HANDLER_CALL_COUNT = 0
e_trigger()
AreEqual(HANDLER_CALL_COUNT, 0)
def bad_handler_signature_helper(e_trigger, com_event, bad_arg_handlers):
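    """For each badly-shaped handler: attach it, trigger the event expecting
    the dispatch to fail, confirm HANDLER_CALL_COUNT stays at 0, then detach
    it and verify the event still fires cleanly with no handlers."""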
global HANDLER_CALL_COUNT
for handler in bad_arg_handlers:
#send an event with no handlers to ensure the test is OK
HANDLER_CALL_COUNT = 0
e_trigger()
AreEqual(HANDLER_CALL_COUNT, 0)
#add the handler and verify events are received
com_event += handler
try:
e_trigger()
Fail("Trying to call " + str(handler) + " as an event handler should have failed")
except Exception, e:
pass
AreEqual(HANDLER_CALL_COUNT, 0)
#send an event with no handlers to ensure removal
com_event -= handler
HANDLER_CALL_COUNT = 0
e_trigger()
AreEqual(HANDLER_CALL_COUNT, 0)
###############################################################################
##SANITY CHECKS################################################################
#--One unique handler
ONE_HANDLER_COUNT = 0
ONE_HANDLER_VAL = None
@skip("multiple_execute")
def test_one_handler():
def one_handler(a):
global ONE_HANDLER_COUNT
global ONE_HANDLER_VAL
ONE_HANDLER_COUNT += 1
ONE_HANDLER_VAL = a
expected_count = 0
expected_val = None
com_event = com_obj.eInUshort
#Preliminary checks
AreEqual(expected_count, ONE_HANDLER_COUNT)
AreEqual(expected_val, None)
#Send N events w/ no handlers attached
for i in xrange(N):
com_obj.triggerUShort(i)
AreEqual(expected_count, ONE_HANDLER_COUNT)
AreEqual(expected_val, ONE_HANDLER_VAL)
#Attach one event handler
com_event += one_handler
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
expected_count += 1
expected_val = i
AreEqual(expected_count, ONE_HANDLER_COUNT)
AreEqual(expected_val, ONE_HANDLER_VAL)
#Remove it
com_event -= one_handler
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
AreEqual(expected_count, ONE_HANDLER_COUNT)
AreEqual(expected_val, ONE_HANDLER_VAL)
#Re-add the event handler
com_event += one_handler
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
expected_count += 1
expected_val = i
AreEqual(expected_count, ONE_HANDLER_COUNT)
AreEqual(expected_val, ONE_HANDLER_VAL)
#Remove it
com_event -= one_handler
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
AreEqual(expected_count, ONE_HANDLER_COUNT)
AreEqual(expected_val, ONE_HANDLER_VAL)
#--Two identical handlers
TWO_IDENT_HANDLERS_COUNT = 0
TWO_IDENT_HANDLERS_VAL = None
@skip("multiple_execute")
def test_two_ident_handlers():
def two_ident_handlers(a):
global TWO_IDENT_HANDLERS_COUNT
global TWO_IDENT_HANDLERS_VAL
TWO_IDENT_HANDLERS_COUNT += 1
TWO_IDENT_HANDLERS_VAL = a
expected_count = 0
expected_val = None
com_event = com_obj.eInUshort
#Preliminary checks
AreEqual(expected_count, TWO_IDENT_HANDLERS_COUNT)
AreEqual(expected_val, None)
#Send N events w/ no handlers attached
for i in xrange(N):
com_obj.triggerUShort(i)
AreEqual(expected_count, TWO_IDENT_HANDLERS_COUNT)
AreEqual(expected_val, TWO_IDENT_HANDLERS_VAL)
#Attach two event handlers
for i in xrange(2):
com_event += two_ident_handlers
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
expected_count += 2
expected_val = i
AreEqual(expected_count, TWO_IDENT_HANDLERS_COUNT)
AreEqual(expected_val, TWO_IDENT_HANDLERS_VAL)
#Remove one event handler
com_event -= two_ident_handlers
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
expected_count += 1
expected_val = i
AreEqual(expected_count, TWO_IDENT_HANDLERS_COUNT)
AreEqual(expected_val, TWO_IDENT_HANDLERS_VAL)
#Add one event handler
com_event += two_ident_handlers
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
expected_count += 2
expected_val = i
AreEqual(expected_count, TWO_IDENT_HANDLERS_COUNT)
AreEqual(expected_val, TWO_IDENT_HANDLERS_VAL)
#Remove them both
for i in xrange(2):
com_event -= two_ident_handlers
#Send N events w/ no handlers attached
for i in xrange(N):
com_obj.triggerUShort(i)
AreEqual(expected_count, TWO_IDENT_HANDLERS_COUNT)
AreEqual(expected_val, TWO_IDENT_HANDLERS_VAL)
#Re-add the event handlers
for i in xrange(2):
com_event += two_ident_handlers
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
expected_count += 2
expected_val = i
AreEqual(expected_count, TWO_IDENT_HANDLERS_COUNT)
AreEqual(expected_val, TWO_IDENT_HANDLERS_VAL)
#Remove them
for i in xrange(2):
com_event -= two_ident_handlers
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
AreEqual(expected_count, TWO_IDENT_HANDLERS_COUNT)
AreEqual(expected_val, TWO_IDENT_HANDLERS_VAL)
#--Two unique handlers
TWO_UNIQUE_HANDLERS_COUNT = 0
TWO_UNIQUE_HANDLERS_VAL_A = None
TWO_UNIQUE_HANDLERS_VAL_B = None
@skip("multiple_execute")
def test_two_unique_handlers():
def two_unique_handlers_a(a):
global TWO_UNIQUE_HANDLERS_COUNT
global TWO_UNIQUE_HANDLERS_VAL_A
TWO_UNIQUE_HANDLERS_COUNT += 1
TWO_UNIQUE_HANDLERS_VAL_A = a
def two_unique_handlers_b(a):
global TWO_UNIQUE_HANDLERS_COUNT
global TWO_UNIQUE_HANDLERS_VAL_B
TWO_UNIQUE_HANDLERS_COUNT += 1
TWO_UNIQUE_HANDLERS_VAL_B = a
expected_count = 0
expected_val_a = None
expected_val_b = None
com_event = com_obj.eInUshort
#Preliminary checks
AreEqual(expected_count, TWO_UNIQUE_HANDLERS_COUNT)
AreEqual(expected_val_a, TWO_UNIQUE_HANDLERS_VAL_A)
AreEqual(expected_val_b, TWO_UNIQUE_HANDLERS_VAL_B)
#Send N events w/ no handlers attached
for i in xrange(N):
com_obj.triggerUShort(i)
AreEqual(expected_count, TWO_UNIQUE_HANDLERS_COUNT)
AreEqual(expected_val_a, TWO_UNIQUE_HANDLERS_VAL_A)
AreEqual(expected_val_b, TWO_UNIQUE_HANDLERS_VAL_B)
#Attach handler A
com_event += two_unique_handlers_a
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
expected_count += 1
expected_val_a = i
AreEqual(expected_count, TWO_UNIQUE_HANDLERS_COUNT)
AreEqual(expected_val_a, TWO_UNIQUE_HANDLERS_VAL_A)
AreEqual(expected_val_b, TWO_UNIQUE_HANDLERS_VAL_B)
#Attach handler B
com_event += two_unique_handlers_b
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
expected_count += 2
expected_val_a = i
expected_val_b = i
AreEqual(expected_count, TWO_UNIQUE_HANDLERS_COUNT)
AreEqual(expected_val_a, TWO_UNIQUE_HANDLERS_VAL_A)
AreEqual(expected_val_b, TWO_UNIQUE_HANDLERS_VAL_B)
#Remove handler A
com_event -= two_unique_handlers_a
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
expected_count += 1
expected_val_b = i
AreEqual(expected_count, TWO_UNIQUE_HANDLERS_COUNT)
AreEqual(expected_val_a, TWO_UNIQUE_HANDLERS_VAL_A)
AreEqual(expected_val_b, TWO_UNIQUE_HANDLERS_VAL_B)
#Attach handler A
com_event += two_unique_handlers_a
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
expected_count += 2
expected_val_a = i
expected_val_b = i
AreEqual(expected_count, TWO_UNIQUE_HANDLERS_COUNT)
AreEqual(expected_val_a, TWO_UNIQUE_HANDLERS_VAL_A)
AreEqual(expected_val_b, TWO_UNIQUE_HANDLERS_VAL_B)
#Remove handler A
com_event -= two_unique_handlers_a
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
expected_count += 1
expected_val_b = i
AreEqual(expected_count, TWO_UNIQUE_HANDLERS_COUNT)
AreEqual(expected_val_a, TWO_UNIQUE_HANDLERS_VAL_A)
AreEqual(expected_val_b, TWO_UNIQUE_HANDLERS_VAL_B)
#Remove handler B
com_event -= two_unique_handlers_b
#Send N events w/ no handlers attached
for i in xrange(N):
com_obj.triggerUShort(i)
AreEqual(expected_count, TWO_UNIQUE_HANDLERS_COUNT)
AreEqual(expected_val_a, TWO_UNIQUE_HANDLERS_VAL_A)
AreEqual(expected_val_b, TWO_UNIQUE_HANDLERS_VAL_B)
#Attach handlers A and B
com_event += two_unique_handlers_a
com_event += two_unique_handlers_b
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
expected_count += 2
expected_val_a = i
expected_val_b = i
AreEqual(expected_count, TWO_UNIQUE_HANDLERS_COUNT)
AreEqual(expected_val_a, TWO_UNIQUE_HANDLERS_VAL_A)
AreEqual(expected_val_b, TWO_UNIQUE_HANDLERS_VAL_B)
#Attach second B handler
com_event += two_unique_handlers_b
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
expected_count += 3
expected_val_a = i
expected_val_b = i
AreEqual(expected_count, TWO_UNIQUE_HANDLERS_COUNT)
AreEqual(expected_val_a, TWO_UNIQUE_HANDLERS_VAL_A)
AreEqual(expected_val_b, TWO_UNIQUE_HANDLERS_VAL_B)
#Remove second B handler
com_event -= two_unique_handlers_b
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
expected_count += 2
expected_val_a = i
expected_val_b = i
AreEqual(expected_count, TWO_UNIQUE_HANDLERS_COUNT)
AreEqual(expected_val_a, TWO_UNIQUE_HANDLERS_VAL_A)
AreEqual(expected_val_b, TWO_UNIQUE_HANDLERS_VAL_B)
#Remove A and B handlers
com_event -= two_unique_handlers_a
com_event -= two_unique_handlers_b
#Send N events
for i in xrange(N):
com_obj.triggerUShort(i)
AreEqual(expected_count, TWO_UNIQUE_HANDLERS_COUNT)
AreEqual(expected_val_a, TWO_UNIQUE_HANDLERS_VAL_A)
AreEqual(expected_val_b, TWO_UNIQUE_HANDLERS_VAL_B)
###############################################################################
##EVENT HANDLER SIGNATURES#####################################################
#--POSITIVE--------------------------------------------------------------------
def test_eNull():
global HANDLER_CALL_COUNT
e_trigger = com_obj.triggerNull
com_event = com_obj.eNull
#--typical handler implementations
def f1():
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
def f2(*args):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=0: raise TypeError("Too many args:" + str(args))
def f3(**kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(kwargs.keys())!=0: raise TypeError("Too many kwargs:" + str(kwargs))
def f4(*args, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=0: raise TypeError("Too many args:" + str(args))
if len(kwargs.keys())!=0: raise TypeError("Too many kwargs:" + str(kwargs))
event_handlers = [f1, f2, f3, f4]
handler_helper(e_trigger, com_event, None, event_handlers)
def test_eInOutretBool():
global HANDLER_CALL_COUNT
e_trigger = com_obj.triggerInOutretBool
com_event = com_obj.eInOutretBool
#--typical handler implementations
def f1(a):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
return a
def f2(*args):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=1: raise TypeError("Too few/many args:" + str(args))
return args[0]
def f3(a, *args):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=0: raise TypeError("Too many args:" + str(args))
return a
def f4(a, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(kwargs.keys())!=0: raise TypeError("Too many kwargs:" + str(kwargs))
return a
def f5(*args, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=1: raise TypeError("Too few/many args:" + str(args))
if len(kwargs.keys())!=0: raise TypeError("Too many kwargs:" + str(kwargs))
return args[0]
def f6(a, *args, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=0: raise TypeError("Too few/many args:" + str(args))
if len(kwargs.keys())!=0: raise TypeError("Too many kwargs:" + str(kwargs))
return a
event_handlers = [f1, f2, f3, f4, f5, f6]
handler_helper(lambda: e_trigger(True), com_event,
False, #Merlin 386344
event_handlers)
handler_helper(lambda: e_trigger(False), com_event, False, event_handlers)
def test_eInOutBstr():
global HANDLER_CALL_COUNT
e_trigger = com_obj.triggerInOutBstr
com_event = com_obj.eInOutBstr
#--typical handler implementations
def f1(a, o):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
return a
def f2(*args):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=2: raise TypeError("Too few/many args:" + str(args))
return args[0]
def f3(a, *args):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=1: raise TypeError("Too many args:" + str(args))
return a
def f4(a, o, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(kwargs.keys())!=0: raise TypeError("Too many kwargs:" + str(kwargs))
return a
def f5(*args, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=2: raise TypeError("Too few/many args:" + str(args))
if len(kwargs.keys())!=0: raise TypeError("Too many kwargs:" + str(kwargs))
return args[0]
def f6(a, *args, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=1: raise TypeError("Too few/many args:" + str(args))
if len(kwargs.keys())!=0: raise TypeError("Too many kwargs:" + str(kwargs))
return a
event_handlers = [f1, f2, f3, f4, f5, f6]
try:
o = StrongBox[str]('www')
handler_helper(lambda: e_trigger("", o), com_event, None, event_handlers)
o = StrongBox[str]('www')
handler_helper(lambda: e_trigger("abc", o), com_event, None, event_handlers)
except EnvironmentError, e:
print "Dev10 409998"
def test_eInUshort():
global HANDLER_CALL_COUNT
e_trigger = com_obj.triggerUShort
com_event = com_obj.eInUshort
def f1(a):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
return
def f2(*args):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=1: raise TypeError("Too few/many args:" + str(args))
return
def f3(a, *args):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=0: raise TypeError("Too many args:" + str(args))
return
def f4(a, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(kwargs.keys())!=0: raise TypeError("Too many kwargs:" + str(kwargs))
return
def f5(*args, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=1: raise TypeError("Too few/many args:" + str(args))
if len(kwargs.keys())!=0: raise TypeError("Too many kwargs:" + str(kwargs))
return
def f6(a, *args, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=0: raise TypeError("Too few/many args:" + str(args))
if len(kwargs.keys())!=0: raise TypeError("Too many kwargs:" + str(kwargs))
return
event_handlers = [f1, f2, f3, f4, f5, f6]
handler_helper(lambda: e_trigger(0), com_event, None, event_handlers)
handler_helper(lambda: e_trigger(42), com_event, None, event_handlers)
def test_eNullShort():
global HANDLER_CALL_COUNT
e_trigger = com_obj.triggerNullShort
com_event = com_obj.eNullShort
#--typical handler implementations
def f1():
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
return 42
def f2(*args):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=0: raise TypeError("Too many args:" + str(args))
return 42
def f3(**kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(kwargs.keys())!=0: raise TypeError("Too many kwargs:" + str(kwargs))
return 42
def f4(*args, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
if len(args)!=0: raise TypeError("Too many args:" + str(args))
if len(kwargs.keys())!=0: raise TypeError("Too many kwargs:" + str(kwargs))
return 42
event_handlers = [f1, f2, f3, f4]
handler_helper(e_trigger, com_event, None, event_handlers)
#--NEGATIVE HANDLER SIGNATURES-------------------------------------------------
def test_eNull_neg_handler_signatures():
global HANDLER_CALL_COUNT
e_trigger = com_obj.triggerNull
com_event = com_obj.eNull
#--bad handler implementations
def f1(a):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f2(a, b):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f3(a, *args):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f4(a, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f5(a, *args, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
bad_arg_handlers = [f1, f2,
#f3, #Merlin 384332
#f4, #Merlin 384332
#f5, #Merlin 384332
]
#Dev10 410001
bad_arg_handlers = []
bad_handler_signature_helper(e_trigger, com_event, bad_arg_handlers)
def test_eInOutretBool_neg_handler_signatures():
e_trigger = com_obj.triggerInOutretBool
com_event = com_obj.eInOutretBool
#--bad handler implementations
def f1():
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f2(a, b):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f3(a, b, *args):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f4(a, b, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f5(a, b, *args, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
bad_arg_handlers = [f1,
#f2, #Merlin 384332
#f3, #Merlin 384332
#f4, #Merlin 384332
#f5, #Merlin 384332
]
#Dev10 410001
bad_arg_handlers = []
bad_handler_signature_helper(lambda: e_trigger(True), com_event, bad_arg_handlers)
def test_eInOutBstr_neg_handler_signatures():
e_trigger = com_obj.triggerInOutBstr
com_event = com_obj.eInOutBstr
#--bad handler implementations
def f1():
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f2(a, b):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f3(a, b, *args):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f4(a, b, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f5(a, b, *args, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
bad_arg_handlers = [f1,
#f2, #Merlin 384332
#f3, #Merlin 384332
#f4, #Merlin 384332
#f5, #Merlin 384332
]
#Dev10 410001
bad_arg_handlers = []
bad_handler_signature_helper(lambda: e_trigger("abc"), com_event, bad_arg_handlers)
def test_eInUshort_neg_handler_signatures():
e_trigger = com_obj.triggerUShort
com_event = com_obj.eInUshort
#--bad handler implementations
def f1():
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f2(a, b):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f3(a, b, *args):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f4(a, b, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
def f5(a, b, *args, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
bad_arg_handlers = [f1,
f2,
#f3, #Merlin 384332
#f4, #Merlin 384332
#f5, #Merlin 384332
]
#Dev10 410001
bad_arg_handlers = []
bad_handler_signature_helper(lambda: e_trigger(1), com_event, bad_arg_handlers)
def test_eNullShort_neg_handler_signatures():
global HANDLER_CALL_COUNT
e_trigger = com_obj.triggerNullShort
com_event = com_obj.eNullShort
#--bad handler implementations
def f1(a):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
return 42
def f2(a, b):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
return 42
def f3(a, *args):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
return 42
def f4(a, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
return 42
def f5(a, *args, **kwargs):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
raise Exception("Bad number of args should trigger DispException")
return 42
bad_arg_handlers = [f1, f2,
#f3, #Merlin 384332
#f4, #Merlin 384332
#f5, #Merlin 384332
]
#Dev10 410001
bad_arg_handlers = []
bad_handler_signature_helper(e_trigger, com_event, bad_arg_handlers)
#--NEGATIVE HANDLER RETURN VALUES----------------------------------------------
def test_eNull_neg_handler_return_values():
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT = 0
expected_call_count = 0
e_trigger = com_obj.triggerNull
com_event = com_obj.eNull
for retVal in [1, "", "abc", False, []]:
def bad_handler():
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT+=1
return retVal
com_event += bad_handler
#Expect this to throw an exception
#AssertError(EnvironmentError, e_trigger)
e_trigger() #Dev10 409792
#Also expect the HANDLER_CALL_COUNT to have been incremented
expected_call_count += 1
AreEqual(HANDLER_CALL_COUNT, expected_call_count)
#remove handler and send an event to ensure removal
com_event -= bad_handler
e_trigger()
AreEqual(HANDLER_CALL_COUNT, expected_call_count)
def test_eInOutretBool_neg_handler_return_values():
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT = 0
expected_call_count = 0
e_trigger = com_obj.triggerInOutretBool
com_event = com_obj.eInOutretBool
for retVal in [3.14, "", "abc", []]:
def bad_handler(a):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT+=1
return retVal
com_event += bad_handler
#Expect this to throw an exception
#AssertError(EnvironmentError, e_trigger, True)
e_trigger(True) #Dev10 409792
#Also expect the HANDLER_CALL_COUNT to have been incremented
expected_call_count += 1
AreEqual(HANDLER_CALL_COUNT, expected_call_count)
#remove handler and send an event to ensure removal
com_event -= bad_handler
e_trigger(True)
AreEqual(HANDLER_CALL_COUNT, expected_call_count)
def test_eInOutBstr_neg_handler_return_values():
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT = 0
expected_call_count = 0
e_trigger = com_obj.triggerInOutBstr
com_event = com_obj.eInOutBstr
for retVal in [3.14, True, 42L, []]:
def bad_handler(a):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT+=1
return retVal
com_event += bad_handler
#Expect this to throw an exception
#AssertError(EnvironmentError, e_trigger, "abc")
print "Dev10 409998" #cannot continue as bad_handler never gets called
com_event -= bad_handler
continue
e_trigger("abc") #Dev10 409792
#Also expect the HANDLER_CALL_COUNT to have been incremented
expected_call_count += 1
AreEqual(HANDLER_CALL_COUNT, expected_call_count)
#remove handler and send an event to ensure removal
com_event -= bad_handler
e_trigger("abc")
AreEqual(HANDLER_CALL_COUNT, expected_call_count)
def test_eInUshort_neg_handler_return_values():
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT = 0
expected_call_count = 0
e_trigger = com_obj.triggerUShort
com_event = com_obj.eInUshort
for retVal in [3.14, "", "abc", False, []]:
def bad_handler(a):
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT+=1
return retVal
com_event += bad_handler
#Expect this to throw an exception
#AssertError(EnvironmentError, e_trigger, 42)
e_trigger(42) #Dev10 409792
#Also expect the HANDLER_CALL_COUNT to have been incremented
expected_call_count += 1
AreEqual(HANDLER_CALL_COUNT, expected_call_count)
#remove handler and send an event to ensure removal
com_event -= bad_handler
e_trigger(42)
AreEqual(HANDLER_CALL_COUNT, expected_call_count)
def test_eNullShort_neg_handler_return_values():
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT = 0
expected_call_count = 0
e_trigger = com_obj.triggerNullShort
com_event = com_obj.eNullShort
for retVal in [None, "", "abc", []]:
def bad_handler():
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT+=1
return retVal
com_event += bad_handler
#Expect this to throw an exception
#Dev10 384367
#AssertError(EnvironmentError, e_trigger)
e_trigger()
#Also expect the HANDLER_CALL_COUNT to have been incremented
expected_call_count += 1
AreEqual(HANDLER_CALL_COUNT, expected_call_count)
#remove handler and send an event to ensure removal
com_event -= bad_handler
e_trigger()
AreEqual(HANDLER_CALL_COUNT, expected_call_count)
###############################################################################
##MISC#########################################################################
def test_slow_handler_sta():
from time import sleep
global HANDLER_CALL_COUNT
e_trigger = com_obj.triggerNull
com_event = com_obj.eNull
def slow_handler():
global HANDLER_CALL_COUNT
HANDLER_CALL_COUNT += 1
print "slow_handler...sleeping"
sleep(5)
HANDLER_CALL_COUNT += 1
#send an event with no handlers to ensure the test is OK
HANDLER_CALL_COUNT = 0
e_trigger()
AreEqual(HANDLER_CALL_COUNT, 0)
#add the handler and verify events are received
com_event += slow_handler
e_trigger()
#UNCOMMENT THIS UNDER MTA
#AreEqual(HANDLER_CALL_COUNT, 1)
#sleep(10)
AreEqual(HANDLER_CALL_COUNT, 2)
#remove the handler
com_event -= slow_handler
#send an event with no handlers to ensure it's removed
HANDLER_CALL_COUNT = 0
e_trigger()
sleep(10)
AreEqual(HANDLER_CALL_COUNT, 0)
def test_handler_spawns_thread():
from time import sleep
global HANDLER_CALL_COUNT
e_trigger = com_obj.triggerNull
com_event = com_obj.eNull
def thread_handler():
#Start a thread which increments the HANDLER_CALL_COUNT
import thread
def f():
global HANDLER_CALL_COUNT
from time import sleep
HANDLER_CALL_COUNT += 1
sleep(10)
HANDLER_CALL_COUNT += 1
thread.start_new_thread(f, ())
#send an event with no handlers to ensure the test is OK
HANDLER_CALL_COUNT = 0
e_trigger()
AreEqual(HANDLER_CALL_COUNT, 0)
#add the handler and verify events are received
com_event += thread_handler
e_trigger()
sleep(5) #Seems fragile
AreEqual(HANDLER_CALL_COUNT, 1)
sleep(15)
AreEqual(HANDLER_CALL_COUNT, 2)
#remove the handler
com_event -= thread_handler
#send an event with no handlers to ensure it's removed
HANDLER_CALL_COUNT = 0
e_trigger()
sleep(10)
AreEqual(HANDLER_CALL_COUNT, 0)
def test_handler_calls_caller():
global HANDLER_CALL_COUNT
e_trigger = com_obj.triggerNull
com_event = com_obj.eNull
def call_caller_handler():
global HANDLER_CALL_COUNT
if HANDLER_CALL_COUNT==0:
HANDLER_CALL_COUNT += 1
e_trigger()
else:
HANDLER_CALL_COUNT += 1
#send an event with no handlers to ensure the test is OK
HANDLER_CALL_COUNT = 0
e_trigger()
AreEqual(HANDLER_CALL_COUNT, 0)
#add the handler and verify events are received
com_event += call_caller_handler
e_trigger()
AreEqual(HANDLER_CALL_COUNT, 2)
#remove the handler
com_event -= call_caller_handler
#send an event with no handlers to ensure it's removed
HANDLER_CALL_COUNT = 0
e_trigger()
AreEqual(HANDLER_CALL_COUNT, 0)
def test_handler_raises():
global HANDLER_CALL_COUNT
e_trigger = com_obj.triggerNull
com_event = com_obj.eNull
def except_handler():
global HANDLER_CALL_COUNT
raise Exception("bad")
HANDLER_CALL_COUNT += 1
#send an event with no handlers to ensure the test is OK
HANDLER_CALL_COUNT = 0
e_trigger()
AreEqual(HANDLER_CALL_COUNT, 0)
#add the handler and verify events are received
com_event += except_handler
AssertError(Exception, e_trigger)
AreEqual(HANDLER_CALL_COUNT, 0)
#remove the handler
com_event -= except_handler
#send an event with no handlers to ensure it's removed
HANDLER_CALL_COUNT = 0
e_trigger()
AreEqual(HANDLER_CALL_COUNT, 0)
###############################################################################
##MAIN#########################################################################
run_com_test(__name__, __file__)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Worker that receives input from Piped RDD.
"""
from __future__ import print_function
import os
import sys
import time
import socket
import traceback
from pyspark.accumulators import _accumulatorRegistry
from pyspark.broadcast import Broadcast, _broadcastRegistry
from pyspark.taskcontext import TaskContext
from pyspark.files import SparkFiles
from pyspark.serializers import write_with_length, write_int, read_long, \
write_long, read_int, SpecialLengths, PythonEvalType, UTF8Deserializer, PickleSerializer, \
BatchedSerializer, ArrowStreamPandasSerializer
from pyspark.sql.types import to_arrow_type
from pyspark import shuffle
pickleSer = PickleSerializer()
utf8_deserializer = UTF8Deserializer()
def report_times(outfile, boot, init, finish):
write_int(SpecialLengths.TIMING_DATA, outfile)
write_long(int(1000 * boot), outfile)
write_long(int(1000 * init), outfile)
write_long(int(1000 * finish), outfile)
def add_path(path):
    # the worker can be reused, so do not add the path multiple times
if path not in sys.path:
# overwrite system packages
sys.path.insert(1, path)
def read_command(serializer, file):
command = serializer._read_with_length(file)
if isinstance(command, Broadcast):
command = serializer.loads(command.value)
return command
def chain(f, g):
"""chain two functions together """
return lambda *a: g(f(*a))
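# Illustrative sketch (not part of the original worker): `chain` composes two
# UDF bodies left-to-right, so the second function receives the first one's
# result, e.g.
#   inc = lambda x: x + 1
#   dbl = lambda x: x * 2
#   chain(inc, dbl)(3)   # -> 8, i.e. dbl(inc(3))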
def wrap_udf(f, return_type):
if return_type.needConversion():
toInternal = return_type.toInternal
return lambda *a: toInternal(f(*a))
else:
return lambda *a: f(*a)
def wrap_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def verify_result_length(*a):
result = f(*a)
if not hasattr(result, "__len__"):
raise TypeError("Return type of the user-defined functon should be "
"Pandas.Series, but is {}".format(type(result)))
if len(result) != len(a[0]):
raise RuntimeError("Result vector from pandas_udf was not the required length: "
"expected %d, got %d" % (len(a[0]), len(result)))
return result
return lambda *a: (verify_result_length(*a), arrow_return_type)
def read_single_udf(pickleSer, infile, eval_type):
num_arg = read_int(infile)
arg_offsets = [read_int(infile) for i in range(num_arg)]
row_func = None
for i in range(read_int(infile)):
f, return_type = read_command(pickleSer, infile)
if row_func is None:
row_func = f
else:
row_func = chain(row_func, f)
# the last returnType will be the return type of UDF
if eval_type == PythonEvalType.SQL_PANDAS_UDF:
return arg_offsets, wrap_pandas_udf(row_func, return_type)
elif eval_type == PythonEvalType.SQL_PANDAS_GROUPED_UDF:
# a groupby apply udf has already been wrapped under apply()
return arg_offsets, row_func
else:
return arg_offsets, wrap_udf(row_func, return_type)
def read_udfs(pickleSer, infile, eval_type):
num_udfs = read_int(infile)
udfs = {}
call_udf = []
for i in range(num_udfs):
arg_offsets, udf = read_single_udf(pickleSer, infile, eval_type)
udfs['f%d' % i] = udf
args = ["a[%d]" % o for o in arg_offsets]
call_udf.append("f%d(%s)" % (i, ", ".join(args)))
# Create function like this:
# lambda a: (f0(a0), f1(a1, a2), f2(a3))
# In the special case of a single UDF this will return a single result rather
# than a tuple of results; this is the format that the JVM side expects.
mapper_str = "lambda a: (%s)" % (", ".join(call_udf))
mapper = eval(mapper_str, udfs)
func = lambda _, it: map(mapper, it)
if eval_type == PythonEvalType.SQL_PANDAS_UDF \
or eval_type == PythonEvalType.SQL_PANDAS_GROUPED_UDF:
ser = ArrowStreamPandasSerializer()
else:
ser = BatchedSerializer(PickleSerializer(), 100)
# profiling is not supported for UDF
return func, None, ser, ser
def main(infile, outfile):
try:
boot_time = time.time()
split_index = read_int(infile)
if split_index == -1: # for unit tests
exit(-1)
version = utf8_deserializer.loads(infile)
if version != "%d.%d" % sys.version_info[:2]:
raise Exception(("Python in worker has different version %s than that in " +
"driver %s, PySpark cannot run with different minor versions." +
"Please check environment variables PYSPARK_PYTHON and " +
"PYSPARK_DRIVER_PYTHON are correctly set.") %
("%d.%d" % sys.version_info[:2], version))
# initialize global state
taskContext = TaskContext._getOrCreate()
taskContext._stageId = read_int(infile)
taskContext._partitionId = read_int(infile)
taskContext._attemptNumber = read_int(infile)
taskContext._taskAttemptId = read_long(infile)
shuffle.MemoryBytesSpilled = 0
shuffle.DiskBytesSpilled = 0
_accumulatorRegistry.clear()
# fetch name of workdir
spark_files_dir = utf8_deserializer.loads(infile)
SparkFiles._root_directory = spark_files_dir
SparkFiles._is_running_on_worker = True
# fetch names of includes (*.zip and *.egg files) and construct PYTHONPATH
add_path(spark_files_dir) # *.py files that were added will be copied here
num_python_includes = read_int(infile)
for _ in range(num_python_includes):
filename = utf8_deserializer.loads(infile)
add_path(os.path.join(spark_files_dir, filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
# fetch names and values of broadcast variables
num_broadcast_variables = read_int(infile)
for _ in range(num_broadcast_variables):
bid = read_long(infile)
if bid >= 0:
path = utf8_deserializer.loads(infile)
_broadcastRegistry[bid] = Broadcast(path=path)
else:
bid = - bid - 1
_broadcastRegistry.pop(bid)
_accumulatorRegistry.clear()
eval_type = read_int(infile)
if eval_type == PythonEvalType.NON_UDF:
func, profiler, deserializer, serializer = read_command(pickleSer, infile)
else:
func, profiler, deserializer, serializer = read_udfs(pickleSer, infile, eval_type)
init_time = time.time()
def process():
iterator = deserializer.load_stream(infile)
serializer.dump_stream(func(split_index, iterator), outfile)
if profiler:
profiler.profile(process)
else:
process()
except Exception:
try:
write_int(SpecialLengths.PYTHON_EXCEPTION_THROWN, outfile)
write_with_length(traceback.format_exc().encode("utf-8"), outfile)
except IOError:
            # the JVM closed the socket
pass
except Exception:
# Write the error to stderr if it happened while serializing
print("PySpark worker failed with exception:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
exit(-1)
finish_time = time.time()
report_times(outfile, boot_time, init_time, finish_time)
write_long(shuffle.MemoryBytesSpilled, outfile)
write_long(shuffle.DiskBytesSpilled, outfile)
# Mark the beginning of the accumulators section of the output
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
write_int(len(_accumulatorRegistry), outfile)
for (aid, accum) in _accumulatorRegistry.items():
pickleSer._write_with_length((aid, accum._value), outfile)
# check end of stream
if read_int(infile) == SpecialLengths.END_OF_STREAM:
write_int(SpecialLengths.END_OF_STREAM, outfile)
else:
# write a different value to tell JVM to not reuse this worker
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
exit(-1)
if __name__ == '__main__':
# Read a local port to connect to from stdin
java_port = int(sys.stdin.readline())
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", java_port))
sock_file = sock.makefile("rwb", 65536)
main(sock_file, sock_file)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras 2D transposed convolution layer (sometimes called deconvolution)."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
from keras import activations
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine.input_spec import InputSpec
from keras.layers.convolutional.conv2d import Conv2D
from keras.utils import conv_utils
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Conv2DTranspose',
'keras.layers.Convolution2DTranspose')
class Conv2DTranspose(Conv2D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers or `None`, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Args:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros evenly
to the left/right or up/down of the input such that output has the same
height/width dimension as the input.
output_padding: An integer or tuple/list of 2 integers,
specifying the amount of padding along the height and width
of the output tensor.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer, specifying the dilation rate for all spatial
dimensions for dilated convolution. Specifying different dilation rates
for different dimensions is not supported.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix
(see `keras.initializers`). Defaults to 'glorot_uniform'.
bias_initializer: Initializer for the bias vector
(see `keras.initializers`). Defaults to 'zeros'.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector
(see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix
(see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector
(see `keras.constraints`).
Input shape:
4D tensor with shape:
`(batch_size, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
If `output_padding` is specified:
```
new_rows = ((rows - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +
output_padding[0])
new_cols = ((cols - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +
output_padding[1])
```
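  For example (an illustrative calculation, not part of the original docstring):
  with `rows = 5`, `strides = (2, 2)`, `kernel_size = (3, 3)`, an implicit
  `padding` of 0 (i.e. `padding='valid'`) and `output_padding = (1, 1)`,
  `new_rows = (5 - 1) * 2 + 3 - 0 + 1 = 12`.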
Returns:
A tensor of rank 4 representing
`activation(conv2dtranspose(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
output_padding=None,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 2, 'output_padding', allow_zero=True)
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Strides must be greater than output padding. '
f'Received strides={self.strides}, '
f'output_padding={self.output_padding}.')
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
if len(input_shape) != 4:
raise ValueError('Inputs should have rank 4. '
f'Received input_shape={input_shape}.')
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'to `Conv2DTranspose` should be defined. '
f'The input_shape received is {input_shape}, '
f'where axis {channel_axis} (0-based) '
                       'is the channel dimension, which was found to be `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = tf.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
h_axis, w_axis = 2, 3
else:
h_axis, w_axis = 1, 2
    # Use the constant height and width when possible.
    # TODO(scottzhu): Extract this into a utility function that can be applied
    # to all convolutional layers, which currently lose the static shape
    # information due to tf.shape().
height, width = None, None
if inputs.shape.rank is not None:
dims = inputs.shape.as_list()
height = dims[h_axis]
width = dims[w_axis]
height = height if height is not None else inputs_shape[h_axis]
width = width if width is not None else inputs_shape[w_axis]
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_height = conv_utils.deconv_output_length(height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
out_width = conv_utils.deconv_output_length(width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_height, out_width)
else:
output_shape = (batch_size, out_height, out_width, self.filters)
output_shape_tensor = tf.stack(output_shape)
outputs = backend.conv2d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if not tf.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = tf.nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, h_axis, w_axis = 1, 2, 3
else:
c_axis, h_axis, w_axis = 3, 1, 2
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
return tf.TensorShape(output_shape)
def get_config(self):
config = super(Conv2DTranspose, self).get_config()
config['output_padding'] = self.output_padding
return config
# Alias
Convolution2DTranspose = Conv2DTranspose
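# Illustrative usage sketch (not part of the original module): a stride-2
# transposed convolution with `padding='same'` doubles the spatial size,
# e.g. upsampling a 16x16 feature map to 32x32:
#   layer = Conv2DTranspose(filters=8, kernel_size=3, strides=2, padding='same')
#   y = layer(tf.zeros((1, 16, 16, 3)))   # y.shape == (1, 32, 32, 8)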
|
|
'''
Get the level of compliance for 'old' versions of the apps.
That is, run the test harness over the suite from 31 Dec in some year.
Pass-rates are written to files for later use, one file per year/version.
'''
import os
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from qualitas import get_dirnames, corpus_for_year
from qualitas_test import test_all, print_latex_table
# Where the output bar charts will go:
_BARCHART_PDF = 'versions-barchart-s{}.pdf'
# Read/write cached pass rates to this directory to speed things up.
# NOTE: DATA_DIR is assumed to be defined or imported elsewhere; it is not
# defined in this file as shown.
_DATA_DIR = os.path.join(DATA_DIR, 'pass_rates')
_THRESHOLD=98 # Percentage pass rate that counts as compliant (>=)
_YEAR_LIST = list(range(2005, 2018))
def prettify(versions):
''' Delete the revision version number if it is 0; so 3.5.0 -> 3.5 '''
def pretty(v):
vers = v.split('.', maxsplit=2)
if len(vers)>2 and vers[2]=='0': return '.'.join(vers[:2]) # Trim
return v
return [pretty(v) for v in versions]
def find_oldest_ver(versions, percs, threshold=_THRESHOLD):
    ''' Return the oldest version with a pass rate at or above the threshold;
return None if no such version found.
'''
assert len(versions)==len(percs), 'Missing data in {}'.format(versions)
for v, p in zip(versions, percs):
if p >= threshold:
return v
return None
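# Illustrative example (not from the original script): with the default
# threshold of 98, find_oldest_ver(['2.5', '2.6', '2.7'], [90.0, 99.0, 100.0])
# returns '2.6', since version 2.5 falls below the threshold.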
def read_year_percs(filename, year_apps):
''' Reads the pass rates from a file; returns a list of percentages.
In case app names in file don't match given ones, returns None.
'''
percs = [ ]
with open(filename, 'r') as fh:
for i,line in enumerate(fh):
app, perc = line.split()
if app != year_apps[i]:
return None
percs.append(float(perc))
assert len(year_apps)==len(percs), 'Not enough data in {}'.format(percs)
return percs
def write_year_percs(filename, year_apps, percs):
''' Write a list of (app-name, pass-rate) to a txt file '''
assert len(year_apps)==len(percs), 'Not enough data in {}'.format(percs)
with open(filename, 'w') as fh:
for app, perc in zip(year_apps, percs):
print(app, perc, file=fh)
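# Illustrative cache-file layout assumed by read_year_percs/write_year_percs
# (one "app-name pass-rate" pair per line; the app names below are hypothetical):
#   ant 98.7
#   antlr 99.2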
def test_or_read(versions, year, year_apps, year_root):
''' Get the percentage pass rates for one particular year of the apps.
return an array, one row for each app, one column for each version.
Will re-generate the pass-rate data if it is not found in a file.
'''
qname = os.path.basename(corpus_for_year(year)) # Name prefix for data files
all_percs = [[] for _ in year_apps] # One row for each app
for ver in versions:
filename = os.path.join(_DATA_DIR, '{}-{}.dat'.format(qname,ver))
percs = None
if os.path.isfile(filename):
percs = read_year_percs(filename, year_apps)
if not percs: # Not in a file (for these apps): re-generate
print('--- Year = {}'.format(year), flush=True)
percs = test_all([ver], year_apps, year_root)
percs = [p[0] for p in percs] # Were lists of length 1
write_year_percs(filename, year_apps, percs)
assert len(percs)==len(year_apps), 'Want one entry per app {}'.format(percs)
# Now, append a perc to each row, so one column for each version:
for i,p in enumerate(percs):
all_percs[i].append(p)
return all_percs # Dimension: apps by versions
def test_all_years(versions, qualapps, year_list):
''' For all the given years, get the lowest version with enough passes.
For each year return a list of (app, oldest-version) pairs.
'''
oldest = { } # Maps year to list of (app, oldest-version) pairs
for year in year_list:
oldest[year] = [ ]
year_root = corpus_for_year(year)
year_apps = [app for app in get_dirnames(year_root) if app in qualapps]
percs = test_or_read(versions, year, year_apps, year_root)
for i,app in enumerate(year_apps):
best_ver = find_oldest_ver(versions, percs[i])
if best_ver:
oldest[year].append((app,best_ver))
return oldest
def sum_year_counts(versions, oldest):
''' For each year, calc the number of apps for each Python version.
Return a list of (year, version-counts), ordered by year.
This is the raw data for Fig 4 of the ESEM paper.
'''
year_counts = [ ]
for year in sorted(oldest.keys()):
counts = Counter([p for (_,p) in oldest[year]])
vcount = [counts.get(v,0) for v in versions]
year_counts.append((year, vcount))
return year_counts
def show_year_counts(versions, year_counts):
''' For each year, list the number of apps for each Python version'''
plist = lambda ls : ', '.join(['{:>3s}'.format(str(l)) for l in ls])
print('# Year {}'.format(plist(prettify(versions))))
for year, vcount in year_counts:
print(' {} {}'.format(year, plist(vcount)))
def tabulate_by_app(qualapps, oldest):
''' Table: rows are apps, columns are years, cell is best Python version '''
# First get the data into a 2D array
by_app = {app:{} for app in qualapps}
all_years = sorted(oldest.keys())
for year in all_years:
aplist = oldest[year]
for vlist in by_app.values():
vlist[year] = '-'
for app, ver in aplist:
by_app[app][year] = ver
# Then print it:
print_row = lambda row: print(' & '.join([str(r) for r in row]))
print_row(['Application'] + all_years)
for app in qualapps:
row = [app] + prettify([by_app[app][year] for year in all_years])
print_row(row)
def tabulate_pass_rates(versions, qualapps, year):
''' Table: rows are apps, columns are versions, cell is pass rate.
Year is given, so the pass rates shown are for just one year.
'''
year_root = corpus_for_year(year)
year_apps = [app for app in get_dirnames(year_root) if app in qualapps]
percs = test_or_read(versions, year, year_apps, year_root)
print_latex_table(prettify(versions), year_apps, year_root, percs)
def plot_pass_rates(versions, qualapps, year, save_as=None):
''' Graph of pass rates; x-axis is version, y-axis is % rate.
Each line on graph represents a single app.
'''
# First, get the data:
year_root = corpus_for_year(year)
year_apps = [app for app in get_dirnames(year_root) if app in qualapps]
percs = test_or_read(versions, year, year_apps, year_root)
# Now plot:
fig, ax = plt.subplots(figsize=(9,7))
ver_xlocs = np.arange(len(versions))
for appdata in percs:
ax.plot(ver_xlocs, appdata)
plt.xlabel('Python Versions')
plt.xticks(ver_xlocs, prettify(versions))
plt.ylabel('Percentage of files passing')
plt.yticks(np.arange(0, 105, 5))
ax.yaxis.grid(True, linestyle='--', which='major', color='grey', alpha=.25)
plt.savefig(save_as, bbox_inches='tight') if save_as else plt.show()
def plot_bar_chart(versions, year_counts, save_as=None):
''' Plot a stacked bar chart, one stacked bar per year,
divide each bar on no. of apps per version for that year.
This is how we get Figs 6&7 of the J.ESE paper.
'''
fig, ax1 = plt.subplots(figsize=(9,7))
    # Reviewer wants no ticks along the bottom axis:
plt.tick_params(axis='x', which='both', bottom='off', top='off')
bar_width = 0.45 # the width of the bars
ver_colors = cm.rainbow(np.linspace(0, 1, len(versions)))[::-1]
num_years = len(year_counts)
yr_xlocs = np.arange(num_years) # x locations for the bars
yr_bottoms = np.zeros(num_years) # y locations for the bottom of the bars
bars = [ ] # Keep bar data to make legend handle
for i,_ in enumerate(versions):
        # Plot the bars for this version, one bar per year
ver_data = [c[i] for _,c in year_counts] # counts (no of apps) per year
abar = plt.bar(yr_xlocs, ver_data, bar_width,
bottom=yr_bottoms, color=ver_colors[i])
bars.append(abar)
#bottom = [b+d for (b,d) in zip(bottom,data)]
yr_bottoms = yr_bottoms + ver_data
# Trimmings:
#plt.title('Change in Python versions over time')
plt.xticks(yr_xlocs, [y for y,_ in year_counts]) # years on x axis
plt.ylabel('No. of applications passing')
plt.yticks(np.arange(0, 51, 5)) # no. of apps on y axis
ax1.yaxis.grid(True, linestyle='--', which='major', color='grey', alpha=.25)
plt.legend([b[0] for b in bars], prettify(versions), loc=2)
plt.savefig(save_as, bbox_inches='tight') if save_as else plt.show()
# Probably want to do one or the other of these:
_versions_for = {
2 : ['2.0', '2.2', '2.4', '2.5', '2.6', '2.7'],
3 : ['3.0', '3.1', '3.3.0', '3.5.0', '3.6.0'],
}
_SERIES = 3 ### EDIT ME TO SUIT
if __name__ == '__main__':
qualapps = get_dirnames() # All applications
versions = _versions_for[_SERIES]
oldest = test_all_years(versions, qualapps, _YEAR_LIST)
year_counts = sum_year_counts(versions, oldest)
show_year_counts(versions, year_counts)
tabulate_by_app(qualapps, oldest)
plot_bar_chart(versions, year_counts, _BARCHART_PDF.format(_SERIES))
#plot_pass_rates(versions, qualapps, '2016',
# 'versions-2016-s{}.pdf'.format(series))
|
|
#!/usr/bin/python
# coding=utf-8
import codecs
import cPickle
class Route:
def __init__(self, id, route, name, trans, days, stops):
self.id = int(id)
self.route = route
self.name = name
self.trans = trans
self.days = days
self.stops = stops
class Stop:
def __init__(self, id, name, street):
self.id = int(id)
self.name = name
self.street = street
class Routes:
def __init__(self, routes_by_id):
self.by_id = routes_by_id
        # group routes by (trans, route) key
        self.by_route = {}
        for id in self.by_id:
            r = self.by_id[id]
            key = (r.trans, r.route)
            route_list = []
            if key in self.by_route:
                route_list = self.by_route[key]
            else:
                self.by_route[key] = route_list
            route_list.append(r)
        # group by transport type: trans -> {route -> [Route, ...]}
        self.by_trans = {}
        for key in self.by_route:
            (trans, route) = key
            rr = self.by_route[key]
            routes_of_trans = {}
            if trans in self.by_trans:
                routes_of_trans = self.by_trans[trans]
            else:
                self.by_trans[trans] = routes_of_trans
            routes_of_trans[route] = rr
# convert a stop id to its normalized form (strip a leading 'e' or 'x' prefix and convert to int)
def convert_stop(stop):
if stop[0] == 'e' or stop[0] == 'x':
stop = stop[1:]
return int(stop)
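# Illustrative examples (derived from the code above):
#   convert_stop('e123') -> 123,  convert_stop('x45') -> 45,  convert_stop('7') -> 7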
# read routes from file
def read_routes():
global routes
f = codecs.open('.tmp/routes.txt', 'rb', 'utf-8-sig')
text = f.read().replace('\r', '<CR>')
lines = filter(lambda x: x.strip() != '', text.split('\n'))
col_route = 0
col_trans = 3
col_name = 10
col_days = 11
col_id = 12
col_stops = 14
route=''
trans=''
name=''
days=''
rr = {}
for line in lines[1:]:
row = line.split(';')
if row[col_route] != '':
route=row[col_route]
if row[col_trans] != '':
trans=row[col_trans]
if row[col_name] != '':
name = row[col_name]
if row[col_days] != '':
days = row[col_days]
id = row[col_id]
stops = row[col_stops].split(',')
stops = filter(lambda x: x != '', stops)
stops = map(convert_stop, stops)
#print id, name.encode('utf-8'), stops
rr[id] = Route(int(id), route.encode('utf-8'), name.encode('utf-8'), trans.encode('utf-8'), days.encode('utf-8'), stops)
routes = Routes(rr)
# read stops from file
def read_stops():
global stops
f = codecs.open('.tmp/stops.txt', 'rb', 'utf-8-sig')
text = f.read().replace('\r', '<CR>')
lines = filter(lambda x: x.strip() != '', text.split('\n'))
col_id = 0
col_street = 3
col_name = 4
street=''
name=''
stops = {}
for line in lines[1:]:
row = line.split(';')
# print line
# print row
# print
id = int(row[col_id])
if row[col_street] != '':
street = row[col_street]
if row[col_name] != '':
name = row[col_name]
s = street
if s == '0':
s = ''
#print name.encode('utf-8'), '|', s.encode('utf-8')
stops[id] = Stop(id, name.encode('utf-8'), s)
# decode references from [key1, width1, key2, width2, key3] to
# [(key1, (begin1, end1)), (key2, (begin2, end2)), (key3, (begin3, end3))]
# where [beginX, endX) are indexes into the referenced list
def decode_ref(lst, ref_lst):
refs = []
start_index = 0
end_index = 0
i = 0
while i < len(lst):
item = lst[i]
i = i + 1
start_index = end_index
if i < len(lst):
end_index = end_index + int(lst[i])
i = i + 1
else:
end_index = len(ref_lst)
refs.append((item.encode('utf-8'), (start_index, end_index)))
return refs
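# Illustrative example (derived from the code above): with a referenced list of
# length 7,
#   decode_ref(['a', '2', 'b', '3', 'c'], range(7))
# yields [('a', (0, 2)), ('b', (2, 5)), ('c', (5, 7))] -- the last key always
# extends to the end of the referenced list.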
# split list by empty items
def split_by_space(lst):
ret = []
cur = []
for i in lst:
if i == '\r' or i == '\n':
continue
if i == '':
ret.append(cur)
cur = []
else:
cur.append(i)
if len(cur) > 0:
ret.append(cur)
return ret
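# Illustrative example (derived from the code above):
#   split_by_space(['1', '2', '', '3', '4']) -> [['1', '2'], ['3', '4']]
# '\r' and '\n' items are skipped rather than treated as separators.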
# split list by references
def split_by_refs(lst, refs):
ret = {}
for (ref, (b, e)) in refs:
ret[ref] = lst[b:e]
return ret
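# Illustrative example (derived from the code above):
#   split_by_refs([10, 20, 30, 40], [('a', (0, 2)), ('b', (2, 4))])
#   -> {'a': [10, 20], 'b': [30, 40]}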
# strange '-5' fix
def fix_deltas(deltas):
dt = 5
fixed_deltas = []
for (delta, bounds) in deltas:
dt = dt + int(delta) - 5
fixed_deltas.append((dt, bounds))
return fixed_deltas
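# Illustrative behaviour (derived from the code above; each stored value appears
# to carry a +5 offset, hence the '-5'):
#   fix_deltas([('6', (0, 3)), ('6', (3, 5))]) -> [(6, (0, 3)), (7, (3, 5))]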
# read times file
def read_times():
f = codecs.open('.tmp/times.txt', 'rb', 'utf-8-sig')
text = f.read().replace('\r', '<CR>')
lines = filter(lambda x: x.strip() != '', text.split('\n'))
for line in lines:
row = line.split(',')
# route
r = routes.by_id[row[0]]
#print r.route, r.name, r.id
#print "LINE:", line
#print "ROW:", row
ll = split_by_space(row[1:])
# split single list into sublists
# first - timetable of starts from initial stop. Stored as diffs
timetable = []
x = 0
for c in ll[0]:
x = x + int(c)
timetable.append(x)
#print timetable
# references of days into timetable
day_refs = decode_ref(ll[3], timetable)
# all other - differences between stops. add first element as zero difference and zip with stop id
diffs = map(lambda x: decode_ref(x, timetable), ll[4:])
diffs.insert(0, [])
diffs = map(fix_deltas, diffs)
diffs = zip(r.stops, diffs)
#print "RouteId:", route_id
#print "Timetable:", timetable
#print "DayRefs:", day_refs
#print "Diffs:"
#for i in diffs:
# print i
# calc timetables for each stop:
r.timetables = {}
for (stop, diffs_list) in diffs:
#print stop, stops[stop].name
#print "TT BEFORE:", len(timetable), timetable
#print "PARTS:"
#for (k,p) in split_by_refs(timetable, diffs_list).items():
# print k,p
#print "PARTS$"
for (d, (b, e)) in diffs_list:
#print "DIFF:", d, "FOR:", b, e
for i in range(b, e):
timetable[i] = timetable[i] + int(d)
#print "TT AFTER :", len(timetable), timetable
#print "PARTS:"
#for (k,p) in split_by_refs(timetable, diffs_list).items():
# print k,p
#print "PARTS$"
#print
# split timetable to days
r.timetables[stop] = split_by_refs(timetable, day_refs)
#print r.timetables
read_routes()
read_stops()
read_times()
root = {}
root["stops"] = stops
root["routes"] = routes
f = open(".tmp/data.bin", 'wb')
cPickle.dump(root, f)
|
|
# Copyright 2014, Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Vivekanandan Narasimhan, Hewlett-Packard Inc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.common import constants as n_const
from neutron.common import utils as n_utils
from neutron.openstack.common import log as logging
from neutron.plugins.openvswitch.common import constants
LOG = logging.getLogger(__name__)
# A class to represent a DVR-hosted subnet including vif_ports resident on
# that subnet
class LocalDVRSubnetMapping:
def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID):
        # set of compute ports on this dvr subnet
self.compute_ports = {}
self.subnet = subnet
self.csnat_ofport = csnat_ofport
self.dvr_owned = False
def __str__(self):
return ("subnet = %s compute_ports = %s csnat_port = %s"
" is_dvr_owned = %s" %
(self.subnet, self.get_compute_ofports(),
self.get_csnat_ofport(), self.is_dvr_owned()))
def get_subnet_info(self):
return self.subnet
def set_dvr_owned(self, owned):
self.dvr_owned = owned
def is_dvr_owned(self):
return self.dvr_owned
def add_compute_ofport(self, vif_id, ofport):
self.compute_ports[vif_id] = ofport
def remove_compute_ofport(self, vif_id):
self.compute_ports.pop(vif_id, 0)
def remove_all_compute_ofports(self):
self.compute_ports.clear()
def get_compute_ofports(self):
return self.compute_ports
def set_csnat_ofport(self, ofport):
self.csnat_ofport = ofport
def get_csnat_ofport(self):
return self.csnat_ofport
class OVSPort:
def __init__(self, id, ofport, mac, device_owner):
self.id = id
self.mac = mac
self.ofport = ofport
self.subnets = set()
self.device_owner = device_owner
def __str__(self):
return ("OVSPort: id = %s, ofport = %s, mac = %s,"
"device_owner = %s, subnets = %s" %
(self.id, self.ofport, self.mac,
self.device_owner, self.subnets))
def add_subnet(self, subnet_id):
self.subnets.add(subnet_id)
def remove_subnet(self, subnet_id):
self.subnets.remove(subnet_id)
def remove_all_subnets(self):
self.subnets.clear()
def get_subnets(self):
return self.subnets
def get_device_owner(self):
return self.device_owner
def get_mac(self):
return self.mac
def get_ofport(self):
return self.ofport
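# Illustrative sketch (not part of the original agent): OVSPort simply tracks a
# vif port and the DVR-routed subnets it participates in, e.g.
#   p = OVSPort('port-uuid', 6, 'fa:16:3e:aa:bb:cc', 'compute:nova')
#   p.add_subnet('subnet-uuid')
#   p.get_subnets()   # -> set(['subnet-uuid'])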
class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin):
'''
    Implements OVS-based DVR (Distributed Virtual Router) for overlay networks.
'''
# history
# 1.0 Initial version
def __init__(self, context, plugin_rpc, integ_br, tun_br,
patch_int_ofport=constants.OFPORT_INVALID,
patch_tun_ofport=constants.OFPORT_INVALID,
host=None, enable_tunneling=False,
enable_distributed_routing=False):
self.context = context
self.plugin_rpc = plugin_rpc
self.int_br = integ_br
self.tun_br = tun_br
self.patch_int_ofport = patch_int_ofport
self.patch_tun_ofport = patch_tun_ofport
self.host = host
self.enable_tunneling = enable_tunneling
self.enable_distributed_routing = enable_distributed_routing
def reset_ovs_parameters(self, integ_br, tun_br,
patch_int_ofport, patch_tun_ofport):
'''Reset the openvswitch parameters'''
if not (self.enable_tunneling and self.enable_distributed_routing):
return
self.int_br = integ_br
self.tun_br = tun_br
self.patch_int_ofport = patch_int_ofport
self.patch_tun_ofport = patch_tun_ofport
def setup_dvr_flows_on_integ_tun_br(self):
        '''Set up initial dvr flows into br-int and br-tun'''
if not (self.enable_tunneling and self.enable_distributed_routing):
return
LOG.debug("L2 Agent operating in DVR Mode")
self.dvr_mac_address = None
self.local_dvr_map = {}
self.local_csnat_map = {}
self.local_ports = {}
self.registered_dvr_macs = set()
# get the local DVR MAC Address
try:
details = self.plugin_rpc.get_dvr_mac_address_by_host(
self.context, self.host)
LOG.debug("L2 Agent DVR: Received response for "
"get_dvr_mac_address_by_host() from "
"plugin: %r", details)
self.dvr_mac_address = details['mac_address']
except Exception:
LOG.error(_("DVR: Failed to obtain local DVR Mac address"))
self.enable_distributed_routing = False
# switch all traffic using L2 learning
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
priority=1, actions="normal")
return
# Remove existing flows in integration bridge
self.int_br.remove_all_flows()
# Add a canary flow to int_br to track OVS restarts
self.int_br.add_flow(table=constants.CANARY_TABLE, priority=0,
actions="drop")
# Insert 'drop' action as the default for Table DVR_TO_SRC_MAC
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
priority=1,
actions="drop")
# Insert 'normal' action as the default for Table LOCAL_SWITCHING
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
priority=1,
actions="normal")
dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context)
LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs)
for mac in dvr_macs:
if mac['mac_address'] == self.dvr_mac_address:
continue
# Table 0 (default) will now sort DVR traffic from other
# traffic depending on in_port
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
priority=2,
in_port=self.patch_tun_ofport,
dl_src=mac['mac_address'],
actions="resubmit(,%s)" %
constants.DVR_TO_SRC_MAC)
# Table DVR_NOT_LEARN ensures unique dvr macs in the cloud
# are not learnt, as they may
# result in flow explosions
self.tun_br.add_flow(table=constants.DVR_NOT_LEARN,
priority=1,
dl_src=mac['mac_address'],
actions="output:%s" % self.patch_int_ofport)
self.registered_dvr_macs.add(mac['mac_address'])
self.tun_br.add_flow(priority=1,
in_port=self.patch_int_ofport,
actions="resubmit(,%s)" %
constants.DVR_PROCESS)
# table-miss should be sent to learning table
self.tun_br.add_flow(table=constants.DVR_NOT_LEARN,
priority=0,
actions="resubmit(,%s)" %
constants.LEARN_FROM_TUN)
self.tun_br.add_flow(table=constants.DVR_PROCESS,
priority=0,
actions="resubmit(,%s)" %
constants.PATCH_LV_TO_TUN)
def dvr_mac_address_update(self, dvr_macs):
if not (self.enable_tunneling and self.enable_distributed_routing):
return
LOG.debug("DVR Mac address update with host-mac: %s", dvr_macs)
if not self.dvr_mac_address:
LOG.debug("Self mac unknown, ignoring this "
"dvr_mac_address_update() ")
return
dvr_host_macs = set()
for entry in dvr_macs:
if entry['mac_address'] == self.dvr_mac_address:
continue
dvr_host_macs.add(entry['mac_address'])
if dvr_host_macs == self.registered_dvr_macs:
LOG.debug("DVR Mac address already up to date")
return
dvr_macs_added = dvr_host_macs - self.registered_dvr_macs
dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs
for oldmac in dvr_macs_removed:
self.int_br.delete_flows(table=constants.LOCAL_SWITCHING,
in_port=self.patch_tun_ofport,
dl_src=oldmac)
self.tun_br.delete_flows(table=constants.DVR_NOT_LEARN,
dl_src=oldmac)
LOG.debug("Removed DVR MAC flow for %s", oldmac)
self.registered_dvr_macs.remove(oldmac)
for newmac in dvr_macs_added:
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
priority=2,
in_port=self.patch_tun_ofport,
dl_src=newmac,
actions="resubmit(,%s)" %
constants.DVR_TO_SRC_MAC)
self.tun_br.add_flow(table=constants.DVR_NOT_LEARN,
priority=1,
dl_src=newmac,
actions="output:%s" % self.patch_int_ofport)
LOG.debug("Added DVR MAC flow for %s", newmac)
self.registered_dvr_macs.add(newmac)
def is_dvr_router_interface(self, device_owner):
return device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE
def process_tunneled_network(self, network_type, lvid, segmentation_id):
if not (self.enable_tunneling and self.enable_distributed_routing):
return
self.tun_br.add_flow(table=constants.TUN_TABLE[network_type],
priority=1,
tun_id=segmentation_id,
actions="mod_vlan_vid:%s,"
"resubmit(,%s)" %
(lvid, constants.DVR_NOT_LEARN))
def _bind_distributed_router_interface_port(self, port, fixed_ips,
device_owner, local_vlan):
# since router port must have only one fixed IP, directly
# use fixed_ips[0]
subnet_uuid = fixed_ips[0]['subnet_id']
csnat_ofport = constants.OFPORT_INVALID
ldm = None
if subnet_uuid in self.local_dvr_map:
ldm = self.local_dvr_map[subnet_uuid]
csnat_ofport = ldm.get_csnat_ofport()
if csnat_ofport == constants.OFPORT_INVALID:
LOG.error(_("DVR: Duplicate DVR router interface detected "
"for subnet %s"), subnet_uuid)
return
else:
# set up LocalDVRSubnetMapping available for this subnet
subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context,
subnet_uuid)
if not subnet_info:
LOG.error(_("DVR: Unable to retrieve subnet information"
" for subnet_id %s"), subnet_uuid)
return
LOG.debug("get_subnet_for_dvr for subnet %s returned with %s" %
(subnet_uuid, subnet_info))
ldm = LocalDVRSubnetMapping(subnet_info)
self.local_dvr_map[subnet_uuid] = ldm
# DVR takes over
ldm.set_dvr_owned(True)
subnet_info = ldm.get_subnet_info()
ip_subnet = subnet_info['cidr']
local_compute_ports = (
self.plugin_rpc.get_ports_on_host_by_subnet(
self.context, self.host, subnet_uuid))
LOG.debug("DVR: List of ports received from "
"get_ports_on_host_by_subnet %s",
local_compute_ports)
for prt in local_compute_ports:
vif = self.int_br.get_vif_port_by_id(prt['id'])
if not vif:
continue
ldm.add_compute_ofport(vif.vif_id, vif.ofport)
if vif.vif_id in self.local_ports:
                # the compute port is already tracked, possibly on a
                # different dvr routed subnet; queue this subnet to
                # that port as well
ovsport = self.local_ports[vif.vif_id]
ovsport.add_subnet(subnet_uuid)
else:
                # this is the first time the compute port is seen on a
                # dvr routed subnet; queue this subnet to that port
ovsport = OVSPort(vif.vif_id, vif.ofport,
vif.vif_mac, prt['device_owner'])
ovsport.add_subnet(subnet_uuid)
self.local_ports[vif.vif_id] = ovsport
# create rule for just this vm port
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
priority=4,
dl_vlan=local_vlan,
dl_dst=ovsport.get_mac(),
actions="strip_vlan,mod_dl_src:%s,"
"output:%s" %
(subnet_info['gateway_mac'],
ovsport.get_ofport()))
# create rule to forward broadcast/multicast frames from dvr
# router interface to appropriate local tenant ports
ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
if csnat_ofport != constants.OFPORT_INVALID:
ofports = str(csnat_ofport) + ',' + ofports
if ofports:
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
priority=2,
proto='ip',
dl_vlan=local_vlan,
nw_dst=ip_subnet,
actions="strip_vlan,mod_dl_src:%s,"
"output:%s" %
(subnet_info['gateway_mac'], ofports))
self.tun_br.add_flow(table=constants.DVR_PROCESS,
priority=3,
dl_vlan=local_vlan,
proto='arp',
nw_dst=subnet_info['gateway_ip'],
actions="drop")
self.tun_br.add_flow(table=constants.DVR_PROCESS,
priority=2,
dl_vlan=local_vlan,
dl_dst=port.vif_mac,
actions="drop")
self.tun_br.add_flow(table=constants.DVR_PROCESS,
priority=1,
dl_vlan=local_vlan,
dl_src=port.vif_mac,
actions="mod_dl_src:%s,resubmit(,%s)" %
(self.dvr_mac_address,
constants.PATCH_LV_TO_TUN))
        # the dvr router interface is itself a port, so capture it as well
        # and add this subnet to it. A subnet appears only once as a router
        # interface on any given router
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
def _bind_port_on_dvr_subnet(self, port, fixed_ips,
device_owner, local_vlan):
# Handle new compute port added use-case
subnet_uuid = None
for ips in fixed_ips:
if ips['subnet_id'] not in self.local_dvr_map:
continue
subnet_uuid = ips['subnet_id']
ldm = self.local_dvr_map[subnet_uuid]
if not ldm.is_dvr_owned():
# well this is CSNAT stuff, let dvr come in
# and do plumbing for this vm later
continue
# This confirms that this compute port belongs
# to a dvr hosted subnet.
# Accommodate this VM Port into the existing rule in
# the integration bridge
LOG.debug("DVR: Plumbing compute port %s", port.vif_id)
subnet_info = ldm.get_subnet_info()
ip_subnet = subnet_info['cidr']
csnat_ofport = ldm.get_csnat_ofport()
ldm.add_compute_ofport(port.vif_id, port.ofport)
if port.vif_id in self.local_ports:
                # check whether this compute port is already on a different
                # dvr-routed subnet; if so, add this subnet to that port
ovsport = self.local_ports[port.vif_id]
ovsport.add_subnet(subnet_uuid)
else:
                # this is the first time the compute port is seen on a
                # dvr-routed subnet; track it and add this subnet to it
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
# create a rule for this vm port
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
priority=4,
dl_vlan=local_vlan,
dl_dst=ovsport.get_mac(),
actions="strip_vlan,mod_dl_src:%s,"
"output:%s" %
(subnet_info['gateway_mac'],
ovsport.get_ofport()))
ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
if csnat_ofport != constants.OFPORT_INVALID:
ofports = str(csnat_ofport) + ',' + ofports
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
priority=2,
proto='ip',
dl_vlan=local_vlan,
nw_dst=ip_subnet,
actions="strip_vlan,mod_dl_src:%s,"
" output:%s" %
(subnet_info['gateway_mac'], ofports))
def _bind_centralized_snat_port_on_dvr_subnet(self, port, fixed_ips,
device_owner, local_vlan):
if port.vif_id in self.local_ports:
# throw an error if CSNAT port is already on a different
# dvr routed subnet
ovsport = self.local_ports[port.vif_id]
subs = list(ovsport.get_subnets())
LOG.error(_("Centralized-SNAT port %s already seen on "),
port.vif_id)
LOG.error(_("a different subnet %s"), subs[0])
return
# since centralized-SNAT (CSNAT) port must have only one fixed
# IP, directly use fixed_ips[0]
subnet_uuid = fixed_ips[0]['subnet_id']
ldm = None
subnet_info = None
if subnet_uuid not in self.local_dvr_map:
# no csnat ports seen on this subnet - create csnat state
# for this subnet
subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context,
subnet_uuid)
ldm = LocalDVRSubnetMapping(subnet_info, port.ofport)
self.local_dvr_map[subnet_uuid] = ldm
else:
ldm = self.local_dvr_map[subnet_uuid]
subnet_info = ldm.get_subnet_info()
# Store csnat OF Port in the existing DVRSubnetMap
ldm.set_csnat_ofport(port.ofport)
# create ovsPort footprint for csnat port
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
priority=4,
dl_vlan=local_vlan,
dl_dst=ovsport.get_mac(),
actions="strip_vlan,mod_dl_src:%s,"
" output:%s" %
(subnet_info['gateway_mac'],
ovsport.get_ofport()))
ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
ofports = str(ldm.get_csnat_ofport()) + ',' + ofports
ip_subnet = subnet_info['cidr']
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
priority=2,
proto='ip',
dl_vlan=local_vlan,
nw_dst=ip_subnet,
actions="strip_vlan,mod_dl_src:%s,"
" output:%s" %
(subnet_info['gateway_mac'], ofports))
def bind_port_to_dvr(self, port, network_type, fixed_ips,
device_owner, local_vlan_id):
# a port coming up as distributed router interface
if not (self.enable_tunneling and self.enable_distributed_routing):
return
if network_type not in constants.TUNNEL_NETWORK_TYPES:
return
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self._bind_distributed_router_interface_port(port, fixed_ips,
device_owner,
local_vlan_id)
if device_owner and n_utils.is_dvr_serviced(device_owner):
self._bind_port_on_dvr_subnet(port, fixed_ips,
device_owner,
local_vlan_id)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._bind_centralized_snat_port_on_dvr_subnet(port, fixed_ips,
device_owner,
local_vlan_id)
def _unbind_distributed_router_interface_port(self, port, local_vlan):
ovsport = self.local_ports[port.vif_id]
# removal of distributed router interface
subnet_ids = ovsport.get_subnets()
subnet_set = set(subnet_ids)
# ensure we process for all the subnets laid on this removed port
for sub_uuid in subnet_set:
if sub_uuid not in self.local_dvr_map:
continue
ldm = self.local_dvr_map[sub_uuid]
subnet_info = ldm.get_subnet_info()
ip_subnet = subnet_info['cidr']
            # DVR no longer owns this subnet
ldm.set_dvr_owned(False)
            # remove all vm rules for this dvr subnet and clear the tracked
            # compute_ports altogether
compute_ports = ldm.get_compute_ofports()
for vif_id in compute_ports:
ovsport = self.local_ports[vif_id]
self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
dl_vlan=local_vlan,
dl_dst=ovsport.get_mac())
ldm.remove_all_compute_ofports()
if ldm.get_csnat_ofport() != -1:
# If there is a csnat port on this agent, preserve
# the local_dvr_map state
ofports = str(ldm.get_csnat_ofport())
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
priority=2,
proto='ip',
dl_vlan=local_vlan,
nw_dst=ip_subnet,
actions="strip_vlan,mod_dl_src:%s,"
" output:%s" %
(subnet_info['gateway_mac'], ofports))
else:
# removed port is a distributed router interface
self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
proto='ip', dl_vlan=local_vlan,
nw_dst=ip_subnet)
# remove subnet from local_dvr_map as no dvr (or) csnat
# ports available on this agent anymore
self.local_dvr_map.pop(sub_uuid, None)
self.tun_br.delete_flows(table=constants.DVR_PROCESS,
dl_vlan=local_vlan,
proto='arp',
nw_dst=subnet_info['gateway_ip'])
ovsport.remove_subnet(sub_uuid)
self.tun_br.delete_flows(table=constants.DVR_PROCESS,
dl_vlan=local_vlan,
dl_dst=port.vif_mac)
self.tun_br.delete_flows(table=constants.DVR_PROCESS,
dl_vlan=local_vlan,
dl_src=port.vif_mac)
# release port state
self.local_ports.pop(port.vif_id, None)
def _unbind_port_on_dvr_subnet(self, port, local_vlan):
ovsport = self.local_ports[port.vif_id]
        # the compute port being removed belonged to a dvr-hosted subnet;
        # update the corresponding rules in the integration bridge
LOG.debug("DVR: Removing plumbing for compute port %s", port)
subnet_ids = ovsport.get_subnets()
# ensure we process for all the subnets laid on this port
for sub_uuid in subnet_ids:
if sub_uuid not in self.local_dvr_map:
continue
ldm = self.local_dvr_map[sub_uuid]
subnet_info = ldm.get_subnet_info()
ldm.remove_compute_ofport(port.vif_id)
ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
ip_subnet = subnet_info['cidr']
# first remove this vm port rule
self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
dl_vlan=local_vlan,
dl_dst=ovsport.get_mac())
if ldm.get_csnat_ofport() != -1:
# If there is a csnat port on this agent, preserve
# the local_dvr_map state
ofports = str(ldm.get_csnat_ofport()) + ',' + ofports
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
priority=2,
proto='ip',
dl_vlan=local_vlan,
nw_dst=ip_subnet,
actions="strip_vlan,mod_dl_src:%s,"
" output:%s" %
(subnet_info['gateway_mac'], ofports))
else:
if ofports:
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
priority=2,
proto='ip',
dl_vlan=local_vlan,
nw_dst=ip_subnet,
actions="strip_vlan,mod_dl_src:%s,"
" output:%s" %
(subnet_info['gateway_mac'],
ofports))
else:
# remove the flow altogether, as no ports (both csnat/
# compute) are available on this subnet in this
# agent
self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
proto='ip',
dl_vlan=local_vlan,
nw_dst=ip_subnet)
# release port state
self.local_ports.pop(port.vif_id, None)
def _unbind_centralized_snat_port_on_dvr_subnet(self, port, local_vlan):
ovsport = self.local_ports[port.vif_id]
        # the csnat port being removed belonged to a dvr-hosted subnet;
        # update the corresponding rules in the integration bridge
LOG.debug("DVR: Removing plumbing for csnat port %s", port)
sub_uuid = list(ovsport.get_subnets())[0]
        # a csnat port has exactly one subnet; skip if it is not tracked here
if sub_uuid not in self.local_dvr_map:
return
ldm = self.local_dvr_map[sub_uuid]
subnet_info = ldm.get_subnet_info()
ip_subnet = subnet_info['cidr']
ldm.set_csnat_ofport(constants.OFPORT_INVALID)
# then remove csnat port rule
self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
dl_vlan=local_vlan,
dl_dst=ovsport.get_mac())
ofports = ','.join(map(str, ldm.get_compute_ofports().values()))
if ofports:
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
priority=2,
proto='ip',
dl_vlan=local_vlan,
nw_dst=ip_subnet,
actions="strip_vlan,mod_dl_src:%s,"
" output:%s" %
(subnet_info['gateway_mac'], ofports))
else:
self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC,
proto='ip',
dl_vlan=local_vlan,
nw_dst=ip_subnet)
if not ldm.is_dvr_owned():
# if not owned by DVR (only used for csnat), remove this
# subnet state altogether
self.local_dvr_map.pop(sub_uuid, None)
# release port state
self.local_ports.pop(port.vif_id, None)
def unbind_port_from_dvr(self, vif_port, local_vlan_id):
if not (self.enable_tunneling and self.enable_distributed_routing):
return
# Handle port removed use-case
if vif_port and vif_port.vif_id not in self.local_ports:
LOG.debug("DVR: Non distributed port, ignoring %s", vif_port)
return
ovsport = self.local_ports[vif_port.vif_id]
device_owner = ovsport.get_device_owner()
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self._unbind_distributed_router_interface_port(vif_port,
local_vlan_id)
if device_owner and n_utils.is_dvr_serviced(device_owner):
self._unbind_port_on_dvr_subnet(vif_port, local_vlan_id)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._unbind_centralized_snat_port_on_dvr_subnet(vif_port,
local_vlan_id)
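# Illustrative sketch (not part of the agent code above): how the "ofports"
# action list for the DVR_TO_SRC_MAC broadcast/multicast rule is assembled
# from the LocalDVRSubnetMapping state. The port numbers are hypothetical
# stand-ins, and OFPORT_INVALID is assumed to be -1, as the comparisons in
# the unbind methods above imply.
def _example_build_dvr_ofports(compute_ofports, csnat_ofport, ofport_invalid=-1):
    """Join compute OF ports, prepending the csnat port when it is valid."""
    ofports = ','.join(map(str, compute_ofports.values()))
    if csnat_ofport != ofport_invalid:
        ofports = str(csnat_ofport) + ',' + ofports
    return ofports
# e.g. _example_build_dvr_ofports({'vm-1': 5, 'vm-2': 7}, 9) yields '9,5,7',
# the shape of the "output:%s" action string built in
# _bind_distributed_router_interface_port above.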
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions typically receive an image and draw some visualization on it.
They do not return a value; instead, they modify the image in place.
"""
import collections
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
import tensorflow as tf
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
  output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box in normalized coordinates (same below).
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
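# A minimal usage sketch for the helper above; it is defined but never called,
# and the output path is just a placeholder.
def _example_draw_single_box():
  """Draws one labeled box on a blank canvas and saves it as a PNG."""
  image = np.zeros((480, 640, 3), dtype=np.uint8)
  draw_bounding_box_on_image_array(
      image, ymin=0.25, xmin=0.25, ymax=0.75, xmax=0.75,
      color='LimeGreen', thickness=2,
      display_str_list=['example: 87%'],
      use_normalized_coordinates=True)
  save_image_array_as_png(image, '/tmp/example_box.png')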
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=thickness, fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
text_bottom = top
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=color)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
def draw_keypoints_on_image_array(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil, keypoints, color, radius,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_keypoints_on_image(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y):
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color, fill=color)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.7):
"""Draws mask on an image.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a float numpy array of shape (img_height, img_width) with
      values between 0 and 1
    color: color to draw the mask with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.7)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.float32:
raise ValueError('`mask` not of type np.float32')
if np.any(np.logical_or(mask > 1.0, mask < 0.0)):
raise ValueError('`mask` elements should be in [0, 1]')
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
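# A small sketch of the dtype/range contract enforced by
# draw_mask_on_image_array above; it is defined but never called.
def _example_draw_mask():
  """Blends a red mask over the top-left quarter of a blank canvas."""
  image = np.zeros((100, 100, 3), dtype=np.uint8)  # uint8 image, as required
  mask = np.zeros((100, 100), dtype=np.float32)    # float32 mask, values in [0, 1]
  mask[:50, :50] = 1.0
  draw_mask_on_image_array(image, mask, color='red', alpha=0.4)
  return image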
def visualize_boxes_and_labels_on_image_array(image,
boxes,
instance_masks=None,
keypoints=None,
use_normalized_coordinates=False,
agnostic_mode=False,
line_thickness=4):
  """Draws boxes (with optional masks and keypoints) from `boxes` onto `image`.
  This trimmed-down variant has no per-class scores or labels, so every box is
  drawn in the same color.
  """
  # Group masks and keypoints by box location, since detections that share the
  # same box coordinates are drawn together.
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_keypoints_map = collections.defaultdict(list)
max_boxes_to_draw = boxes.shape[0]
for i in range(min(max_boxes_to_draw, boxes.shape[0])):
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
    if keypoints is not None:
      box_to_keypoints_map[box].extend(keypoints[i])
    box_to_color_map[box] = 'DarkOrange'
# Draw all boxes onto image.
for box, color in six.iteritems(box_to_color_map):
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image,
box_to_instance_masks_map[box],
color=color
)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=line_thickness,
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates)
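# An end-to-end sketch tying the helpers above together; it is defined but
# never called. Boxes are [ymin, xmin, ymax, xmax] in normalized coordinates.
def _example_visualize():
  """Draws two plain boxes on a blank canvas via the top-level helper."""
  image = np.zeros((300, 300, 3), dtype=np.uint8)
  boxes = np.array([[0.1, 0.1, 0.4, 0.4],
                    [0.5, 0.5, 0.9, 0.9]], dtype=np.float32)
  visualize_boxes_and_labels_on_image_array(
      image, boxes, use_normalized_coordinates=True, line_thickness=2)
  return image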
|
|
# Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Classes for making VMware VI SOAP calls.
"""
import httplib
import urllib2
import suds
from cinder.openstack.common.gettextutils import _
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim_util
RESP_NOT_XML_ERROR = "Response is 'text/html', not 'text/xml'"
CONN_ABORT_ERROR = 'Software caused connection abort'
ADDRESS_IN_USE_ERROR = 'Address already in use'
def get_moref(value, type):
"""Get managed object reference.
:param value: value for the managed object
:param type: type of the managed object
:return: Managed object reference with input value and type
"""
moref = suds.sudsobject.Property(value)
moref._type = type
return moref
class VIMMessagePlugin(suds.plugin.MessagePlugin):
def addAttributeForValue(self, node):
"""Helper to handle AnyType.
suds does not handle AnyType properly.
VI SDK requires type attribute to be set when AnyType is used
:param node: XML value node
"""
if node.name == 'value':
node.set('xsi:type', 'xsd:string')
def marshalled(self, context):
"""Marshal soap context.
Provides the plugin with the opportunity to prune empty
nodes and fixup nodes before sending it to the server.
:param context: SOAP context
"""
# suds builds the entire request object based on the wsdl schema.
# VI SDK throws server errors if optional SOAP nodes are sent
# without values, e.g. <test/> as opposed to <test>test</test>
context.envelope.prune()
context.envelope.walk(self.addAttributeForValue)
class Vim(object):
"""The VIM Object."""
def __init__(self, protocol='https', host='localhost', wsdl_loc=None):
"""Create communication interfaces for initiating SOAP transactions.
:param protocol: http or https
        :param host: Server IPAddress[:port] or Hostname[:port]
        :param wsdl_loc: WSDL file location; if not given, the WSDL file
                         hosted at the server is used
        """
self._protocol = protocol
self._host_name = host
if not wsdl_loc:
wsdl_loc = Vim._get_wsdl_loc(protocol, host)
soap_url = vim_util.get_soap_url(protocol, host)
self._client = suds.client.Client(wsdl_loc, location=soap_url,
plugins=[VIMMessagePlugin()])
self._service_content = self.RetrieveServiceContent('ServiceInstance')
@staticmethod
def _get_wsdl_loc(protocol, host_name):
"""Return default WSDL file location hosted at the server.
:param protocol: http or https
:param host_name: ESX/VC server host name
:return: Default WSDL file location hosted at the server
"""
return vim_util.get_soap_url(protocol, host_name) + '/vimService.wsdl'
@property
def service_content(self):
return self._service_content
@property
def client(self):
return self._client
def __getattr__(self, attr_name):
"""Makes the API call and gets the result."""
def retrieve_properties_ex_fault_checker(response):
"""Checks the RetrievePropertiesEx response for errors.
Certain faults are sent as part of the SOAP body as property of
missingSet. For example NotAuthenticated fault. The method raises
appropriate VimFaultException when an error is found.
:param response: Response from RetrievePropertiesEx API call
"""
fault_list = []
if not response:
# This is the case when the session has timed out. ESX SOAP
# server sends an empty RetrievePropertiesExResponse. Normally
# missingSet in the returnval field has the specifics about
# the error, but that's not the case with a timed out idle
# session. It is as bad as a terminated session for we cannot
# use the session. So setting fault to NotAuthenticated fault.
fault_list = [error_util.NOT_AUTHENTICATED]
else:
for obj_cont in response:
if hasattr(obj_cont, 'missingSet'):
for missing_elem in obj_cont.missingSet:
fault_type = missing_elem.fault.fault.__class__
                            # Append the fault type name so that faults
                            # reported via missingSet are checked the same
                            # way as regular SOAP faults.
fault_list.append(fault_type.__name__)
if fault_list:
exc_msg_list = ', '.join(fault_list)
raise error_util.VimFaultException(fault_list,
_("Error(s): %s occurred "
"in the call to "
"RetrievePropertiesEx.") %
exc_msg_list)
def vim_request_handler(managed_object, **kwargs):
"""Handler for VI SDK calls.
Builds the SOAP message and parses the response for fault
checking and other errors.
:param managed_object:Managed object reference
:param kwargs: Keyword arguments of the call
:return: Response of the API call
"""
try:
if isinstance(managed_object, str):
# For strings use string value for value and type
# of the managed object.
managed_object = get_moref(managed_object, managed_object)
request = getattr(self.client.service, attr_name)
response = request(managed_object, **kwargs)
if (attr_name.lower() == 'retrievepropertiesex'):
retrieve_properties_ex_fault_checker(response)
return response
except error_util.VimFaultException as excep:
raise
except suds.WebFault as excep:
doc = excep.document
detail = doc.childAtPath('/Envelope/Body/Fault/detail')
fault_list = []
for child in detail.getChildren():
fault_list.append(child.get('type'))
raise error_util.VimFaultException(fault_list, excep)
except AttributeError as excep:
raise error_util.VimAttributeException(_("No such SOAP method "
"%(attr)s. Detailed "
"error: %(excep)s.") %
{'attr': attr_name,
'excep': excep})
except (httplib.CannotSendRequest,
httplib.ResponseNotReady,
httplib.CannotSendHeader) as excep:
raise error_util.SessionOverLoadException(_("httplib error in "
"%(attr)s: "
"%(excep)s.") %
{'attr': attr_name,
'excep': excep})
except (urllib2.URLError, urllib2.HTTPError) as excep:
raise error_util.VimConnectionException(
_("urllib2 error in %(attr)s: %(excep)s.") %
{'attr': attr_name,
'excep': excep})
except Exception as excep:
# Socket errors which need special handling for they
# might be caused by server API call overload
                if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
                        str(excep).find(CONN_ABORT_ERROR) != -1):
raise error_util.SessionOverLoadException(_("Socket error "
"in %(attr)s: "
"%(excep)s.") %
{'attr':
attr_name,
'excep': excep})
# Type error that needs special handling for it might be
# caused by server API call overload
elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
raise error_util.SessionOverLoadException(_("Type error "
"in %(attr)s: "
"%(excep)s.") %
{'attr':
attr_name,
'excep': excep})
else:
raise error_util.VimException(_("Error in %(attr)s. "
"Detailed error: "
"%(excep)s.") %
{'attr': attr_name,
'excep': excep})
return vim_request_handler
def __repr__(self):
return "VIM Object."
def __str__(self):
return "VIM Object."
|
|
# Copyright 2016 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import functools
import random
import time
import six
from ironic_lib.common.i18n import _
class Timer(object):
"""A timer decorator and context manager.
This metric type times the decorated method or code running inside the
context manager, and emits the time as the metric value. It is bound to
this MetricLogger. For example::
from ironic_lib import metrics_utils
METRICS = metrics_utils.get_metrics_logger()
@METRICS.timer('foo')
def foo(bar, baz):
          print(bar, baz)
with METRICS.timer('foo'):
do_something()
"""
def __init__(self, metrics, name):
"""Init the decorator / context manager.
:param metrics: The metric logger
:param name: The metric name
"""
if not isinstance(name, six.string_types):
raise TypeError(_("The metric name is expected to be a string. "
"Value is %s") % name)
self.metrics = metrics
self.name = name
self._start = None
def __call__(self, f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
start = _time()
result = f(*args, **kwargs)
duration = _time() - start
# Log the timing data (in ms)
self.metrics.send_timer(self.metrics.get_metric_name(self.name),
duration * 1000)
return result
return wrapped
def __enter__(self):
self._start = _time()
def __exit__(self, exc_type, exc_val, exc_tb):
duration = _time() - self._start
# Log the timing data (in ms)
self.metrics.send_timer(self.metrics.get_metric_name(self.name),
duration * 1000)
class Counter(object):
"""A counter decorator and context manager.
This metric type increments a counter every time the decorated method or
context manager is executed. It is bound to this MetricLogger. For
example::
from ironic_lib import metrics_utils
METRICS = metrics_utils.get_metrics_logger()
@METRICS.counter('foo')
def foo(bar, baz):
          print(bar, baz)
with METRICS.counter('foo'):
do_something()
"""
def __init__(self, metrics, name, sample_rate):
"""Init the decorator / context manager.
:param metrics: The metric logger
:param name: The metric name
:param sample_rate: Probabilistic rate at which the values will be sent
"""
if not isinstance(name, six.string_types):
raise TypeError(_("The metric name is expected to be a string. "
"Value is %s") % name)
if (sample_rate is not None and
(sample_rate < 0.0 or sample_rate > 1.0)):
msg = _("sample_rate is set to %s. Value must be None "
"or in the interval [0.0, 1.0]") % sample_rate
raise ValueError(msg)
self.metrics = metrics
self.name = name
self.sample_rate = sample_rate
def __call__(self, f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
self.metrics.send_counter(
self.metrics.get_metric_name(self.name),
1, sample_rate=self.sample_rate)
result = f(*args, **kwargs)
return result
return wrapped
def __enter__(self):
self.metrics.send_counter(self.metrics.get_metric_name(self.name),
1, sample_rate=self.sample_rate)
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class Gauge(object):
"""A gauge decorator.
This metric type returns the value of the decorated method as a metric
every time the method is executed. It is bound to this MetricLogger. For
example::
from ironic_lib import metrics_utils
METRICS = metrics_utils.get_metrics_logger()
@METRICS.gauge('foo')
def add_foo(bar, baz):
return (bar + baz)
"""
def __init__(self, metrics, name):
"""Init the decorator / context manager.
:param metrics: The metric logger
:param name: The metric name
"""
if not isinstance(name, six.string_types):
raise TypeError(_("The metric name is expected to be a string. "
"Value is %s") % name)
self.metrics = metrics
self.name = name
def __call__(self, f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
result = f(*args, **kwargs)
self.metrics.send_gauge(self.metrics.get_metric_name(self.name),
result)
return result
return wrapped
def _time():
"""Wraps time.time() for simpler testing."""
return time.time()
@six.add_metaclass(abc.ABCMeta)
class MetricLogger(object):
"""Abstract class representing a metrics logger.
A MetricLogger sends data to a backend (noop or statsd).
The data can be a gauge, a counter, or a timer.
The data sent to the backend is composed of:
- a full metric name
- a numeric value
The format of the full metric name is:
_prefix<delim>name
where:
- _prefix: [global_prefix<delim>][uuid<delim>][host_name<delim>]prefix
- name: the name of this metric
- <delim>: the delimiter. Default is '.'
"""
def __init__(self, prefix='', delimiter='.'):
"""Init a MetricLogger.
:param prefix: Prefix for this metric logger. This string will prefix
all metric names.
:param delimiter: Delimiter used to generate the full metric name.
"""
self._prefix = prefix
self._delimiter = delimiter
def get_metric_name(self, name):
"""Get the full metric name.
The format of the full metric name is:
_prefix<delim>name
where:
- _prefix: [global_prefix<delim>][uuid<delim>][host_name<delim>]
prefix
- name: the name of this metric
- <delim>: the delimiter. Default is '.'
:param name: The metric name.
:return: The full metric name, with logger prefix, as a string.
"""
if not self._prefix:
return name
return self._delimiter.join([self._prefix, name])
def send_gauge(self, name, value):
"""Send gauge metric data.
Gauges are simple values.
The backend will set the value of gauge 'name' to 'value'.
:param name: Metric name
:param value: Metric numeric value that will be sent to the backend
"""
self._gauge(name, value)
def send_counter(self, name, value, sample_rate=None):
"""Send counter metric data.
Counters are used to count how many times an event occurred.
The backend will increment the counter 'name' by the value 'value'.
Optionally, specify sample_rate in the interval [0.0, 1.0] to
sample data probabilistically where::
P(send metric data) = sample_rate
If sample_rate is None, then always send metric data, but do not
have the backend send sample rate information (if supported).
:param name: Metric name
:param value: Metric numeric value that will be sent to the backend
:param sample_rate: Probabilistic rate at which the values will be
sent. Value must be None or in the interval [0.0, 1.0].
"""
if (sample_rate is None or random.random() < sample_rate):
return self._counter(name, value,
sample_rate=sample_rate)
def send_timer(self, name, value):
"""Send timer data.
Timers are used to measure how long it took to do something.
        :param name: Metric name
        :param value: Metric numeric value that will be sent to the backend
"""
self._timer(name, value)
def timer(self, name):
return Timer(self, name)
def counter(self, name, sample_rate=None):
return Counter(self, name, sample_rate)
def gauge(self, name):
return Gauge(self, name)
@abc.abstractmethod
def _gauge(self, name, value):
"""Abstract method for backends to implement gauge behavior."""
@abc.abstractmethod
def _counter(self, name, value, sample_rate=None):
"""Abstract method for backends to implement counter behavior."""
@abc.abstractmethod
def _timer(self, name, value):
"""Abstract method for backends to implement timer behavior."""
class NoopMetricLogger(MetricLogger):
"""Noop metric logger that throws away all metric data."""
def _gauge(self, name, value):
pass
def _counter(self, name, value, sample_rate=None):
pass
    def _timer(self, name, value):
pass
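# A minimal concrete backend sketch (not part of ironic_lib): it shows what a
# MetricLogger subclass needs to implement. Note that get_metric_name('foo')
# on a logger created with prefix 'ironic.api' returns 'ironic.api.foo'.
class _ExampleListMetricLogger(MetricLogger):
    """Collects (kind, name, value) tuples in memory for inspection."""
    def __init__(self, prefix='', delimiter='.'):
        super(_ExampleListMetricLogger, self).__init__(prefix, delimiter)
        self.emitted = []
    def _gauge(self, name, value):
        self.emitted.append(('gauge', name, value))
    def _counter(self, name, value, sample_rate=None):
        self.emitted.append(('counter', name, value))
    def _timer(self, name, value):
        self.emitted.append(('timer', name, value))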
|
|
"""Support for Synology NAS Sensors."""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_USERNAME, CONF_PASSWORD, CONF_PORT, CONF_SSL,
ATTR_ATTRIBUTION, TEMP_CELSIUS, CONF_MONITORED_CONDITIONS,
EVENT_HOMEASSISTANT_START, CONF_DISKS)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
REQUIREMENTS = ['python-synology==0.2.0']
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = 'Data provided by Synology'
CONF_VOLUMES = 'volumes'
DEFAULT_NAME = 'Synology DSM'
DEFAULT_PORT = 5001
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
_UTILISATION_MON_COND = {
'cpu_other_load': ['CPU Load (Other)', '%', 'mdi:chip'],
'cpu_user_load': ['CPU Load (User)', '%', 'mdi:chip'],
'cpu_system_load': ['CPU Load (System)', '%', 'mdi:chip'],
'cpu_total_load': ['CPU Load (Total)', '%', 'mdi:chip'],
'cpu_1min_load': ['CPU Load (1 min)', '%', 'mdi:chip'],
'cpu_5min_load': ['CPU Load (5 min)', '%', 'mdi:chip'],
'cpu_15min_load': ['CPU Load (15 min)', '%', 'mdi:chip'],
'memory_real_usage': ['Memory Usage (Real)', '%', 'mdi:memory'],
    'memory_size': ['Memory Size', 'MB', 'mdi:memory'],
    'memory_cached': ['Memory Cached', 'MB', 'mdi:memory'],
    'memory_available_swap': ['Memory Available (Swap)', 'MB', 'mdi:memory'],
    'memory_available_real': ['Memory Available (Real)', 'MB', 'mdi:memory'],
    'memory_total_swap': ['Memory Total (Swap)', 'MB', 'mdi:memory'],
    'memory_total_real': ['Memory Total (Real)', 'MB', 'mdi:memory'],
'network_up': ['Network Up', 'Kbps', 'mdi:upload'],
'network_down': ['Network Down', 'Kbps', 'mdi:download'],
}
_STORAGE_VOL_MON_COND = {
'volume_status': ['Status', None, 'mdi:checkbox-marked-circle-outline'],
'volume_device_type': ['Type', None, 'mdi:harddisk'],
'volume_size_total': ['Total Size', None, 'mdi:chart-pie'],
'volume_size_used': ['Used Space', None, 'mdi:chart-pie'],
'volume_percentage_used': ['Volume Used', '%', 'mdi:chart-pie'],
'volume_disk_temp_avg': ['Average Disk Temp', None, 'mdi:thermometer'],
'volume_disk_temp_max': ['Maximum Disk Temp', None, 'mdi:thermometer'],
}
_STORAGE_DSK_MON_COND = {
'disk_name': ['Name', None, 'mdi:harddisk'],
'disk_device': ['Device', None, 'mdi:dots-horizontal'],
'disk_smart_status': ['Status (Smart)', None,
'mdi:checkbox-marked-circle-outline'],
'disk_status': ['Status', None, 'mdi:checkbox-marked-circle-outline'],
'disk_exceed_bad_sector_thr': ['Exceeded Max Bad Sectors', None,
'mdi:test-tube'],
'disk_below_remain_life_thr': ['Below Min Remaining Life', None,
'mdi:test-tube'],
'disk_temp': ['Temperature', None, 'mdi:thermometer'],
}
_MONITORED_CONDITIONS = list(_UTILISATION_MON_COND.keys()) + \
list(_STORAGE_VOL_MON_COND.keys()) + \
list(_STORAGE_DSK_MON_COND.keys())
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=True): cv.boolean,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS):
vol.All(cv.ensure_list, [vol.In(_MONITORED_CONDITIONS)]),
vol.Optional(CONF_DISKS): cv.ensure_list,
vol.Optional(CONF_VOLUMES): cv.ensure_list,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Synology NAS Sensor."""
def run_setup(event):
"""Wait until Home Assistant is fully initialized before creating.
Delay the setup until Home Assistant is fully initialized.
This allows any entities to be created already
"""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
use_ssl = config.get(CONF_SSL)
unit = hass.config.units.temperature_unit
monitored_conditions = config.get(CONF_MONITORED_CONDITIONS)
api = SynoApi(host, port, username, password, unit, use_ssl)
sensors = [SynoNasUtilSensor(
api, variable, _UTILISATION_MON_COND[variable])
for variable in monitored_conditions
if variable in _UTILISATION_MON_COND]
# Handle all volumes
for volume in config.get(CONF_VOLUMES, api.storage.volumes):
sensors += [SynoNasStorageSensor(
api, variable, _STORAGE_VOL_MON_COND[variable], volume)
for variable in monitored_conditions
if variable in _STORAGE_VOL_MON_COND]
# Handle all disks
for disk in config.get(CONF_DISKS, api.storage.disks):
sensors += [SynoNasStorageSensor(
api, variable, _STORAGE_DSK_MON_COND[variable], disk)
for variable in monitored_conditions
if variable in _STORAGE_DSK_MON_COND]
add_entities(sensors, True)
# Wait until start event is sent to load this component.
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class SynoApi:
"""Class to interface with Synology DSM API."""
def __init__(self, host, port, username, password, temp_unit, use_ssl):
"""Initialize the API wrapper class."""
from SynologyDSM import SynologyDSM
self.temp_unit = temp_unit
try:
self._api = SynologyDSM(host, port, username, password,
use_https=use_ssl)
except: # noqa: E722 pylint: disable=bare-except
_LOGGER.error("Error setting up Synology DSM")
# Will be updated when update() gets called.
self.utilisation = self._api.utilisation
self.storage = self._api.storage
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update function for updating api information."""
self._api.update()
class SynoNasSensor(Entity):
"""Representation of a Synology NAS Sensor."""
def __init__(self, api, variable, variable_info, monitor_device=None):
"""Initialize the sensor."""
self.var_id = variable
self.var_name = variable_info[0]
self.var_units = variable_info[1]
self.var_icon = variable_info[2]
self.monitor_device = monitor_device
self._api = api
@property
def name(self):
"""Return the name of the sensor, if any."""
if self.monitor_device is not None:
return "{} ({})".format(self.var_name, self.monitor_device)
return self.var_name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self.var_icon
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self.var_id in ['volume_disk_temp_avg', 'volume_disk_temp_max',
'disk_temp']:
return self._api.temp_unit
return self.var_units
def update(self):
"""Get the latest data for the states."""
if self._api is not None:
self._api.update()
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
}
class SynoNasUtilSensor(SynoNasSensor):
"""Representation a Synology Utilisation Sensor."""
@property
def state(self):
"""Return the state of the sensor."""
network_sensors = ['network_up', 'network_down']
memory_sensors = ['memory_size', 'memory_cached',
'memory_available_swap', 'memory_available_real',
'memory_total_swap', 'memory_total_real']
if self.var_id in network_sensors or self.var_id in memory_sensors:
attr = getattr(self._api.utilisation, self.var_id)(False)
if self.var_id in network_sensors:
return round(attr / 1024.0, 1)
if self.var_id in memory_sensors:
return round(attr / 1024.0 / 1024.0, 1)
else:
return getattr(self._api.utilisation, self.var_id)
class SynoNasStorageSensor(SynoNasSensor):
"""Representation a Synology Utilisation Sensor."""
@property
def state(self):
"""Return the state of the sensor."""
temp_sensors = ['volume_disk_temp_avg', 'volume_disk_temp_max',
'disk_temp']
if self.monitor_device is not None:
if self.var_id in temp_sensors:
attr = getattr(
self._api.storage, self.var_id)(self.monitor_device)
if self._api.temp_unit == TEMP_CELSIUS:
return attr
return round(attr * 1.8 + 32.0, 1)
return getattr(self._api.storage, self.var_id)(self.monitor_device)
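# Small sketch of the scaling applied by the state() properties above; the raw
# values are placeholders for what python-synology would return.
def _example_unit_conversions():
    """Mirrors the network, memory and temperature scaling done in state()."""
    raw_network = 1536000.0
    network_state = round(raw_network / 1024.0, 1)
    raw_memory = 8 * 1024 * 1024 * 1024.0
    memory_state = round(raw_memory / 1024.0 / 1024.0, 1)
    temp_c = 40
    temp_f = round(temp_c * 1.8 + 32.0, 1)  # applied when the unit is Fahrenheit
    return network_state, memory_state, temp_f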
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/impl_sqlalchemy.py
.. note::
In order to run the tests against real SQL server set the environment
variable CEILOMETER_TEST_SQL_URL to point to a SQL server before running
the tests.
"""
import datetime
import repr
import mock
from oslo_utils import timeutils
from ceilometer.alarm.storage import impl_sqlalchemy as impl_sqla_alarm
from ceilometer.event.storage import impl_sqlalchemy as impl_sqla_event
from ceilometer.event.storage import models
from ceilometer.storage import impl_sqlalchemy
from ceilometer.storage.sqlalchemy import models as sql_models
from ceilometer.tests import base as test_base
from ceilometer.tests import db as tests_db
from ceilometer.tests.storage import test_storage_scenarios as scenarios
@tests_db.run_with('sqlite', 'mysql', 'pgsql')
class CeilometerBaseTest(tests_db.TestBase):
def test_ceilometer_base(self):
base = sql_models.CeilometerBase()
base['key'] = 'value'
self.assertEqual('value', base['key'])
@tests_db.run_with('sqlite', 'mysql', 'pgsql')
class EventTypeTest(tests_db.TestBase):
# EventType is a construct specific to sqlalchemy
# Not applicable to other drivers.
def test_event_type_exists(self):
et1 = self.event_conn._get_or_create_event_type("foo")
self.assertTrue(et1.id >= 0)
et2 = self.event_conn._get_or_create_event_type("foo")
self.assertEqual(et2.id, et1.id)
self.assertEqual(et2.desc, et1.desc)
def test_event_type_unique(self):
et1 = self.event_conn._get_or_create_event_type("foo")
self.assertTrue(et1.id >= 0)
et2 = self.event_conn._get_or_create_event_type("blah")
self.assertNotEqual(et1.id, et2.id)
self.assertNotEqual(et1.desc, et2.desc)
# Test the method __repr__ returns a string
self.assertTrue(repr.repr(et2))
class MyException(Exception):
pass
@tests_db.run_with('sqlite', 'mysql', 'pgsql')
class EventTest(tests_db.TestBase):
def _verify_data(self, trait, trait_table):
now = datetime.datetime.utcnow()
ev = models.Event('1', 'name', now, [trait], {})
self.event_conn.record_events([ev])
session = self.event_conn._engine_facade.get_session()
t_tables = [sql_models.TraitText, sql_models.TraitFloat,
sql_models.TraitInt, sql_models.TraitDatetime]
for table in t_tables:
if table == trait_table:
self.assertEqual(1, session.query(table).count())
else:
self.assertEqual(0, session.query(table).count())
def test_string_traits(self):
model = models.Trait("Foo", models.Trait.TEXT_TYPE, "my_text")
self._verify_data(model, sql_models.TraitText)
def test_int_traits(self):
model = models.Trait("Foo", models.Trait.INT_TYPE, 100)
self._verify_data(model, sql_models.TraitInt)
def test_float_traits(self):
model = models.Trait("Foo", models.Trait.FLOAT_TYPE, 123.456)
self._verify_data(model, sql_models.TraitFloat)
def test_datetime_traits(self):
now = datetime.datetime.utcnow()
model = models.Trait("Foo", models.Trait.DATETIME_TYPE, now)
self._verify_data(model, sql_models.TraitDatetime)
def test_bad_event(self):
now = datetime.datetime.utcnow()
m = [models.Event("1", "Foo", now, [], {}),
models.Event("2", "Zoo", now, [], {})]
with mock.patch.object(self.event_conn,
"_get_or_create_event_type") as mock_save:
mock_save.side_effect = MyException("Boom")
problem_events = self.event_conn.record_events(m)
self.assertEqual(2, len(problem_events))
for bad, event in problem_events:
self.assertEqual(bad, models.Event.UNKNOWN_PROBLEM)
def test_event_repr(self):
ev = sql_models.Event('msg_id', None, False, {})
ev.id = 100
self.assertTrue(repr.repr(ev))
@tests_db.run_with('sqlite', 'mysql', 'pgsql')
class RelationshipTest(scenarios.DBTestBase):
# Note: Do not derive from SQLAlchemyEngineTestBase, since we
# don't want to automatically inherit all the Meter setup.
@mock.patch.object(timeutils, 'utcnow')
def test_clear_metering_data_meta_tables(self, mock_utcnow):
mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45)
self.conn.clear_expired_metering_data(3 * 60)
session = self.conn._engine_facade.get_session()
self.assertEqual(5, session.query(sql_models.Sample).count())
resource_ids = (session.query(sql_models.Resource.internal_id)
.group_by(sql_models.Resource.internal_id))
meta_tables = [sql_models.MetaText, sql_models.MetaFloat,
sql_models.MetaBigInt, sql_models.MetaBool]
s = set()
for table in meta_tables:
self.assertEqual(0, (session.query(table)
.filter(~table.id.in_(resource_ids)).count()
))
s.update(session.query(table.id).all())
self.assertEqual(set(resource_ids.all()), s)
class CapabilitiesTest(test_base.BaseTestCase):
# Check the returned capabilities list, which is specific to each DB
# driver
def test_capabilities(self):
expected_capabilities = {
'meters': {'pagination': False,
'query': {'simple': True,
'metadata': True,
'complex': False}},
'resources': {'pagination': False,
'query': {'simple': True,
'metadata': True,
'complex': False}},
'samples': {'pagination': True,
'query': {'simple': True,
'metadata': True,
'complex': True}},
'statistics': {'pagination': False,
'groupby': True,
'query': {'simple': True,
'metadata': True,
'complex': False},
'aggregation': {'standard': True,
'selectable': {
'max': True,
'min': True,
'sum': True,
'avg': True,
'count': True,
'stddev': True,
'cardinality': True}}
},
}
actual_capabilities = impl_sqlalchemy.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_event_capabilities(self):
expected_capabilities = {
'events': {'query': {'simple': True}},
}
actual_capabilities = impl_sqla_event.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_alarm_capabilities(self):
expected_capabilities = {
'alarms': {'query': {'simple': True,
'complex': True},
'history': {'query': {'simple': True,
'complex': True}}},
}
actual_capabilities = impl_sqla_alarm.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_storage_capabilities(self):
expected_capabilities = {
'storage': {'production_ready': True},
}
actual_capabilities = (impl_sqlalchemy.
Connection.get_storage_capabilities())
self.assertEqual(expected_capabilities, actual_capabilities)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.cloud.kms_v1.proto import service_pb2_grpc
from google.iam.v1 import iam_policy_pb2
class KeyManagementServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.kms.v1 KeyManagementService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
def __init__(
self, channel=None, credentials=None, address="cloudkms.googleapis.com:443"
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive."
)
# Create the channel.
if channel is None:
channel = self.create_channel(address=address, credentials=credentials)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
"key_management_service_stub": service_pb2_grpc.KeyManagementServiceStub(
channel
),
"iam_policy_stub": iam_policy_pb2.IAMPolicyStub(channel),
}
@classmethod
def create_channel(cls, address="cloudkms.googleapis.com:443", credentials=None):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def list_key_rings(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists ``KeyRings``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].ListKeyRings
@property
def list_crypto_keys(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists ``CryptoKeys``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].ListCryptoKeys
@property
def list_crypto_key_versions(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists ``CryptoKeyVersions``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].ListCryptoKeyVersions
@property
def get_key_ring(self):
"""Return the gRPC stub for {$apiMethod.name}.
Returns metadata for a given ``KeyRing``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].GetKeyRing
@property
def get_crypto_key(self):
"""Return the gRPC stub for {$apiMethod.name}.
Returns metadata for a given ``CryptoKey``, as well as its ``primary``
``CryptoKeyVersion``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].GetCryptoKey
@property
def get_crypto_key_version(self):
"""Return the gRPC stub for {$apiMethod.name}.
Returns metadata for a given ``CryptoKeyVersion``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].GetCryptoKeyVersion
@property
def create_key_ring(self):
"""Return the gRPC stub for {$apiMethod.name}.
Create a new ``KeyRing`` in a given Project and Location.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].CreateKeyRing
@property
def create_crypto_key(self):
"""Return the gRPC stub for {$apiMethod.name}.
Create a new ``CryptoKey`` within a ``KeyRing``.
``CryptoKey.purpose`` and ``CryptoKey.version_template.algorithm`` are
required.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].CreateCryptoKey
@property
def create_crypto_key_version(self):
"""Return the gRPC stub for {$apiMethod.name}.
Create a new ``CryptoKeyVersion`` in a ``CryptoKey``.
The server will assign the next sequential id. If unset, ``state`` will
be set to ``ENABLED``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].CreateCryptoKeyVersion
@property
def update_crypto_key(self):
"""Return the gRPC stub for {$apiMethod.name}.
Update a ``CryptoKey``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].UpdateCryptoKey
@property
def update_crypto_key_version(self):
"""Return the gRPC stub for {$apiMethod.name}.
Update a ``CryptoKeyVersion``'s metadata.
``state`` may be changed between ``ENABLED`` and ``DISABLED`` using this
method. See ``DestroyCryptoKeyVersion`` and ``RestoreCryptoKeyVersion``
to move between other states.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].UpdateCryptoKeyVersion
@property
def encrypt(self):
"""Return the gRPC stub for {$apiMethod.name}.
Encrypts data, so that it can only be recovered by a call to
``Decrypt``. The ``CryptoKey.purpose`` must be ``ENCRYPT_DECRYPT``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].Encrypt
@property
def decrypt(self):
"""Return the gRPC stub for {$apiMethod.name}.
Decrypts data that was protected by ``Encrypt``. The
``CryptoKey.purpose`` must be ``ENCRYPT_DECRYPT``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].Decrypt
@property
def update_crypto_key_primary_version(self):
"""Return the gRPC stub for {$apiMethod.name}.
Update the version of a ``CryptoKey`` that will be used in ``Encrypt``.
Returns an error if called on an asymmetric key.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].UpdateCryptoKeyPrimaryVersion
@property
def destroy_crypto_key_version(self):
"""Return the gRPC stub for {$apiMethod.name}.
Schedule a ``CryptoKeyVersion`` for destruction.
Upon calling this method, ``CryptoKeyVersion.state`` will be set to
``DESTROY_SCHEDULED`` and ``destroy_time`` will be set to a time 24
hours in the future, at which point the ``state`` will be changed to
``DESTROYED``, and the key material will be irrevocably destroyed.
Before the ``destroy_time`` is reached, ``RestoreCryptoKeyVersion`` may
be called to reverse the process.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].DestroyCryptoKeyVersion
@property
def restore_crypto_key_version(self):
"""Return the gRPC stub for {$apiMethod.name}.
Restore a ``CryptoKeyVersion`` in the ``DESTROY_SCHEDULED`` state.
Upon restoration of the CryptoKeyVersion, ``state`` will be set to
``DISABLED``, and ``destroy_time`` will be cleared.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].RestoreCryptoKeyVersion
@property
def get_public_key(self):
"""Return the gRPC stub for {$apiMethod.name}.
Returns the public key for the given ``CryptoKeyVersion``. The
``CryptoKey.purpose`` must be ``ASYMMETRIC_SIGN`` or
``ASYMMETRIC_DECRYPT``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].GetPublicKey
@property
def asymmetric_decrypt(self):
"""Return the gRPC stub for {$apiMethod.name}.
Decrypts data that was encrypted with a public key retrieved from
``GetPublicKey`` corresponding to a ``CryptoKeyVersion`` with
        ``CryptoKey.purpose`` ``ASYMMETRIC_DECRYPT``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].AsymmetricDecrypt
@property
def asymmetric_sign(self):
"""Return the gRPC stub for {$apiMethod.name}.
Signs data using a ``CryptoKeyVersion`` with ``CryptoKey.purpose``
        ``ASYMMETRIC_SIGN``, producing a signature that can be verified with the
public key retrieved from ``GetPublicKey``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["key_management_service_stub"].AsymmetricSign
@property
def set_iam_policy(self):
"""Return the gRPC stub for {$apiMethod.name}.
Sets the access control policy on the specified resource. Replaces any
existing policy.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["iam_policy_stub"].SetIamPolicy
@property
def get_iam_policy(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets the access control policy for a resource.
Returns an empty policy if the resource exists and does not have a policy
set.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["iam_policy_stub"].GetIamPolicy
@property
def test_iam_permissions(self):
"""Return the gRPC stub for {$apiMethod.name}.
Returns permissions that a caller has on the specified resource. If the
resource does not exist, this will return an empty set of permissions,
        not a ``NOT_FOUND`` error.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["iam_policy_stub"].TestIamPermissions
|
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the calling tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user)
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex + 1].strip("[]")
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
    # Return True on success so callers can use "if not doRemoteCopy(...)".
    return return_code == 0
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t wgt -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
if not doRemoteCopy(
os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
for item in glob.glob("%s/*" % SCRIPT_DIR):
if item.endswith(".wgt"):
continue
elif item.endswith("inst.py"):
continue
else:
item_name = os.path.basename(item)
if not doRemoteCopy(item, PKG_SRC_DIR + "/" + item_name):
action_status = False
return action_status
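# Typical invocations of this script (illustrative; serial numbers and host
# names are placeholders):
#   ./inst.py -i                      # install every .wgt found next to this script
#   ./inst.py -u                      # uninstall the same packages
#   ./inst.py -m SSH -s <host> -i     # use ssh/scp instead of sdb
#   ./inst.py -s <serial> -a app -i   # pick a specific sdb device and user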
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0:
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket" % str(
userid)
else:
print "[Error] cmd commands error : %s" % str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
|
import os
from keen.client import KeenClient
from keen.exceptions import InvalidEnvironmentError
__author__ = 'dkador'
_client = None
project_id = None
write_key = None
read_key = None
def _initialize_client_from_environment():
global _client, project_id, write_key, read_key
if _client is None:
# check environment for project ID and keys
project_id = project_id or os.environ.get("KEEN_PROJECT_ID")
write_key = write_key or os.environ.get("KEEN_WRITE_KEY")
read_key = read_key or os.environ.get("KEEN_READ_KEY")
if not project_id:
raise InvalidEnvironmentError("Please set the KEEN_PROJECT_ID environment variable or set keen.project_id!")
_client = KeenClient(project_id,
write_key=write_key,
read_key=read_key)
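# Configuration sketch (illustrative): the helpers below lazily build a shared
# KeenClient on first use. The project id and keys can come from the
# environment (KEEN_PROJECT_ID / KEEN_WRITE_KEY / KEEN_READ_KEY) or be assigned
# on the module before the first call, e.g.
#
#   import keen
#   keen.project_id = "<project id>"
#   keen.write_key = "<write key>"
#   keen.add_event("sign_ups", {"user_id": 123})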
def add_event(event_collection, body, timestamp=None):
_initialize_client_from_environment()
_client.add_event(event_collection, body, timestamp=timestamp)
def add_events(events):
_initialize_client_from_environment()
_client.add_events(events)
def generate_image_beacon(event_collection, body, timestamp=None):
_initialize_client_from_environment()
return _client.generate_image_beacon(event_collection, body, timestamp=timestamp)
def count(event_collection, timeframe=None, timezone=None, interval=None, filters=None, group_by=None, max_age=None):
""" Performs a count query
Counts the number of events that meet the given criteria.
:param event_collection: string, the name of the collection to query
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
    :param filters: array of dict, contains the filters you'd like to apply to the data
        example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
    :param group_by: string or array of strings, the name(s) of the properties you would
        like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.count(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by, max_age=max_age)
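# Usage sketch for count() (illustrative; the collection name and property
# values are placeholders):
#
#   keen.count("purchases",
#              timeframe="previous_7_days",
#              group_by="device",
#              filters=[{"property_name": "device",
#                        "operator": "eq",
#                        "property_value": "iPhone"}])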
def sum(event_collection, target_property, timeframe=None, timezone=None, interval=None, filters=None,
group_by=None, max_age=None):
""" Performs a sum query
Adds the values of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
    :param target_property: string, the name of the event property you would like to use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
    :param filters: array of dict, contains the filters you'd like to apply to the data
        example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
    :param group_by: string or array of strings, the name(s) of the properties you would
        like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.sum(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by,
target_property=target_property, max_age=max_age)
def minimum(event_collection, target_property, timeframe=None, timezone=None, interval=None, filters=None,
group_by=None, max_age=None):
""" Performs a minimum query
Finds the minimum value of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
    :param target_property: string, the name of the event property you would like to use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
    :param filters: array of dict, contains the filters you'd like to apply to the data
        example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
    :param group_by: string or array of strings, the name(s) of the properties you would
        like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.minimum(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by,
target_property=target_property, max_age=max_age)
def maximum(event_collection, target_property, timeframe=None, timezone=None, interval=None, filters=None,
group_by=None, max_age=None):
""" Performs a maximum query
Finds the maximum value of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
    :param target_property: string, the name of the event property you would like to use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
    :param filters: array of dict, contains the filters you'd like to apply to the data
        example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
    :param group_by: string or array of strings, the name(s) of the properties you would
        like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.maximum(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by,
target_property=target_property, max_age=max_age)
def average(event_collection, target_property, timeframe=None, timezone=None, interval=None, filters=None,
group_by=None, max_age=None):
""" Performs a average query
Finds the average of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
    :param target_property: string, the name of the event property you would like to use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
    :param filters: array of dict, contains the filters you'd like to apply to the data
        example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
    :param group_by: string or array of strings, the name(s) of the properties you would
        like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.average(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by,
target_property=target_property, max_age=max_age)
def percentile(event_collection, target_property, percentile, timeframe=None, timezone=None, interval=None,
filters=None, group_by=None, max_age=None):
""" Performs a percentile query
Finds the percentile of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
    :param target_property: string, the name of the event property you would like to use
    :param percentile: float, the specific percentile you wish to calculate,
        supporting 0-100 with two decimal places of precision, for example 99.99
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
    :param filters: array of dict, contains the filters you'd like to apply to the data
        example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
    :param group_by: string or array of strings, the name(s) of the properties you would
        like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.percentile(
event_collection=event_collection,
timeframe=timeframe,
percentile=percentile,
timezone=timezone,
interval=interval,
filters=filters,
group_by=group_by,
target_property=target_property,
max_age=max_age,
)
def count_unique(event_collection, target_property, timeframe=None, timezone=None, interval=None,
filters=None, group_by=None, max_age=None):
""" Performs a count unique query
Counts the unique values of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
    :param target_property: string, the name of the event property you would like to use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
    :param filters: array of dict, contains the filters you'd like to apply to the data
        example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
    :param group_by: string or array of strings, the name(s) of the properties you would
        like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.count_unique(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by,
target_property=target_property, max_age=max_age)
def select_unique(event_collection, target_property, timeframe=None, timezone=None, interval=None,
filters=None, group_by=None, max_age=None):
""" Performs a select unique query
Returns an array of the unique values of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
    :param target_property: string, the name of the event property you would like to use
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
    :param filters: array of dict, contains the filters you'd like to apply to the data
        example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
    :param group_by: string or array of strings, the name(s) of the properties you would
        like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.select_unique(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
interval=interval, filters=filters, group_by=group_by,
target_property=target_property, max_age=max_age)
def extraction(event_collection, timeframe=None, timezone=None, filters=None, latest=None, email=None,
property_names=None):
""" Performs a data extraction
Returns either a JSON object of events or a response
indicating an email will be sent to you with data.
:param event_collection: string, the name of the collection to query
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
    :param filters: array of dict, contains the filters you'd like to apply to the data
        example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param latest: int, the number of most recent records you'd like to return
:param email: string, optional string containing an email address to email results to
:param property_names: string or list of strings, used to limit the properties returned
"""
_initialize_client_from_environment()
return _client.extraction(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
filters=filters, latest=latest, email=email, property_names=property_names)
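# Usage sketch for extraction() (illustrative): pull up to 100 recent
# "purchases" events from the previous week, keeping only two properties.
#
#   keen.extraction("purchases",
#                   timeframe="previous_7_days",
#                   latest=100,
#                   property_names=["user.id", "purchase.price"])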
def funnel(steps, timeframe=None, timezone=None, max_age=None):
""" Performs a Funnel query
Returns an object containing the results for each step of the funnel.
:param steps: array of dictionaries, one for each step. example:
[{"event_collection":"signup","actor_property":"user.id"},
{"event_collection":"purchase","actor_property:"user.id"}]
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.funnel(steps=steps, timeframe=timeframe, timezone=timezone, max_age=max_age)
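# Usage sketch for funnel() (illustrative): how many users who signed up also
# purchased, over the previous week.
#
#   keen.funnel([
#       {"event_collection": "signup", "actor_property": "user.id"},
#       {"event_collection": "purchase", "actor_property": "user.id"},
#   ], timeframe="previous_7_days")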
def multi_analysis(event_collection, analyses, timeframe=None, interval=None,
timezone=None, filters=None, group_by=None, max_age=None):
""" Performs a multi-analysis query
Returns a dictionary of analysis results.
:param event_collection: string, the name of the collection to query
:param analyses: dict, the types of analyses you'd like to run. example:
{"total money made":{"analysis_type":"sum","target_property":"purchase.price",
"average price":{"analysis_type":"average","target_property":"purchase.price"}
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
    :param filters: array of dict, contains the filters you'd like to apply to the data
        example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
    :param group_by: string or array of strings, the name(s) of the properties you would
        like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.multi_analysis(event_collection=event_collection, timeframe=timeframe,
interval=interval, timezone=timezone, filters=filters,
group_by=group_by, analyses=analyses, max_age=max_age)
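# Usage sketch for multi_analysis() (illustrative): run a sum and an average
# over the same collection in a single request.
#
#   keen.multi_analysis("purchases",
#                       analyses={
#                           "total money made": {
#                               "analysis_type": "sum",
#                               "target_property": "purchase.price"},
#                           "average price": {
#                               "analysis_type": "average",
#                               "target_property": "purchase.price"}},
#                       timeframe="previous_7_days")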
|
|
import copy
import os
import uuid
import braintree
from unittest2 import TestCase, main
from btnamespace import Namespace, NamespaceError
braintree.Configuration.configure(
braintree.Environment.Sandbox,
os.environ['BT_MERCHANT_ID'],
os.environ['BT_PUBLIC_KEY'],
os.environ['BT_PRIVATE_KEY'],
)
def _ensure_user_exists(user_params):
try:
braintree.Customer.find(user_params['id'])
except braintree.exceptions.NotFoundError:
braintree.Customer.create(user_params)
braintree.Customer.find(user_params['id'])
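# Illustrative sketch of what the tests below exercise (assuming only the
# Namespace context manager imported above): inside a Namespace, hard-coded
# ids such as "customer_id" are transparently remapped to sandbox-unique ids,
# so repeated test runs do not collide with each other.
#
#   with Namespace():
#       braintree.Customer.create({"id": "customer_id", "first_name": "Jen"})
#       braintree.Customer.find("customer_id")  # resolved through the namespace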
class ActionOutsideNamespaceTest(TestCase):
def test_customer_operations_outside_of_namespace(self):
with self.assertRaises(braintree.exceptions.NotFoundError):
braintree.Customer.find('nonexistent')
_ensure_user_exists({
'id': 'nonnamespaced',
})
with Namespace():
pass
try:
braintree.Customer.find('nonnamespaced')
except braintree.exceptions.NotFoundError:
self.fail()
class NamespaceTest(TestCase):
def setUp(self):
self.namespace = Namespace()
self.namespace.__enter__()
self.addCleanup(self.namespace.__exit__)
class OptionsTest(NamespaceTest):
def test_omit_options_gets_empty(self):
namespace = Namespace()
self.assertEqual(namespace.options, {})
class StrictMissingOptionTest(NamespaceTest):
class MyError(Exception):
pass
def setUp(self):
_ensure_user_exists({
"id": "existing",
"first_name": "Existing",
"last_name": "User",
})
# Cleanups are run LIFO, so this runs outside of the namespace.
self.addCleanup(braintree.Customer.delete, 'existing')
super(StrictMissingOptionTest, self).setUp()
def test_existing_nonnamespace_user_found_with_default_options(self):
braintree.Customer.find('existing') # should not raise NotFoundError
def test_strict_missing_will_404_existing_nonnamespace_user(self):
self.namespace.options['strict_missing'] = True
with self.assertRaises(braintree.exceptions.NotFoundError):
braintree.Customer.find('existing')
def test_strict_missing_exception_overrides_notfounderror(self):
self.namespace.options['strict_missing'] = True
self.namespace.options['strict_missing_exception'] = self.MyError
with self.assertRaises(self.MyError):
braintree.Customer.find('existing')
def test_strict_missing_exception_overrides_strict_missing(self):
self.namespace.options['strict_missing'] = False
self.namespace.options['strict_missing_exception'] = self.MyError
with self.assertRaises(self.MyError):
braintree.Customer.find('existing')
class PatchDeleteTest(NamespaceTest):
def test_delete_customer(self):
result = braintree.Customer.create({
"id": "customer_id",
"first_name": "Jen",
"last_name": "Smith",
"company": "Braintree",
"email": "jen@example.com",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.example.com"
})
self.assertTrue(result.is_success, result)
result = braintree.Customer.delete('customer_id')
self.assertTrue(result.is_success, result)
def test_delete_credit_card(self):
result = braintree.Customer.create({
"id": "customer_id",
"first_name": "Jen",
"last_name": "Smith",
"company": "Braintree",
"email": "jen@example.com",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.example.com",
"credit_card": {
"token": "credit_card_token",
"number": "4111111111111111",
"expiration_date": "05/2015",
"cvv": "123"
}
})
self.assertTrue(result.is_success, result)
result = braintree.CreditCard.delete('credit_card_token')
self.assertTrue(result.is_success, result)
class PatchFindTest(NamespaceTest):
def test_find_customer(self):
result = braintree.Customer.create({
"id": "customer_id",
"first_name": "Jen",
"last_name": "Smith",
"company": "Braintree",
"email": "jen@example.com",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.example.com",
"credit_card": {
"token": "credit_card_token",
"number": "4111111111111111",
"expiration_date": "05/2015",
"cvv": "123"
}
})
self.assertTrue(result.is_success, result)
customer = braintree.Customer.find('customer_id')
self.assertEqual(customer.id, 'customer_id')
self.assertEqual(customer.credit_cards[0].token, 'credit_card_token')
def test_find_card(self):
result = braintree.Customer.create({
"id": "customer_id",
"first_name": "Jen",
"last_name": "Smith",
"company": "Braintree",
"email": "jen@example.com",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.example.com",
"credit_card": {
"token": "credit_card_token",
"number": "4111111111111111",
"expiration_date": "05/2015",
"cvv": "123"
}
})
self.assertTrue(result.is_success, result)
card = braintree.CreditCard.find('credit_card_token')
self.assertEqual(card.token, 'credit_card_token')
def test_find_transaction(self):
result = braintree.Transaction.sale({
"id": "txn_id",
"amount": "10.00",
"order_id": str(uuid.uuid4()), # sidestep duplicate transaction validation
"credit_card": {
"token": "credit_card_token",
"number": "4111111111111111",
"expiration_date": "05/2015",
"cvv": "123"
},
"customer": {
"id": "customer_id",
"first_name": "Drew",
"last_name": "Smith",
"company": "Braintree",
"phone": "312-555-1234",
"fax": "312-555-1235",
"website": "http://www.example.com",
"email": "drew@example.com"
},
})
self.assertTrue(result.is_success, result)
transaction = braintree.Transaction.find('txn_id')
self.assertEqual(transaction.id, 'txn_id')
self.assertEqual(transaction.customer_details.id, 'customer_id')
self.assertEqual(transaction.credit_card_details.token, 'credit_card_token')
class PatchUpdateTest(NamespaceTest):
def test_update_customer(self):
result = braintree.Customer.create({
"id": "customer_id",
"first_name": "Jen",
"last_name": "Smith",
"company": "Braintree",
"email": "jen@example.com",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.example.com",
"credit_card": {
"token": "credit_card_token",
"number": "4111111111111111",
"expiration_date": "05/2015",
"cvv": "123"
}
})
self.assertTrue(result.is_success, result)
result = braintree.Customer.update('customer_id', {"first_name": "Jenny"})
self.assertTrue(result.is_success, result)
self.assertEqual(result.customer.id, 'customer_id')
self.assertEqual(result.customer.credit_cards[0].token, 'credit_card_token')
def test_update_customer_and_existing_card(self):
result = braintree.Customer.create({
"id": "customer_id",
"first_name": "Jen",
"last_name": "Smith",
"company": "Braintree",
"email": "jen@example.com",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.example.com",
"credit_card": {
"token": "credit_card_token",
"number": "4111111111111111",
"expiration_date": "05/2015",
"cvv": "123"
}
})
self.assertTrue(result.is_success, result)
result = braintree.Customer.update('customer_id', {
'first_name': 'Jenny',
'credit_card': {
'cvv': '123',
'expiration_date': '08/2016',
'options': {
'update_existing_token': 'credit_card_token',
}
}
})
self.assertTrue(result.is_success, result)
self.assertEqual(result.customer.first_name, 'Jenny')
self.assertEqual(result.customer.credit_cards[0].expiration_date, '08/2016')
def test_update_credit_card(self):
result = braintree.Customer.create({
"id": "customer_id",
"first_name": "Jen",
"last_name": "Smith",
"company": "Braintree",
"email": "jen@example.com",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.example.com",
"credit_card": {
"token": "credit_card_token",
"number": "4111111111111111",
"expiration_date": "05/2015",
"cvv": "123"
}
})
self.assertTrue(result.is_success, result)
result = braintree.CreditCard.update('credit_card_token', {
'number': '4005519200000004',
'cvv': '123'
})
self.assertEqual(result.credit_card.token, 'credit_card_token')
class PatchCreateTest(NamespaceTest):
def setUp(self):
super(PatchCreateTest, self).setUp()
self.customer_params_no_id = {
"first_name": "Jen",
"last_name": "Smith",
"company": "Braintree",
"email": "jen@example.com",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.example.com",
}
self.card_params_no_token = {
"number": "4111111111111111",
"expiration_date": "05/2015",
"cardholder_name": "The Cardholder",
"cvv": "123",
}
def id_maps(self):
# This isn't super clean: we're using internal knowledge of action state.
return self.namespace.schema_patcher._action_state['id_maps']
def get_real_id(self, bt_class, fake_id):
return self.id_maps()[bt_class].fake_id_for[fake_id]
def get_fake_id(self, bt_class, real_id):
return self.id_maps()[bt_class].real_id_for[real_id]
def assert_self_mapping(self, bt_class, fake_id):
"""Assert that this id is mapped symetrically to itself."""
self.assertEqual(fake_id, self.get_fake_id(bt_class, fake_id))
self.assertEqual(fake_id, self.get_real_id(bt_class, fake_id))
def assert_nonself_mapping(self, bt_class, fake_id):
"""Assert that this id is mapped to something other than itself
(ie, a random braintree-provided id)."""
real_id = self.get_real_id(bt_class, fake_id)
self.assertNotEqual(fake_id, real_id)
class PatchCustomerCreate(PatchCreateTest):
def test_create_patch_no_id(self):
result = braintree.Customer.create(self.customer_params_no_id)
self.assertTrue(result.is_success, result)
real_id = result.customer.id
self.assert_self_mapping(braintree.Customer, real_id)
def test_create_patch_with_id(self):
customer_params = copy.copy(self.customer_params_no_id)
customer_params['id'] = 'original_id'
result = braintree.Customer.create(customer_params)
self.assertTrue(result.is_success, result)
self.assertEqual(result.customer.id, "original_id")
self.assert_nonself_mapping(braintree.Customer, 'original_id')
def test_create_patch_with_card_no_token(self):
customer_params = copy.copy(self.customer_params_no_id)
customer_params['id'] = 'original_id'
customer_params['credit_card'] = self.card_params_no_token
result = braintree.Customer.create(customer_params)
self.assertTrue(result.is_success, result)
self.assertEqual(result.customer.id, "original_id")
self.assertEqual(len(result.customer.credit_cards), 1)
server_tok = result.customer.credit_cards[0].token
self.assert_nonself_mapping(braintree.Customer, 'original_id')
self.assert_self_mapping(braintree.CreditCard, server_tok)
def test_create_patch_with_card_and_token(self):
customer_params = copy.copy(self.customer_params_no_id)
customer_params['id'] = 'original_id'
customer_params['credit_card'] = self.card_params_no_token
customer_params['credit_card']['token'] = 'original_tok'
result = braintree.Customer.create(customer_params)
self.assertTrue(result.is_success, result)
self.assertEqual(result.customer.id, "original_id")
self.assertEqual(len(result.customer.credit_cards), 1)
self.assertEqual(result.customer.credit_cards[0].token, "original_tok")
self.assert_nonself_mapping(braintree.Customer, 'original_id')
self.assert_nonself_mapping(braintree.CreditCard, 'original_tok')
def test_double_create_causes_error(self):
"""Creating a customer twice should return an error from the gateway."""
customer_params = copy.copy(self.customer_params_no_id)
customer_params['id'] = 'original_id'
result = braintree.Customer.create(customer_params)
self.assertTrue(result.is_success, result)
result = braintree.Customer.create(customer_params)
self.assertFalse(result.is_success, result)
def test_different_class_stale_state_is_ignored(self):
# Make a failed request that provides a card token.
customer_params = copy.copy(self.customer_params_no_id)
customer_params['credit_card'] = copy.copy(self.card_params_no_token)
customer_params['credit_card']['token'] = 'tok_from_failure'
customer_params['credit_card']['cvv'] = 'invalid cvv'
result = braintree.Customer.create(customer_params)
self.assertFalse(result.is_success, result)
# Make a good request without a card token.
result = braintree.Customer.create(self.customer_params_no_id)
self.assertTrue(result.is_success, result)
customer_id = result.customer.id
card_params = self.card_params_no_token
card_params['customer_id'] = customer_id
        result = braintree.CreditCard.create(card_params)
self.assertTrue(result.is_success, result)
# The token from the failed request shouldn't be returned.
self.assertNotEqual(result.credit_card.token, 'tok_from_failure')
class PatchTransactionCreate(PatchCreateTest):
def setUp(self):
super(PatchTransactionCreate, self).setUp()
self.txn_params_no_id = {
'amount': '10.00',
'order_id': str(uuid.uuid4()) # sidestep duplicate transaction validation
}
def test_create_patch_with_customer_card_and_token(self):
params = self.txn_params_no_id
params['id'] = 'orig_txn_id'
params['amount'] = '10.00'
params['credit_card'] = self.card_params_no_token
params['credit_card']['token'] = 'orig_cc_tok'
params['customer'] = self.customer_params_no_id
params['customer']['id'] = 'orig_cust_id'
result = braintree.Transaction.sale(params)
self.assertTrue(result.is_success, result)
self.assertEqual(result.transaction.id, "orig_txn_id")
self.assertEqual(result.transaction.customer_details.id, "orig_cust_id")
self.assertEqual(result.transaction.credit_card_details.token, "orig_cc_tok")
self.assert_nonself_mapping(braintree.Customer, 'orig_cust_id')
self.assert_nonself_mapping(braintree.CreditCard, 'orig_cc_tok')
self.assert_nonself_mapping(braintree.Transaction, 'orig_txn_id')
def test_create_with_existing_card(self):
customer_params = copy.copy(self.customer_params_no_id)
customer_params['id'] = 'orig_cust_id'
result = braintree.Customer.create(customer_params)
self.assertTrue(result.is_success, result)
customer_id = result.customer.id
card_params = self.card_params_no_token
card_params['token'] = 'orig_cc_tok'
card_params['customer_id'] = customer_id
result = braintree.CreditCard.create(card_params)
self.assertTrue(result.is_success, result)
card_tok = result.credit_card.token
txn_params = self.txn_params_no_id
txn_params['payment_method_token'] = card_tok
txn_params['id'] = 'orig_txn_id'
txn_params['customer_id'] = customer_id
result = braintree.Transaction.sale(txn_params)
self.assertTrue(result.is_success)
self.assertEqual(result.transaction.id, "orig_txn_id")
self.assertEqual(result.transaction.customer_details.id, "orig_cust_id")
self.assertEqual(result.transaction.credit_card_details.token, "orig_cc_tok")
self.assert_nonself_mapping(braintree.Customer, 'orig_cust_id')
self.assert_nonself_mapping(braintree.CreditCard, 'orig_cc_tok')
self.assert_nonself_mapping(braintree.Transaction, 'orig_txn_id')
class PatchCreditCardCreate(PatchCreateTest):
def setUp(self):
super(PatchCreditCardCreate, self).setUp()
# cards can only be added to existing customers
result = braintree.Customer.create(self.customer_params_no_id)
self.assertTrue(result.is_success, result)
self.customer_id = result.customer.id
def test_create_with_token(self):
params = copy.copy(self.card_params_no_token)
params['customer_id'] = self.customer_id
params['token'] = 'orig_tok'
result = braintree.CreditCard.create(params)
self.assertTrue(result.is_success, result)
self.assertEqual(result.credit_card.token, "orig_tok")
self.assert_nonself_mapping(braintree.CreditCard, 'orig_tok')
def test_create_no_token(self):
params = copy.copy(self.card_params_no_token)
params['customer_id'] = self.customer_id
result = braintree.CreditCard.create(params)
self.assertTrue(result.is_success, result)
server_tok = result.credit_card.token
self.assert_self_mapping(braintree.CreditCard, server_tok)
def test_same_class_stale_state_is_ignored(self):
"""Ensure creation ids from failed requests don't stick around."""
params = copy.copy(self.card_params_no_token)
params['token'] = 'first_id'
result = braintree.CreditCard.create(params)
self.assertFalse(result.is_success, result)
params['token'] = 'second_id'
params['customer_id'] = self.customer_id
result = braintree.CreditCard.create(params)
self.assertTrue(result.is_success, result)
self.assertTrue('first_id' not in self.id_maps()[braintree.CreditCard].fake_id_for)
self.assertTrue('second_id' in self.id_maps()[braintree.CreditCard].fake_id_for)
class PatchAdvancedSearch(NamespaceTest):
def test_customer_advanced_search_on_id(self):
with self.assertRaises(NamespaceError):
braintree.Customer.search(
braintree.CustomerSearch.id == 'my_id'
)
def test_customer_advanced_search_on_payment_method_token(self):
with self.assertRaises(NamespaceError):
braintree.Customer.search(
braintree.CustomerSearch.payment_method_token == 'my_tok'
)
def test_customer_advanced_search_on_payment_method_token_with_duplicates(self):
with self.assertRaises(NamespaceError):
braintree.Customer.search(
braintree.CustomerSearch.payment_method_token_with_duplicates == 'my_tok'
)
def test_transaction_advanced_search_on_id(self):
with self.assertRaises(NamespaceError):
braintree.Transaction.search(
braintree.TransactionSearch.id == 'my_id'
)
def test_transaction_advanced_search_on_payment_method_token(self):
with self.assertRaises(NamespaceError):
braintree.Transaction.search(
braintree.TransactionSearch.payment_method_token == 'my_tok'
)
def test_transaction_advanced_search_on_customer_id(self):
with self.assertRaises(NamespaceError):
braintree.Transaction.search(
braintree.TransactionSearch.customer_id == 'my_id'
)
class PatchClientTokenGenerate(NamespaceTest):
def test_client_token_generate_with_customer_id(self):
result = braintree.Customer.create({
"id": "customer_id",
"first_name": "Jen",
"last_name": "Smith",
"company": "Braintree",
"email": "jen@example.com",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.example.com"
})
self.assertTrue(result.is_success, result)
client_token = braintree.ClientToken.generate({'customer_id': 'customer_id'})
self.assertIsNotNone(client_token)
class PatchAllTest(TestCase):
@staticmethod
def _get_current_methods():
return [
braintree.Customer.__init__,
braintree.Customer.find,
braintree.Customer.create,
braintree.Customer.delete,
braintree.Customer.update,
braintree.CreditCard.__init__,
braintree.CreditCard.find,
braintree.CreditCard.create,
braintree.CreditCard.delete,
braintree.CreditCard.update,
braintree.Transaction.__init__,
braintree.Transaction.find,
braintree.Transaction.create,
]
@staticmethod
def _get_current_search_nodes():
return [
braintree.CustomerSearch.id,
braintree.CustomerSearch.payment_method_token,
braintree.CustomerSearch.payment_method_token_with_duplicates,
braintree.TransactionSearch.id,
braintree.TransactionSearch.payment_method_token,
braintree.TransactionSearch.customer_id,
]
def test_schema_methods_get_patched(self):
original_methods = self._get_current_methods()
with Namespace():
patched_methods = self._get_current_methods()
unpatched_methods = self._get_current_methods()
for original_method, patched_method, unpatched_method in \
zip(original_methods, patched_methods, unpatched_methods):
self.assertEqual(original_method, unpatched_method)
self.assertNotEqual(original_method, patched_method)
def test_advanced_search_gets_patched(self):
original_nodes = self._get_current_search_nodes()
with Namespace():
patched_nodes = self._get_current_search_nodes()
unpatched_nodes = self._get_current_search_nodes()
# NamespaceError is raised on __getattribute__ for patched nodes.
for orig_node, unpatched_node in zip(original_nodes, unpatched_nodes):
self.assertIs(orig_node, unpatched_node)
self.assertIsNone(getattr(orig_node, 'foo', None)) # should not raise NamespaceError
for node in patched_nodes:
with self.assertRaises(NamespaceError):
node.foo
if __name__ == '__main__':
main()
|
|
"""
Thierry Bertin-Mahieux (2010) Columbia University
tb2332@columbia.edu
This code contains a set of routines to create HDF5 files containing
features and metadata of a song.
This is part of the Million Song Dataset project from
LabROSA (Columbia University) and The Echo Nest.
Copyright 2010, Thierry Bertin-Mahieux
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import numpy as np
# code relies on pytables, see http://www.pytables.org
import tables
import hdf5_descriptors as DESC
from hdf5_getters import *
# musicbrainz related stuff
try:
from MBrainzDB import query as QUERYMB
except ImportError:
print 'need pg module and MBrainzDB folder of Python source code if you'
print 'want to use musicbrainz related functions, e.g. fill_hdf5_from_musicbrainz'
# description of the different arrays in the song file
ARRAY_DESC_SIMILAR_ARTISTS = 'array of similar artists Echo Nest id'
ARRAY_DESC_ARTIST_TERMS = 'array of terms (Echo Nest tags) for an artist'
ARRAY_DESC_ARTIST_TERMS_FREQ = 'array of term (Echo Nest tags) frequencies for an artist'
ARRAY_DESC_ARTIST_TERMS_WEIGHT = 'array of term (Echo Nest tags) weights for an artist'
ARRAY_DESC_SEGMENTS_START = 'array of start times of segments'
ARRAY_DESC_SEGMENTS_CONFIDENCE = 'array of confidence of segments'
ARRAY_DESC_SEGMENTS_PITCHES = 'array of pitches of segments (chromas)'
ARRAY_DESC_SEGMENTS_TIMBRE = 'array of timbre of segments (MFCC-like)'
ARRAY_DESC_SEGMENTS_LOUDNESS_MAX = 'array of max loudness of segments'
ARRAY_DESC_SEGMENTS_LOUDNESS_MAX_TIME = 'array of max loudness time of segments'
ARRAY_DESC_SEGMENTS_LOUDNESS_START = 'array of loudness of segments at start time'
ARRAY_DESC_SECTIONS_START = 'array of start times of sections'
ARRAY_DESC_SECTIONS_CONFIDENCE = 'array of confidence of sections'
ARRAY_DESC_BEATS_START = 'array of start times of beats'
ARRAY_DESC_BEATS_CONFIDENCE = 'array of confidence of beats'
ARRAY_DESC_BARS_START = 'array of start times of bars'
ARRAY_DESC_BARS_CONFIDENCE = 'array of confidence of bars'
ARRAY_DESC_TATUMS_START = 'array of start times of tatums'
ARRAY_DESC_TATUMS_CONFIDENCE = 'array of confidence of tatums'
ARRAY_DESC_ARTIST_MBTAGS = 'array of tags from MusicBrainz for an artist'
ARRAY_DESC_ARTIST_MBTAGS_COUNT = 'array of tag counts from MusicBrainz for an artist'
def fill_hdf5_from_artist(h5,artist):
"""
Fill an open hdf5 using all content in a artist object
from the Echo Nest python API
There could be overlap with fill_from_song and fill_from_track,
we assume the data is consistent!
"""
# get the metadata table, fill it
metadata = h5.root.metadata.songs
metadata.cols.artist_id[0] = artist.id
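    # Echo Nest foreign ids come back as "<idspace>:artist:<id>" (for example
    # "musicbrainz:artist:<mbid>"), so split(':')[2] below keeps only the id
    # part; the second lambda argument is the fallback when no foreign id is
    # available.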
idsplitter = lambda x,y: x.split(':')[2] if x else y
metadata.cols.artist_mbid[0] = idsplitter(artist.get_foreign_id(idspace='musicbrainz'),'')
metadata.cols.artist_playmeid[0] = int(idsplitter(artist.get_foreign_id(idspace='playme'),-1))
metadata.cols.artist_7digitalid[0] = int(idsplitter(artist.get_foreign_id(idspace='7digital'),-1))
# fill the metadata arrays
group = h5.root.metadata
metadata.cols.idx_similar_artists[0] = 0
group.similar_artists.append( np.array(map(lambda x : x.id,artist.get_similar(results=100)),dtype='string') )
metadata.cols.idx_artist_terms[0] = 0
group.artist_terms.append( np.array(map(lambda x : x.name,artist.get_terms()),dtype='string') )
group.artist_terms_freq.append( np.array(map(lambda x : x.frequency,artist.get_terms()),dtype='float64') )
group.artist_terms_weight.append( np.array(map(lambda x : x.weight,artist.get_terms()),dtype='float64') )
# done, flush
metadata.flush()
def fill_hdf5_from_song(h5,song):
"""
Fill an open hdf5 using all the content in a song object
from the Echo Nest python API.
Usually, fill_hdf5_from_track() will have been called first.
"""
# get the metadata table, fill it
metadata = h5.root.metadata.songs
metadata.cols.artist_familiarity[0] = song.get_artist_familiarity()
metadata.cols.artist_hotttnesss[0] = song.get_artist_hotttnesss()
metadata.cols.artist_id[0] = song.artist_id
metadata.cols.artist_latitude[0] = song.get_artist_location().latitude
metadata.cols.artist_location[0] = song.get_artist_location().location.encode('utf-8') if song.get_artist_location().location else ''
metadata.cols.artist_longitude[0] = song.get_artist_location().longitude
metadata.cols.artist_name[0] = song.artist_name.encode('utf-8') if song.artist_name else ''
metadata.cols.song_id[0] = song.id
metadata.cols.song_hotttnesss[0] = song.get_song_hotttnesss()
metadata.cols.title[0] = song.title.encode('utf-8') if song.title else ''
metadata.flush()
# get the analysis table
analysis = h5.root.analysis.songs
    analysis.cols.danceability[0] = song.get_audio_summary().danceability
    analysis.cols.energy[0] = song.get_audio_summary().energy
analysis.flush()
def fill_hdf5_from_track(h5,track):
"""
Fill an open hdf5 using all the content in a track object
from the Echo Nest python API
"""
# get the metadata table, fill it
metadata = h5.root.metadata.songs
#metadata.cols.analyzer_version[0] = track.analyzer_version
metadata.cols.artist_name[0] = getattr(track, 'artist', u'').encode('utf-8')
metadata.cols.release[0] = getattr(track, 'release', u'').encode('utf-8')
metadata.cols.title[0] = getattr(track, 'title', u'').encode('utf-8')
idsplitter_7digital = lambda x: int(x.split(':')[2]) if x and x.split(':')[0]=='7digital' else -1
metadata.cols.release_7digitalid[0] = idsplitter_7digital(track.foreign_release_id)
metadata.cols.track_7digitalid[0] = idsplitter_7digital(track.foreign_id)
metadata.flush()
# get the analysis table, fill it
analysis = h5.root.analysis.songs
analysis.cols.analysis_sample_rate[0] = track.analysis_sample_rate
analysis.cols.audio_md5[0] = track.audio_md5
analysis.cols.duration[0] = track.duration
analysis.cols.end_of_fade_in[0] = track.end_of_fade_in
analysis.cols.key[0] = track.key
analysis.cols.key_confidence[0] = track.key_confidence
analysis.cols.loudness[0] = track.loudness
analysis.cols.mode[0] = track.mode
analysis.cols.mode_confidence[0] = track.mode_confidence
analysis.cols.start_of_fade_out[0] = track.start_of_fade_out
analysis.cols.tempo[0] = track.tempo
analysis.cols.time_signature[0] = track.time_signature
analysis.cols.time_signature_confidence[0] = track.time_signature_confidence
analysis.cols.track_id[0] = track.id
analysis.flush()
group = h5.root.analysis
# analysis arrays (segments)
analysis.cols.idx_segments_start[0] = 0
group.segments_start.append( np.array(map(lambda x : x['start'],track.segments),dtype='float64') )
analysis.cols.idx_segments_confidence[0] = 0
group.segments_confidence.append( np.array(map(lambda x : x['confidence'],track.segments),dtype='float64') )
analysis.cols.idx_segments_pitches[0] = 0
group.segments_pitches.append( np.array(map(lambda x : x['pitches'],track.segments),dtype='float64') )
analysis.cols.idx_segments_timbre[0] = 0
group.segments_timbre.append( np.array(map(lambda x : x['timbre'],track.segments),dtype='float64') )
analysis.cols.idx_segments_loudness_max[0] = 0
group.segments_loudness_max.append( np.array(map(lambda x : x['loudness_max'],track.segments),dtype='float64') )
analysis.cols.idx_segments_loudness_max_time[0] = 0
group.segments_loudness_max_time.append( np.array(map(lambda x : x['loudness_max_time'],track.segments),dtype='float64') )
analysis.cols.idx_segments_loudness_start[0] = 0
group.segments_loudness_start.append( np.array(map(lambda x : x['loudness_start'],track.segments),dtype='float64') )
# analysis arrays (sections)
analysis.cols.idx_sections_start[0] = 0
group.sections_start.append( np.array(map(lambda x : x['start'],track.sections),dtype='float64') )
analysis.cols.idx_sections_confidence[0] = 0
group.sections_confidence.append( np.array(map(lambda x : x['confidence'],track.sections),dtype='float64') )
    # analysis arrays (beats)
analysis.cols.idx_beats_start[0] = 0
group.beats_start.append( np.array(map(lambda x : x['start'],track.beats),dtype='float64') )
analysis.cols.idx_beats_confidence[0] = 0
group.beats_confidence.append( np.array(map(lambda x : x['confidence'],track.beats),dtype='float64') )
# analysis arrays (bars)
analysis.cols.idx_bars_start[0] = 0
group.bars_start.append( np.array(map(lambda x : x['start'],track.bars),dtype='float64') )
analysis.cols.idx_bars_confidence[0] = 0
group.bars_confidence.append( np.array(map(lambda x : x['confidence'],track.bars),dtype='float64') )
# analysis arrays (tatums)
analysis.cols.idx_tatums_start[0] = 0
group.tatums_start.append( np.array(map(lambda x : x['start'],track.tatums),dtype='float64') )
analysis.cols.idx_tatums_confidence[0] = 0
group.tatums_confidence.append( np.array(map(lambda x : x['confidence'],track.tatums),dtype='float64') )
analysis.flush()
# DONE
def fill_hdf5_from_musicbrainz(h5,connect):
"""
Fill an open hdf5 using the musicbrainz server and data.
We assume this code is run after fill_hdf5_from_artist/song
because we need artist_mbid, artist_name, release and title
INPUT
h5 - open song file (append mode)
connect - open pg connection to musicbrainz_db
"""
# get info from h5 song file
ambid = h5.root.metadata.songs.cols.artist_mbid[0]
artist_name = h5.root.metadata.songs.cols.artist_name[0]
release = h5.root.metadata.songs.cols.release[0]
title = h5.root.metadata.songs.cols.title[0]
# get the musicbrainz table, fill it
musicbrainz = h5.root.musicbrainz.songs
musicbrainz.cols.year[0] = QUERYMB.find_year_safemode(connect,ambid,title,release,artist_name)
# fill the musicbrainz arrays
group = h5.root.musicbrainz
musicbrainz.cols.idx_artist_mbtags[0] = 0
tags,tagcount = QUERYMB.get_artist_tags(connect, ambid, maxtags=20)
group.artist_mbtags.append( np.array(tags,dtype='string') )
group.artist_mbtags_count.append( np.array(tagcount,dtype='float64') )
# done, flush
musicbrainz.flush()
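# Illustrative sketch (not part of the original module): one way the fill_*
# helpers can be chained into a single-song workflow. `artist`, `song` and
# `track` are assumed to be objects from the Echo Nest python API and `connect`
# an open connection to a musicbrainz database; all names are hypothetical.
# fill_hdf5_from_artist / fill_hdf5_from_song are the helpers referenced in the
# docstring of fill_hdf5_from_musicbrainz above.
def _example_build_single_song_file(h5filename, artist, song, track, connect):
    """Sketch: create a song file and fill it from the various sources."""
    create_song_file(h5filename, force=True)       # groups, tables, empty arrays
    h5 = open_h5_file_append(h5filename)
    try:
        fill_hdf5_from_artist(h5, artist)          # artist metadata (incl. artist_mbid)
        fill_hdf5_from_song(h5, song)              # song-level metadata
        fill_hdf5_from_track(h5, track)            # Echo Nest analysis + arrays
        fill_hdf5_from_musicbrainz(h5, connect)    # year + mbtags (needs metadata above)
    finally:
        h5.close()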
def fill_hdf5_aggregate_file(h5,h5_filenames,summaryfile=False):
"""
Fill an open hdf5 aggregate file using all the content from all the HDF5 files
listed as filenames. These HDF5 files are supposed to be filled already.
    Useful to create one big HDF5 file from many, thus improving IO speed.
    For most of the info, we simply use one row per song.
    For the arrays (e.g. segments_start) we need the indices (e.g. idx_segments_start)
to know which part of the array belongs to one particular song.
If summaryfile=True, we skip arrays (indices all 0)
"""
# counter
counter = 0
# iterate over filenames
for h5idx,h5filename in enumerate(h5_filenames):
# open h5 file
h5tocopy = open_h5_file_read(h5filename)
# get number of songs in new file
nSongs = get_num_songs(h5tocopy)
# iterate over songs in one HDF5 (1 if regular file, more if aggregate file)
for songidx in xrange(nSongs):
# METADATA
row = h5.root.metadata.songs.row
row["artist_familiarity"] = get_artist_familiarity(h5tocopy,songidx)
row["artist_hotttnesss"] = get_artist_hotttnesss(h5tocopy,songidx)
row["artist_id"] = get_artist_id(h5tocopy,songidx)
row["artist_mbid"] = get_artist_mbid(h5tocopy,songidx)
row["artist_playmeid"] = get_artist_playmeid(h5tocopy,songidx)
row["artist_7digitalid"] = get_artist_7digitalid(h5tocopy,songidx)
row["artist_latitude"] = get_artist_latitude(h5tocopy,songidx)
row["artist_location"] = get_artist_location(h5tocopy,songidx)
row["artist_longitude"] = get_artist_longitude(h5tocopy,songidx)
row["artist_name"] = get_artist_name(h5tocopy,songidx)
row["release"] = get_release(h5tocopy,songidx)
row["release_7digitalid"] = get_release_7digitalid(h5tocopy,songidx)
row["song_id"] = get_song_id(h5tocopy,songidx)
row["song_hotttnesss"] = get_song_hotttnesss(h5tocopy,songidx)
row["title"] = get_title(h5tocopy,songidx)
row["track_7digitalid"] = get_track_7digitalid(h5tocopy,songidx)
# INDICES
if not summaryfile:
if counter == 0 : # we're first row
row["idx_similar_artists"] = 0
row["idx_artist_terms"] = 0
else:
row["idx_similar_artists"] = h5.root.metadata.similar_artists.shape[0]
row["idx_artist_terms"] = h5.root.metadata.artist_terms.shape[0]
row.append()
h5.root.metadata.songs.flush()
# ARRAYS
if not summaryfile:
h5.root.metadata.similar_artists.append( get_similar_artists(h5tocopy,songidx) )
h5.root.metadata.artist_terms.append( get_artist_terms(h5tocopy,songidx) )
h5.root.metadata.artist_terms_freq.append( get_artist_terms_freq(h5tocopy,songidx) )
h5.root.metadata.artist_terms_weight.append( get_artist_terms_weight(h5tocopy,songidx) )
# ANALYSIS
row = h5.root.analysis.songs.row
row["analysis_sample_rate"] = get_analysis_sample_rate(h5tocopy,songidx)
row["audio_md5"] = get_audio_md5(h5tocopy,songidx)
row["danceability"] = get_danceability(h5tocopy,songidx)
row["duration"] = get_duration(h5tocopy,songidx)
row["end_of_fade_in"] = get_end_of_fade_in(h5tocopy,songidx)
row["energy"] = get_energy(h5tocopy,songidx)
row["key"] = get_key(h5tocopy,songidx)
row["key_confidence"] = get_key_confidence(h5tocopy,songidx)
row["loudness"] = get_loudness(h5tocopy,songidx)
row["mode"] = get_mode(h5tocopy,songidx)
row["mode_confidence"] = get_mode_confidence(h5tocopy,songidx)
row["start_of_fade_out"] = get_start_of_fade_out(h5tocopy,songidx)
row["tempo"] = get_tempo(h5tocopy,songidx)
row["time_signature"] = get_time_signature(h5tocopy,songidx)
row["time_signature_confidence"] = get_time_signature_confidence(h5tocopy,songidx)
row["track_id"] = get_track_id(h5tocopy,songidx)
# INDICES
if not summaryfile:
if counter == 0 : # we're first row
row["idx_segments_start"] = 0
row["idx_segments_confidence"] = 0
row["idx_segments_pitches"] = 0
row["idx_segments_timbre"] = 0
row["idx_segments_loudness_max"] = 0
row["idx_segments_loudness_max_time"] = 0
row["idx_segments_loudness_start"] = 0
row["idx_sections_start"] = 0
row["idx_sections_confidence"] = 0
row["idx_beats_start"] = 0
row["idx_beats_confidence"] = 0
row["idx_bars_start"] = 0
row["idx_bars_confidence"] = 0
row["idx_tatums_start"] = 0
row["idx_tatums_confidence"] = 0
else : # check the current shape of the arrays
row["idx_segments_start"] = h5.root.analysis.segments_start.shape[0]
row["idx_segments_confidence"] = h5.root.analysis.segments_confidence.shape[0]
row["idx_segments_pitches"] = h5.root.analysis.segments_pitches.shape[0]
row["idx_segments_timbre"] = h5.root.analysis.segments_timbre.shape[0]
row["idx_segments_loudness_max"] = h5.root.analysis.segments_loudness_max.shape[0]
row["idx_segments_loudness_max_time"] = h5.root.analysis.segments_loudness_max_time.shape[0]
row["idx_segments_loudness_start"] = h5.root.analysis.segments_loudness_start.shape[0]
row["idx_sections_start"] = h5.root.analysis.sections_start.shape[0]
row["idx_sections_confidence"] = h5.root.analysis.sections_confidence.shape[0]
row["idx_beats_start"] = h5.root.analysis.beats_start.shape[0]
row["idx_beats_confidence"] = h5.root.analysis.beats_confidence.shape[0]
row["idx_bars_start"] = h5.root.analysis.bars_start.shape[0]
row["idx_bars_confidence"] = h5.root.analysis.bars_confidence.shape[0]
row["idx_tatums_start"] = h5.root.analysis.tatums_start.shape[0]
row["idx_tatums_confidence"] = h5.root.analysis.tatums_confidence.shape[0]
row.append()
h5.root.analysis.songs.flush()
# ARRAYS
if not summaryfile:
h5.root.analysis.segments_start.append( get_segments_start(h5tocopy,songidx) )
h5.root.analysis.segments_confidence.append( get_segments_confidence(h5tocopy,songidx) )
h5.root.analysis.segments_pitches.append( get_segments_pitches(h5tocopy,songidx) )
h5.root.analysis.segments_timbre.append( get_segments_timbre(h5tocopy,songidx) )
h5.root.analysis.segments_loudness_max.append( get_segments_loudness_max(h5tocopy,songidx) )
h5.root.analysis.segments_loudness_max_time.append( get_segments_loudness_max_time(h5tocopy,songidx) )
h5.root.analysis.segments_loudness_start.append( get_segments_loudness_start(h5tocopy,songidx) )
h5.root.analysis.sections_start.append( get_sections_start(h5tocopy,songidx) )
h5.root.analysis.sections_confidence.append( get_sections_confidence(h5tocopy,songidx) )
h5.root.analysis.beats_start.append( get_beats_start(h5tocopy,songidx) )
h5.root.analysis.beats_confidence.append( get_beats_confidence(h5tocopy,songidx) )
h5.root.analysis.bars_start.append( get_bars_start(h5tocopy,songidx) )
h5.root.analysis.bars_confidence.append( get_bars_confidence(h5tocopy,songidx) )
h5.root.analysis.tatums_start.append( get_tatums_start(h5tocopy,songidx) )
h5.root.analysis.tatums_confidence.append( get_tatums_confidence(h5tocopy,songidx) )
# MUSICBRAINZ
row = h5.root.musicbrainz.songs.row
row["year"] = get_year(h5tocopy,songidx)
# INDICES
if not summaryfile:
if counter == 0 : # we're first row
row["idx_artist_mbtags"] = 0
else:
row["idx_artist_mbtags"] = h5.root.musicbrainz.artist_mbtags.shape[0]
row.append()
h5.root.musicbrainz.songs.flush()
# ARRAYS
if not summaryfile:
h5.root.musicbrainz.artist_mbtags.append( get_artist_mbtags(h5tocopy,songidx) )
h5.root.musicbrainz.artist_mbtags_count.append( get_artist_mbtags_count(h5tocopy,songidx) )
# counter
counter += 1
# close h5 file
h5tocopy.close()
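# Illustrative sketch (assumption, not part of the original code): how the idx_*
# columns written above can be used to slice one song's data back out of an
# aggregate (non-summary) file. The end of a song's slice is the next song's
# index, or the array length for the last song.
def _example_segments_for_song(h5, songidx):
    """Sketch: return the segments_start values belonging to song `songidx`."""
    songs = h5.root.analysis.songs
    start = songs.cols.idx_segments_start[songidx]
    if songidx + 1 < songs.nrows:
        stop = songs.cols.idx_segments_start[songidx + 1]
    else:
        stop = h5.root.analysis.segments_start.shape[0]
    return h5.root.analysis.segments_start[start:stop]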
def create_song_file(h5filename,title='H5 Song File',force=False,complevel=1):
"""
Create a new HDF5 file for a new song.
    If force=False, refuse to overwrite an existing file
    and raise a ValueError in that case.
    Another optional parameter is the title of the H5 file.
    Sets up the groups, each containing a table 'songs' with one row:
    - metadata
    - analysis
    - musicbrainz
    DETAIL
    - we set the compression level to 1 by default; it uses the ZLIB library.
      To disable compression, set it to 0.
"""
# check if file exists
if not force:
if os.path.exists(h5filename):
raise ValueError('file exists, can not create HDF5 song file')
# create the H5 file
    h5 = tables.openFile(h5filename, mode='w', title=title)
# set filter level
h5.filters = tables.Filters(complevel=complevel,complib='zlib')
# setup the groups and tables
# group metadata
group = h5.createGroup("/",'metadata','metadata about the song')
table = h5.createTable(group,'songs',DESC.SongMetaData,'table of metadata for one song')
r = table.row
r.append() # filled with default values 0 or '' (depending on type)
table.flush()
# group analysis
group = h5.createGroup("/",'analysis','Echo Nest analysis of the song')
table = h5.createTable(group,'songs',DESC.SongAnalysis,'table of Echo Nest analysis for one song')
r = table.row
r.append() # filled with default values 0 or '' (depending on type)
table.flush()
# group musicbrainz
group = h5.createGroup("/",'musicbrainz','data about the song coming from MusicBrainz')
table = h5.createTable(group,'songs',DESC.SongMusicBrainz,'table of data coming from MusicBrainz')
r = table.row
r.append() # filled with default values 0 or '' (depending on type)
table.flush()
# create arrays
create_all_arrays(h5,expectedrows=3)
# close it, done
h5.close()
def create_aggregate_file(h5filename,title='H5 Aggregate File',force=False,expectedrows=1000,complevel=1,
summaryfile=False):
"""
Create a new HDF5 file for all songs.
    It will contain everything that is in regular song files.
    Tables are created empty.
    If force=False, refuse to overwrite an existing file
    and raise a ValueError in that case.
    If summaryfile=True, creates a summary file, i.e. one with no arrays.
    Another optional parameter is the title of the H5 file.
    DETAILS
    - if you create a very large file, try to approximate the number of
      data points (songs) correctly; it speeds things up for the arrays
      (by setting the chunking correctly).
    - we set the compression level to 1 by default; it uses the ZLIB library.
      To disable compression, set it to 0.
    Sets up the groups, each containing an (empty) table 'songs':
    - metadata
    - analysis
    - musicbrainz
"""
# check if file exists
if not force:
if os.path.exists(h5filename):
raise ValueError('file exists, can not create HDF5 song file')
# summary file? change title
if summaryfile:
title = 'H5 Summary File'
# create the H5 file
    h5 = tables.openFile(h5filename, mode='w', title=title)
# set filter level
h5.filters = tables.Filters(complevel=complevel,complib='zlib')
# setup the groups and tables
# group metadata
group = h5.createGroup("/",'metadata','metadata about the song')
table = h5.createTable(group,'songs',DESC.SongMetaData,'table of metadata for one song',
expectedrows=expectedrows)
# group analysis
group = h5.createGroup("/",'analysis','Echo Nest analysis of the song')
table = h5.createTable(group,'songs',DESC.SongAnalysis,'table of Echo Nest analysis for one song',
expectedrows=expectedrows)
# group musicbrainz
group = h5.createGroup("/",'musicbrainz','data about the song coming from MusicBrainz')
table = h5.createTable(group,'songs',DESC.SongMusicBrainz,'table of data coming from MusicBrainz',
expectedrows=expectedrows)
# create arrays
if not summaryfile:
create_all_arrays(h5,expectedrows=expectedrows)
# close it, done
h5.close()
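# Illustrative sketch (assumption): a typical way to combine many per-song files
# into one aggregate file with the helpers in this module. `song_filenames` is a
# hypothetical list of paths to already-filled single-song HDF5 files.
def _example_build_aggregate(aggregate_filename, song_filenames):
    """Sketch: create an aggregate file and copy every listed song into it."""
    create_aggregate_file(aggregate_filename, force=True,
                          expectedrows=len(song_filenames))
    h5 = open_h5_file_append(aggregate_filename)
    try:
        fill_hdf5_aggregate_file(h5, song_filenames)
    finally:
        h5.close()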
def create_all_arrays(h5,expectedrows=1000):
"""
    Utility function used by both create_song_file and create_aggregate_file;
    creates all the EArrays (empty).
INPUT
h5 - hdf5 file, open with write or append permissions
              metadata, analysis and musicbrainz groups already exist!
"""
# group metadata arrays
group = h5.root.metadata
h5.createEArray(where=group,name='similar_artists',atom=tables.StringAtom(20,shape=()),shape=(0,),title=ARRAY_DESC_SIMILAR_ARTISTS)
h5.createEArray(group,'artist_terms',tables.StringAtom(256,shape=()),(0,),ARRAY_DESC_ARTIST_TERMS,
expectedrows=expectedrows*40)
h5.createEArray(group,'artist_terms_freq',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_ARTIST_TERMS_FREQ,
expectedrows=expectedrows*40)
h5.createEArray(group,'artist_terms_weight',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_ARTIST_TERMS_WEIGHT,
expectedrows=expectedrows*40)
# group analysis arrays
group = h5.root.analysis
h5.createEArray(where=group,name='segments_start',atom=tables.Float64Atom(shape=()),shape=(0,),title=ARRAY_DESC_SEGMENTS_START)
h5.createEArray(group,'segments_confidence',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_SEGMENTS_CONFIDENCE,
expectedrows=expectedrows*300)
h5.createEArray(group,'segments_pitches',tables.Float64Atom(shape=()),(0,12),ARRAY_DESC_SEGMENTS_PITCHES,
expectedrows=expectedrows*300)
h5.createEArray(group,'segments_timbre',tables.Float64Atom(shape=()),(0,12),ARRAY_DESC_SEGMENTS_TIMBRE,
expectedrows=expectedrows*300)
h5.createEArray(group,'segments_loudness_max',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_SEGMENTS_LOUDNESS_MAX,
expectedrows=expectedrows*300)
h5.createEArray(group,'segments_loudness_max_time',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_SEGMENTS_LOUDNESS_MAX_TIME,
expectedrows=expectedrows*300)
h5.createEArray(group,'segments_loudness_start',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_SEGMENTS_LOUDNESS_START,
expectedrows=expectedrows*300)
h5.createEArray(group,'sections_start',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_SECTIONS_START,
expectedrows=expectedrows*300)
h5.createEArray(group,'sections_confidence',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_SECTIONS_CONFIDENCE,
expectedrows=expectedrows*300)
h5.createEArray(group,'beats_start',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_BEATS_START,
expectedrows=expectedrows*300)
h5.createEArray(group,'beats_confidence',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_BEATS_CONFIDENCE,
expectedrows=expectedrows*300)
h5.createEArray(group,'bars_start',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_BARS_START,
expectedrows=expectedrows*300)
h5.createEArray(group,'bars_confidence',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_BARS_CONFIDENCE,
expectedrows=expectedrows*300)
h5.createEArray(group,'tatums_start',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_TATUMS_START,
expectedrows=expectedrows*300)
h5.createEArray(group,'tatums_confidence',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_TATUMS_CONFIDENCE,
expectedrows=expectedrows*300)
# group musicbrainz arrays
group = h5.root.musicbrainz
h5.createEArray(where=group,name='artist_mbtags',atom=tables.StringAtom(256,shape=()),shape=(0,),title=ARRAY_DESC_ARTIST_MBTAGS,
expectedrows=expectedrows*5)
h5.createEArray(group,'artist_mbtags_count',tables.IntAtom(shape=()),(0,),ARRAY_DESC_ARTIST_MBTAGS_COUNT,
expectedrows=expectedrows*5)
def open_h5_file_read(h5filename):
"""
Open an existing H5 in read mode.
"""
return tables.openFile(h5filename, mode='r')
def open_h5_file_append(h5filename):
"""
Open an existing H5 in append mode.
"""
return tables.openFile(h5filename, mode='a')
################################################ MAIN #####################################
def die_with_usage():
""" HELP MENU """
print 'hdf5_utils.py'
print 'by T. Bertin-Mahieux (2010) Columbia University'
print ''
print 'should be used as a library, contains functions to create'
print 'HDF5 files for the Million Song Dataset project'
sys.exit(0)
if __name__ == '__main__':
# help menu
die_with_usage()
|
|
# lexer.py
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides the Lexer class for parsing template strings into parse trees."""
import re, codecs
from mako import parsetree, exceptions
from mako.pygen import adjust_whitespace
_regexp_cache = {}
class Lexer(object):
def __init__(self, text, filename=None, disable_unicode=False, input_encoding=None, preprocessor=None):
self.text = text
self.filename = filename
self.template = parsetree.TemplateNode(self.filename)
self.matched_lineno = 1
self.matched_charpos = 0
self.lineno = 1
self.match_position = 0
self.tag = []
self.control_line = []
self.disable_unicode = disable_unicode
self.encoding = input_encoding
if preprocessor is None:
self.preprocessor = []
elif not hasattr(preprocessor, '__iter__'):
self.preprocessor = [preprocessor]
else:
self.preprocessor = preprocessor
exception_kwargs = property(lambda self:{'source':self.text, 'lineno':self.matched_lineno, 'pos':self.matched_charpos, 'filename':self.filename})
def match(self, regexp, flags=None):
"""match the given regular expression string and flags to the current text position.
if a match occurs, update the current text and line position."""
mp = self.match_position
try:
reg = _regexp_cache[(regexp, flags)]
except KeyError:
if flags:
reg = re.compile(regexp, flags)
else:
reg = re.compile(regexp)
_regexp_cache[(regexp, flags)] = reg
match = reg.match(self.text, self.match_position)
if match:
(start, end) = match.span()
if end == start:
self.match_position = end + 1
else:
self.match_position = end
self.matched_lineno = self.lineno
lines = re.findall(r"\n", self.text[mp:self.match_position])
cp = mp - 1
while (cp >= 0 and cp<self.textlength and self.text[cp] != '\n'):
cp -=1
self.matched_charpos = mp - cp
self.lineno += len(lines)
#print "MATCHED:", match.group(0), "LINE START:", self.matched_lineno, "LINE END:", self.lineno
#print "MATCH:", regexp, "\n", self.text[mp : mp + 15], (match and "TRUE" or "FALSE")
return match
def parse_until_text(self, *text):
startpos = self.match_position
while True:
match = self.match(r'#.*\n')
if match:
continue
match = self.match(r'(\"\"\"|\'\'\'|\"|\')')
if match:
m = self.match(r'.*?%s' % match.group(1), re.S)
if not m:
raise exceptions.SyntaxException("Unmatched '%s'" % match.group(1), **self.exception_kwargs)
else:
match = self.match(r'(%s)' % r'|'.join(text))
if match:
return (self.text[startpos:self.match_position-len(match.group(1))], match.group(1))
else:
match = self.match(r".*?(?=\"|\'|#|%s)" % r'|'.join(text), re.S)
if not match:
raise exceptions.SyntaxException("Expected: %s" % ','.join(text), **self.exception_kwargs)
def append_node(self, nodecls, *args, **kwargs):
kwargs.setdefault('source', self.text)
kwargs.setdefault('lineno', self.matched_lineno)
kwargs.setdefault('pos', self.matched_charpos)
kwargs['filename'] = self.filename
node = nodecls(*args, **kwargs)
if len(self.tag):
self.tag[-1].nodes.append(node)
else:
self.template.nodes.append(node)
if isinstance(node, parsetree.Tag):
if len(self.tag):
node.parent = self.tag[-1]
self.tag.append(node)
elif isinstance(node, parsetree.ControlLine):
if node.isend:
self.control_line.pop()
elif node.is_primary:
self.control_line.append(node)
elif len(self.control_line) and not self.control_line[-1].is_ternary(node.keyword):
raise exceptions.SyntaxException("Keyword '%s' not a legal ternary for keyword '%s'" % (node.keyword, self.control_line[-1].keyword), **self.exception_kwargs)
def escape_code(self, text):
if not self.disable_unicode and self.encoding:
return text.encode('ascii', 'backslashreplace')
else:
return text
def parse(self):
for preproc in self.preprocessor:
self.text = preproc(self.text)
if not isinstance(self.text, unicode) and self.text.startswith(codecs.BOM_UTF8):
self.text = self.text[len(codecs.BOM_UTF8):]
parsed_encoding = 'utf-8'
me = self.match_encoding()
if me is not None and me != 'utf-8':
raise exceptions.CompileException("Found utf-8 BOM in file, with conflicting magic encoding comment of '%s'" % me, self.text.decode('utf-8', 'ignore'), 0, 0, self.filename)
else:
parsed_encoding = self.match_encoding()
if parsed_encoding:
self.encoding = parsed_encoding
if not self.disable_unicode and not isinstance(self.text, unicode):
if self.encoding:
try:
self.text = self.text.decode(self.encoding)
except UnicodeDecodeError, e:
raise exceptions.CompileException("Unicode decode operation of encoding '%s' failed" % self.encoding, self.text.decode('utf-8', 'ignore'), 0, 0, self.filename)
else:
try:
self.text = self.text.decode()
except UnicodeDecodeError, e:
raise exceptions.CompileException("Could not read template using encoding of 'ascii'. Did you forget a magic encoding comment?", self.text.decode('utf-8', 'ignore'), 0, 0, self.filename)
self.textlength = len(self.text)
while (True):
if self.match_position > self.textlength:
break
if self.match_end():
break
if self.match_expression():
continue
if self.match_control_line():
continue
if self.match_comment():
continue
if self.match_tag_start():
continue
if self.match_tag_end():
continue
if self.match_python_block():
continue
if self.match_text():
continue
if self.match_position > self.textlength:
break
raise exceptions.CompileException("assertion failed")
if len(self.tag):
raise exceptions.SyntaxException("Unclosed tag: <%%%s>" % self.tag[-1].keyword, **self.exception_kwargs)
if len(self.control_line):
raise exceptions.SyntaxException("Unterminated control keyword: '%s'" % self.control_line[-1].keyword, self.text, self.control_line[-1].lineno, self.control_line[-1].pos, self.filename)
return self.template
def match_encoding(self):
match = self.match(r'#.*coding[:=]\s*([-\w.]+).*\r?\n')
if match:
return match.group(1)
else:
return None
def match_tag_start(self):
match = self.match(r'''
\<% # opening tag
([\w\.\:]+) # keyword
((?:\s+\w+|\s*=\s*|".*?"|'.*?')*) # attrname, = sign, string expression
\s* # more whitespace
(/)?> # closing
''',
re.I | re.S | re.X)
if match:
(keyword, attr, isend) = (match.group(1), match.group(2), match.group(3))
self.keyword = keyword
attributes = {}
if attr:
for att in re.findall(r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr):
(key, val1, val2) = att
text = val1 or val2
text = text.replace('\r\n', '\n')
attributes[key] = self.escape_code(text)
self.append_node(parsetree.Tag, keyword, attributes)
if isend:
self.tag.pop()
else:
if keyword == 'text':
match = self.match(r'(.*?)(?=\</%text>)', re.S)
if not match:
raise exceptions.SyntaxException("Unclosed tag: <%%%s>" % self.tag[-1].keyword, **self.exception_kwargs)
self.append_node(parsetree.Text, match.group(1))
return self.match_tag_end()
return True
else:
return False
def match_tag_end(self):
match = self.match(r'\</%[\t ]*(.+?)[\t ]*>')
if match:
if not len(self.tag):
raise exceptions.SyntaxException("Closing tag without opening tag: </%%%s>" % match.group(1), **self.exception_kwargs)
elif self.tag[-1].keyword != match.group(1):
raise exceptions.SyntaxException("Closing tag </%%%s> does not match tag: <%%%s>" % (match.group(1), self.tag[-1].keyword), **self.exception_kwargs)
self.tag.pop()
return True
else:
return False
def match_end(self):
match = self.match(r'\Z', re.S)
if match:
string = match.group()
if string:
return string
else:
return True
else:
return False
def match_text(self):
match = self.match(r"""
(.*?) # anything, followed by:
(
(?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based comment preceded by a consumed \n and whitespace
|
(?=\${) # an expression
|
(?=\#\*) # multiline comment
|
(?=</?[%&]) # a substitution or block or call start or end
# - don't consume
|
(\\\r?\n) # an escaped newline - throw away
|
\Z # end of string
)""", re.X | re.S)
if match:
text = match.group(1)
self.append_node(parsetree.Text, text)
return True
else:
return False
def match_python_block(self):
match = self.match(r"<%(!)?")
if match:
(line, pos) = (self.matched_lineno, self.matched_charpos)
(text, end) = self.parse_until_text(r'%>')
text = adjust_whitespace(text) + "\n" # the trailing newline helps compiler.parse() not complain about indentation
self.append_node(parsetree.Code, self.escape_code(text), match.group(1)=='!', lineno=line, pos=pos)
return True
else:
return False
def match_expression(self):
match = self.match(r"\${")
if match:
(line, pos) = (self.matched_lineno, self.matched_charpos)
(text, end) = self.parse_until_text(r'\|', r'}')
if end == '|':
(escapes, end) = self.parse_until_text(r'}')
else:
escapes = ""
text = text.replace('\r\n', '\n')
self.append_node(parsetree.Expression, self.escape_code(text), escapes.strip(), lineno=line, pos=pos)
return True
else:
return False
def match_control_line(self):
match = self.match(r"(?<=^)[\t ]*(%|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)(?:\r?\n|\Z)", re.M)
if match:
operator = match.group(1)
text = match.group(2)
if operator == '%':
m2 = re.match(r'(end)?(\w+)\s*(.*)', text)
if not m2:
raise exceptions.SyntaxException("Invalid control line: '%s'" % text, **self.exception_kwargs)
(isend, keyword) = m2.group(1, 2)
isend = (isend is not None)
if isend:
if not len(self.control_line):
raise exceptions.SyntaxException("No starting keyword '%s' for '%s'" % (keyword, text), **self.exception_kwargs)
elif self.control_line[-1].keyword != keyword:
raise exceptions.SyntaxException("Keyword '%s' doesn't match keyword '%s'" % (text, self.control_line[-1].keyword), **self.exception_kwargs)
self.append_node(parsetree.ControlLine, keyword, isend, self.escape_code(text))
else:
self.append_node(parsetree.Comment, text)
return True
else:
return False
def match_comment(self):
"""matches the multiline version of a comment"""
match = self.match(r"<%doc>(.*?)</%doc>", re.S)
if match:
self.append_node(parsetree.Comment, match.group(1))
return True
else:
return False
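# Illustrative sketch (assumption, not part of the Mako source): minimal usage of
# the Lexer above. parse() returns a parsetree.TemplateNode whose .nodes list
# contains Text/Expression/ControlLine/... nodes.
def _example_lex(template_text=u"hello ${name}\n"):
    """Sketch: lex a template string and return its top-level parse nodes."""
    template = Lexer(template_text, filename='<memory>').parse()
    return template.nodes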
|
|
import uuid
from collections import OrderedDict, defaultdict
from collections.abc import Sequence
from django import forms
from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
from django.forms.utils import ErrorList
from django.template.loader import render_to_string
from django.templatetags.static import static
from django.utils.html import format_html_join
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from wagtail.core.utils import escape_script
from .base import Block, BoundBlock, DeclarativeSubBlocksMetaclass
from .utils import indent, js_dict
__all__ = ['BaseStreamBlock', 'StreamBlock', 'StreamValue', 'StreamBlockValidationError']
class StreamBlockValidationError(ValidationError):
def __init__(self, block_errors=None, non_block_errors=None):
params = {}
if block_errors:
params.update(block_errors)
if non_block_errors:
params[NON_FIELD_ERRORS] = non_block_errors
super().__init__(
'Validation error in StreamBlock', params=params)
class BaseStreamBlock(Block):
def __init__(self, local_blocks=None, **kwargs):
self._constructor_kwargs = kwargs
super().__init__(**kwargs)
# create a local (shallow) copy of base_blocks so that it can be supplemented by local_blocks
self.child_blocks = self.base_blocks.copy()
if local_blocks:
for name, block in local_blocks:
block.set_name(name)
self.child_blocks[name] = block
self.dependencies = self.child_blocks.values()
def get_default(self):
"""
Default values set on a StreamBlock should be a list of (type_name, value) tuples -
we can't use StreamValue directly, because that would require a reference back to
the StreamBlock that hasn't been built yet.
For consistency, then, we need to convert it to a StreamValue here for StreamBlock
to work with.
"""
return StreamValue(self, self.meta.default)
def sorted_child_blocks(self):
"""Child blocks, sorted in to their groups."""
return sorted(self.child_blocks.values(),
key=lambda child_block: child_block.meta.group)
def render_list_member(self, block_type_name, value, prefix, index, errors=None, id=None):
"""
Render the HTML for a single list item. This consists of a container, hidden fields
to manage ID/deleted state/type, delete/reorder buttons, and the child block's own HTML.
"""
child_block = self.child_blocks[block_type_name]
child = child_block.bind(value, prefix="%s-value" % prefix, errors=errors)
return render_to_string('wagtailadmin/block_forms/stream_member.html', {
'child_blocks': self.sorted_child_blocks(),
'block_type_name': block_type_name,
'child_block': child_block,
'prefix': prefix,
'child': child,
'index': index,
'block_id': id,
})
def html_declarations(self):
return format_html_join(
'\n', '<script type="text/template" id="{0}-newmember-{1}">{2}</script>',
[
(
self.definition_prefix,
name,
mark_safe(escape_script(self.render_list_member(name, child_block.get_default(), '__PREFIX__', '')))
)
for name, child_block in self.child_blocks.items()
]
)
@property
def media(self):
return forms.Media(js=[static('wagtailadmin/js/blocks/sequence.js'), static('wagtailadmin/js/blocks/stream.js')])
def js_initializer(self):
# compile a list of info dictionaries, one for each available block type
child_blocks = []
for name, child_block in self.child_blocks.items():
# each info dictionary specifies at least a block name
child_block_info = {'name': "'%s'" % name}
# if the child defines a JS initializer function, include that in the info dict
# along with the param that needs to be passed to it for initializing an empty/default block
# of that type
child_js_initializer = child_block.js_initializer()
if child_js_initializer:
child_block_info['initializer'] = child_js_initializer
child_blocks.append(indent(js_dict(child_block_info)))
opts = {
'definitionPrefix': "'%s'" % self.definition_prefix,
'childBlocks': '[\n%s\n]' % ',\n'.join(child_blocks),
}
return "StreamBlock(%s)" % js_dict(opts)
def render_form(self, value, prefix='', errors=None):
error_dict = {}
if errors:
if len(errors) > 1:
# We rely on StreamBlock.clean throwing a single
# StreamBlockValidationError with a specially crafted 'params'
# attribute that we can pull apart and distribute to the child
# blocks
raise TypeError('StreamBlock.render_form unexpectedly received multiple errors')
error_dict = errors.as_data()[0].params
# value can be None when the StreamField is in a formset
if value is None:
value = self.get_default()
# drop any child values that are an unrecognised block type
valid_children = [child for child in value if child.block_type in self.child_blocks]
list_members_html = [
self.render_list_member(child.block_type, child.value, "%s-%d" % (prefix, i), i,
errors=error_dict.get(i), id=child.id)
for (i, child) in enumerate(valid_children)
]
return render_to_string('wagtailadmin/block_forms/stream.html', {
'prefix': prefix,
'help_text': getattr(self.meta, 'help_text', None),
'list_members_html': list_members_html,
'child_blocks': self.sorted_child_blocks(),
'header_menu_prefix': '%s-before' % prefix,
'block_errors': error_dict.get(NON_FIELD_ERRORS),
})
def value_from_datadict(self, data, files, prefix):
count = int(data['%s-count' % prefix])
values_with_indexes = []
for i in range(0, count):
if data['%s-%d-deleted' % (prefix, i)]:
continue
block_type_name = data['%s-%d-type' % (prefix, i)]
try:
child_block = self.child_blocks[block_type_name]
except KeyError:
continue
values_with_indexes.append(
(
int(data['%s-%d-order' % (prefix, i)]),
block_type_name,
child_block.value_from_datadict(data, files, '%s-%d-value' % (prefix, i)),
data.get('%s-%d-id' % (prefix, i)),
)
)
values_with_indexes.sort()
return StreamValue(self, [
(child_block_type_name, value, block_id)
for (index, child_block_type_name, value, block_id) in values_with_indexes
])
def value_omitted_from_data(self, data, files, prefix):
return ('%s-count' % prefix) not in data
@property
def required(self):
return self.meta.required
def clean(self, value):
cleaned_data = []
errors = {}
non_block_errors = ErrorList()
for i, child in enumerate(value): # child is a StreamChild instance
try:
cleaned_data.append(
(child.block.name, child.block.clean(child.value), child.id)
)
except ValidationError as e:
errors[i] = ErrorList([e])
if self.meta.min_num is not None and self.meta.min_num > len(value):
non_block_errors.append(ValidationError(
_('The minimum number of items is %d') % self.meta.min_num
))
elif self.required and len(value) == 0:
non_block_errors.append(ValidationError(_('This field is required.')))
if self.meta.max_num is not None and self.meta.max_num < len(value):
non_block_errors.append(ValidationError(
_('The maximum number of items is %d') % self.meta.max_num
))
if self.meta.block_counts:
block_counts = defaultdict(int)
for item in value:
block_counts[item.block_type] += 1
for block_name, min_max in self.meta.block_counts.items():
block = self.child_blocks[block_name]
max_num = min_max.get('max_num', None)
min_num = min_max.get('min_num', None)
block_count = block_counts[block_name]
if min_num is not None and min_num > block_count:
non_block_errors.append(ValidationError(
'{}: {}'.format(block.label, _('The minimum number of items is %d') % min_num)
))
if max_num is not None and max_num < block_count:
non_block_errors.append(ValidationError(
'{}: {}'.format(block.label, _('The maximum number of items is %d') % max_num)
))
if errors or non_block_errors:
# The message here is arbitrary - outputting error messages is delegated to the child blocks,
# which only involves the 'params' list
raise StreamBlockValidationError(block_errors=errors, non_block_errors=non_block_errors)
return StreamValue(self, cleaned_data)
def to_python(self, value):
# the incoming JSONish representation is a list of dicts, each with a 'type' and 'value' field
# (and possibly an 'id' too).
# This is passed to StreamValue to be expanded lazily - but first we reject any unrecognised
# block types from the list
return StreamValue(self, [
child_data for child_data in value
if child_data['type'] in self.child_blocks
], is_lazy=True)
def get_prep_value(self, value):
if not value:
# Falsy values (including None, empty string, empty list, and
# empty StreamValue) become an empty stream
return []
else:
# value is a StreamValue - delegate to its get_prep_value() method
# (which has special-case handling for lazy StreamValues to avoid useless
# round-trips to the full data representation and back)
return value.get_prep_value()
def get_api_representation(self, value, context=None):
if value is None:
# treat None as identical to an empty stream
return []
return [
{
'type': child.block.name,
'value': child.block.get_api_representation(child.value, context=context),
'id': child.id
}
for child in value # child is a StreamChild instance
]
def render_basic(self, value, context=None):
return format_html_join(
'\n', '<div class="block-{1}">{0}</div>',
[
(child.render(context=context), child.block_type)
for child in value
]
)
def get_searchable_content(self, value):
content = []
for child in value:
content.extend(child.block.get_searchable_content(child.value))
return content
def deconstruct(self):
"""
Always deconstruct StreamBlock instances as if they were plain StreamBlocks with all of the
field definitions passed to the constructor - even if in reality this is a subclass of StreamBlock
with the fields defined declaratively, or some combination of the two.
This ensures that the field definitions get frozen into migrations, rather than leaving a reference
to a custom subclass in the user's models.py that may or may not stick around.
"""
path = 'wagtail.core.blocks.StreamBlock'
args = [list(self.child_blocks.items())]
kwargs = self._constructor_kwargs
return (path, args, kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
for name, child_block in self.child_blocks.items():
errors.extend(child_block.check(**kwargs))
errors.extend(child_block._check_name(**kwargs))
return errors
class Meta:
# No icon specified here, because that depends on the purpose that the
# block is being used for. Feel encouraged to specify an icon in your
# descendant block type
icon = "placeholder"
default = []
required = True
min_num = None
max_num = None
block_counts = {}
class StreamBlock(BaseStreamBlock, metaclass=DeclarativeSubBlocksMetaclass):
pass
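# Illustrative note (assumption, not part of the Wagtail source): because of
# DeclarativeSubBlocksMetaclass, child blocks may also be declared as class
# attributes on a StreamBlock subclass instead of being passed as local_blocks,
# e.g. (CharBlock assumed to come from wagtail.core.blocks):
#
#     class ArticleBlock(StreamBlock):
#         heading = CharBlock()
#         paragraph = CharBlock()
#
#         class Meta:
#             icon = 'doc-full'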
class StreamValue(Sequence):
"""
Custom type used to represent the value of a StreamBlock; behaves as a sequence of BoundBlocks
(which keep track of block types in a way that the values alone wouldn't).
"""
class StreamChild(BoundBlock):
"""
Extends BoundBlock with methods that make logical sense in the context of
children of StreamField, but not necessarily elsewhere that BoundBlock is used
"""
def __init__(self, *args, **kwargs):
self.id = kwargs.pop('id')
super(StreamValue.StreamChild, self).__init__(*args, **kwargs)
@property
def block_type(self):
"""
Syntactic sugar so that we can say child.block_type instead of child.block.name.
            This doesn't belong on BoundBlock itself because the idea of block.name denoting
the child's "type" ('heading', 'paragraph' etc) is unique to StreamBlock, and in the
wider context people are liable to confuse it with the block class (CharBlock etc).
"""
return self.block.name
def __init__(self, stream_block, stream_data, is_lazy=False, raw_text=None):
"""
Construct a StreamValue linked to the given StreamBlock,
with child values given in stream_data.
Passing is_lazy=True means that stream_data is raw JSONish data as stored
in the database, and needs to be converted to native values
(using block.to_python()) when accessed. In this mode, stream_data is a
list of dicts, each containing 'type' and 'value' keys.
Passing is_lazy=False means that stream_data consists of immediately usable
native values. In this mode, stream_data is a list of (type_name, value)
or (type_name, value, id) tuples.
raw_text exists solely as a way of representing StreamField content that is
not valid JSON; this may legitimately occur if an existing text field is
migrated to a StreamField. In this situation we return a blank StreamValue
with the raw text accessible under the `raw_text` attribute, so that migration
code can be rewritten to convert it as desired.
"""
self.is_lazy = is_lazy
self.stream_block = stream_block # the StreamBlock object that handles this value
self.stream_data = stream_data # a list of (type_name, value) tuples
self._bound_blocks = {} # populated lazily from stream_data as we access items through __getitem__
self.raw_text = raw_text
def __getitem__(self, i):
if i not in self._bound_blocks:
if self.is_lazy:
raw_value = self.stream_data[i]
type_name = raw_value['type']
child_block = self.stream_block.child_blocks[type_name]
if hasattr(child_block, 'bulk_to_python'):
self._prefetch_blocks(type_name, child_block)
return self._bound_blocks[i]
else:
value = child_block.to_python(raw_value['value'])
block_id = raw_value.get('id')
else:
try:
type_name, value, block_id = self.stream_data[i]
except ValueError:
type_name, value = self.stream_data[i]
block_id = None
child_block = self.stream_block.child_blocks[type_name]
self._bound_blocks[i] = StreamValue.StreamChild(child_block, value, id=block_id)
return self._bound_blocks[i]
def _prefetch_blocks(self, type_name, child_block):
"""Prefetch all child blocks for the given `type_name` using the
        given `child_block`.
This prevents n queries for n blocks of a specific type.
"""
# create a mapping of all the child blocks matching the given block type,
# mapping (index within the stream) => (raw block value)
raw_values = OrderedDict(
(i, item['value']) for i, item in enumerate(self.stream_data)
if item['type'] == type_name
)
# pass the raw block values to bulk_to_python as a list
converted_values = child_block.bulk_to_python(raw_values.values())
# reunite the converted values with their stream indexes
for i, value in zip(raw_values.keys(), converted_values):
# also pass the block ID to StreamChild, if one exists for this stream index
block_id = self.stream_data[i].get('id')
self._bound_blocks[i] = StreamValue.StreamChild(child_block, value, id=block_id)
def get_prep_value(self):
prep_value = []
for i, stream_data_item in enumerate(self.stream_data):
if self.is_lazy and i not in self._bound_blocks:
# This child has not been accessed as a bound block, so its raw JSONish
# value (stream_data_item here) is still valid
prep_value_item = stream_data_item
# As this method is preparing this value to be saved to the database,
# this is an appropriate place to ensure that each block has a unique id.
prep_value_item['id'] = prep_value_item.get('id', str(uuid.uuid4()))
else:
# convert the bound block back into JSONish data
child = self[i]
# As this method is preparing this value to be saved to the database,
# this is an appropriate place to ensure that each block has a unique id.
child.id = child.id or str(uuid.uuid4())
prep_value_item = {
'type': child.block.name,
'value': child.block.get_prep_value(child.value),
'id': child.id,
}
prep_value.append(prep_value_item)
return prep_value
def __eq__(self, other):
if not isinstance(other, StreamValue):
return False
return self.stream_data == other.stream_data
def __len__(self):
return len(self.stream_data)
def __repr__(self):
return repr(list(self))
def render_as_block(self, context=None):
return self.stream_block.render(self, context=context)
def __html__(self):
return self.stream_block.render(self)
def __str__(self):
return self.__html__()
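# Illustrative sketch (assumption, not part of the Wagtail source): the two
# stream_data shapes accepted by StreamValue.__init__ above. CharBlock is assumed
# to be importable from wagtail.core.blocks; it is imported lazily here only to
# keep the sketch self-contained.
def _example_stream_values():
    """Sketch: build the same stream from native values and from raw JSONish data."""
    from wagtail.core.blocks import CharBlock
    block = StreamBlock([('heading', CharBlock()), ('paragraph', CharBlock())])
    # non-lazy: (type_name, value) tuples holding already-converted native values
    native = StreamValue(block, [('heading', 'Hello'), ('paragraph', 'World')])
    # lazy: raw dicts as stored in the database, converted on access via to_python()
    lazy = StreamValue(block, [{'type': 'heading', 'value': 'Hello'}], is_lazy=True)
    return native, lazy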
|
|
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume-related Utilities and helpers."""
import math
from oslo.config import cfg
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import exception
from cinder.openstack.common.gettextutils import _  # assumed import path for the _() translations used below
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import strutils
from cinder.openstack.common import units
from cinder import rpc
from cinder import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def null_safe_str(s):
return str(s) if s else ''
def _usage_from_volume(context, volume_ref, **kw):
usage_info = dict(tenant_id=volume_ref['project_id'],
user_id=volume_ref['user_id'],
instance_uuid=volume_ref['instance_uuid'],
availability_zone=volume_ref['availability_zone'],
volume_id=volume_ref['id'],
volume_type=volume_ref['volume_type_id'],
display_name=volume_ref['display_name'],
launched_at=null_safe_str(volume_ref['launched_at']),
created_at=null_safe_str(volume_ref['created_at']),
status=volume_ref['status'],
snapshot_id=volume_ref['snapshot_id'],
size=volume_ref['size'])
usage_info.update(kw)
return usage_info
def notify_about_volume_usage(context, volume, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_volume(context, volume, **extra_usage_info)
rpc.get_notifier("volume", host).info(context, 'volume.%s' % event_suffix,
usage_info)
def _usage_from_snapshot(context, snapshot_ref, **extra_usage_info):
usage_info = {
'tenant_id': snapshot_ref['project_id'],
'user_id': snapshot_ref['user_id'],
'availability_zone': snapshot_ref.volume['availability_zone'],
'volume_id': snapshot_ref['volume_id'],
'volume_size': snapshot_ref['volume_size'],
'snapshot_id': snapshot_ref['id'],
'display_name': snapshot_ref['display_name'],
'created_at': str(snapshot_ref['created_at']),
'status': snapshot_ref['status'],
'deleted': null_safe_str(snapshot_ref['deleted'])
}
usage_info.update(extra_usage_info)
return usage_info
def notify_about_snapshot_usage(context, snapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_snapshot(context, snapshot, **extra_usage_info)
rpc.get_notifier('snapshot', host).info(context,
'snapshot.%s' % event_suffix,
usage_info)
def setup_blkio_cgroup(srcpath, dstpath, bps_limit, execute=utils.execute):
if not bps_limit:
return None
try:
srcdev = utils.get_blkdev_major_minor(srcpath)
except exception.Error as e:
msg = (_('Failed to get device number for read throttling: %(error)s')
% {'error': e})
LOG.error(msg)
srcdev = None
try:
dstdev = utils.get_blkdev_major_minor(dstpath)
except exception.Error as e:
msg = (_('Failed to get device number for write throttling: %(error)s')
% {'error': e})
LOG.error(msg)
dstdev = None
if not srcdev and not dstdev:
return None
group_name = CONF.volume_copy_blkio_cgroup_name
try:
execute('cgcreate', '-g', 'blkio:%s' % group_name, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.warn(_('Failed to create blkio cgroup'))
return None
try:
if srcdev:
execute('cgset', '-r', 'blkio.throttle.read_bps_device=%s %d'
% (srcdev, bps_limit), group_name, run_as_root=True)
if dstdev:
execute('cgset', '-r', 'blkio.throttle.write_bps_device=%s %d'
% (dstdev, bps_limit), group_name, run_as_root=True)
except processutils.ProcessExecutionError:
msg = (_('Failed to setup blkio cgroup to throttle the devices: '
'\'%(src)s\',\'%(dst)s\'')
% {'src': srcdev, 'dst': dstdev})
LOG.warn(msg)
return None
return ['cgexec', '-g', 'blkio:%s' % group_name]
def _calculate_count(size_in_m, blocksize):
# Check if volume_dd_blocksize is valid
try:
# Rule out zero-sized/negative/float dd blocksize which
# cannot be caught by strutils
if blocksize.startswith(('-', '0')) or '.' in blocksize:
raise ValueError
bs = strutils.string_to_bytes('%sB' % blocksize)
except ValueError:
msg = (_("Incorrect value error: %(blocksize)s, "
"it may indicate that \'volume_dd_blocksize\' "
"was configured incorrectly. Fall back to default.")
% {'blocksize': blocksize})
LOG.warn(msg)
# Fall back to default blocksize
CONF.clear_override('volume_dd_blocksize')
blocksize = CONF.volume_dd_blocksize
bs = strutils.string_to_bytes('%sB' % blocksize)
count = math.ceil(size_in_m * units.Mi / bs)
return blocksize, int(count)
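# Worked example (illustrative; assumes strutils parses '1MB' with its default
# 1024-based interpretation, i.e. 1 MiB): for a 1024 MiB copy with the default
# blocksize of '1M',
#     bs    = string_to_bytes('1MB')            -> 1048576 bytes
#     count = ceil(1024 * units.Mi / 1048576)   -> 1024
# so _calculate_count(1024, '1M') would return ('1M', 1024), i.e. dd below is
# asked to copy 1024 blocks of 1M each.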
def copy_volume(srcstr, deststr, size_in_m, blocksize, sync=False,
execute=utils.execute, ionice=None):
# Use O_DIRECT to avoid thrashing the system buffer cache
extra_flags = []
    # Check whether O_DIRECT is supported for iflag and oflag separately
for flag in ['iflag=direct', 'oflag=direct']:
try:
execute('dd', 'count=0', 'if=%s' % srcstr, 'of=%s' % deststr,
flag, run_as_root=True)
extra_flags.append(flag)
except processutils.ProcessExecutionError:
pass
    # If the volume is being unprovisioned, then request that the data is
    # persisted before returning, so that it is not discarded from the cache.
if sync and not extra_flags:
extra_flags.append('conv=fdatasync')
blocksize, count = _calculate_count(size_in_m, blocksize)
cmd = ['dd', 'if=%s' % srcstr, 'of=%s' % deststr,
'count=%d' % count, 'bs=%s' % blocksize]
cmd.extend(extra_flags)
if ionice is not None:
cmd = ['ionice', ionice] + cmd
cgcmd = setup_blkio_cgroup(srcstr, deststr, CONF.volume_copy_bps_limit)
if cgcmd:
cmd = cgcmd + cmd
# Perform the copy
execute(*cmd, run_as_root=True)
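# Illustrative example (hypothetical paths): for
#     copy_volume('/dev/zero', '/dev/mapper/vol1', 1024, '1M', sync=True)
# the command assembled above would look roughly like
#     dd if=/dev/zero of=/dev/mapper/vol1 count=1024 bs=1M oflag=direct
# (or with conv=fdatasync instead, when neither iflag=direct nor oflag=direct
# is supported), optionally prefixed by 'ionice ...' and/or
# 'cgexec -g blkio:<group>' when throttling is configured.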
def clear_volume(volume_size, volume_path, volume_clear=None,
volume_clear_size=None, volume_clear_ionice=None):
"""Unprovision old volumes to prevent data leaking between users."""
if volume_clear is None:
volume_clear = CONF.volume_clear
if volume_clear_size is None:
volume_clear_size = CONF.volume_clear_size
if volume_clear_size == 0:
volume_clear_size = volume_size
if volume_clear_ionice is None:
volume_clear_ionice = CONF.volume_clear_ionice
LOG.info(_("Performing secure delete on volume: %s") % volume_path)
if volume_clear == 'zero':
return copy_volume('/dev/zero', volume_path, volume_clear_size,
CONF.volume_dd_blocksize,
sync=True, execute=utils.execute,
ionice=volume_clear_ionice)
elif volume_clear == 'shred':
clear_cmd = ['shred', '-n3']
if volume_clear_size:
clear_cmd.append('-s%dMiB' % volume_clear_size)
else:
raise exception.InvalidConfigurationValue(
option='volume_clear',
value=volume_clear)
clear_cmd.append(volume_path)
utils.execute(*clear_cmd, run_as_root=True)
def supports_thin_provisioning():
return brick_lvm.LVM.supports_thin_provisioning(
utils.get_root_helper())
def get_all_volumes(vg_name=None):
return brick_lvm.LVM.get_all_volumes(
utils.get_root_helper(),
vg_name)
def get_all_physical_volumes(vg_name=None):
return brick_lvm.LVM.get_all_physical_volumes(
utils.get_root_helper(),
vg_name)
def get_all_volume_groups(vg_name=None):
return brick_lvm.LVM.get_all_volume_groups(
utils.get_root_helper(),
vg_name)
|
|
"""Geolocation support for GDACS Feed."""
from __future__ import annotations
import logging
from homeassistant.components.geo_location import GeolocationEvent
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_UNIT_SYSTEM_IMPERIAL,
LENGTH_KILOMETERS,
LENGTH_MILES,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.util.unit_system import IMPERIAL_SYSTEM
from .const import DEFAULT_ICON, DOMAIN, FEED
_LOGGER = logging.getLogger(__name__)
ATTR_ALERT_LEVEL = "alert_level"
ATTR_COUNTRY = "country"
ATTR_DESCRIPTION = "description"
ATTR_DURATION_IN_WEEK = "duration_in_week"
ATTR_EVENT_TYPE = "event_type"
ATTR_EXTERNAL_ID = "external_id"
ATTR_FROM_DATE = "from_date"
ATTR_POPULATION = "population"
ATTR_SEVERITY = "severity"
ATTR_TO_DATE = "to_date"
ATTR_VULNERABILITY = "vulnerability"
ICONS = {
"DR": "mdi:water-off",
"EQ": "mdi:pulse",
"FL": "mdi:home-flood",
"TC": "mdi:weather-hurricane",
"TS": "mdi:waves",
"VO": "mdi:image-filter-hdr",
}
# An update of this entity does not make a web request; it only uses internal data.
PARALLEL_UPDATES = 0
SOURCE = "gdacs"
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the GDACS Feed platform."""
manager = hass.data[DOMAIN][FEED][entry.entry_id]
@callback
def async_add_geolocation(feed_manager, integration_id, external_id):
"""Add geolocation entity from feed."""
new_entity = GdacsEvent(feed_manager, integration_id, external_id)
_LOGGER.debug("Adding geolocation %s", new_entity)
async_add_entities([new_entity], True)
manager.listeners.append(
async_dispatcher_connect(
hass, manager.async_event_new_entity(), async_add_geolocation
)
)
# Do not wait for update here so that the setup can be completed and because an
# update will fetch data from the feed via HTTP and then process that data.
hass.async_create_task(manager.async_update())
_LOGGER.debug("Geolocation setup done")
class GdacsEvent(GeolocationEvent):
"""This represents an external event with GDACS feed data."""
def __init__(self, feed_manager, integration_id, external_id):
"""Initialize entity with data from feed entry."""
self._feed_manager = feed_manager
self._integration_id = integration_id
self._external_id = external_id
self._title = None
self._distance = None
self._latitude = None
self._longitude = None
self._attribution = None
self._alert_level = None
self._country = None
self._description = None
self._duration_in_week = None
self._event_type_short = None
self._event_type = None
self._from_date = None
self._to_date = None
self._population = None
self._severity = None
self._vulnerability = None
self._version = None
self._remove_signal_delete = None
self._remove_signal_update = None
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self._remove_signal_delete = async_dispatcher_connect(
self.hass, f"gdacs_delete_{self._external_id}", self._delete_callback
)
self._remove_signal_update = async_dispatcher_connect(
self.hass, f"gdacs_update_{self._external_id}", self._update_callback
)
async def async_will_remove_from_hass(self) -> None:
"""Call when entity will be removed from hass."""
self._remove_signal_delete()
self._remove_signal_update()
# Remove from entity registry.
entity_registry = await async_get_registry(self.hass)
if self.entity_id in entity_registry.entities:
entity_registry.async_remove(self.entity_id)
@callback
def _delete_callback(self):
"""Remove this entity."""
self.hass.async_create_task(self.async_remove(force_remove=True))
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
@property
def should_poll(self):
"""No polling needed for GDACS feed location events."""
return False
async def async_update(self):
"""Update this entity from the data held in the feed manager."""
_LOGGER.debug("Updating %s", self._external_id)
feed_entry = self._feed_manager.get_entry(self._external_id)
if feed_entry:
self._update_from_feed(feed_entry)
def _update_from_feed(self, feed_entry):
"""Update the internal state from the provided feed entry."""
if not (event_name := feed_entry.event_name):
# Earthquakes usually don't have an event name.
event_name = f"{feed_entry.country} ({feed_entry.event_id})"
self._title = f"{feed_entry.event_type}: {event_name}"
# Convert distance if not metric system.
if self.hass.config.units.name == CONF_UNIT_SYSTEM_IMPERIAL:
self._distance = IMPERIAL_SYSTEM.length(
feed_entry.distance_to_home, LENGTH_KILOMETERS
)
else:
self._distance = feed_entry.distance_to_home
self._latitude = feed_entry.coordinates[0]
self._longitude = feed_entry.coordinates[1]
self._attribution = feed_entry.attribution
self._alert_level = feed_entry.alert_level
self._country = feed_entry.country
self._description = feed_entry.title
self._duration_in_week = feed_entry.duration_in_week
self._event_type_short = feed_entry.event_type_short
self._event_type = feed_entry.event_type
self._from_date = feed_entry.from_date
self._to_date = feed_entry.to_date
self._population = feed_entry.population
self._severity = feed_entry.severity
self._vulnerability = feed_entry.vulnerability
# Round vulnerability value if presented as float.
if isinstance(self._vulnerability, float):
self._vulnerability = round(self._vulnerability, 1)
self._version = feed_entry.version
@property
def unique_id(self) -> str | None:
"""Return a unique ID containing latitude/longitude and external id."""
return f"{self._integration_id}_{self._external_id}"
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
if self._event_type_short and self._event_type_short in ICONS:
return ICONS[self._event_type_short]
return DEFAULT_ICON
@property
def source(self) -> str:
"""Return source value of this external event."""
return SOURCE
@property
def name(self) -> str | None:
"""Return the name of the entity."""
return self._title
@property
def distance(self) -> float | None:
"""Return distance value of this external event."""
return self._distance
@property
def latitude(self) -> float | None:
"""Return latitude value of this external event."""
return self._latitude
@property
def longitude(self) -> float | None:
"""Return longitude value of this external event."""
return self._longitude
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
if self.hass.config.units.name == CONF_UNIT_SYSTEM_IMPERIAL:
return LENGTH_MILES
return LENGTH_KILOMETERS
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
attributes = {}
for key, value in (
(ATTR_EXTERNAL_ID, self._external_id),
(ATTR_DESCRIPTION, self._description),
(ATTR_ATTRIBUTION, self._attribution),
(ATTR_EVENT_TYPE, self._event_type),
(ATTR_ALERT_LEVEL, self._alert_level),
(ATTR_COUNTRY, self._country),
(ATTR_DURATION_IN_WEEK, self._duration_in_week),
(ATTR_FROM_DATE, self._from_date),
(ATTR_TO_DATE, self._to_date),
(ATTR_POPULATION, self._population),
(ATTR_SEVERITY, self._severity),
(ATTR_VULNERABILITY, self._vulnerability),
):
if value or isinstance(value, bool):
attributes[key] = value
return attributes
|
|
"""Tests for question endpoints."""
import json
import unittest
import backend
import harness
class QuestionTest(harness.TestHarness):
"""Tests for question endpoints."""
@harness.with_sess(user_id=1)
def test_crud(self):
"""Basic CRUD tests."""
# create a user
harness.create_user(name='snakes')
# create a quest
resp = self.post_json(
self.url_for(backend.quest_views.QuestList),
{"name": "mouse", "summary": "nap"})
self.assertEqual(resp.status_code, 200)
# no questions yet
resp = self.app.get("/v1/quests/1/questions/1")
self.assertEqual(resp.status_code, 404)
resp = self.app.get(self.url_for(
backend.question_views.QuestionList, parent_id=1))
self.assertEqual(json.loads(resp.data)['questions'], [])
# create a resource
resp = self.post_json(
self.url_for(backend.question_views.QuestionList, parent_id=1),
{"question_type": "text", "description": "cat hotel",
'question_group': 'lab_report'})
self.assertEqual(json.loads(resp.data), {
"multiple_choices": [],'question_group': 'lab_report',
"description": "cat hotel", "question_type": "text",
"id": 1, "url": "/v1/quests/1/questions/1",
"creator_id": 1, "creator_url": "/v1/users/1",
"quest_id": 1, "quest_url": "/v1/quests/1"})
# or two
resp = self.post_json(
self.url_for(backend.question_views.QuestionList, parent_id=1),
{"question_type": "upload", "description": "snake farm",
'question_group': 'review_quiz'})
self.assertEqual(json.loads(resp.data), {
"multiple_choices": [],'question_group': 'review_quiz',
"description": "snake farm", "question_type": "upload",
"id": 2, "url": "/v1/quests/1/questions/2",
"creator_id": 1, "creator_url": "/v1/users/1",
"quest_id": 1, "quest_url": "/v1/quests/1"})
# and one more linked to a different quest
resp = self.post_json(
self.url_for(backend.quest_views.QuestList),
{"name": "mouse", "summary": "nap"})
self.assertEqual(resp.status_code, 200)
resp = self.post_json(
self.url_for(backend.question_views.QuestionList, parent_id=2),
{"question_type": "upload", "description": "snake farm",
'question_group': 'closing_questions'})
self.assertEqual(resp.status_code, 200)
# can't create a question with a bad question_group
resp = self.post_json(
self.url_for(backend.question_views.QuestionList, parent_id=2),
{"question_type": "upload", "description": "snake farm",
'question_group': 'snakes'})
self.assertEqual(resp.status_code, 400)
# and get them back
resp = self.app.get("/v1/quests/1/questions/1")
self.assertEqual(json.loads(resp.data), {
"multiple_choices": [], 'question_group': 'lab_report',
"description": "cat hotel", "question_type": "text",
"id": 1, "url": "/v1/quests/1/questions/1",
"creator_id": 1, "creator_url": "/v1/users/1",
"quest_id": 1, "quest_url": "/v1/quests/1"})
resp = self.app.get("/v1/quests/1/questions/2")
self.assertEqual(resp.status_code, 200)
resp = self.app.get(self.url_for(
backend.question_views.QuestionList, parent_id=1))
self.assertEqual(json.loads(resp.data)['questions'], [
{"description": "cat hotel", "question_type": "text",
"id": 1, "url": "/v1/quests/1/questions/1",
"creator_id": 1, "creator_url": "/v1/users/1",
"multiple_choices": [], 'question_group': 'lab_report',
"quest_id": 1, "quest_url": "/v1/quests/1"},
{"description": "snake farm", "question_type": "upload",
"id": 2, "url": "/v1/quests/1/questions/2",
"creator_id": 1, "creator_url": "/v1/users/1",
"multiple_choices": [], 'question_group': 'review_quiz',
"quest_id": 1, "quest_url": "/v1/quests/1"}])
resp = self.app.get(self.url_for(
backend.question_views.QuestionList, parent_id=100))
self.assertEqual(resp.status_code, 404)
# filter by question_group
resp = self.app.get(self.url_for(
backend.question_views.QuestionList,
parent_id=1, question_group='lab_report'))
self.assertEqual(json.loads(resp.data)['questions'], [
{"description": "cat hotel", "question_type": "text",
"id": 1, "url": "/v1/quests/1/questions/1",
"creator_id": 1, "creator_url": "/v1/users/1",
"multiple_choices": [], 'question_group': 'lab_report',
"quest_id": 1, "quest_url": "/v1/quests/1"}])
resp = self.app.get(self.url_for(
backend.question_views.QuestionList,
parent_id=1, question_group='lab_report,closing_questions'))
self.assertEqual(json.loads(resp.data)['questions'], [
{"description": "cat hotel", "question_type": "text",
"id": 1, "url": "/v1/quests/1/questions/1",
"creator_id": 1, "creator_url": "/v1/users/1",
"multiple_choices": [], 'question_group': 'lab_report',
"quest_id": 1, "quest_url": "/v1/quests/1"}])
resp = self.app.get(self.url_for(
backend.question_views.QuestionList,
parent_id=1, question_group='lab_report,review_quiz'))
self.assertEqual(json.loads(resp.data)['questions'], [
{"description": "cat hotel", "question_type": "text",
"id": 1, "url": "/v1/quests/1/questions/1",
"creator_id": 1, "creator_url": "/v1/users/1",
"multiple_choices": [], 'question_group': 'lab_report',
"quest_id": 1, "quest_url": "/v1/quests/1"},
{"description": "snake farm", "question_type": "upload",
"id": 2, "url": "/v1/quests/1/questions/2",
"creator_id": 1, "creator_url": "/v1/users/1",
"multiple_choices": [], 'question_group': 'review_quiz',
"quest_id": 1, "quest_url": "/v1/quests/1"}])
resp = self.app.get(self.url_for(
backend.question_views.QuestionList,
parent_id=100, question_group='lab_report'))
self.assertEqual(resp.status_code, 404)
# error on bad question group
resp = self.app.get(self.url_for(
backend.question_views.QuestionList,
parent_id=1, question_group='lab_report,snakes'))
self.assertEqual(resp.status_code, 400)
# and get them back with just the id
resp = self.app.get("/v1/questions/1")
self.assertEqual(json.loads(resp.data), {
"description": "cat hotel", "question_type": "text",
"id": 1, "url": "/v1/quests/1/questions/1",
"creator_id": 1, "creator_url": "/v1/users/1",
"multiple_choices": [], 'question_group': 'lab_report',
"quest_id": 1, "quest_url": "/v1/quests/1"})
# but can't do anything else with that URI
resp = self.app.post("/v1/questions/1")
self.assertEqual(resp.status_code, 405)
resp = self.app.put("/v1/questions/1")
self.assertEqual(resp.status_code, 405)
resp = self.app.delete("/v1/questions/1")
self.assertEqual(resp.status_code, 405)
# edit
resp = self.put_json('/v1/quests/1/questions/1', {
"question_type": "text", 'description': 'a blue house',
'question_group': 'review_quiz'})
self.assertEqual(resp.status_code, 200)
# but we can't change the question_type after creating it
resp = self.put_json('/v1/quests/1/questions/1', {
"question_type": "upload", 'description': 'a blue house',
'question_group': 'review_quiz'})
self.assertEqual(resp.status_code, 200)
# and get them back
resp = self.app.get("/v1/quests/1/questions/1")
self.assertEqual(json.loads(resp.data), {
"description": "a blue house", "question_type": "text",
"id": 1, "url": "/v1/quests/1/questions/1",
"creator_id": 1, "creator_url": "/v1/users/1",
"multiple_choices": [], 'question_group': 'review_quiz',
"quest_id": 1, "quest_url": "/v1/quests/1"})
# delete
resp = self.app.delete("/v1/quests/1/questions/1")
self.assertEqual(resp.status_code, 200)
# and it's gone
resp = self.app.get("/v1/quests/1/questions/1")
self.assertEqual(resp.status_code, 404)
resp = self.put_json('/v1/quests/1/questions/1', {
"question_type": "text", 'description': 'a blue house',
'question_group': 'review_quiz'})
self.assertEqual(resp.status_code, 404)
resp = self.app.delete("/v1/quests/1/questions/1")
self.assertEqual(resp.status_code, 404)
# make sure we can't create invalid question types
resp = self.post_json(self.url_for(
backend.question_views.QuestionList, parent_id=1),
{"question_type": "snakes", 'description': 'a blue house'})
self.assertEqual(resp.status_code, 400)
# and 404 on bad quest ids
resp = self.app.get("/v1/quests/1/questions/2")
self.assertEqual(resp.status_code, 200)
resp = self.app.get("/v1/quests/2/questions/2")
self.assertEqual(resp.status_code, 404)
resp = self.post_json(
self.url_for(backend.question_views.QuestionList, parent_id=20),
{"question_type": "upload", "description": "snake farm",
'question_group': 'review_quiz'})
self.assertEqual(resp.status_code, 404)
resp = self.app.get(self.url_for(
backend.question_views.QuestionList, parent_id=20))
self.assertEqual(resp.status_code, 404)
# cascade delete on quests to linked questions
resp = self.app.get("/v1/quests/1/questions/2")
self.assertEqual(resp.status_code, 200)
resp = self.app.delete("/v1/quests/1")
self.assertEqual(resp.status_code, 200)
resp = self.app.get("/v1/quests/1/questions/2")
self.assertEqual(resp.status_code, 404)
@harness.with_sess(user_id=1)
def test_answer_crud(self):
"""Test CRUD on answer resources."""
# create a user
harness.create_user(name='snakes')
# create a quest
resp = self.post_json(
self.url_for(backend.quest_views.QuestList),
{"name": "mouse", "summary": "nap"})
self.assertEqual(resp.status_code, 200)
# create some questions
resp = self.post_json(
self.url_for(backend.question_views.QuestionList, parent_id=1),
{"question_type": "text", "description": "cat hotel",
'question_group': 'review_quiz'})
self.assertEqual(resp.status_code, 200)
resp = self.post_json(
self.url_for(backend.question_views.QuestionList, parent_id=1),
{"question_type": "upload", "description": "cat upload",
'question_group': 'review_quiz'})
self.assertEqual(resp.status_code, 200)
resp = self.post_json(
self.url_for(backend.question_views.QuestionList, parent_id=1),
{"question_type": "multiple_choice", "description": "a choice",
'question_group': 'review_quiz'})
self.assertEqual(resp.status_code, 200)
resp = self.post_json(
self.url_for(backend.question_views.QuestionList, parent_id=1),
{"question_type": "multiple_choice", "description": "b choice",
'question_group': 'review_quiz'})
self.assertEqual(resp.status_code, 200)
# create some choices
resp = self.post_json(
self.url_for(
backend.question_views.MultipleChoiceList, parent_id=3),
{'answer': 'a', 'is_correct': True, 'order': 1})
self.assertEqual(resp.status_code, 200)
resp = self.post_json(
self.url_for(
backend.question_views.MultipleChoiceList, parent_id=3),
{'answer': 'b', 'is_correct': False, 'order': 2})
self.assertEqual(resp.status_code, 200)
resp = self.post_json(
self.url_for(
backend.question_views.MultipleChoiceList, parent_id=4),
{'answer': 'a', 'is_correct': False, 'order': 2})
self.assertEqual(resp.status_code, 200)
# link some answers
resp = self.post_json(
self.url_for(backend.question_views.AnswerList, parent_id=1),
{"answer_text": "cats"})
self.assertEqual(json.loads(resp.data), {
"question_type": "text", "answer_text": "cats",
"answer_multiple_choice": None,
"answer_upload_url": None,
"id": 1, "url": "/v1/questions/1/answers/1",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 1, "question_url": "/v1/questions/1"})
resp = self.post_json(
self.url_for(backend.question_views.AnswerList, parent_id=1),
{"answer_text": "more cats"})
self.assertEqual(json.loads(resp.data), {
"question_type": "text", "answer_text": "more cats",
"answer_multiple_choice": None, "answer_upload_url": None,
"id": 2, "url": "/v1/questions/1/answers/2",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 1, "question_url": "/v1/questions/1"})
resp = self.post_json(
self.url_for(backend.question_views.AnswerList, parent_id=3),
{"answer_multiple_choice": 1})
self.assertEqual(json.loads(resp.data), {
"question_type": "multiple_choice", "answer_text": None,
"answer_multiple_choice": 1, "answer_upload_url": None,
"id": 3, "url": "/v1/questions/3/answers/3",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 3, "question_url": "/v1/questions/3"})
# multiple choice id 1 is linked to question 3, not 4
resp = self.post_json(
self.url_for(backend.question_views.AnswerList, parent_id=4),
{"answer_multiple_choice": 3})
self.assertEqual(resp.status_code, 200)
resp = self.post_json(
self.url_for(backend.question_views.AnswerList, parent_id=4),
{"answer_multiple_choice": 1})
self.assertEqual(resp.status_code, 404)
resp = self.put_json(
"/v1/questions/4/answers/4",
{"answer_multiple_choice": 1})
self.assertEqual(resp.status_code, 404)
        # non-existent multiple choice id 70
resp = self.post_json(
self.url_for(backend.question_views.AnswerList, parent_id=3),
{"answer_multiple_choice": 70})
self.assertEqual(resp.status_code, 404)
# 400 on invalid combinations of question_type and answer fields
resp = self.post_json(
self.url_for(backend.question_views.AnswerList, parent_id=1),
{"answer_upload_url": "cats.html"})
self.assertEqual(resp.status_code, 400)
resp = self.post_json(
self.url_for(backend.question_views.AnswerList, parent_id=1),
{"answer_multiple_choice": "cats"})
self.assertEqual(resp.status_code, 400)
resp = self.post_json(
self.url_for(backend.question_views.AnswerList, parent_id=2),
{"answer_text": "cats"})
self.assertEqual(resp.status_code, 400)
resp = self.post_json(
self.url_for(backend.question_views.AnswerList, parent_id=2),
{"answer_multiple_choice": "cats"})
self.assertEqual(resp.status_code, 400)
resp = self.post_json(
self.url_for(backend.question_views.AnswerList, parent_id=3),
{"answer_text": "cats"})
self.assertEqual(resp.status_code, 400)
resp = self.post_json(
self.url_for(backend.question_views.AnswerList, parent_id=3),
{"answer_upload_url": "cats"})
self.assertEqual(resp.status_code, 400)
# get them back
resp = self.app.get('/v1/questions/1/answers/2')
self.assertEqual(json.loads(resp.data), {
"question_type": "text", "answer_text": "more cats",
"answer_upload_url": None,
"answer_multiple_choice": None,
"id": 2, "url": "/v1/questions/1/answers/2",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 1, "question_url": "/v1/questions/1"})
resp = self.app.get(self.url_for(
backend.question_views.AnswerList, parent_id=1))
self.assertEqual(json.loads(resp.data)['answers'], [
{"answer_text": "cats", "answer_upload_url": None,
"creator_id": 1, "creator_url": "/v1/users/1",
"answer_multiple_choice": None,
"id": 1, "question_id": 1, "question_type": "text",
"question_url": "/v1/questions/1",
"url": "/v1/questions/1/answers/1"},
{"answer_text": "more cats", "answer_upload_url": None,
"creator_id": 1, "creator_url": "/v1/users/1", "id": 2,
"answer_multiple_choice": None,
"question_id": 1, "question_type": "text",
"question_url": "/v1/questions/1",
"url": "/v1/questions/1/answers/2"}])
# edit
resp = self.put_json('/v1/questions/1/answers/2', {
"question_type": "text", "answer_text": "super cat"})
self.assertEqual(resp.status_code, 200)
resp = self.app.get('/v1/questions/1/answers/2')
self.assertEqual(json.loads(resp.data), {
"question_type": "text", "answer_text": "super cat",
"answer_upload_url": None,
"answer_multiple_choice": None,
"id": 2, "url": "/v1/questions/1/answers/2",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 1, "question_url": "/v1/questions/1"})
# delete
resp = self.app.delete('/v1/questions/1/answers/2')
self.assertEqual(resp.status_code, 200)
resp = self.app.get('/v1/questions/1/answers/2')
self.assertEqual(resp.status_code, 404)
# bad question_id / answer_id combos 404
resp = self.app.get('/v1/questions/1/answers/1')
self.assertEqual(resp.status_code, 200)
resp = self.app.get('/v1/questions/100/answers/1')
self.assertEqual(resp.status_code, 404)
resp = self.app.put('/v1/questions/100/answers/1')
self.assertEqual(resp.status_code, 404)
resp = self.app.delete('/v1/questions/100/answers/1')
self.assertEqual(resp.status_code, 404)
resp = self.app.get(self.url_for(
backend.question_views.AnswerList, parent_id=100))
self.assertEqual(resp.status_code, 404)
@harness.with_sess(user_id=1)
    def test_multiple_choice(self):
"""Test the multiple choice resource."""
# nothing, so 404
resp = self.app.get(self.url_for(
backend.question_views.MultipleChoice,
question_id=1, multiple_choice_id=1))
self.assertEqual(resp.status_code, 404)
# create our resources
harness.create_user(name='snakes')
resp = self.post_json(
self.url_for(backend.quest_views.QuestList),
{"name": "mouse", "summary": "nap"})
self.assertEqual(resp.status_code, 200)
resp = self.post_json(
self.url_for(backend.question_views.QuestionList, parent_id=1),
{'description': 'q1', 'question_type': 'multiple_choice',
'question_group': 'review_quiz'})
self.assertEqual(resp.status_code, 200)
resp = self.post_json(
self.url_for(backend.question_views.QuestionList, parent_id=1),
{'description': 'q2', 'question_type': 'text',
'question_group': 'review_quiz'})
self.assertEqual(resp.status_code, 200)
resp = self.post_json(
self.url_for(
backend.question_views.MultipleChoiceList, parent_id=1),
{'answer': 'a', 'is_correct': False, 'order': 2})
self.assertEqual(json.loads(resp.data), {
"answer": "a", "is_correct": False, 'order': 2,
"id": 1, "url": "/v1/questions/1/multiple_choices/1",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 1, "question_url": "/v1/questions/1"})
resp = self.post_json(
self.url_for(
backend.question_views.MultipleChoiceList, parent_id=1),
{'answer': 'b', 'is_correct': True, 'order': 1})
self.assertEqual(json.loads(resp.data), {
"answer": "b", "is_correct": True, 'order': 1,
"id": 2, "url": "/v1/questions/1/multiple_choices/2",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 1, "question_url": "/v1/questions/1"})
# can't add a multiple choice answer to a non-multiple choice question
resp = self.post_json(
self.url_for(
backend.question_views.MultipleChoiceList, parent_id=2),
{'answer': 'a', 'is_correct': False, 'order': 3})
self.assertEqual(resp.status_code, 400)
        # or link to a non-existent question
resp = self.post_json(
self.url_for(
backend.question_views.MultipleChoiceList, parent_id=20),
{'answer': 'a', 'is_correct': False, 'order': 3})
self.assertEqual(resp.status_code, 404)
# edit
resp = self.put_json(
self.url_for(
backend.question_views.MultipleChoice,
question_id=1, multiple_choice_id=2),
{'answer': 'bee', 'is_correct': True, 'order': 1})
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
self.url_for(
backend.question_views.MultipleChoice,
question_id=1, multiple_choice_id=2))
self.assertEqual(json.loads(resp.data), {
"answer": "bee", "is_correct": True, 'order': 1,
"id": 2, "url": "/v1/questions/1/multiple_choices/2",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 1, "question_url": "/v1/questions/1"})
# see them get attached to a question
resp = self.app.get(
self.url_for(
backend.question_views.Question,
quest_id=1, question_id=1))
self.assertEqual(json.loads(resp.data)['multiple_choices'], [
{"answer": "bee", "is_correct": True, 'order': 1,
"id": 2, "url": "/v1/questions/1/multiple_choices/2",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 1, "question_url": "/v1/questions/1"},
{"answer": "a", "is_correct": False, 'order': 2,
"id": 1, "url": "/v1/questions/1/multiple_choices/1",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 1, "question_url": "/v1/questions/1"}])
resp = self.app.get(
self.url_for(
backend.question_views.MultipleChoiceList, parent_id=1))
self.assertEqual(json.loads(resp.data), {'multiple_choices': [
{"answer": "bee", "is_correct": True, 'order': 1,
"id": 2, "url": "/v1/questions/1/multiple_choices/2",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 1, "question_url": "/v1/questions/1"},
{"answer": "a", "is_correct": False, 'order': 2,
"id": 1, "url": "/v1/questions/1/multiple_choices/1",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 1, "question_url": "/v1/questions/1"}]})
# make sure order is respected
resp = self.put_json(
self.url_for(
backend.question_views.MultipleChoice,
question_id=1, multiple_choice_id=2),
{'answer': 'bee', 'is_correct': True, 'order': 3})
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
self.url_for(
backend.question_views.Question, quest_id=1, question_id=1))
self.assertEqual(json.loads(resp.data)['multiple_choices'], [
{"answer": "a", "is_correct": False, 'order': 2,
"id": 1, "url": "/v1/questions/1/multiple_choices/1",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 1, "question_url": "/v1/questions/1"},
{"answer": "bee", "is_correct": True, 'order': 3,
"id": 2, "url": "/v1/questions/1/multiple_choices/2",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 1, "question_url": "/v1/questions/1"}])
# delete
resp = self.app.delete(
self.url_for(
backend.question_views.MultipleChoice,
question_id=1, multiple_choice_id=2))
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
self.url_for(
backend.question_views.Question, quest_id=1, question_id=1))
self.assertEqual(json.loads(resp.data)['multiple_choices'], [
{"answer": "a", "is_correct": False, 'order': 2,
"id": 1, "url": "/v1/questions/1/multiple_choices/1",
"creator_id": 1, "creator_url": "/v1/users/1",
"question_id": 1, "question_url": "/v1/questions/1"}])
resp = self.app.get(
self.url_for(
backend.question_views.MultipleChoice,
question_id=1, multiple_choice_id=2))
self.assertEqual(resp.status_code, 404)
if __name__ == '__main__':
unittest.main()
|
|
import mimetypes
import os.path
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
from django.core.mail import EmailMessage
from django.utils import translation
from unittest import mock
import six
from celery.exceptions import Retry
from olympia.amo.models import FakeEmail
from olympia.amo.tests import TestCase
from olympia.amo.utils import send_html_mail_jinja, send_mail
from olympia.users import notifications
from olympia.users.models import UserNotification, UserProfile
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
ATTACHMENTS_DIR = os.path.join(TESTS_DIR, 'attachments')
class TestSendMail(TestCase):
fixtures = ['base/users']
def setUp(self):
super(TestSendMail, self).setUp()
self._email_deny = list(getattr(settings, 'EMAIL_DENY_LIST', []))
def tearDown(self):
translation.activate('en_US')
settings.EMAIL_DENY_LIST = self._email_deny
super(TestSendMail, self).tearDown()
def test_send_string(self):
to = 'f@f.com'
with self.assertRaises(ValueError):
send_mail('subj', 'body', recipient_list=to)
def test_deny(self):
to = 'nobody@mozilla.org'
settings.EMAIL_DENY_LIST = (to,)
success = send_mail('test subject', 'test body', recipient_list=[to])
assert success
assert len(mail.outbox) == 0
def test_deny_flag(self):
to = 'nobody@mozilla.org'
settings.EMAIL_DENY_LIST = (to,)
success = send_mail('test subject', 'test body',
recipient_list=[to], use_deny_list=True)
assert success
assert len(mail.outbox) == 0
success = send_mail('test subject', 'test body',
recipient_list=[to], use_deny_list=False)
assert success
assert len(mail.outbox) == 1
def test_user_setting_default(self):
user = UserProfile.objects.all()[0]
to = user.email
# Confirm there's nothing in the DB and we're using the default
assert UserNotification.objects.count() == 0
# Make sure that this is True by default
setting = notifications.NOTIFICATIONS_BY_SHORT['reply']
assert setting.default_checked
success = send_mail('test subject', 'test body', perm_setting='reply',
recipient_list=[to])
assert success, "Email wasn't sent"
assert len(mail.outbox) == 1
# bug 676601
assert mail.outbox[0].body.count('users/unsubscribe') == 1
def test_user_setting_checked(self):
user = UserProfile.objects.all()[0]
to = user.email
n = notifications.NOTIFICATIONS_BY_SHORT['reply']
UserNotification.objects.get_or_create(
notification_id=n.id, user=user, enabled=True)
# Confirm we're reading from the database
assert UserNotification.objects.filter(
notification_id=n.id).count() == 1
success = send_mail('test subject', 'test body', perm_setting='reply',
recipient_list=[to])
assert "You received this email because" in mail.outbox[0].body
assert success, "Email wasn't sent"
assert len(mail.outbox) == 1
def test_user_mandatory(self):
# Make sure there's no unsubscribe link in mandatory emails.
user = UserProfile.objects.all()[0]
to = user.email
n = notifications.NOTIFICATIONS_BY_SHORT['individual_contact']
UserNotification.objects.get_or_create(
notification_id=n.id, user=user, enabled=True)
assert n.mandatory, "Notification isn't mandatory"
success = send_mail('test subject', 'test body', perm_setting=n,
recipient_list=[to])
assert success, "Email wasn't sent"
body = mail.outbox[0].body
assert "Unsubscribe:" not in body
assert "You can't unsubscribe from" in body
def test_user_setting_unchecked(self):
user = UserProfile.objects.all()[0]
to = user.email
n = notifications.NOTIFICATIONS_BY_SHORT['reply']
UserNotification.objects.get_or_create(
notification_id=n.id, user=user, enabled=False)
# Confirm we're reading from the database.
assert UserNotification.objects.filter(
notification_id=n.id).count() == 1
success = send_mail('test subject', 'test body', perm_setting='reply',
recipient_list=[to])
assert success, "Email wasn't sent"
assert len(mail.outbox) == 0
@mock.patch.object(settings, 'EMAIL_DENY_LIST', ())
def test_success_real_mail(self):
assert send_mail('test subject', 'test body',
recipient_list=['nobody@mozilla.org'])
assert len(mail.outbox) == 1
assert mail.outbox[0].subject.find('test subject') == 0
assert mail.outbox[0].body.find('test body') == 0
@mock.patch.object(settings, 'EMAIL_DENY_LIST', ())
@mock.patch.object(settings, 'SEND_REAL_EMAIL', False)
def test_success_fake_mail(self):
assert send_mail('test subject', 'test body',
recipient_list=['nobody@mozilla.org'])
assert len(mail.outbox) == 0
assert FakeEmail.objects.count() == 1
assert FakeEmail.objects.get().message.endswith('test body')
@mock.patch.object(settings, 'EMAIL_DENY_LIST', ())
@mock.patch.object(settings, 'SEND_REAL_EMAIL', False)
@mock.patch.object(settings, 'EMAIL_QA_ALLOW_LIST', ('nope@mozilla.org',))
def test_qa_allowed_list(self):
assert send_mail('test subject', 'test body',
recipient_list=['nope@mozilla.org'])
assert len(mail.outbox) == 1
assert mail.outbox[0].subject.find('test subject') == 0
assert mail.outbox[0].body.find('test body') == 0
assert FakeEmail.objects.count() == 1
assert FakeEmail.objects.get().message.endswith('test body')
@mock.patch.object(settings, 'EMAIL_DENY_LIST', ())
@mock.patch.object(settings, 'SEND_REAL_EMAIL', False)
@mock.patch.object(settings, 'EMAIL_QA_ALLOW_LIST', ('nope@mozilla.org',))
def test_qa_allowed_list_with_mixed_emails(self):
assert send_mail('test subject', 'test body',
recipient_list=['nope@mozilla.org', 'b@example.fr'])
assert len(mail.outbox) == 1
assert mail.outbox[0].to == ['nope@mozilla.org']
assert FakeEmail.objects.count() == 1
def test_dont_localize(self):
user = UserProfile.objects.all()[0]
to = user.email
translation.activate('zh_TW')
send_mail('test subject', 'test body', perm_setting='reply',
recipient_list=[to])
assert u'an add-on developer replies to' in mail.outbox[0].body
def test_send_html_mail_jinja(self):
emails = ['omg@org.yes']
subject = u'Mozilla Add-ons: Thank you for your submission!'
html_template = 'devhub/email/submission.html'
text_template = 'devhub/email/submission.txt'
send_html_mail_jinja(subject, html_template, text_template,
context={}, recipient_list=emails,
from_email=settings.ADDONS_EMAIL,
use_deny_list=False,
perm_setting='individual_contact')
msg = mail.outbox[0]
message = msg.message()
assert msg.to == emails
assert msg.subject == subject
assert msg.from_email == settings.ADDONS_EMAIL
assert message.is_multipart()
assert message.get_content_type() == 'multipart/alternative'
assert message.get_default_type() == 'text/plain'
payload = message.get_payload()
assert payload[0].get_content_type() == 'text/plain'
assert payload[1].get_content_type() == 'text/html'
message1 = payload[0].as_string()
message2 = payload[1].as_string()
assert '<a href' not in message1, 'text-only email contained HTML!'
assert '<a href' in message2, 'HTML email did not contain HTML!'
unsubscribe_msg = six.text_type(notifications.individual_contact.label)
assert unsubscribe_msg in message1
assert unsubscribe_msg in message2
def test_send_attachment(self):
path = os.path.join(ATTACHMENTS_DIR, 'bacon.txt')
attachments = [(
os.path.basename(path), storage.open(path, 'r').read(),
mimetypes.guess_type(path)[0])]
send_mail('test subject', 'test body', from_email='a@example.com',
recipient_list=['b@example.com'], attachments=attachments)
assert attachments == mail.outbox[0].attachments, (
'Attachments not included')
def test_send_multilines_subjects(self):
send_mail('test\nsubject', 'test body', from_email='a@example.com',
recipient_list=['b@example.com'])
assert 'test subject' == mail.outbox[0].subject, 'Subject not stripped'
def test_autoresponse_headers(self):
send_mail('subject', 'test body', from_email='a@example.com',
recipient_list=['b@example.com'])
headers = mail.outbox[0].extra_headers
assert headers['X-Auto-Response-Suppress'] == 'RN, NRN, OOF, AutoReply'
assert headers['Auto-Submitted'] == 'auto-generated'
def test_reply_to(self):
send_mail('subject', 'test body', from_email='a@example.com',
recipient_list=['b@example.com'], reply_to=['c@example.com'])
headers = mail.outbox[0].extra_headers
assert mail.outbox[0].reply_to == ['c@example.com']
assert headers['Auto-Submitted'] == 'auto-generated' # Still there.
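    # Helper for the retry tests below: returns a factory that, for each True in
    # error_order, yields a fake message object whose send() raises RuntimeError,
    # and otherwise returns a real EmailMessage.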
def make_backend_class(self, error_order):
throw_error = iter(error_order)
def make_backend(*args, **kwargs):
if next(throw_error):
class BrokenMessage(object):
def __init__(*args, **kwargs):
pass
def send(*args, **kwargs):
raise RuntimeError('uh oh')
def attach_alternative(*args, **kwargs):
pass
backend = BrokenMessage()
else:
backend = EmailMessage(*args, **kwargs)
return backend
return make_backend
@mock.patch('olympia.amo.tasks.EmailMessage')
def test_async_will_retry_default(self, backend):
backend.side_effect = self.make_backend_class([True, True, False])
with self.assertRaises(Retry):
send_mail('test subject',
'test body',
recipient_list=['somebody@mozilla.org'])
@mock.patch('olympia.amo.tasks.EmailMessage')
def test_async_will_retry(self, backend):
backend.side_effect = self.make_backend_class([True, True, False])
with self.assertRaises(Retry):
send_mail('test subject',
'test body',
max_retries=2,
recipient_list=['somebody@mozilla.org'])
@mock.patch('olympia.amo.tasks.EmailMessage')
def test_async_will_stop_retrying(self, backend):
backend.side_effect = self.make_backend_class([True, True])
with self.assertRaises(RuntimeError):
send_mail('test subject',
'test body',
max_retries=1,
recipient_list=['somebody@mozilla.org'])
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Django settings for Hue.
#
# Local customizations are done by symlinking a file
# as local_settings.py.
import logging
import os
import sys
import pkg_resources
from guppy import hpy
import desktop.conf
import desktop.log
from desktop.lib.paths import get_desktop_root
from desktop.lib.python_util import force_dict_to_strings
HUE_DESKTOP_VERSION = pkg_resources.get_distribution("desktop").version or "Unknown"
NICE_NAME = "Hue"
ENV_HUE_PROCESS_NAME = "HUE_PROCESS_NAME"
ENV_DESKTOP_DEBUG = "DESKTOP_DEBUG"
############################################################
# Part 1: Logging and imports.
############################################################
# Configure debug mode
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Start basic logging as soon as possible.
if ENV_HUE_PROCESS_NAME not in os.environ:
_proc = os.path.basename(len(sys.argv) > 1 and sys.argv[1] or sys.argv[0])
os.environ[ENV_HUE_PROCESS_NAME] = _proc
desktop.log.basic_logging(os.environ[ENV_HUE_PROCESS_NAME])
logging.info("Welcome to Hue " + HUE_DESKTOP_VERSION)
# Then we can safely import some more stuff
from desktop import appmanager
from desktop.lib import conf
# Add fancy logging
desktop.log.fancy_logging()
############################################################
# Part 2: Generic Configuration
############################################################
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
############################################################
# Part 3: Django configuration
############################################################
# Additional locations of static files
STATICFILES_DIRS = ()
# For Django admin interface
STATIC_URL = '/static/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
)
MIDDLEWARE_CLASSES = [
# The order matters
'desktop.middleware.EnsureSafeMethodMiddleware',
'desktop.middleware.DatabaseLoggingMiddleware',
'desktop.middleware.AuditLoggingMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'desktop.middleware.SpnegoMiddleware',
'desktop.middleware.HueRemoteUserMiddleware',
'django.middleware.locale.LocaleMiddleware',
'babeldjango.middleware.LocaleMiddleware',
'desktop.middleware.AjaxMiddleware',
# Must be after Session, Auth, and Ajax. Before everything else.
'desktop.middleware.LoginAndPermissionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'desktop.middleware.NotificationMiddleware',
'desktop.middleware.ExceptionMiddleware',
'desktop.middleware.ClusterMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware'
'django.middleware.csrf.CsrfViewMiddleware'
]
if os.environ.get(ENV_DESKTOP_DEBUG):
MIDDLEWARE_CLASSES.append('desktop.middleware.HtmlValidationMiddleware')
logging.debug("Will try to validate generated HTML.")
ROOT_URLCONF = 'desktop.urls'
# Hue runs its own wsgi applications
WSGI_APPLICATION = None
TEMPLATE_DIRS = (
    get_desktop_root("core/templates"),
)
INSTALLED_APPS = [
'django.contrib.auth',
'django_openid_auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django_extensions',
# 'debug_toolbar',
'south', # database migration tool
# i18n support
'babeldjango',
# Desktop injects all the other installed apps into here magically.
'desktop'
]
LOCALE_PATHS = [
get_desktop_root('core/src/desktop/locale')
]
# Keep default values up to date
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
# Not default
'desktop.context_processors.app_name',
)
# Desktop doesn't use an auth profile module, because
# it doesn't mesh very well with the notion
# of having multiple apps. If your app needs
# to store data related to users, it should
# manage its own table with an appropriate foreign key.
AUTH_PROFILE_MODULE = None
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/" # For djangosaml2 bug.
PYLINTRC = get_desktop_root('.pylintrc')
# Insert our HDFS upload handler
FILE_UPLOAD_HANDLERS = (
'hadoop.fs.upload.HDFSfileUploadHandler',
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
############################################################
# Part 4: Installation of apps
############################################################
_config_dir = os.getenv("HUE_CONF_DIR", get_desktop_root("conf"))
# Libraries are loaded and configured before the apps
appmanager.load_libs()
_lib_conf_modules = [dict(module=app.conf, config_key=None) for app in appmanager.DESKTOP_LIBS if app.conf is not None]
LOCALE_PATHS.extend([app.locale_path for app in appmanager.DESKTOP_LIBS])
# Load desktop config
_desktop_conf_modules = [dict(module=desktop.conf, config_key=None)]
conf.initialize(_desktop_conf_modules, _config_dir)
# Activate l10n
# Install apps
appmanager.load_apps(desktop.conf.APP_BLACKLIST.get())
for app in appmanager.DESKTOP_APPS:
INSTALLED_APPS.extend(app.django_apps)
LOCALE_PATHS.append(app.locale_path)
logging.debug("Installed Django modules: %s" % ",".join(map(str, appmanager.DESKTOP_MODULES)))
# Load app configuration
_app_conf_modules = [dict(module=app.conf, config_key=app.config_key) for app in appmanager.DESKTOP_APPS if app.conf is not None]
conf.initialize(_lib_conf_modules, _config_dir)
conf.initialize(_app_conf_modules, _config_dir)
# Now that we've loaded the desktop conf, set the django DEBUG mode based on the conf.
DEBUG = desktop.conf.DJANGO_DEBUG_MODE.get()
TEMPLATE_DEBUG = DEBUG
############################################################
# Part 4a: Django configuration that requires bound Desktop
# configs.
############################################################
# Configure hue admins
ADMINS = []
for admin in desktop.conf.DJANGO_ADMINS.get():
admin_conf = desktop.conf.DJANGO_ADMINS[admin]
if 'name' in admin_conf.bind_to and 'email' in admin_conf.bind_to:
    ADMINS.append((admin_conf.NAME.get(), admin_conf.EMAIL.get()))
ADMINS = tuple(ADMINS)
MANAGERS = ADMINS
# Server Email Address
SERVER_EMAIL = desktop.conf.DJANGO_SERVER_EMAIL.get()
# Email backend
EMAIL_BACKEND = desktop.conf.DJANGO_EMAIL_BACKEND.get()
# Configure database
if os.getenv('DESKTOP_DB_CONFIG'):
conn_string = os.getenv('DESKTOP_DB_CONFIG')
logging.debug("DESKTOP_DB_CONFIG SET: %s" % (conn_string))
default_db = dict(zip(
["ENGINE", "NAME", "TEST_NAME", "USER", "PASSWORD", "HOST", "PORT"],
conn_string.split(':')))
else:
default_db = {
"ENGINE" : desktop.conf.DATABASE.ENGINE.get(),
"NAME" : desktop.conf.DATABASE.NAME.get(),
"USER" : desktop.conf.DATABASE.USER.get(),
"PASSWORD" : desktop.conf.DATABASE.PASSWORD.get(),
"HOST" : desktop.conf.DATABASE.HOST.get(),
"PORT" : str(desktop.conf.DATABASE.PORT.get()),
"OPTIONS": force_dict_to_strings(desktop.conf.DATABASE.OPTIONS.get()),
# DB used for tests
"TEST_NAME" : get_desktop_root('desktop-test.db')
}
DATABASES = {
'default': default_db
}
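# Illustrative only (not read by Hue): with the colon-separated
# DESKTOP_DB_CONFIG format parsed above, a value might look like
#   django.db.backends.postgresql_psycopg2:hue:hue_test:hue:secret:localhost:5432
# mapping positionally onto ENGINE:NAME:TEST_NAME:USER:PASSWORD:HOST:PORT.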
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-hue'
}
}
# Configure sessions
SESSION_COOKIE_AGE = desktop.conf.SESSION.TTL.get()
SESSION_COOKIE_SECURE = desktop.conf.SESSION.SECURE.get()
SESSION_EXPIRE_AT_BROWSER_CLOSE = desktop.conf.SESSION.EXPIRE_AT_BROWSER_CLOSE.get()
# HTTP only
SESSION_COOKIE_HTTPONLY = desktop.conf.SESSION.HTTP_ONLY.get()
# django-nose test specifics
TEST_RUNNER = 'desktop.lib.test_runners.HueTestRunner'
# Turn off cache middleware
if 'test' in sys.argv:
CACHE_MIDDLEWARE_SECONDS = 0
TIME_ZONE = desktop.conf.TIME_ZONE.get()
# Desktop supports only one authentication backend.
AUTHENTICATION_BACKENDS = (desktop.conf.AUTH.BACKEND.get(),)
if desktop.conf.DEMO_ENABLED.get():
AUTHENTICATION_BACKENDS = ('desktop.auth.backend.DemoBackend',)
EMAIL_HOST = desktop.conf.SMTP.HOST.get()
EMAIL_PORT = desktop.conf.SMTP.PORT.get()
EMAIL_HOST_USER = desktop.conf.SMTP.USER.get()
EMAIL_HOST_PASSWORD = desktop.conf.SMTP.PASSWORD.get()
EMAIL_USE_TLS = desktop.conf.SMTP.USE_TLS.get()
DEFAULT_FROM_EMAIL = desktop.conf.SMTP.DEFAULT_FROM.get()
# Used for securely creating sessions. Should be unique and not shared with anybody.
SECRET_KEY = desktop.conf.SECRET_KEY.get()
if SECRET_KEY == "":
logging.warning("secret_key should be configured")
# SAML
SAML_AUTHENTICATION = 'libsaml.backend.SAML2Backend' in AUTHENTICATION_BACKENDS
if SAML_AUTHENTICATION:
from libsaml.saml_settings import *
INSTALLED_APPS.append('libsaml')
LOGIN_URL = '/saml2/login/'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Middleware classes.
for middleware in desktop.conf.MIDDLEWARE.get():
MIDDLEWARE_CLASSES.append(middleware)
# OpenId
OPENID_AUTHENTICATION = 'libopenid.backend.OpenIDBackend' in AUTHENTICATION_BACKENDS
if OPENID_AUTHENTICATION:
from libopenid.openid_settings import *
INSTALLED_APPS.append('libopenid')
LOGIN_URL = '/openid/login'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# OAuth
OAUTH_AUTHENTICATION = 'liboauth.backend.OAuthBackend' in AUTHENTICATION_BACKENDS
if OAUTH_AUTHENTICATION:
INSTALLED_APPS.append('liboauth')
LOGIN_URL = '/oauth/accounts/login'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# URL Redirection white list.
if desktop.conf.REDIRECT_WHITELIST.get():
MIDDLEWARE_CLASSES.append('desktop.middleware.EnsureSafeRedirectURLMiddleware')
# Support HTTPS load-balancing
if desktop.conf.SECURE_PROXY_SSL_HEADER.get():
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
############################################################
# Necessary for South to not fuzz with tests. Fixed in South 0.7.1
SKIP_SOUTH_TESTS = True
# Set up environment variable so Kerberos libraries look at our private
# ticket cache
os.environ['KRB5CCNAME'] = desktop.conf.KERBEROS.CCACHE_PATH.get()
# Memory
if desktop.conf.MEMORY_PROFILER.get():
MEMORY_PROFILER = hpy()
MEMORY_PROFILER.setrelheap()
|
|
""" This module contains utility functions to construct and manipulate counting
data structures for frames.
When performing statistical profiling we obtain many call stacks. We aggregate
these call stacks into data structures that maintain counts of how many times
each function in that call stack has been called. Because these stacks will
overlap this aggregation counting structure forms a tree, such as is commonly
visualized by profiling tools.
We represent this tree as a nested dictionary with the following form:
{
'identifier': 'root',
'description': 'A long description of the line of code being run.',
'count': 10 # the number of times we have seen this line
'children': { # callers of this line. Recursive dicts
        'ident-a': {'description': ...
                    'identifier': 'ident-a',
'count': ...
'children': {...}},
'ident-b': {'description': ...
'identifier': 'ident-b',
'count': ...
'children': {...}}}
}
"""
import bisect
from collections import defaultdict, deque
import linecache
import sys
import threading
from time import sleep
import tlz as toolz
from .metrics import time
from .utils import format_time, color_of, parse_timedelta
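# Hedged sketch (not part of this module): hand-building the nested counting
# structure described in the module docstring above, for a root line observed
# three times with a single child entry. All identifiers here are made up.
def _example_counting_tree():
    leaf = {
        "identifier": "f;example.py;10",
        "description": "line inside f()",
        "count": 3,
        "children": {},
    }
    return {
        "identifier": "root",
        "description": "root",
        "count": 3,
        "children": {leaf["identifier"]: leaf},
    }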
def identifier(frame):
""" A string identifier from a frame
Strings are cheaper to use as indexes into dicts than tuples or dicts
"""
if frame is None:
return "None"
else:
return ";".join(
(
frame.f_code.co_name,
frame.f_code.co_filename,
str(frame.f_code.co_firstlineno),
)
)
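# Hedged usage sketch: the identifier of the currently executing frame.
# sys._getframe() is CPython-specific and the exact string depends on where
# this is called from.
def _example_identifier():
    frame = sys._getframe()
    return identifier(frame)  # e.g. "_example_identifier;profile.py;<lineno>"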
def repr_frame(frame):
""" Render a frame as a line for inclusion into a text traceback """
co = frame.f_code
text = ' File "%s", line %s, in %s' % (co.co_filename, frame.f_lineno, co.co_name)
line = linecache.getline(co.co_filename, frame.f_lineno, frame.f_globals).lstrip()
return text + "\n\t" + line
def info_frame(frame):
co = frame.f_code
line = linecache.getline(co.co_filename, frame.f_lineno, frame.f_globals).lstrip()
return {
"filename": co.co_filename,
"name": co.co_name,
"line_number": frame.f_lineno,
"line": line,
}
def process(frame, child, state, stop=None, omit=None):
""" Add counts from a frame stack onto existing state
This recursively adds counts to the existing state dictionary and creates
new entries for new functions.
Examples
--------
>>> import sys, threading
>>> ident = threading.get_ident() # replace with your thread of interest
>>> frame = sys._current_frames()[ident]
>>> state = {'children': {}, 'count': 0, 'description': 'root',
... 'identifier': 'root'}
>>> process(frame, None, state)
>>> state
{'count': 1,
'identifier': 'root',
'description': 'root',
'children': {'...'}}
"""
if omit is not None and any(frame.f_code.co_filename.endswith(o) for o in omit):
return False
prev = frame.f_back
if prev is not None and (
stop is None or not prev.f_code.co_filename.endswith(stop)
):
state = process(prev, frame, state, stop=stop)
if state is False:
return False
ident = identifier(frame)
try:
d = state["children"][ident]
except KeyError:
d = {
"count": 0,
"description": info_frame(frame),
"children": {},
"identifier": ident,
}
state["children"][ident] = d
state["count"] += 1
if child is not None:
return d
else:
d["count"] += 1
def merge(*args):
""" Merge multiple frame states together """
if not args:
return create()
s = {arg["identifier"] for arg in args}
if len(s) != 1:
raise ValueError("Expected identifiers, got %s" % str(s))
children = defaultdict(list)
for arg in args:
for child in arg["children"]:
children[child].append(arg["children"][child])
children = {k: merge(*v) for k, v in children.items()}
count = sum(arg["count"] for arg in args)
return {
"description": args[0]["description"],
"children": dict(children),
"count": count,
"identifier": args[0]["identifier"],
}
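# Hedged usage sketch: merging two root states produced by create(); counts
# add up and children are merged recursively. Purely illustrative.
def _example_merge():
    a, b = create(), create()
    a["count"], b["count"] = 2, 3
    merged = merge(a, b)
    assert merged["count"] == 5
    return merged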
def create():
return {
"count": 0,
"children": {},
"identifier": "root",
"description": {"filename": "", "name": "", "line_number": 0, "line": ""},
}
def call_stack(frame):
""" Create a call text stack from a frame
Returns
-------
list of strings
"""
L = []
while frame:
L.append(repr_frame(frame))
frame = frame.f_back
return L[::-1]
def plot_data(state, profile_interval=0.010):
""" Convert a profile state into data useful by Bokeh
See Also
--------
plot_figure
distributed.bokeh.components.ProfilePlot
"""
starts = []
stops = []
heights = []
widths = []
colors = []
states = []
times = []
filenames = []
lines = []
line_numbers = []
names = []
def traverse(state, start, stop, height):
if not state["count"]:
return
starts.append(start)
stops.append(stop)
heights.append(height)
width = stop - start
widths.append(width)
states.append(state)
times.append(format_time(state["count"] * profile_interval))
desc = state["description"]
filenames.append(desc["filename"])
lines.append(desc["line"])
line_numbers.append(desc["line_number"])
names.append(desc["name"])
ident = state["identifier"]
try:
fn = desc["filename"]
        except (KeyError, IndexError):
colors.append("gray")
else:
if fn == "<low-level>":
colors.append("lightgray")
else:
colors.append(color_of(fn))
delta = (stop - start) / state["count"]
x = start
for name, child in state["children"].items():
width = child["count"] * delta
traverse(child, x, x + width, height + 1)
x += width
traverse(state, 0, 1, 0)
percentages = ["{:.1f}%".format(100 * w) for w in widths]
return {
"left": starts,
"right": stops,
"bottom": heights,
"width": widths,
"top": [x + 1 for x in heights],
"color": colors,
"states": states,
"filename": filenames,
"line": lines,
"line_number": line_numbers,
"name": names,
"time": times,
"percentage": percentages,
}
def _watch(thread_id, log, interval="20ms", cycle="2s", omit=None, stop=lambda: False):
interval = parse_timedelta(interval)
cycle = parse_timedelta(cycle)
recent = create()
last = time()
while not stop():
if time() > last + cycle:
log.append((time(), recent))
recent = create()
last = time()
try:
frame = sys._current_frames()[thread_id]
except KeyError:
return
process(frame, None, recent, omit=omit)
sleep(interval)
def watch(
thread_id=None,
interval="20ms",
cycle="2s",
maxlen=1000,
omit=None,
stop=lambda: False,
):
""" Gather profile information on a particular thread
This starts a new thread to watch a particular thread and returns a deque
that holds periodic profile information.
Parameters
----------
thread_id: int
interval: str
Time per sample
cycle: str
Time per refreshing to a new profile state
maxlen: int
Passed onto deque, maximum number of periods
omit: str
Don't include entries that start with this filename
stop: callable
Function to call to see if we should stop
Returns
-------
deque
"""
if thread_id is None:
thread_id = threading.get_ident()
log = deque(maxlen=maxlen)
thread = threading.Thread(
target=_watch,
name="Profile",
kwargs={
"thread_id": thread_id,
"interval": interval,
"cycle": cycle,
"log": log,
"omit": omit,
"stop": stop,
},
)
thread.daemon = True
thread.start()
return log
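# Hedged usage sketch: profiling the calling thread for a moment. The returned
# deque fills with (timestamp, state) pairs roughly once per `cycle`; the
# watcher thread is a daemon and keeps sampling until the process exits.
def _example_watch(duration=0.5):
    log = watch(interval="10ms", cycle="100ms")
    sleep(duration)
    return list(log)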
def get_profile(history, recent=None, start=None, stop=None, key=None):
""" Collect profile information from a sequence of profile states
Parameters
----------
history: Sequence[Tuple[time, Dict]]
A list or deque of profile states
recent: dict
The most recent accumulating state
start: time
stop: time
"""
now = time()
if start is None:
istart = 0
else:
istart = bisect.bisect_left(history, (start,))
if stop is None:
istop = None
else:
istop = bisect.bisect_right(history, (stop,)) + 1
if istop >= len(history):
istop = None # include end
if istart == 0 and istop is None:
history = list(history)
else:
iistop = len(history) if istop is None else istop
history = [history[i] for i in range(istart, iistop)]
prof = merge(*toolz.pluck(1, history))
if not history:
return create()
if recent:
prof = merge(prof, recent)
return prof
def plot_figure(data, **kwargs):
""" Plot profile data using Bokeh
This takes the output from the function ``plot_data`` and produces a Bokeh
figure
See Also
--------
plot_data
"""
from bokeh.plotting import ColumnDataSource, figure
from bokeh.models import HoverTool
if "states" in data:
data = toolz.dissoc(data, "states")
source = ColumnDataSource(data=data)
fig = figure(tools="tap,box_zoom,xwheel_zoom,reset", **kwargs)
r = fig.quad(
"left",
"right",
"top",
"bottom",
color="color",
line_color="black",
line_width=2,
source=source,
)
r.selection_glyph = None
r.nonselection_glyph = None
hover = HoverTool(
point_policy="follow_mouse",
tooltips="""
<div>
<span style="font-size: 14px; font-weight: bold;">Name:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@name</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Filename:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@filename</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Line number:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@line_number</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Line:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@line</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Time:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@time</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Percentage:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@percentage</span>
</div>
""",
)
fig.add_tools(hover)
fig.xaxis.visible = False
fig.yaxis.visible = False
fig.grid.visible = False
return fig, source
def _remove_py_stack(frames):
for entry in frames:
if entry.is_python:
break
yield entry
def llprocess(frames, child, state):
""" Add counts from low level profile information onto existing state
This uses the ``stacktrace`` module to collect low level stack trace
    information and place it onto the given state.
It is configured with the ``distributed.worker.profile.low-level`` config
entry.
See Also
--------
process
ll_get_stack
"""
if not frames:
return
frame = frames.pop()
if frames:
state = llprocess(frames, frame, state)
addr = hex(frame.addr - frame.offset)
ident = ";".join(map(str, (frame.name, "<low-level>", addr)))
try:
d = state["children"][ident]
except KeyError:
d = {
"count": 0,
"description": {
"filename": "<low-level>",
"name": frame.name,
"line_number": 0,
"line": str(frame),
},
"children": {},
"identifier": ident,
}
state["children"][ident] = d
state["count"] += 1
if child is not None:
return d
else:
d["count"] += 1
def ll_get_stack(tid):
""" Collect low level stack information from thread id """
from stacktrace import get_thread_stack
frames = get_thread_stack(tid, show_python=False)
llframes = list(_remove_py_stack(frames))[::-1]
return llframes
|