"""
Multilayer Perceptron
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2012-2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow", "David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import logging
import math
import operator
import sys
import warnings
import numpy as np
from theano.compat import six
from theano.compat.six.moves import reduce, xrange
from theano import config
from theano.gof.op import get_debug_values
from theano.sandbox.cuda import cuda_enabled
from theano.sandbox.cuda.dnn import dnn_available, dnn_pool
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.signal.downsample import max_pool_2d
import theano.tensor as T
from pylearn2.compat import OrderedDict
from pylearn2.costs.mlp import Default
from pylearn2.expr.probabilistic_max_pooling import max_pool_channels
# Try to import the fast cudnn library, else fallback to conv2d
if cuda_enabled and dnn_available():
try:
from pylearn2.linear import cudnn2d as conv2d
except ImportError:
from pylearn2.linear import conv2d
else:
from pylearn2.linear import conv2d
from pylearn2.linear.matrixmul import MatrixMul
from pylearn2.model_extensions.norm_constraint import MaxL2FilterNorm
from pylearn2.models.model import Model
from pylearn2.monitor import get_monitor_doc
from pylearn2.expr.nnet import arg_of_softmax
from pylearn2.expr.nnet import pseudoinverse_softmax_numpy
from pylearn2.space import CompositeSpace
from pylearn2.space import Conv2DSpace
from pylearn2.space import Space
from pylearn2.space import VectorSpace, IndexSpace
from pylearn2.utils import function
from pylearn2.utils import is_iterable
from pylearn2.utils import py_float_types
from pylearn2.utils import py_integer_types
from pylearn2.utils import safe_union
from pylearn2.utils import safe_zip
from pylearn2.utils import safe_izip
from pylearn2.utils import sharedX
from pylearn2.utils import wraps
from pylearn2.utils import contains_inf
from pylearn2.utils import isfinite
from pylearn2.utils.data_specs import DataSpecsMapping
from pylearn2.expr.nnet import (elemwise_kl, kl, compute_precision,
compute_recall, compute_f1)
# Only to be used by the deprecation warning wrapper functions
from pylearn2.costs.mlp import L1WeightDecay as _L1WD
from pylearn2.costs.mlp import WeightDecay as _WD
# Module-level logger; configured by the application, not by this module.
logger = logging.getLogger(__name__)

logger.debug("MLP changing the recursion limit.")

# We need this to be high enough that the big theano graphs we make
# when doing max pooling via subtensors don't cause python to complain.
# python intentionally declares stack overflow well before the stack
# segment is actually exceeded. But we can't make this value too big
# either, or we'll get seg faults when the python interpreter really
# does go over the stack segment.
# IG encountered seg faults on eos3 (a machine at LISA labo) when using
# 50000 so for now it is set to 40000.
# I think the actual safe recursion limit can't be predicted in advance
# because you don't know how big of a stack frame each function will
# make, so there is not really a "correct" way to do this. Really the
# python interpreter should provide an option to raise the error
# precisely when you're going to exceed the stack segment.
# NOTE: this is a process-wide side effect of importing this module.
sys.setrecursionlimit(40000)
class Layer(Model):
    """
    Abstract base class for one layer of an MLP.

    A Layer belongs to at most one MLP.

    Parameters
    ----------
    kwargs : dict
        Forwarded to the `Model` superclass.

    Notes
    -----
    This is not a Block: as far as I know the Block interface assumes every
    input is a single matrix, and cannot express Space-based composite
    inputs, stacked multichannel images, etc. If Block ever becomes that
    flexible, this could be made a Block.
    """

    # Value substituted for inputs that dropout masks out. Usually 0,
    # but subclasses may override.
    dropout_input_mask_value = 0.

    def get_mlp(self):
        """
        Return the MLP this layer has been assigned to.

        Returns
        -------
        mlp : MLP or None
            The owning MLP, or None if the layer is still unassigned.
        """
        return getattr(self, 'mlp', None)

    def set_mlp(self, mlp):
        """
        Attach this layer to `mlp`.

        The layer will then use the MLP's random number generator, batch
        size, etc. Its name must be unique within the MLP.

        Parameters
        ----------
        mlp : MLP
        """
        # A layer may only ever be assigned once.
        assert self.get_mlp() is None
        self.mlp = mlp

    def get_layer_monitoring_channels(self, state_below=None,
                                      state=None, targets=None):
        """
        Return monitoring channels for this layer.

        Parameters
        ----------
        state_below : member of self.input_space, optional
            Minibatch of inputs this layer received. Usually unnecessary
            when `state` is given.
        state : member of self.output_space, optional
            Minibatch of outputs this layer produced during fprop,
            provided externally to keep the Theano graph small and
            compilation fast.
        targets : member of self.output_space, optional
            Targets for the last layer; None for any other layer.

        Returns
        -------
        channels : OrderedDict
            Maps channel names to monitoring channels for this layer.
        """
        # Base class monitors nothing.
        return OrderedDict()

    def fprop(self, state_below):
        """
        Apply this layer's forward transformation.

        Parameters
        ----------
        state_below : member of self.input_space
            Minibatch of states of the layer below.

        Returns
        -------
        state : member of self.output_space
            Minibatch of states of this layer.
        """
        msg = str(type(self)) + " does not implement fprop."
        raise NotImplementedError(msg)

    def cost(self, Y, Y_hat):
        """
        Scalar cost of outputting `Y_hat` when the truth is `Y`.

        Parameters
        ----------
        Y : theano.gof.Variable
            The targets.
        Y_hat : theano.gof.Variable
            The predictions, assumed to come from this layer's `fprop`.
            Implementations may inspect the ancestors of `Y_hat` in the
            Theano graph, e.g. to compute numerically stable *log*
            probabilities when `Y_hat` is the *probability*.

        Returns
        -------
        cost : theano.gof.Variable
            A Theano scalar describing the cost.
        """
        msg = str(type(self)) + " does not implement mlp.Layer.cost."
        raise NotImplementedError(msg)

    def cost_from_cost_matrix(self, cost_matrix):
        """
        Reduce an elementwise cost matrix to the final scalar cost.

        Parameters
        ----------
        cost_matrix : WRITEME

        Examples
        --------
        >>> # C = model.cost_matrix(Y, Y_hat)
        >>> # Do something with C like setting some values to 0
        >>> # cost = model.cost_from_cost_matrix(C)
        """
        msg = (str(type(self)) + " does not implement "
               "mlp.Layer.cost_from_cost_matrix.")
        raise NotImplementedError(msg)

    def cost_matrix(self, Y, Y_hat):
        """
        Elementwise cost of outputting `Y_hat` when the truth is `Y`.

        Parameters
        ----------
        Y : WRITEME
        Y_hat : WRITEME

        Returns
        -------
        WRITEME
        """
        msg = str(type(self)) + " does not implement mlp.Layer.cost_matrix"
        raise NotImplementedError(msg)

    def set_weights(self, weights):
        """
        Set the weights of this layer.

        Parameters
        ----------
        weights : ndarray
            Desired weights. Subclasses should document their own
            subclass-specific ndarray format.
        """
        msg = str(type(self)) + " does not implement set_weights."
        raise NotImplementedError(msg)

    def get_biases(self):
        """
        Return the current value of this layer's biases.

        Returns
        -------
        biases : ndarray
            The biases. Subclasses should document their own
            subclass-specific ndarray format.
        """
        msg = (str(type(self)) + " does not implement "
               "get_biases (perhaps because the class has no biases).")
        raise NotImplementedError(msg)

    def set_biases(self, biases):
        """
        Set the biases of this layer.

        Parameters
        ----------
        biases : ndarray
            Desired biases. Subclasses should document their own
            subclass-specific ndarray format.
        """
        msg = (str(type(self)) + " does not implement "
               "set_biases (perhaps because the class has no biases).")
        raise NotImplementedError(msg)

    def get_weights_format(self):
        """
        Describe how to interpret this layer's weight matrix.

        Returns
        -------
        format : tuple
            Either ('v', 'h') or ('h', 'v'). ('v', 'h') means a matrix of
            shape (num visible units, num hidden units); ('h', 'v') means
            its transpose.
        """
        raise NotImplementedError

    def get_weight_decay(self, coeff):
        """
        Expression for a squared L2 penalty on this layer's weights.

        Parameters
        ----------
        coeff : float or tuple
            Coefficient on the penalty. For most layers a single float;
            layers with many pieces may take a (nested) tuple of floats
            and should document its semantics.

        Returns
        -------
        weight_decay : theano.gof.Variable
            The weight decay penalty term for this layer.
        """
        msg = str(type(self)) + " does not implement get_weight_decay."
        raise NotImplementedError(msg)

    def get_l1_weight_decay(self, coeff):
        """
        Expression for an L1 penalty on this layer's weights.

        Parameters
        ----------
        coeff : float or tuple
            Coefficient on the penalty. For most layers a single float;
            layers with many pieces may take a (nested) tuple of floats
            and should document its semantics.

        Returns
        -------
        weight_decay : theano.gof.Variable
            The L1 weight decay penalty term for this layer.
        """
        msg = str(type(self)) + " does not implement get_l1_weight_decay."
        raise NotImplementedError(msg)

    def set_input_space(self, space):
        """
        Prepare this layer to receive input formatted per `space`.

        Parameters
        ----------
        space : Space
            The Space the input to this layer will lie in.

        Notes
        -----
        This usually resets parameters.
        """
        msg = str(type(self)) + " does not implement set_input_space."
        raise NotImplementedError(msg)
class MLP(Layer):
    """
    A multilayer perceptron.

    Note that it's possible for an entire MLP to be a single layer of a
    larger MLP.

    Parameters
    ----------
    layers : list
        A list of Layer objects. The final layer specifies the output space
        of this MLP.
    batch_size : int, optional
        If not specified then must be a positive integer. Mostly useful if
        one of your layers involves a Theano op like convolution that
        requires a hard-coded batch size.
    nvis : int, optional
        Number of "visible units" (input units). Equivalent to specifying
        `input_space=VectorSpace(dim=nvis)`. Note that certain methods
        require a different type of input space (e.g. a Conv2DSpace in the
        case of convnets). Use the input_space parameter in such cases.
        Should be None if the MLP is part of another MLP.
    input_space : Space object, optional
        A Space specifying the kind of input the MLP accepts. If None,
        input space is specified by nvis. Should be None if the MLP is
        part of another MLP.
    input_source : string or (nested) tuple of strings, optional
        A (nested) tuple of strings specifiying the input sources this
        MLP accepts. The structure should match that of input_space. The
        default is 'features'. Note that this argument is ignored when
        the MLP is nested.
    target_source : string or (nested) tuple of strings, optional
        A (nested) tuple of strings specifiying the target sources this
        MLP accepts. The structure should match that of target_space. The
        default is 'targets'. Note that this argument is ignored when
        the MLP is nested.
    layer_name : name of the MLP layer. Should be None if the MLP is
        not part of another MLP.
    seed : WRITEME
    monitor_targets : bool, optional
        Default: True
        If true, includes monitoring channels that are functions of the
        targets. This can be disabled to allow monitoring on monitoring
        datasets that do not include targets.
    kwargs : dict
        Passed on to the superclass.
    """

    def __init__(self, layers, batch_size=None, input_space=None,
                 input_source='features', target_source='targets',
                 nvis=None, seed=None, layer_name=None, monitor_targets=True,
                 **kwargs):
        super(MLP, self).__init__(**kwargs)
        self.seed = seed

        assert isinstance(layers, list)
        assert all(isinstance(layer, Layer) for layer in layers)
        assert len(layers) >= 1

        self.layer_name = layer_name

        self.layer_names = set()
        for layer in layers:
            assert layer.get_mlp() is None
            if layer.layer_name in self.layer_names:
                raise ValueError("MLP.__init__ given two or more layers "
                                 "with same name: " + layer.layer_name)
            layer.set_mlp(self)
            self.layer_names.add(layer.layer_name)

        self.layers = layers

        self.batch_size = batch_size
        self.force_batch_size = batch_size

        self._input_source = input_source
        self._target_source = target_source

        self.monitor_targets = monitor_targets

        if input_space is not None or nvis is not None:
            self._nested = False
            self.setup_rng()

            # check if the layer_name is None (the MLP is the outer MLP)
            assert layer_name is None

            if nvis is not None:
                input_space = VectorSpace(nvis)

            # Check whether the input_space and input_source structures match
            try:
                DataSpecsMapping((input_space, input_source))
            except ValueError:
                raise ValueError("The structures of `input_space`, %s, and "
                                 "`input_source`, %s do not match. If you "
                                 "specified a CompositeSpace as an input, "
                                 "be sure to specify the data sources as well."
                                 % (input_space, input_source))

            self.input_space = input_space
            self._update_layer_input_spaces()
        else:
            self._nested = True

        self.freeze_set = set([])

    @property
    def input_source(self):
        assert not self._nested, "A nested MLP does not have an input source"
        return self._input_source

    @property
    def target_source(self):
        assert not self._nested, "A nested MLP does not have a target source"
        return self._target_source

    def setup_rng(self):
        """
        Initialize `self.rng` from `self.seed` (or a default seed).

        Only valid for the outermost MLP; nested MLPs inherit their
        parent's RNG in `set_input_space`.
        """
        assert not self._nested, "Nested MLPs should use their parent's RNG"
        if self.seed is None:
            self.seed = [2013, 1, 4]
        self.rng = np.random.RandomState(self.seed)

    @wraps(Layer.get_default_cost)
    def get_default_cost(self):
        return Default()

    @wraps(Layer.get_output_space)
    def get_output_space(self):
        return self.layers[-1].get_output_space()

    @wraps(Layer.get_target_space)
    def get_target_space(self):
        return self.layers[-1].get_target_space()

    @wraps(Layer.set_input_space)
    def set_input_space(self, space):
        if hasattr(self, "mlp"):
            assert self._nested
            # Nested MLPs share their parent's RNG and batch size.
            self.rng = self.mlp.rng
            self.batch_size = self.mlp.batch_size
        self.input_space = space
        self._update_layer_input_spaces()

    def _update_layer_input_spaces(self):
        """
        Tell each layer what its input space should be.

        Notes
        -----
        This usually resets the layer's parameters!
        """
        layers = self.layers
        try:
            layers[0].set_input_space(self.get_input_space())
        except BadInputSpaceError as e:
            raise TypeError("Layer 0 (" + str(layers[0]) + " of type " +
                            str(type(layers[0])) +
                            ") does not support the MLP's "
                            + "specified input space (" +
                            str(self.get_input_space()) +
                            " of type " + str(type(self.get_input_space())) +
                            "). Original exception: " + str(e))
        # Each subsequent layer consumes the previous layer's output space.
        for i in xrange(1, len(layers)):
            layers[i].set_input_space(layers[i - 1].get_output_space())

    def add_layers(self, layers):
        """
        Add new layers on top of the existing hidden layers

        Parameters
        ----------
        layers : list
            Layer objects to append, in bottom-to-top order.
        """
        existing_layers = self.layers
        assert len(existing_layers) > 0
        for layer in layers:
            assert layer.get_mlp() is None
            layer.set_mlp(self)
            # In the case of nested MLPs, input/output spaces may have not yet
            # been initialized
            if not self._nested or hasattr(self, 'input_space'):
                layer.set_input_space(existing_layers[-1].get_output_space())
            existing_layers.append(layer)
            assert layer.layer_name not in self.layer_names
            self.layer_names.add(layer.layer_name)

    def freeze(self, parameter_set):
        """
        Freezes some of the parameters (new theano functions that implement
        learning will not use them; existing theano functions will continue
        to modify them).

        Parameters
        ----------
        parameter_set : set
            Set of parameters to freeze.
        """
        self.freeze_set = self.freeze_set.union(parameter_set)

    @wraps(Layer.get_monitoring_channels)
    def get_monitoring_channels(self, data):
        # if the MLP is the outer MLP \
        # (ie MLP is not contained in another structure)
        if self.monitor_targets:
            X, Y = data
        else:
            X = data
            Y = None
        rval = self.get_layer_monitoring_channels(state_below=X,
                                                  targets=Y)
        return rval

    @wraps(Layer.get_layer_monitoring_channels)
    def get_layer_monitoring_channels(self, state_below=None,
                                      state=None, targets=None):
        rval = OrderedDict()
        state = state_below

        for layer in self.layers:
            # We don't go through all the inner layers recursively
            state_below = state
            state = layer.fprop(state)
            args = [state_below, state]
            # Only the final layer sees the targets.
            if layer is self.layers[-1] and targets is not None:
                args.append(targets)
            ch = layer.get_layer_monitoring_channels(*args)
            if not isinstance(ch, OrderedDict):
                raise TypeError(str((type(ch), layer.layer_name)))
            for key in ch:
                value = ch[key]
                doc = get_monitor_doc(value)
                if doc is None:
                    doc = str(type(layer)) + \
                        ".get_monitoring_channels_from_state did" + \
                        " not provide any further documentation for" + \
                        " this channel."
                doc = 'This channel came from a layer called "' + \
                    layer.layer_name + '" of an MLP.\n' + doc
                value.__doc__ = doc
                rval[layer.layer_name + '_' + key] = value

        return rval

    def get_monitoring_data_specs(self):
        """
        Returns data specs requiring both inputs and targets.

        Returns
        -------
        data_specs: TODO
            The data specifications for both inputs and targets.
        """
        if not self.monitor_targets:
            return (self.get_input_space(), self.get_input_source())
        space = CompositeSpace((self.get_input_space(),
                                self.get_target_space()))
        source = (self.get_input_source(), self.get_target_source())
        return (space, source)

    @wraps(Layer.get_params)
    def get_params(self):

        if not hasattr(self, "input_space"):
            raise AttributeError("Input space has not been provided.")

        rval = []
        for layer in self.layers:
            layer_params = layer.get_params()
            assert not isinstance(layer_params, set)
            for param in layer_params:
                # Surface layers whose params are unnamed; the assert at
                # the end requires every returned param to be named.
                if param.name is None:
                    logger.info(type(layer))
                if param not in rval:
                    rval.append(param)

        rval = [elem for elem in rval if elem not in self.freeze_set]

        assert all([elem.name is not None for elem in rval])

        return rval

    @wraps(Layer.get_weight_decay)
    def get_weight_decay(self, coeffs):
        # check the case where coeffs is a scalar
        if not hasattr(coeffs, '__iter__'):
            coeffs = [coeffs] * len(self.layers)

        layer_costs = []
        for layer, coeff in safe_izip(self.layers, coeffs):
            if coeff != 0.:
                layer_costs += [layer.get_weight_decay(coeff)]

        if len(layer_costs) == 0:
            return T.constant(0, dtype=config.floatX)

        total_cost = reduce(operator.add, layer_costs)

        return total_cost

    @wraps(Layer.get_l1_weight_decay)
    def get_l1_weight_decay(self, coeffs):
        # check the case where coeffs is a scalar
        if not hasattr(coeffs, '__iter__'):
            coeffs = [coeffs] * len(self.layers)

        layer_costs = []
        for layer, coeff in safe_izip(self.layers, coeffs):
            if coeff != 0.:
                layer_costs += [layer.get_l1_weight_decay(coeff)]

        if len(layer_costs) == 0:
            return T.constant(0, dtype=config.floatX)

        total_cost = reduce(operator.add, layer_costs)

        return total_cost

    @wraps(Model.set_batch_size)
    def set_batch_size(self, batch_size):

        self.batch_size = batch_size
        self.force_batch_size = batch_size

        for layer in self.layers:
            layer.set_batch_size(batch_size)

    @wraps(Layer._modify_updates)
    def _modify_updates(self, updates):
        for layer in self.layers:
            layer.modify_updates(updates)

    @wraps(Layer.get_lr_scalers)
    def get_lr_scalers(self):
        return get_lr_scalers_from_layers(self)

    @wraps(Layer.get_weights)
    def get_weights(self):
        if not hasattr(self, "input_space"):
            raise AttributeError("Input space has not been provided.")
        return self.layers[0].get_weights()

    @wraps(Layer.get_weights_view_shape)
    def get_weights_view_shape(self):
        if not hasattr(self, "input_space"):
            raise AttributeError("Input space has not been provided.")
        return self.layers[0].get_weights_view_shape()

    @wraps(Layer.get_weights_format)
    def get_weights_format(self):
        if not hasattr(self, "input_space"):
            raise AttributeError("Input space has not been provided.")
        return self.layers[0].get_weights_format()

    @wraps(Layer.get_weights_topo)
    def get_weights_topo(self):
        if not hasattr(self, "input_space"):
            raise AttributeError("Input space has not been provided.")
        return self.layers[0].get_weights_topo()

    def dropout_fprop(self, state_below, default_input_include_prob=0.5,
                      input_include_probs=None, default_input_scale=2.,
                      input_scales=None, per_example=True):
        """
        Returns the output of the MLP, when applying dropout to the input
        and intermediate layers.

        Parameters
        ----------
        state_below : WRITEME
            The input to the MLP
        default_input_include_prob : WRITEME
        input_include_probs : WRITEME
        default_input_scale : WRITEME
        input_scales : WRITEME
        per_example : bool, optional
            Sample a different mask value for every example in a batch.
            Defaults to `True`. If `False`, sample one mask per mini-batch.

        Notes
        -----
        Each input to each layer is randomly included or
        excluded for each example. The probability of inclusion is
        independent for each input and each example. Each layer uses
        `default_input_include_prob` unless that layer's name appears as a
        key in input_include_probs, in which case the input inclusion
        probability is given by the corresponding value.

        Each feature is also multiplied by a scale factor. The scale factor
        for each layer's input scale is determined by the same scheme as
        the input probabilities.
        """
        if input_include_probs is None:
            input_include_probs = {}

        if input_scales is None:
            input_scales = {}

        self._validate_layer_names(list(input_include_probs.keys()))
        self._validate_layer_names(list(input_scales.keys()))

        theano_rng = MRG_RandomStreams(max(self.rng.randint(2 ** 15), 1))

        for layer in self.layers:
            layer_name = layer.layer_name

            if layer_name in input_include_probs:
                include_prob = input_include_probs[layer_name]
            else:
                include_prob = default_input_include_prob

            if layer_name in input_scales:
                scale = input_scales[layer_name]
            else:
                scale = default_input_scale

            state_below = self.apply_dropout(
                state=state_below,
                include_prob=include_prob,
                theano_rng=theano_rng,
                scale=scale,
                mask_value=layer.dropout_input_mask_value,
                input_space=layer.get_input_space(),
                per_example=per_example
            )
            state_below = layer.fprop(state_below)

        return state_below

    def masked_fprop(self, state_below, mask, masked_input_layers=None,
                     default_input_scale=2., input_scales=None):
        """
        Forward propagate through the network with a dropout mask
        determined by an integer (the binary representation of
        which is used to generate the mask).

        Parameters
        ----------
        state_below : tensor_like
            The (symbolic) output state of the layer below.
        mask : int
            An integer indexing possible binary masks. It should be
            < 2 ** get_total_input_dimension(masked_input_layers)
            and greater than or equal to 0.
        masked_input_layers : list, optional
            A list of layer names to mask. If `None`, the input to all
            layers (including the first hidden layer) is masked.
        default_input_scale : float, optional
            The amount to scale inputs in masked layers that do not appear
            in `input_scales`. Defaults to 2.
        input_scales : dict, optional
            A dictionary mapping layer names to floating point numbers
            indicating how much to scale input to a given layer.

        Returns
        -------
        masked_output : tensor_like
            The output of the forward propagation of the masked network.
        """
        # Normalize `masked_input_layers` before it is used below; the
        # previous ordering crashed with a TypeError when it was left
        # as None while `input_scales` was provided.
        if masked_input_layers is not None:
            self._validate_layer_names(masked_input_layers)
        else:
            masked_input_layers = self.layer_names

        if input_scales is not None:
            self._validate_layer_names(input_scales)
        else:
            input_scales = {}

        if any(n not in masked_input_layers for n in input_scales):
            layers = [n for n in input_scales if n not in masked_input_layers]
            # Was "... not masked: " % ...: a %-format with no placeholder,
            # which raised TypeError instead of this ValueError.
            raise ValueError("input scales provided for layer not masked: %s"
                             % ", ".join(layers))

        num_inputs = self.get_total_input_dimension(masked_input_layers)
        assert mask >= 0, "Mask must be a non-negative integer."
        # Exact integer comparison. The previous floating-point test
        # (math.log(mask, 2) > num_inputs) let mask == 2 ** num_inputs
        # slip through even though valid masks are < 2 ** num_inputs.
        if mask >= 2 ** num_inputs:
            raise ValueError("mask value of %d too large; only %d "
                             "inputs to layers (%s)" %
                             (mask, num_inputs,
                              ", ".join(masked_input_layers)))

        def binary_string(x, length, dtype):
            """
            Create the binary representation of an integer `x`, padded to
            `length`, with dtype `dtype`. Bit i of `x` is stored at
            index i (least-significant bit first).
            """
            s = np.empty(length, dtype=dtype)
            for i in range(length - 1, -1, -1):
                if x // (2 ** i) == 1:
                    s[i] = 1
                else:
                    s[i] = 0
                x = x % (2 ** i)
            return s

        remaining_mask = mask
        for layer in self.layers:
            if layer.layer_name in masked_input_layers:
                scale = input_scales.get(layer.layer_name,
                                         default_input_scale)
                n_inputs = layer.get_input_space().get_total_dimension()
                # Consume this layer's chunk of the global mask integer.
                layer_dropout_mask = remaining_mask & (2 ** n_inputs - 1)
                remaining_mask >>= n_inputs
                mask = binary_string(layer_dropout_mask, n_inputs,
                                     'uint8')
                shape = layer.get_input_space().get_origin_batch(1).shape
                s_mask = T.as_tensor_variable(mask).reshape(shape)
                if layer.dropout_input_mask_value == 0:
                    state_below = state_below * s_mask * scale
                else:
                    state_below = T.switch(s_mask, state_below * scale,
                                           layer.dropout_input_mask_value)
            state_below = layer.fprop(state_below)

        return state_below

    def _validate_layer_names(self, layers):
        """
        Raise ValueError if any name in `layers` is not the name of a
        layer of this MLP.
        """
        if any(layer not in self.layer_names for layer in layers):
            unknown_names = [layer for layer in layers
                             if layer not in self.layer_names]
            raise ValueError("MLP has no layer(s) named %s" %
                             ", ".join(unknown_names))

    def get_total_input_dimension(self, layers):
        """
        Get the total number of inputs to the layers whose
        names are listed in `layers`. Used for computing the
        total number of dropout masks.

        Parameters
        ----------
        layers : iterable of str
            Names of layers whose input dimensions should be summed.

        Returns
        -------
        total : int
            Sum of the total input dimension of every named layer.
        """
        self._validate_layer_names(layers)
        total = 0
        for layer in self.layers:
            if layer.layer_name in layers:
                total += layer.get_input_space().get_total_dimension()
        return total

    @wraps(Layer.fprop)
    def fprop(self, state_below, return_all=False):

        if not hasattr(self, "input_space"):
            raise AttributeError("Input space has not been provided.")

        rval = self.layers[0].fprop(state_below)

        rlist = [rval]

        for layer in self.layers[1:]:
            rval = layer.fprop(rval)
            rlist.append(rval)

        if return_all:
            return rlist
        return rval

    def apply_dropout(self, state, include_prob, scale, theano_rng,
                      input_space, mask_value=0, per_example=True):
        """
        Apply a dropout mask to `state`, scaling surviving units.

        Parameters
        ----------
        state : tensor_like or tuple of tensor_like
            State (or composite state) to mask.
        include_prob : float
            Probability that each unit is kept.
        scale : float
            Multiplier applied to surviving units.
        theano_rng : MRG_RandomStreams
            Symbolic RNG used to sample the mask.
        input_space : Space
            Space `state` lives in; for tuple states this must be a
            CompositeSpace whose components match the sub-states.
        mask_value : float, optional
            Value assigned to dropped units (default 0).
        per_example : bool, optional
            Sample a different mask value for every example in a batch.
            Defaults to `True`. If `False`, sample one mask per mini-batch.
        """
        if include_prob in [None, 1.0, 1]:
            return state
        assert scale is not None
        if isinstance(state, tuple):
            # Recurse into composite states, pairing each sub-state with
            # its component space. (The previous code passed `mask_value`
            # positionally into the `input_space` slot and dropped both
            # `mask_value` and `per_example`.)
            return tuple(self.apply_dropout(substate, include_prob, scale,
                                            theano_rng, component,
                                            mask_value, per_example)
                         for substate, component
                         in zip(state, input_space.components))
        # TODO: all of this assumes that if it's not a tuple, it's
        # a dense tensor. It hasn't been tested with sparse types.
        # A method to format the mask (or any other values) as
        # the given symbolic type should be added to the Spaces
        # interface.
        if per_example:
            mask = theano_rng.binomial(p=include_prob, size=state.shape,
                                       dtype=state.dtype)
        else:
            batch = input_space.get_origin_batch(1)
            mask = theano_rng.binomial(p=include_prob, size=batch.shape,
                                       dtype=state.dtype)
            rebroadcast = T.Rebroadcast(*zip(xrange(batch.ndim),
                                             [s == 1 for s in batch.shape]))
            mask = rebroadcast(mask)
        if mask_value == 0:
            rval = state * mask * scale
        else:
            rval = T.switch(mask, state * scale, mask_value)
        return T.cast(rval, state.dtype)

    @wraps(Layer.cost)
    def cost(self, Y, Y_hat):
        return self.layers[-1].cost(Y, Y_hat)

    @wraps(Layer.cost_matrix)
    def cost_matrix(self, Y, Y_hat):
        return self.layers[-1].cost_matrix(Y, Y_hat)

    @wraps(Layer.cost_from_cost_matrix)
    def cost_from_cost_matrix(self, cost_matrix):
        return self.layers[-1].cost_from_cost_matrix(cost_matrix)

    def cost_from_X(self, data):
        """
        Computes self.cost, but takes data=(X, Y) rather than Y_hat as an
        argument.

        This is just a wrapper around self.cost that computes Y_hat by
        calling Y_hat = self.fprop(X)

        Parameters
        ----------
        data : tuple
            An (X, Y) pair matching `cost_from_X_data_specs`.
        """
        self.cost_from_X_data_specs()[0].validate(data)
        X, Y = data
        Y_hat = self.fprop(X)
        return self.cost(Y, Y_hat)

    def cost_from_X_data_specs(self):
        """
        Returns the data specs needed by cost_from_X.

        This is useful if cost_from_X is used in a MethodCost.
        """
        space = CompositeSpace((self.get_input_space(),
                                self.get_target_space()))
        source = (self.get_input_source(), self.get_target_source())
        return (space, source)

    def __str__(self):
        """
        Summarizes the MLP by printing the size and format of the input to
        all layers. Feel free to add reasonably concise info as needed.
        """
        rval = []
        for layer in self.layers:
            rval.append(layer.layer_name)
            input_space = layer.get_input_space()
            rval.append('\tInput space: ' + str(input_space))
            rval.append('\tTotal input dimension: ' +
                        str(input_space.get_total_dimension()))
        rval = '\n'.join(rval)
        return rval
class Softmax(Layer):
    """
    A layer that can apply an optional affine transformation
    to vectorial inputs followed by a softmax nonlinearity.

    Parameters
    ----------
    n_classes : int
        Number of classes for softmax targets.
    layer_name : string
        Name of Softmax layers.
    irange : float
        If specified, initialized each weight randomly in
        U(-irange, irange).
    istdev : float
        If specified, initialize each weight randomly from
        N(0, istdev).
    sparse_init : int
        If specified, initial sparse_init number of weights
        for each unit from N(0,1).
    W_lr_scale : float
        Scale for weight learning rate.
    b_lr_scale : float
        Scale for bias learning rate.
    max_row_norm : float
        Maximum norm for a row of the weight matrix.
    no_affine : boolean
        If True, softmax nonlinearity is applied directly to
        inputs.
    max_col_norm : float
        Maximum norm for a column of the weight matrix.
    init_bias_target_marginals : dataset
        Take the probability distribution of the targets into account to
        intelligently initialize biases.
    binary_target_dim : int, optional
        If your targets are class labels (i.e. a binary vector) then set the
        number of targets here so that an IndexSpace of the proper dimension
        can be used as the target space. This allows the softmax to compute
        the cost much more quickly than if it needs to convert the targets
        into a VectorSpace. With binary_target_dim>1, you can use one layer
        to simultaneously predict a bag of words (i.e. order is not important,
        the same element can be included more than once).
    non_redundant : bool
        If True, learns only n_classes - 1 biases and weight vectors
    kwargs : dict
        Passed on to the superclass.
    """

    def __init__(self, n_classes, layer_name, irange=None,
                 istdev=None,
                 sparse_init=None, W_lr_scale=None,
                 b_lr_scale=None, max_row_norm=None,
                 no_affine=False,
                 max_col_norm=None, init_bias_target_marginals=None,
                 binary_target_dim=None, non_redundant=False,
                 **kwargs):

        super(Softmax, self).__init__(**kwargs)

        # Norm constraints are implemented as model extensions so they are
        # re-applied after every learning update.
        if max_col_norm is not None:
            self.extensions.append(MaxL2FilterNorm(max_col_norm, axis=0))
        if max_row_norm is not None:
            self.extensions.append(MaxL2FilterNorm(max_row_norm, axis=1))

        if non_redundant:
            if init_bias_target_marginals:
                msg = ("init_bias_target_marginals currently only works "
                       "with the overcomplete parameterization.")
                raise NotImplementedError(msg)

        if isinstance(W_lr_scale, str):
            W_lr_scale = float(W_lr_scale)

        self.__dict__.update(locals())
        del self.self
        # The dataset passed for bias initialization is consumed right here;
        # don't keep a reference to it or it would be pickled with the model.
        del self.init_bias_target_marginals

        if not isinstance(n_classes, py_integer_types):
            raise TypeError("n_classes is of type %s, but must be integer" %
                            type(n_classes))

        if binary_target_dim is not None:
            assert isinstance(binary_target_dim, py_integer_types)
            self._has_binary_target = True
            self._target_space = IndexSpace(dim=binary_target_dim,
                                            max_labels=n_classes)
        else:
            self._has_binary_target = False

        self.output_space = VectorSpace(n_classes)

        if not no_affine:
            # With the non-redundant parameterization, one class is pinned
            # to a zero bias/weight vector, so one fewer bias is learned.
            self.b = sharedX(np.zeros((n_classes - self.non_redundant,)),
                             name='softmax_b')

            if init_bias_target_marginals:
                y = init_bias_target_marginals.y
                if init_bias_target_marginals.y_labels is None:
                    marginals = y.mean(axis=0)
                else:
                    # compute class frequencies
                    if np.max(y.shape) != np.prod(y.shape):
                        raise AssertionError("Use of "
                                             "`init_bias_target_marginals` "
                                             "requires that each example has "
                                             "a single label.")
                    # minlength guarantees one count per class even when some
                    # classes never occur in the dataset; otherwise the
                    # marginal vector (and hence b) could come out too short.
                    marginals = (np.bincount(y.flat, minlength=n_classes) /
                                 float(y.shape[0]))

                assert marginals.ndim == 1
                b = pseudoinverse_softmax_numpy(marginals).astype(self.b.dtype)
                assert b.ndim == 1
                assert b.dtype == self.b.dtype
                self.b.set_value(b)
        else:
            assert init_bias_target_marginals is None

    def __setstate__(self, state):
        super(Softmax, self).__setstate__(state)
        # Patch old pickle files that predate these attributes.
        if not hasattr(self, 'non_redundant'):
            self.non_redundant = False
        if not hasattr(self, 'mask_weights'):
            self.mask_weights = None

    @wraps(Layer.get_lr_scalers)
    def get_lr_scalers(self):

        rval = OrderedDict()

        if self.W_lr_scale is not None:
            assert isinstance(self.W_lr_scale, float)
            rval[self.W] = self.W_lr_scale

        # Patch old pickle files that lack b_lr_scale.
        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None

        if self.b_lr_scale is not None:
            assert isinstance(self.b_lr_scale, float)
            rval[self.b] = self.b_lr_scale

        return rval

    @wraps(Layer.get_layer_monitoring_channels)
    def get_layer_monitoring_channels(self, state_below=None,
                                      state=None, targets=None):

        rval = OrderedDict()

        if not self.no_affine:
            W = self.W

            assert W.ndim == 2

            sq_W = T.sqr(W)

            row_norms = T.sqrt(sq_W.sum(axis=1))
            col_norms = T.sqrt(sq_W.sum(axis=0))

            rval.update(OrderedDict([('row_norms_min', row_norms.min()),
                                     ('row_norms_mean', row_norms.mean()),
                                     ('row_norms_max', row_norms.max()),
                                     ('col_norms_min', col_norms.min()),
                                     ('col_norms_mean', col_norms.mean()),
                                     ('col_norms_max', col_norms.max()), ]))

        if (state_below is not None) or (state is not None):
            if state is None:
                state = self.fprop(state_below)

            # Statistics of the per-example winning class probability.
            mx = state.max(axis=1)

            rval.update(OrderedDict([('mean_max_class', mx.mean()),
                                     ('max_max_class', mx.max()),
                                     ('min_max_class', mx.min())]))

            if (targets is not None):
                if ((not self._has_binary_target) or
                        self.binary_target_dim == 1):
                    # if binary_target_dim>1, the misclass rate is ill-defined
                    y_hat = T.argmax(state, axis=1)
                    y = (targets.reshape(y_hat.shape)
                         if self._has_binary_target
                         else T.argmax(targets, axis=1))
                    misclass = T.neq(y, y_hat).mean()
                    misclass = T.cast(misclass, config.floatX)
                    rval['misclass'] = misclass
                rval['nll'] = self.cost(Y_hat=state, Y=targets)

        return rval

    @wraps(Layer.set_input_space)
    def set_input_space(self, space):

        self.input_space = space

        if not isinstance(space, Space):
            raise TypeError("Expected Space, got " +
                            str(space) + " of type " + str(type(space)))

        self.input_dim = space.get_total_dimension()
        self.needs_reformat = not isinstance(space, VectorSpace)

        if self.no_affine:
            # Without an affine transform the input must already have one
            # entry per (non-redundant) class.
            desired_dim = self.n_classes - self.non_redundant
            assert self.input_dim == desired_dim
        else:
            desired_dim = self.input_dim

        self.desired_space = VectorSpace(desired_dim)

        if not self.needs_reformat:
            assert self.desired_space == self.input_space

        rng = self.mlp.rng

        if self.no_affine:
            self._params = []
        else:
            num_cols = self.n_classes - self.non_redundant

            # Exactly one initialization scheme must be specified.
            if self.irange is not None:
                assert self.istdev is None
                assert self.sparse_init is None
                W = rng.uniform(-self.irange,
                                self.irange,
                                (self.input_dim, num_cols))
            elif self.istdev is not None:
                assert self.sparse_init is None
                W = rng.randn(self.input_dim, num_cols) * self.istdev
            else:
                assert self.sparse_init is not None
                W = np.zeros((self.input_dim, num_cols))
                for i in xrange(num_cols):
                    for j in xrange(self.sparse_init):
                        idx = rng.randint(0, self.input_dim)
                        # Resample until we hit an entry that is still zero,
                        # so each column gets sparse_init distinct nonzeros.
                        while W[idx, i] != 0.:
                            idx = rng.randint(0, self.input_dim)
                        W[idx, i] = rng.randn()

            self.W = sharedX(W, 'softmax_W')

            self._params = [self.b, self.W]

    @wraps(Layer.get_weights_topo)
    def get_weights_topo(self):

        if not isinstance(self.input_space, Conv2DSpace):
            raise NotImplementedError()

        desired = self.W.get_value().T
        ipt = self.desired_space.np_format_as(desired, self.input_space)
        rval = Conv2DSpace.convert_numpy(ipt,
                                         self.input_space.axes,
                                         ('b', 0, 1, 'c'))

        return rval

    @wraps(Layer.get_weights)
    def get_weights(self):

        if not isinstance(self.input_space, VectorSpace):
            raise NotImplementedError()

        return self.W.get_value()

    @wraps(Layer.set_weights)
    def set_weights(self, weights):

        self.W.set_value(weights)

    @wraps(Layer.set_biases)
    def set_biases(self, biases):

        self.b.set_value(biases)

    @wraps(Layer.get_biases)
    def get_biases(self):

        return self.b.get_value()

    @wraps(Layer.get_weights_format)
    def get_weights_format(self):

        return ('v', 'h')

    @wraps(Layer.fprop)
    def fprop(self, state_below):

        self.input_space.validate(state_below)

        if self.needs_reformat:
            state_below = self.input_space.format_as(state_below,
                                                     self.desired_space)

        self.desired_space.validate(state_below)
        assert state_below.ndim == 2

        # Patch old pickle files that predate no_affine.
        if not hasattr(self, 'no_affine'):
            self.no_affine = False

        if self.no_affine:
            Z = state_below
        else:
            assert self.W.ndim == 2
            b = self.b

            Z = T.dot(state_below, self.W) + b

        if self.non_redundant:
            # The pinned class contributes a fixed pre-softmax activation
            # of zero, prepended as the first column.
            zeros = T.alloc(0., Z.shape[0], 1)
            Z = T.concatenate((zeros, Z), axis=1)

        rval = T.nnet.softmax(Z)

        for value in get_debug_values(rval):
            if self.mlp.batch_size is not None:
                assert value.shape[0] == self.mlp.batch_size

        return rval

    def _cost(self, Y, Y_hat):
        """
        Elementwise log-probability of the targets.

        Parameters
        ----------
        Y : theano.gof.Variable
            Targets. A binary label matrix of shape (batch,
            binary_target_dim) when `binary_target_dim` was given, else a
            one-hot (batch, n_classes) matrix.
        Y_hat : theano.gof.Variable
            Output of `fprop`. Must be a symbolic softmax so that its
            pre-softmax argument can be recovered.

        Returns
        -------
        log_prob_of : theano matrix
            With binary targets, entry (i, j) is the log-probability the
            model assigns to label Y[i, j]. Otherwise, Y * log(Y_hat),
            i.e. log-probabilities masked by the one-hot targets.
        """
        # Recover the pre-softmax activations so the log-softmax can be
        # computed in a numerically stable way.
        z = arg_of_softmax(Y_hat)
        assert z.ndim == 2

        z = z - z.max(axis=1).dimshuffle(0, 'x')
        log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
        # we use sum and not mean because this is really one variable per row
        if self._has_binary_target:
            # The following code is the equivalent of accessing log_prob by
            # the indices in Y, but it is written such that the computation
            # can happen on the GPU rather than CPU.
            flat_Y = Y.flatten()
            flat_Y.name = 'flat_Y'
            flat_log_prob = log_prob.flatten()
            flat_log_prob.name = 'flat_log_prob'
            range_ = T.arange(Y.shape[0])
            if self.binary_target_dim > 1:
                # because of an error in optimization (local_useless_tile)
                # when tiling with (1, 1)
                range_ = T.tile(range_.dimshuffle(0, 'x'),
                                (1, self.binary_target_dim)).flatten()
            flat_indices = flat_Y + range_ * self.n_classes
            flat_indices.name = 'flat_indices'
            log_prob_of = flat_log_prob[flat_indices].reshape(Y.shape, ndim=2)
            log_prob_of.name = 'log_prob_of'

        else:
            log_prob_of = (Y * log_prob)

        return log_prob_of

    @wraps(Layer.cost)
    def cost(self, Y, Y_hat):

        # Sum over labels within an example, mean over examples.
        log_prob_of = self._cost(Y, Y_hat).sum(axis=1)
        assert log_prob_of.ndim == 1

        rval = log_prob_of.mean()
        return - rval

    @wraps(Layer.cost_matrix)
    def cost_matrix(self, Y, Y_hat):

        log_prob_of = self._cost(Y, Y_hat)
        if self._has_binary_target:
            # Scatter the picked log-probabilities back into a full
            # (batch, n_classes) matrix so the result matches the shape of
            # the non-binary branch (zeros at non-target positions).
            # Bug fix: the matrix must be n_classes wide (the scatter
            # indices are class labels) and must receive the log
            # probabilities, not the integer labels themselves.
            flat_Y = Y.flatten()
            flat_matrix = T.zeros((Y.shape[0] * self.n_classes,),
                                  dtype=log_prob_of.dtype)
            flat_indices = flat_Y + T.extra_ops.repeat(
                T.arange(Y.shape[0]) * self.n_classes, Y.shape[1]
            )
            log_prob_of = T.set_subtensor(
                flat_matrix[flat_indices],
                log_prob_of.flatten()
            ).reshape((Y.shape[0], self.n_classes))

        return -log_prob_of

    @wraps(Layer.get_weight_decay)
    def get_weight_decay(self, coeff):

        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        return coeff * T.sqr(self.W).sum()

    @wraps(Layer.get_l1_weight_decay)
    def get_l1_weight_decay(self, coeff):

        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        W = self.W
        return coeff * abs(W).sum()

    @wraps(Layer._modify_updates)
    def _modify_updates(self, updates):

        # No parameters to constrain when the layer is purely a softmax.
        if self.no_affine:
            return
class SoftmaxPool(Layer):
    """
    A hidden layer that uses the softmax function to do max pooling over
    groups of units. When the pooling size is 1, this reduces to a standard
    sigmoidal MLP layer.

    Parameters
    ----------
    detector_layer_dim : int
        Number of detector units; must be divisible by `pool_size`.
    layer_name : str
        The name of the layer. All layers in an MLP must have a unique name.
    pool_size : int
        Number of detector units pooled into each output unit.
    irange : float, optional
        If specified, initialized each weight randomly in U(-irange, irange).
    sparse_init : int, optional
        If specified, initial sparse_init number of weights for each unit from
        N(0,1).
    sparse_stdev : float
        Sparse initial weights are scaled by this factor.
    include_prob : float, optional
        Probability of including a weight element in the set of weights
        initialized to U(-irange, irange). If not included it is
        initialized to 0.
    init_bias : float or ndarray
        Initial value added to the (zero) biases.
    W_lr_scale : float, optional
        Multiply the learning rate on the weights by this constant.
    b_lr_scale : float, optional
        Multiply the learning rate on the biases by this constant.
    mask_weights : ndarray, optional
        If provided, the weights are multiplied by this mask after each
        update.
    max_col_norm : float, optional
        Maximum norm for a column of the weight matrix.
    """

    def __init__(self,
                 detector_layer_dim,
                 layer_name,
                 pool_size=1,
                 irange=None,
                 sparse_init=None,
                 sparse_stdev=1.,
                 include_prob=1.0,
                 init_bias=0.,
                 W_lr_scale=None,
                 b_lr_scale=None,
                 mask_weights=None,
                 max_col_norm=None):
        super(SoftmaxPool, self).__init__()
        self.__dict__.update(locals())
        del self.self

        self.b = sharedX(np.zeros((self.detector_layer_dim,)) + init_bias,
                         name=(layer_name + '_b'))

        # Column norm constraint is applied via a model extension after
        # each learning update.
        if max_col_norm is not None:
            self.extensions.append(MaxL2FilterNorm(max_col_norm, axis=0))

    @wraps(Layer.get_lr_scalers)
    def get_lr_scalers(self):

        # Patch old pickle files that lack these attributes.
        if not hasattr(self, 'W_lr_scale'):
            self.W_lr_scale = None

        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None

        rval = OrderedDict()

        if self.W_lr_scale is not None:
            W, = self.transformer.get_params()
            rval[W] = self.W_lr_scale

        if self.b_lr_scale is not None:
            rval[self.b] = self.b_lr_scale

        return rval

    @wraps(Layer.set_input_space)
    def set_input_space(self, space):

        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)

        if not (self.detector_layer_dim % self.pool_size == 0):
            raise ValueError("detector_layer_dim = %d, pool_size = %d. "
                             "Should be divisible but remainder is %d" %
                             (self.detector_layer_dim,
                              self.pool_size,
                              self.detector_layer_dim % self.pool_size))

        self.h_space = VectorSpace(self.detector_layer_dim)
        # Use floor division so the dimension stays an int under Python 3
        # (divisibility was asserted above, so the value is unchanged).
        self.pool_layer_dim = self.detector_layer_dim // self.pool_size
        self.output_space = VectorSpace(self.pool_layer_dim)

        rng = self.mlp.rng
        if self.irange is not None:
            assert self.sparse_init is None
            # Dense init, with each weight dropped to 0 with probability
            # 1 - include_prob.
            W = rng.uniform(-self.irange,
                            self.irange,
                            (self.input_dim, self.detector_layer_dim)) * \
                (rng.uniform(0., 1., (self.input_dim,
                                      self.detector_layer_dim))
                 < self.include_prob)
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.detector_layer_dim))

            def mask_rejects(idx, i):
                # A sparse weight may not land on a masked-out position.
                if self.mask_weights is None:
                    return False
                return self.mask_weights[idx, i] == 0.

            for i in xrange(self.detector_layer_dim):
                assert self.sparse_init <= self.input_dim
                for j in xrange(self.sparse_init):
                    idx = rng.randint(0, self.input_dim)
                    # Resample until we find a fresh, unmasked position.
                    while W[idx, i] != 0 or mask_rejects(idx, i):
                        idx = rng.randint(0, self.input_dim)
                    W[idx, i] = rng.randn()
            W *= self.sparse_stdev

        W = sharedX(W)
        W.name = self.layer_name + '_W'

        self.transformer = MatrixMul(W)

        W, = self.transformer.get_params()
        assert W.name is not None

        if self.mask_weights is not None:
            expected_shape = (self.input_dim, self.detector_layer_dim)
            if expected_shape != self.mask_weights.shape:
                raise ValueError("Expected mask with shape " +
                                 str(expected_shape) +
                                 " but got " +
                                 str(self.mask_weights.shape))
            self.mask = sharedX(self.mask_weights)

    @wraps(Layer._modify_updates)
    def _modify_updates(self, updates):

        # Re-apply the sparsity mask so masked weights stay at zero.
        if self.mask_weights is not None:
            W, = self.transformer.get_params()
            if W in updates:
                updates[W] = updates[W] * self.mask

    @wraps(Layer.get_params)
    def get_params(self):

        assert self.b.name is not None
        W, = self.transformer.get_params()
        assert W.name is not None
        rval = self.transformer.get_params()
        assert not isinstance(rval, set)
        rval = list(rval)
        assert self.b not in rval
        rval.append(self.b)
        return rval

    @wraps(Layer.get_weight_decay)
    def get_weight_decay(self, coeff):

        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        W, = self.transformer.get_params()
        return coeff * T.sqr(W).sum()

    @wraps(Layer.get_l1_weight_decay)
    def get_l1_weight_decay(self, coeff):

        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        W, = self.transformer.get_params()
        return coeff * abs(W).sum()

    @wraps(Layer.get_weights)
    def get_weights(self):

        if self.requires_reformat:
            # This is not really an unimplemented case.
            # We actually don't know how to format the weights
            # in design space. We got the data in topo space
            # and we don't have access to the dataset
            raise NotImplementedError()
        W, = self.transformer.get_params()
        return W.get_value()

    @wraps(Layer.set_weights)
    def set_weights(self, weights):

        W, = self.transformer.get_params()
        W.set_value(weights)

    @wraps(Layer.set_biases)
    def set_biases(self, biases):

        self.b.set_value(biases)

    @wraps(Layer.get_biases)
    def get_biases(self):

        return self.b.get_value()

    @wraps(Layer.get_weights_format)
    def get_weights_format(self):

        return ('v', 'h')

    @wraps(Layer.get_weights_view_shape)
    def get_weights_view_shape(self):

        total = self.detector_layer_dim
        cols = self.pool_size
        if cols == 1:
            # Let the PatchViewer decide how to arrange the units
            # when they're not pooled
            raise NotImplementedError()
        # When they are pooled, make each pooling unit have one row.
        # Floor division keeps the row count an int under Python 3.
        rows = total // cols
        return rows, cols

    @wraps(Layer.get_weights_topo)
    def get_weights_topo(self):

        if not isinstance(self.input_space, Conv2DSpace):
            raise NotImplementedError()

        W, = self.transformer.get_params()

        W = W.T

        W = W.reshape((self.detector_layer_dim,
                       self.input_space.shape[0],
                       self.input_space.shape[1],
                       self.input_space.num_channels))

        W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))

        return function([], W)()

    @wraps(Layer.get_layer_monitoring_channels)
    def get_layer_monitoring_channels(self, state_below=None,
                                      state=None, **kwargs):

        W, = self.transformer.get_params()

        assert W.ndim == 2

        sq_W = T.sqr(W)

        row_norms = T.sqrt(sq_W.sum(axis=1))
        col_norms = T.sqrt(sq_W.sum(axis=0))

        rval = OrderedDict([('row_norms_min', row_norms.min()),
                            ('row_norms_mean', row_norms.mean()),
                            ('row_norms_max', row_norms.max()),
                            ('col_norms_min', col_norms.min()),
                            ('col_norms_mean', col_norms.mean()),
                            ('col_norms_max', col_norms.max()), ])

        if (state_below is not None) or (state is not None):
            if state is None:
                P = self.fprop(state_below)
            else:
                P = state

            if self.pool_size == 1:
                vars_and_prefixes = [(P, '')]
            else:
                vars_and_prefixes = [(P, 'p_')]

            for var, prefix in vars_and_prefixes:
                v_max = var.max(axis=0)
                v_min = var.min(axis=0)
                v_mean = var.mean(axis=0)
                v_range = v_max - v_min

                # max_x.mean_u is "the mean over *u*nits of the max over
                # e*x*amples" The x and u are included in the name because
                # otherwise its hard to remember which axis is which when
                # reading the monitor I use inner.outer rather than
                # outer_of_inner or something like that because I want
                # mean_x.* to appear next to each other in the alphabetical
                # list, as these are commonly plotted together
                for key, val in [('max_x.max_u', v_max.max()),
                                 ('max_x.mean_u', v_max.mean()),
                                 ('max_x.min_u', v_max.min()),
                                 ('min_x.max_u', v_min.max()),
                                 ('min_x.mean_u', v_min.mean()),
                                 ('min_x.min_u', v_min.min()),
                                 ('range_x.max_u', v_range.max()),
                                 ('range_x.mean_u', v_range.mean()),
                                 ('range_x.min_u', v_range.min()),
                                 ('mean_x.max_u', v_mean.max()),
                                 ('mean_x.mean_u', v_mean.mean()),
                                 ('mean_x.min_u', v_mean.min())]:
                    rval[prefix + key] = val

        return rval

    @wraps(Layer.fprop)
    def fprop(self, state_below):

        self.input_space.validate(state_below)

        if self.requires_reformat:
            state_below = self.input_space.format_as(state_below,
                                                     self.desired_space)

        z = self.transformer.lmul(state_below) + self.b
        if self.layer_name is not None:
            z.name = self.layer_name + '_z'
        p, h = max_pool_channels(z, self.pool_size)

        # Guard like z above: layer_name may legitimately be None.
        if self.layer_name is not None:
            p.name = self.layer_name + '_p_'

        return p
class Linear(Layer):
    """
    A "linear model" in machine learning terminology. This would be more
    accurately described as an affine model because it adds an offset to
    the output as well as doing a matrix multiplication. The output is:

    output = T.dot(weights, input) + biases

    This class may be used as the output layer of an MLP for regression.
    It may also be used as a hidden layer. Most hidden layers classes are
    subclasses of this class that add apply a fixed nonlinearity to the
    output of the affine transformation provided by this class.

    One notable use of this class is to provide "bottleneck" layers.
    By using a Linear layer with few hidden units followed by a nonlinear
    layer such as RectifiedLinear with many hidden units, one essentially
    gets a RectifiedLinear layer with a factored weight matrix, which can
    reduce the number of parameters in the model (by making the effective
    weight matrix low rank).

    Parameters
    ----------
    dim : int
        The number of elements in the output of the layer.
    layer_name : str
        The name of the layer. All layers in an MLP must have a unique name.
    irange : float, optional
        If specified, initialized each weight randomly in U(-irange, irange).
    istdev : float, optional
        If specified, initialize each weight randomly from N(0,istdev).
    sparse_init : int, optional
        If specified, initial sparse_init number of weights for each unit from
        N(0,1).
    sparse_stdev : float
        Sparse initial weights are scaled by this factor.
    include_prob : float
        Probability of including a weight element in the set of weights
        initialized to U(-irange, irange). If not included it is
        initialized to 0.
    init_bias : float or ndarray, optional
        Anything that can be broadcasted to a numpy vector.
        Provides the initial value of the biases of the model.
        When using this class as an output layer (specifically the Linear
        class, or subclasses that don't change the output like
        LinearGaussian, but not subclasses that change the output, like
        Softmax) it can be a good idea to set this to the return value of
        the `mean_of_targets` function. This provides the mean value of
        all the targets in the training set, so the model is initialized
        to a dummy model that predicts the expected value of each output
        variable.
    W_lr_scale : float, optional
        Multiply the learning rate on the weights by this constant.
    b_lr_scale : float, optional
        Multiply the learning rate on the biases by this constant.
    mask_weights : ndarray, optional
        If provided, the weights will be multiplied by this mask after each
        learning update.
    max_row_norm : float, optional
        Maximum norm for a row of the weight matrix.
    max_col_norm : float, optional
        Maximum norm for a column of the weight matrix.
    min_col_norm : float, optional
        Minimum norm for a column of the weight matrix.
    copy_input : REMOVED
    use_abs_loss : bool, optional
        If True, the cost function will be mean absolute error rather
        than mean squared error.
        You can think of mean squared error as fitting a Gaussian
        distribution with variance 1, or as learning to predict the mean
        of the data.
        You can think of mean absolute error as fitting a Laplace
        distribution with variance 1, or as learning to predict the
        median of the data.
    use_bias : bool, optional
        If False, does not add the bias term to the output.
    kwargs : dict
        Passed on to superclass constructor.
    """

    def __init__(self,
                 dim,
                 layer_name,
                 irange=None,
                 istdev=None,
                 sparse_init=None,
                 sparse_stdev=1.,
                 include_prob=1.0,
                 init_bias=0.,
                 W_lr_scale=None,
                 b_lr_scale=None,
                 mask_weights=None,
                 max_row_norm=None,
                 max_col_norm=None,
                 min_col_norm=None,
                 copy_input=None,
                 use_abs_loss=False,
                 use_bias=True,
                 **kwargs):

        if copy_input is not None:
            raise AssertionError(
                "The copy_input option had a bug and has "
                "been removed from the library.")

        super(Linear, self).__init__(**kwargs)

        if use_bias and init_bias is None:
            init_bias = 0.

        self.__dict__.update(locals())
        del self.self

        if use_bias:
            self.b = sharedX(np.zeros((self.dim,)) + init_bias,
                             name=(layer_name + '_b'))
        else:
            # No bias parameter is created, so a bias lr scale makes no
            # sense. (A previous revision had a no-op `init_bias is None`
            # expression here; init_bias is simply ignored without a bias.)
            assert b_lr_scale is None

        if (((max_col_norm is not None) or (min_col_norm is not None))
                and (max_row_norm is not None)):
            raise ValueError('Column and row constraint '
                             'at the same time is forbidden.')

        # Norm constraints are applied through model extensions after each
        # learning update.
        if (max_col_norm is not None) or (min_col_norm is not None):
            self.extensions.append(MaxL2FilterNorm(
                limit=max_col_norm,
                min_limit=min_col_norm,
                axis=0))

        if max_row_norm is not None:
            self.extensions.append(MaxL2FilterNorm(max_row_norm, axis=1))

    @wraps(Layer.get_lr_scalers)
    def get_lr_scalers(self):

        # Patch old pickle files that lack these attributes.
        if not hasattr(self, 'W_lr_scale'):
            self.W_lr_scale = None

        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None

        rval = OrderedDict()

        if self.W_lr_scale is not None:
            W, = self.transformer.get_params()
            rval[W] = self.W_lr_scale

        if self.b_lr_scale is not None:
            rval[self.b] = self.b_lr_scale

        return rval

    @wraps(Layer.set_input_space)
    def set_input_space(self, space):

        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)

        self.output_space = VectorSpace(self.dim)

        rng = self.mlp.rng
        # Exactly one initialization scheme must be specified.
        if self.irange is not None:
            assert self.istdev is None
            assert self.sparse_init is None
            W = rng.uniform(-self.irange,
                            self.irange,
                            (self.input_dim, self.dim)) * \
                (rng.uniform(0., 1., (self.input_dim, self.dim))
                 < self.include_prob)
        elif self.istdev is not None:
            assert self.sparse_init is None
            W = rng.randn(self.input_dim, self.dim) * self.istdev
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.dim))

            def mask_rejects(idx, i):
                # A sparse weight may not land on a masked-out position.
                if self.mask_weights is None:
                    return False
                return self.mask_weights[idx, i] == 0.

            for i in xrange(self.dim):
                assert self.sparse_init <= self.input_dim
                for j in xrange(self.sparse_init):
                    idx = rng.randint(0, self.input_dim)
                    # Resample until we find a fresh, unmasked position.
                    while W[idx, i] != 0 or mask_rejects(idx, i):
                        idx = rng.randint(0, self.input_dim)
                    W[idx, i] = rng.randn()
            W *= self.sparse_stdev

        W = sharedX(W)
        W.name = self.layer_name + '_W'

        self.transformer = MatrixMul(W)

        W, = self.transformer.get_params()
        assert W.name is not None

        if self.mask_weights is not None:
            expected_shape = (self.input_dim, self.dim)
            if expected_shape != self.mask_weights.shape:
                raise ValueError("Expected mask with shape " +
                                 str(expected_shape) + " but got " +
                                 str(self.mask_weights.shape))
            self.mask = sharedX(self.mask_weights)

    @wraps(Layer._modify_updates)
    def _modify_updates(self, updates):

        # Re-apply the sparsity mask so masked weights stay at zero.
        if self.mask_weights is not None:
            W, = self.transformer.get_params()
            if W in updates:
                updates[W] = updates[W] * self.mask

    @wraps(Layer.get_params)
    def get_params(self):

        W, = self.transformer.get_params()
        assert W.name is not None
        rval = self.transformer.get_params()
        assert not isinstance(rval, set)
        rval = list(rval)
        if self.use_bias:
            assert self.b.name is not None
            assert self.b not in rval
            rval.append(self.b)
        return rval

    @wraps(Layer.get_weight_decay)
    def get_weight_decay(self, coeff):

        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        W, = self.transformer.get_params()
        return coeff * T.sqr(W).sum()

    @wraps(Layer.get_l1_weight_decay)
    def get_l1_weight_decay(self, coeff):

        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        W, = self.transformer.get_params()
        return coeff * abs(W).sum()

    @wraps(Layer.get_weights)
    def get_weights(self):

        if self.requires_reformat:
            # This is not really an unimplemented case.
            # We actually don't know how to format the weights
            # in design space. We got the data in topo space
            # and we don't have access to the dataset
            raise NotImplementedError()

        W, = self.transformer.get_params()
        W = W.get_value()
        return W

    @wraps(Layer.set_weights)
    def set_weights(self, weights):

        W, = self.transformer.get_params()
        W.set_value(weights)

    @wraps(Layer.set_biases)
    def set_biases(self, biases):

        self.b.set_value(biases)

    @wraps(Layer.get_biases)
    def get_biases(self):

        return self.b.get_value()

    @wraps(Layer.get_weights_format)
    def get_weights_format(self):

        return ('v', 'h')

    @wraps(Layer.get_weights_topo)
    def get_weights_topo(self):

        if not isinstance(self.input_space, Conv2DSpace):
            raise NotImplementedError()

        W, = self.transformer.get_params()

        W = W.T

        W = W.reshape((self.dim, self.input_space.shape[0],
                       self.input_space.shape[1],
                       self.input_space.num_channels))

        W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))

        return function([], W)()

    @wraps(Layer.get_layer_monitoring_channels)
    def get_layer_monitoring_channels(self, state_below=None,
                                      state=None, targets=None):

        W, = self.transformer.get_params()

        assert W.ndim == 2

        sq_W = T.sqr(W)

        row_norms = T.sqrt(sq_W.sum(axis=1))
        col_norms = T.sqrt(sq_W.sum(axis=0))

        rval = OrderedDict([('row_norms_min', row_norms.min()),
                            ('row_norms_mean', row_norms.mean()),
                            ('row_norms_max', row_norms.max()),
                            ('col_norms_min', col_norms.min()),
                            ('col_norms_mean', col_norms.mean()),
                            ('col_norms_max', col_norms.max()), ])

        if (state is not None) or (state_below is not None):
            if state is None:
                state = self.fprop(state_below)

            # Per-unit statistics of the layer output over the batch.
            mx = state.max(axis=0)
            mean = state.mean(axis=0)
            mn = state.min(axis=0)
            rg = mx - mn

            rval['range_x_max_u'] = rg.max()
            rval['range_x_mean_u'] = rg.mean()
            rval['range_x_min_u'] = rg.min()
            rval['max_x_max_u'] = mx.max()
            rval['max_x_mean_u'] = mx.mean()
            rval['max_x_min_u'] = mx.min()
            rval['mean_x_max_u'] = mean.max()
            rval['mean_x_mean_u'] = mean.mean()
            rval['mean_x_min_u'] = mean.min()
            rval['min_x_max_u'] = mn.max()
            rval['min_x_mean_u'] = mn.mean()
            rval['min_x_min_u'] = mn.min()

        return rval

    def _linear_part(self, state_below):
        """
        Applies the affine (or purely linear if use_bias is False)
        transformation of this layer.

        Parameters
        ----------
        state_below : member of input_space

        Returns
        -------
        output : theano matrix
            Affine transformation of state_below
        """
        self.input_space.validate(state_below)

        if self.requires_reformat:
            state_below = self.input_space.format_as(state_below,
                                                     self.desired_space)

        z = self.transformer.lmul(state_below)
        if self.use_bias:
            z += self.b

        if self.layer_name is not None:
            z.name = self.layer_name + '_z'

        return z

    @wraps(Layer.fprop)
    def fprop(self, state_below):

        p = self._linear_part(state_below)
        return p

    @wraps(Layer.cost)
    def cost(self, Y, Y_hat):

        return self.cost_from_cost_matrix(self.cost_matrix(Y, Y_hat))

    @wraps(Layer.cost_from_cost_matrix)
    def cost_from_cost_matrix(self, cost_matrix):

        # Sum over output units, mean over examples.
        return cost_matrix.sum(axis=1).mean()

    @wraps(Layer.cost_matrix)
    def cost_matrix(self, Y, Y_hat):

        # Elementwise absolute (Laplace) or squared (Gaussian) error.
        if(self.use_abs_loss):
            return T.abs_(Y - Y_hat)
        else:
            return T.sqr(Y - Y_hat)
class Tanh(Linear):
    """
    An affine transformation of the (vectorial) input, passed elementwise
    through a hyperbolic tangent.

    Parameters
    ----------
    kwargs : dict
        Keyword arguments to pass through to `Linear` class constructor.
    """

    @wraps(Layer.fprop)
    def fprop(self, state_below):

        # Affine part comes from Linear; only the nonlinearity is added here.
        return T.tanh(self._linear_part(state_below))

    @wraps(Layer.cost)
    def cost(self, *args, **kwargs):

        # No supervised cost is defined for a tanh output layer.
        raise NotImplementedError()
class Sigmoid(Linear):
"""
A layer that performs an affine transformation of its
input followed by a logistic sigmoid elementwise nonlinearity.
Parameters
----------
monitor_style : string
Values can be any of ['detection', 'one_hot_class',
'bit_vector_class']
'detection' is the default.
- 'detection' : get_monitor_from_state makes no assumptions about
target, reports info about how good model is at
detecting positive bits.
This will monitor precision, recall, and F1 score
based on a detection threshold of 0.5. Note that
these quantities are computed *per-minibatch* and
averaged together. Unless your entire monitoring
dataset fits in one minibatch, this is not the same
as the true F1 score, etc., and will usually
seriously overestimate your performance.
- 'one_hot_class' : get_monitor_from_state assumes target is
one-hot class indicator, even though you're training the
model as k independent sigmoids. Gives info on how
good the argmax over the sigmoids behaves as a classifier.
- 'bit_vector_class' : get_monitor_from_state treats each
sigmoid as predicting a 1 iff its value is > 0.5. Each
example is counted as correct iff all of the bits in its
target are predicted correctly.
This includes as a special case the situation where the
target is a single 0 or 1 label.
- 'classification' : deprecated; originally this string was
used for 'one_hot_class', then due to a miscommunication
it was changed to be used for 'bit_vector_class'.
kwargs : dict
Passed through to the Layer class constructor
"""
def __init__(self, monitor_style='detection', **kwargs):
super(Sigmoid, self).__init__(**kwargs)
if monitor_style == 'classification':
monitor_style = 'bit_vector_class'
warnings.warn("The 'classification' monitor style is deprecated."
" Switch to 'bit_vector_class' (or possibly"
" 'one_hot_class' if your code predates 8f4b62b3df)."
" 'classification' may be removed on or after "
"2015-04-21.")
assert monitor_style in ['one_hot_class', 'bit_vector_class',
'detection']
self.monitor_style = monitor_style
@wraps(Layer.fprop)
def fprop(self, state_below):
p = self._linear_part(state_below)
p = T.nnet.sigmoid(p)
return p
@wraps(Layer.cost)
def cost(self, Y, Y_hat):
"""
Returns a batch (vector) of
mean across units of KL divergence for each example.
Parameters
----------
Y : theano.gof.Variable
Targets
Y_hat : theano.gof.Variable
Output of `fprop`
mean across units, mean across batch of KL divergence
Notes
-----
Uses KL(P || Q) where P is defined by Y and Q is defined by Y_hat
Currently Y must be purely binary. If it's not, you'll still
get the right gradient, but the value in the monitoring channel
will be wrong.
Y_hat must be generated by fprop, i.e., it must be a symbolic
sigmoid.
p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)
For binary p, some terms drop out:
- p log q - (1-p) log (1-q)
- p log sigmoid(z) - (1-p) log sigmoid(-z)
p softplus(-z) + (1-p) softplus(z)
"""
total = self.kl(Y=Y, Y_hat=Y_hat)
ave = total.mean()
return ave
def kl(self, Y, Y_hat):
"""
Computes the KL divergence.
Parameters
----------
Y : Variable
targets for the sigmoid outputs. Currently Y must be purely binary.
If it's not, you'll still get the right gradient, but the
value in the monitoring channel will be wrong.
Y_hat : Variable
predictions made by the sigmoid layer. Y_hat must be generated by
fprop, i.e., it must be a symbolic sigmoid.
Returns
-------
ave : Variable
average kl divergence between Y and Y_hat.
Notes
-----
Warning: This function expects a sigmoid nonlinearity in the
output layer and it uses kl function under pylearn2/expr/nnet/.
Returns a batch (vector) of mean across units of KL
divergence for each example,
KL(P || Q) where P is defined by Y and Q is defined by Y_hat:
p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)
For binary p, some terms drop out:
- p log q - (1-p) log (1-q)
- p log sigmoid(z) - (1-p) log sigmoid(-z)
p softplus(-z) + (1-p) softplus(z)
"""
batch_axis = self.output_space.get_batch_axis()
div = kl(Y=Y, Y_hat=Y_hat, batch_axis=batch_axis)
return div
@wraps(Layer.cost_matrix)
def cost_matrix(self, Y, Y_hat):
rval = elemwise_kl(Y, Y_hat)
assert rval.ndim == 2
return rval
def get_detection_channels_from_state(self, state, target):
"""
Returns monitoring channels when using the layer to do detection
of binary events.
Parameters
----------
state : theano.gof.Variable
Output of `fprop`
target : theano.gof.Variable
The targets from the dataset
Returns
-------
channels : OrderedDict
Dictionary mapping channel names to Theano channel values.
"""
rval = OrderedDict()
y_hat = state > 0.5
y = target > 0.5
wrong_bit = T.cast(T.neq(y, y_hat), state.dtype)
rval['01_loss'] = wrong_bit.mean()
rval['kl'] = self.cost(Y_hat=state, Y=target)
y = T.cast(y, state.dtype)
y_hat = T.cast(y_hat, state.dtype)
tp = (y * y_hat).sum()
fp = ((1 - y) * y_hat).sum()
precision = compute_precision(tp, fp)
recall = compute_recall(y, tp)
f1 = compute_f1(precision, recall)
rval['precision'] = precision
rval['recall'] = recall
rval['f1'] = f1
tp = (y * y_hat).sum(axis=0)
fp = ((1 - y) * y_hat).sum(axis=0)
precision = compute_precision(tp, fp)
rval['per_output_precision_max'] = precision.max()
rval['per_output_precision_mean'] = precision.mean()
rval['per_output_precision_min'] = precision.min()
recall = compute_recall(y, tp)
rval['per_output_recall_max'] = recall.max()
rval['per_output_recall_mean'] = recall.mean()
rval['per_output_recall_min'] = recall.min()
f1 = compute_f1(precision, recall)
rval['per_output_f1_max'] = f1.max()
rval['per_output_f1_mean'] = f1.mean()
rval['per_output_f1_min'] = f1.min()
return rval
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = super(Sigmoid, self).get_layer_monitoring_channels(
state=state, targets=targets)
if (targets is not None) and \
((state_below is not None) or (state is not None)):
if state is None:
state = self.fprop(state_below)
if self.monitor_style == 'detection':
rval.update(self.get_detection_channels_from_state(state,
targets))
elif self.monitor_style == 'one_hot_class':
# For this monitor style, we know (by assumption) that
# exactly one bit is always on, so we pick
# the single most likely bit under the model, regardless
# of whether its probability exceeds 0.5
prediction = state.argmax(axis=1)
labels = targets.argmax(axis=1)
incorrect = T.neq(prediction, labels)
misclass = T.cast(incorrect, config.floatX).mean()
rval['misclass'] = misclass
else:
assert self.monitor_style == 'bit_vector_class'
# Threshold Y_hat at 0.5.
prediction = T.gt(state, 0.5)
# If even one feature is wrong for a given training example,
# it's considered incorrect, so we max over columns.
incorrect = T.neq(targets, prediction).max(axis=1)
rval['misclass'] = T.cast(incorrect, config.floatX).mean()
return rval
class RectifiedLinear(Linear):
    """
    Rectified linear MLP layer (Glorot and Bengio 2011).

    Parameters
    ----------
    left_slope : float
        Slope of the activation for inputs below zero.
    kwargs : dict
        Keyword arguments forwarded to the `Linear` constructor.
    """

    def __init__(self, left_slope=0.0, **kwargs):
        super(RectifiedLinear, self).__init__(**kwargs)
        self.left_slope = left_slope

    @wraps(Layer.fprop)
    def fprop(self, state_below):
        z = self._linear_part(state_below)
        # T.switch is faster than the arithmetic form
        # z * (z > 0.) + left_slope * z * (z < 0.).
        # For details, see benchmarks in
        # pylearn2/scripts/benchmark/time_relu.py
        return T.switch(z > 0., z, self.left_slope * z)

    @wraps(Layer.cost)
    def cost(self, *args, **kwargs):
        raise NotImplementedError()
class Softplus(Linear):
    """
    An MLP layer using the softplus nonlinearity

    h = log(1 + exp(Wx + b))

    Parameters
    ----------
    kwargs : dict
        Keyword arguments forwarded to the `Linear` constructor.
    """

    def __init__(self, **kwargs):
        super(Softplus, self).__init__(**kwargs)

    @wraps(Layer.fprop)
    def fprop(self, state_below):
        # Affine part from Linear, then the softplus activation.
        return T.nnet.softplus(self._linear_part(state_below))

    @wraps(Layer.cost)
    def cost(self, *args, **kwargs):
        raise NotImplementedError()
class SpaceConverter(Layer):
    """
    A parameter-free layer that reformats its input from one space
    into another.

    Parameters
    ----------
    layer_name : str
        Name of the layer.
    output_space : Space
        The space the input is converted to.
    """

    def __init__(self, layer_name, output_space):
        super(SpaceConverter, self).__init__()
        self.layer_name = layer_name
        self.output_space = output_space
        # No trainable parameters.
        self._params = []

    @wraps(Layer.set_input_space)
    def set_input_space(self, space):
        self.input_space = space

    @wraps(Layer.fprop)
    def fprop(self, state_below):
        return self.input_space.format_as(state_below, self.output_space)
class ConvNonlinearity(object):
    """
    Abstract convolutional nonlinearity class.
    """

    def apply(self, linear_response):
        """
        Applies the nonlinearity over the convolutional layer.

        Parameters
        ----------
        linear_response : Variable
            linear response of the layer.

        Returns
        -------
        p : Variable
            the response of the layer after the activation function
            is applied over.
        """
        # Identity by default; subclasses override with an actual
        # activation function.
        return linear_response

    def _get_monitoring_channels_for_activations(self, state):
        """
        Computes the monitoring channels which do not require targets.

        Parameters
        ----------
        state : member of self.output_space
            A minibatch of states that this Layer took on during fprop.
            Provided externally so that we don't need to make a second
            expression for it. This helps keep the Theano graph smaller
            so that function compilation runs faster.

        Returns
        -------
        rval : OrderedDict
            A dictionary mapping channel names to monitoring channels of
            interest for this layer.
        """
        rval = OrderedDict({})
        mx = state.max(axis=0)
        mean = state.mean(axis=0)
        mn = state.min(axis=0)
        # Each per-unit statistic is summarized across units by its
        # max, mean and min (the "_u" suffix means "across units").
        for prefix, stat in [('range_x', mx - mn),
                             ('max_x', mx),
                             ('mean_x', mean),
                             ('min_x', mn)]:
            rval[prefix + '_max_u'] = stat.max()
            rval[prefix + '_mean_u'] = stat.mean()
            rval[prefix + '_min_u'] = stat.min()
        return rval

    def get_monitoring_channels_from_state(self, state, target,
                                           cost_fn=None):
        """
        Override the default get_monitoring_channels_from_state function.

        Parameters
        ----------
        state : member of self.output_space
            A minibatch of states that this Layer took on during fprop.
            Provided externally so that we don't need to make a second
            expression for it. This helps keep the Theano graph smaller
            so that function compilation runs faster.
        target : member of self.output_space
            Should be None unless this is the last layer.
            If specified, it should be a minibatch of targets for the
            last layer.
        cost_fn : theano computational graph or None
            This is the theano computational graph of a cost function.

        Returns
        -------
        rval : OrderedDict
            A dictionary mapping channel names to monitoring channels of
            interest for this layer.
        """
        # The base class only reports target-free activation statistics.
        return self._get_monitoring_channels_for_activations(state)
class IdentityConvNonlinearity(ConvNonlinearity):
    """
    Linear (identity) convolutional nonlinearity class.
    """

    def __init__(self):
        self.non_lin_name = "linear"

    @wraps(ConvNonlinearity.get_monitoring_channels_from_state)
    def get_monitoring_channels_from_state(self,
                                           state,
                                           target,
                                           cost_fn=False):
        rval = super(IdentityConvNonlinearity,
                     self).get_monitoring_channels_from_state(state,
                                                              target,
                                                              cost_fn)
        if target is not None:
            # Threshold the linear output at 0.5; an example is
            # misclassified if any output bit disagrees with its target.
            # Bug fix: the elementwise inequality op is T.neq (as used
            # elsewhere in this file); theano.tensor has no `new`
            # attribute, so the previous T.new(...) call raised
            # AttributeError whenever targets were supplied.
            prediction = T.gt(state, 0.5)
            incorrect = T.neq(target, prediction).max(axis=1)
            rval["misclass"] = T.cast(incorrect, config.floatX).mean()
        return rval
class RectifierConvNonlinearity(ConvNonlinearity):
    """
    A simple rectifier nonlinearity class for convolutional layers.

    Parameters
    ----------
    left_slope : float
        The slope of the left half of the activation function.
    """

    def __init__(self, left_slope=0.0):
        """
        Parameters
        ----------
        left_slope : float, optional
            left slope for the linear response of the rectifier function.
            default is 0.0.
        """
        self.non_lin_name = "rectifier"
        self.left_slope = left_slope

    @wraps(ConvNonlinearity.apply)
    def apply(self, linear_response):
        """
        Applies the rectifier nonlinearity over the convolutional layer.
        """
        # Use T.switch rather than the arithmetic form
        # p * (p > 0.) + left_slope * p * (p < 0.): it computes the same
        # values but is faster, consistent with RectifiedLinear.fprop.
        # For details, see benchmarks in
        # pylearn2/scripts/benchmark/time_relu.py
        p = T.switch(linear_response > 0., linear_response,
                     self.left_slope * linear_response)
        return p
class SigmoidConvNonlinearity(ConvNonlinearity):
    """
    Sigmoid nonlinearity class for convolutional layers.

    Parameters
    ----------
    monitor_style : str, optional
        default monitor_style is "classification".
        This determines whether to do classification or detection.
    """

    def __init__(self, monitor_style="classification"):
        assert monitor_style in ['classification', 'detection']
        self.monitor_style = monitor_style
        self.non_lin_name = "sigmoid"

    @wraps(ConvNonlinearity.apply)
    def apply(self, linear_response):
        """
        Applies the sigmoid nonlinearity over the convolutional layer.
        """
        return T.nnet.sigmoid(linear_response)

    @wraps(ConvNonlinearity.get_monitoring_channels_from_state)
    def get_monitoring_channels_from_state(self, state, target,
                                           cost_fn=None):
        rval = super(SigmoidConvNonlinearity,
                     self).get_monitoring_channels_from_state(state,
                                                              target,
                                                              cost_fn)
        if target is None:
            return rval
        prediction = state > 0.5
        truth = target > 0.5
        rval['01_loss'] = T.cast(T.neq(truth, prediction),
                                 state.dtype).mean()
        rval['kl'] = cost_fn(Y_hat=state, Y=target)
        truth = T.cast(truth, state.dtype)
        prediction = T.cast(prediction, state.dtype)
        # Whole-batch precision / recall / F1.
        true_pos = (truth * prediction).sum()
        false_pos = ((1 - truth) * prediction).sum()
        precision = compute_precision(true_pos, false_pos)
        recall = compute_recall(truth, true_pos)
        f1 = compute_f1(precision, recall)
        rval['precision'] = precision
        rval['recall'] = recall
        rval['f1'] = f1
        # Per-output statistics, summarized by max / mean / min.
        true_pos = (truth * prediction).sum(axis=[0, 1])
        false_pos = ((1 - truth) * prediction).sum(axis=[0, 1])
        precision = compute_precision(true_pos, false_pos)
        recall = compute_recall(truth, true_pos)
        f1 = compute_f1(precision, recall)
        for name, stat in [('precision', precision),
                           ('recall', recall),
                           ('f1', f1)]:
            rval['per_output_%s_max' % name] = stat.max()
            rval['per_output_%s_mean' % name] = stat.mean()
            rval['per_output_%s_min' % name] = stat.min()
        return rval
class TanhConvNonlinearity(ConvNonlinearity):
    """
    Tanh nonlinearity class for convolutional layers.
    """

    def __init__(self):
        self.non_lin_name = "tanh"

    @wraps(ConvNonlinearity.apply)
    def apply(self, linear_response):
        """
        Applies the tanh nonlinearity over the convolutional layer.
        """
        return T.tanh(linear_response)
class ConvElemwise(Layer):
    """
    Generic convolutional elemwise layer.

    Takes the ConvNonlinearity object as an argument and implements
    convolutional layer with the specified nonlinearity.

    This function can implement:

    * Linear convolutional layer
    * Rectifier convolutional layer
    * Sigmoid convolutional layer
    * Tanh convolutional layer

    based on the nonlinearity argument that it receives.

    Parameters
    ----------
    output_channels : int
        The number of output channels the layer should have.
    kernel_shape : tuple
        The shape of the convolution kernel.
    pool_shape : tuple
        The shape of the spatial max pooling. A two-tuple of ints.
    pool_stride : tuple
        The stride of the spatial max pooling. Also must be square.
    layer_name : str
        A name for this layer that will be prepended to monitoring channels
        related to this layer.
    nonlinearity : object
        An instance of a nonlinearity object which might be inherited
        from the ConvNonlinearity class.
    irange : float, optional
        if specified, initializes each weight randomly in
        U(-irange, irange)
    border_mode : str, optional
        A string indicating the size of the output:

        - "full" : The output is the full discrete linear convolution of the
          inputs.
        - "valid" : The output consists only of those elements that do not
          rely on the zero-padding. (Default)
    sparse_init : int, optional
        if specified, forwarded as `num_nonzero` to
        conv2d.make_sparse_random_conv2D (see `initialize_transformer`).
        Mutually exclusive with `irange`.
    include_prob : float, optional
        probability of including a weight element in the set of weights
        initialized to U(-irange, irange). If not included it is initialized
        to 1.0.
    init_bias : float, optional
        All biases are initialized to this number. Default is 0.
    W_lr_scale : float or None
        The learning rate on the weights for this layer is multiplied by this
        scaling factor
    b_lr_scale : float or None
        The learning rate on the biases for this layer is multiplied by this
        scaling factor
    max_kernel_norm : float or None
        If specified, each kernel is constrained to have at most this norm.
    pool_type : str or None
        The type of the pooling operation performed the convolution.
        Default pooling type is max-pooling.
    tied_b : bool, optional
        If true, all biases in the same channel are constrained to be the
        same as each other. Otherwise, each bias at each location is
        learned independently. Default is true.
    detector_normalization : callable or None
        See `output_normalization`.
        If pooling argument is not provided, detector_normalization
        is not applied on the layer.
    output_normalization : callable or None
        if specified, should be a callable object. the state of the
        network is optionally replaced with normalization(state) at each
        of the 3 points in processing:

        - detector: the maxout units can be normalized prior to the
          spatial pooling
        - output: the output of the layer, after spatial pooling, can
          be normalized as well
    kernel_stride : 2-tuple of ints, optional
        The stride of the convolution kernel. Default is (1, 1).
    """

    def __init__(self,
                 output_channels,
                 kernel_shape,
                 layer_name,
                 nonlinearity,
                 irange=None,
                 border_mode='valid',
                 sparse_init=None,
                 include_prob=1.0,
                 init_bias=0.,
                 W_lr_scale=None,
                 b_lr_scale=None,
                 max_kernel_norm=None,
                 pool_type=None,
                 pool_shape=None,
                 pool_stride=None,
                 tied_b=None,
                 detector_normalization=None,
                 output_normalization=None,
                 kernel_stride=(1, 1),
                 monitor_style="classification"):
        # Exactly one of irange / sparse_init must be given; they select
        # between dense-uniform and sparse weight initialization.
        if (irange is None) and (sparse_init is None):
            raise AssertionError("You should specify either irange or "
                                 "sparse_init when calling the constructor of "
                                 "ConvElemwise.")
        elif (irange is not None) and (sparse_init is not None):
            raise AssertionError("You should specify either irange or "
                                 "sparse_init when calling the constructor of "
                                 "ConvElemwise and not both.")
        if pool_type is not None:
            # Pooling geometry is mandatory once a pooling type is chosen.
            assert pool_shape is not None, (
                "You should specify the shape of the spatial %s-pooling." %
                pool_type)
            assert pool_stride is not None, (
                "You should specify the strides of the spatial %s-pooling." %
                pool_type)
        assert nonlinearity is not None
        super(ConvElemwise, self).__init__()
        self.nonlin = nonlinearity
        # Stash every constructor argument as an attribute of self.
        self.__dict__.update(locals())
        assert monitor_style in ['classification', 'detection'], (
            "%s.monitor_style should be either"
            "detection or classification" % self.__class__.__name__)
        del self.self
        if max_kernel_norm is not None:
            # Enforce the per-kernel L2 norm constraint through a model
            # extension rather than inside this layer's updates.
            self.extensions.append(
                MaxL2FilterNorm(max_kernel_norm, axis=(1, 2, 3))
            )

    def initialize_transformer(self, rng):
        """
        This function initializes the transformer of the class. Re-running
        this function will reset the transformer.

        Parameters
        ----------
        rng : object
            random number generator object.
        """
        if self.irange is not None:
            assert self.sparse_init is None
            # Dense initialization: weights drawn from U(-irange, irange).
            self.transformer = conv2d.make_random_conv2D(
                irange=self.irange,
                input_space=self.input_space,
                output_space=self.detector_space,
                kernel_shape=self.kernel_shape,
                subsample=self.kernel_stride,
                border_mode=self.border_mode,
                rng=rng)
        elif self.sparse_init is not None:
            # Sparse initialization: only `sparse_init` nonzero entries.
            self.transformer = conv2d.make_sparse_random_conv2D(
                num_nonzero=self.sparse_init,
                input_space=self.input_space,
                output_space=self.detector_space,
                kernel_shape=self.kernel_shape,
                subsample=self.kernel_stride,
                border_mode=self.border_mode,
                rng=rng)
        else:
            raise ValueError('irange and sparse_init cannot be both None')

    def initialize_output_space(self):
        """
        Initializes the output space of the ConvElemwise layer by taking
        pooling operator and the hyperparameters of the convolutional layer
        into consideration as well.
        """
        dummy_batch_size = self.mlp.batch_size
        if dummy_batch_size is None:
            dummy_batch_size = 2
        # Infer the pooled output shape by actually running the pooling
        # graph on a dummy batch and evaluating it.
        dummy_detector =\
            sharedX(self.detector_space.get_origin_batch(dummy_batch_size))
        if self.pool_type is not None:
            assert self.pool_type in ['max', 'mean']
            if self.pool_type == 'max':
                dummy_p = max_pool(bc01=dummy_detector,
                                   pool_shape=self.pool_shape,
                                   pool_stride=self.pool_stride,
                                   image_shape=self.detector_space.shape)
            elif self.pool_type == 'mean':
                dummy_p = mean_pool(bc01=dummy_detector,
                                    pool_shape=self.pool_shape,
                                    pool_stride=self.pool_stride,
                                    image_shape=self.detector_space.shape)
            dummy_p = dummy_p.eval()
            self.output_space = Conv2DSpace(shape=[dummy_p.shape[2],
                                                   dummy_p.shape[3]],
                                            num_channels=self.output_channels,
                                            axes=('b', 'c', 0, 1))
        else:
            # No pooling: the output space equals the detector space.
            dummy_detector = dummy_detector.eval()
            self.output_space = Conv2DSpace(shape=[dummy_detector.shape[2],
                                                   dummy_detector.shape[3]],
                                            num_channels=self.output_channels,
                                            axes=('b', 'c', 0, 1))
        logger.info('Output space: {0}'.format(self.output_space.shape))

    @wraps(Layer.set_input_space)
    def set_input_space(self, space):
        """ Note: this function will reset the parameters! """
        self.input_space = space
        if not isinstance(space, Conv2DSpace):
            raise BadInputSpaceError(self.__class__.__name__ +
                                     ".set_input_space "
                                     "expected a Conv2DSpace, got " +
                                     str(space) + " of type " +
                                     str(type(space)))
        rng = self.mlp.rng
        # Standard convolution output-shape arithmetic for each border
        # mode. NOTE(review): any other border_mode value would leave
        # output_shape undefined and raise UnboundLocalError below.
        if self.border_mode == 'valid':
            output_shape = [int((self.input_space.shape[0]
                                 - self.kernel_shape[0])
                                / self.kernel_stride[0]) + 1,
                            int((self.input_space.shape[1]
                                 - self.kernel_shape[1])
                                / self.kernel_stride[1]) + 1]
        elif self.border_mode == 'full':
            output_shape = [int((self.input_space.shape[0]
                                 + self.kernel_shape[0])
                                / self.kernel_stride[0]) - 1,
                            int((self.input_space.shape[1]
                                 + self.kernel_shape[1])
                                / self.kernel_stride[1]) - 1]
        self.detector_space = Conv2DSpace(shape=output_shape,
                                          num_channels=self.output_channels,
                                          axes=('b', 'c', 0, 1))
        self.initialize_transformer(rng)
        W, = self.transformer.get_params()
        W.name = self.layer_name + '_W'
        if self.tied_b:
            # One bias per channel, shared across spatial locations.
            self.b = sharedX(np.zeros((self.detector_space.num_channels)) +
                             self.init_bias)
        else:
            # One bias per (channel, row, col) location.
            self.b = sharedX(self.detector_space.get_origin() + self.init_bias)
        self.b.name = self.layer_name + '_b'
        logger.info('Input shape: {0}'.format(self.input_space.shape))
        logger.info('Detector space: {0}'.format(self.detector_space.shape))
        self.initialize_output_space()

    @wraps(Layer.get_params)
    def get_params(self):
        assert self.b.name is not None
        W, = self.transformer.get_params()
        assert W.name is not None
        rval = self.transformer.get_params()
        assert not isinstance(rval, set)
        rval = list(rval)
        assert self.b not in rval
        rval.append(self.b)
        return rval

    @wraps(Layer.get_weight_decay)
    def get_weight_decay(self, coeff):
        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        W, = self.transformer.get_params()
        # L2 penalty on the kernel weights.
        return coeff * T.sqr(W).sum()

    @wraps(Layer.get_l1_weight_decay)
    def get_l1_weight_decay(self, coeff):
        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        W, = self.transformer.get_params()
        # L1 penalty on the kernel weights.
        return coeff * abs(W).sum()

    @wraps(Layer.set_weights)
    def set_weights(self, weights):
        W, = self.transformer.get_params()
        W.set_value(weights)

    @wraps(Layer.set_biases)
    def set_biases(self, biases):
        self.b.set_value(biases)

    @wraps(Layer.get_biases)
    def get_biases(self):
        return self.b.get_value()

    @wraps(Layer.get_weights_format)
    def get_weights_format(self):
        return ('v', 'h')

    @wraps(Layer.get_lr_scalers)
    def get_lr_scalers(self):
        # hasattr guards: presumably for models pickled before these
        # attributes existed -- TODO confirm.
        if not hasattr(self, 'W_lr_scale'):
            self.W_lr_scale = None
        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None
        rval = OrderedDict()
        if self.W_lr_scale is not None:
            W, = self.transformer.get_params()
            rval[W] = self.W_lr_scale
        if self.b_lr_scale is not None:
            rval[self.b] = self.b_lr_scale
        return rval

    @wraps(Layer.get_weights_topo)
    def get_weights_topo(self):
        # Reorder filter axes from (out, in, rows, cols) to
        # (out, rows, cols, in) for visualization.
        outp, inp, rows, cols = range(4)
        raw = self.transformer._filters.get_value()
        return np.transpose(raw, (outp, rows, cols, inp))

    @wraps(Layer.get_layer_monitoring_channels)
    def get_layer_monitoring_channels(self, state_below=None,
                                      state=None, targets=None):
        W, = self.transformer.get_params()
        assert W.ndim == 4
        sq_W = T.sqr(W)
        # L2 norm of each kernel (summed over in-channels and space).
        row_norms = T.sqrt(sq_W.sum(axis=(1, 2, 3)))
        rval = OrderedDict([
            ('kernel_norms_min', row_norms.min()),
            ('kernel_norms_mean', row_norms.mean()),
            ('kernel_norms_max', row_norms.max()),
        ])
        # Delegate target-dependent channels to the nonlinearity,
        # handing it this layer's cost as the cost function.
        cst = self.cost
        orval = self.nonlin.get_monitoring_channels_from_state(state,
                                                               targets,
                                                               cost_fn=cst)
        rval.update(orval)
        return rval

    @wraps(Layer.fprop)
    def fprop(self, state_below):
        self.input_space.validate(state_below)
        # Convolution (linear part).
        z = self.transformer.lmul(state_below)
        # hasattr guard: presumably for models pickled before tied_b
        # existed -- TODO confirm.
        if not hasattr(self, 'tied_b'):
            self.tied_b = False
        if self.tied_b:
            # Broadcast the per-channel bias over batch and space.
            b = self.b.dimshuffle('x', 0, 'x', 'x')
        else:
            # Per-location bias; broadcast over the batch axis only.
            b = self.b.dimshuffle('x', 0, 1, 2)
        z = z + b
        # Elementwise nonlinearity.
        d = self.nonlin.apply(z)
        if self.layer_name is not None:
            d.name = self.layer_name + '_z'
        self.detector_space.validate(d)
        if self.pool_type is not None:
            # Format the input to be supported by max pooling
            if not hasattr(self, 'detector_normalization'):
                self.detector_normalization = None
            if self.detector_normalization:
                d = self.detector_normalization(d)
            assert self.pool_type in ['max', 'mean'], ("pool_type should be"
                                                       "either max or mean"
                                                       "pooling.")
            if self.pool_type == 'max':
                p = max_pool(bc01=d, pool_shape=self.pool_shape,
                             pool_stride=self.pool_stride,
                             image_shape=self.detector_space.shape)
            elif self.pool_type == 'mean':
                p = mean_pool(bc01=d, pool_shape=self.pool_shape,
                              pool_stride=self.pool_stride,
                              image_shape=self.detector_space.shape)
            self.output_space.validate(p)
        else:
            p = d
        if not hasattr(self, 'output_normalization'):
            self.output_normalization = None
        if self.output_normalization:
            p = self.output_normalization(p)
        return p

    def cost(self, Y, Y_hat):
        """
        Cost for convnets is hardcoded to be the cost for sigmoids.
        TODO: move the cost into the non-linearity class.

        Parameters
        ----------
        Y : theano.gof.Variable
            Targets
        Y_hat : theano.gof.Variable
            Output of `fprop`

        Returns
        -------
        cost : theano.gof.Variable
            0-D tensor describing the cost

        Notes
        -----
        Cost mean across units, mean across batch of KL divergence
        KL(P || Q) where P is defined by Y and Q is defined by Y_hat
        KL(P || Q) = p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)
        """
        assert self.nonlin.non_lin_name == "sigmoid", ("ConvElemwise "
                                                       "supports "
                                                       "cost function "
                                                       "for only "
                                                       "sigmoid layer "
                                                       "for now.")
        batch_axis = self.output_space.get_batch_axis()
        ave_total = kl(Y=Y, Y_hat=Y_hat, batch_axis=batch_axis)
        ave = ave_total.mean()
        return ave
class ConvRectifiedLinear(ConvElemwise):
    """
    A convolutional rectified linear layer, based on theano's B01C
    formatted convolution.

    Parameters
    ----------
    output_channels : int
        The number of output channels the layer should have.
    kernel_shape : tuple
        The shape of the convolution kernel.
    pool_shape : tuple
        The shape of the spatial max pooling. A two-tuple of ints.
    pool_stride : tuple
        The stride of the spatial max pooling. Also must be square.
    layer_name : str
        A name for this layer that will be prepended to monitoring channels
        related to this layer.
    irange : float
        if specified, initializes each weight randomly in
        U(-irange, irange)
    border_mode : str
        A string indicating the size of the output:

        - "full" : The output is the full discrete linear convolution of
          the inputs.
        - "valid" : The output consists only of those elements that do not
          rely on the zero-padding. (Default)
    include_prob : float
        probability of including a weight element in the set of weights
        initialized to U(-irange, irange). If not included it is initialized
        to 0.
    init_bias : float
        All biases are initialized to this number
    W_lr_scale : float
        The learning rate on the weights for this layer is multiplied by
        this scaling factor
    b_lr_scale : float
        The learning rate on the biases for this layer is multiplied by
        this scaling factor
    left_slope : float
        The slope of the left half of the activation function
    max_kernel_norm : float
        If specified, each kernel is constrained to have at most this norm.
    pool_type :
        The type of the pooling operation performed the convolution.
        Default pooling type is max-pooling.
    tied_b : bool
        If true, all biases in the same channel are constrained to be the
        same as each other. Otherwise, each bias at each location is
        learned independently.
    detector_normalization : callable
        See `output_normalization`
    output_normalization : callable
        if specified, should be a callable object. the state of the
        network is optionally replaced with normalization(state) at each
        of the 3 points in processing:

        - detector: the rectifier units can be normalized prior to the
          spatial pooling
        - output: the output of the layer, after spatial pooling, can
          be normalized as well
    kernel_stride : tuple
        The stride of the convolution kernel. A two-tuple of ints.
    """

    def __init__(self,
                 output_channels,
                 kernel_shape,
                 pool_shape,
                 pool_stride,
                 layer_name,
                 irange=None,
                 border_mode='valid',
                 sparse_init=None,
                 include_prob=1.0,
                 init_bias=0.,
                 W_lr_scale=None,
                 b_lr_scale=None,
                 left_slope=0.0,
                 max_kernel_norm=None,
                 pool_type='max',
                 tied_b=False,
                 detector_normalization=None,
                 output_normalization=None,
                 kernel_stride=(1, 1),
                 monitor_style="classification"):
        # Exactly one of irange / sparse_init must be given.
        if (irange is None) and (sparse_init is None):
            raise AssertionError("You should specify either irange or "
                                 "sparse_init when calling the constructor of "
                                 "ConvRectifiedLinear.")
        elif (irange is not None) and (sparse_init is not None):
            raise AssertionError("You should specify either irange or "
                                 "sparse_init when calling the constructor of "
                                 "ConvRectifiedLinear and not both.")
        # This layer is just ConvElemwise specialized to a rectifier.
        nonlinearity = RectifierConvNonlinearity(left_slope)
        super(ConvRectifiedLinear, self).__init__(
            output_channels,
            kernel_shape,
            layer_name,
            nonlinearity,
            irange=irange,
            border_mode=border_mode,
            sparse_init=sparse_init,
            include_prob=include_prob,
            init_bias=init_bias,
            W_lr_scale=W_lr_scale,
            b_lr_scale=b_lr_scale,
            pool_shape=pool_shape,
            pool_stride=pool_stride,
            max_kernel_norm=max_kernel_norm,
            pool_type=pool_type,
            tied_b=tied_b,
            detector_normalization=detector_normalization,
            output_normalization=output_normalization,
            kernel_stride=kernel_stride,
            monitor_style=monitor_style)
def pool_dnn(bc01, pool_shape, pool_stride, mode='max'):
    """
    cuDNN pooling op.

    Parameters
    ----------
    bc01 : theano tensor
        Minibatch in format (batch size, channels, rows, cols).
    pool_shape : tuple
        Shape of the pool region (rows, cols).
    pool_stride : tuple
        Strides between pooling regions (row stride, col stride).
    mode : str
        Flag for `mean` or `max` pooling.

    Returns
    -------
    mx : theano tensor
        The output of pooling applied to `bc01`.
    """
    assert mode in ['max', 'mean']
    if mode == 'mean':
        raise NotImplementedError('Mean pooling is not implemented '
                                  'in Pylearn2 using cuDNN as of '
                                  'January 19th, 2015.')
    # dnn_pool expects tuples, not lists, for shape and stride.
    return dnn_pool(bc01, tuple(pool_shape), tuple(pool_stride), mode)
def max_pool(bc01, pool_shape, pool_stride, image_shape, try_dnn=True):
    """
    Theano's max pooling op only supports pool_stride = pool_shape
    so here we have a graph that does max pooling with strides

    Parameters
    ----------
    bc01 : theano tensor
        minibatch in format (batch size, channels, rows, cols)
    pool_shape : tuple
        shape of the pool region (rows, cols)
    pool_stride : tuple
        strides between pooling regions (row stride, col stride)
    image_shape : tuple
        avoid doing some of the arithmetic in theano
    try_dnn : bool
        Flag to set cuDNN use (default: True).

    Returns
    -------
    pooled : theano tensor
        The output of pooling applied to `bc01`

    See Also
    --------
    max_pool_c01b : Same functionality but with ('c', 0, 1, 'b') axes
    sandbox.cuda_convnet.pool.max_pool_c01b : Same functionality as
        `max_pool_c01b` but GPU-only and considerably faster.
    mean_pool : Mean pooling instead of max pooling
    """
    mx = None
    r, c = image_shape
    pr, pc = pool_shape
    rs, cs = pool_stride
    # The pool region must fit inside the image.
    assert pr <= r
    assert pc <= c
    name = bc01.name
    if name is None:
        name = 'anon_bc01'
    # cuDNN pooling only handles float32 inputs.
    if try_dnn and bc01.dtype == "float32":
        use_dnn = dnn_available()
    else:
        use_dnn = False
    # Fast path: non-overlapping pooling is supported directly by
    # Theano's max_pool_2d (the False disables ignore_border).
    if pool_shape == pool_stride and not use_dnn:
        mx = max_pool_2d(bc01, pool_shape, False)
        mx.name = 'max_pool(' + name + ')'
        return mx

    # Compute index in pooled space of last needed pool
    # (needed = each input pixel must appear in at least one pool)
    def last_pool(im_shp, p_shp, p_strd):
        rval = int(np.ceil(float(im_shp - p_shp) / p_strd))
        assert p_strd * rval + p_shp >= im_shp
        assert p_strd * (rval - 1) + p_shp < im_shp
        # Catch case where p_strd > p_shp causes pool
        # to be set outside of im_shp.
        if p_strd * rval >= im_shp:
            rval -= 1
        return rval

    # Compute starting row of the last pool
    last_pool_r = last_pool(image_shape[0],
                            pool_shape[0],
                            pool_stride[0]) * pool_stride[0]
    # Compute number of rows needed in image for all indexes to work out
    required_r = last_pool_r + pr
    last_pool_c = last_pool(image_shape[1],
                            pool_shape[1],
                            pool_stride[1]) * pool_stride[1]
    required_c = last_pool_c + pc
    for bc01v in get_debug_values(bc01):
        assert not contains_inf(bc01v)
        assert bc01v.shape[2] == image_shape[0]
        assert bc01v.shape[3] == image_shape[1]
    if (required_r > r) or (required_c > c):
        # Pad the image with -inf so that out-of-range positions never
        # win the max in any pool region.
        small_r = min(required_r, r)
        small_c = min(required_c, c)
        assert bc01.dtype.startswith('float')
        wide_infinity = T.alloc(T.constant(-np.inf, dtype=bc01.dtype),
                                bc01.shape[0],
                                bc01.shape[1],
                                required_r,
                                required_c)
        bc01 = T.set_subtensor(wide_infinity[:, :, 0:small_r, 0:small_c],
                               bc01[:, :, 0:small_r, 0:small_c])
        name = 'infinite_padded_' + name
    if use_dnn:
        mx = pool_dnn(bc01, pool_shape, pool_stride, 'max')
    else:
        # Strided pooling as a max over pool_shape[0] * pool_shape[1]
        # shifted, strided slices of the input.
        for row_within_pool in xrange(pool_shape[0]):
            row_stop = last_pool_r + row_within_pool + 1
            for col_within_pool in xrange(pool_shape[1]):
                col_stop = last_pool_c + col_within_pool + 1
                cur = bc01[:,
                           :,
                           row_within_pool:row_stop:rs,
                           col_within_pool:col_stop:cs]
                cur.name = ('max_pool_cur_' + name + '_' +
                            str(row_within_pool) + '_' + str(col_within_pool))
                if mx is None:
                    mx = cur
                else:
                    mx = T.maximum(mx, cur)
                    mx.name = ('max_pool_mx_' + name + '_' +
                               str(row_within_pool) + '_' +
                               str(col_within_pool))
    mx.name = 'max_pool(' + name + ')'
    for mxv in get_debug_values(mx):
        assert isfinite(mxv)
    return mx
def max_pool_c01b(c01b, pool_shape, pool_stride, image_shape):
    """
    Theano's max pooling op only supports pool_stride = pool_shape
    so here we have a graph that does max pooling with strides

    Parameters
    ----------
    c01b : theano tensor
        minibatch in format (channels, rows, cols, batch size)
    pool_shape : tuple
        shape of the pool region (rows, cols)
    pool_stride : tuple
        strides between pooling regions (row stride, col stride)
    image_shape : tuple
        avoid doing some of the arithmetic in theano

    Returns
    -------
    pooled : theano tensor
        The output of pooling applied to `c01b`

    See Also
    --------
    sandbox.cuda_convnet.pool.max_pool_c01b : Same functionality but GPU-only
        and considerably faster.
    max_pool : Same functionality but with ('b', 0, 1, 'c') axes
    """
    mx = None
    r, c = image_shape
    pr, pc = pool_shape
    rs, cs = pool_stride
    # The pool region must be non-empty and fit inside the image.
    assert pr > 0
    assert pc > 0
    assert pr <= r
    assert pc <= c

    # Compute index in pooled space of last needed pool
    # (needed = each input pixel must appear in at least one pool)
    def last_pool(im_shp, p_shp, p_strd):
        rval = int(np.ceil(float(im_shp - p_shp) / p_strd))
        assert p_strd * rval + p_shp >= im_shp
        assert p_strd * (rval - 1) + p_shp < im_shp
        return rval

    # Compute starting row of the last pool
    last_pool_r = last_pool(image_shape[0],
                            pool_shape[0],
                            pool_stride[0]) * pool_stride[0]
    # Compute number of rows needed in image for all indexes to work out
    required_r = last_pool_r + pr
    last_pool_c = last_pool(image_shape[1],
                            pool_shape[1],
                            pool_stride[1]) * pool_stride[1]
    required_c = last_pool_c + pc
    for c01bv in get_debug_values(c01b):
        assert not contains_inf(c01bv)
        assert c01bv.shape[1] == r
        assert c01bv.shape[2] == c
    # Unconditionally pad with -inf (unlike max_pool, which pads only
    # when required) so out-of-range positions never win the max.
    wide_infinity = T.alloc(-np.inf,
                            c01b.shape[0],
                            required_r,
                            required_c,
                            c01b.shape[3])
    name = c01b.name
    if name is None:
        # NOTE(review): fallback name says 'bc01' despite c01b layout;
        # cosmetic only -- it is only used to label graph nodes.
        name = 'anon_bc01'
    c01b = T.set_subtensor(wide_infinity[:, 0:r, 0:c, :], c01b)
    c01b.name = 'infinite_padded_' + name
    # Strided pooling as a max over pool_shape[0] * pool_shape[1]
    # shifted, strided slices of the padded input.
    for row_within_pool in xrange(pool_shape[0]):
        row_stop = last_pool_r + row_within_pool + 1
        for col_within_pool in xrange(pool_shape[1]):
            col_stop = last_pool_c + col_within_pool + 1
            cur = c01b[:,
                       row_within_pool:row_stop:rs,
                       col_within_pool:col_stop:cs,
                       :]
            cur.name = ('max_pool_cur_' + c01b.name + '_' +
                        str(row_within_pool) + '_' + str(col_within_pool))
            if mx is None:
                mx = cur
            else:
                mx = T.maximum(mx, cur)
                mx.name = ('max_pool_mx_' + c01b.name + '_' +
                           str(row_within_pool) + '_' + str(col_within_pool))
    mx.name = 'max_pool(' + name + ')'
    for mxv in get_debug_values(mx):
        assert isfinite(mxv)
    return mx
def mean_pool(bc01, pool_shape, pool_stride, image_shape):
    """
    Does mean pooling (aka average pooling) via a Theano graph.

    Parameters
    ----------
    bc01 : theano tensor
        minibatch in format (batch size, channels, rows, cols)
    pool_shape : tuple
        shape of the pool region (rows, cols)
    pool_stride : tuple
        strides between pooling regions (row stride, col stride)
    image_shape : tuple
        (rows, cols) tuple to avoid doing some arithmetic in theano

    Returns
    -------
    pooled : theano tensor
        The output of pooling applied to `bc01`

    See Also
    --------
    max_pool : Same thing but with max pooling

    Examples
    --------
    >>> import theano
    >>> import theano.tensor as T
    >>> from pylearn2.models.mlp import mean_pool
    >>> import numpy as np
    >>> t = np.array([[1, 1, 3, 3],
    ...               [1, 1, 3, 3],
    ...               [5, 5, 7, 7],
    ...               [5, 5, 7, 7],
    ...               [9, 9, 11, 11],
    ...               [9, 9, 11, 11]])
    >>> X = np.zeros((3, t.shape[0], t.shape[1]))
    >>> X[:] = t
    >>> X = X[np.newaxis]
    >>> X_sym = T.tensor4('X')
    >>> pool_it = mean_pool(X_sym, pool_shape=(2, 2), pool_stride=(2, 2),
    ...                     image_shape=(6, 4))
    >>> f = theano.function(inputs=[X_sym], outputs=pool_it)

    This will pool over over windows of size (2, 2) while also stepping by this
    same amount, shrinking the examples input to [[1, 3], [5, 7], [9, 11]].
    """
    mx = None
    r, c = image_shape
    pr, pc = pool_shape
    rs, cs = pool_stride

    # Compute index in pooled space of last needed pool
    # (needed = each input pixel must appear in at least one pool)
    def last_pool(im_shp, p_shp, p_strd):
        rval = int(np.ceil(float(im_shp - p_shp) / p_strd))
        assert p_strd * rval + p_shp >= im_shp
        assert p_strd * (rval - 1) + p_shp < im_shp
        return rval

    # Compute starting row of the last pool
    last_pool_r = last_pool(image_shape[0],
                            pool_shape[0],
                            pool_stride[0]) * pool_stride[0]
    # Compute number of rows needed in image for all indexes to work out
    required_r = last_pool_r + pr

    last_pool_c = last_pool(image_shape[1],
                            pool_shape[1],
                            pool_stride[1]) * pool_stride[1]
    required_c = last_pool_c + pc

    for bc01v in get_debug_values(bc01):
        assert not contains_inf(bc01v)
        assert bc01v.shape[2] == image_shape[0]
        assert bc01v.shape[3] == image_shape[1]

    # Pad with zeros, NOT -inf: unlike max pooling, the window contents are
    # *summed* here, so a -inf pad value would poison every boundary pool
    # and make its mean -inf. The `count` mask below excludes the padded
    # positions from the denominator, so zero padding yields the mean over
    # only the valid input elements.
    wide_pad = T.alloc(0.,
                       bc01.shape[0],
                       bc01.shape[1],
                       required_r,
                       required_c)

    name = bc01.name
    if name is None:
        name = 'anon_bc01'
    bc01 = T.set_subtensor(wide_pad[:, :, 0:r, 0:c], bc01)
    bc01.name = 'zero_padded_' + name

    # Create a 'mask' used to keep count of the number of elements summed for
    # each position
    wide_pad_count = T.alloc(0, bc01.shape[0], bc01.shape[1], required_r,
                             required_c)
    bc01_count = T.set_subtensor(wide_pad_count[:, :, 0:r, 0:c], 1)

    for row_within_pool in xrange(pool_shape[0]):
        row_stop = last_pool_r + row_within_pool + 1
        for col_within_pool in xrange(pool_shape[1]):
            col_stop = last_pool_c + col_within_pool + 1
            cur = bc01[:,
                       :,
                       row_within_pool:row_stop:rs,
                       col_within_pool:col_stop:cs]
            cur.name = ('mean_pool_cur_' + bc01.name + '_' +
                        str(row_within_pool) + '_' + str(col_within_pool))
            cur_count = bc01_count[:,
                                   :,
                                   row_within_pool:row_stop:rs,
                                   col_within_pool:col_stop:cs]
            if mx is None:
                mx = cur
                count = cur_count
            else:
                mx = mx + cur
                count = count + cur_count
                mx.name = ('mean_pool_mx_' + bc01.name + '_' +
                           str(row_within_pool) + '_' + str(col_within_pool))

    # Divide the per-window sums by the number of valid elements in each
    # window to get the mean.
    mx /= count
    mx.name = 'mean_pool(' + name + ')'

    for mxv in get_debug_values(mx):
        assert isfinite(mxv)

    return mx
@wraps(_WD)
def WeightDecay(*args, **kwargs):
    # Deprecated pass-through kept for backward compatibility; the real
    # implementation lives in pylearn2.costs.mlp.WeightDecay.
    # Note the trailing space after "link": without it, the two adjacent
    # string literals concatenated to "linkmay".
    warnings.warn("pylearn2.models.mlp.WeightDecay has moved to "
                  "pylearn2.costs.mlp.WeightDecay. This link "
                  "may be removed after 2015-05-13.")
    return _WD(*args, **kwargs)
@wraps(_L1WD)
def L1WeightDecay(*args, **kwargs):
    # Deprecated pass-through kept for backward compatibility; the real
    # implementation lives in pylearn2.costs.mlp.L1WeightDecay.
    # Fixes two message bugs: the target class is L1WeightDecay (it said
    # WeightDecay), and the adjacent literals concatenated to "linkmay".
    warnings.warn("pylearn2.models.mlp.L1WeightDecay has moved to "
                  "pylearn2.costs.mlp.L1WeightDecay. This link "
                  "may be removed after 2015-05-13.")
    return _L1WD(*args, **kwargs)
class LinearGaussian(Linear):
    """
    A Linear layer augmented with a precision vector, for modeling
    conditionally Gaussian data.

    Specifically, given an input x, this layer models the distribution over
    the output as

    y ~ p(y | x) = N(y | Wx + b, beta^-1)

    i.e., y is conditionally Gaussian with mean Wx + b and variance
    beta^-1.

    beta is a diagonal precision matrix so beta^-1 is a diagonal covariance
    matrix.

    Internally, beta is stored as the vector of diagonal values on this
    matrix.

    Since the output covariance is not a function of the input, this does
    not provide an example-specific estimate of the error in the mean.
    However, the vector-valued beta does mean that maximizing log p(y | x)
    will reweight the mean squared error so that variables that can be
    estimated easier will receive a higher penalty. This is one way of
    adapting the model better to heterogeneous data.

    Parameters
    ----------
    init_beta : float or ndarray
        Any value > 0 that can be broadcasted to a vector of shape (dim, ).
        The elements of beta are initialized to this value.
        A good value is often the precision (inverse variance) of the target
        variables in the training set, as provided by the
        `beta_from_targets` function. This is the optimal beta for a dummy
        model that just predicts the mean target value from the training set.
    min_beta : float
        The elements of beta are constrained to be >= this value.
        This value must be > 0., otherwise the output conditional is not
        constrained to be a valid probability distribution.
        A good value is often the precision (inverse variance) of the target
        variables in the training set, as provided by the
        `beta_from_targets` function. A trained model should always be able
        to obtain at least this much precision, at least on the training set.
    max_beta : float
        The elements of beta are constrained to be <= this value.
        We impose this constraint because for problems
        where the training set values can be predicted
        exactly, beta can grow without bound, which also makes the
        gradients grow without bound, resulting in numerical problems.
    beta_lr_scale : float or None
        Multiplier on the learning rate for `beta` (see `get_lr_scalers`).
        If None, beta uses the unscaled learning rate.
    kwargs : dict
        Arguments to the `Linear` superclass.
    """

    def __init__(self, init_beta, min_beta, max_beta, beta_lr_scale, **kwargs):
        super(LinearGaussian, self).__init__(**kwargs)
        self.__dict__.update(locals())
        del self.self
        del self.kwargs

    @wraps(Layer.set_input_space)
    def set_input_space(self, space):
        super(LinearGaussian, self).set_input_space(space)
        assert isinstance(self.output_space, VectorSpace)
        # beta is a learned parameter vector of per-output precisions.
        self.beta = sharedX(self.output_space.get_origin() + self.init_beta,
                            'beta')

    @wraps(Layer.get_layer_monitoring_channels)
    def get_layer_monitoring_channels(self, state_below=None,
                                      state=None, targets=None):
        rval = super(LinearGaussian,
                     self).get_layer_monitoring_channels(state_below,
                                                         state,
                                                         targets)
        assert isinstance(rval, OrderedDict)
        rval['beta_min'] = self.beta.min()
        rval['beta_mean'] = self.beta.mean()
        rval['beta_max'] = self.beta.max()
        # `targets` is either None or a symbolic theano variable. Symbolic
        # variables do not support truth-value testing, so the previous
        # `if targets:` raised a TypeError whenever targets was actually
        # provided; compare against None explicitly instead.
        if targets is not None:
            rval['mse'] = T.sqr(state - targets).mean()
        return rval

    @wraps(Linear.cost)
    def cost(self, Y, Y_hat):
        # Negative log-likelihood of the diagonal Gaussian (up to the
        # constant log(2*pi) term): precision-weighted squared error minus
        # the log-determinant of the precision.
        return (0.5 * T.dot(T.sqr(Y - Y_hat), self.beta).mean() -
                0.5 * T.log(self.beta).sum())

    @wraps(Linear.cost_matrix)
    def cost_matrix(self, Y, Y_hat):
        return 0.5 * T.sqr(Y - Y_hat) * self.beta - 0.5 * T.log(self.beta)

    @wraps(Layer._modify_updates)
    def _modify_updates(self, updates):
        super(LinearGaussian, self)._modify_updates(updates)
        # Keep beta within [min_beta, max_beta] after every update so the
        # conditional stays a valid distribution and gradients stay bounded.
        if self.beta in updates:
            updates[self.beta] = T.clip(updates[self.beta],
                                        self.min_beta,
                                        self.max_beta)

    @wraps(Layer.get_lr_scalers)
    def get_lr_scalers(self):
        rval = super(LinearGaussian, self).get_lr_scalers()
        if self.beta_lr_scale is not None:
            rval[self.beta] = self.beta_lr_scale
        return rval

    @wraps(Layer.get_params)
    def get_params(self):
        return super(LinearGaussian, self).get_params() + [self.beta]
def beta_from_design(design, min_var=1e-6, max_var=1e6):
    """
    Compute the marginal precision of each column of a design matrix.

    Parameters
    ----------
    design : ndarray
        A numpy ndarray containing a design matrix
    min_var : float
    max_var : float
        All variances are constrained to lie in the range
        [min_var, max_var] to avoid numerical issues like infinite
        precision.

    Returns
    -------
    beta : ndarray
        A 1-D vector containing the marginal precision of each variable
        in the design matrix.
    """
    # Per-column variance, clipped into a safe range before inversion.
    column_variances = design.var(axis=0)
    clipped_variances = np.clip(column_variances, min_var, max_var)
    return np.reciprocal(clipped_variances)
def beta_from_targets(dataset, **kwargs):
    """
    Compute the marginal precision of the targets in a dataset.

    Parameters
    ----------
    dataset : DenseDesignMatrix
        A DenseDesignMatrix with a targets field `y`
    kwargs : dict
        Extra arguments to `beta_from_design`

    Returns
    -------
    beta : ndarray
        A 1-D vector containing the marginal precision of the *targets* in
        `dataset`.
    """
    targets = dataset.y
    return beta_from_design(targets, **kwargs)
def beta_from_features(dataset, **kwargs):
    """
    Compute the marginal precision of the features in a dataset.

    Parameters
    ----------
    dataset : DenseDesignMatrix
        The dataset to compute the precision on.
    kwargs : dict
        Passed through to `beta_from_design`

    Returns
    -------
    beta : ndarray
        Vector of precision values for each feature in `dataset`
    """
    features = dataset.X
    return beta_from_design(features, **kwargs)
def mean_of_targets(dataset):
    """
    Compute the per-dimension mean of the targets in a dataset.

    Parameters
    ----------
    dataset : DenseDesignMatrix

    Returns
    -------
    mn : ndarray
        A 1-D vector with entry i giving the mean of target i
    """
    return np.mean(dataset.y, axis=0)
class PretrainedLayer(Layer):
    """
    A layer whose weights come from a previously trained model, and may
    optionally be kept fixed during MLP training.

    Parameters
    ----------
    layer_name : str
        A name for this layer.
    layer_content : Model
        Should implement "upward_pass" (RBM and Autoencoder do this)
    freeze_params : bool
        If True, regard layer_content's parameters as fixed.
        If False, they become parameters of this layer and can be
        fine-tuned to optimize the MLP's cost function.
    """

    def __init__(self, layer_name, layer_content, freeze_params=False):
        super(PretrainedLayer, self).__init__()
        self.layer_name = layer_name
        self.layer_content = layer_content
        self.freeze_params = freeze_params

    @wraps(Layer.set_input_space)
    def set_input_space(self, space):
        # The wrapped model's input space is fixed; just verify it matches.
        assert self.get_input_space() == space

    @wraps(Layer.get_params)
    def get_params(self):
        # Frozen layers expose no trainable parameters.
        return [] if self.freeze_params else self.layer_content.get_params()

    @wraps(Layer.get_input_space)
    def get_input_space(self):
        return self.layer_content.get_input_space()

    @wraps(Layer.get_output_space)
    def get_output_space(self):
        return self.layer_content.get_output_space()

    @wraps(Layer.get_layer_monitoring_channels)
    def get_layer_monitoring_channels(self, state_below=None,
                                      state=None, targets=None):
        # The wrapped model handles its own monitoring, if any.
        return OrderedDict([])

    @wraps(Layer.fprop)
    def fprop(self, state_below):
        return self.layer_content.upward_pass(state_below)
class CompositeLayer(Layer):
    """
    A Layer that runs several layers in parallel. Its default behavior
    is to pass the layer's input to each of the components.

    Alternatively, it can take a CompositeSpace as an input and a mapping
    from inputs to layers i.e. providing each component layer with a
    subset of the inputs.

    Parameters
    ----------
    layer_name : str
        The name of this layer
    layers : tuple or list
        The component layers to run in parallel.
    inputs_to_layers : dict mapping int to list of ints, optional
        Can only be used if the input space is a CompositeSpace.
        If inputs_to_layers[i] contains j, it means input i will
        be given as input to component j. Note that if multiple inputs are
        passed on to e.g. an inner CompositeLayer, the same order will
        be maintained. If the list is empty, the input will be discarded.
        If an input does not appear in the dictionary, it will be given to
        all components.

    Examples
    --------
    >>> composite_layer = CompositeLayer(
    ...     layer_name='composite_layer',
    ...     layers=[Tanh(7, 'h0', 0.1), Sigmoid(5, 'h1', 0.1)],
    ...     inputs_to_layers={
    ...         0: [1],
    ...         1: [0]
    ...     })

    This CompositeLayer has a CompositeSpace with 2 subspaces as its
    input space. The first input is given to the Sigmoid layer, the second
    input is given to the Tanh layer.

    >>> wrapper_layer = CompositeLayer(
    ...     layer_name='wrapper_layer',
    ...     layers=[Linear(9, 'h2', 0.1),
    ...             composite_layer,
    ...             Tanh(7, 'h3', 0.1)],
    ...     inputs_to_layers={
    ...         0: [1],
    ...         2: []
    ...     })

    This CompositeLayer takes 3 inputs. The first one is given to the
    inner CompositeLayer. The second input is passed on to each component
    layer i.e. to the Tanh, Linear as well as CompositeLayer. The third
    input is discarded. Note that the inner CompositeLayer will receive
    the inputs with the same ordering i.e. [0, 1], and never [1, 0].
    """

    def __init__(self, layer_name, layers, inputs_to_layers=None):
        self.num_layers = len(layers)

        if inputs_to_layers is None:
            self.inputs_to_layers = None
        else:
            if not isinstance(inputs_to_layers, dict):
                raise TypeError("CompositeLayer expected inputs_to_layers to "
                                "be dict, got " + str(type(inputs_to_layers)))
            # Normalize the mapping into an OrderedDict with sorted keys and
            # sorted value lists so that routing is deterministic. (The
            # previous implementation built this normalized mapping and then
            # accidentally clobbered it with the raw argument through
            # self.__dict__.update(locals()); attributes are now assigned
            # explicitly so the normalized mapping is kept.)
            self.inputs_to_layers = OrderedDict()
            for key in sorted(inputs_to_layers):
                assert isinstance(key, py_integer_types)
                value = inputs_to_layers[key]
                assert is_iterable(value)
                assert all(isinstance(v, py_integer_types) for v in value)
                # Check 'not value' to support case of empty list
                assert not value or all(0 <= v < self.num_layers
                                        for v in value)
                self.inputs_to_layers[key] = sorted(value)

        super(CompositeLayer, self).__init__()

        self.layer_name = layer_name
        self.layers = layers

    @property
    def routing_needed(self):
        # True iff an explicit inputs_to_layers mapping was provided.
        return self.inputs_to_layers is not None

    @wraps(Layer.set_input_space)
    def set_input_space(self, space):
        if not isinstance(space, CompositeSpace):
            if self.inputs_to_layers is not None:
                raise ValueError("CompositeLayer received an inputs_to_layers "
                                 "mapping, but does not have a CompositeSpace "
                                 "as its input space, so there is nothing to "
                                 "map. Received " + str(space) + " as input "
                                 "space.")
        elif self.routing_needed:
            # Every key must index an existing component of the input space.
            # (The old message said "they key" and reported the number of
            # layers instead of the number of input components.)
            if not max(self.inputs_to_layers) < len(space.components):
                raise ValueError("The inputs_to_layers mapping of "
                                 "CompositeLayer contains the key " +
                                 str(max(self.inputs_to_layers)) + " "
                                 "(0-based) but the input space only "
                                 "contains " + str(len(space.components)) +
                                 " components.")

            # Invert the dictionary: layers_to_inputs[i] lists, in ascending
            # order, the indices of the inputs routed to component i.
            self.layers_to_inputs = OrderedDict()
            for i in xrange(self.num_layers):
                inputs = []
                for j in xrange(len(space.components)):
                    if j in self.inputs_to_layers:
                        if i in self.inputs_to_layers[j]:
                            inputs.append(j)
                    else:
                        # Unmapped inputs are broadcast to every component.
                        inputs.append(j)
                self.layers_to_inputs[i] = inputs

        for i, layer in enumerate(self.layers):
            if self.routing_needed and i in self.layers_to_inputs:
                cur_space = space.restrict(self.layers_to_inputs[i])
            else:
                cur_space = space
            layer.set_input_space(cur_space)

        self.input_space = space
        self.output_space = CompositeSpace(tuple(layer.get_output_space()
                                                 for layer in self.layers))
        self._target_space = CompositeSpace(tuple(layer.get_target_space()
                                                  for layer in self.layers))

    @wraps(Layer.get_params)
    def get_params(self):
        rval = []
        for layer in self.layers:
            rval = safe_union(layer.get_params(), rval)
        return rval

    @wraps(Layer.fprop)
    def fprop(self, state_below):
        rvals = []
        for i, layer in enumerate(self.layers):
            if self.routing_needed and i in self.layers_to_inputs:
                cur_state_below = [state_below[j]
                                   for j in self.layers_to_inputs[i]]
                # This is to mimic the behavior of CompositeSpace's restrict
                # method, which only returns a CompositeSpace when the number
                # of components is greater than 1
                if len(cur_state_below) == 1:
                    cur_state_below, = cur_state_below
            else:
                cur_state_below = state_below
            rvals.append(layer.fprop(cur_state_below))
        return tuple(rvals)

    def _weight_decay_aggregate(self, method_name, coeff):
        # Shared implementation for get_weight_decay / get_l1_weight_decay:
        # a scalar coefficient applies to every component; a sequence gives
        # one coefficient per component (zeros are skipped entirely).
        if isinstance(coeff, py_float_types):
            return T.sum([getattr(layer, method_name)(coeff)
                          for layer in self.layers])
        elif is_iterable(coeff):
            assert all(layer_coeff >= 0 for layer_coeff in coeff)
            return T.sum([getattr(layer, method_name)(layer_coeff) for
                          layer, layer_coeff in safe_zip(self.layers, coeff)
                          if layer_coeff > 0], dtype=config.floatX)
        else:
            raise TypeError("CompositeLayer's " + method_name + " received "
                            "coefficients of type " + str(type(coeff)) + " "
                            "but must be provided with a float or list/tuple")

    def get_weight_decay(self, coeff):
        """
        Provides an expression for a squared L2 penalty on the weights,
        which is the weighted sum of the squared L2 penalties of the layer
        components.

        Parameters
        ----------
        coeff : float or tuple/list
            The coefficient on the squared L2 weight decay penalty for
            this layer. If a single value is provided, this coefficient is
            used for each component layer. If a list or tuple of
            coefficients is given they are passed on to the component
            layers in the given order.

        Returns
        -------
        weight_decay : theano.gof.Variable
            An expression for the squared L2 weight decay penalty term for
            this layer.
        """
        return self._weight_decay_aggregate('get_weight_decay', coeff)

    def get_l1_weight_decay(self, coeff):
        """
        Provides an expression for an L1 penalty on the weights,
        which is the weighted sum of the L1 penalties of the layer
        components.

        Parameters
        ----------
        coeff : float or tuple/list
            The coefficient on the L1 weight decay penalty for this layer.
            If a single value is provided, this coefficient is used for
            each component layer. If a list or tuple of coefficients is
            given they are passed on to the component layers in the
            given order.

        Returns
        -------
        weight_decay : theano.gof.Variable
            An expression for the L1 weight decay penalty term for this
            layer.
        """
        return self._weight_decay_aggregate('get_l1_weight_decay', coeff)

    @wraps(Layer.cost)
    def cost(self, Y, Y_hat):
        return sum(layer.cost(Y_elem, Y_hat_elem)
                   for layer, Y_elem, Y_hat_elem in
                   safe_zip(self.layers, Y, Y_hat))

    @wraps(Layer.set_mlp)
    def set_mlp(self, mlp):
        super(CompositeLayer, self).set_mlp(mlp)
        for layer in self.layers:
            layer.set_mlp(mlp)

    @wraps(Layer.get_layer_monitoring_channels)
    def get_layer_monitoring_channels(self, state_below=None,
                                      state=None, targets=None):
        rval = OrderedDict()
        # TODO: reduce redundancy with fprop method
        for i, layer in enumerate(self.layers):
            if self.routing_needed and i in self.layers_to_inputs:
                cur_state_below = [state_below[j]
                                   for j in self.layers_to_inputs[i]]
                # This is to mimic the behavior of CompositeSpace's restrict
                # method, which only returns a CompositeSpace when the number
                # of components is greater than 1
                if len(cur_state_below) == 1:
                    cur_state_below, = cur_state_below
            else:
                cur_state_below = state_below

            if state is not None:
                cur_state = state[i]
            else:
                cur_state = None

            if targets is not None:
                cur_targets = targets[i]
            else:
                cur_targets = None

            d = layer.get_layer_monitoring_channels(
                cur_state_below, cur_state, cur_targets)

            # Namespace each component's channels by its layer name.
            for key in d:
                rval[layer.layer_name + '_' + key] = d[key]

        return rval

    @wraps(Model._modify_updates)
    def _modify_updates(self, updates):
        for layer in self.layers:
            layer.modify_updates(updates)

    @wraps(Layer.get_lr_scalers)
    def get_lr_scalers(self):
        return get_lr_scalers_from_layers(self)
class FlattenerLayer(Layer):
    """
    Wraps another layer and presents that layer's (possibly composite)
    output as a single flat vector.

    The cost works by unflattening the target and then calling the
    wrapped Layer's cost.

    This is mostly intended for use with CompositeLayer as the wrapped
    Layer, and is mostly useful as a workaround for theano not having
    a TupleVariable with which to represent a composite target.

    There are obvious memory, performance, and readability issues with
    doing this, so really it would be better for theano to support
    TupleTypes.

    See pylearn2.sandbox.tuple_var and the theano-dev e-mail thread
    "TupleType".

    Parameters
    ----------
    raw_layer : Layer
        Layer that FlattenerLayer wraps.
    """

    def __init__(self, raw_layer):
        super(FlattenerLayer, self).__init__()
        self.raw_layer = raw_layer
        self.layer_name = raw_layer.layer_name

    @wraps(Layer.set_input_space)
    def set_input_space(self, space):
        self.raw_layer.set_input_space(space)
        flat_dim = self.raw_layer.get_output_space().get_total_dimension()
        self.output_space = VectorSpace(flat_dim)

    @wraps(Layer.get_input_space)
    def get_input_space(self):
        return self.raw_layer.get_input_space()

    @wraps(Layer.get_monitoring_channels)
    def get_monitoring_channels(self, data):
        return self.raw_layer.get_monitoring_channels(data)

    @wraps(Layer.get_layer_monitoring_channels)
    def get_layer_monitoring_channels(self, state_below=None,
                                      state=None, targets=None):
        # Undo the flattening before handing state/targets to the wrapped
        # layer, which expects them in its own output/target spaces.
        inner_space = self.raw_layer.get_output_space()
        unflat_state = inner_space.undo_format_as(state,
                                                  self.get_output_space())
        if targets is not None:
            targets = self.get_target_space().format_as(
                targets, self.raw_layer.get_target_space())
        return self.raw_layer.get_layer_monitoring_channels(
            state_below=state_below,
            state=unflat_state,
            targets=targets
        )

    @wraps(Layer.get_monitoring_data_specs)
    def get_monitoring_data_specs(self):
        return self.raw_layer.get_monitoring_data_specs()

    @wraps(Layer.get_params)
    def get_params(self):
        return self.raw_layer.get_params()

    @wraps(Layer.get_weights)
    def get_weights(self):
        return self.raw_layer.get_weights()

    @wraps(Layer.get_weight_decay)
    def get_weight_decay(self, coeffs):
        return self.raw_layer.get_weight_decay(coeffs)

    @wraps(Layer.get_l1_weight_decay)
    def get_l1_weight_decay(self, coeffs):
        return self.raw_layer.get_l1_weight_decay(coeffs)

    @wraps(Layer.set_batch_size)
    def set_batch_size(self, batch_size):
        self.raw_layer.set_batch_size(batch_size)

    @wraps(Layer._modify_updates)
    def _modify_updates(self, updates):
        self.raw_layer.modify_updates(updates)

    @wraps(Layer.get_lr_scalers)
    def get_lr_scalers(self):
        return self.raw_layer.get_lr_scalers()

    @wraps(Layer.fprop)
    def fprop(self, state_below):
        # Run the wrapped layer, then flatten its output into a vector.
        unflat = self.raw_layer.fprop(state_below)
        inner_space = self.raw_layer.get_output_space()
        return inner_space.format_as(unflat, self.output_space)

    @wraps(Layer.cost)
    def cost(self, Y, Y_hat):
        # Unflatten both target and prediction before delegating.
        inner_space = self.raw_layer.get_output_space()
        flat_space = self.output_space
        unflat_Y = flat_space.format_as(Y, inner_space)
        unflat_Y_hat = inner_space.undo_format_as(Y_hat, flat_space)
        inner_space.validate(unflat_Y_hat)
        return self.raw_layer.cost(unflat_Y, unflat_Y_hat)

    @wraps(Layer.set_mlp)
    def set_mlp(self, mlp):
        super(FlattenerLayer, self).set_mlp(mlp)
        self.raw_layer.set_mlp(mlp)
class WindowLayer(Layer):
    """
    Selects a fixed rectangular window of an image input.

    The input of the layer must be a Conv2DSpace.

    Parameters
    ----------
    layer_name : str
        A name for this layer.
    window : tuple
        A four-tuple of ints indicating respectively
        the top left x and y position, and
        the bottom right x and y position of the window.
    """

    def __init__(self, layer_name, window):
        super(WindowLayer, self).__init__()
        self.layer_name = layer_name
        self.window = window
        # Window must lie in the first quadrant with a non-negative extent.
        if not (0 <= window[0] <= window[2] and 0 <= window[1] <= window[3]):
            raise ValueError("WindowLayer: bad window parameter")

    @wraps(Layer.fprop)
    def fprop(self, state_below):
        # Slice only the row/col axes; batch and channel axes pass through.
        cuts = [slice(None)] * 4
        cuts[self.rows] = slice(self.window[0], self.window[2] + 1)
        cuts[self.cols] = slice(self.window[1], self.window[3] + 1)
        return state_below[tuple(cuts)]

    @wraps(Layer.set_input_space)
    def set_input_space(self, space):
        self.input_space = space
        if not isinstance(space, Conv2DSpace):
            raise TypeError("The input to a Window layer should be a "
                            "Conv2DSpace, but layer " + self.layer_name +
                            " got " + str(type(self.input_space)))
        # Locate the row (axis 0) and column (axis 1) positions within the
        # space's axis ordering.
        self.rows = space.axes.index(0)
        self.cols = space.axes.index(1)
        n_rows, n_cols = space.shape[0], space.shape[1]
        if self.window[2] + 1 > n_rows or self.window[3] + 1 > n_cols:
            raise ValueError("WindowLayer: bad window shape. "
                             "Input is [" + str(n_rows) + ", " +
                             str(n_cols) + "], "
                             "but layer " + self.layer_name + " has window "
                             + str(self.window))
        self.output_space = Conv2DSpace(
            shape=[self.window[2] - self.window[0] + 1,
                   self.window[3] - self.window[1] + 1],
            num_channels=space.num_channels,
            axes=space.axes)

    @wraps(Layer.get_params)
    def get_params(self):
        # No learnable parameters: this layer is a fixed crop.
        return []

    @wraps(Layer.get_monitoring_channels)
    def get_monitoring_channels(self):
        return []
def generate_dropout_mask(mlp, default_include_prob=0.5,
                          input_include_probs=None, rng=(2013, 5, 17)):
    """
    Sample a single dropout mask for an entire MLP, encoded as an integer.

    Bit k of the result corresponds to the k-th input unit counted across
    layers in order; it is set with that layer's inclusion probability.

    Parameters
    ----------
    mlp : object
        An MLP object.
    default_include_prob : float, optional
        The probability of including an input to a hidden
        layer, for layers not listed in `input_include_probs`.
        Default is 0.5.
    input_include_probs : dict, optional
        A dictionary mapping layer names to probabilities
        of input inclusion for that layer. Default is `None`,
        in which `default_include_prob` is used for all
        layers.
    rng : RandomState object or seed, optional
        A `numpy.random.RandomState` object or a seed used to
        create one.

    Returns
    -------
    mask : int
        An integer indexing a dropout mask for the network,
        drawn with the appropriate probability given the
        inclusion probabilities.
    """
    if input_include_probs is None:
        input_include_probs = {}
    if not hasattr(rng, 'uniform'):
        rng = np.random.RandomState(rng)

    mask = 0
    bit = 0
    for layer in mlp.layers:
        keep_prob = input_include_probs.get(layer.layer_name,
                                            default_include_prob)
        n_units = layer.get_input_space().get_total_dimension()
        for _ in range(n_units):
            if rng.uniform() < keep_prob:
                mask |= 1 << bit
            bit += 1
    return mask
def sampled_dropout_average(mlp, inputs, num_masks,
                            default_input_include_prob=0.5,
                            input_include_probs=None,
                            default_input_scale=2.,
                            input_scales=None,
                            rng=(2013, 5, 17),
                            per_example=False):
    """
    Take the geometric mean over a number of randomly sampled
    dropout masks for an MLP with softmax outputs.

    Parameters
    ----------
    mlp : object
        An MLP object.
    inputs : tensor_like
        A Theano variable representing a minibatch appropriate
        for fpropping through the MLP.
    num_masks : int
        The number of masks to sample.
    default_input_include_prob : float, optional
        The probability of including an input to a hidden
        layer, for layers not listed in `input_include_probs`.
        Default is 0.5.
    input_include_probs : dict, optional
        A dictionary mapping layer names to probabilities
        of input inclusion for that layer. Default is `None`,
        in which `default_include_prob` is used for all
        layers.
    default_input_scale : float, optional
        The amount to scale input in dropped out layers.
    input_scales : dict, optional
        A dictionary mapping layer names to constants by
        which to scale the input.
    rng : RandomState object or seed, optional
        A `numpy.random.RandomState` object or a seed used to
        create one.
    per_example : bool, optional
        If `True`, generate a different mask for every single
        test example, so you have `num_masks` per example
        instead of `num_mask` networks total. If `False`,
        `num_masks` masks are fixed in the graph.

    Returns
    -------
    geo_mean : tensor_like
        A symbolic graph for the geometric mean prediction of
        all the networks.
    """
    if input_include_probs is None:
        input_include_probs = {}
    if input_scales is None:
        input_scales = {}
    if not hasattr(rng, 'uniform'):
        rng = np.random.RandomState(rng)

    mlp._validate_layer_names(list(input_include_probs.keys()))
    mlp._validate_layer_names(list(input_scales.keys()))

    outputs = []
    if per_example:
        # dropout_fprop samples a fresh mask per example internally.
        for _ in range(num_masks):
            outputs.append(mlp.dropout_fprop(inputs,
                                             default_input_include_prob,
                                             input_include_probs,
                                             default_input_scale,
                                             input_scales))
    else:
        # Fix each sampled mask in the graph and fprop through it.
        for _ in range(num_masks):
            mask = generate_dropout_mask(mlp, default_input_include_prob,
                                         input_include_probs, rng)
            outputs.append(mlp.masked_fprop(inputs, mask, None,
                                            default_input_scale,
                                            input_scales))
    return geometric_mean_prediction(outputs)
def exhaustive_dropout_average(mlp, inputs, masked_input_layers=None,
                               default_input_scale=2., input_scales=None):
    """
    Take the geometric mean over all dropout masks of an
    MLP with softmax outputs.

    Parameters
    ----------
    mlp : object
        An MLP object.
    inputs : tensor_like
        A Theano variable representing a minibatch appropriate
        for fpropping through the MLP.
    masked_input_layers : list, optional
        A list of layer names whose input should be masked.
        Default is all layers (including the first hidden
        layer, i.e. mask the input).
    default_input_scale : float, optional
        The amount to scale input in dropped out layers.
    input_scales : dict, optional
        A dictionary mapping layer names to constants by
        which to scale the input.

    Returns
    -------
    geo_mean : tensor_like
        A symbolic graph for the geometric mean prediction
        of all exponentially many masked subnetworks.

    Notes
    -----
    This is obviously exponential in the size of the network,
    don't do this except for tiny toy networks.
    """
    if masked_input_layers is None:
        masked_input_layers = mlp.layer_names
    mlp._validate_layer_names(masked_input_layers)

    if input_scales is None:
        input_scales = {}
    mlp._validate_layer_names(input_scales.keys())

    if any(key not in masked_input_layers for key in input_scales):
        # Report exactly the offending keys. The previous version filtered
        # against mlp.layer_names here (a different set than the guard
        # above checks), which could produce an empty or misleading list.
        not_in = [key for key in input_scales
                  if key not in masked_input_layers]
        raise ValueError(", ".join(not_in) + " in input_scales"
                         " but not masked")

    # Enumerate every possible mask over the masked layers' inputs.
    num_inputs = mlp.get_total_input_dimension(masked_input_layers)
    outputs = [mlp.masked_fprop(inputs, mask, masked_input_layers,
                                default_input_scale, input_scales)
               for mask in xrange(2 ** num_inputs)]

    return geometric_mean_prediction(outputs)
def geometric_mean_prediction(forward_props):
    """
    Combine several softmax forward passes into a single prediction by
    taking their geometric mean (implemented as the arithmetic mean of
    the pre-softmax activations, followed by one softmax).

    Parameters
    ----------
    forward_props : list
        A list of Theano graphs corresponding to forward
        propagations through the network with different
        dropout masks.

    Returns
    -------
    geo_mean : tensor_like
        A symbolic graph for the geometric mean prediction
        of all exponentially many masked subnetworks.

    Notes
    -----
    This is obviously exponential in the size of the network,
    don't do this except for tiny toy networks.
    """
    pre_acts = []
    for output in forward_props:
        # Each output must be the direct result of a softmax op so that
        # its single input (the pre-softmax activation) can be recovered.
        producer = output.owner
        assert isinstance(producer.op, T.nnet.Softmax)
        assert len(producer.inputs) == 1
        pre_acts.append(producer.inputs[0])
    mean_act = reduce(operator.add, pre_acts) / float(len(pre_acts))
    return T.nnet.softmax(mean_act)
class BadInputSpaceError(TypeError):
    """
    Raised by an MLP layer's `set_input_space` when the given object is
    not among the Spaces that the layer supports.
    """
def get_lr_scalers_from_layers(owner):
    """
    Collect the learning rate scalers contributed by all member layers
    of `owner` into a single mapping.

    Parameters
    ----------
    owner : Model
        Any Model with a `layers` field

    Returns
    -------
    lr_scalers : OrderedDict
        A dictionary mapping parameters of `owner` to learning
        rate scalers.
    """
    scalers = OrderedDict()
    all_params = owner.get_params()
    for member in owner.layers:
        contribution = member.get_lr_scalers()
        assert isinstance(contribution, OrderedDict)
        # No two layers can contend to scale a parameter
        assert not any(param in scalers for param in contribution)
        # Don't try to scale anything that's not a parameter
        assert all(param in all_params for param in contribution)
        scalers.update(contribution)
    assert all(isinstance(coeff, float) for coeff in scalers.values())
    return scalers
| bsd-3-clause |
radicalbit/ambari | ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py | 2 | 9988 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import socket
import time
import traceback
import logging
from resource_management.core import global_lock
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import stack_tools
from resource_management.core.resources import Execute
from resource_management.core.signal_utils import TerminateStrategy
from ambari_commons.os_check import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
# Result labels; OK_MESSAGE is formatted with the elapsed wall-clock time,
# CRITICAL_MESSAGE with (host_name, traceback text).
OK_MESSAGE = "Metastore OK - Hive command took {0:.3f}s"
CRITICAL_MESSAGE = "Metastore on {0} failed ({1})"

# {{site/property}} tokens resolved by the alert framework from cluster config.
SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
HIVE_METASTORE_URIS_KEY = '{{hive-site/hive.metastore.uris}}'

# The configured Kerberos executable search paths, if any
KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'

# default keytab location
SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'

# default smoke principal
SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'

# default smoke user
SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
SMOKEUSER_DEFAULT = 'ambari-qa'

STACK_NAME = '{{cluster-env/stack_name}}'
STACK_ROOT = '{{cluster-env/stack_root}}'

# Fallback hive locations used when the stack root cannot be resolved.
HIVE_CONF_DIR_LEGACY = '/etc/hive/conf.server'
HIVE_BIN_DIR_LEGACY = '/usr/lib/hive/bin'

# Script-parameter override for the hive command timeout (seconds).
CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0

HADOOPUSER_KEY = '{{cluster-env/hadoop.user.name}}'
HADOOPUSER_DEFAULT = 'hadoop'

logger = logging.getLogger('ambari_alerts')
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def get_tokens():
    """
    Return the tuple of {{site/property}} tokens whose resolved values the
    alert framework must supply to execute() via the configurations dict.
    """
    return (
        SECURITY_ENABLED_KEY,
        SMOKEUSER_KEYTAB_KEY,
        SMOKEUSER_PRINCIPAL_KEY,
        HIVE_METASTORE_URIS_KEY,
        SMOKEUSER_KEY,
        KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
        STACK_NAME,
        STACK_ROOT,
    )
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def get_tokens():
    """
    Return the tuple of {{site/property}} tokens whose resolved values the
    alert framework must supply to execute() via the configurations dict
    (Windows variant: no Kerberos/smoke-user tokens are needed).
    """
    return (
        HIVE_METASTORE_URIS_KEY,
        HADOOPUSER_KEY,
    )
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def execute(configurations=None, parameters=None, host_name=None):
    """
    Returns a tuple containing the result code and a pre-formatted result label.

    Keyword arguments:
    configurations (dictionary): a mapping of configuration key to value
    parameters (dictionary): a mapping of script parameter key to value
    host_name (string): the name of this host where the alert is running

    Fixes vs. original: mutable `{}` default arguments replaced with None
    (shared-dict gotcha); bare `except:` narrowed to `except Exception:` so
    SystemExit/KeyboardInterrupt are not swallowed; `metastore_uri` can no
    longer be referenced unbound when no URI mentions this host.
    """
    if configurations is None:
        return (('UNKNOWN', ['There were no configurations supplied to the script.']))
    parameters = parameters or {}

    if not HIVE_METASTORE_URIS_KEY in configurations:
        return (('UNKNOWN', ['Hive metastore uris were not supplied to the script.']))

    metastore_uris = configurations[HIVE_METASTORE_URIS_KEY].split(',')

    security_enabled = False
    if SECURITY_ENABLED_KEY in configurations:
        security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'

    check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
    if CHECK_COMMAND_TIMEOUT_KEY in parameters:
        check_command_timeout = float(parameters[CHECK_COMMAND_TIMEOUT_KEY])

    # defaults
    smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
    smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
    smokeuser = SMOKEUSER_DEFAULT

    # check script params
    if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
        smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
    if SMOKEUSER_SCRIPT_PARAM_KEY in parameters:
        smokeuser = parameters[SMOKEUSER_SCRIPT_PARAM_KEY]
    if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
        smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]

    # check configurations last as they should always take precedence
    if SMOKEUSER_PRINCIPAL_KEY in configurations:
        smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
    if SMOKEUSER_KEY in configurations:
        smokeuser = configurations[SMOKEUSER_KEY]

    result_code = None
    try:
        if security_enabled:
            if SMOKEUSER_KEYTAB_KEY in configurations:
                smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]

            # Get the configured Kerberos executable search paths, if any
            if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
                kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
            else:
                kerberos_executable_search_paths = None

            kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
            kinitcmd = format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}; ")

            # prevent concurrent kinit from corrupting the ticket cache
            kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
            kinit_lock.acquire()
            try:
                Execute(kinitcmd, user=smokeuser,
                        path=["/bin/", "/usr/bin/", "/usr/lib/hive/bin/", "/usr/sbin/"],
                        timeout=10)
            finally:
                kinit_lock.release()

        if host_name is None:
            host_name = socket.getfqdn()

        # Prefer the metastore URI served from this host; fall back to the
        # first URI so `metastore_uri` is always bound (the original raised
        # a NameError -- reported as UNKNOWN -- when no URI matched).
        metastore_uri = metastore_uris[0]
        for uri in metastore_uris:
            if host_name in uri:
                metastore_uri = uri

        conf_dir = HIVE_CONF_DIR_LEGACY
        bin_dir = HIVE_BIN_DIR_LEGACY
        if STACK_NAME in configurations and STACK_ROOT in configurations:
            stack_root = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
            hive_conf_dir = stack_root + format("/current/hive-metastore/conf")
            hive_bin_dir = stack_root + format("/current/hive-metastore/bin")
            if os.path.exists(hive_conf_dir):
                conf_dir = hive_conf_dir
                bin_dir = hive_bin_dir

        # Single-retry settings keep the probe fast; `mr` engine avoids
        # spinning up Tez just to list databases.
        cmd = format("export HIVE_CONF_DIR='{conf_dir}' ; "
                     "hive --hiveconf hive.metastore.uris={metastore_uri}\
 --hiveconf hive.metastore.client.connect.retry.delay=1\
 --hiveconf hive.metastore.failure.retries=1\
 --hiveconf hive.metastore.connect.retries=1\
 --hiveconf hive.metastore.client.socket.timeout=14\
 --hiveconf hive.execution.engine=mr -e 'show databases;'")

        start_time = time.time()
        try:
            Execute(cmd, user=smokeuser,
                    path=["/bin/", "/usr/bin/", "/usr/sbin/", bin_dir],
                    timeout=int(check_command_timeout),
                    timeout_kill_strategy=TerminateStrategy.KILL_PROCESS_TREE,
                    )
            total_time = time.time() - start_time
            result_code = 'OK'
            label = OK_MESSAGE.format(total_time)
        except Exception:
            result_code = 'CRITICAL'
            label = CRITICAL_MESSAGE.format(host_name, traceback.format_exc())
    except Exception:
        label = traceback.format_exc()
        result_code = 'UNKNOWN'

    return ((result_code, [label]))
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def execute(configurations=None, parameters=None, host_name=None):
    """
    Returns a tuple containing the result code and a pre-formatted result label.

    Keyword arguments:
    configurations (dictionary): a mapping of configuration key to value
    parameters (dictionary): a mapping of script parameter key to value (unused here)
    host_name (string): the name of this host where the alert is running

    Fixes vs. original: mutable `{}` default arguments replaced with None;
    bare `except:` narrowed to `except Exception:`; `metastore_uri` can no
    longer be referenced unbound when no URI mentions this host.
    """
    from resource_management.libraries.functions import reload_windows_env
    reload_windows_env()
    hive_home = os.environ['HIVE_HOME']

    if configurations is None:
        return (('UNKNOWN', ['There were no configurations supplied to the script.']))
    if not HIVE_METASTORE_URIS_KEY in configurations:
        return (('UNKNOWN', ['Hive metastore uris were not supplied to the script.']))

    metastore_uris = configurations[HIVE_METASTORE_URIS_KEY].split(',')

    # defaults
    hiveuser = HADOOPUSER_DEFAULT
    if HADOOPUSER_KEY in configurations:
        hiveuser = configurations[HADOOPUSER_KEY]

    result_code = None
    try:
        if host_name is None:
            host_name = socket.getfqdn()

        # Prefer the metastore URI served from this host; fall back to the
        # first URI so `metastore_uri` is always bound.
        metastore_uri = metastore_uris[0]
        for uri in metastore_uris:
            if host_name in uri:
                metastore_uri = uri

        hive_cmd = os.path.join(hive_home, "bin", "hive.cmd")
        cmd = format("cmd /c {hive_cmd} --hiveconf hive.metastore.uris={metastore_uri}\
 --hiveconf hive.metastore.client.connect.retry.delay=1\
 --hiveconf hive.metastore.failure.retries=1\
 --hiveconf hive.metastore.connect.retries=1\
 --hiveconf hive.metastore.client.socket.timeout=14\
 --hiveconf hive.execution.engine=mr -e 'show databases;'")

        start_time = time.time()
        try:
            Execute(cmd, user=hiveuser, timeout=30)
            total_time = time.time() - start_time
            result_code = 'OK'
            label = OK_MESSAGE.format(total_time)
        except Exception:
            result_code = 'CRITICAL'
            label = CRITICAL_MESSAGE.format(host_name, traceback.format_exc())
    except Exception:
        label = traceback.format_exc()
        result_code = 'UNKNOWN'

    return ((result_code, [label]))
| apache-2.0 |
quamilek/django | django/conf/locale/de/formats.py | 504 | 1100 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats for the German (de) locale.
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y', '%d.%m.%y',      # '25.10.2006', '25.10.06'
    # '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S',         # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',      # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',            # '25.10.2006 14:30'
    '%d.%m.%Y',                  # '25.10.2006'
]

# Number formatting: German uses ',' for decimals and '.' for thousands.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
Br3nda/calcalcal | pylib/cherrypy/tutorial/tut05_derived_objects.py | 36 | 2291 | """
Tutorial - Object inheritance
You are free to derive your request handler classes from any base
class you wish. In most real-world applications, you will probably
want to create a central base class used for all your pages, which takes
care of things like printing a common page header and footer.
"""
import cherrypy
class Page:
    """Common base for request handlers: subclasses override `title` and
    wrap their body content with header()/footer() for a consistent layout."""

    # Store the page title in a class attribute; subclasses override it.
    title = 'Untitled Page'

    def header(self):
        # Shared HTML prologue. Bug fix: the original emitted a second
        # opening <head> tag instead of closing the head element.
        return '''
            <html>
            <head>
                <title>%s</title>
            </head>
            <body>
            <h2>%s</h2>
        ''' % (self.title, self.title)

    def footer(self):
        # Shared HTML epilogue, closing what header() opened.
        return '''
            </body>
            </html>
        '''

    # Note that header and footer don't get their exposed attributes
    # set to True. This isn't necessary since the user isn't supposed
    # to call header or footer directly; instead, we'll call them from
    # within the actually exposed handler methods defined in this
    # class' subclasses.
class HomePage(Page):
    """Site root. Overrides the title and mounts a child AnotherPage
    under ./another/."""

    # Different title for this page
    title = 'Tutorial 5'

    def __init__(self):
        # create a subpage
        self.another = AnotherPage()

    def index(self):
        body = '''
            <p>
            Isn't this exciting? There's
            <a href="./another/">another page</a>, too!
            </p>
        '''
        # Sandwich the body between the header/footer inherited from Page.
        return self.header() + body + self.footer()
    index.exposed = True
class AnotherPage(Page):
    """Secondary page demonstrating that header/footer are inherited."""

    title = 'Another Page'

    def index(self):
        body = '''
            <p>
            And this is the amazing second page!
            </p>
        '''
        return self.header() + body + self.footer()
    index.exposed = True
import os.path
# Configuration file shipped next to this script.
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')

if __name__ == '__main__':
    # CherryPy always starts with app.root when trying to map request URIs
    # to objects, so we need to mount a request handler root. A request
    # to '/' will be mapped to HelloWorld().index().
    cherrypy.quickstart(HomePage(), config=tutconf)
else:
    # This branch is for the test suite; you can ignore it.
    cherrypy.tree.mount(HomePage(), config=tutconf)
| mit |
BOOTMGR/lge_victo_msm7x30-CM | tools/perf/scripts/python/check-perf-trace.py | 948 | 2501 | # perf trace event handlers, generated by perf trace -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# Per-event-name counters for events with no dedicated handler.
# autodict (from Core) auto-creates nested entries on first access.
unhandled = autodict()
def trace_begin():
    # Called once by perf before any event is processed.
    print "trace_begin"
    pass
def trace_end():
    # Called once by perf after the trace is consumed; report any
    # events that had no dedicated handler.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        vec):
    # Handler for the irq:softirq_entry tracepoint: print the common
    # fields, then the softirq vector decoded symbolically via
    # symbol_str() (exercises symbol-string support).
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    # Handler for the kmem:kmalloc tracepoint; gfp_flags is rendered
    # symbolically via flag_str() (exercises flag-string support).
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # autodict returns an empty nested dict for an unseen key, so the
    # `+= 1` raises TypeError on an event's first occurrence; that is
    # the cue to initialize its counter.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # Fixed-width prefix shared by every event line; the trailing comma
    # suppresses the newline (Python 2 print statement).
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # common_pc/common_flags/common_lock_depth/trace_flag_str come from
    # the perf_trace_context module that perf provides at runtime.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    # Emit an "event -> count" table for events that had no handler;
    # prints nothing at all when every event was handled.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
jyogi/purvar-agent | checks.d/network.py | 5 | 26830 | # (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
"""
Collects network metrics.
"""
# stdlib
import re
import socket
from collections import defaultdict
# project
from checks import AgentCheck
from utils.platform import Platform
from utils.subprocess_output import (
get_subprocess_output,
SubprocessOutputEmptyError,
)
import psutil
# (compiled regex, metric name) pairs applied line-by-line to
# `netstat -s` output by Network._submit_regexed_values(); group(1)
# captures the counter value. Raw strings so the \s/\d escapes are
# unambiguous regex syntax.
BSD_TCP_METRICS = [
    (re.compile(r"^\s*(\d+) data packets \(\d+ bytes\) retransmitted\s*$"), 'system.net.tcp.retrans_packs'),
    (re.compile(r"^\s*(\d+) packets sent\s*$"), 'system.net.tcp.sent_packs'),
    (re.compile(r"^\s*(\d+) packets received\s*$"), 'system.net.tcp.rcv_packs')
]

# Solaris reports the standard TCP MIB counters (RFC 1213):
# tcpInSegs counts *received* segments, tcpOutDataSegs counts *sent*
# data segments. Bug fix: the original mapped them to the opposite
# in/out metric names.
SOLARIS_TCP_METRICS = [
    (re.compile(r"\s*tcpRetransSegs\s*=\s*(\d+)\s*"), 'system.net.tcp.retrans_segs'),
    (re.compile(r"\s*tcpOutDataSegs\s*=\s*(\d+)\s*"), 'system.net.tcp.out_segs'),
    (re.compile(r"\s*tcpInSegs\s*=\s*(\d+)\s*"), 'system.net.tcp.in_segs')
]
class Network(AgentCheck):
    """Collects per-interface traffic counters and, optionally, TCP/UDP
    connection-state gauges, dispatching to an OS-specific collector."""

    SOURCE_TYPE_NAME = 'system'

    # Normalization tables: the raw state names emitted by each tool
    # (`ss`, `netstat`, psutil) -> the canonical state suffix used in
    # the CX_STATE_GAUGE metric names below.
    TCP_STATES = {
        "ss": {
            "ESTAB": "established",
            "SYN-SENT": "opening",
            "SYN-RECV": "opening",
            "FIN-WAIT-1": "closing",
            "FIN-WAIT-2": "closing",
            "TIME-WAIT": "time_wait",
            "UNCONN": "closing",
            "CLOSE-WAIT": "closing",
            "LAST-ACK": "closing",
            "LISTEN": "listening",
            "CLOSING": "closing",
        },
        "netstat": {
            "ESTABLISHED": "established",
            "SYN_SENT": "opening",
            "SYN_RECV": "opening",
            "FIN_WAIT1": "closing",
            "FIN_WAIT2": "closing",
            "TIME_WAIT": "time_wait",
            "CLOSE": "closing",
            "CLOSE_WAIT": "closing",
            "LAST_ACK": "closing",
            "LISTEN": "listening",
            "CLOSING": "closing",
        },
        "psutil": {
            psutil.CONN_ESTABLISHED: "established",
            psutil.CONN_SYN_SENT: "opening",
            psutil.CONN_SYN_RECV: "opening",
            psutil.CONN_FIN_WAIT1: "closing",
            psutil.CONN_FIN_WAIT2: "closing",
            psutil.CONN_TIME_WAIT: "time_wait",
            psutil.CONN_CLOSE: "closing",
            psutil.CONN_CLOSE_WAIT: "closing",
            psutil.CONN_LAST_ACK: "closing",
            psutil.CONN_LISTEN: "listening",
            psutil.CONN_CLOSING: "closing",
            psutil.CONN_NONE: "connections",  # CONN_NONE is always returned for udp connections
        }
    }

    # psutil socket type/family -> the protocol-string pieces ('tcp'/'udp'
    # and '4'/'6') concatenated by _parse_protocol_psutil().
    PSUTIL_TYPE_MAPPING = {
        socket.SOCK_STREAM: 'tcp',
        socket.SOCK_DGRAM: 'udp',
    }

    PSUTIL_FAMILY_MAPPING = {
        socket.AF_INET: '4',
        socket.AF_INET6: '6',
    }

    # (protocol, canonical state) -> gauge metric name.
    CX_STATE_GAUGE = {
        ('udp4', 'connections'): 'system.net.udp4.connections',
        ('udp6', 'connections'): 'system.net.udp6.connections',
        ('tcp4', 'established'): 'system.net.tcp4.established',
        ('tcp4', 'opening'): 'system.net.tcp4.opening',
        ('tcp4', 'closing'): 'system.net.tcp4.closing',
        ('tcp4', 'listening'): 'system.net.tcp4.listening',
        ('tcp4', 'time_wait'): 'system.net.tcp4.time_wait',
        ('tcp6', 'established'): 'system.net.tcp6.established',
        ('tcp6', 'opening'): 'system.net.tcp6.opening',
        ('tcp6', 'closing'): 'system.net.tcp6.closing',
        ('tcp6', 'listening'): 'system.net.tcp6.listening',
        ('tcp6', 'time_wait'): 'system.net.tcp6.time_wait',
    }
    def __init__(self, name, init_config, agentConfig, instances=None):
        """Reject configurations with more than one instance: check()
        stores per-run state on `self`, so instances cannot be multiplexed."""
        AgentCheck.__init__(self, name, init_config, agentConfig, instances=instances)
        if instances is not None and len(instances) > 1:
            raise Exception("Network check only supports one configured instance.")
    def check(self, instance):
        """Entry point: read instance options (interface exclusions,
        connection-state toggle) then dispatch to the platform collector."""
        if instance is None:
            instance = {}

        self._excluded_ifaces = instance.get('excluded_interfaces', [])
        self._collect_cx_state = instance.get('collect_connection_state', False)

        self._exclude_iface_re = None
        exclude_re = instance.get('excluded_interface_re', None)
        if exclude_re:
            self.log.debug("Excluding network devices matching: %s" % exclude_re)
            self._exclude_iface_re = re.compile(exclude_re)

        if Platform.is_linux():
            self._check_linux(instance)
        elif Platform.is_bsd():
            self._check_bsd(instance)
        elif Platform.is_solaris():
            self._check_solaris(instance)
        elif Platform.is_windows():
            self._check_psutil()
    def _submit_devicemetrics(self, iface, vals_by_metric):
        """Submit the standard per-interface counters as rates.

        Returns False (and submits nothing) when `iface` is excluded by
        name or by the configured exclusion regex.
        """
        if iface in self._excluded_ifaces or (self._exclude_iface_re and self._exclude_iface_re.match(iface)):
            # Skip this network interface.
            return False

        # Every collector must provide exactly this metric set -- the
        # asserts fail loudly on a malformed parse.
        expected_metrics = [
            'bytes_rcvd',
            'bytes_sent',
            'packets_in.count',
            'packets_in.error',
            'packets_out.count',
            'packets_out.error',
        ]
        for m in expected_metrics:
            assert m in vals_by_metric
        assert len(vals_by_metric) == len(expected_metrics)

        count = 0
        for metric, val in vals_by_metric.iteritems():
            self.rate('system.net.%s' % metric, val, device_name=iface)
            count += 1
        self.log.debug("tracked %s network metrics for interface %s" % (count, iface))
def _parse_value(self, v):
if v == "-":
return 0
else:
try:
return long(v)
except ValueError:
return 0
def _submit_regexed_values(self, output, regex_list):
lines = output.splitlines()
for line in lines:
for regex, metric in regex_list:
value = re.match(regex, line)
if value:
self.rate(metric, self._parse_value(value.group(1)))
    def _check_linux(self, instance):
        """Linux collector: connection states via `ss` (with `netstat`
        fallback), interface counters from /proc/net/dev, and TCP/UDP
        protocol counters from /proc/net/snmp."""
        proc_location = self.agentConfig.get('procfs_path', '/proc').rstrip('/')
        if self._collect_cx_state:
            try:
                self.log.debug("Using `ss` to collect connection state")
                # Try using `ss` for increased performance over `netstat`
                for ip_version in ['4', '6']:
                    # Call `ss` for each IP version because there's no built-in way of distinguishing
                    # between the IP versions in the output
                    output, _, _ = get_subprocess_output(["ss", "-n", "-u", "-t", "-a", "-{0}".format(ip_version)], self.log)
                    lines = output.splitlines()
                    # Sample output (state is column 1):
                    # Netid State   Recv-Q Send-Q Local Address:Port Peer Address:Port
                    # udp   UNCONN  0      0      127.0.0.1:8125     *:*
                    # tcp   LISTEN  0      0      ::ffff:127.0.0.1:33217 ::ffff:127.0.0.1:7199
                    metrics = self._parse_linux_cx_state(lines[1:], self.TCP_STATES['ss'], 1, ip_version=ip_version)
                    # Only send the metrics which match the loop iteration's ip version
                    for stat, metric in self.CX_STATE_GAUGE.iteritems():
                        if stat[0].endswith(ip_version):
                            self.gauge(metric, metrics.get(metric))
            except OSError:
                self.log.info("`ss` not found: using `netstat` as a fallback")
                output, _, _ = get_subprocess_output(["netstat", "-n", "-u", "-t", "-a"], self.log)
                lines = output.splitlines()
                # Sample output (state is column 5; two header lines):
                # Proto Recv-Q Send-Q Local Address   Foreign Address      State
                # tcp   0      0      46.105.75.4:143 90.56.111.177:56867  ESTABLISHED
                # udp   0      0      0.0.0.0:123     0.0.0.0:*
                metrics = self._parse_linux_cx_state(lines[2:], self.TCP_STATES['netstat'], 5)
                for metric, value in metrics.iteritems():
                    self.gauge(metric, value)
            except SubprocessOutputEmptyError:
                self.log.exception("Error collecting connection stats.")

        proc_dev_path = "{}/net/dev".format(proc_location)
        proc = open(proc_dev_path, 'r')
        try:
            lines = proc.readlines()
        finally:
            proc.close()
        # Sample /proc/net/dev (two header lines, then "iface: 16 counters"):
        # Inter-|   Receive                                                |  Transmit
        #  face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
        #   eth0:631947052 1042233 0 19 0 184 0 1206 1208625538 1320529 0 0 0 0 0 0
        for l in lines[2:]:
            cols = l.split(':', 1)
            x = cols[1].split()
            # Filter inactive interfaces (zero bytes both ways).
            if self._parse_value(x[0]) or self._parse_value(x[8]):
                iface = cols[0].strip()
                metrics = {
                    'bytes_rcvd': self._parse_value(x[0]),
                    'bytes_sent': self._parse_value(x[8]),
                    'packets_in.count': self._parse_value(x[1]),
                    # errors + drops, in each direction
                    'packets_in.error': self._parse_value(x[2]) + self._parse_value(x[3]),
                    'packets_out.count': self._parse_value(x[9]),
                    'packets_out.error': self._parse_value(x[10]) + self._parse_value(x[11]),
                }
                self._submit_devicemetrics(iface, metrics)

        try:
            proc_snmp_path = "{}/net/snmp".format(proc_location)
            proc = open(proc_snmp_path, 'r')
            # /proc/net/snmp pairs a header line with a value line per protocol:
            # Tcp: RtoAlgorithm RtoMin RtoMax MaxConn ...
            # Tcp: 1 200 120000 -1 ...
            # Udp: InDatagrams NoPorts InErrors OutDatagrams ...
            # Udp: 24249494 1643257 0 25892947 ...
            try:
                lines = proc.readlines()
            finally:
                proc.close()
            tcp_lines = [line for line in lines if line.startswith('Tcp:')]
            udp_lines = [line for line in lines if line.startswith('Udp:')]

            tcp_column_names = tcp_lines[0].strip().split()
            tcp_values = tcp_lines[1].strip().split()
            tcp_metrics = dict(zip(tcp_column_names, tcp_values))

            udp_column_names = udp_lines[0].strip().split()
            udp_values = udp_lines[1].strip().split()
            udp_metrics = dict(zip(udp_column_names, udp_values))

            # line start indicating what kind of metrics we're looking at
            assert(tcp_metrics['Tcp:'] == 'Tcp:')

            tcp_metrics_name = {
                'RetransSegs': 'system.net.tcp.retrans_segs',
                'InSegs': 'system.net.tcp.in_segs',
                'OutSegs': 'system.net.tcp.out_segs'
            }
            for key, metric in tcp_metrics_name.iteritems():
                self.rate(metric, self._parse_value(tcp_metrics[key]))

            assert(udp_metrics['Udp:'] == 'Udp:')

            udp_metrics_name = {
                'InDatagrams': 'system.net.udp.in_datagrams',
                'NoPorts': 'system.net.udp.no_ports',
                'InErrors': 'system.net.udp.in_errors',
                'OutDatagrams': 'system.net.udp.out_datagrams',
                'RcvbufErrors': 'system.net.udp.rcv_buf_errors',
                'SndbufErrors': 'system.net.udp.snd_buf_errors'
            }
            # Buffer-error columns are kernel-version dependent, hence the
            # membership check.
            for key, metric in udp_metrics_name.iteritems():
                if key in udp_metrics:
                    self.rate(metric, self._parse_value(udp_metrics[key]))
        except IOError:
            # On Openshift, /proc/net/snmp is only readable by root
            self.log.debug("Unable to read %s.", proc_snmp_path)
# Parse the output of the command that retrieves the connection state (either `ss` or `netstat`)
# Returns a dict metric_name -> value
def _parse_linux_cx_state(self, lines, tcp_states, state_col, ip_version=None):
metrics = dict.fromkeys(self.CX_STATE_GAUGE.values(), 0)
for l in lines:
cols = l.split()
if cols[0].startswith('tcp'):
protocol = "tcp{0}".format(ip_version) if ip_version else ("tcp4", "tcp6")[cols[0] == "tcp6"]
if cols[state_col] in tcp_states:
metric = self.CX_STATE_GAUGE[protocol, tcp_states[cols[state_col]]]
metrics[metric] += 1
elif cols[0].startswith('udp'):
protocol = "udp{0}".format(ip_version) if ip_version else ("udp4", "udp6")[cols[0] == "udp6"]
metric = self.CX_STATE_GAUGE[protocol, 'connections']
metrics[metric] += 1
return metrics
    def _check_bsd(self, instance):
        """BSD/OS X collector: interface counters via `netstat -i -b`,
        TCP counters via `netstat -s`."""
        netstat_flags = ['-i', '-b']

        # FreeBSD's netstat truncates device names unless you pass '-W'
        if Platform.is_freebsd():
            netstat_flags.append('-W')

        try:
            output, _, _ = get_subprocess_output(["netstat"] + netstat_flags, self.log)
            lines = output.splitlines()
            # Sample output (one interface spans several rows; address
            # columns are irregular, counter columns are the rightmost 7):
            # Name  Mtu   Network     Address           Ipkts    Ierrs Ibytes      Opkts    Oerrs Obytes      Coll
            # lo0   16384 <Link#1>                      318258   0     428252203   318258   0     428252203   0
            # en0   1500  <Link#4>    04:0c:ce:db:4e:fa 20801309 0     13835457425 15149389 0     11508790198 0
            headers = lines[0].split()

            # Given the irregular structure of the table above, better to parse from the end of each line
            # Verify headers first
            #       -7       -6       -5        -4       -3       -2        -1
            for h in ("Ipkts", "Ierrs", "Ibytes", "Opkts", "Oerrs", "Obytes", "Coll"):
                if h not in headers:
                    # NOTE(review): `self.logger` looks wrong -- every other
                    # path in this class uses `self.log`; this line would
                    # raise AttributeError if the headers ever mismatch.
                    # Confirm against the AgentCheck base class.
                    self.logger.error("%s not found in %s; cannot parse" % (h, headers))
                    return False

            current = None
            for l in lines[1:]:
                # Another header row, abort now, this is IPv6 land
                if "Name" in l:
                    break

                x = l.split()
                if len(x) == 0:
                    break

                iface = x[0]
                if iface.endswith("*"):
                    # '*' suffix marks a down interface in netstat output.
                    iface = iface[:-1]
                if iface == current:
                    # skip multiple lines of same interface
                    continue
                else:
                    current = iface

                # Filter inactive interfaces
                if self._parse_value(x[-5]) or self._parse_value(x[-2]):
                    iface = current
                    metrics = {
                        'bytes_rcvd': self._parse_value(x[-5]),
                        'bytes_sent': self._parse_value(x[-2]),
                        'packets_in.count': self._parse_value(x[-7]),
                        'packets_in.error': self._parse_value(x[-6]),
                        'packets_out.count': self._parse_value(x[-4]),
                        'packets_out.error': self._parse_value(x[-3]),
                    }
                    self._submit_devicemetrics(iface, metrics)
        except SubprocessOutputEmptyError:
            self.log.exception("Error collecting connection stats.")

        try:
            # NOTE(review): the adjacent literals '-p' 'tcp' concatenate into
            # the single argument '-ptcp' -- appears intentional (BSD netstat
            # accepts it), but verify.
            netstat, _, _ = get_subprocess_output(["netstat", "-s", "-p" "tcp"], self.log)
            # Sample output (matched by BSD_TCP_METRICS):
            # 3651535 packets sent
            #     5009 data packets (2832232 bytes) retransmitted
            # 4807551 packets received
            self._submit_regexed_values(netstat, BSD_TCP_METRICS)
        except SubprocessOutputEmptyError:
            self.log.exception("Error collecting TCP stats.")
    def _check_solaris(self, instance):
        """Solaris collector: interface counters via `kstat -p link:0:`,
        TCP counters via `netstat -s -P tcp`."""
        # Can't get bytes sent and received via netstat
        # Default to kstat -p link:0:
        try:
            netstat, _, _ = get_subprocess_output(["kstat", "-p", "link:0:"], self.log)
            metrics_by_interface = self._parse_solaris_netstat(netstat)
            for interface, metrics in metrics_by_interface.iteritems():
                self._submit_devicemetrics(interface, metrics)
        except SubprocessOutputEmptyError:
            self.log.exception("Error collecting kstat stats.")

        try:
            # NOTE(review): adjacent literals '-P' 'tcp' concatenate into the
            # single argument '-Ptcp' -- appears intentional, but verify.
            netstat, _, _ = get_subprocess_output(["netstat", "-s", "-P" "tcp"], self.log)
            # Sample output (matched by SOLARIS_TCP_METRICS):
            # TCP: tcpRtoAlgorithm=     4 tcpRtoMin           =   200
            #      tcpOutDataSegs  =   995 tcpOutDataBytes     =1216733
            #      tcpRetransSegs  =     0 tcpRetransBytes     =     0
            self._submit_regexed_values(netstat, SOLARIS_TCP_METRICS)
        except SubprocessOutputEmptyError:
            self.log.exception("Error collecting TCP stats.")
    def _parse_solaris_netstat(self, netstat_output):
        """
        Return a mapping of network metrics by interface. For example:
            { interface:
                {'bytes_sent': 0,
                  'bytes_rcvd': 0,
                  ...
                }
            }
        """
        # kstat emits one "link:0:<iface>:<stat> <value>" pair per line, e.g.:
        # link:0:net0:ierrors       0
        # link:0:net0:ipackets64    682834
        # link:0:net0:obytes64      12820668
        # link:0:net0:rbytes64      113983614
        # link:0:net0:zonename      53aa9b7e-48ba-4152-a52b-a6368c3d9e7c
        # Non-numeric values (class, zonename, ...) are either not in the
        # mapping below or parse to 0 via _parse_value.

        # A mapping of solaris names -> datadog names
        metric_by_solaris_name = {
            'rbytes64': 'bytes_rcvd',
            'obytes64': 'bytes_sent',
            'ipackets64': 'packets_in.count',
            'ierrors': 'packets_in.error',
            'opackets64': 'packets_out.count',
            'oerrors': 'packets_out.error',
        }

        lines = [l for l in netstat_output.splitlines() if len(l) > 0]

        metrics_by_interface = {}

        for l in lines:
            # Parse the metric & interface.
            cols = l.split()
            link, n, iface, name = cols[0].split(":")
            assert link == "link"

            # Get the datadog metric name.
            ddname = metric_by_solaris_name.get(name, None)
            if ddname is None:
                continue

            # Add it to this interface's list of metrics.
            metrics = metrics_by_interface.get(iface, {})
            metrics[ddname] = self._parse_value(cols[1])
            metrics_by_interface[iface] = metrics

        return metrics_by_interface
    def _check_psutil(self):
        """
        Gather metrics about connections states and interfaces counters
        using psutil facilities (used on Windows).
        """
        if self._collect_cx_state:
            self._cx_state_psutil()
        self._cx_counters_psutil()
def _cx_state_psutil(self):
"""
Collect metrics about connections state using psutil
"""
metrics = defaultdict(int)
for conn in psutil.net_connections():
protocol = self._parse_protocol_psutil(conn)
status = self.TCP_STATES['psutil'].get(conn.status)
metric = self.CX_STATE_GAUGE.get((protocol, status))
if metric is None:
self.log.warning('Metric not found for: %s,%s', protocol, status)
else:
metrics[metric] += 1
for metric, value in metrics.iteritems():
self.gauge(metric, value)
def _cx_counters_psutil(self):
"""
Collect metrics about interfaces counters using psutil
"""
for iface, counters in psutil.net_io_counters(pernic=True).iteritems():
metrics = {
'bytes_rcvd': counters.bytes_recv,
'bytes_sent': counters.bytes_sent,
'packets_in.count': counters.packets_recv,
'packets_in.error': counters.errin,
'packets_out.count': counters.packets_sent,
'packets_out.error': counters.errout,
}
self._submit_devicemetrics(iface, metrics)
def _parse_protocol_psutil(self, conn):
"""
Returns a string describing the protocol for the given connection
in the form `tcp4`, 'udp4` as in `self.CX_STATE_GAUGE`
"""
protocol = self.PSUTIL_TYPE_MAPPING.get(conn.type, '')
family = self.PSUTIL_FAMILY_MAPPING.get(conn.family, '')
return '{}{}'.format(protocol, family)
| bsd-3-clause |
tanmaythakur/django | tests/admin_docs/models.py | 83 | 1625 | """
Models for testing various aspects of the django.contrib.admindocs app
"""
from django.db import models
class Company(models.Model):
    # Minimal model used as a ForeignKey target by Person below.
    name = models.CharField(max_length=200)
class Group(models.Model):
    # Minimal model used as a ManyToMany target by Person below.
    name = models.CharField(max_length=200)
class Family(models.Model):
    # Minimal model used as a nullable ForeignKey target by Person below.
    last_name = models.CharField(max_length=200)
class Person(models.Model):
    # NOTE(review): the docstring below looks like fixture data for the
    # admindocs tests (model refs, ``.. raw``/``.. include`` directives
    # pointing at evilfile.txt) -- do not reword it without checking the
    # tests that introspect it.
    """
    Stores information about a person, related to :model:`myapp.Company`.
    **Notes**
    Use ``save_changes()`` when saving this object.
    ``company``
        Field storing :model:`myapp.Company` where the person works.
        (DESCRIPTION)
    .. raw:: html
        :file: admin_docs/evilfile.txt
    .. include:: admin_docs/evilfile.txt
    """
    first_name = models.CharField(max_length=200, help_text="The person's first name")
    last_name = models.CharField(max_length=200, help_text="The person's last name")
    company = models.ForeignKey(Company, models.CASCADE, help_text="place of work")
    # SET_NULL requires null=True; related_name='+' disables the reverse
    # accessor on Family.
    family = models.ForeignKey(Family, models.SET_NULL, related_name='+', null=True)
    groups = models.ManyToManyField(Group, help_text="has membership")
    def _get_full_name(self):
        # Non-public helper exposed through get_full_name() below.
        return "%s %s" % (self.first_name, self.last_name)
    # The following no-op methods exist so admindocs has methods to list.
    def add_image(self):
        pass
    def delete_image(self):
        pass
    def save_changes(self):
        pass
    def set_status(self):
        pass
    def get_full_name(self):
        """
        Get the full name of the person
        """
        return self._get_full_name()
    def get_status_count(self):
        return 0
    def get_groups_list(self):
        return []
| bsd-3-clause |
gurneyalex/account-invoicing | account_invoice_force_number/__init__.py | 34 | 1047 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2011-2013 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import invoice
| agpl-3.0 |
romankagan/DDBWorkbench | python/lib/Lib/site-packages/django/contrib/gis/maps/google/__init__.py | 603 | 2648 | """
This module houses the GoogleMap object, used for generating
the needed javascript to embed Google Maps in a Web page.
Google(R) is a registered trademark of Google, Inc. of Mountain View, California.
Example:
* In the view:
return render_to_response('template.html', {'google' : GoogleMap(key="abcdefg")})
* In the template:
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
{{ google.xhtml }}
<head>
<title>Google Maps via GeoDjango</title>
{{ google.style }}
{{ google.scripts }}
</head>
{{ google.body }}
<div id="{{ google.dom_id }}" style="width:600px;height:400px;"></div>
</body>
</html>
Note: If you want to be more explicit in your templates, the following are
equivalent:
{{ google.body }} => "<body {{ google.onload }} {{ google.onunload }}>"
{{ google.xhtml }} => "<html xmlns="http://www.w3.org/1999/xhtml" {{ google.xmlns }}>"
{{ google.style }} => "<style>{{ google.vml_css }}</style>"
Explanation:
- The `xhtml` property provides the correct XML namespace needed for
Google Maps to operate in IE using XHTML. Google Maps on IE uses
VML to draw polylines. Returns, by default:
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
- The `style` property provides the correct style tag for the CSS
properties required by Google Maps on IE:
<style type="text/css">v\:* {behavior:url(#default#VML);}</style>
- The `scripts` property provides the necessary <script> tags for
including the Google Maps javascript, as well as including the
generated javascript.
- The `body` property provides the correct attributes for the
body tag to load the generated javascript. By default, returns:
<body onload="gmap_load()" onunload="GUnload()">
- The `dom_id` property returns the DOM id for the map. Defaults to "map".
The following attributes may be set or customized in your local settings:
 * GOOGLE_MAPS_API_KEY: String of your Google Maps API key. These are tied
   to a domain. May be obtained from http://www.google.com/apis/maps/
* GOOGLE_MAPS_API_VERSION (optional): Defaults to using "2.x"
* GOOGLE_MAPS_URL (optional): Must have a substitution ('%s') for the API
version.
"""
from django.contrib.gis.maps.google.gmap import GoogleMap, GoogleMapSet
from django.contrib.gis.maps.google.overlays import GEvent, GIcon, GMarker, GPolygon, GPolyline
from django.contrib.gis.maps.google.zoom import GoogleZoom
| apache-2.0 |
ndawe/goodruns | goodruns/sorteddict.py | 1 | 3468 | """
Copied from:
https://code.djangoproject.com/browser/django/
trunk/django/utils/datastructures.py#L99
BSD license
"""
import copy
from types import GeneratorType
import bisect
class SortedDict(dict):
    """
    A dictionary that keeps its keys in the
    order in which they're inserted.

    NOTE(review): despite the docstring above (copied from Django),
    ``__setitem__`` inserts new keys with ``bisect`` and ``__init__``
    sorts ``key_order``, so keys are actually kept in *sorted* order,
    not insertion order. The API is Python 2 style (``iteritems``,
    ``iterkeys``, list-returning ``keys()``/``values()``).
    """
    def __new__(cls, *args, **kwargs):
        # key_order is created at allocation time (not in __init__) so it
        # exists even on instances built through paths that skip __init__.
        instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
        instance.key_order = []
        return instance
    def __init__(self, data=None):
        # Accepts a dict, an iterable of (key, value) pairs, or a generator
        # of pairs; for pair input, duplicate keys keep one entry.
        if data is None:
            data = {}
        elif isinstance(data, GeneratorType):
            # Unfortunately we need to be able to read a generator twice. Once
            # to get the data into self with our super().__init__ call and a
            # second time to setup key_order correctly
            data = list(data)
        super(SortedDict, self).__init__(data)
        if isinstance(data, dict):
            # Python 2: dict.keys() returns a fresh list we can own and sort.
            self.key_order = data.keys()
        else:
            self.key_order = []
            seen = set()
            for key, value in data:
                if key not in seen:
                    self.key_order.append(key)
                    seen.add(key)
        self.key_order.sort()
    def __deepcopy__(self, memo):
        # Deep-copy the values; ordering is rebuilt by __init__.
        return self.__class__([(key, copy.deepcopy(value, memo))
                               for key, value in self.iteritems()])
    def __setitem__(self, key, value):
        if key not in self:
            # New keys go to their sorted position; bisect avoids a full
            # re-sort on every insert.
            self.key_order.insert(bisect.bisect(self.key_order, key), key)
        super(SortedDict, self).__setitem__(key, value)
    def __delitem__(self, key):
        super(SortedDict, self).__delitem__(key)
        self.key_order.remove(key)
    def __iter__(self):
        return iter(self.key_order)
    def pop(self, k, *args):
        result = super(SortedDict, self).pop(k, *args)
        try:
            self.key_order.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result
    def popitem(self):
        result = super(SortedDict, self).popitem()
        self.key_order.remove(result[0])
        return result
    def items(self):
        # Python 2 semantics: returns a list of (key, value) pairs.
        return zip(self.key_order, self.values())
    def iteritems(self):
        for key in self.key_order:
            yield key, self[key]
    def keys(self):
        # Return a copy so callers cannot mutate the internal ordering.
        return self.key_order[:]
    def iterkeys(self):
        return iter(self.key_order)
    def values(self):
        # Python 2 semantics: map() returns a list here.
        return map(self.__getitem__, self.key_order)
    def itervalues(self):
        for key in self.key_order:
            yield self[key]
    def update(self, dict_):
        for k, v in dict_.iteritems():
            self[k] = v
    def setdefault(self, key, default):
        # NOTE(review): appends rather than bisect-inserting, so a key
        # added via setdefault lands at the end instead of its sorted
        # position -- TODO confirm whether this is intentional.
        if key not in self:
            self.key_order.append(key)
        return super(SortedDict, self).setdefault(key, default)
    def copy(self):
        """Returns a copy of this object."""
        # This way of initializing the copy means it works for subclasses, too.
        obj = self.__class__(self)
        obj.key_order = self.key_order[:]
        return obj
    def __repr__(self):
        """
        Replaces the normal dict.__repr__ with a version that returns the keys
        in their sorted order.
        """
        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
    def clear(self):
        super(SortedDict, self).clear()
        self.key_order = []
| gpl-3.0 |
GoogleDeveloperExperts/experts-app-backend | models/activity_record.py | 1 | 9785 | from google.appengine.ext import ndb
from endpoints_proto_datastore.ndb import EndpointsModel
from endpoints_proto_datastore.ndb import EndpointsAliasProperty
from endpoints_proto_datastore.ndb import EndpointsVariantIntegerProperty
from endpoints_proto_datastore.ndb import EndpointsComputedProperty
from datetime import datetime
from protorpc import messages
from models.activity_post import ActivityPost
from models.account import Account
import logging
import math
class ActivityMetaData(EndpointsModel):
    """Per-activity details embedded (repeated) in ActivityRecord.metadata."""
    # groups of activities that can be reported together:
    # content #community #techtalk #bugreport #forumpost #opensourcecode
    activity_group = ndb.StringProperty()
    # bugreport, techtalk, applies to all
    title = ndb.StringProperty()
    # applies to all, provides more detail / abstract about the activity
    description = ndb.StringProperty()
    # sub activity type
    type = ndb.StringProperty()
    # for all types, can be event link/blog link/github link/...
    link = ndb.StringProperty()
    # impact is about the number of people impacted:
    # views for #blogpost, attendees for #techtalks ...
    impact = EndpointsVariantIntegerProperty(variant=messages.Variant.INT32)
    # for some activities, links to slides, videos etc
    other_link1 = ndb.StringProperty()
    other_link2 = ndb.StringProperty()
    # community, techtalk
    location = ndb.StringProperty()
    # expense tracking: whether Google expensed the activity, and the
    # approximate amount in USD
    google_expensed = ndb.BooleanProperty()
    us_approx_amount = EndpointsVariantIntegerProperty(
        variant=messages.Variant.INT32)
class ActivityRecord(EndpointsModel):
    """Aggregated activity for one expert (GDE): groups one or more
    ActivityPost entities and carries cumulative social counters plus
    per-activity metadata and computed impact scores.
    """
    _message_fields_schema = ('id', 'gplus_id', 'gde_name', 'date_created',
                              'date_updated', 'post_date', 'activity_types',
                              'product_groups', 'activity_link', 'gplus_posts',
                              'activity_title', 'plus_oners', 'resharers',
                              'comments', 'metadata', 'social_impact', 'meta_impact',
                              'total_impact', 'api_key', 'deleted')
    # Transient backing field for the api_key alias property below.
    _api_key = None
    # we identify Expert's uniquely using this
    gplus_id = ndb.StringProperty()
    # dates: are they really useful????
    date_created = ndb.DateTimeProperty(auto_now_add=True)
    date_updated = ndb.DateTimeProperty(auto_now=True)
    # first post date, will be more interesting
    post_date = ndb.StringProperty()
    # related posts, we store the post_id's and the activity link
    # in some case the activity link is the gplus_post link itself
    # when there are no links attached to the post
    activity_link = ndb.StringProperty()
    activity_title = ndb.StringProperty()
    gplus_posts = ndb.StringProperty(repeated=True)
    # cumulative plus_oners & resharers (summed over the linked posts)
    plus_oners = EndpointsVariantIntegerProperty(
        variant=messages.Variant.INT32)
    resharers = EndpointsVariantIntegerProperty(variant=messages.Variant.INT32)
    comments = EndpointsVariantIntegerProperty(variant=messages.Variant.INT32)
    # activity types and product groups
    activity_types = ndb.StringProperty(repeated=True)
    product_groups = ndb.StringProperty(repeated=True)
    # activity type metadata
    metadata = ndb.StructuredProperty(ActivityMetaData, repeated=True)
    deleted = ndb.BooleanProperty()
    def ApiKeySet(self, value):
        # Setter for the api_key alias property; instance-only, not stored.
        self._api_key = value
    @EndpointsAliasProperty(setter=ApiKeySet, property_type=messages.StringField)
    def api_key(self):
        return self._api_key
    def DummySetter(self, value):
        # do nothing since property will not be updated from API methods
        return
    @EndpointsAliasProperty(setter=DummySetter, property_type=messages.StringField)
    def gde_name(self):
        # Resolve the expert's display name from the Account entity keyed
        # by gplus_id; None when the id or account is missing.
        if self.gplus_id is None:
            return None
        gde = ndb.Key(Account, self.gplus_id).get()
        if gde is None:
            return None
        return gde.display_name
    @EndpointsComputedProperty(property_type=messages.FloatField)
    def total_impact(self):
        # Combined score: 1 + log10(1 + social counters) +
        # log10(1 + metadata impact). None until the record has a title
        # (used here as a "fully populated" marker).
        if self.activity_title is None:
            return None
        social_impact = 1
        if self.resharers is not None:
            social_impact += self.resharers
        if self.plus_oners is not None:
            social_impact += self.plus_oners
        if self.comments is not None:
            social_impact += self.comments
        meta_impact = 1
        if self.metadata is None or len(self.metadata) == 0:
            pass
        else:
            for meta in self.metadata:
                if meta.impact is not None:
                    meta_impact += meta.impact
        impact = 1 + math.log10(social_impact) + math.log10(meta_impact)
        return float(impact)
    @EndpointsComputedProperty(property_type=messages.IntegerField, variant=messages.Variant.INT32)
    def social_impact(self):
        # Raw sum of +1s, reshares and comments; None until populated.
        if self.activity_title is None:
            return None
        impact = 0
        if self.resharers is not None:
            impact += self.resharers
        if self.plus_oners is not None:
            impact += self.plus_oners
        if self.comments is not None:
            impact += self.comments
        return impact
    @EndpointsComputedProperty(property_type=messages.IntegerField, variant=messages.Variant.INT32)
    def meta_impact(self):
        # Raw sum of the per-metadata impact values; None until populated.
        if self.activity_title is None:
            return None
        impact = 0
        if self.metadata is None or len(self.metadata) == 0:
            pass
        else:
            for meta in self.metadata:
                if meta.impact is not None:
                    impact += meta.impact
        return impact
    def MinDateSet(self, value):
        # Query-only alias: adds a post_date >= value filter.
        if value is not None:
            self._endpoints_query_info._filters.add(
                ActivityRecord.post_date >= value)
    @EndpointsAliasProperty(setter=MinDateSet, property_type=messages.StringField)
    def minDate(self):
        """
        minDate is only used as parameter in query_methods
        so there should never be a reason to actually retrieve the value
        """
        return None
    def MaxDateSet(self, value):
        # Query-only alias: adds a post_date <= value filter.
        if value is not None:
            self._endpoints_query_info._filters.add(
                ActivityRecord.post_date <= value)
    @EndpointsAliasProperty(setter=MaxDateSet, property_type=messages.StringField)
    def maxDate(self):
        """
        maxDate is only used as parameter in query_methods
        so there should never be a reason to actually retrieve the value
        """
        return None
    def IncludeDeletedSet(self, value):
        """
        If value is true all activity records will be returned.
        Otherwise a filter for non-deleted items is necessary for the query.
        """
        if value is None or value is False:
            self._endpoints_query_info._filters.add(
                ActivityRecord.deleted == False
            )
    @EndpointsAliasProperty(setter=IncludeDeletedSet, property_type=messages.BooleanField, default=False)
    def includeDeleted(self):
        """
        includedDeleted is only used as parameter in query_methods
        so there should never be a reason to actually retrieve the value
        """
        return None
    def calculate_impact(self):
        """Recompute counters and type/group lists from the linked posts.

        Resets plus_oners/resharers/comments to zero, then sums them over
        every ActivityPost in gplus_posts, merging each post's product
        groups and activity types into this record (deduplicated).
        """
        self.plus_oners = 0
        self.resharers = 0
        self.comments = 0
        for post_id in self.gplus_posts:
            post_key = ndb.Key(ActivityPost, post_id)
            activity_post = post_key.get()
            if activity_post is not None:
                self.plus_oners += activity_post.plus_oners
                self.resharers += activity_post.resharers
                self.comments += activity_post.comments
                if activity_post.product_group:
                    for product_group in activity_post.product_group:
                        if product_group not in self.product_groups:
                            self.product_groups.append(product_group)
                if activity_post.activity_type:
                    for act_type in activity_post.activity_type:
                        if act_type not in self.activity_types:
                            self.activity_types.append(act_type)
    def add_post(self, activity_post):
        """Attach a post (if not already linked), recompute and persist."""
        if (self.gplus_posts.count(activity_post.post_id) == 0):
            self.gplus_posts.append(activity_post.post_id)
        self.calculate_impact()
        self.put()
def create_activity_record(activity_post):
    """Create, persist and return a new ActivityRecord for *activity_post*.

    The record's activity_link falls back to the post's own URL when the
    post has no attached link; post_date is the post date truncated to
    YYYY-MM-DD.
    """
    # Fall back to the post URL when no link is attached to the post.
    link = activity_post.url if activity_post.links == "" else activity_post.links
    parsed_date = datetime.strptime(activity_post.date[0:19],
                                    '%Y-%m-%dT%H:%M:%S')
    record = ActivityRecord(gplus_id=activity_post.gplus_id,
                            post_date=parsed_date.strftime("%Y-%m-%d"),
                            activity_link=link,
                            activity_title=activity_post.title,
                            deleted=False)
    record.put()
    logging.info('create new activity record')
    return record
def find(activity_post):
    """Return the ActivityRecord already linked to *activity_post*, or None.

    Lookup is by the post id stored in gplus_posts rather than by activity
    link: link-based lookup failed for records merged from the front end.
    """
    matches = ActivityRecord.query(
        ActivityRecord.gplus_posts == activity_post.id).fetch(1)
    return matches[0] if matches else None
| apache-2.0 |
openthread/silk | silk/device/netns_base.py | 1 | 8483 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for network namespace controller.
"""
import logging
import os
import subprocess
from silk.device.system_call_manager import SystemCallManager
from silk.node.base_node import BaseNode
import silk.postprocessing.ip as silk_ip
def create_link_pair(interface_1, interface_2):
    """Create a veth pair whose endpoints are named *interface_1* and
    *interface_2*; return the command's combined stdout/stderr bytes.

    NOTE(review): runs through the shell with interpolated names
    (shell=True), so callers must pass trusted interface names.
    """
    command = "sudo ip link add name {} type veth peer name {}".format(
        interface_1, interface_2)
    proc = subprocess.Popen(command, bufsize=0, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, shell=True)
    output, _ = proc.communicate()
    return output
class NetnsController(SystemCallManager):
    """
    This class contains methods for creating, destroying, and manipulating network namespaces. It also provides
    methods for making systems calls in network namespaces.
    Network namespace manipulation requires sudo. All inheriting classes must be run with sudo.
    Classes that inherit from NetnsController
    1) Must provide a self.device_path attribute. (This is used to roll a unique network namespace name.)
    2) Inheriting class must define the following logging methods
        a) log_debug(log_line)
        b) log_info(log_line)
        c) log_warning(log_line)
        d) log_error(log_line)
        e) log_critical(log_line)
    """
    # Not referenced within this class; presumably overridden by hardware
    # subclasses -- TODO confirm.
    _hw_model = None
    def __init__(self, netns: str = None, device_path: str = None):
        """
        Carve out a unique network namespace for this device instance.
        Initialize the necessary synchronization mechanisms for async
        operations.
        Startup the system call worker thread.

        Either an explicit netns name or a device path (whose basename
        becomes the netns name) must be supplied.
        """
        self.netns = netns
        # With an explicit netns but no device path, keep an empty path so
        # logging calls that interpolate device_path still work.
        if netns is not None and device_path is None:
            self.device_path = ""
        else:
            self.device_path = device_path
        self.create_netns()
        SystemCallManager.__init__(self)
    def create_netns(self):
        """
        wpantund will run in a network namespace for these tests.
        This function should be called on instantiation to create a
        unique network namespace name.
        This function returns the network namespace name.
        """
        self.log_info("Adding network namespace for %s" % self.device_path)
        if self.netns is None:
            # Derive the namespace name from the device path (e.g.
            # /dev/ttyUSB0 -> ttyUSB0).
            self.netns = os.path.basename(self.device_path)
        command = "sudo ip netns add %s" % self.netns
        self._make_system_call("netns-add", command, 2)
        return self.netns
    def delete_netns(self):
        """Delete netns containing this device.
        """
        self.log_info("Deleting network namespace for %s" % self.device_path)
        command = "sudo ip netns del %s" % self.netns
        self._make_system_call("netns-del", command, 2)
    def netns_pids(self):
        """List all PIDs running in this device's netns.
        """
        self.log_info("Getting PIDs for network namespace for %s" % self.device_path)
        command = "sudo ip netns pids %s" % self.netns
        output = self._make_system_call("netns-pids", command, 2).strip()
        return output.split("\n")
    def netns_killall(self):
        """Stop all PIDs in this netns.
        """
        self.log_info("Stopping all processes in %s" % self.device_path)
        for pid in self.netns_pids():
            # netns_pids can yield empty strings for a namespace with no
            # processes; skip those.
            if len(pid.strip()) > 0:
                self.make_netns_call("kill -SIGINT %s" % pid)
    def cleanup_netns(self):
        """
        Stop all PIDs running in the netns.
        Delete the netns.
        """
        self.log_info("Cleaning up network namespace for %s" % self.device_path)
        self.netns_killall()
        self.delete_netns()
    def construct_netns_command(self, user_command):
        """Format a command so that it is called in this device's network namespace.
        """
        command = "sudo ip netns exec %s " % self.netns
        command += user_command
        return command
    def make_netns_call(self, command, timeout=10):
        """
        Take a standard system call (eg: ifconfig, ping, etc.).
        Format the command so that it will be called in this network namespace.
        Make the system call with a timeout.
        """
        command = self.construct_netns_command(command)
        return self._make_system_call("netns-exec", command, timeout)
    def make_netns_call_async(self, command, expect, timeout, field=None, exact_match: bool = False):
        """
        Take a standard system call (eg: ifconfig, ping, etc.).
        Format the command so that it will be called in this network namespace.
        Make the system call with a timeout.

        Asynchronous variant: `expect`/`field`/`exact_match` are forwarded
        to SystemCallManager.make_system_call_async for output matching.
        """
        command = self.construct_netns_command(command)
        return self.make_system_call_async("netns-exec", command, expect, timeout, field, exact_match)
    def link_set(self, interface_name, virtual_eth_peer):
        """
        Assign a network namespace link endpoint to this network namespace.
        Bring up the new interface.
        """
        # Move one end of the veth pair into this namespace...
        command = "ip link set %s netns %s" % (interface_name, self.netns)
        self._make_system_call("link-set", command, 1)
        # ...bring it up inside the namespace...
        command = "ifconfig %s up" % interface_name
        self.make_netns_call(command, 1)
        # ...and bring up the peer that stays in the host namespace.
        command = "ip link set %s up" % virtual_eth_peer
        self._make_system_call("link-set", command, 1)
    def add_ip6_addr(self, prefix, subnet, mac, interface, interface_label):
        """
        Construct a new IP with the specified prefix, subnet, and MAC.
        Store the IP address that was generated.
        Make a call to add the newly formed address to the appropriate
        interface.
        """
        new_ip = silk_ip.assemble(prefix, subnet, mac)
        command = "ip addr add %s/64 dev %s" % (new_ip, interface)
        self.store_data(new_ip, interface_label)
        self.make_netns_call_async(command, "", 1)
        self.make_netns_call_async("ifconfig", "", 1)
    def set_default_route(self, default_interface=None):
        # Default to the Thread interface unless the caller names another.
        if default_interface is None:
            default_interface = self.thread_interface
        command = "ip -6 route add default dev %s" % default_interface
        self.make_netns_call_async(command, "", 1)
    def enable_ipv6_forwarding(self):
        # Enables forwarding for all interfaces inside this namespace.
        command = "sysctl -w net.ipv6.conf.all.forwarding=1"
        self.make_netns_call_async(command, "", 1, None)
    def disable_ipv6_forwarding(self):
        command = "sysctl -w net.ipv6.conf.all.forwarding=0"
        self.make_netns_call_async(command, "", 1, None)
    def add_route(self, dest, dest_subnet_length, via_addr, interface_name):
        # Add a static IPv6 route dest/<len> via via_addr on the interface.
        command = "ip -6 route add %s/%s via %s dev %s" % (dest, dest_subnet_length, via_addr, interface_name)
        self.make_netns_call_async(command, "", 1, None)
class StandaloneNetworkNamespace(NetnsController, BaseNode):
    """Class to control a standalone network namespace that is not associated with a development board.
    """
    def __init__(self, netns_name):
        # Synthetic device path: there is no real /dev node for a bare
        # namespace, but NetnsController uses the path for logging.
        device_path = os.path.join("/dev", netns_name)
        BaseNode.__init__(self, netns_name)
        NetnsController.__init__(self, netns_name, device_path)
    def tear_down(self):
        # Kill all processes in the namespace, then delete it.
        self.cleanup_netns()
    #################################
    # Logging functions
    #################################
    def set_logger(self, parent_logger):
        # Child logger named after the namespace, e.g. "parent.netns0".
        self.logger = parent_logger.getChild(self.netns)
        self.logger.setLevel(logging.DEBUG)
    # The log_* methods below satisfy NetnsController's logging contract;
    # each is a no-op until set_logger() has been called.
    def log_debug(self, log_line):
        if self.logger is not None:
            self.logger.debug(log_line)
    def log_info(self, log_line):
        if self.logger is not None:
            self.logger.info(log_line)
    def log_warning(self, log_line):
        if self.logger is not None:
            self.logger.warning(log_line)
    def log_error(self, log_line):
        if self.logger is not None:
            self.logger.error(log_line)
    def log_critical(self, log_line):
        if self.logger is not None:
            self.logger.critical(log_line)
| apache-2.0 |
takeshineshiro/swift | test/unit/common/middleware/test_healthcheck.py | 35 | 2901 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
from swift.common.swob import Request, Response
from swift.common.middleware import healthcheck
class FakeApp(object):
    """Minimal WSGI app that answers every request with the body 'FAKE APP'."""

    def __call__(self, env, start_response):
        request = Request(env)
        response = Response(request=request, body='FAKE APP')
        return response(env, start_response)
class TestHealthCheck(unittest.TestCase):
    """Tests for swift's healthcheck WSGI middleware.

    Uses the deprecated-free ``assertEqual`` (``assertEquals`` is a
    deprecated alias removed in Python 3.12).
    """

    def setUp(self):
        self.tempdir = tempfile.mkdtemp()
        # When this file exists, the middleware reports 503 DISABLED.
        self.disable_path = os.path.join(self.tempdir, 'dont-taze-me-bro')
        self.got_statuses = []

    def tearDown(self):
        shutil.rmtree(self.tempdir, ignore_errors=True)

    def get_app(self, app, global_conf, **local_conf):
        """Wrap *app* with the healthcheck middleware via its paste factory."""
        factory = healthcheck.filter_factory(global_conf, **local_conf)
        return factory(app)

    def start_response(self, status, headers):
        # WSGI start_response stub that records statuses for assertions.
        self.got_statuses.append(status)

    def test_healthcheck(self):
        req = Request.blank('/healthcheck', environ={'REQUEST_METHOD': 'GET'})
        app = self.get_app(FakeApp(), {})
        resp = app(req.environ, self.start_response)
        self.assertEqual(['200 OK'], self.got_statuses)
        self.assertEqual(resp, ['OK'])

    def test_healtcheck_pass(self):
        # NOTE: the "healtcheck" typo in the method name is kept on purpose;
        # renaming would change the discovered test id.
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
        app = self.get_app(FakeApp(), {})
        resp = app(req.environ, self.start_response)
        self.assertEqual(['200 OK'], self.got_statuses)
        self.assertEqual(resp, ['FAKE APP'])

    def test_healthcheck_pass_not_disabled(self):
        req = Request.blank('/healthcheck', environ={'REQUEST_METHOD': 'GET'})
        app = self.get_app(FakeApp(), {}, disable_path=self.disable_path)
        resp = app(req.environ, self.start_response)
        self.assertEqual(['200 OK'], self.got_statuses)
        self.assertEqual(resp, ['OK'])

    def test_healthcheck_pass_disabled(self):
        # Close the handle right away -- the original bare open() leaked it.
        open(self.disable_path, 'w').close()
        req = Request.blank('/healthcheck', environ={'REQUEST_METHOD': 'GET'})
        app = self.get_app(FakeApp(), {}, disable_path=self.disable_path)
        resp = app(req.environ, self.start_response)
        self.assertEqual(['503 Service Unavailable'], self.got_statuses)
        self.assertEqual(resp, ['DISABLED BY FILE'])
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
huaweiswitch/neutron | neutron/plugins/vmware/plugins/service.py | 7 | 80699 | # Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import netaddr
from oslo.config import cfg
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.db.firewall import firewall_db
from neutron.db import l3_db
from neutron.db.loadbalancer import loadbalancer_db
from neutron.db import routedserviceinsertion_db as rsi_db
from neutron.db.vpn import vpn_db
from neutron.extensions import firewall as fw_ext
from neutron.extensions import l3
from neutron.extensions import routedserviceinsertion as rsi
from neutron.extensions import vpnaas as vpn_ext
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as service_constants
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import config # noqa
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.dbexts import servicerouter as sr_db
from neutron.plugins.vmware.dbexts import vcns_db
from neutron.plugins.vmware.dbexts import vcns_models
from neutron.plugins.vmware.extensions import servicerouter as sr
from neutron.plugins.vmware.nsxlib import router as routerlib
from neutron.plugins.vmware.nsxlib import switch as switchlib
from neutron.plugins.vmware.plugins import base
from neutron.plugins.vmware.vshield.common import constants as vcns_const
from neutron.plugins.vmware.vshield.common import exceptions
from neutron.plugins.vmware.vshield.tasks import constants as tasks_const
from neutron.plugins.vmware.vshield import vcns_driver
from sqlalchemy.orm import exc as sa_exc
LOG = logging.getLogger(__name__)
# Router classification: plain NSX logical routers vs. "advanced" service
# routers backed by a vShield Edge (see NsxAdvancedPlugin below).
ROUTER_TYPE_BASIC = 1
ROUTER_TYPE_ADVANCED = 2
# Neutron-visible statuses a service router can report.
ROUTER_STATUS = [
    service_constants.ACTIVE,
    service_constants.DOWN,
    service_constants.PENDING_CREATE,
    service_constants.PENDING_DELETE,
    service_constants.ERROR
]
# Map each Neutron status to the corresponding vCNS RouterStatus level.
ROUTER_STATUS_LEVEL = {
    service_constants.ACTIVE: vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE,
    service_constants.DOWN: vcns_const.RouterStatus.ROUTER_STATUS_DOWN,
    service_constants.PENDING_CREATE: (
        vcns_const.RouterStatus.ROUTER_STATUS_PENDING_CREATE
    ),
    service_constants.PENDING_DELETE: (
        vcns_const.RouterStatus.ROUTER_STATUS_PENDING_DELETE
    ),
    service_constants.ERROR: vcns_const.RouterStatus.ROUTER_STATUS_ERROR
}
class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
base.NsxPluginV2,
rsi_db.RoutedServiceInsertionDbMixin,
firewall_db.Firewall_db_mixin,
loadbalancer_db.LoadBalancerPluginDb,
vpn_db.VPNPluginDb
):
supported_extension_aliases = (
base.NsxPluginV2.supported_extension_aliases + [
"service-router",
"routed-service-insertion",
"fwaas",
"lbaas",
"vpnaas"
])
# The service plugin cannot currently support pagination
__native_pagination_support = False
__native_sorting_support = False
    def __init__(self):
        """Initialize the advanced plugin, rerouting external gateway port
        handling and logical switch creation through vCNS/Edge.
        """
        super(NsxAdvancedPlugin, self).__init__()
        # Keep the base implementations so basic (non-service) routers can
        # still fall back to them (see _vcns_*_ext_gw_port).
        self._super_create_ext_gw_port = (
            self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW])
        self._super_delete_ext_gw_port = (
            self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW])
        self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW] = (
            self._vcns_create_ext_gw_port)
        self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW] = (
            self._vcns_delete_ext_gw_port)
        # cache router type based on router id
        self._router_type = {}
        self.callbacks = VcnsCallbacks(self.safe_reference)
        # load the vCNS driver
        self._load_vcns_drivers()
        # switchlib's create_lswitch needs to be replaced in order to proxy
        # logical switch create requests to vcns
        self._set_create_lswitch_proxy()
    def _set_create_lswitch_proxy(self):
        # Monkey-patch the base plugin's switchlib module so that logical
        # switch creation goes through the vCNS driver instead.
        base.switchlib.create_lswitch = self._proxy_create_lswitch
    def _proxy_create_lswitch(self, *args, **kwargs):
        """Replacement for switchlib.create_lswitch that delegates to the
        vCNS driver, translating the base-plugin call arguments first.
        """
        name, tz_config, tags = (
            _process_base_create_lswitch_args(*args, **kwargs)
        )
        return self.vcns_driver.create_lswitch(
            name, tz_config, tags=tags,
            port_isolation=None, replication_mode=None)
    def _load_vcns_drivers(self):
        # Instantiate the vShield/vCNS driver, wired to this plugin's
        # callback handler.
        self.vcns_driver = vcns_driver.VcnsDriver(self.callbacks)
    def _set_router_type(self, router_id, router_type):
        # Cache the computed type so later lookups can skip the DB query.
        self._router_type[router_id] = router_type
    def _get_router_type(self, context=None, router_id=None, router=None):
        """Return ROUTER_TYPE_ADVANCED or ROUTER_TYPE_BASIC for a router.

        When only an id is given, the in-memory cache is consulted first;
        otherwise the router's 'service_router' extra attribute decides,
        and the result is cached for subsequent lookups.
        """
        if not router:
            if router_id in self._router_type:
                return self._router_type[router_id]
            router = self._get_router(context, router_id)
        LOG.debug("EDGE: router = %s", router)
        if router['extra_attributes']['service_router']:
            router_type = ROUTER_TYPE_ADVANCED
        else:
            router_type = ROUTER_TYPE_BASIC
        self._set_router_type(router['id'], router_type)
        return router_type
def _find_router_type(self, router):
is_service_router = router.get(sr.SERVICE_ROUTER, False)
if is_service_router:
return ROUTER_TYPE_ADVANCED
else:
return ROUTER_TYPE_BASIC
def _is_advanced_service_router(self, context=None, router_id=None,
router=None):
if router:
router_type = self._get_router_type(router=router)
else:
router_type = self._get_router_type(context, router_id)
return (router_type == ROUTER_TYPE_ADVANCED)
def _vcns_create_ext_gw_port(self, context, port_data):
router_id = port_data['device_id']
if not self._is_advanced_service_router(context, router_id):
self._super_create_ext_gw_port(context, port_data)
return
# NOP for Edge because currently the port will be create internally
# by VSM
LOG.debug("EDGE: _vcns_create_ext_gw_port")
def _vcns_delete_ext_gw_port(self, context, port_data):
router_id = port_data['device_id']
if not self._is_advanced_service_router(context, router_id):
self._super_delete_ext_gw_port(context, port_data)
return
# NOP for Edge
LOG.debug("EDGE: _vcns_delete_ext_gw_port")
def _get_external_attachment_info(self, context, router):
gw_port = router.gw_port
ipaddress = None
netmask = None
nexthop = None
if gw_port:
# gw_port may have multiple IPs, only configure the first one
if gw_port.get('fixed_ips'):
ipaddress = gw_port['fixed_ips'][0]['ip_address']
network_id = gw_port.get('network_id')
if network_id:
ext_net = self._get_network(context, network_id)
if not ext_net.external:
msg = (_("Network '%s' is not a valid external "
"network") % network_id)
raise n_exc.BadRequest(resource='router', msg=msg)
if ext_net.subnets:
ext_subnet = ext_net.subnets[0]
netmask = str(netaddr.IPNetwork(ext_subnet.cidr).netmask)
nexthop = ext_subnet.gateway_ip
return (ipaddress, netmask, nexthop)
def _get_external_gateway_address(self, context, router):
ipaddress, netmask, nexthop = self._get_external_attachment_info(
context, router)
return nexthop
    def _vcns_update_static_routes(self, context, **kwargs):
        """Push the router's static routes (and default gateway) to its edge.

        Accepted kwargs: ``router`` or ``router_id`` (one required),
        ``edge_id`` (looked up from the vcns binding if absent),
        ``nexthop`` (forces a non-skippable gateway update), and
        ``subnets`` (defaults to the router's subnet CIDRs).
        """
        router = kwargs.get('router')
        if router is None:
            router = self._get_router(context, kwargs['router_id'])
        edge_id = kwargs.get('edge_id')
        if edge_id is None:
            binding = vcns_db.get_vcns_router_binding(context.session,
                                                      router['id'])
            edge_id = binding['edge_id']
        skippable = True
        if 'nexthop' in kwargs:
            nexthop = kwargs['nexthop']
            # The default gateway and vnic config have dependencies; if the
            # caller explicitly specifies a nexthop change, tell the driver
            # not to skip this route update.
            skippable = False
        else:
            nexthop = self._get_external_gateway_address(context,
                                                         router)
        if 'subnets' in kwargs:
            subnets = kwargs['subnets']
        else:
            subnets = self._find_router_subnets_cidrs(context.elevated(),
                                                      router['id'])
        # Every tenant subnet is routed via the integration lrouter address
        # (the host part of INTEGRATION_LR_IPADDRESS, CIDR suffix stripped).
        routes = []
        for subnet in subnets:
            routes.append({
                'cidr': subnet,
                'nexthop': vcns_const.INTEGRATION_LR_IPADDRESS.split('/')[0]
            })
        self.vcns_driver.update_routes(router['id'], edge_id, nexthop, routes,
                                       skippable)
def _get_nat_rules(self, context, router):
fip_qry = context.session.query(l3_db.FloatingIP)
fip_db = fip_qry.filter_by(router_id=router['id']).all()
dnat = []
snat = []
for fip in fip_db:
if fip.fixed_port_id:
dnat.append({
'dst': fip.floating_ip_address,
'translated': fip.fixed_ip_address
})
gw_port = router.gw_port
if gw_port and router.enable_snat:
if gw_port.get('fixed_ips'):
snat_ip = gw_port['fixed_ips'][0]['ip_address']
subnets = self._find_router_subnets_cidrs(context.elevated(),
router['id'])
for subnet in subnets:
snat.append({
'src': subnet,
'translated': snat_ip
})
return (snat, dnat)
def _update_nat_rules(self, context, router):
snat, dnat = self._get_nat_rules(context, router)
binding = vcns_db.get_vcns_router_binding(context.session,
router['id'])
self.vcns_driver.update_nat_rules(router['id'],
binding['edge_id'],
snat, dnat)
    def _update_interface(self, context, router, sync=False):
        """Reconfigure the edge's external vnic for this router.

        Primary address comes from the gateway port; floating IPs and LB
        vip addresses are configured as secondary addresses. When ``sync``
        is True, block until the driver task reports a result.
        """
        addr, mask, nexthop = self._get_external_attachment_info(
            context, router)
        secondary = []
        # Floating IPs that are actually associated become secondary IPs.
        fip_qry = context.session.query(l3_db.FloatingIP)
        fip_db = fip_qry.filter_by(router_id=router['id']).all()
        for fip in fip_db:
            if fip.fixed_port_id:
                secondary.append(fip.floating_ip_address)
        # Add all vip addresses bound on the router.
        vip_addrs = self._get_all_vip_addrs_by_router_id(context,
                                                         router['id'])
        secondary.extend(vip_addrs)
        binding = vcns_db.get_vcns_router_binding(context.session,
                                                  router['id'])
        task = self.vcns_driver.update_interface(
            router['id'], binding['edge_id'],
            vcns_const.EXTERNAL_VNIC_INDEX,
            self.vcns_driver.external_network,
            addr, mask, secondary=secondary)
        if sync:
            task.wait(tasks_const.TaskState.RESULT)
    def _update_router_gw_info(self, context, router_id, info):
        """Update gateway info; reconfigure the edge for advanced routers.

        Ordering matters for advanced routers: when the external network
        changes, the old default gateway is removed before the external
        vnic is re-addressed, and the new gateway is set only afterwards.
        """
        if not self._is_advanced_service_router(context, router_id):
            super(NsxAdvancedPlugin, self)._update_router_gw_info(
                context, router_id, info)
            return
        # get original gw_port config
        router = self._get_router(context, router_id)
        org_ext_net_id = router.gw_port_id and router.gw_port.network_id
        org_enable_snat = router.enable_snat
        orgaddr, orgmask, orgnexthop = self._get_external_attachment_info(
            context, router)
        # Call the grandparent implementation, bypassing base.NsxPluginV2
        # in the MRO, so only the DB-side gateway update runs here.
        super(base.NsxPluginV2, self)._update_router_gw_info(
            context, router_id, info, router=router)
        new_ext_net_id = router.gw_port_id and router.gw_port.network_id
        new_enable_snat = router.enable_snat
        newaddr, newmask, newnexthop = self._get_external_attachment_info(
            context, router)
        binding = vcns_db.get_vcns_router_binding(context.session, router_id)
        if new_ext_net_id != org_ext_net_id and orgnexthop:
            # network changed, need to remove default gateway before vnic
            # can be configured
            LOG.debug("VCNS: delete default gateway %s", orgnexthop)
            self._vcns_update_static_routes(context,
                                            router=router,
                                            edge_id=binding['edge_id'],
                                            nexthop=None)
        if orgaddr != newaddr or orgmask != newmask:
            # Re-address the edge's external vnic.
            self.vcns_driver.update_interface(
                router_id, binding['edge_id'],
                vcns_const.EXTERNAL_VNIC_INDEX,
                self.vcns_driver.external_network,
                newaddr, newmask)
        if orgnexthop != newnexthop:
            self._vcns_update_static_routes(context,
                                            router=router,
                                            edge_id=binding['edge_id'],
                                            nexthop=newnexthop)
        if (new_ext_net_id == org_ext_net_id and
            org_enable_snat == new_enable_snat):
            # Neither network nor SNAT flag changed: NAT rules are current.
            return
        self._update_nat_rules(context, router)
def _add_subnet_snat_rule(self, context, router, subnet):
# NOP for service router
if not self._is_advanced_service_router(router=router):
super(NsxAdvancedPlugin, self)._add_subnet_snat_rule(
context, router, subnet)
def _delete_subnet_snat_rule(self, context, router, subnet):
# NOP for service router
if not self._is_advanced_service_router(router=router):
super(NsxAdvancedPlugin, self)._delete_subnet_snat_rule(
context, router, subnet)
def _remove_floatingip_address(self, context, fip_db):
# NOP for service router
router_id = fip_db.router_id
if not self._is_advanced_service_router(context, router_id):
super(NsxAdvancedPlugin, self)._remove_floatingip_address(
context, fip_db)
    def _create_advanced_service_router(self, context, neutron_router_id,
                                        name, lrouter, lswitch):
        """Record the vcns router binding and kick off the edge deployment.

        Returns the new binding row; the edge deployment itself completes
        asynchronously via the driver's task framework.
        """
        # store binding (edge_id is None until deployment assigns one)
        binding = vcns_db.add_vcns_router_binding(
            context.session, neutron_router_id, None, lswitch['uuid'],
            service_constants.PENDING_CREATE)
        # deploy edge
        jobdata = {
            'neutron_router_id': neutron_router_id,
            'lrouter': lrouter,
            'lswitch': lswitch,
            'context': context
        }
        # deploy and wait until the deploy request has been submitted
        # so we will have edge_id ready. The wait here should be fine
        # as we're not in a database transaction now
        self.vcns_driver.deploy_edge(
            lrouter['uuid'], name, lswitch['uuid'], jobdata=jobdata,
            wait_for_exec=True)
        return binding
def _create_integration_lswitch(self, tenant_id, name):
# use defautl transport zone
transport_zone_config = [{
"zone_uuid": self.cluster.default_tz_uuid,
"transport_type": cfg.CONF.NSX.default_transport_type
}]
return self.vcns_driver.create_lswitch(name, transport_zone_config)
    def _add_router_integration_interface(self, tenant_id, name,
                                          lrouter, lswitch):
        """Patch the NSX lrouter into the integration lswitch.

        Three steps, each rolling back the lswitch port on failure:
        create the lswitch port, create the lrouter port (with the
        integration address), then attach the two.

        :raises n_exc.NeutronException: on any NSX API failure.
        """
        # create logic switch port
        try:
            ls_port = switchlib.create_lport(
                self.cluster, lswitch['uuid'], tenant_id,
                '', '', lrouter['uuid'], True)
        except api_exc.NsxApiException:
            msg = (_("An exception occurred while creating a port "
                     "on lswitch %s") % lswitch['uuid'])
            LOG.exception(msg)
            raise n_exc.NeutronException(message=msg)
        # create logic router port
        try:
            neutron_port_id = ''
            # NSX display names are capped; '-lp' marks the lrouter port.
            pname = name[:36] + '-lp'
            admin_status_enabled = True
            lr_port = routerlib.create_router_lport(
                self.cluster, lrouter['uuid'], tenant_id,
                neutron_port_id, pname, admin_status_enabled,
                [vcns_const.INTEGRATION_LR_IPADDRESS])
        except api_exc.NsxApiException:
            msg = (_("Unable to create port on NSX logical router %s") % name)
            LOG.exception(msg)
            # roll back the lswitch port created above
            switchlib.delete_port(
                self.cluster, lswitch['uuid'], ls_port['uuid'])
            raise n_exc.NeutronException(message=msg)
        # attach logic router port to switch port
        try:
            self._update_router_port_attachment(
                self.cluster, None, lrouter['uuid'], {}, lr_port['uuid'],
                'PatchAttachment', ls_port['uuid'], None)
        except api_exc.NsxApiException as e:
            # lr_port should have been deleted
            switchlib.delete_port(
                self.cluster, lswitch['uuid'], ls_port['uuid'])
            raise e
def _create_lrouter(self, context, router, nexthop):
lrouter = super(NsxAdvancedPlugin, self)._create_lrouter(
context, router, vcns_const.INTEGRATION_EDGE_IPADDRESS)
router_type = self._find_router_type(router)
self._set_router_type(lrouter['uuid'], router_type)
if router_type == ROUTER_TYPE_BASIC:
return lrouter
tenant_id = self._get_tenant_id_for_create(context, router)
name = router['name']
try:
lsname = name[:36] + '-ls'
lswitch = self._create_integration_lswitch(
tenant_id, lsname)
except Exception:
msg = _("Unable to create integration logic switch "
"for router %s") % name
LOG.exception(msg)
routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
raise n_exc.NeutronException(message=msg)
try:
self._add_router_integration_interface(tenant_id, name,
lrouter, lswitch)
except Exception:
msg = _("Unable to add router interface to integration lswitch "
"for router %s") % name
LOG.exception(msg)
routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
raise n_exc.NeutronException(message=msg)
try:
self._create_advanced_service_router(
context, router['id'], name, lrouter, lswitch)
except Exception:
msg = (_("Unable to create advance service router for %s") % name)
LOG.exception(msg)
self.vcns_driver.delete_lswitch(lswitch('uuid'))
routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
raise n_exc.NeutronException(message=msg)
lrouter['status'] = service_constants.PENDING_CREATE
return lrouter
def check_router_in_use(self, context, router_id):
router_filter = {'router_id': [router_id]}
vpnservices = self.get_vpnservices(
context, filters={'router_id': [router_id]})
if vpnservices:
raise vpn_ext.RouterInUseByVPNService(
router_id=router_id,
vpnservice_id=vpnservices[0]['id'])
vips = self.get_vips(
context, filters=router_filter)
if vips:
raise nsx_exc.RouterInUseByLBService(
router_id=router_id,
vip_id=vips[0]['id'])
firewalls = self.get_firewalls(
context, filters=router_filter)
if firewalls:
raise nsx_exc.RouterInUseByFWService(
router_id=router_id,
firewall_id=firewalls[0]['id'])
def _delete_lrouter(self, context, router_id, nsx_router_id):
binding = vcns_db.get_vcns_router_binding(context.session, router_id)
if not binding:
super(NsxAdvancedPlugin, self)._delete_lrouter(
context, router_id, nsx_router_id)
else:
#Check whether router has an advanced service inserted.
self.check_router_in_use(context, router_id)
vcns_db.update_vcns_router_binding(
context.session, router_id,
status=service_constants.PENDING_DELETE)
lswitch_id = binding['lswitch_id']
edge_id = binding['edge_id']
# delete lswitch
try:
self.vcns_driver.delete_lswitch(lswitch_id)
except exceptions.ResourceNotFound:
LOG.warning(_("Did not found lswitch %s in NSX"), lswitch_id)
# delete edge
jobdata = {
'context': context
}
self.vcns_driver.delete_edge(router_id, edge_id, jobdata=jobdata)
# delete NSX logical router
routerlib.delete_lrouter(self.cluster, nsx_router_id)
if id in self._router_type:
del self._router_type[router_id]
    def _update_lrouter(self, context, router_id, name, nexthop, routes=None):
        """Update the lrouter; advanced routers pin nexthop to the edge.

        For advanced routers the NSX lrouter's nexthop is always the
        integration edge address; external nexthop changes are handled
        by _update_router_gw_info instead. Returns the previous routes.
        """
        if not self._is_advanced_service_router(context, router_id):
            return super(NsxAdvancedPlugin, self)._update_lrouter(
                context, router_id, name, nexthop, routes=routes)
        previous_routes = super(NsxAdvancedPlugin, self)._update_lrouter(
            context, router_id, name,
            vcns_const.INTEGRATION_EDGE_IPADDRESS, routes=routes)
        # TODO(fank): Theoretically users can specify extra routes for
        # physical network, and routes for phyiscal network needs to be
        # configured on Edge. This can be done by checking if nexthop is in
        # external network. But for now we only handle routes for logic
        # space and leave it for future enhancement.
        # Let _update_router_gw_info handle nexthop change
        #self._vcns_update_static_routes(context, router_id=router_id)
        return previous_routes
def _retrieve_and_delete_nat_rules(self, context, floating_ip_address,
internal_ip, router_id,
min_num_rules_expected=0):
# NOP for advanced service router
if not self._is_advanced_service_router(context, router_id):
super(NsxAdvancedPlugin, self)._retrieve_and_delete_nat_rules(
context, floating_ip_address, internal_ip, router_id,
min_num_rules_expected=min_num_rules_expected)
def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
# Update DB model only for advanced service router
router_id = self._get_fip_assoc_data(context, fip, floatingip_db)[2]
if (router_id and
not self._is_advanced_service_router(context, router_id)):
super(NsxAdvancedPlugin, self)._update_fip_assoc(
context, fip, floatingip_db, external_port)
else:
super(base.NsxPluginV2, self)._update_fip_assoc(
context, fip, floatingip_db, external_port)
def _get_nsx_lrouter_status(self, id):
try:
lrouter = routerlib.get_lrouter(self.cluster, id)
lr_status = lrouter["_relations"]["LogicalRouterStatus"]
if lr_status["fabric_status"]:
nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE
else:
nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_DOWN
except n_exc.NotFound:
nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_ERROR
return nsx_status
def _get_vse_status(self, context, id):
binding = vcns_db.get_vcns_router_binding(context.session, id)
edge_status_level = self.vcns_driver.get_edge_status(
binding['edge_id'])
edge_db_status_level = ROUTER_STATUS_LEVEL[binding.status]
if edge_status_level > edge_db_status_level:
return edge_status_level
else:
return edge_db_status_level
def _get_all_nsx_lrouters_statuses(self, tenant_id, fields):
# get nsx lrouters status
nsx_lrouters = routerlib.get_lrouters(self.cluster,
tenant_id,
fields)
nsx_status = {}
for nsx_lrouter in nsx_lrouters:
if (nsx_lrouter["_relations"]["LogicalRouterStatus"]
["fabric_status"]):
nsx_status[nsx_lrouter['uuid']] = (
vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE
)
else:
nsx_status[nsx_lrouter['uuid']] = (
vcns_const.RouterStatus.ROUTER_STATUS_DOWN
)
return nsx_status
def _get_all_vse_statuses(self, context):
bindings = self._model_query(
context, vcns_models.VcnsRouterBinding)
vse_db_status_level = {}
edge_id_to_router_id = {}
router_ids = []
for binding in bindings:
if not binding['edge_id']:
continue
router_id = binding['router_id']
router_ids.append(router_id)
edge_id_to_router_id[binding['edge_id']] = router_id
vse_db_status_level[router_id] = (
ROUTER_STATUS_LEVEL[binding['status']])
if not vse_db_status_level:
# no advanced service router, no need to query
return {}
vse_status_level = {}
edges_status_level = self.vcns_driver.get_edges_statuses()
for edge_id, status_level in edges_status_level.iteritems():
if edge_id in edge_id_to_router_id:
router_id = edge_id_to_router_id[edge_id]
db_status_level = vse_db_status_level[router_id]
if status_level > db_status_level:
vse_status_level[router_id] = status_level
else:
vse_status_level[router_id] = db_status_level
return vse_status_level
def get_router(self, context, id, fields=None):
if fields and 'status' not in fields:
return super(NsxAdvancedPlugin, self).get_router(
context, id, fields=fields)
router = super(NsxAdvancedPlugin, self).get_router(context, id)
router_type = self._find_router_type(router)
if router_type == ROUTER_TYPE_ADVANCED:
vse_status_level = self._get_vse_status(context, id)
if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]:
router['status'] = ROUTER_STATUS[vse_status_level]
return self._fields(router, fields)
def get_routers(self, context, filters=None, fields=None, **kwargs):
routers = super(NsxAdvancedPlugin, self).get_routers(
context, filters=filters, **kwargs)
if fields and 'status' not in fields:
# no status checking, just return regular get_routers
return [self._fields(router, fields) for router in routers]
for router in routers:
router_type = self._find_router_type(router)
if router_type == ROUTER_TYPE_ADVANCED:
break
else:
# no advanced service router, return here
return [self._fields(router, fields) for router in routers]
vse_status_all = self._get_all_vse_statuses(context)
for router in routers:
router_type = self._find_router_type(router)
if router_type == ROUTER_TYPE_ADVANCED:
vse_status_level = vse_status_all.get(router['id'])
if vse_status_level is None:
vse_status_level = (
vcns_const.RouterStatus.ROUTER_STATUS_ERROR)
if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]:
router['status'] = ROUTER_STATUS[vse_status_level]
return [self._fields(router, fields) for router in routers]
def add_router_interface(self, context, router_id, interface_info):
info = super(NsxAdvancedPlugin, self).add_router_interface(
context, router_id, interface_info)
if self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
if router.enable_snat:
self._update_nat_rules(context, router)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._vcns_update_static_routes(context, router=router)
return info
def remove_router_interface(self, context, router_id, interface_info):
info = super(NsxAdvancedPlugin, self).remove_router_interface(
context, router_id, interface_info)
if self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
if router.enable_snat:
self._update_nat_rules(context, router)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._vcns_update_static_routes(context, router=router)
return info
def create_floatingip(self, context, floatingip):
fip = super(NsxAdvancedPlugin, self).create_floatingip(
context, floatingip)
router_id = fip.get('router_id')
if router_id and self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._update_nat_rules(context, router)
self._update_interface(context, router)
return fip
def update_floatingip(self, context, id, floatingip):
fip = super(NsxAdvancedPlugin, self).update_floatingip(
context, id, floatingip)
router_id = fip.get('router_id')
if router_id and self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._update_nat_rules(context, router)
self._update_interface(context, router)
elif not router_id:
# The floating IP has been disassociated and should be set to DOWN
self.update_floatingip_status(context, fip['id'],
constants.FLOATINGIP_STATUS_DOWN)
return fip
def delete_floatingip(self, context, id):
fip_db = self._get_floatingip(context, id)
router_id = None
if fip_db.fixed_port_id:
router_id = fip_db.router_id
super(NsxAdvancedPlugin, self).delete_floatingip(context, id)
if router_id and self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._update_interface(context, router)
self._update_nat_rules(context, router)
def disassociate_floatingips(self, context, port_id):
routers = set()
try:
fip_qry = context.session.query(l3_db.FloatingIP)
fip_dbs = fip_qry.filter_by(fixed_port_id=port_id)
for fip_db in fip_dbs:
routers.add(fip_db.router_id)
except sa_exc.NoResultFound:
pass
super(NsxAdvancedPlugin, self).disassociate_floatingips(context,
port_id)
for router_id in routers:
if self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._update_interface(context, router)
self._update_nat_rules(context, router)
#
# FWaaS plugin implementation
#
def _firewall_set_status(
self, context, firewall_id, status, firewall=None):
with context.session.begin(subtransactions=True):
fw_db = self._get_firewall(context, firewall_id)
if status == service_constants.PENDING_UPDATE and (
fw_db.status == service_constants.PENDING_DELETE):
raise fw_ext.FirewallInPendingState(
firewall_id=firewall_id, pending_state=status)
else:
fw_db.status = status
if firewall:
firewall['status'] = status
def _ensure_firewall_update_allowed(self, context, firewall_id):
fwall = self.get_firewall(context, firewall_id)
if fwall['status'] in [service_constants.PENDING_CREATE,
service_constants.PENDING_UPDATE,
service_constants.PENDING_DELETE]:
raise fw_ext.FirewallInPendingState(firewall_id=firewall_id,
pending_state=fwall['status'])
def _ensure_firewall_policy_update_allowed(
self, context, firewall_policy_id):
firewall_policy = self.get_firewall_policy(context, firewall_policy_id)
for firewall_id in firewall_policy.get('firewall_list', []):
self._ensure_firewall_update_allowed(context, firewall_id)
def _ensure_update_or_delete_firewall_rule(
self, context, firewall_rule_id):
fw_rule = self.get_firewall_rule(context, firewall_rule_id)
if fw_rule.get('firewall_policy_id'):
self._ensure_firewall_policy_update_allowed(
context, fw_rule['firewall_policy_id'])
def _make_firewall_rule_list_by_policy_id(self, context, fw_policy_id):
if not fw_policy_id:
return []
firewall_policy_db = self._get_firewall_policy(context, fw_policy_id)
return [
self._make_firewall_rule_dict(fw_rule_db)
for fw_rule_db in firewall_policy_db['firewall_rules']
]
    def _get_edge_id_by_vcns_edge_binding(self, context,
                                          router_id):
        """Return the edge id bound to the given router."""
        # Get vcns_router_binding mapping between router and edge.
        router_binding = vcns_db.get_vcns_router_binding(
            context.session, router_id)
        return router_binding.edge_id
def _get_firewall_list_from_firewall_policy(self, context, policy_id):
firewall_policy_db = self._get_firewall_policy(context, policy_id)
return [
self._make_firewall_dict(fw_db)
for fw_db in firewall_policy_db['firewalls']
]
def _get_firewall_list_from_firewall_rule(self, context, rule_id):
rule = self._get_firewall_rule(context, rule_id)
if not rule.firewall_policy_id:
# The firewall rule is not associated with firewall policy yet
return None
return self._get_firewall_list_from_firewall_policy(
context, rule.firewall_policy_id)
    def _vcns_update_firewall(self, context, fw, router_id=None, **kwargs):
        """Push the firewall (with its full rule list) down to the edge.

        ``edge_id`` and ``firewall_rule_list`` may be supplied in kwargs;
        otherwise they are derived from the router binding and the
        firewall's policy. On driver failure the firewall is marked ERROR
        and the exception is re-raised.
        """
        edge_id = kwargs.get('edge_id')
        if not edge_id:
            edge_id = self._get_edge_id_by_vcns_edge_binding(
                context, router_id)
        firewall_rule_list = kwargs.get('firewall_rule_list')
        if not firewall_rule_list:
            firewall_rule_list = self._make_firewall_rule_list_by_policy_id(
                context, fw['firewall_policy_id'])
        fw_with_rules = fw
        fw_with_rules['firewall_rule_list'] = firewall_rule_list
        try:
            self.vcns_driver.update_firewall(context, edge_id, fw_with_rules)
        except exceptions.VcnsApiException as e:
            self._firewall_set_status(
                context, fw['id'], service_constants.ERROR)
            msg = (_("Failed to create firewall on vShield Edge "
                     "bound on router %s") % router_id)
            LOG.exception(msg)
            raise e
        except exceptions.VcnsBadRequest as e:
            self._firewall_set_status(
                context, fw['id'], service_constants.ERROR)
            LOG.exception(_("Bad Firewall request Input"))
            raise e
def _vcns_delete_firewall(self, context, router_id=None, **kwargs):
edge_id = kwargs.get('edge_id')
if not edge_id:
edge_id = self._get_edge_id_by_vcns_edge_binding(
context, router_id)
#TODO(linb):do rollback on error
self.vcns_driver.delete_firewall(context, edge_id)
    def create_firewall(self, context, firewall):
        """Create a firewall bound to an advanced service router.

        Requires a ``router_id`` in the request; the router must be an
        advanced router with no firewall already attached (one firewall
        per edge). The firewall is created in the DB, bound to the router,
        pushed to the edge, then marked ACTIVE.

        :raises n_exc.BadRequest: missing or non-advanced router_id.
        :raises nsx_exc.ServiceOverQuota: router already has a firewall.
        """
        LOG.debug("create_firewall() called")
        router_id = firewall['firewall'].get(vcns_const.ROUTER_ID)
        if not router_id:
            msg = _("router_id is not provided!")
            LOG.error(msg)
            raise n_exc.BadRequest(resource='router', msg=msg)
        if not self._is_advanced_service_router(context, router_id):
            msg = _("router_id:%s is not an advanced router!") % router_id
            LOG.error(msg)
            raise n_exc.BadRequest(resource='router', msg=msg)
        if self._get_resource_router_id_binding(
            context, firewall_db.Firewall, router_id=router_id):
            msg = _("A firewall is already associated with the router")
            LOG.error(msg)
            raise nsx_exc.ServiceOverQuota(
                overs='firewall', err_msg=msg)
        fw = super(NsxAdvancedPlugin, self).create_firewall(context, firewall)
        # Add router service insertion binding with firewall object.
        res = {
            'id': fw['id'],
            'router_id': router_id
        }
        self._process_create_resource_router_id(
            context, res, firewall_db.Firewall)
        # Since there is only one firewall per edge,
        # here would be bulk configuration operation on firewall
        self._vcns_update_firewall(context, fw, router_id)
        self._firewall_set_status(
            context, fw['id'], service_constants.ACTIVE, fw)
        fw[rsi.ROUTER_ID] = router_id
        return fw
def update_firewall(self, context, id, firewall):
LOG.debug("update_firewall() called")
self._ensure_firewall_update_allowed(context, id)
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=id)
rule_list_pre = self._make_firewall_rule_list_by_policy_id(
context,
self.get_firewall(context, id)['firewall_policy_id'])
firewall['firewall']['status'] = service_constants.PENDING_UPDATE
fw = super(NsxAdvancedPlugin, self).update_firewall(
context, id, firewall)
fw[rsi.ROUTER_ID] = service_router_binding['router_id']
rule_list_new = self._make_firewall_rule_list_by_policy_id(
context, fw['firewall_policy_id'])
if rule_list_pre == rule_list_new:
self._firewall_set_status(
context, fw['id'], service_constants.ACTIVE, fw)
return fw
else:
self._vcns_update_firewall(
context, fw, service_router_binding.router_id,
firewall_rule_list=rule_list_new)
self._firewall_set_status(
context, fw['id'], service_constants.ACTIVE, fw)
return fw
def delete_firewall(self, context, id):
LOG.debug("delete_firewall() called")
self._firewall_set_status(
context, id, service_constants.PENDING_DELETE)
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=id)
self._vcns_delete_firewall(context, service_router_binding.router_id)
super(NsxAdvancedPlugin, self).delete_firewall(context, id)
self._delete_resource_router_id_binding(
context, id, firewall_db.Firewall)
def get_firewall(self, context, id, fields=None):
fw = super(NsxAdvancedPlugin, self).get_firewall(
context, id, fields)
if fields and rsi.ROUTER_ID not in fields:
return fw
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=fw['id'])
fw[rsi.ROUTER_ID] = service_router_binding['router_id']
return fw
def get_firewalls(self, context, filters=None, fields=None):
fws = super(NsxAdvancedPlugin, self).get_firewalls(
context, filters, fields)
if fields and rsi.ROUTER_ID not in fields:
return fws
service_router_bindings = self._get_resource_router_id_bindings(
context, firewall_db.Firewall,
resource_ids=[fw['id'] for fw in fws])
mapping = dict([(binding['resource_id'], binding['router_id'])
for binding in service_router_bindings])
for fw in fws:
fw[rsi.ROUTER_ID] = mapping[fw['id']]
return fws
    def update_firewall_rule(self, context, id, firewall_rule):
        """Update a rule, pushing the change to every edge that uses it.

        No-op on the backend when the rule did not actually change or is
        not yet associated with any firewall.
        """
        LOG.debug("update_firewall_rule() called")
        self._ensure_update_or_delete_firewall_rule(context, id)
        fwr_pre = self.get_firewall_rule(context, id)
        fwr = super(NsxAdvancedPlugin, self).update_firewall_rule(
            context, id, firewall_rule)
        if fwr_pre == fwr:
            # Nothing changed; skip the backend update.
            return fwr
        # check if this rule is associated with firewall
        fw_list = self._get_firewall_list_from_firewall_rule(context, id)
        if not fw_list:
            return fwr
        for fw in fw_list:
            # get router service insertion binding with firewall id
            service_router_binding = self._get_resource_router_id_binding(
                context, firewall_db.Firewall, resource_id=fw['id'])
            edge_id = self._get_edge_id_by_vcns_edge_binding(
                context, service_router_binding.router_id)
            #TODO(linb): do rollback on error
            self.vcns_driver.update_firewall_rule(context, id, edge_id, fwr)
        return fwr
    def update_firewall_policy(self, context, id, firewall_policy):
        """Update a policy, re-pushing rules to every firewall using it.

        No-op on the backend when the effective rule list is unchanged
        or no firewall is associated with the policy.
        """
        LOG.debug("update_firewall_policy() called")
        self._ensure_firewall_policy_update_allowed(context, id)
        firewall_rules_pre = self._make_firewall_rule_list_by_policy_id(
            context, id)
        fwp = super(NsxAdvancedPlugin, self).update_firewall_policy(
            context, id, firewall_policy)
        firewall_rules = self._make_firewall_rule_list_by_policy_id(
            context, id)
        if firewall_rules_pre == firewall_rules:
            # Effective rules unchanged; skip the backend update.
            return fwp
        # check if this policy is associated with firewall
        fw_list = self._get_firewall_list_from_firewall_policy(context, id)
        if not fw_list:
            return fwp
        for fw in fw_list:
            # Get the router_service insertion binding with firewall id
            # TODO(fank): optimized by using _get_resource_router_id_bindings
            service_router_binding = self._get_resource_router_id_binding(
                context, firewall_db.Firewall, resource_id=fw['id'])
            self._vcns_update_firewall(
                context, fw, service_router_binding.router_id,
                firewall_rule_list=firewall_rules)
        return fwp
    def insert_rule(self, context, id, rule_info):
        """Insert a rule into a policy and propagate it to bound edges.

        Uses the vCNS positional insert API when insert_before/after is
        given; otherwise falls back to a bulk firewall reconfiguration.
        """
        LOG.debug("insert_rule() called")
        self._ensure_firewall_policy_update_allowed(context, id)
        fwp = super(NsxAdvancedPlugin, self).insert_rule(
            context, id, rule_info)
        fwr = super(NsxAdvancedPlugin, self).get_firewall_rule(
            context, rule_info['firewall_rule_id'])
        # check if this policy is associated with firewall
        fw_list = self._get_firewall_list_from_firewall_policy(context, id)
        if not fw_list:
            return fwp
        for fw in fw_list:
            # TODO(fank): optimized by using _get_resource_router_id_bindings
            service_router_binding = self._get_resource_router_id_binding(
                context, firewall_db.Firewall, resource_id=fw['id'])
            edge_id = self._get_edge_id_by_vcns_edge_binding(
                context, service_router_binding.router_id)
            if rule_info.get('insert_before') or rule_info.get('insert_after'):
                # if insert_before or insert_after is set, we would call
                # VCNS insert_rule API
                # TODO(linb): do rollback on error
                self.vcns_driver.insert_rule(context, rule_info, edge_id, fwr)
            else:
                # Else we would call bulk configuration on the firewall
                self._vcns_update_firewall(context, fw, edge_id=edge_id)
        return fwp
    def remove_rule(self, context, id, rule_info):
        """Remove a rule from a policy and delete it from bound edges."""
        LOG.debug("remove_rule() called")
        self._ensure_firewall_policy_update_allowed(context, id)
        fwp = super(NsxAdvancedPlugin, self).remove_rule(
            context, id, rule_info)
        fwr = super(NsxAdvancedPlugin, self).get_firewall_rule(
            context, rule_info['firewall_rule_id'])
        # check if this policy is associated with firewall
        fw_list = self._get_firewall_list_from_firewall_policy(context, id)
        if not fw_list:
            return fwp
        for fw in fw_list:
            # TODO(fank): optimized by using _get_resource_router_id_bindings
            service_router_binding = self._get_resource_router_id_binding(
                context, firewall_db.Firewall, resource_id=fw['id'])
            edge_id = self._get_edge_id_by_vcns_edge_binding(
                context, service_router_binding.router_id)
            #TODO(linb): do rollback on error
            self.vcns_driver.delete_firewall_rule(
                context, fwr['id'], edge_id)
        return fwp
#
# LBAAS service plugin implementation
#
    def _get_edge_id_by_vip_id(self, context, vip_id):
        """Map a LB vip id to the edge id of the router it is bound to.

        Any lookup failure is logged and re-raised via
        excutils.save_and_reraise_exception; the trailing return only
        runs on success.
        """
        try:
            service_router_binding = self._get_resource_router_id_binding(
                context, loadbalancer_db.Vip, resource_id=vip_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to find the edge with "
                                "vip_id: %s"), vip_id)
        return self._get_edge_id_by_vcns_edge_binding(
            context, service_router_binding.router_id)
def _get_all_vip_addrs_by_router_id(
self, context, router_id):
vip_bindings = self._get_resource_router_id_bindings(
context, loadbalancer_db.Vip, router_ids=[router_id])
vip_addrs = []
for vip_binding in vip_bindings:
vip = self.get_vip(context, vip_binding.resource_id)
vip_addrs.append(vip.get('address'))
return vip_addrs
def _add_router_service_insertion_binding(self, context, resource_id,
router_id,
model):
res = {
'id': resource_id,
'router_id': router_id
}
self._process_create_resource_router_id(context, res,
model)
def _resource_set_status(self, context, model, id, status, obj=None,
pool_id=None):
with context.session.begin(subtransactions=True):
try:
qry = context.session.query(model)
if issubclass(model, loadbalancer_db.PoolMonitorAssociation):
res = qry.filter_by(monitor_id=id,
pool_id=pool_id).one()
else:
res = qry.filter_by(id=id).one()
if status == service_constants.PENDING_UPDATE and (
res.get('status') == service_constants.PENDING_DELETE):
msg = (_("Operation can't be performed, Since resource "
"%(model)s : %(id)s is in DELETEing status!") %
{'model': model,
'id': id})
LOG.error(msg)
raise nsx_exc.NsxPluginException(err_msg=msg)
else:
res.status = status
except sa_exc.NoResultFound:
msg = (_("Resource %(model)s : %(id)s not found!") %
{'model': model,
'id': id})
LOG.exception(msg)
raise nsx_exc.NsxPluginException(err_msg=msg)
if obj:
obj['status'] = status
def _vcns_create_pool_and_monitors(self, context, pool_id, **kwargs):
pool = self.get_pool(context, pool_id)
edge_id = kwargs.get('edge_id')
if not edge_id:
edge_id = self._get_edge_id_by_vip_id(
context, pool['vip_id'])
#Check wheter the pool is already created on the router
#in case of future's M:N relation between Pool and Vip
#Check associated HealthMonitors and then create them
for monitor_id in pool.get('health_monitors'):
hm = self.get_health_monitor(context, monitor_id)
try:
self.vcns_driver.create_health_monitor(
context, edge_id, hm)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to create healthmonitor "
"associated with pool id: %s!") % pool_id)
for monitor_ide in pool.get('health_monitors'):
if monitor_ide == monitor_id:
break
self.vcns_driver.delete_health_monitor(
context, monitor_ide, edge_id)
#Create the pool on the edge
members = [
super(NsxAdvancedPlugin, self).get_member(
context, member_id)
for member_id in pool.get('members')
]
try:
self.vcns_driver.create_pool(context, edge_id, pool, members)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to create pool on vshield edge"))
self.vcns_driver.delete_pool(
context, pool_id, edge_id)
for monitor_id in pool.get('health_monitors'):
self.vcns_driver.delete_health_monitor(
context, monitor_id, edge_id)
def _vcns_update_pool(self, context, pool, **kwargs):
edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
members = kwargs.get('members')
if not members:
members = [
super(NsxAdvancedPlugin, self).get_member(
context, member_id)
for member_id in pool.get('members')
]
self.vcns_driver.update_pool(context, edge_id, pool, members)
def create_vip(self, context, vip):
LOG.debug("create_vip() called")
router_id = vip['vip'].get(vcns_const.ROUTER_ID)
if not router_id:
msg = _("router_id is not provided!")
LOG.error(msg)
raise n_exc.BadRequest(resource='router', msg=msg)
if not self._is_advanced_service_router(context, router_id):
msg = _("router_id: %s is not an advanced router!") % router_id
LOG.error(msg)
raise nsx_exc.NsxPluginException(err_msg=msg)
#Check whether the vip port is an external port
subnet_id = vip['vip']['subnet_id']
network_id = self.get_subnet(context, subnet_id)['network_id']
ext_net = self._get_network(context, network_id)
if not ext_net.external:
msg = (_("Network '%s' is not a valid external "
"network") % network_id)
raise nsx_exc.NsxPluginException(err_msg=msg)
v = super(NsxAdvancedPlugin, self).create_vip(context, vip)
#Get edge_id for the resource
router_binding = vcns_db.get_vcns_router_binding(
context.session,
router_id)
edge_id = router_binding.edge_id
#Add vip_router binding
self._add_router_service_insertion_binding(context, v['id'],
router_id,
loadbalancer_db.Vip)
#Create the vip port on vShield Edge
router = self._get_router(context, router_id)
self._update_interface(context, router, sync=True)
#Create the vip and associated pool/monitor on the corresponding edge
try:
self._vcns_create_pool_and_monitors(
context, v['pool_id'], edge_id=edge_id)
self.vcns_driver.create_vip(context, edge_id, v)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to create vip!"))
self._delete_resource_router_id_binding(
context, v['id'], loadbalancer_db.Vip)
super(NsxAdvancedPlugin, self).delete_vip(context, v['id'])
self._resource_set_status(context, loadbalancer_db.Vip,
v['id'], service_constants.ACTIVE, v)
v[rsi.ROUTER_ID] = router_id
return v
def update_vip(self, context, id, vip):
edge_id = self._get_edge_id_by_vip_id(context, id)
old_vip = self.get_vip(context, id)
session_persistence_update = bool(
vip['vip'].get('session_persistence'))
vip['vip']['status'] = service_constants.PENDING_UPDATE
v = super(NsxAdvancedPlugin, self).update_vip(context, id, vip)
v[rsi.ROUTER_ID] = self._get_resource_router_id_binding(
context, loadbalancer_db.Vip, resource_id=id)['router_id']
if old_vip['pool_id'] != v['pool_id']:
self.vcns_driver.delete_vip(context, id)
#Delete old pool/monitor on the edge
#TODO(linb): Factor out procedure for removing pool and health
#separate method
old_pool = self.get_pool(context, old_vip['pool_id'])
self.vcns_driver.delete_pool(
context, old_vip['pool_id'], edge_id)
for monitor_id in old_pool.get('health_monitors'):
self.vcns_driver.delete_health_monitor(
context, monitor_id, edge_id)
#Create new pool/monitor object on the edge
#TODO(linb): add exception handle if error
self._vcns_create_pool_and_monitors(
context, v['pool_id'], edge_id=edge_id)
self.vcns_driver.create_vip(context, edge_id, v)
return v
try:
self.vcns_driver.update_vip(context, v, session_persistence_update)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update vip with id: %s!"), id)
self._resource_set_status(context, loadbalancer_db.Vip,
id, service_constants.ERROR, v)
self._resource_set_status(context, loadbalancer_db.Vip,
v['id'], service_constants.ACTIVE, v)
return v
def delete_vip(self, context, id):
v = self.get_vip(context, id)
self._resource_set_status(
context, loadbalancer_db.Vip,
id, service_constants.PENDING_DELETE)
try:
self.vcns_driver.delete_vip(context, id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to delete vip with id: %s!"), id)
self._resource_set_status(context, loadbalancer_db.Vip,
id, service_constants.ERROR)
edge_id = self._get_edge_id_by_vip_id(context, id)
#Check associated HealthMonitors and then delete them
pool = self.get_pool(context, v['pool_id'])
self.vcns_driver.delete_pool(context, v['pool_id'], edge_id)
for monitor_id in pool.get('health_monitors'):
#TODO(linb): do exception handle if error
self.vcns_driver.delete_health_monitor(
context, monitor_id, edge_id)
router_binding = self._get_resource_router_id_binding(
context, loadbalancer_db.Vip, resource_id=id)
router = self._get_router(context, router_binding.router_id)
self._delete_resource_router_id_binding(
context, id, loadbalancer_db.Vip)
super(NsxAdvancedPlugin, self).delete_vip(context, id)
self._update_interface(context, router, sync=True)
def get_vip(self, context, id, fields=None):
vip = super(NsxAdvancedPlugin, self).get_vip(context, id, fields)
if fields and rsi.ROUTER_ID not in fields:
return vip
service_router_binding = self._get_resource_router_id_binding(
context, loadbalancer_db.Vip, resource_id=vip['id'])
vip[rsi.ROUTER_ID] = service_router_binding['router_id']
return vip
def get_vips(self, context, filters=None, fields=None):
vips = super(NsxAdvancedPlugin, self).get_vips(
context, filters, fields)
if fields and rsi.ROUTER_ID not in fields:
return vips
service_router_bindings = self._get_resource_router_id_bindings(
context, loadbalancer_db.Vip,
resource_ids=[vip['id'] for vip in vips])
mapping = dict([(binding['resource_id'], binding['router_id'])
for binding in service_router_bindings])
for vip in vips:
vip[rsi.ROUTER_ID] = mapping[vip['id']]
return vips
def update_pool(self, context, id, pool):
pool['pool']['status'] = service_constants.PENDING_UPDATE
p = super(NsxAdvancedPlugin, self).update_pool(context, id, pool)
#Check whether the pool is already associated with the vip
if not p.get('vip_id'):
self._resource_set_status(context, loadbalancer_db.Pool,
p['id'], service_constants.ACTIVE, p)
return p
try:
self._vcns_update_pool(context, p)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with id: %s!"), id)
self._resource_set_status(context, loadbalancer_db.Pool,
p['id'], service_constants.ERROR, p)
self._resource_set_status(context, loadbalancer_db.Pool,
p['id'], service_constants.ACTIVE, p)
return p
def create_member(self, context, member):
m = super(NsxAdvancedPlugin, self).create_member(context, member)
pool_id = m.get('pool_id')
pool = self.get_pool(context, pool_id)
if not pool.get('vip_id'):
self._resource_set_status(context, loadbalancer_db.Member,
m['id'], service_constants.ACTIVE, m)
return m
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id,
service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with the member"))
super(NsxAdvancedPlugin, self).delete_member(context, m['id'])
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
self._resource_set_status(context, loadbalancer_db.Member,
m['id'], service_constants.ACTIVE, m)
return m
def update_member(self, context, id, member):
member['member']['status'] = service_constants.PENDING_UPDATE
old_member = self.get_member(context, id)
m = super(NsxAdvancedPlugin, self).update_member(
context, id, member)
if m['pool_id'] != old_member['pool_id']:
old_pool_id = old_member['pool_id']
old_pool = self.get_pool(context, old_pool_id)
if old_pool.get('vip_id'):
self._resource_set_status(
context, loadbalancer_db.Pool,
old_pool_id, service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, old_pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update old pool "
"with the member"))
super(NsxAdvancedPlugin, self).delete_member(
context, m['id'])
self._resource_set_status(
context, loadbalancer_db.Pool,
old_pool_id, service_constants.ACTIVE)
pool_id = m['pool_id']
pool = self.get_pool(context, pool_id)
if not pool.get('vip_id'):
self._resource_set_status(context, loadbalancer_db.Member,
m['id'], service_constants.ACTIVE, m)
return m
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id,
service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with the member"))
super(NsxAdvancedPlugin, self).delete_member(
context, m['id'])
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
self._resource_set_status(context, loadbalancer_db.Member,
m['id'], service_constants.ACTIVE, m)
return m
def delete_member(self, context, id):
m = self.get_member(context, id)
super(NsxAdvancedPlugin, self).delete_member(context, id)
pool_id = m['pool_id']
pool = self.get_pool(context, pool_id)
if not pool.get('vip_id'):
return
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id, service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with the member"))
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
def update_health_monitor(self, context, id, health_monitor):
old_hm = super(NsxAdvancedPlugin, self).get_health_monitor(
context, id)
hm = super(NsxAdvancedPlugin, self).update_health_monitor(
context, id, health_monitor)
for hm_pool in hm.get('pools'):
pool_id = hm_pool['pool_id']
pool = self.get_pool(context, pool_id)
if pool.get('vip_id'):
edge_id = self._get_edge_id_by_vip_id(
context, pool['vip_id'])
try:
self.vcns_driver.update_health_monitor(
context, edge_id, old_hm, hm)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update monitor "
"with id: %s!"), id)
return hm
def create_pool_health_monitor(self, context,
health_monitor, pool_id):
monitor_id = health_monitor['health_monitor']['id']
pool = self.get_pool(context, pool_id)
monitors = pool.get('health_monitors')
if len(monitors) > 0:
msg = _("Vcns right now can only support "
"one monitor per pool")
LOG.error(msg)
raise nsx_exc.NsxPluginException(err_msg=msg)
#Check whether the pool is already associated with the vip
if not pool.get('vip_id'):
res = super(NsxAdvancedPlugin,
self).create_pool_health_monitor(context,
health_monitor,
pool_id)
return res
#Get the edge_id
edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
res = super(NsxAdvancedPlugin,
self).create_pool_health_monitor(context,
health_monitor,
pool_id)
monitor = self.get_health_monitor(context, monitor_id)
#TODO(linb)Add Exception handle if error
self.vcns_driver.create_health_monitor(context, edge_id, monitor)
#Get updated pool
pool['health_monitors'].append(monitor['id'])
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to associate monitor with pool!"))
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.ERROR)
super(NsxAdvancedPlugin, self).delete_pool_health_monitor(
context, monitor_id, pool_id)
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
self._resource_set_status(
context, loadbalancer_db.PoolMonitorAssociation,
monitor_id, service_constants.ACTIVE, res,
pool_id=pool_id)
return res
def delete_pool_health_monitor(self, context, id, pool_id):
super(NsxAdvancedPlugin, self).delete_pool_health_monitor(
context, id, pool_id)
pool = self.get_pool(context, pool_id)
#Check whether the pool is already associated with the vip
if pool.get('vip_id'):
#Delete the monitor on vshield edge
edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(
_("Failed to update pool with pool_monitor!"))
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.ERROR)
#TODO(linb): Add exception handle if error
self.vcns_driver.delete_health_monitor(context, id, edge_id)
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
def _vcns_update_ipsec_config(
self, context, vpnservice_id, removed_ipsec_conn_id=None):
sites = []
vpn_service = self._get_vpnservice(context, vpnservice_id)
edge_id = self._get_edge_id_by_vcns_edge_binding(
context, vpn_service.router_id)
if not vpn_service.router.gw_port:
msg = _("Failed to update ipsec vpn configuration on edge, since "
"the router: %s does not have a gateway yet!"
) % vpn_service.router_id
LOG.error(msg)
raise exceptions.VcnsBadRequest(resource='router', msg=msg)
external_ip = vpn_service.router.gw_port['fixed_ips'][0]['ip_address']
subnet = self._make_subnet_dict(vpn_service.subnet)
for ipsec_site_conn in vpn_service.ipsec_site_connections:
if ipsec_site_conn.id != removed_ipsec_conn_id:
site = self._make_ipsec_site_connection_dict(ipsec_site_conn)
ikepolicy = self._make_ikepolicy_dict(
ipsec_site_conn.ikepolicy)
ipsecpolicy = self._make_ipsecpolicy_dict(
ipsec_site_conn.ipsecpolicy)
sites.append({'site': site,
'ikepolicy': ikepolicy,
'ipsecpolicy': ipsecpolicy,
'subnet': subnet,
'external_ip': external_ip})
try:
self.vcns_driver.update_ipsec_config(
edge_id, sites, enabled=vpn_service.admin_state_up)
except exceptions.VcnsBadRequest:
with excutils.save_and_reraise_exception():
LOG.exception(_("Bad or unsupported Input request!"))
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
msg = (_("Failed to update ipsec VPN configuration "
"with vpnservice: %(vpnservice_id)s on vShield Edge: "
"%(edge_id)s") % {'vpnservice_id': vpnservice_id,
'edge_id': edge_id})
LOG.exception(msg)
def create_vpnservice(self, context, vpnservice):
LOG.debug("create_vpnservice() called")
router_id = vpnservice['vpnservice'].get('router_id')
if not self._is_advanced_service_router(context, router_id):
msg = _("router_id:%s is not an advanced router!") % router_id
LOG.warning(msg)
raise exceptions.VcnsBadRequest(resource='router', msg=msg)
if self.get_vpnservices(context, filters={'router_id': [router_id]}):
msg = _("a vpnservice is already associated with the router: %s"
) % router_id
LOG.warning(msg)
raise nsx_exc.ServiceOverQuota(
overs='vpnservice', err_msg=msg)
service = super(NsxAdvancedPlugin, self).create_vpnservice(
context, vpnservice)
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ACTIVE, service)
return service
def update_vpnservice(self, context, vpnservice_id, vpnservice):
vpnservice['vpnservice']['status'] = service_constants.PENDING_UPDATE
service = super(NsxAdvancedPlugin, self).update_vpnservice(
context, vpnservice_id, vpnservice)
# Only admin_state_up attribute is configurable on Edge.
if vpnservice['vpnservice'].get('admin_state_up') is None:
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ACTIVE, service)
return service
# Test whether there is one ipsec site connection attached to
# the vpnservice. If not, just return without updating ipsec
# config on edge side.
vpn_service_db = self._get_vpnservice(context, vpnservice_id)
if not vpn_service_db.ipsec_site_connections:
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ACTIVE, service)
return service
try:
self._vcns_update_ipsec_config(context, service['id'])
except Exception:
with excutils.save_and_reraise_exception():
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ERROR, service)
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ACTIVE, service)
return service
def create_ipsec_site_connection(self, context, ipsec_site_connection):
ipsec_site_conn = super(
NsxAdvancedPlugin, self).create_ipsec_site_connection(
context, ipsec_site_connection)
try:
self._vcns_update_ipsec_config(
context, ipsec_site_conn['vpnservice_id'])
except Exception:
with excutils.save_and_reraise_exception():
super(NsxAdvancedPlugin, self).delete_ipsec_site_connection(
context, ipsec_site_conn['id'])
self._resource_set_status(
context, vpn_db.IPsecSiteConnection,
ipsec_site_conn['id'], service_constants.ACTIVE, ipsec_site_conn)
return ipsec_site_conn
def update_ipsec_site_connection(self, context, ipsec_site_connection_id,
ipsec_site_connection):
ipsec_site_connection['ipsec_site_connection']['status'] = (
service_constants.PENDING_UPDATE)
ipsec_site_conn = super(
NsxAdvancedPlugin, self).update_ipsec_site_connection(
context, ipsec_site_connection_id, ipsec_site_connection)
try:
self._vcns_update_ipsec_config(
context, ipsec_site_conn['vpnservice_id'])
except Exception:
with excutils.save_and_reraise_exception():
self._resource_set_status(
context, vpn_db.IPsecSiteConnection, ipsec_site_conn['id'],
service_constants.ERROR, ipsec_site_conn)
self._resource_set_status(
context, vpn_db.IPsecSiteConnection,
ipsec_site_conn['id'], service_constants.ACTIVE, ipsec_site_conn)
return ipsec_site_conn
def delete_ipsec_site_connection(self, context, ipsec_site_conn_id):
self._resource_set_status(
context, vpn_db.IPsecSiteConnection,
ipsec_site_conn_id, service_constants.PENDING_DELETE)
vpnservice_id = self.get_ipsec_site_connection(
context, ipsec_site_conn_id)['vpnservice_id']
try:
self._vcns_update_ipsec_config(
context, vpnservice_id, ipsec_site_conn_id)
except Exception:
with excutils.save_and_reraise_exception():
self._resource_set_status(
context, vpn_db.IPsecSiteConnection, ipsec_site_conn_id,
service_constants.ERROR)
super(NsxAdvancedPlugin, self).delete_ipsec_site_connection(
context, ipsec_site_conn_id)
class VcnsCallbacks(object):
"""Edge callback implementation Callback functions for
asynchronous tasks.
"""
def __init__(self, plugin):
self.plugin = plugin
def edge_deploy_started(self, task):
"""callback when deployment task started."""
jobdata = task.userdata['jobdata']
context = jobdata['context']
edge_id = task.userdata.get('edge_id')
neutron_router_id = jobdata['neutron_router_id']
name = task.userdata['router_name']
if edge_id:
LOG.debug("Start deploying %(edge_id)s for router %(name)s", {
'edge_id': edge_id,
'name': name})
vcns_db.update_vcns_router_binding(
context.session, neutron_router_id, edge_id=edge_id)
else:
LOG.debug("Failed to deploy Edge for router %s", name)
vcns_db.update_vcns_router_binding(
context.session, neutron_router_id,
status=service_constants.ERROR)
def edge_deploy_result(self, task):
"""callback when deployment task finished."""
jobdata = task.userdata['jobdata']
lrouter = jobdata['lrouter']
context = jobdata['context']
name = task.userdata['router_name']
neutron_router_id = jobdata['neutron_router_id']
router_db = None
try:
router_db = self.plugin._get_router(
context, neutron_router_id)
except l3.RouterNotFound:
# Router might have been deleted before deploy finished
LOG.exception(_("Router %s not found"), lrouter['uuid'])
if task.status == tasks_const.TaskStatus.COMPLETED:
LOG.debug("Successfully deployed %(edge_id)s for "
"router %(name)s", {
'edge_id': task.userdata['edge_id'],
'name': name})
if (router_db and
router_db['status'] == service_constants.PENDING_CREATE):
router_db['status'] = service_constants.ACTIVE
binding = vcns_db.get_vcns_router_binding(
context.session, neutron_router_id)
# only update status to active if its status is pending create
if binding['status'] == service_constants.PENDING_CREATE:
vcns_db.update_vcns_router_binding(
context.session, neutron_router_id,
status=service_constants.ACTIVE)
else:
LOG.debug("Failed to deploy Edge for router %s", name)
if router_db:
router_db['status'] = service_constants.ERROR
vcns_db.update_vcns_router_binding(
context.session, neutron_router_id,
status=service_constants.ERROR)
def edge_delete_result(self, task):
jobdata = task.userdata['jobdata']
router_id = task.userdata['router_id']
context = jobdata['context']
if task.status == tasks_const.TaskStatus.COMPLETED:
vcns_db.delete_vcns_router_binding(context.session,
router_id)
def interface_update_result(self, task):
LOG.debug("interface_update_result %d", task.status)
def snat_create_result(self, task):
LOG.debug("snat_create_result %d", task.status)
def snat_delete_result(self, task):
LOG.debug("snat_delete_result %d", task.status)
def dnat_create_result(self, task):
LOG.debug("dnat_create_result %d", task.status)
def dnat_delete_result(self, task):
LOG.debug("dnat_delete_result %d", task.status)
def routes_update_result(self, task):
LOG.debug("routes_update_result %d", task.status)
def nat_update_result(self, task):
LOG.debug("nat_update_result %d", task.status)
def _process_base_create_lswitch_args(*args, **kwargs):
tags = utils.get_tags()
tags.append({"tag": args[1],
"scope": "quantum_net_id"})
if args[2]:
tags.append({"tag": args[2], "scope": "os_tid"})
switch_name = args[3]
tz_config = args[4]
if kwargs.get("shared", False) or len(args) >= 6:
tags.append({"tag": "true", "scope": "shared"})
if kwargs.get("tags"):
tags.extend(kwargs["tags"])
return switch_name, tz_config, tags
| apache-2.0 |
cpennington/edx-platform | lms/djangoapps/courseware/tests/test_credit_requirements.py | 4 | 6551 | """
Tests for credit requirement display on the progress page.
"""
import ddt
import six
from django.conf import settings
from django.urls import reverse
from mock import patch
from course_modes.models import CourseMode
from openedx.core.djangoapps.credit import api as credit_api
from openedx.core.djangoapps.credit.models import CreditCourse
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@patch.dict(settings.FEATURES, {"ENABLE_CREDIT_ELIGIBILITY": True})
@ddt.ddt
class ProgressPageCreditRequirementsTest(SharedModuleStoreTestCase):
"""
Tests for credit requirement display on the progress page.
"""
USERNAME = "bob"
PASSWORD = "test"
USER_FULL_NAME = "Bob"
MIN_GRADE_REQ_DISPLAY = "Final Grade Credit Requirement"
VERIFICATION_REQ_DISPLAY = "Midterm Exam Credit Requirement"
@classmethod
def setUpClass(cls):
super(ProgressPageCreditRequirementsTest, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(ProgressPageCreditRequirementsTest, self).setUp()
# Configure course as a credit course
CreditCourse.objects.create(course_key=self.course.id, enabled=True)
# Configure credit requirements (passing grade and in-course reverification)
credit_api.set_credit_requirements(
self.course.id,
[
{
"namespace": "grade",
"name": "grade",
"display_name": self.MIN_GRADE_REQ_DISPLAY,
"criteria": {
"min_grade": 0.8
}
},
{
"namespace": "reverification",
"name": "midterm",
"display_name": self.VERIFICATION_REQ_DISPLAY,
"criteria": {}
}
]
)
# Create a user and log in
self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
self.user.profile.name = self.USER_FULL_NAME
self.user.profile.save()
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result, msg="Could not log in")
# Enroll the user in the course as "verified"
self.enrollment = CourseEnrollmentFactory(
user=self.user,
course_id=self.course.id,
mode="verified"
)
def test_credit_requirements_maybe_eligible(self):
# The user hasn't satisfied any of the credit requirements yet, but she
# also hasn't failed any.
response = self._get_progress_page()
# Expect that the requirements are displayed
self.assertContains(response, self.MIN_GRADE_REQ_DISPLAY)
self.assertContains(response, self.VERIFICATION_REQ_DISPLAY)
self.assertContains(response, "Upcoming")
self.assertContains(
response,
u"{}, you have not yet met the requirements for credit".format(self.USER_FULL_NAME)
)
def test_credit_requirements_eligible(self):
"""
Mark the user as eligible for all requirements. Requirements are only displayed
for credit and verified enrollments.
"""
credit_api.set_credit_requirement_status(
self.user, self.course.id,
"grade", "grade",
status="satisfied",
reason={"final_grade": 0.95}
)
credit_api.set_credit_requirement_status(
self.user, self.course.id,
"reverification", "midterm",
status="satisfied", reason={}
)
# Check the progress page display
response = self._get_progress_page()
self.assertContains(response, self.MIN_GRADE_REQ_DISPLAY)
self.assertContains(response, self.VERIFICATION_REQ_DISPLAY)
self.assertContains(
response,
u"{}, you have met the requirements for credit in this course.".format(self.USER_FULL_NAME)
)
self.assertContains(response, u"Completed by {date}")
credit_requirements = credit_api.get_credit_requirement_status(self.course.id, self.user.username)
for requirement in credit_requirements:
self.assertContains(response, requirement['status_date'].strftime(u'%Y-%m-%d %H:%M'))
self.assertNotContains(response, "95%")
def test_credit_requirements_not_eligible(self):
"""
Mark the user as having failed both requirements. Requirements are only displayed
for credit and verified enrollments.
"""
credit_api.set_credit_requirement_status(
self.user, self.course.id,
"reverification", "midterm",
status="failed", reason={}
)
# Check the progress page display
response = self._get_progress_page()
self.assertContains(response, self.MIN_GRADE_REQ_DISPLAY)
self.assertContains(response, self.VERIFICATION_REQ_DISPLAY)
self.assertContains(
response,
u"{}, you are no longer eligible for credit in this course.".format(self.USER_FULL_NAME)
)
self.assertContains(response, "Verification Failed")
@ddt.data(
(CourseMode.VERIFIED, True),
(CourseMode.CREDIT_MODE, True),
(CourseMode.HONOR, False),
(CourseMode.AUDIT, False),
(CourseMode.PROFESSIONAL, False),
(CourseMode.NO_ID_PROFESSIONAL_MODE, False)
)
@ddt.unpack
def test_credit_requirements_on_progress_page(self, enrollment_mode, is_requirement_displayed):
"""Test the progress table is only displayed to the verified and credit students."""
self.enrollment.mode = enrollment_mode
self.enrollment.save()
response = self._get_progress_page()
# Verify the requirements are shown only if the user is in a credit-eligible mode.
classes = ('credit-eligibility', 'eligibility-heading')
method = self.assertContains if is_requirement_displayed else self.assertNotContains
for _class in classes:
method(response, _class)
def _get_progress_page(self):
"""Load the progress page for the course the user is enrolled in. """
url = reverse("progress", kwargs={"course_id": six.text_type(self.course.id)})
return self.client.get(url)
| agpl-3.0 |
hardanimal/UFT_UPGEM | src/UFT/backend/configuration.py | 1 | 3635 | #!/usr/bin/env python
# encoding: utf-8
"""PGEM test configuration model.
Default connect to configuration.db which save the test items settings.
"""
__version__ = "0.1"
__author__ = "@fanmuzhi, @boqiling"
__all__ = ["PGEMConfig", "TestItem"]
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Float, String, Boolean
from sqlalchemy import ForeignKey, UniqueConstraint
from sqlalchemy.orm import relationship
SQLBase = declarative_base()
class PGEMConfig(SQLBase):
__tablename__ = "configuration"
id = Column(Integer, primary_key=True)
partnumber = Column(String(20), nullable=False)
description = Column(String(50))
revision = Column(String(5), nullable=False)
testitems = relationship("TestItem", backref="configuration",
cascade="all, delete-orphan")
__table_args__ = (UniqueConstraint('partnumber',
'revision',
name='_partnumber_revision_uc_'),)
def to_dict(self):
items_list = {}
for item in self.testitems:
items_list.update(item.to_dict())
# items_list = {"ITEM": items_list}
return {"partnumber": self.partnumber,
"description": self.description,
"revision": self.revision,
"testitems": items_list}
class TestItem(SQLBase):
__tablename__ = "test_item"
id = Column(Integer, primary_key=True)
configid = Column(Integer, ForeignKey("configuration.id"))
name = Column(String(10), nullable=False)
description = Column(String(30))
enable = Column(Boolean, nullable=False)
min = Column(Float)
max = Column(Float)
stoponfail = Column(Boolean, default=True)
misc = Column(String(50))
def to_dict(self):
return {
self.name: {
"description": self.description,
"enable": int(self.enable),
"min": self.min,
"max": self.max,
"stoponfail": int(self.stoponfail),
"misc": self.misc
}
}
if __name__ == "__main__":
from session import SessionManager
dburi = "sqlite:///configuration.db"
sm = SessionManager()
session = sm.get_session(dburi)
sm.prepare_db(dburi, [PGEMConfig, TestItem])
# Insert Example
CrystalConfig = PGEMConfig()
CrystalConfig.partnumber = "AGIGA9601-002BCA"
CrystalConfig.description = "Crystal"
CrystalConfig.revision = "04"
CheckTemp = TestItem()
CheckTemp.name = "Check_Temp"
CheckTemp.description = "Check Temperature on chip SE97BTP, data in degree"
CheckTemp.enable = True
CheckTemp.min = 5.0
CheckTemp.max = 30.0
CheckTemp.stoponfail = False
Charge = TestItem()
Charge.name = "Charge"
Charge.description = "Charge DUT with BQ24707, limition in seconds"
Charge.enable = True
Charge.min = 30.0
Charge.max = 120.0
Charge.stoponfail = True
try:
CrystalConfig.testitems.append(CheckTemp)
CrystalConfig.testitems.append(Charge)
session.add(CrystalConfig)
session.commit()
except Exception as e:
print e
session.rollback()
# Query Example
crystal = session.query(PGEMConfig).filter(
PGEMConfig.partnumber == "AGIGA9601-002BCA",
PGEMConfig.revision == "04").first()
for testitem in crystal.testitems:
if testitem.name == "Charge":
print testitem.name
print testitem.description
print testitem.max
print crystal.to_dict()
| gpl-3.0 |
CouchPotato/CouchPotatoServer | libs/synchronousdeluge/client.py | 151 | 5078 | import os
import platform
from collections import defaultdict
from itertools import imap
from synchronousdeluge.exceptions import DelugeRPCError
from synchronousdeluge.protocol import DelugeRPCRequest, DelugeRPCResponse
from synchronousdeluge.transfer import DelugeTransfer
__all__ = ["DelugeClient"]
RPC_RESPONSE = 1
RPC_ERROR = 2
RPC_EVENT = 3
class DelugeClient(object):
    """Synchronous client for a deluge daemon.

    Speaks the deluge RPC protocol over a ``DelugeTransfer`` connection;
    after ``connect()``/``_introspect()`` the daemon's RPC modules are
    exposed as attributes (e.g. ``client.core``). Python 2 only
    (``except X, e`` syntax, ``imap``).
    """
    def __init__(self):
        """A deluge client session."""
        self.transfer = DelugeTransfer()
        self.modules = []
        self._request_counter = 0

    def _get_local_auth(self):
        """Read the local daemon's credentials from deluge's ``auth`` file.

        Returns a ``(username, password)`` tuple for the "localclient"
        entry, or ``("", "")`` when no usable entry is found.
        """
        auth_file = ""
        username = password = ""
        if platform.system() in ('Windows', 'Microsoft'):
            appDataPath = os.environ.get("APPDATA")
            if not appDataPath:
                # Fall back to the registry when %APPDATA% is unset.
                import _winreg
                hkey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders")
                appDataReg = _winreg.QueryValueEx(hkey, "AppData")
                appDataPath = appDataReg[0]
                _winreg.CloseKey(hkey)
            auth_file = os.path.join(appDataPath, "deluge", "auth")
        else:
            from xdg.BaseDirectory import save_config_path
            try:
                auth_file = os.path.join(save_config_path("deluge"), "auth")
            except OSError, e:
                # No XDG config dir available: give up with empty credentials.
                return username, password
        if os.path.exists(auth_file):
            for line in open(auth_file):
                if line.startswith("#"):
                    # This is a comment line
                    continue
                line = line.strip()
                try:
                    lsplit = line.split(":")
                except Exception, e:
                    continue
                # auth lines are "user:password" or "user:password:level"
                if len(lsplit) == 2:
                    username, password = lsplit
                elif len(lsplit) == 3:
                    username, password, level = lsplit
                else:
                    continue
                if username == "localclient":
                    return (username, password)
        return ("", "")

    def _create_module_method(self, module, method):
        # Build a proxy function so that e.g. ``client.core.get_torrents(...)``
        # forwards to ``remote_call("core.get_torrents", ...)``.
        fullname = "{0}.{1}".format(module, method)
        def func(obj, *args, **kwargs):
            return self.remote_call(fullname, *args, **kwargs)
        func.__name__ = method
        return func

    def _introspect(self):
        """Query the daemon's method list and attach one proxy object
        (a dynamically built class instance) per RPC module."""
        self.modules = []
        methods = self.remote_call("daemon.get_method_list").get()
        methodmap = defaultdict(dict)
        splitter = lambda v: v.split(".")
        for module, method in imap(splitter, methods):
            methodmap[module][method] = self._create_module_method(module, method)
        for module, methods in methodmap.items():
            clsname = "DelugeModule{0}".format(module.capitalize())
            cls = type(clsname, (), methods)
            setattr(self, module, cls())
            self.modules.append(module)

    def remote_call(self, method, *args, **kwargs):
        """Send one RPC request and return a ``DelugeRPCResponse``
        holding either the result value or a ``DelugeRPCError``.

        Returns ``None`` for malformed reply frames (non-tuple or fewer
        than 3 elements).
        """
        req = DelugeRPCRequest(self._request_counter, method, *args, **kwargs)
        message = next(self.transfer.send_request(req))
        response = DelugeRPCResponse()
        if not isinstance(message, tuple):
            return
        if len(message) < 3:
            return
        message_type = message[0]
        # Event dispatch (RPC_EVENT) was intentionally removed; only direct
        # responses and errors are handled below.
        if message_type in (RPC_RESPONSE, RPC_ERROR):
            request_id = message[1]
            value = message[2]
            # Only accept the reply that matches the request just sent.
            if request_id == self._request_counter:
                if message_type == RPC_RESPONSE:
                    response.set(value)
                elif message_type == RPC_ERROR:
                    err = DelugeRPCError(*value)
                    response.set_exception(err)
        self._request_counter += 1
        return response

    def connect(self, host="127.0.0.1", port=58846, username="", password=""):
        """Connects to a daemon process.

        :param host: str, the hostname of the daemon
        :param port: int, the port of the daemon
        :param username: str, the username to login with
        :param password: str, the password to login with
        """
        # Connect transport
        self.transfer.connect((host, port))
        # Attempt to fetch local auth info if needed
        if not username and host in ("127.0.0.1", "localhost"):
            username, password = self._get_local_auth()
        # Authenticate
        self.remote_call("daemon.login", username, password).get()
        # Introspect available methods
        self._introspect()

    @property
    def connected(self):
        # True while the underlying transport holds an open connection.
        return self.transfer.connected

    def disconnect(self):
        """Disconnects from the daemon."""
        self.transfer.disconnect()
| gpl-3.0 |
cloudera/recordservice | tests/hs2/test_fetch.py | 16 | 11507 | # Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import re
from tests.hs2.hs2_test_suite import HS2TestSuite, needs_session
from TCLIService import TCLIService, constants
from TCLIService.ttypes import TTypeId
# Simple test to make sure all the HS2 types are supported for both the row and
# column-oriented versions of the HS2 protocol.
class TestFetch(HS2TestSuite):
    """HS2 fetch/metadata integration tests run against a live Impala
    service (via the ``hs2_client`` provided by HS2TestSuite)."""

    def __verify_primitive_type(self, expected_type, hs2_type):
        # Assert that an HS2 column descriptor carries the expected TTypeId.
        assert hs2_type.typeDesc.types[0].primitiveEntry.type == expected_type

    def __verify_char_max_len(self, t, max_len):
        # Assert the CHAR/VARCHAR length qualifier on a column descriptor.
        l = t.typeDesc.types[0].primitiveEntry.typeQualifiers.qualifiers\
            [constants.CHARACTER_MAXIMUM_LENGTH]
        assert l.i32Value == max_len

    def __verify_decimal_precision_scale(self, hs2_type, precision, scale):
        # Assert the DECIMAL precision/scale qualifiers on a column descriptor.
        p = hs2_type.typeDesc.types[0].primitiveEntry.typeQualifiers.qualifiers\
            [constants.PRECISION]
        s = hs2_type.typeDesc.types[0].primitiveEntry.typeQualifiers.qualifiers\
            [constants.SCALE]
        assert p.i32Value == precision
        assert s.i32Value == scale

    @needs_session(TCLIService.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1)
    def test_result_metadata_v1(self):
        """Result-set metadata checks for primitive, DECIMAL and
        CHAR/VARCHAR columns under the V1 (row-oriented) protocol."""
        execute_statement_req = TCLIService.TExecuteStatementReq()
        execute_statement_req.sessionHandle = self.session_handle
        # Verify all primitive types in the alltypes table.
        execute_statement_req.statement =\
            "SELECT * FROM functional.alltypessmall ORDER BY id LIMIT 1"
        execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
        HS2TestSuite.check_response(execute_statement_resp)
        results = self.fetch_at_most(execute_statement_resp.operationHandle,
                                     TCLIService.TFetchOrientation.FETCH_NEXT, 1, 1)
        assert len(results.results.rows) == 1
        metadata_resp = self.result_metadata(execute_statement_resp.operationHandle)
        column_types = metadata_resp.schema.columns
        assert len(column_types) == 13
        self.__verify_primitive_type(TTypeId.INT_TYPE, column_types[0])
        self.__verify_primitive_type(TTypeId.BOOLEAN_TYPE, column_types[1])
        self.__verify_primitive_type(TTypeId.TINYINT_TYPE, column_types[2])
        self.__verify_primitive_type(TTypeId.SMALLINT_TYPE, column_types[3])
        self.__verify_primitive_type(TTypeId.INT_TYPE, column_types[4])
        self.__verify_primitive_type(TTypeId.BIGINT_TYPE, column_types[5])
        self.__verify_primitive_type(TTypeId.FLOAT_TYPE, column_types[6])
        self.__verify_primitive_type(TTypeId.DOUBLE_TYPE, column_types[7])
        self.__verify_primitive_type(TTypeId.STRING_TYPE, column_types[8])
        self.__verify_primitive_type(TTypeId.STRING_TYPE, column_types[9])
        self.__verify_primitive_type(TTypeId.TIMESTAMP_TYPE, column_types[10])
        self.__verify_primitive_type(TTypeId.INT_TYPE, column_types[11])
        self.__verify_primitive_type(TTypeId.INT_TYPE, column_types[12])
        self.close(execute_statement_resp.operationHandle)
        # Verify the result metadata for the DECIMAL type.
        execute_statement_req.statement =\
            "SELECT d1,d5 FROM functional.decimal_tbl ORDER BY d1 LIMIT 1"
        execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
        HS2TestSuite.check_response(execute_statement_resp)
        results = self.fetch_at_most(execute_statement_resp.operationHandle,
                                     TCLIService.TFetchOrientation.FETCH_NEXT, 1, 1)
        assert len(results.results.rows) == 1
        # Verify the result schema is what we expect. The result has 2 columns, the
        # first is decimal(9,0) and the second is decimal(10,5)
        metadata_resp = self.result_metadata(execute_statement_resp.operationHandle)
        column_types = metadata_resp.schema.columns
        assert len(column_types) == 2
        self.__verify_primitive_type(TTypeId.DECIMAL_TYPE, column_types[0])
        self.__verify_decimal_precision_scale(column_types[0], 9, 0)
        self.__verify_primitive_type(TTypeId.DECIMAL_TYPE, column_types[1])
        self.__verify_decimal_precision_scale(column_types[1], 10, 5)
        self.close(execute_statement_resp.operationHandle)
        # Verify the result metadata for the CHAR/VARCHAR types.
        execute_statement_req.statement =\
            "SELECT * FROM functional.chars_tiny ORDER BY cs LIMIT 1"
        execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
        HS2TestSuite.check_response(execute_statement_resp)
        results = self.fetch_at_most(execute_statement_resp.operationHandle,
                                     TCLIService.TFetchOrientation.FETCH_NEXT, 1, 1)
        assert len(results.results.rows) == 1
        metadata_resp = self.result_metadata(execute_statement_resp.operationHandle)
        column_types = metadata_resp.schema.columns
        assert len(column_types) == 3
        self.__verify_primitive_type(TTypeId.CHAR_TYPE, column_types[0])
        self.__verify_char_max_len(column_types[0], 5)
        self.__verify_primitive_type(TTypeId.CHAR_TYPE, column_types[1])
        self.__verify_char_max_len(column_types[1], 140)
        self.__verify_primitive_type(TTypeId.VARCHAR_TYPE, column_types[2])
        self.__verify_char_max_len(column_types[2], 32)
        self.close(execute_statement_resp.operationHandle)

    def __query_and_fetch(self, query):
        """Execute ``query`` and fetch up to 1024 rows; returns the raw
        TFetchResultsResp after validating both RPC responses."""
        execute_statement_req = TCLIService.TExecuteStatementReq()
        execute_statement_req.sessionHandle = self.session_handle
        execute_statement_req.statement = query
        execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
        HS2TestSuite.check_response(execute_statement_resp)
        fetch_results_req = TCLIService.TFetchResultsReq()
        fetch_results_req.operationHandle = execute_statement_resp.operationHandle
        fetch_results_req.maxRows = 1024
        fetch_results_resp = self.hs2_client.FetchResults(fetch_results_req)
        HS2TestSuite.check_response(fetch_results_resp)
        return fetch_results_resp

    @needs_session()
    def test_alltypes_v6(self):
        """Test that a simple select statement works for all types"""
        fetch_results_resp = self.__query_and_fetch(
            "SELECT *, NULL from functional.alltypes ORDER BY id LIMIT 1")
        num_rows, result = self.column_results_to_string(fetch_results_resp.results.columns)
        assert num_rows == 1
        assert result == \
            "0, True, 0, 0, 0, 0, 0.0, 0.0, 01/01/09, 0, 2009-01-01 00:00:00, 2009, 1, NULL\n"
        # Decimals
        fetch_results_resp = self.__query_and_fetch(
            "SELECT * from functional.decimal_tbl LIMIT 1")
        num_rows, result = self.column_results_to_string(fetch_results_resp.results.columns)
        assert result == ("1234, 2222, 1.2345678900, "
                          "0.12345678900000000000000000000000000000, 12345.78900, 1\n")
        # VARCHAR
        fetch_results_resp = self.__query_and_fetch("SELECT CAST('str' AS VARCHAR(3))")
        num_rows, result = self.column_results_to_string(fetch_results_resp.results.columns)
        assert result == "str\n"
        # CHAR not inlined
        fetch_results_resp = self.__query_and_fetch("SELECT CAST('car' AS CHAR(140))")
        num_rows, result = self.column_results_to_string(fetch_results_resp.results.columns)
        assert result == "car" + (" " * 137) + "\n"
        # CHAR inlined
        fetch_results_resp = self.__query_and_fetch("SELECT CAST('car' AS CHAR(5))")
        num_rows, result = self.column_results_to_string(fetch_results_resp.results.columns)
        assert result == "car  \n"

    @needs_session()
    def test_show_partitions(self):
        """Regression test for IMPALA-1330"""
        for query in ["SHOW PARTITIONS functional.alltypes",
                      "SHOW TABLE STATS functional.alltypes"]:
            fetch_results_resp = self.__query_and_fetch(query)
            num_rows, result = \
                self.column_results_to_string(fetch_results_resp.results.columns)
            assert num_rows == 25
            # Match whether stats are computed or not
            assert re.match(
                r"2009, 1, -?\d+, -?\d+, \d*\.?\d+KB, NOT CACHED, NOT CACHED, TEXT", result) is not None

    @needs_session()
    def test_show_column_stats(self):
        fetch_results_resp = self.__query_and_fetch("SHOW COLUMN STATS functional.alltypes")
        num_rows, result = self.column_results_to_string(fetch_results_resp.results.columns)
        assert num_rows == 13
        assert re.match(r"id, INT, -?\d+, -?\d+, (NULL|\d+), 4.0", result) is not None

    @needs_session(TCLIService.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1)
    def test_execute_select_v1(self):
        """Test that a simple select statement works in the row-oriented protocol"""
        execute_statement_req = TCLIService.TExecuteStatementReq()
        execute_statement_req.sessionHandle = self.session_handle
        execute_statement_req.statement = "SELECT COUNT(*) FROM functional.alltypes"
        execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
        HS2TestSuite.check_response(execute_statement_resp)
        fetch_results_req = TCLIService.TFetchResultsReq()
        fetch_results_req.operationHandle = execute_statement_resp.operationHandle
        fetch_results_req.maxRows = 100
        fetch_results_resp = self.hs2_client.FetchResults(fetch_results_req)
        HS2TestSuite.check_response(fetch_results_resp)
        assert len(fetch_results_resp.results.rows) == 1
        assert fetch_results_resp.results.startRowOffset == 0
        try:
            assert not fetch_results_resp.hasMoreRows
        except AssertionError:
            # Known issue: hasMoreRows is not always accurate.
            pytest.xfail("IMPALA-558")

    @needs_session()
    def test_select_null(self):
        """Regression test for IMPALA-1370, where NULL literals would appear as strings where
        they should be booleans"""
        execute_statement_req = TCLIService.TExecuteStatementReq()
        execute_statement_req.sessionHandle = self.session_handle
        execute_statement_req.statement = "select null"
        execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
        HS2TestSuite.check_response(execute_statement_resp)
        # Check that the expected type is boolean (for compatibility with Hive, see also
        # IMPALA-914)
        get_result_metadata_req = TCLIService.TGetResultSetMetadataReq()
        get_result_metadata_req.operationHandle = execute_statement_resp.operationHandle
        get_result_metadata_resp = \
            self.hs2_client.GetResultSetMetadata(get_result_metadata_req)
        col = get_result_metadata_resp.schema.columns[0]
        assert col.typeDesc.types[0].primitiveEntry.type == TTypeId.BOOLEAN_TYPE
        # Check that the actual type is boolean
        fetch_results_req = TCLIService.TFetchResultsReq()
        fetch_results_req.operationHandle = execute_statement_resp.operationHandle
        fetch_results_req.maxRows = 1
        fetch_results_resp = self.hs2_client.FetchResults(fetch_results_req)
        HS2TestSuite.check_response(fetch_results_resp)
        assert fetch_results_resp.results.columns[0].boolVal is not None
        assert self.column_results_to_string(
            fetch_results_resp.results.columns) == (1, "NULL\n")

    @needs_session()
    def test_compute_stats(self):
        """Exercise the child query path"""
        self.__query_and_fetch("compute stats functional.alltypes")
| apache-2.0 |
rbarlow/pulp | server/test/unit/server/managers/schedule/test_consumer.py | 15 | 7585 | import unittest
import mock
from pulp.server.db.model.consumer import Consumer
from pulp.server.db.model.dispatch import ScheduledCall
from pulp.server.exceptions import MissingResource, MissingValue, InvalidValue
from pulp.server.managers.factory import initialize
from pulp.server.managers.schedule.consumer import (ConsumerScheduleManager, UNIT_INSTALL_ACTION,
UNIT_UPDATE_ACTION, ACTIONS_TO_TASKS)
# Wire up the pulp manager factory once at import time so the managers
# under test can resolve their collaborators.
initialize()
class TestValidate(unittest.TestCase):
    """Tests for ConsumerScheduleManager._validate_consumer."""
    def setUp(self):
        super(TestValidate, self).setUp()
        self.manager = ConsumerScheduleManager()

    @mock.patch('pulp.server.managers.consumer.cud.ConsumerManager.get_consumer')
    def test_calls_get_consumer(self, mock_get):
        # Validation delegates to the consumer manager's lookup.
        self.manager._validate_consumer('foo')
        mock_get.assert_called_once_with('foo')

    @mock.patch('pulp.server.db.model.base.Model.get_collection')
    def test_raises_missing(self, mock_get_collection):
        # mock another layer down to verify manager integration
        mock_get_collection.return_value.find_one.side_effect = MissingResource
        self.assertRaises(MissingResource, self.manager._validate_consumer, 'foo')
class TestGet(unittest.TestCase):
    """Tests for ConsumerScheduleManager.get, with and without an
    action-type filter."""
    def setUp(self):
        super(TestGet, self).setUp()
        self.manager = ConsumerScheduleManager()
        # One schedule per supported action type.
        self.calls = [
            ScheduledCall('PT1H', ACTIONS_TO_TASKS[UNIT_INSTALL_ACTION]),
            ScheduledCall('PT4H', ACTIONS_TO_TASKS[UNIT_UPDATE_ACTION])
        ]

    @mock.patch('pulp.server.managers.schedule.utils.get_by_resource')
    def test_no_action(self, mock_get_by_resource):
        mock_get_by_resource.return_value = self.calls
        result = self.manager.get('consumer1')
        mock_get_by_resource.assert_called_once_with(Consumer.build_resource_tag('consumer1'))
        self.assertEqual(result, self.calls)

    @mock.patch('pulp.server.managers.schedule.utils.get_by_resource')
    def test_with_action(self, mock_get_by_resource):
        mock_get_by_resource.return_value = self.calls
        result = self.manager.get('consumer1', UNIT_INSTALL_ACTION)
        mock_get_by_resource.assert_called_once_with(Consumer.build_resource_tag('consumer1'))
        # Filtering by action keeps only the install schedule.
        self.assertEqual(list(result), self.calls[:1])
class TestCreate(unittest.TestCase):
    """Tests for ConsumerScheduleManager.create_schedule: validation of
    consumer, schedule string and units, plus the persisted call shape."""
    def setUp(self):
        super(TestCreate, self).setUp()
        self.manager = ConsumerScheduleManager()
        self.units = [
            {'type_id': 'mytype', 'unit_key': {'name': 'foo'}}
        ]

    @mock.patch.object(ConsumerScheduleManager, '_validate_consumer')
    def test_validation(self, mock_validate):
        mock_validate.side_effect = MissingResource
        self.assertRaises(MissingResource, self.manager.create_schedule, UNIT_INSTALL_ACTION,
                          'consumer1', self.units, {}, 'PT1H')
        mock_validate.assert_called_once_with('consumer1')

    @mock.patch.object(ScheduledCall, 'save')
    @mock.patch('pulp.server.managers.consumer.cud.ConsumerManager.get_consumer')
    def test_allows_arbitrary_options(self, mock_get_consumer, mock_save):
        # Options are passed through unvalidated.
        self.manager.create_schedule(UNIT_INSTALL_ACTION, 'consumer1',
                                     self.units, {'arbitrary_option': True}, 'PT1H')
        mock_save.assert_called_once_with()

    @mock.patch('pulp.server.managers.consumer.cud.ConsumerManager.get_consumer')
    def test_validate_schedule(self, mock_get_consumer):
        # A non-ISO8601 schedule string is rejected.
        self.assertRaises(InvalidValue, self.manager.create_schedule, UNIT_INSTALL_ACTION,
                          'consumer1', self.units, {}, 'not a valid schedule')

    @mock.patch('pulp.server.managers.consumer.cud.ConsumerManager.get_consumer')
    def test_validate_units(self, mock_get_consumer):
        # An empty unit list is rejected.
        self.assertRaises(MissingValue, self.manager.create_schedule, UNIT_INSTALL_ACTION,
                          'consumer1', [], {}, 'PT1M')

    @mock.patch.object(ScheduledCall, 'save')
    @mock.patch('pulp.server.managers.consumer.cud.ConsumerManager.get_consumer')
    def test_save(self, mock_get_consumer, mock_save):
        iso_schedule = 'PT1H'
        result = self.manager.create_schedule(UNIT_INSTALL_ACTION, 'consumer1',
                                              self.units, {}, iso_schedule, 4, False)
        self.assertEqual(result.iso_schedule, iso_schedule)
        self.assertEqual(result.args, ['consumer1'])
        self.assertEqual(result.kwargs['units'], self.units)
        self.assertEqual(result.kwargs['options'], {})
        self.assertEqual(result.resource, Consumer.build_resource_tag('consumer1'))
        self.assertTrue(result.enabled is False)
        mock_save.assert_called_once_with()
class TestUpdate(unittest.TestCase):
    """Tests for ConsumerScheduleManager.update_schedule: each optional
    argument maps to a distinct update payload."""
    def setUp(self):
        super(TestUpdate, self).setUp()
        self.manager = ConsumerScheduleManager()
        self.units = [
            {'type_id': 'mytype', 'unit_key': {'name': 'foo'}}
        ]

    @mock.patch.object(ConsumerScheduleManager, '_validate_consumer')
    def test_validation(self, mock_validate):
        mock_validate.side_effect = MissingResource
        self.assertRaises(MissingResource, self.manager.update_schedule, 'consumer1', 'schedule1',
                          self.units)
        mock_validate.assert_called_once_with('consumer1')

    @mock.patch('pulp.server.managers.schedule.utils.update')
    @mock.patch('pulp.server.managers.consumer.cud.ConsumerManager.get_consumer')
    def test_units(self, mock_get_consumer, mock_update):
        result = self.manager.update_schedule('consumer1', 'schedule1', self.units)
        mock_update.assert_called_once_with('schedule1', {'kwargs': {'units': self.units}})
        self.assertEqual(result, mock_update.return_value)

    @mock.patch('pulp.server.managers.schedule.utils.update')
    @mock.patch('pulp.server.managers.consumer.cud.ConsumerManager.get_consumer')
    def test_options(self, mock_get_consumer, mock_update):
        options = {'foo': 'bar'}
        result = self.manager.update_schedule('consumer1', 'schedule1', options=options)
        mock_update.assert_called_once_with('schedule1', {'kwargs': {'options': options}})
        self.assertEqual(result, mock_update.return_value)

    @mock.patch('pulp.server.managers.schedule.utils.update')
    @mock.patch('pulp.server.managers.consumer.cud.ConsumerManager.get_consumer')
    def test_other_data(self, mock_get_consumer, mock_update):
        # Non-units/options data is passed straight through to the update.
        schedule_data = {'enabled': False}
        result = self.manager.update_schedule('consumer1', 'schedule1', schedule_data=schedule_data)
        mock_update.assert_called_once_with('schedule1', {'enabled': False})
        self.assertEqual(result, mock_update.return_value)
class TestDelete(unittest.TestCase):
    """Tests for ConsumerScheduleManager.delete_schedule."""
    def setUp(self):
        super(TestDelete, self).setUp()
        self.manager = ConsumerScheduleManager()

    @mock.patch.object(ConsumerScheduleManager, '_validate_consumer')
    def test_validation(self, mock_validate):
        mock_validate.side_effect = MissingResource
        self.assertRaises(MissingResource, self.manager.delete_schedule, 'consumer1', 'schedule1')
        mock_validate.assert_called_once_with('consumer1')

    @mock.patch('pulp.server.managers.schedule.utils.delete')
    @mock.patch.object(ConsumerScheduleManager, '_validate_consumer')
    def test_calls_delete(self, mock_validate, mock_delete):
        # Deletion validates the consumer, then delegates to utils.delete.
        self.manager.delete_schedule('consumer1', 'schedule1')
        mock_delete.assert_called_once_with('schedule1')
        mock_validate.assert_called_once_with('consumer1')
| gpl-2.0 |
guewen/OpenUpgrade | addons/stock_account/wizard/stock_change_standard_price.py | 61 | 3467 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class change_standard_price(osv.osv_memory):
    """Wizard that updates a product's standard (cost) price and delegates
    posting of the corresponding stock-valuation move to
    ``product.product.do_change_standard_price``.
    """
    _name = "stock.change.standard.price"
    _description = "Change Standard Price"
    _columns = {
        # FIX: corrected the user-facing typo "creadited" -> "credited".
        'new_price': fields.float('Price', required=True, digits_compute=dp.get_precision('Product Price'),
                                  help="If cost price is increased, stock variation account will be debited "
                                       "and stock output account will be credited with the value = (difference of amount * quantity available).\n"
                                       "If cost price is decreased, stock variation account will be credited and stock input account will be debited."),
    }

    def default_get(self, cr, uid, fields, context=None):
        """ To get default values for the object.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param fields: List of fields for which we want default values
        @param context: A standard dictionary
        @return: A dictionary which of fields with values.
        """
        if context is None:
            context = {}
        product_pool = self.pool.get('product.product')
        # The wizard is opened from a product form; its id arrives in context.
        product_obj = product_pool.browse(cr, uid, context.get('active_id', False))
        res = super(change_standard_price, self).default_get(cr, uid, fields, context=context)
        price = product_obj.standard_price
        if 'new_price' in fields:
            res.update({'new_price': price})
        return res

    def change_price(self, cr, uid, ids, context=None):
        """ Changes the Standard Price of Product.
        And creates an account move accordingly.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        @return: action dict closing the wizard window
        """
        if context is None:
            context = {}
        rec_id = context and context.get('active_id', False)
        assert rec_id, _('Active ID is not set in Context.')
        prod_obj = self.pool.get('product.product')
        res = self.browse(cr, uid, ids, context=context)
        prod_obj.do_change_standard_price(cr, uid, [rec_id], res[0].new_price, context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zachcp/qiime | scripts/blast_wrapper.py | 15 | 2554 | #!/usr/bin/env python
# File created on 20 Dec 2009.
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Greg Caporaso", "Jesse Stombaugh"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from qiime.util import parse_command_line_parameters, get_options_lookup
from qiime.util import make_option
from skbio.parse.sequences import parse_fasta
from qiime.util import qiime_blast_seqs
options_lookup = get_options_lookup()

# blast_wrapper.py
# Declarative metadata consumed by qiime's option parser: description,
# usage examples, and the required/optional command-line options.
script_info = {}
script_info['brief_description'] = """Blast Interface"""
script_info[
    'script_description'] = """This script is a functionally-limited interface to the qiime.util.qiime_blast_seqs function, primarily useful for testing purposes. Once that function has been integrated into qiime as the primary blast interface it will move to PyCogent. An expanded version of this command line interface may replace the script functionality of cogent.app.blast at that point."""
script_info['script_usage'] = []
script_info['script_usage'].append(("""Example:""", """Blast all sequences in inseqs.fasta (-i) against a BLAST db constructed \
from refseqs.fasta (-r).""", """%prog -i $PWD/inseqs.fasta -r $PWD/refseqs.fasta"""))
script_info[
    'output_description'] = """This is a utility program, which returns BLAST results."""
script_info['required_options'] = [
    options_lookup['fasta_as_primary_input'],
    make_option('-r', '--refseqs_fp', type='string',
                help='path to blast database as a fasta file')
]
script_info['optional_options'] = [
    make_option('-n', '--num_seqs_per_blast_run', type='int', default='1000',
                help='number of sequences passed to each blast call ' +
                "- useful for very large sequence collections [default: %default]")
]
script_info['version'] = __version__
def main():
    """Parse CLI options, BLAST the input sequences against the reference
    database, and print one summary line per query. Python 2 only."""
    option_parser, options, args = parse_command_line_parameters(**script_info)
    blast_results = qiime_blast_seqs(
        seqs=parse_fasta(open(options.input_fasta_fp)),
        refseqs_fp=options.refseqs_fp,
        seqs_per_blast_run=options.num_seqs_per_blast_run)
    for query_id, blast_result in blast_results.items():
        # Takes the first entry of the first hit list — assumed to be the
        # best hit; confirm against qiime_blast_seqs' result ordering.
        first_blast_result = blast_result[0][0]
        print '%s: %s %s %s' % (
            query_id,
            first_blast_result['SUBJECT ID'],
            first_blast_result['E-VALUE'],
            first_blast_result['% IDENTITY'])


if __name__ == "__main__":
    main()
| gpl-2.0 |
Shrhawk/edx-platform | lms/djangoapps/courseware/model_data.py | 9 | 35754 | """
Classes to provide the LMS runtime data storage to XBlocks.
:class:`DjangoKeyValueStore`: An XBlock :class:`~KeyValueStore` which
stores a subset of xblocks scopes as Django ORM objects. It wraps
:class:`~FieldDataCache` to provide an XBlock-friendly interface.
:class:`FieldDataCache`: A object which provides a read-through prefetch cache
of data to support XBlock fields within a limited set of scopes.
The remaining classes in this module provide read-through prefetch cache implementations
for specific scopes. The individual classes provide the knowledge of what are the essential
pieces of information for each scope, and thus how to cache, prefetch, and create new field data
entries.
UserStateCache: A cache for Scope.user_state
UserStateSummaryCache: A cache for Scope.user_state_summary
PreferencesCache: A cache for Scope.preferences
UserInfoCache: A cache for Scope.user_info
DjangoOrmFieldCache: A base-class for single-row-per-field caches.
"""
import json
from abc import abstractmethod, ABCMeta
from collections import defaultdict, namedtuple
from .models import (
StudentModule,
XModuleUserStateSummaryField,
XModuleStudentPrefsField,
XModuleStudentInfoField
)
import logging
from opaque_keys.edx.keys import CourseKey, UsageKey
from opaque_keys.edx.block_types import BlockTypeKeyV1
from opaque_keys.edx.asides import AsideUsageKeyV1
from contracts import contract, new_contract
from django.db import DatabaseError
from xblock.runtime import KeyValueStore
from xblock.exceptions import KeyValueMultiSaveError, InvalidScopeError
from xblock.fields import Scope, UserScope
from xmodule.modulestore.django import modulestore
from xblock.core import XBlockAside
from courseware.user_state_client import DjangoXBlockUserStateClient
log = logging.getLogger(__name__)
class InvalidWriteError(Exception):
    """
    Raised to indicate that writing to a particular key
    in the KeyValueStore is disabled.
    """
def _all_usage_keys(descriptors, aside_types):
    """
    Return the set of usage keys for ``descriptors``, together with one
    aside usage key per (descriptor, aside_type) combination.
    """
    block_ids = {descriptor.scope_ids.usage_id for descriptor in descriptors}
    aside_ids = {
        AsideUsageKeyV1(block_id, aside_type)
        for block_id in block_ids
        for aside_type in aside_types
    }
    return block_ids | aside_ids
def _all_block_types(descriptors, aside_types):
    """
    Return the set of block-type keys for ``descriptors``, together with
    one aside block-type key per entry in ``aside_types``.
    """
    types = {
        BlockTypeKeyV1(descriptor.entry_point, descriptor.scope_ids.block_type)
        for descriptor in descriptors
    }
    types.update(
        BlockTypeKeyV1(XBlockAside.entry_point, aside_type)
        for aside_type in aside_types
    )
    return types
class DjangoKeyValueStore(KeyValueStore):
    """
    An XBlock :class:`~KeyValueStore` backed by django models.

    Reads and writes are routed through the wrapped ``FieldDataCache``
    for these scopes only:

        Scope.user_state_summary
        Scope.user_state
        Scope.preferences
        Scope.user_info

    Any access with another scope raises ``InvalidScopeError``.

    Scope.user_state data is stored as StudentModule objects via the
    django orm; the other scopes are stored in per-scope objects keyed
    by field name. Reads or deletes of keys absent from the expected
    table raise ``KeyError``.
    """
    _allowed_scopes = (
        Scope.user_state_summary,
        Scope.user_state,
        Scope.preferences,
        Scope.user_info,
    )

    def __init__(self, field_data_cache):
        self._field_data_cache = field_data_cache

    def _raise_unless_scope_is_allowed(self, key):
        """Raise an InvalidScopeError if key.scope is not in self._allowed_scopes."""
        if key.scope not in self._allowed_scopes:
            raise InvalidScopeError(key, self._allowed_scopes)

    def get(self, key):
        self._raise_unless_scope_is_allowed(key)
        return self._field_data_cache.get(key)

    def set(self, key, value):
        """Set a single value in the KeyValueStore."""
        self.set_many({key: value})

    def set_many(self, kv_dict):
        """
        Bulk-save mechanism.

        ``kv_dict`` maps ``xblock.KvsFieldData._key`` objects to the
        values to store.
        """
        # Validate every key up front so nothing is written when any key
        # carries a disallowed scope.
        for kvs_key in kv_dict:
            self._raise_unless_scope_is_allowed(kvs_key)
        self._field_data_cache.set_many(kv_dict)

    def delete(self, key):
        self._raise_unless_scope_is_allowed(key)
        self._field_data_cache.delete(key)

    def has(self, key):
        self._raise_unless_scope_is_allowed(key)
        return self._field_data_cache.has(key)
# Register PyContracts type aliases so @contract decorators elsewhere in
# this module can reference these classes by name.
new_contract("DjangoKeyValueStore", DjangoKeyValueStore)
new_contract("DjangoKeyValueStore_Key", DjangoKeyValueStore.Key)
class DjangoOrmFieldCache(object):
"""
Baseclass for Scope-specific field cache objects that are based on
single-row-per-field Django ORM objects.
"""
__metaclass__ = ABCMeta
    def __init__(self):
        # Maps cache keys (as produced by ``_cache_key_for_field_object`` /
        # ``_cache_key_for_kvs_key``) to django field model objects.
        self._cache = {}
def cache_fields(self, fields, xblocks, aside_types):
"""
Load all fields specified by ``fields`` for the supplied ``xblocks``
and ``aside_types`` into this cache.
Arguments:
fields (list of str): Field names to cache.
xblocks (list of :class:`XBlock`): XBlocks to cache fields for.
aside_types (list of str): Aside types to cache fields for.
"""
for field_object in self._read_objects(fields, xblocks, aside_types):
self._cache[self._cache_key_for_field_object(field_object)] = field_object
@contract(kvs_key=DjangoKeyValueStore.Key)
def get(self, kvs_key):
"""
Return the django model object specified by `kvs_key` from
the cache.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The field value to delete
Returns: A django orm object from the cache
"""
cache_key = self._cache_key_for_kvs_key(kvs_key)
if cache_key not in self._cache:
raise KeyError(kvs_key.field_name)
field_object = self._cache[cache_key]
return json.loads(field_object.value)
@contract(kvs_key=DjangoKeyValueStore.Key)
def set(self, kvs_key, value):
"""
Set the specified `kvs_key` to the field value `value`.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The field value to delete
value: The field value to store
"""
self.set_many({kvs_key: value})
@contract(kv_dict="dict(DjangoKeyValueStore_Key: *)")
def set_many(self, kv_dict):
"""
Set the specified fields to the supplied values.
Arguments:
kv_dict (dict): A dictionary mapping :class:`~DjangoKeyValueStore.Key`
objects to values to set.
"""
saved_fields = []
for kvs_key, value in sorted(kv_dict.items()):
cache_key = self._cache_key_for_kvs_key(kvs_key)
field_object = self._cache.get(cache_key)
try:
serialized_value = json.dumps(value)
# It is safe to force an insert or an update, because
# a) we should have retrieved the object as part of the
# prefetch step, so if it isn't in our cache, it doesn't exist yet.
# b) no other code should be modifying these models out of band of
# this cache.
if field_object is None:
field_object = self._create_object(kvs_key, serialized_value)
field_object.save(force_insert=True)
self._cache[cache_key] = field_object
else:
field_object.value = serialized_value
field_object.save(force_update=True)
except DatabaseError:
log.exception("Saving field %r failed", kvs_key.field_name)
raise KeyValueMultiSaveError(saved_fields)
finally:
saved_fields.append(kvs_key.field_name)
@contract(kvs_key=DjangoKeyValueStore.Key)
def delete(self, kvs_key):
"""
Delete the value specified by `kvs_key`.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The field value to delete
Raises: KeyError if key isn't found in the cache
"""
cache_key = self._cache_key_for_kvs_key(kvs_key)
field_object = self._cache.get(cache_key)
if field_object is None:
raise KeyError(kvs_key.field_name)
field_object.delete()
del self._cache[cache_key]
@contract(kvs_key=DjangoKeyValueStore.Key, returns=bool)
def has(self, kvs_key):
"""
Return whether the specified `kvs_key` is set.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The field value to delete
Returns: bool
"""
return self._cache_key_for_kvs_key(kvs_key) in self._cache
@contract(kvs_key=DjangoKeyValueStore.Key, returns="datetime|None")
def last_modified(self, kvs_key):
"""
Return when the supplied field was changed.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The field value to delete
Returns: datetime if there was a modified date, or None otherwise
"""
field_object = self._cache.get(self._cache_key_for_kvs_key(kvs_key))
if field_object is None:
return None
else:
return field_object.modified
def __len__(self):
return len(self._cache)
@abstractmethod
def _create_object(self, kvs_key, value):
"""
Create a new object to add to the cache (which should record
the specified field ``value`` for the field identified by
``kvs_key``).
Arguments:
kvs_key (:class:`DjangoKeyValueStore.Key`): Which field to create an entry for
value: What value to record in the field
"""
raise NotImplementedError()
@abstractmethod
def _read_objects(self, fields, xblocks, aside_types):
"""
Return an iterator for all objects stored in the underlying datastore
for the ``fields`` on the ``xblocks`` and the ``aside_types`` associated
with them.
Arguments:
fields (list of str): Field names to return values for
xblocks (list of :class:`~XBlock`): XBlocks to load fields for
aside_types (list of str): Asides to load field for (which annotate the supplied
xblocks).
"""
raise NotImplementedError()
@abstractmethod
def _cache_key_for_field_object(self, field_object):
"""
Return the key used in this DjangoOrmFieldCache to store the specified field_object.
Arguments:
field_object: A Django model instance that stores the data for fields in this cache
"""
raise NotImplementedError()
@abstractmethod
def _cache_key_for_kvs_key(self, key):
"""
Return the key used in this DjangoOrmFieldCache for the specified KeyValueStore key.
Arguments:
key (:class:`~DjangoKeyValueStore.Key`): The key representing the cached field
"""
raise NotImplementedError()
class UserStateCache(object):
    """
    Cache for Scope.user_state xblock field data.
    """
    def __init__(self, user, course_id):
        # Maps block usage key -> {field_name: value} for this user's state.
        self._cache = defaultdict(dict)
        self.course_id = course_id
        self.user = user
        self._client = DjangoXBlockUserStateClient(self.user)

    def cache_fields(self, fields, xblocks, aside_types):  # pylint: disable=unused-argument
        """
        Load all fields specified by ``fields`` for the supplied ``xblocks``
        and ``aside_types`` into this cache.

        Arguments:
            fields (list of str): Field names to cache.
            xblocks (list of :class:`XBlock`): XBlocks to cache fields for.
            aside_types (list of str): Aside types to cache fields for.
        """
        # NOTE(review): ``fields`` is unused — the client loads *all* state
        # for each block, not just the requested fields.
        block_field_state = self._client.get_many(
            self.user.username,
            _all_usage_keys(xblocks, aside_types),
        )
        for usage_key, field_state in block_field_state:
            self._cache[usage_key] = field_state

    @contract(kvs_key=DjangoKeyValueStore.Key)
    def set(self, kvs_key, value):
        """
        Set the specified `kvs_key` to the field value `value`.

        Arguments:
            kvs_key (`DjangoKeyValueStore.Key`): The field to set
            value: The field value to store
        """
        self.set_many({kvs_key: value})

    @contract(kvs_key=DjangoKeyValueStore.Key, returns="datetime|None")
    def last_modified(self, kvs_key):
        """
        Return when the supplied field was changed.

        Arguments:
            kvs_key (`DjangoKeyValueStore.Key`): The key representing the cached field

        Returns: datetime if there was a modified date, or None otherwise
        """
        return self._client.get_mod_date(
            self.user.username,
            kvs_key.block_scope_id,
            fields=[kvs_key.field_name],
        ).get(kvs_key.field_name)

    @contract(kv_dict="dict(DjangoKeyValueStore_Key: *)")
    def set_many(self, kv_dict):
        """
        Set the specified fields to the supplied values.

        Arguments:
            kv_dict (dict): A dictionary mapping :class:`~DjangoKeyValueStore.Key`
                objects to values to set.
        """
        # Group dirty fields by block so the client can save them in bulk.
        pending_updates = defaultdict(dict)
        for kvs_key, value in kv_dict.items():
            cache_key = self._cache_key_for_kvs_key(kvs_key)

            pending_updates[cache_key][kvs_key.field_name] = value

        try:
            self._client.set_many(
                self.user.username,
                pending_updates
            )
        except DatabaseError:
            log.exception("Saving user state failed for %s", self.user.username)
            raise KeyValueMultiSaveError([])
        finally:
            # NOTE(review): this runs even when the save above failed, so the
            # in-memory cache can get ahead of the database on error.  It also
            # *replaces* each block's cached field dict with only the dirty
            # fields (top-level dict.update) rather than merging per field —
            # confirm both behaviors are intentional.
            self._cache.update(pending_updates)

    @contract(kvs_key=DjangoKeyValueStore.Key)
    def get(self, kvs_key):
        """
        Return the cached value specified by `kvs_key`.

        Arguments:
            kvs_key (`DjangoKeyValueStore.Key`): The field value to get

        Returns: The cached value for the field
        Raises: KeyError if the block or field isn't found in the cache
        """
        cache_key = self._cache_key_for_kvs_key(kvs_key)
        if cache_key not in self._cache:
            raise KeyError(kvs_key.field_name)

        return self._cache[cache_key][kvs_key.field_name]

    @contract(kvs_key=DjangoKeyValueStore.Key)
    def delete(self, kvs_key):
        """
        Delete the value specified by `kvs_key`.

        Arguments:
            kvs_key (`DjangoKeyValueStore.Key`): The field value to delete

        Raises: KeyError if key isn't found in the cache
        """
        cache_key = self._cache_key_for_kvs_key(kvs_key)
        if cache_key not in self._cache:
            raise KeyError(kvs_key.field_name)

        field_state = self._cache[cache_key]

        if kvs_key.field_name not in field_state:
            raise KeyError(kvs_key.field_name)

        # Delete in the backing store first, then drop the cached copy.
        self._client.delete(self.user.username, cache_key, fields=[kvs_key.field_name])
        del field_state[kvs_key.field_name]

    @contract(kvs_key=DjangoKeyValueStore.Key, returns=bool)
    def has(self, kvs_key):
        """
        Return whether the specified `kvs_key` is set.

        Arguments:
            kvs_key (`DjangoKeyValueStore.Key`): The field to check

        Returns: bool
        """
        cache_key = self._cache_key_for_kvs_key(kvs_key)
        return (
            cache_key in self._cache and
            kvs_key.field_name in self._cache[cache_key]
        )

    def __len__(self):
        return len(self._cache)

    def _cache_key_for_kvs_key(self, key):
        """
        Return the key used in this cache for the specified KeyValueStore key.

        Arguments:
            key (:class:`~DjangoKeyValueStore.Key`): The key representing the cached field
        """
        return key.block_scope_id
class UserStateSummaryCache(DjangoOrmFieldCache):
    """
    Cache for Scope.user_state_summary xblock field data.
    """
    def __init__(self, course_id):
        super(UserStateSummaryCache, self).__init__()
        self.course_id = course_id

    def _create_object(self, kvs_key, value):
        """
        Build (without saving) a new user-state-summary row recording
        ``value`` for the field identified by ``kvs_key``.
        """
        return XModuleUserStateSummaryField(
            field_name=kvs_key.field_name,
            usage_id=kvs_key.block_scope_id,
            value=value,
        )

    def _read_objects(self, fields, xblocks, aside_types):
        """
        Return an iterator over all stored rows for ``fields`` on the
        given ``xblocks`` (and the ``aside_types`` annotating them).
        """
        wanted_names = {field.name for field in fields}
        return XModuleUserStateSummaryField.objects.chunked_filter(
            'usage_id__in',
            _all_usage_keys(xblocks, aside_types),
            field_name__in=wanted_names,
        )

    def _cache_key_for_field_object(self, field_object):
        """
        Cache key for a stored row: (usage key mapped into this course,
        field name).
        """
        usage = field_object.usage_id.map_into_course(self.course_id)
        return (usage, field_object.field_name)

    def _cache_key_for_kvs_key(self, key):
        """Cache key for a KeyValueStore key: (usage key, field name)."""
        return (key.block_scope_id, key.field_name)
class PreferencesCache(DjangoOrmFieldCache):
    """
    Cache for Scope.preferences xblock field data.
    """
    def __init__(self, user):
        super(PreferencesCache, self).__init__()
        self.user = user

    def _create_object(self, kvs_key, value):
        """
        Build (without saving) a new preferences row recording ``value``
        for the field identified by ``kvs_key``.
        """
        return XModuleStudentPrefsField(
            field_name=kvs_key.field_name,
            module_type=BlockTypeKeyV1(kvs_key.block_family, kvs_key.block_scope_id),
            student_id=kvs_key.user_id,
            value=value,
        )

    def _read_objects(self, fields, xblocks, aside_types):
        """
        Return an iterator over this user's stored preference rows for
        ``fields`` on the block types of ``xblocks``/``aside_types``.
        """
        wanted_names = {field.name for field in fields}
        return XModuleStudentPrefsField.objects.chunked_filter(
            'module_type__in',
            _all_block_types(xblocks, aside_types),
            student=self.user.pk,
            field_name__in=wanted_names,
        )

    def _cache_key_for_field_object(self, field_object):
        """Cache key for a stored row: (block type, field name)."""
        return (field_object.module_type, field_object.field_name)

    def _cache_key_for_kvs_key(self, key):
        """Cache key for a KeyValueStore key: (block type key, field name)."""
        block_type = BlockTypeKeyV1(key.block_family, key.block_scope_id)
        return (block_type, key.field_name)
class UserInfoCache(DjangoOrmFieldCache):
    """
    Cache for Scope.user_info xblock field data
    """
    def __init__(self, user):
        super(UserInfoCache, self).__init__()
        self.user = user

    def _create_object(self, kvs_key, value):
        """
        Build (without saving) a new user-info row recording ``value``
        for the field identified by ``kvs_key``.
        """
        return XModuleStudentInfoField(
            field_name=kvs_key.field_name,
            student_id=kvs_key.user_id,
            value=value,
        )

    def _read_objects(self, fields, xblocks, aside_types):
        """
        Return an iterator over this user's stored info rows for
        ``fields`` (the xblock/aside arguments do not affect the query).
        """
        wanted_names = {field.name for field in fields}
        return XModuleStudentInfoField.objects.filter(
            student=self.user.pk,
            field_name__in=wanted_names,
        )

    def _cache_key_for_field_object(self, field_object):
        """
        Cache key for a stored row: the field name alone (the query is
        already restricted to a single user).
        """
        return field_object.field_name

    def _cache_key_for_kvs_key(self, key):
        """Cache key for a KeyValueStore key: the field name alone."""
        return key.field_name
class FieldDataCache(object):
    """
    A cache of django model objects needed to supply the data
    for a module and its descendants
    """
    def __init__(self, descriptors, course_id, user, select_for_update=False, asides=None):
        """
        Find any courseware.models objects that are needed by any descriptor
        in descriptors. Attempts to minimize the number of queries to the database.
        Note: Only modules that have store_state = True or have shared
        state will have a StudentModule.

        Arguments
        descriptors: A list of XModuleDescriptors.
        course_id: The id of the current course
        user: The user for which to cache data
        select_for_update: Ignored
        asides: The list of aside types to load, or None to prefetch no asides.
        """
        if asides is None:
            self.asides = []
        else:
            self.asides = asides

        assert isinstance(course_id, CourseKey)
        self.course_id = course_id
        self.user = user

        # One scope-specific cache per supported scope; scopes absent from
        # this dict are never cached (see the guards in get/set_many/etc.).
        self.cache = {
            Scope.user_state: UserStateCache(
                self.user,
                self.course_id,
            ),
            Scope.user_info: UserInfoCache(
                self.user,
            ),
            Scope.preferences: PreferencesCache(
                self.user,
            ),
            Scope.user_state_summary: UserStateSummaryCache(
                self.course_id,
            ),
        }
        self.scorable_locations = set()
        self.add_descriptors_to_cache(descriptors)

    def add_descriptors_to_cache(self, descriptors):
        """
        Add all `descriptors` to this FieldDataCache.
        """
        # NOTE(review): ``is_authenticated`` is called as a method here
        # (pre-Django-1.10 style); on newer Django it is a property.
        if self.user.is_authenticated():
            self.scorable_locations.update(desc.location for desc in descriptors if desc.has_score)

            for scope, fields in self._fields_to_cache(descriptors).items():
                if scope not in self.cache:
                    continue

                self.cache[scope].cache_fields(fields, descriptors, self.asides)

    def add_descriptor_descendents(self, descriptor, depth=None, descriptor_filter=lambda descriptor: True):
        """
        Add all descendants of `descriptor` to this FieldDataCache.

        Arguments:
            descriptor: An XModuleDescriptor
            depth is the number of levels of descendant modules to load StudentModules for, in addition to
                the supplied descriptor. If depth is None, load all descendant StudentModules
            descriptor_filter is a function that accepts a descriptor and return whether the field data
                should be cached
        """
        def get_child_descriptors(descriptor, depth, descriptor_filter):
            """
            Return a list of all child descriptors down to the specified depth
            that match the descriptor filter. Includes `descriptor`

            descriptor: The parent to search inside
            depth: The number of levels to descend, or None for infinite depth
            descriptor_filter(descriptor): A function that returns True
                if descriptor should be included in the results
            """
            if descriptor_filter(descriptor):
                descriptors = [descriptor]
            else:
                descriptors = []

            if depth is None or depth > 0:
                new_depth = depth - 1 if depth is not None else depth

                for child in descriptor.get_children() + descriptor.get_required_module_descriptors():
                    descriptors.extend(get_child_descriptors(child, new_depth, descriptor_filter))

            return descriptors

        # Wrap the tree walk (and the resulting prefetch) in one bulk
        # modulestore operation so queries can be batched.
        with modulestore().bulk_operations(descriptor.location.course_key):
            descriptors = get_child_descriptors(descriptor, depth, descriptor_filter)

            self.add_descriptors_to_cache(descriptors)

    @classmethod
    def cache_for_descriptor_descendents(cls, course_id, user, descriptor, depth=None,
                                         descriptor_filter=lambda descriptor: True,
                                         select_for_update=False, asides=None):
        """
        course_id: the course in the context of which we want StudentModules.
        user: the django user for whom to load modules.
        descriptor: An XModuleDescriptor
        depth is the number of levels of descendant modules to load StudentModules for, in addition to
            the supplied descriptor. If depth is None, load all descendant StudentModules
        descriptor_filter is a function that accepts a descriptor and return whether the field data
            should be cached
        select_for_update: Ignored
        """
        cache = FieldDataCache([], course_id, user, select_for_update, asides=asides)
        cache.add_descriptor_descendents(descriptor, depth, descriptor_filter)
        return cache

    def _fields_to_cache(self, descriptors):
        """
        Returns a map of scopes to fields in that scope that should be cached
        """
        scope_map = defaultdict(set)
        for descriptor in descriptors:
            for field in descriptor.fields.values():
                scope_map[field.scope].add(field)
        return scope_map

    @contract(key=DjangoKeyValueStore.Key)
    def get(self, key):
        """
        Load the field value specified by `key`.

        Arguments:
            key (`DjangoKeyValueStore.Key`): The field value to load

        Returns: The found value
        Raises: KeyError if key isn't found in the cache
        """
        if key.scope.user == UserScope.ONE and not self.user.is_anonymous():
            # If we're getting user data, we expect that the key matches the
            # user we were constructed for.
            assert key.user_id == self.user.id

        if key.scope not in self.cache:
            raise KeyError(key.field_name)

        return self.cache[key.scope].get(key)

    @contract(kv_dict="dict(DjangoKeyValueStore_Key: *)")
    def set_many(self, kv_dict):
        """
        Set all of the fields specified by the keys of `kv_dict` to the values
        in that dict.

        Arguments:
            kv_dict (dict): dict mapping from `DjangoKeyValueStore.Key`s to field values
        Raises: DatabaseError if any fields fail to save
        """
        saved_fields = []
        # Bucket the incoming values by scope so each scope cache can save
        # its fields in one bulk call.  (``iteritems``: this file is Python 2.)
        by_scope = defaultdict(dict)
        for key, value in kv_dict.iteritems():

            if key.scope.user == UserScope.ONE and not self.user.is_anonymous():
                # If we're getting user data, we expect that the key matches the
                # user we were constructed for.
                assert key.user_id == self.user.id

            if key.scope not in self.cache:
                continue

            by_scope[key.scope][key] = value

        for scope, set_many_data in by_scope.iteritems():
            try:
                self.cache[scope].set_many(set_many_data)
                # If save is successful on these fields, add it to
                # the list of successful saves
                saved_fields.extend(key.field_name for key in set_many_data)
            except KeyValueMultiSaveError as exc:
                log.exception('Error saving fields %r', [key.field_name for key in set_many_data])
                # Re-raise with the full list of fields saved across scopes.
                raise KeyValueMultiSaveError(saved_fields + exc.saved_field_names)

    @contract(key=DjangoKeyValueStore.Key)
    def delete(self, key):
        """
        Delete the value specified by `key`.

        Arguments:
            key (`DjangoKeyValueStore.Key`): The field value to delete

        Raises: KeyError if key isn't found in the cache
        """
        if key.scope.user == UserScope.ONE and not self.user.is_anonymous():
            # If we're getting user data, we expect that the key matches the
            # user we were constructed for.
            assert key.user_id == self.user.id

        if key.scope not in self.cache:
            raise KeyError(key.field_name)

        self.cache[key.scope].delete(key)

    @contract(key=DjangoKeyValueStore.Key, returns=bool)
    def has(self, key):
        """
        Return whether the specified `key` is set.

        Arguments:
            key (`DjangoKeyValueStore.Key`): The field to check

        Returns: bool
        """
        if key.scope.user == UserScope.ONE and not self.user.is_anonymous():
            # If we're getting user data, we expect that the key matches the
            # user we were constructed for.
            assert key.user_id == self.user.id

        if key.scope not in self.cache:
            return False

        return self.cache[key.scope].has(key)

    @contract(key=DjangoKeyValueStore.Key, returns="datetime|None")
    def last_modified(self, key):
        """
        Return when the supplied field was changed.

        Arguments:
            key (`DjangoKeyValueStore.Key`): The field to check

        Returns: datetime if there was a modified date, or None otherwise
        """
        if key.scope.user == UserScope.ONE and not self.user.is_anonymous():
            # If we're getting user data, we expect that the key matches the
            # user we were constructed for.
            assert key.user_id == self.user.id

        if key.scope not in self.cache:
            return None

        return self.cache[key.scope].last_modified(key)

    def __len__(self):
        return sum(len(cache) for cache in self.cache.values())
class ScoresClient(object):
    """
    Basic client interface for retrieving Score information.

    Eventually, this should read and write scores, but at the moment it only
    handles the read side of things.
    """
    Score = namedtuple('Score', 'correct total')

    def __init__(self, course_key, user_id):
        """Basic constructor. from_field_data_cache() is more appopriate for most uses."""
        self.course_key = course_key
        self.user_id = user_id
        self._locations_to_scores = {}
        self._has_fetched = False

    def __contains__(self, location):
        """Return True if we have a score for this location."""
        return location in self._locations_to_scores

    def fetch_scores(self, locations):
        """Grab score information."""
        scores_qset = StudentModule.objects.filter(
            student_id=self.user_id,
            course_id=self.course_key,
            module_state_key__in=set(locations),
        )
        # Locations in StudentModule don't necessarily have course key info
        # attached to them (since old mongo identifiers don't include runs).
        # So we have to add that info back in before we put it into our lookup.
        rows = scores_qset.values_list('module_state_key', 'grade', 'max_grade')
        for raw_location, correct, total in rows:
            full_location = UsageKey.from_string(raw_location).map_into_course(self.course_key)
            self._locations_to_scores[full_location] = self.Score(correct, total)

        self._has_fetched = True

    def get(self, location):
        """
        Get the score for a given location, if it exists.

        If we don't have a score for that location, return `None`. Note that as
        convention, you should be passing in a location with full course run
        information.
        """
        if self._has_fetched:
            return self._locations_to_scores.get(location)
        raise ValueError(
            "Tried to fetch location {} from ScoresClient before fetch_scores() has run."
            .format(location)
        )

    @classmethod
    def from_field_data_cache(cls, fd_cache):
        """Create a ScoresClient from a populated FieldDataCache."""
        scores_client = cls(fd_cache.course_id, fd_cache.user.id)
        scores_client.fetch_scores(fd_cache.scorable_locations)
        return scores_client
# @contract(user_id=int, usage_key=UsageKey, score="number|None", max_score="number|None")
def set_score(user_id, usage_key, score, max_score):
    """
    Set the score and max_score for the specified user and xblock usage.
    """
    new_values = {
        'grade': score,
        'max_grade': max_score,
    }
    student_module, created = StudentModule.objects.get_or_create(
        student_id=user_id,
        module_state_key=usage_key,
        course_id=usage_key.course_key,
        defaults=new_values,
    )
    if not created:
        # The row already existed, so overwrite its score fields.
        student_module.grade = score
        student_module.max_grade = max_score
        student_module.save()
| agpl-3.0 |
oandrew/home-assistant | tests/components/media_player/test_cast.py | 22 | 1338 | """The tests for the Cast Media player platform."""
# pylint: disable=protected-access
import unittest
from unittest.mock import patch
from homeassistant.components.media_player import cast
class FakeChromeCast(object):
    """A fake Chrome Cast."""

    def __init__(self, host, port):
        """Remember the network address this fake device answers on."""
        self.host, self.port = host, port
class TestCastMediaPlayer(unittest.TestCase):
    """Test the media_player module."""

    @patch('homeassistant.components.media_player.cast.CastDevice')
    @patch('pychromecast.get_chromecasts')
    def test_filter_duplicates(self, mock_get_chromecasts, mock_device):
        """Test filtering of duplicates."""
        # Discovery will always report one device at some_host.
        mock_get_chromecasts.return_value = [
            FakeChromeCast('some_host', cast.DEFAULT_PORT)
        ]

        # Test chromecasts as if they were hardcoded in configuration.yaml
        cast.setup_platform(None, {
            'host': 'some_host'
        }, lambda _: _)

        assert mock_device.called

        mock_device.reset_mock()
        # NOTE(review): this assertion is trivially true right after
        # reset_mock(); presumably it documents the precondition for the
        # discovery case below — confirm it is intentional.
        assert not mock_device.called

        # Test chromecasts as if they were automatically discovered
        cast.setup_platform(None, {}, lambda _: _, ('some_host',
                                                    cast.DEFAULT_PORT))
        # The already-configured host must not be set up a second time.
        assert not mock_device.called
| mit |
gonboy/sl4a | python/src/Lib/test/test_time.py | 58 | 9341 | from test import test_support
import time
import unittest
class TimeTestCase(unittest.TestCase):
    # NOTE: this suite targets Python 2 — it uses ``long``, ``has_key`` and
    # the legacy assertion spellings (``assert_``, ``failUnlessEqual``, ...).

    def setUp(self):
        self.t = time.time()

    def test_data_attributes(self):
        # Merely touching these attributes verifies the module exposes them.
        time.altzone
        time.daylight
        time.timezone
        time.tzname

    def test_clock(self):
        time.clock()

    def test_conversions(self):
        self.assert_(time.ctime(self.t)
                     == time.asctime(time.localtime(self.t)))
        self.assert_(long(time.mktime(time.localtime(self.t)))
                     == long(self.t))

    def test_sleep(self):
        time.sleep(1.2)

    def test_strftime(self):
        tt = time.gmtime(self.t)
        for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
                          'j', 'm', 'M', 'p', 'S',
                          'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
            format = ' %' + directive
            try:
                time.strftime(format, tt)
            except ValueError:
                self.fail('conversion specifier: %r failed.' % format)

    def test_strftime_bounds_checking(self):
        # Make sure that strftime() checks the bounds of the various parts
        # of the time tuple (0 is valid for *all* values).

        # Check year [1900, max(int)]
        self.assertRaises(ValueError, time.strftime, '',
                          (1899, 1, 1, 0, 0, 0, 0, 1, -1))
        if time.accept2dyear:
            self.assertRaises(ValueError, time.strftime, '',
                              (-1, 1, 1, 0, 0, 0, 0, 1, -1))
            self.assertRaises(ValueError, time.strftime, '',
                              (100, 1, 1, 0, 0, 0, 0, 1, -1))
        # Check month [1, 12] + zero support
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, -1, 1, 0, 0, 0, 0, 1, -1))
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, 13, 1, 0, 0, 0, 0, 1, -1))
        # Check day of month [1, 31] + zero support
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, 1, -1, 0, 0, 0, 0, 1, -1))
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, 1, 32, 0, 0, 0, 0, 1, -1))
        # Check hour [0, 23]
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, 1, 1, -1, 0, 0, 0, 1, -1))
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, 1, 1, 24, 0, 0, 0, 1, -1))
        # Check minute [0, 59]
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, 1, 1, 0, -1, 0, 0, 1, -1))
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, 1, 1, 0, 60, 0, 0, 1, -1))
        # Check second [0, 61]
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, 1, 1, 0, 0, -1, 0, 1, -1))
        # C99 only requires allowing for one leap second, but Python's docs say
        # allow two leap seconds (0..61)
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, 1, 1, 0, 0, 62, 0, 1, -1))
        # No check for upper-bound day of week;
        #  value forced into range by a ``% 7`` calculation.
        # Start check at -2 since gettmarg() increments value before taking
        #  modulo.
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, 1, 1, 0, 0, 0, -2, 1, -1))
        # Check day of the year [1, 366] + zero support
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, 1, 1, 0, 0, 0, 0, -1, -1))
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, 1, 1, 0, 0, 0, 0, 367, -1))
        # Check daylight savings flag [-1, 1]
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, 1, 1, 0, 0, 0, 0, 1, -2))
        self.assertRaises(ValueError, time.strftime, '',
                          (1900, 1, 1, 0, 0, 0, 0, 1, 2))

    def test_default_values_for_zero(self):
        # Make sure that using all zeros uses the proper default values.
        # No test for daylight savings since strftime() does not change output
        # based on its value.
        expected = "2000 01 01 00 00 00 1 001"
        result = time.strftime("%Y %m %d %H %M %S %w %j", (0,)*9)
        self.assertEquals(expected, result)

    def test_strptime(self):
        # Should be able to go round-trip from strftime to strptime without
        # throwing an exception.
        tt = time.gmtime(self.t)
        for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
                          'j', 'm', 'M', 'p', 'S',
                          'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
            format = '%' + directive
            strf_output = time.strftime(format, tt)
            try:
                time.strptime(strf_output, format)
            except ValueError:
                self.fail("conversion specifier %r failed with '%s' input." %
                          (format, strf_output))

    def test_asctime(self):
        time.asctime(time.gmtime(self.t))
        self.assertRaises(TypeError, time.asctime, 0)

    def test_tzset(self):
        if not hasattr(time, "tzset"):
            return  # Can't test this; don't want the test suite to fail

        from os import environ

        # Epoch time of midnight Dec 25th 2002. Never DST in northern
        # hemisphere.
        xmas2002 = 1040774400.0

        # These formats are correct for 2002, and possibly future years
        # This format is the 'standard' as documented at:
        # http://www.opengroup.org/onlinepubs/007904975/basedefs/xbd_chap08.html
        # They are also documented in the tzset(3) man page on most Unix
        # systems.
        eastern = 'EST+05EDT,M4.1.0,M10.5.0'
        victoria = 'AEST-10AEDT-11,M10.5.0,M3.5.0'
        utc='UTC+0'
        org_TZ = environ.get('TZ',None)
        try:
            # Make sure we can switch to UTC time and results are correct
            # Note that unknown timezones default to UTC.
            # Note that altzone is undefined in UTC, as there is no DST
            environ['TZ'] = eastern
            time.tzset()
            environ['TZ'] = utc
            time.tzset()
            self.failUnlessEqual(
                time.gmtime(xmas2002), time.localtime(xmas2002)
            )
            self.failUnlessEqual(time.daylight, 0)
            self.failUnlessEqual(time.timezone, 0)
            self.failUnlessEqual(time.localtime(xmas2002).tm_isdst, 0)

            # Make sure we can switch to US/Eastern
            environ['TZ'] = eastern
            time.tzset()
            self.failIfEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
            self.failUnlessEqual(time.tzname, ('EST', 'EDT'))
            self.failUnlessEqual(len(time.tzname), 2)
            self.failUnlessEqual(time.daylight, 1)
            self.failUnlessEqual(time.timezone, 18000)
            self.failUnlessEqual(time.altzone, 14400)
            self.failUnlessEqual(time.localtime(xmas2002).tm_isdst, 0)
            self.failUnlessEqual(len(time.tzname), 2)

            # Now go to the southern hemisphere.
            environ['TZ'] = victoria
            time.tzset()
            self.failIfEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
            self.failUnless(time.tzname[0] == 'AEST', str(time.tzname[0]))
            self.failUnless(time.tzname[1] == 'AEDT', str(time.tzname[1]))
            self.failUnlessEqual(len(time.tzname), 2)
            self.failUnlessEqual(time.daylight, 1)
            self.failUnlessEqual(time.timezone, -36000)
            self.failUnlessEqual(time.altzone, -39600)
            self.failUnlessEqual(time.localtime(xmas2002).tm_isdst, 1)
        finally:
            # Repair TZ environment variable in case any other tests
            # rely on it.
            if org_TZ is not None:
                environ['TZ'] = org_TZ
            elif environ.has_key('TZ'):
                del environ['TZ']
            time.tzset()

    def test_insane_timestamps(self):
        # It's possible that some platform maps time_t to double,
        # and that this test will fail there. This test should
        # exempt such platforms (provided they return reasonable
        # results!).
        for func in time.ctime, time.gmtime, time.localtime:
            for unreasonable in -1e200, 1e200:
                self.assertRaises(ValueError, func, unreasonable)

    def test_ctime_without_arg(self):
        # Not sure how to check the values, since the clock could tick
        # at any time. Make sure these are at least accepted and
        # don't raise errors.
        time.ctime()
        time.ctime(None)

    def test_gmtime_without_arg(self):
        gt0 = time.gmtime()
        gt1 = time.gmtime(None)
        t0 = time.mktime(gt0)
        t1 = time.mktime(gt1)
        self.assert_(0 <= (t1-t0) < 0.2)

    def test_localtime_without_arg(self):
        lt0 = time.localtime()
        lt1 = time.localtime(None)
        t0 = time.mktime(lt0)
        t1 = time.mktime(lt1)
        self.assert_(0 <= (t1-t0) < 0.2)
def test_main():
    # Standard CPython test-suite entry point: run all TimeTestCase tests
    # through test_support so regrtest bookkeeping applies.
    test_support.run_unittest(TimeTestCase)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
wmjac/pygtsa | calc_energies.py | 1 | 3664 | # Copyright (C) 2014 William M. Jacobs
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or (at
# your option) any later version.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import argparse, random
from pygtsa.structure import Assembly
from pygtsa.histogram import EVHistogram
from pygtsa.cgraph import mc_simulate_energies_EV
if __name__ == '__main__':
    # Command line: positional structure/histogram files plus a sub-command
    # naming the energy distribution (currently only 'gaussian' exists).
    parser = argparse.ArgumentParser()
    parser.add_argument('structure', type=str, help="path to input structure file")
    parser.add_argument('lnhEV', type=str, help="path to lnhEV file")
    subparsers = parser.add_subparsers(dest='distribution')
    parser_gaussian = subparsers.add_parser('gaussian')
    parser_gaussian.add_argument('mean', metavar='MU', type=float,
                                 help="mean of Gaussian distribution")
    parser_gaussian.add_argument('stddev', metavar='SIGMA', type=float,
                                 help="standard deviation of Gaussian distribution")
    parser_gaussian.add_argument('--nsamples', type=int, default=100,
                                 help="number of independent energy samples [100]")
    parser_gaussian.add_argument('--output-prefix', metavar='PATH', type=str, default='./',
                                 help="path to output files [./]")
    clargs = parser.parse_args()

    # argparse does not force a sub-command to be chosen; fail early with a
    # clear message instead of crashing later on missing attributes.
    # (Fixed: use 'is None', not '== None'.)
    if clargs.distribution is None:
        raise Exception("please select a distribution")

    # Initialize
    target = Assembly.read(clargs.structure)
    with open(clargs.lnhEV, 'r') as f:
        lnhEV = EVHistogram.read(f)
    # A non-directory prefix gets '_' appended so the fixed output file names
    # stay visually separated from the prefix text.
    if clargs.output_prefix != '' and clargs.output_prefix[-1] != '/':
        clargs.output_prefix = clargs.output_prefix + '_'

    # Sample energies: one random energy per edge, nsamples independent draws.
    if clargs.distribution == 'gaussian':
        energy_dicts = [{E: random.normalvariate(clargs.mean, clargs.stddev)
                         for E in target.edges()}
                        for i in range(clargs.nsamples)]

    print("Writing energy samples to", clargs.output_prefix + 'bonds.dat')
    with open(clargs.output_prefix + 'bonds.dat', 'w') as f:
        for E in target.edges():
            f.write("%d -- %d:" % E)
            # Iterate the sample dicts directly rather than indexing by i.
            for sample in energy_dicts:
                f.write(" %g" % sample[E])
            f.write("\n")

    # Estimate average energies by Monte Carlo over the EV histogram.
    print("Calculating energy averages...")
    nsteps = 2 * target.number_of_edges()
    # One batch of samples per populated (>= 0) histogram bin.
    nsamples = 4000 * sum(sum(1 for j in range(lnhEV.h.shape[1]) if lnhEV.h[i,j] >= 0.)
                          for i in range(lnhEV.h.shape[0]))
    print("Collecting %d samples with %d steps between samples." % (nsamples, nsteps))
    energies, visits = mc_simulate_energies_EV(target, lnhEV, energy_dicts, nsteps, nsamples)

    print("Writing average energies to", clargs.output_prefix + 'energies.dat')
    with open(clargs.output_prefix + 'energies.dat', 'w') as f:
        f.write("# average energies (%s distribution)\n" % clargs.distribution)
        energies.write(f)
    print("Writing sampling histogram to", clargs.output_prefix + 'sampling_visits.dat')
    with open(clargs.output_prefix + 'sampling_visits.dat', 'w') as f:
        visits.write(f)
| gpl-3.0 |
Achint08/open-event-orga-server | migrations/versions/9913b58c2640_.py | 10 | 1112 | """empty message
Revision ID: 9913b58c2640
Revises: ddc941500bd2
Create Date: 2016-07-29 23:23:27.324000
"""
# Revision identifiers, used by Alembic to order migrations.
revision = '9913b58c2640'
# Parent revision this migration applies on top of.
down_revision = 'ddc941500bd2'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
    """Add per-order payment-card metadata columns (all nullable)."""
    # Originally auto-generated by Alembic; the column order mirrors the
    # autogenerate output.
    new_columns = (
        ('brand', sa.String()),
        ('exp_month', sa.Integer()),
        ('exp_year', sa.Integer()),
        ('last4', sa.String()),
        ('payment_mode', sa.String()),
    )
    for column_name, column_type in new_columns:
        op.add_column('orders', sa.Column(column_name, column_type, nullable=True))
def downgrade():
    """Drop the payment-card metadata columns added by upgrade()."""
    # Reverse order of the upgrade() additions.
    for column_name in ('payment_mode', 'last4', 'exp_year', 'exp_month', 'brand'):
        op.drop_column('orders', column_name)
| gpl-3.0 |
cnweibo/bootstrapgrunt | node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/analyzer.py | 294 | 21436 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
targets: list of targets to search for. The target names are unqualified.
The following is output:
error: only supplied if there is an error.
targets: the set of targets passed in via targets that either directly or
indirectly depend upon the set of paths supplied in files.
build_targets: minimal set of targets that directly depend on the changed
files and need to be built. The expectation is this set of targets is passed
into a build step.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case targets and build_targets are not output) or at
least one file was found.
invalid_targets: list of supplied targets thare were not found.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys
# Toggle for verbose tracing of target/source matching.
debug = False

# Status strings emitted in the JSON output (see module docstring).
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'

# MatchStatus is used indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not yet
# been visited to determine a more specific status yet.
MATCH_STATUS_TBD = 4

generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()

generator_wants_static_library_dependencies_adjusted = False

generator_default_variables = {
}
# Directory variables are given the '!!!' sentinel so _AddSources() can
# recognize and skip paths built from them.
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  generator_default_variables[dirname] = '!!!'

# The remaining generator variables are irrelevant for analysis; expand
# them to the empty string.
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''
def _ToGypPath(path):
"""Converts a path to the format used by gyp."""
if os.sep == '\\' and os.altsep == '/':
return path.replace('\\', '/')
return path
def _ResolveParent(path, base_path_components):
"""Resolves |path|, which starts with at least one '../'. Returns an empty
string if the path shouldn't be considered. See _AddSources() for a
description of |base_path_components|."""
depth = 0
while path.startswith('../'):
depth += 1
path = path[3:]
# Relative includes may go outside the source tree. For example, an action may
# have inputs in /usr/include, which are not in the source tree.
if depth > len(base_path_components):
return ''
if depth == len(base_path_components):
return path
return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
'/' + path
def _AddSources(sources, base_path, base_path_components, result):
  """Extracts valid sources from |sources| and adds them to |result|. Each
  source file is relative to |base_path|, but may contain '..'. To make
  resolving '..' easier |base_path_components| contains each of the
  directories in |base_path|. Additionally each source may contain variables.
  Such sources are ignored as it is assumed dependencies on them are expressed
  and tracked in some other means."""
  # NOTE: gyp paths are always posix style.
  for source in sources:
    # Skip empty names, paths built from directory variables (expanded to the
    # '!!!' sentinel above) and unexpanded '$' variables.
    if not len(source) or source.startswith('!!!') or source.startswith('$'):
      continue
    # variable expansion may lead to //.
    org_source = source
    source = source[0] + source[1:].replace('//', '/')
    if source.startswith('../'):
      source = _ResolveParent(source, base_path_components)
      # '' means the path escaped the source tree; drop it.
      if len(source):
        result.append(source)
      continue
    result.append(base_path + source)
    if debug:
      print 'AddSource', org_source, result[len(result) - 1]
def _ExtractSourcesFromAction(action, base_path, base_path_components,
                              results):
  """Collects the input files of a single gyp action/rule dict into |results|.

  Only 'inputs' matter here: they are dependencies of the action's output.
  """
  if 'inputs' in action:
    _AddSources(action['inputs'], base_path, base_path_components, results)
def _ToLocalPath(toplevel_dir, path):
"""Converts |path| to a path relative to |toplevel_dir|."""
if path == toplevel_dir:
return ''
if path.startswith(toplevel_dir + '/'):
return path[len(toplevel_dir) + len('/'):]
return path
def _ExtractSources(target, target_dict, toplevel_dir):
  """Returns the list of posix source paths referenced by |target_dict|:
  its 'sources' plus the inputs of any actions and rules (changes to those
  affect the resulting output too)."""
  # |target| is either absolute or relative and in the format of the OS. Gyp
  # source paths are always posix. Convert |target| to a posix path relative to
  # |toplevel_dir_|. This is done to make it easy to build source paths.
  base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
  base_path_components = base_path.split('/')

  # Add a trailing '/' so that _AddSources() can easily build paths.
  if len(base_path):
    base_path += '/'

  if debug:
    print 'ExtractSources', target, base_path

  results = []
  if 'sources' in target_dict:
    _AddSources(target_dict['sources'], base_path, base_path_components,
                results)
  # Include the inputs from any actions. Any changes to these affect the
  # resulting output.
  if 'actions' in target_dict:
    for action in target_dict['actions']:
      _ExtractSourcesFromAction(action, base_path, base_path_components,
                                results)
  if 'rules' in target_dict:
    for rule in target_dict['rules']:
      _ExtractSourcesFromAction(rule, base_path, base_path_components, results)

  return results
class Target(object):
  """Dependency-graph node describing one gyp target.

  Attributes:
    deps: direct dependencies of this Target (set of Target; non-recursive).
    match_status: one of the MATCH_STATUS_* values.
    back_deps: Targets that directly depend on this one.
    name: fully qualified name of the target.
    visited: traversal marker, used both while building the Target set and
      again in _GetBuildTargets().
    requires_build: True if the target type needs building
      (see _DoesTargetTypeRequireBuild for details).
    added_to_compile_targets: marker used when computing the set of targets
      that needs to be built.
    in_roots: True if this target is a descendant of one of the root nodes.
    is_executable: True if the target type is 'executable'.
  """

  def __init__(self, name):
    self.name = name
    self.match_status = MATCH_STATUS_TBD
    # Graph edges; populated by _GenerateTargets().
    self.deps = set()
    self.back_deps = set()
    # Traversal bookkeeping. TODO(sky): this state is specific to certain
    # functions and would be better isolated there.
    self.visited = False
    self.requires_build = False
    self.added_to_compile_targets = False
    self.in_roots = False
    self.is_executable = False
class Config(object):
  """Holds the parsed analyzer configuration.

  Attributes:
    files: list of file paths to search for.
    targets: set of unqualified target names to check (see the module
      docstring for details).
  """

  def __init__(self):
    self.files = []
    self.targets = set()

  def Init(self, params):
    """Initializes Config from the config_path generator flag.

    This is a separate method (rather than __init__) as it raises an
    Exception on any read/parse error. A missing config_path generator
    flag leaves the defaults in place.
    """
    generator_flags = params.get('generator_flags', {})
    config_path = generator_flags.get('config_path', None)
    if not config_path:
      return
    try:
      # 'with' guarantees the file is closed even when json.load raises
      # (the original open/load/close leaked the handle on parse errors).
      with open(config_path, 'r') as f:
        config = json.load(f)
    except IOError:
      raise Exception('Unable to open file ' + config_path)
    except ValueError as e:
      raise Exception('Unable to parse config file ' + config_path + str(e))
    if not isinstance(config, dict):
      raise Exception('config_path must be a JSON file containing a dictionary')
    self.files = config.get('files', [])
    self.targets = set(config.get('targets', []))
def _WasBuildFileModified(build_file, data, files, toplevel_dir):
  """Returns true if the build file |build_file| is either in |files| or
  one of the files included by |build_file| is in |files|. |toplevel_dir| is
  the root of the source tree."""
  if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
    if debug:
      print 'gyp file modified', build_file
    return True

  # First element of included_files is the file itself.
  if len(data[build_file]['included_files']) <= 1:
    return False

  for include_file in data[build_file]['included_files'][1:]:
    # |included_files| are relative to the directory of the |build_file|.
    # Rebase them so they can be compared against |files|.
    rel_include_file = \
        _ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
    if _ToLocalPath(toplevel_dir, rel_include_file) in files:
      if debug:
        print 'included gyp file modified, gyp_file=', build_file, \
            'included file=', rel_include_file
      return True
  return False
def _GetOrCreateTargetByName(targets, target_name):
"""Creates or returns the Target at targets[target_name]. If there is no
Target for |target_name| one is created. Returns a tuple of whether a new
Target was created and the Target."""
if target_name in targets:
return False, targets[target_name]
target = Target(target_name)
targets[target_name] = target
return True, target
def _DoesTargetTypeRequireBuild(target_dict):
"""Returns true if the target type is such that it needs to be built."""
# If a 'none' target has rules or actions we assume it requires a build.
return target_dict['type'] != 'none' or \
target_dict.get('actions') or target_dict.get('rules')
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
                     build_files):
  """Returns a tuple of the following:
  . A dictionary mapping from fully qualified name to Target.
  . A list of the targets that have a source file in |files|.
  . Set of root Targets reachable from the files |build_files|.
  This sets the |match_status| of the targets that contain any of the source
  files in |files| to MATCH_STATUS_MATCHES.
  |toplevel_dir| is the root of the source tree."""
  # Maps from target name to Target.
  targets = {}

  # Targets that matched.
  matching_targets = []

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  # Maps from build file to a boolean indicating whether the build file is in
  # |files|.  Memoized because many targets share a build file.
  build_file_in_files = {}

  # Root targets across all files (targets nothing else depends on).
  roots = set()

  # Set of Targets in |build_files|.
  build_file_targets = set()

  while len(targets_to_visit) > 0:
    target_name = targets_to_visit.pop()
    created_target, target = _GetOrCreateTargetByName(targets, target_name)
    if created_target:
      # Until proven otherwise (below) a newly seen target is a root.
      roots.add(target)
    elif target.visited:
      continue

    target.visited = True
    target.requires_build = _DoesTargetTypeRequireBuild(
        target_dicts[target_name])
    target.is_executable = target_dicts[target_name]['type'] == 'executable'

    build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
    if not build_file in build_file_in_files:
      build_file_in_files[build_file] = \
          _WasBuildFileModified(build_file, data, files, toplevel_dir)

    if build_file in build_files:
      build_file_targets.add(target)

    # If a build file (or any of its included files) is modified we assume all
    # targets in the file are modified.
    if build_file_in_files[build_file]:
      print 'matching target from modified build file', target_name
      target.match_status = MATCH_STATUS_MATCHES
      matching_targets.append(target)
    else:
      # Otherwise match against the target's own sources/action inputs.
      sources = _ExtractSources(target_name, target_dicts[target_name],
                                toplevel_dir)
      for source in sources:
        if source in files:
          print 'target', target_name, 'matches', source
          target.match_status = MATCH_STATUS_MATCHES
          matching_targets.append(target)
          break

    # Add dependencies to visit as well as updating back pointers for deps.
    for dep in target_dicts[target_name].get('dependencies', []):
      targets_to_visit.append(dep)

      created_dep_target, dep_target = _GetOrCreateTargetByName(targets, dep)
      if not created_dep_target:
        # Something depends on it, so it cannot be a root.
        roots.discard(dep_target)

      target.deps.add(dep_target)
      dep_target.back_deps.add(target)

  return targets, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
  """Returns a mapping (dictionary) from unqualified name to Target for all
  the Targets in |to_find|.  Names with no matching Target are simply absent
  from the result."""
  result = {}
  if not to_find:
    return result
  remaining = set(to_find)
  for qualified_name in all_targets.keys():
    parts = gyp.common.ParseQualifiedTarget(qualified_name)
    if len(parts) > 1 and parts[1] in remaining:
      remaining.remove(parts[1])
      result[parts[1]] = all_targets[qualified_name]
      if not remaining:
        # Everything found; no need to scan the rest.
        break
  return result
def _DoesTargetDependOn(target):
  """Returns True if |target| or any of its dependencies matches the supplied
  set of paths, memoizing the answer in |match_status| as it recurses.

  target: the Target to look for.
  """
  status = target.match_status
  if status == MATCH_STATUS_DOESNT_MATCH:
    return False
  if status in (MATCH_STATUS_MATCHES, MATCH_STATUS_MATCHES_BY_DEPENDENCY):
    return True
  # Status is TBD: resolve it from the direct dependencies.
  for dep in target.deps:
    if _DoesTargetDependOn(dep):
      target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
      return True
  target.match_status = MATCH_STATUS_DOESNT_MATCH
  return False
def _GetTargetsDependingOn(possible_targets):
  """Filters |possible_targets| down to those that depend (either directly or
  indirectly) on the matched targets.

  possible_targets: targets to search from.
  """
  return [target for target in possible_targets if _DoesTargetDependOn(target)]
def _AddBuildTargets(target, roots, add_if_no_ancestor, result):
  """Recurses through all targets that depend on |target|, adding all targets
  that need to be built (and are in |roots|) to |result|.
  roots: set of root targets.
  add_if_no_ancestor: If true and there are no ancestors of |target| then add
  |target| to |result|. |target| must still be in |roots|.
  result: targets that need to be built are added here."""
  if target.visited:
    return

  target.visited = True
  # A target with no back_deps is itself a potential root.
  target.in_roots = not target.back_deps and target in roots

  for back_dep_target in target.back_deps:
    _AddBuildTargets(back_dep_target, roots, False, result)
    # Inherit state from ancestors: if any ancestor was compiled or is
    # rooted, this target counts as such too.
    target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
    target.in_roots |= back_dep_target.in_roots

  # Always add 'executable' targets. Even though they may be built by other
  # targets that depend upon them it makes detection of what is going to be
  # built easier.
  if target.in_roots and \
     (target.is_executable or
      (not target.added_to_compile_targets and
       (add_if_no_ancestor or target.requires_build))):
    result.add(target)
    target.added_to_compile_targets = True
def _GetBuildTargets(matching_targets, roots):
  """Returns the set of Targets that require a build.

  matching_targets: targets that changed and need to be built.
  roots: set of root targets in the build files to search from.
  """
  build_set = set()
  for changed_target in matching_targets:
    _AddBuildTargets(changed_target, roots, True, build_set)
  return build_set
def _WriteOutput(params, **values):
"""Writes the output, either to stdout or a file is specified."""
if 'error' in values:
print 'Error:', values['error']
if 'status' in values:
print values['status']
if 'targets' in values:
values['targets'].sort()
print 'Supplied targets that depend on changed files:'
for target in values['targets']:
print '\t', target
if 'invalid_targets' in values:
values['invalid_targets'].sort()
print 'The following targets were not found:'
for target in values['invalid_targets']:
print '\t', target
if 'build_targets' in values:
values['build_targets'].sort()
print 'Targets that require a build:'
for target in values['build_targets']:
print '\t', target
output_path = params.get('generator_flags', {}).get(
'analyzer_output_path', None)
if not output_path:
print json.dumps(values)
return
try:
f = open(output_path, 'w')
f.write(json.dumps(values) + '\n')
f.close()
except IOError as e:
print 'Error writing to output file', output_path, str(e)
def _WasGypIncludeFileModified(params, files):
  """Returns true if one of the files in |files| is in the set of included
  files."""
  # A modified global include invalidates everything; the caller responds
  # with all_changed_string.
  if params['options'].includes:
    for include in params['options'].includes:
      if _ToGypPath(include) in files:
        print 'Include file modified, assuming all changed', include
        return True
  return False
def _NamesNotIn(names, mapping):
"""Returns a list of the values in |names| that are not in |mapping|."""
return [name for name in names if name not in mapping]
def _LookupTargets(names, mapping):
"""Returns a list of the mapping[name] for each value in |names| that is in
|mapping|."""
return [mapping[name] for name in names if name in mapping]
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp).

  Fills in default_variables['OS'] from the build flavor; on Windows it also
  runs the shared msvs emulation setup."""
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
  elif flavor == 'win':
    default_variables.setdefault('OS', 'win')
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])
    # NOTE(review): the two generator_additional_* assignments above are
    # function-local and never read here — confirm whether they were meant
    # to update the module-level names of the same spelling.
    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
def GenerateOutput(target_list, target_dicts, data, params):
  """Called by gyp as the final stage. Outputs results.

  Any exception raised along the way is reported through the 'error' key of
  the JSON output rather than propagated."""
  config = Config()
  try:
    config.Init(params)
    if not config.files:
      raise Exception('Must specify files to analyze via config_path generator '
                      'flag')

    toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
    if debug:
      print 'toplevel_dir', toplevel_dir

    # A modified global include means everything changed; short-circuit.
    if _WasGypIncludeFileModified(params, config.files):
      result_dict = { 'status': all_changed_string,
                      'targets': list(config.targets) }
      _WriteOutput(params, **result_dict)
      return

    # Build the dependency graph and find the directly matching targets.
    all_targets, matching_targets, roots = _GenerateTargets(
        data, target_list, target_dicts, toplevel_dir, frozenset(config.files),
        params['build_files'])

    unqualified_mapping = _GetUnqualifiedToTargetMapping(all_targets,
                                                         config.targets)
    invalid_targets = None
    if len(unqualified_mapping) != len(config.targets):
      # Some requested target names don't exist; report them.
      invalid_targets = _NamesNotIn(config.targets, unqualified_mapping)

    if matching_targets:
      search_targets = _LookupTargets(config.targets, unqualified_mapping)
      matched_search_targets = _GetTargetsDependingOn(search_targets)
      # Reset the visited status for _GetBuildTargets.
      for target in all_targets.itervalues():
        target.visited = False
      build_targets = _GetBuildTargets(matching_targets, roots)
      # Output uses unqualified names.
      matched_search_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
                                for target in matched_search_targets]
      build_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
                       for target in build_targets]
    else:
      matched_search_targets = []
      build_targets = []

    result_dict = { 'targets': matched_search_targets,
                    'status': found_dependency_string if matching_targets else
                              no_dependency_string,
                    'build_targets': build_targets}
    if invalid_targets:
      result_dict['invalid_targets'] = invalid_targets
    _WriteOutput(params, **result_dict)

  except Exception as e:
    _WriteOutput(params, error=str(e))
| mit |
AnishShah/tensorflow | tensorflow/contrib/model_pruning/python/layers/layers.py | 27 | 15357 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow layers with added variables for parameter masking.
Branched from tensorflow/contrib/layers/python/layers/layers.py
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.model_pruning.python.layers import core_layers as core
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
def _model_variable_getter(getter,
                           name,
                           shape=None,
                           dtype=None,
                           initializer=None,
                           regularizer=None,
                           trainable=True,
                           collections=None,
                           caching_device=None,
                           partitioner=None,
                           rename=None,
                           use_resource=None,
                           **_):
  """Getter that uses model_variable for compatibility with core layers.

  When |rename| maps the variable's leaf name (e.g. 'kernel' -> 'weights'),
  the variable is created under the translated name.
  """
  leaf = name.split('/')[-1]
  if rename and leaf in rename:
    # Swap only the last path component, keeping the scope prefix intact.
    name = '/'.join(name.split('/')[:-1] + [rename[leaf]])
  return variables.model_variable(
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      collections=collections,
      trainable=trainable,
      caching_device=caching_device,
      partitioner=partitioner,
      custom_getter=getter,
      use_resource=use_resource)
def _build_variable_getter(rename=None):
"""Build a model variable getter that respects scope getter and renames."""
# VariableScope will nest the getters
def layer_variable_getter(getter, *args, **kwargs):
kwargs['rename'] = rename
return _model_variable_getter(getter, *args, **kwargs)
return layer_variable_getter
def _add_variable_to_collections(variable, collections_set, collections_name):
  """Adds variable (or all its parts) to all collections with that name."""
  collections = utils.get_variable_collections(collections_set,
                                               collections_name) or []
  # A PartitionedVariable contributes each of its shards individually.
  if isinstance(variable, tf_variables.PartitionedVariable):
    variables_list = list(variable)
  else:
    variables_list = [variable]
  for collection in collections:
    for var in variables_list:
      # Re-query the collection each time so duplicates are never added.
      if var not in ops.get_collection(collection):
        ops.add_to_collection(collection, var)
@add_arg_scope
def masked_convolution(inputs,
                       num_outputs,
                       kernel_size,
                       stride=1,
                       padding='SAME',
                       data_format=None,
                       rate=1,
                       activation_fn=nn.relu,
                       normalizer_fn=None,
                       normalizer_params=None,
                       weights_initializer=initializers.xavier_initializer(),
                       weights_regularizer=None,
                       biases_initializer=init_ops.zeros_initializer(),
                       biases_regularizer=None,
                       reuse=None,
                       variables_collections=None,
                       outputs_collections=None,
                       trainable=True,
                       scope=None):
  """Adds an 2D convolution followed by an optional batch_norm layer.

  The layer creates a mask variable on top of the weight variable. The input
  to the convolution operation is the elementwise multiplication of the mask
  variable and the weight variable.

  It is required that 1 <= N <= 3.

  `convolution` creates a variable called `weights`, representing the
  convolutional kernel, that is convolved (actually cross-correlated) with the
  `inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
  provided (such as `batch_norm`), it is then applied. Otherwise, if
  `normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
  variable would be created and added the activations. Finally, if
  `activation_fn` is not `None`, it is applied to the activations as well.

  Performs atrous convolution with input stride/dilation rate equal to `rate`
  if a value > 1 for any dimension of `rate` is specified.  In this case
  `stride` values != 1 are not supported.

  Args:
    inputs: A Tensor of rank N+2 of shape
      `[batch_size] + input_spatial_shape + [in_channels]` if data_format does
      not start with "NC" (default), or
      `[batch_size, in_channels] + input_spatial_shape` if data_format starts
      with "NC".
    num_outputs: Integer, the number of output filters.
    kernel_size: A sequence of N positive integers specifying the spatial
      dimensions of the filters.  Can be a single integer to specify the same
      value for all spatial dimensions.
    stride: A sequence of N positive integers specifying the stride at which to
      compute output.  Can be a single integer to specify the same value for
      all spatial dimensions.  Specifying any `stride` value != 1 is
      incompatible with specifying any `rate` value != 1.
    padding: One of `"VALID"` or `"SAME"`.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if
      `data_format` does not start with "NC"), or the second dimension (if
      `data_format` starts with "NC").  For N=1, the valid values are "NWC"
      (default) and "NCW".  For N=2, the valid values are "NHWC" (default) and
      "NCHW".  For N=3, the valid values are "NDHWC" (default) and "NCDHW".
    rate: A sequence of N positive integers specifying the dilation rate to use
      for atrous convolution.  Can be a single integer to specify the same
      value for all spatial dimensions.  Specifying any `rate` value != 1 is
      incompatible with specifying any `stride` value != 1.
    activation_fn: Activation function. The default value is a ReLU function.
      Explicitly set it to None to skip it and maintain a linear activation.
    normalizer_fn: Normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor added.
      default set to None for no normalizer function
    normalizer_params: Normalization function parameters.
    weights_initializer: An initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: Optional list of collections for all the variables
      or a dictionary containing a different list of collection per variable.
    outputs_collections: Collection to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for `variable_scope`.

  Returns:
    A tensor representing the output of the operation.

  Raises:
    ValueError: If `data_format` is invalid.
    ValueError: Both 'rate' and `stride` are not uniformly 1.
  """
  if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
    raise ValueError('Invalid data_format: %r' % (data_format,))

  # Translate core-layer variable names to the contrib-layer convention so
  # the variables land in the expected collections below.
  layer_variable_getter = _build_variable_getter({
      'bias': 'biases',
      'kernel': 'weights'
  })

  with variable_scope.variable_scope(
      scope, 'Conv', [inputs], reuse=reuse,
      custom_getter=layer_variable_getter) as sc:
    inputs = ops.convert_to_tensor(inputs)
    input_rank = inputs.get_shape().ndims

    # Only 2D convolution (rank-4 input) has a masked implementation.
    if input_rank == 3:
      raise ValueError('Sparse Convolution not supported for input with rank',
                       input_rank)
    elif input_rank == 4:
      layer_class = core.MaskedConv2D
    elif input_rank == 5:
      raise ValueError('Sparse Convolution not supported for input with rank',
                       input_rank)
    else:
      raise ValueError('Sparse Convolution not supported for input with rank',
                       input_rank)

    if data_format is None or data_format == 'NHWC':
      df = 'channels_last'
    elif data_format == 'NCHW':
      df = 'channels_first'
    else:
      raise ValueError('Unsupported data format', data_format)

    layer = layer_class(
        filters=num_outputs,
        kernel_size=kernel_size,
        strides=stride,
        padding=padding,
        data_format=df,
        dilation_rate=rate,
        activation=None,
        use_bias=not normalizer_fn and biases_initializer,
        kernel_initializer=weights_initializer,
        bias_initializer=biases_initializer,
        kernel_regularizer=weights_regularizer,
        bias_regularizer=biases_regularizer,
        activity_regularizer=None,
        trainable=trainable,
        name=sc.name,
        dtype=inputs.dtype.base_dtype,
        _scope=sc,
        _reuse=reuse)
    outputs = layer.apply(inputs)

    # Add variables to collections.
    _add_variable_to_collections(layer.kernel, variables_collections, 'weights')
    if layer.use_bias:
      _add_variable_to_collections(layer.bias, variables_collections, 'biases')

    if normalizer_fn is not None:
      normalizer_params = normalizer_params or {}
      outputs = normalizer_fn(outputs, **normalizer_params)

    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections,
                                       sc.original_name_scope, outputs)


# Short alias mirroring the contrib.layers naming convention (conv2d).
masked_conv2d = masked_convolution
@add_arg_scope
def masked_fully_connected(
    inputs,
    num_outputs,
    activation_fn=nn.relu,
    normalizer_fn=None,
    normalizer_params=None,
    weights_initializer=initializers.xavier_initializer(),
    weights_regularizer=None,
    biases_initializer=init_ops.zeros_initializer(),
    biases_regularizer=None,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    scope=None):
  """Adds a sparse (masked) fully connected layer.

  Behaves like `fully_connected`, except that the underlying layer is
  `core.MaskedFullyConnected`, whose weight matrix is masked for pruning.
  A `weights` variable is created and multiplied by `inputs`; if
  `normalizer_fn` is given it replaces the bias, otherwise a `biases`
  variable is added when `biases_initializer` is set; finally
  `activation_fn` is applied unless it is None.

  Note: if `inputs` has rank greater than 2 it is flattened before the
  matrix multiply by `weights`.

  Args:
    inputs: A tensor of at least rank 2 with a static last dimension.
    num_outputs: Integer or long, the number of output units.
    activation_fn: Activation applied last; None keeps a linear output.
    normalizer_fn: Optional normalization function used instead of biases.
    normalizer_params: Keyword arguments for `normalizer_fn`.
    weights_initializer: Initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: Initializer for the biases; None skips biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: Whether the layer and its variables should be reused; reuse
      requires `scope` to be given.
    variables_collections: Optional collections for the created variables.
    outputs_collections: Collection to add the outputs to.
    trainable: If True, add variables to `GraphKeys.TRAINABLE_VARIABLES`.
    scope: Optional scope for variable_scope.

  Returns:
    The tensor variable representing the result of the series of operations.

  Raises:
    ValueError: If x has rank less than 2 or if its last dimension is not set.
  """
  if not isinstance(num_outputs, six.integer_types):
    raise ValueError('num_outputs should be int or long, got %s.' %
                     (num_outputs,))
  # Map the Keras-style variable names onto the contrib-framework names.
  var_getter = _build_variable_getter({
      'bias': 'biases',
      'kernel': 'weights'
  })
  with variable_scope.variable_scope(
      scope,
      'fully_connected', [inputs],
      reuse=reuse,
      custom_getter=var_getter) as vs:
    tensor_in = ops.convert_to_tensor(inputs)
    dense = core.MaskedFullyConnected(
        units=num_outputs,
        activation=None,
        use_bias=not normalizer_fn and biases_initializer,
        kernel_initializer=weights_initializer,
        bias_initializer=biases_initializer,
        kernel_regularizer=weights_regularizer,
        bias_regularizer=biases_regularizer,
        activity_regularizer=None,
        trainable=trainable,
        name=vs.name,
        dtype=tensor_in.dtype.base_dtype,
        _scope=vs,
        _reuse=reuse)
    net = dense.apply(tensor_in)
    # Add variables to collections.
    _add_variable_to_collections(dense.kernel, variables_collections, 'weights')
    if dense.bias is not None:
      _add_variable_to_collections(dense.bias, variables_collections, 'biases')
    # Apply normalizer function / layer, then the activation.
    if normalizer_fn is not None:
      net = normalizer_fn(net, **(normalizer_params or {}))
    if activation_fn is not None:
      net = activation_fn(net)
    return utils.collect_named_outputs(outputs_collections,
                                       vs.original_name_scope, net)
| apache-2.0 |
riklaunim/django-custom-multisite | django/dispatch/saferef.py | 86 | 10503 | """
"Safe weakrefs", originally from pyDispatcher.
Provides a way to safely weakref any function, including bound methods (which
aren't handled by the core weakref module).
"""
import traceback
import weakref
def safeRef(target, onDelete = None):
    """Return a *safe* weak reference to a callable target

    target -- the object to be weakly referenced, if it's a
        bound method reference, will create a BoundMethodWeakref,
        otherwise creates a simple weakref.
    onDelete -- if provided, will have a hard reference stored
        to the callable to be called after the safe reference
        goes out of scope with the reference object, (either a
        weakref or a BoundMethodWeakref) as argument.
    """
    # A bound method (Python 2: has a non-None im_self) needs the special
    # BoundMethodWeakref machinery; everything else takes a plain weakref.
    bound_to = getattr(target, 'im_self', None)
    if bound_to is not None:
        assert hasattr(target, 'im_func'), """safeRef target %r has im_self, but no im_func, don't know how to create reference"""%( target,)
        return get_bound_method_weakref(
            target=target,
            onDelete=onDelete
        )
    if callable(onDelete):
        return weakref.ref(target, onDelete)
    return weakref.ref( target )
class BoundMethodWeakref(object):
    """'Safe' and reusable weak references to instance methods

    BoundMethodWeakref objects provide a mechanism for
    referencing a bound method without requiring that the
    method object itself (which is normally a transient
    object) is kept alive. Instead, the BoundMethodWeakref
    object keeps weak references to both the object and the
    function which together define the instance method.

    Attributes:
        key -- the identity key for the reference, calculated
            by the class's calculateKey method applied to the
            target instance method
        deletionMethods -- sequence of callable objects taking
            single argument, a reference to this object which
            will be called when *either* the target object or
            target function is garbage collected (i.e. when
            this object becomes invalid). These are specified
            as the onDelete parameters of safeRef calls.
        weakSelf -- weak reference to the target object
        weakFunc -- weak reference to the target function

    Class Attributes:
        _allInstances -- class attribute pointing to all live
            BoundMethodWeakref objects indexed by the class's
            calculateKey(target) method applied to the target
            objects. This weak value dictionary is used to
            short-circuit creation so that multiple references
            to the same (object, function) pair produce the
            same BoundMethodWeakref instance.

    NOTE: this class uses Python 2-only syntax (``except Exception, e``,
    ``print`` statements, ``__nonzero__``/``__cmp__``); do not port it
    piecemeal to Python 3.
    """
    # Weak-valued registry so identical (object, function) pairs share one
    # BoundMethodWeakref instance; entries vanish when the ref dies.
    _allInstances = weakref.WeakValueDictionary()
    def __new__( cls, target, onDelete=None, *arguments,**named ):
        """Create new instance or return current instance

        Basically this method of construction allows us to
        short-circuit creation of references to already-
        referenced instance methods. The key corresponding
        to the target is calculated, and if there is already
        an existing reference, that is returned, with its
        deletionMethods attribute updated. Otherwise the
        new instance is created and registered in the table
        of already-referenced methods.
        """
        key = cls.calculateKey(target)
        current =cls._allInstances.get(key)
        if current is not None:
            # Reuse the live reference; just remember the extra callback.
            current.deletionMethods.append( onDelete)
            return current
        else:
            base = super( BoundMethodWeakref, cls).__new__( cls )
            cls._allInstances[key] = base
            # __init__ is invoked explicitly here because returning an
            # existing instance above must NOT re-run initialization.
            base.__init__( target, onDelete, *arguments,**named)
            return base
    def __init__(self, target, onDelete=None):
        """Return a weak-reference-like instance for a bound method

        target -- the instance-method target for the weak
            reference, must have im_self and im_func attributes
            and be reconstructable via:
                target.im_func.__get__( target.im_self )
            which is true of built-in instance methods.
        onDelete -- optional callback which will be called
            when this weak reference ceases to be valid
            (i.e. either the object or the function is garbage
            collected). Should take a single argument,
            which will be passed a pointer to this object.
        """
        def remove(weak, self=self):
            """Set self.isDead to true when method or instance is destroyed"""
            # Fired by either weakref below; drains and runs every
            # registered onDelete callback exactly once.
            methods = self.deletionMethods[:]
            del self.deletionMethods[:]
            try:
                del self.__class__._allInstances[ self.key ]
            except KeyError:
                pass
            for function in methods:
                try:
                    if callable( function ):
                        function( self )
                except Exception, e:
                    # Callbacks run during gc; never let them propagate.
                    try:
                        traceback.print_exc()
                    except AttributeError, err:
                        print '''Exception during saferef %s cleanup function %s: %s'''%(
                            self, function, e
                        )
        self.deletionMethods = [onDelete]
        self.key = self.calculateKey( target )
        # Weakly reference both halves of the bound method.
        self.weakSelf = weakref.ref(target.im_self, remove)
        self.weakFunc = weakref.ref(target.im_func, remove)
        # Cache printable names now; the targets may be dead at __str__ time.
        self.selfName = str(target.im_self)
        self.funcName = str(target.im_func.__name__)
    def calculateKey( cls, target ):
        """Calculate the reference key for this reference

        Currently this is a two-tuple of the id()'s of the
        target object and the target function respectively.
        """
        return (id(target.im_self),id(target.im_func))
    calculateKey = classmethod( calculateKey )
    def __str__(self):
        """Give a friendly representation of the object"""
        return """%s( %s.%s )"""%(
            self.__class__.__name__,
            self.selfName,
            self.funcName,
            )
    __repr__ = __str__
    def __nonzero__( self ):
        """Whether we are still a valid reference"""
        return self() is not None
    def __cmp__( self, other ):
        """Compare with another reference"""
        if not isinstance (other,self.__class__):
            return cmp( self.__class__, type(other) )
        return cmp( self.key, other.key)
    def __call__(self):
        """Return a strong reference to the bound method

        If the target cannot be retrieved, then will
        return None, otherwise returns a bound instance
        method for our object and function.

        Note:
            You may call this method any number of times,
            as it does not invalidate the reference.
        """
        target = self.weakSelf()
        if target is not None:
            function = self.weakFunc()
            if function is not None:
                # Rebuild the bound method through the descriptor protocol.
                return function.__get__(target)
        return None
class BoundNonDescriptorMethodWeakref(BoundMethodWeakref):
    """A specialized BoundMethodWeakref, for platforms where instance methods
    are not descriptors.

    It assumes that the function name and the target attribute name are the
    same, instead of assuming that the function is a descriptor. This approach
    is equally fast, but not 100% reliable because functions can be stored on an
    attribute named differently than the function's name such as in:

        class A: pass
        def foo(self): return "foo"
        A.bar = foo

    But this shouldn't be a common use case. So, on platforms where methods
    aren't descriptors (such as Jython) this implementation has the advantage
    of working in the most cases.
    """
    def __init__(self, target, onDelete=None):
        """Return a weak-reference-like instance for a bound method

        target -- the instance-method target for the weak
            reference, must have im_self and im_func attributes
            and be reconstructable via:
                target.im_func.__get__( target.im_self )
            which is true of built-in instance methods.
        onDelete -- optional callback which will be called
            when this weak reference ceases to be valid
            (i.e. either the object or the function is garbage
            collected). Should take a single argument,
            which will be passed a pointer to this object.
        """
        # Guard the name-based lookup assumption documented on the class.
        assert getattr(target.im_self, target.__name__) == target, \
            ("method %s isn't available as the attribute %s of %s" %
             (target, target.__name__, target.im_self))
        super(BoundNonDescriptorMethodWeakref, self).__init__(target, onDelete)
    def __call__(self):
        """Return a strong reference to the bound method

        If the target cannot be retrieved, then will
        return None, otherwise returns a bound instance
        method for our object and function.

        Note:
            You may call this method any number of times,
            as it does not invalidate the reference.
        """
        target = self.weakSelf()
        if target is not None:
            function = self.weakFunc()
            if function is not None:
                # Using partial() would be another option, but it erases the
                # "signature" of the function. That is, after a function is
                # curried, the inspect module can't be used to determine how
                # many arguments the function expects, nor what keyword
                # arguments it supports, and pydispatcher needs this
                # information.
                return getattr(target, function.__name__)
        return None
def get_bound_method_weakref(target, onDelete):
    """Instantiates the appropiate BoundMethodWeakRef, depending on the details of
    the underlying class method implementation"""
    # Descriptor-protocol methods (the common case) use the default class;
    # the name-lookup fallback covers platforms where methods are plain
    # attributes.
    ref_class = (BoundMethodWeakref if hasattr(target, '__get__')
                 else BoundNonDescriptorMethodWeakref)
    return ref_class(target=target, onDelete=onDelete)
| bsd-3-clause |
scott1028/pyfpdf | tests/cover/test_simple.py | 18 | 1080 | # -*- coding: utf-8 -*-
"Basic example to test PyFPDF"
#PyFPDF-cover-test:format=PDF
#PyFPDF-cover-test:fn=simple.pdf
#PyFPDF-cover-test:hash=1fd821a42cb5029a51727a6107b623ec
#PyFPDF-cover-test:pil=yes
#PyFPDF-cover-test:res=../tutorial/logo.png
#PyFPDF-cover-test:res=flower2.jpg
#PyFPDF-cover-test:res=lena.gif
import common # test utilities
from fpdf import FPDF
import sys
import os, os.path
@common.add_unittest
def dotest(outputname, nostamp):
    """Render a one-page PDF with a text line and three images.

    outputname -- path the PDF is written to.
    nostamp -- when true, use fixed metadata/text so the output hash is
    reproducible across runs and Python versions.
    """
    doc = FPDF()
    if nostamp:
        # Replace the info dictionary writer so timestamps don't vary.
        doc._putinfo = lambda: common.test_putinfo(doc)
    doc.add_page()
    doc.set_font('Arial', '', 14)
    doc.ln(10)
    data = "TEST-TEST-TEST" if nostamp else sys.version
    doc.write(5, 'hello world %s' % data)
    logo_path = os.path.join(common.basepath, os.pardir, "tutorial", "logo.png")
    doc.image(logo_path, 50, 50)
    doc.image(os.path.join(common.basepath, "flower2.jpg"), 100, 50)
    doc.image(os.path.join(common.basepath, "lena.gif"), 50, 75)
    doc.output(outputname, 'F')
if __name__ == "__main__":
    # Run the shared cover-test harness on this module when executed directly.
    common.testmain(__file__, dotest)
| lgpl-3.0 |
mikebenfield/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 86 | 1234 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
    """Return the modified Huber loss for margins z = y_pred * y_true.

    Piecewise: 0 where z >= 1, the quadratic (1 - z)^2 where -1 <= z < 1,
    and the linear branch -4 * z where z < -1.
    """
    margin = y_pred * y_true
    return np.where(margin >= 1., 0.,
                    np.where(margin >= -1, (1. - margin) ** 2,
                             -4. * margin))
# Plot each convex surrogate of the zero-one loss as a function of the
# decision value f(x), for a positive example (y = 1).
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
# Zero-one loss is a step function, so it is drawn from explicit corner
# points rather than evaluated on xx.
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
         label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
         label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
         label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
         label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
         label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
         linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y=1, f(x))$")
plt.show()
| bsd-3-clause |
OSVR/UIforETW | bin/StripChromeSymbols.py | 3 | 9618 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script exists to work around severe performane problems when WPA or other
Windows Performance Toolkit programs try to load the symbols for the Chrome
web browser. Some combination of the enormous size of the symbols or the
enhanced debug information generated by /Zo causes WPA to take about twenty
minutes to process the symbols for chrome.dll and chrome_child.dll. When
profiling Chrome this delay happens with every new set of symbols, so with
every new version of Chrome.
This script uses xperf actions to dump a list of the symbols referenced in
an ETW trace. If chrome.dll, chrome_child.dll, content.dll, or blink_web.dll are
detected and if decoded symbols are not found in %_NT_SYMCACHE_PATH% (default is
c:\symcache) then RetrieveSymbols.exe is used to download the symbols from the
Chromium symbol server, pdbcopy.exe is used to strip the private symbols, and
then another xperf action is used to load the stripped symbols, thus converting
them to .symcache files that can be efficiently loaded by WPA.
Locally built Chrome symbols are also supported.
More details on the discovery of this slowness and the evolution of the fix
can be found here:
https://randomascii.wordpress.com/2014/11/04/slow-symbol-loading-in-microsofts-profiler-take-two/
Discussion and source code for RetrieveSymbols.exe can be found here:
https://randomascii.wordpress.com/2013/03/09/symbols-the-microsoft-way/
If "chromium-browser-symsrv" is not found in _NT_SYMBOL_PATH or RetrieveSymbols.exe
and pdbcopy.exe are not found then this script will exit early.
"""
from __future__ import print_function
import os
import sys
import re
import tempfile
import shutil
import subprocess
def main():
  """Strip and pre-convert Chrome symbols referenced by an ETW trace.

  Scans the trace for chrome.dll/chrome_child.dll/blink_web.dll/content.dll
  PDB references, downloads and strips any that lack a .symcache, then runs
  xperf to build the symcache files.  Exits early (code 0) whenever the
  required tools or symbol-server configuration are missing.
  """
  if len(sys.argv) < 2:
    print("Usage: %s trace.etl" % sys.argv[0])
    sys.exit(0)
  symbol_path = os.environ.get("_NT_SYMBOL_PATH", "")
  if symbol_path.count("chromium-browser-symsrv") == 0:
    print("Chromium symbol server is not in _NT_SYMBOL_PATH. No symbol stripping needed.")
    sys.exit(0)
  script_dir = os.path.split(sys.argv[0])[0]
  retrieve_path = os.path.join(script_dir, "RetrieveSymbols.exe")
  pdbcopy_path = os.path.join(script_dir, "pdbcopy.exe")
  # RetrieveSymbols.exe requires some support files. dbghelp.dll and symsrv.dll
  # have to be in the same directory as RetrieveSymbols.exe and pdbcopy.exe must
  # be in the path, so copy them all to the script directory.
  for third_party in ["pdbcopy.exe", "dbghelp.dll", "symsrv.dll"]:
    # NOTE(review): this existence check is relative to the current working
    # directory while the copy destination is script_dir -- confirm whether
    # os.path.exists(dest) was intended.
    if not os.path.exists(third_party):
      source = os.path.normpath(os.path.join(script_dir, r"..\third_party", \
          third_party))
      dest = os.path.normpath(os.path.join(script_dir, third_party))
      shutil.copy2(source, dest)
  if not os.path.exists(pdbcopy_path):
    print("pdbcopy.exe not found. No symbol stripping is possible.")
    sys.exit(0)
  if not os.path.exists(retrieve_path):
    print("RetrieveSymbols.exe not found. No symbol retrieval is possible.")
    sys.exit(0)
  tracename = sys.argv[1]
  # Each symbol file that we pdbcopy gets copied to a separate directory so
  # that we can support decoding symbols for multiple chrome versions without
  # filename collisions.
  tempdirs = []
  # Typical output looks like:
  # "[RSDS] PdbSig: {0e7712be-af06-4421-884b-496f833c8ec1}; Age: 33; Pdb: D:\src\chromium2\src\out\Release\initial\chrome.dll.pdb"
  # Note that this output implies a .symcache filename like this:
  # chrome.dll-0e7712beaf064421884b496f833c8ec121v2.symcache
  # In particular, note that the xperf action prints the age in decimal, but the
  # symcache names use the age in hexadecimal!
  pdb_re = re.compile(r'"\[RSDS\] PdbSig: {(.*-.*-.*-.*-.*)}; Age: (.*); Pdb: (.*)"')
  pdb_cached_re = re.compile(r"Found .*file - placed it in (.*)")
  print("Pre-translating chrome symbols from stripped PDBs to avoid 10-15 minute translation times.")
  symcache_files = []
  # Keep track of the local symbol files so that we can temporarily rename them
  # to stop xperf from using -- rename them from .pdb to .pdbx
  local_symbol_files = []
  #-tle = tolerate lost events
  #-tti = tolerate time inversions
  #-a symcache = show image and symbol identification (see xperf -help processing)
  #-dbgid = show symbol identification information (see xperf -help symcache)
  command = 'xperf -i "%s" -tle -tti -a symcache -dbgid' % tracename
  print("> %s" % command)
  found_uncached = False
  raw_command_output = subprocess.check_output(command, stderr=subprocess.STDOUT)
  command_output = str(raw_command_output).splitlines()
  for line in command_output:
    dllMatch = None
    # NOTE(review): chrome_child.dll references are filed under the
    # "chrome.dll" symcache name -- confirm this naming is intentional.
    if line.count("chrome.dll") > 0 or line.count("chrome_child.dll") > 0:
      dllMatch = "chrome.dll"
    if line.count("blink_web.dll") > 0:
      dllMatch = "blink_web.dll"
    if line.count("\\content.dll") > 0:
      dllMatch = "content.dll"
    if dllMatch:
      match = pdb_re.match(line)
      if match:
        guid, age, path = match.groups()
        guid = guid.replace("-", "")
        age = int(age) # Prefer for printing as hex
        filepart = os.path.split(path)[1]
        # Age is printed in hex here because that is what symcache names use.
        symcache_file = r"c:\symcache\%s-%s%xv2.symcache" % (dllMatch, guid, age)
        if os.path.exists(symcache_file):
          #print("Symcache file %s already exists. Skipping." % symcache_file)
          continue
        # Only print messages for chrome PDBs that aren't in the symcache
        found_uncached = True
        print("Found uncached reference to %s: %s - %s" % (filepart, guid, age, ))
        symcache_files.append(symcache_file)
        pdb_cache_path = None
        retrieve_command = "%s %s %s %s" % (retrieve_path, guid, age, filepart)
        print("  > %s" % retrieve_command)
        for subline in os.popen(retrieve_command):
          cache_match = pdb_cached_re.match(subline.strip())
          if cache_match:
            pdb_cache_path = cache_match.groups()[0]
        if not pdb_cache_path:
          # Look for locally built symbols
          if os.path.exists(path):
            pdb_cache_path = path
            local_symbol_files.append(path)
        if pdb_cache_path:
          # Strip private symbol data (-p) into a fresh temp directory.
          tempdir = tempfile.mkdtemp()
          tempdirs.append(tempdir)
          dest_path = os.path.join(tempdir, os.path.split(pdb_cache_path)[1])
          print("  Copying PDB to %s" % dest_path)
          for copyline in os.popen("%s %s %s -p" % (pdbcopy_path, pdb_cache_path, dest_path)):
            print("    %s" % copyline.strip())
        else:
          print("  Failed to retrieve symbols.")
  if tempdirs:
    # Point the symbol path at the stripped copies only.
    symbol_path = ";".join(tempdirs)
    print("Stripped PDBs are in %s. Converting to symcache files now." % symbol_path)
    os.environ["_NT_SYMBOL_PATH"] = symbol_path
    # Create a list of to/from renamed tuples
    renames = []
    error = False
    try:
      for local_pdb in local_symbol_files:
        temp_name = local_pdb + "x"
        print("Renaming %s to %s to stop unstripped PDBs from being used." % (local_pdb, temp_name))
        try:
          os.rename(local_pdb, temp_name)
        except:
          # Rename can and does throw exceptions. We must catch and continue.
          e = sys.exc_info()[0]
          print("Hit exception while renaming %s to %s. Continuing.\n%s" % (local_pdb, temp_name, e))
        else:
          renames.append((local_pdb, temp_name))
      #-build = build the symcache store for this trace (see xperf -help symcache)
      gen_command = 'xperf -i "%s" -symbols -tle -tti -a symcache -build' % tracename
      print("> %s" % gen_command)
      for line in os.popen(gen_command).readlines():
        pass # Don't print line
    except KeyboardInterrupt:
      # Catch Ctrl+C exception so that PDBs will get renamed back.
      if renames:
        print("Ctrl+C detected. Renaming PDBs back.")
        error = True
    # Always undo the .pdb -> .pdbx renames, even after Ctrl+C.
    for rename_names in renames:
      try:
        os.rename(rename_names[1], rename_names[0])
      except:
        # Rename can and does throw exceptions. We must catch and continue.
        e = sys.exc_info()[0]
        print("Hit exception while renaming %s back. Continuing.\n%s" % (rename_names[1], e))
    for symcache_file in symcache_files:
      if os.path.exists(symcache_file):
        print("%s generated." % symcache_file)
      else:
        print("Error: %s not generated." % symcache_file)
        error = True
    # Delete the stripped PDB files
    if error:
      print("Retaining PDBs to allow rerunning xperf command-line.")
      print("If re-running the command be sure to go:")
      print("set _NT_SYMBOL_PATH=%s" % symbol_path)
    else:
      for directory in tempdirs:
        shutil.rmtree(directory, ignore_errors=True)
  else:
    if found_uncached:
      print("No PDBs copied, nothing to do.")
    else:
      print("No uncached PDBS found, nothing to do.")
if __name__ == "__main__":
  main()
| apache-2.0 |
Tesora/tesora-horizon | openstack_dashboard/dashboards/project/firewalls/workflows.py | 2 | 15955 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import netaddr
from horizon import exceptions
from horizon import forms
from horizon.utils import validators
from horizon import workflows
from openstack_dashboard import api
# Shared validator: accepts a single port or a colon-separated port range.
port_validator = validators.validate_port_or_colon_separated_port_range
class AddRuleAction(workflows.Action):
    """Workflow action (form) collecting the attributes of a firewall rule."""
    name = forms.CharField(
        max_length=80,
        label=_("Name"),
        required=False)
    description = forms.CharField(
        max_length=80,
        label=_("Description"),
        required=False)
    protocol = forms.ChoiceField(
        label=_("Protocol"),
        choices=[('tcp', _('TCP')),
                 ('udp', _('UDP')),
                 ('icmp', _('ICMP')),
                 ('any', _('ANY'))],)
    action = forms.ChoiceField(
        label=_("Action"),
        choices=[('allow', _('ALLOW')),
                 ('deny', _('DENY')),
                 ('reject', _('REJECT'))],)
    # mask=True permits CIDR notation in addition to plain addresses.
    source_ip_address = forms.IPField(
        label=_("Source IP Address/Subnet"),
        version=forms.IPv4 | forms.IPv6,
        required=False, mask=True)
    destination_ip_address = forms.IPField(
        label=_("Destination IP Address/Subnet"),
        version=forms.IPv4 | forms.IPv6,
        required=False, mask=True)
    source_port = forms.CharField(
        max_length=80,
        label=_("Source Port/Port Range"),
        required=False,
        validators=[port_validator])
    destination_port = forms.CharField(
        max_length=80,
        label=_("Destination Port/Port Range"),
        required=False,
        validators=[port_validator])
    ip_version = forms.ChoiceField(
        label=_("IP Version"), required=False,
        choices=[('4', '4'), ('6', '6')])
    shared = forms.BooleanField(
        label=_("Shared"), initial=False, required=False)
    enabled = forms.BooleanField(
        label=_("Enabled"), initial=True, required=False)
    def __init__(self, request, *args, **kwargs):
        super(AddRuleAction, self).__init__(request, *args, **kwargs)
    def _check_ip_addr_and_ip_version(self, cleaned_data):
        # Records a form error if either given address disagrees with the
        # selected IP version.
        ip_version = int(str(cleaned_data.get('ip_version')))
        src_ip = cleaned_data.get('source_ip_address')
        dst_ip = cleaned_data.get('destination_ip_address')
        msg = _('Source/Destination Network Address and IP version '
                'are inconsistent. Please make them consistent.')
        if (src_ip and
                netaddr.IPNetwork(src_ip).version != ip_version):
            self._errors['ip_version'] = self.error_class([msg])
        elif (dst_ip and
                netaddr.IPNetwork(dst_ip).version != ip_version):
            self._errors['ip_version'] = self.error_class([msg])
    def clean(self):
        # Extends the standard cleaning with the cross-field IP check above.
        cleaned_data = super(AddRuleAction, self).clean()
        self._check_ip_addr_and_ip_version(cleaned_data)
    class Meta(object):
        name = _("Rule")
        permissions = ('openstack.services.network',)
        help_text = _("Create a firewall rule.\n\n"
                      "A Firewall rule is an association of the following "
                      "attributes:\n\n"
                      "<li>IP Addresses: The addresses from/to which the "
                      "traffic filtration needs to be applied.</li>"
                      "<li>IP Version: The type of IP packets (IP V4/V6) "
                      "that needs to be filtered.</li>"
                      "<li>Protocol: Type of packets (UDP, ICMP, TCP, Any) "
                      "that needs to be checked.</li>"
                      "<li>Action: Action is the type of filtration "
                      "required, it can be Reject/Deny/Allow data "
                      "packets.</li>\n"
                      "The protocol and action fields are required, all "
                      "others are optional.")
class AddRuleStep(workflows.Step):
    """Workflow step wrapping AddRuleAction and filtering its output."""
    action_class = AddRuleAction
    contributes = ("name", "description", "protocol", "action",
                   "source_ip_address", "source_port",
                   "destination_ip_address", "destination_port",
                   "enabled", "shared", "ip_version")
    def contribute(self, data, context):
        # Drop 'any' protocol and empty optional fields so they are not sent
        # to the fwaas rule_create API.
        # NOTE(review): when `data` is falsy this method implicitly returns
        # None rather than the context -- confirm the base class tolerates it.
        context = super(AddRuleStep, self).contribute(data, context)
        if data:
            if context['protocol'] == 'any':
                del context['protocol']
            for field in ['source_port',
                          'destination_port',
                          'source_ip_address',
                          'destination_ip_address']:
                if not context[field]:
                    del context[field]
            return context
class AddRule(workflows.Workflow):
    """Workflow that creates a single firewall rule via the fwaas API."""
    slug = "addrule"
    name = _("Add Rule")
    finalize_button_name = _("Add")
    success_message = _('Added Rule "%s".')
    failure_message = _('Unable to add Rule "%s".')
    success_url = "horizon:project:firewalls:index"
    # fwaas is designed to support a wide range of vendor
    # firewalls. Considering the multitude of vendor firewall
    # features in place today, firewall_rule definition can
    # involve more complex configuration over time. Hence,
    # a workflow instead of a single form is used for
    # firewall_rule add to be ready for future extension.
    default_steps = (AddRuleStep,)
    def format_status_message(self, message):
        # Substitute the rule name into the success/failure template.
        return message % self.context.get('name')
    def handle(self, request, context):
        # Returns True on success; on failure reports the error and returns
        # False so the workflow engine shows the failure message.
        try:
            api.fwaas.rule_create(request, **context)
            return True
        except Exception as e:
            msg = self.format_status_message(self.failure_message) + str(e)
            exceptions.handle(request, msg)
            return False
class SelectRulesAction(workflows.Action):
    """Workflow action listing the tenant's unassigned rules as checkboxes."""
    rule = forms.MultipleChoiceField(
        label=_("Rules"),
        required=False,
        widget=forms.CheckboxSelectMultiple(),
        help_text=_("Create a policy with selected rules."))
    class Meta(object):
        name = _("Rules")
        permissions = ('openstack.services.network',)
        help_text = _("Select rules for your policy.")
    def populate_rule_choices(self, request, context):
        # Offers only rules not yet attached to a policy, sorted by display
        # name; on API failure reports the error and offers an empty list.
        try:
            tenant_id = self.request.user.tenant_id
            rules = api.fwaas.rule_list_for_tenant(request, tenant_id)
            rules = sorted(rules,
                           key=lambda rule: rule.name_or_id)
            rule_list = [(rule.id, rule.name_or_id) for rule in rules
                         if not rule.firewall_policy_id]
        except Exception as e:
            rule_list = []
            exceptions.handle(request,
                              _('Unable to retrieve rules (%(error)s).') % {
                                  'error': str(e)})
        return rule_list
class SelectRulesStep(workflows.Step):
    """Workflow step contributing the selected rule ids as firewall_rules."""
    action_class = SelectRulesAction
    template_name = "project/firewalls/_update_rules.html"
    contributes = ("firewall_rules",)
    def contribute(self, data, context):
        # Reads the raw POST list so multiple checkbox values are captured.
        # NOTE(review): implicitly returns None when `data` is falsy --
        # confirm the base class tolerates it.
        if data:
            rules = self.workflow.request.POST.getlist("rule")
            if rules:
                rules = [r for r in rules if r != '']
                context['firewall_rules'] = rules
            return context
class SelectRoutersAction(workflows.Action):
    """Workflow action listing routers without a firewall as checkboxes."""
    router = forms.MultipleChoiceField(
        label=_("Routers"),
        required=False,
        widget=forms.CheckboxSelectMultiple(),
        help_text=_("Create a firewall with selected routers."))
    class Meta(object):
        name = _("Routers")
        permissions = ('openstack.services.network',)
        help_text = _("Select routers for your firewall.")
    def populate_router_choices(self, request, context):
        # Only routers not already associated with a firewall are offered;
        # on API failure reports the error and offers an empty list.
        try:
            tenant_id = self.request.user.tenant_id
            routers_list = api.fwaas.firewall_unassociated_routers_list(
                request, tenant_id)
        except Exception as e:
            routers_list = []
            exceptions.handle(request,
                              _('Unable to retrieve routers (%(error)s).') % {
                                  'error': str(e)})
        routers_list = [(router.id, router.name_or_id)
                        for router in routers_list]
        return routers_list
class SelectRoutersStep(workflows.Step):
    """Workflow step contributing the selected router ids."""
    action_class = SelectRoutersAction
    template_name = "project/firewalls/_update_routers.html"
    # NOTE(review): "all_routers_selected" and "Select No Routers" appear in
    # `contributes` but are never set by contribute() below -- confirm they
    # are consumed elsewhere or are leftovers.
    contributes = ("router_ids", "all_routers_selected",
                   "Select No Routers")
    def contribute(self, data, context):
        # An empty selection still contributes an explicit empty list.
        # NOTE(review): implicitly returns None when `data` is falsy --
        # confirm the base class tolerates it.
        if data:
            routers = self.workflow.request.POST.getlist("router")
            if routers:
                routers = [r for r in routers if r != '']
                context['router_ids'] = routers
            else:
                context['router_ids'] = []
            return context
class AddPolicyAction(workflows.Action):
    """Workflow action (form) collecting the attributes of a firewall policy."""
    name = forms.CharField(max_length=80,
                           label=_("Name"))
    description = forms.CharField(max_length=80,
                                  label=_("Description"),
                                  required=False)
    shared = forms.BooleanField(label=_("Shared"),
                                initial=False,
                                required=False)
    audited = forms.BooleanField(label=_("Audited"),
                                 initial=False,
                                 required=False)
    def __init__(self, request, *args, **kwargs):
        super(AddPolicyAction, self).__init__(request, *args, **kwargs)
    class Meta(object):
        name = _("Policy")
        permissions = ('openstack.services.network',)
        help_text = _("Create a firewall policy with an ordered list "
                      "of firewall rules.\n\n"
                      "A firewall policy is an ordered collection of firewall "
                      "rules. So if the traffic matches the first rule, the "
                      "other rules are not executed. If the traffic does not "
                      "match the current rule, then the next rule is "
                      "executed. A firewall policy has the following "
                      "attributes:\n\n"
                      "<li>Shared: A firewall policy can be shared across "
                      "tenants. Thus it can also be made part of an audit "
                      "workflow wherein the firewall policy can be audited "
                      "by the relevant entity that is authorized.</li>"
                      "<li>Audited: When audited is set to True, it indicates "
                      "that the firewall policy has been audited. "
                      "Each time the firewall policy or the associated "
                      "firewall rules are changed, this attribute will be "
                      "set to False and will have to be explicitly set to "
                      "True through an update operation.</li>\n"
                      "The name field is required, all others are optional.")
class AddPolicyStep(workflows.Step):
    """Workflow step collecting the basic firewall policy attributes."""

    action_class = AddPolicyAction
    contributes = ("name", "description", "shared", "audited")

    def contribute(self, data, context):
        """Merge the step's form data into the workflow context.

        Bug fix: the original returned the context only when ``data`` was
        truthy, implicitly returning ``None`` otherwise; a workflow step's
        ``contribute`` must always hand the context back to the caller.
        """
        context = super(AddPolicyStep, self).contribute(data, context)
        return context
class AddPolicy(workflows.Workflow):
    """Two-step workflow that creates a firewall policy with its rules."""

    slug = "addpolicy"
    name = _("Add Policy")
    finalize_button_name = _("Add")
    success_message = _('Added Policy "%s".')
    failure_message = _('Unable to add Policy "%s".')
    success_url = "horizon:project:firewalls:index"
    default_steps = (AddPolicyStep, SelectRulesStep)

    def format_status_message(self, message):
        # Interpolate the policy name into the success/failure template.
        return message % self.context.get('name')

    def handle(self, request, context):
        """Create the policy via FWaaS; report success to the workflow."""
        try:
            api.fwaas.policy_create(request, **context)
        except Exception as e:
            exceptions.handle(
                request,
                self.format_status_message(self.failure_message) + str(e))
            return False
        return True
class AddFirewallAction(workflows.Action):
    """Form action collecting the attributes of a new firewall."""

    name = forms.CharField(max_length=80,
                           label=_("Name"),
                           required=False)
    description = forms.CharField(max_length=80,
                                  label=_("Description"),
                                  required=False)
    firewall_policy_id = forms.ChoiceField(label=_("Policy"))
    admin_state_up = forms.ChoiceField(choices=[(True, _('UP')),
                                                (False, _('DOWN'))],
                                       label=_("Admin State"))

    def __init__(self, request, *args, **kwargs):
        super(AddFirewallAction, self).__init__(request, *args, **kwargs)
        # Populate the policy drop-down with the tenant's policies,
        # sorted by name, behind a placeholder entry.
        choices = [('', _("Select a Policy"))]
        try:
            tenant_id = self.request.user.tenant_id
            policies = sorted(
                api.fwaas.policy_list_for_tenant(request, tenant_id),
                key=lambda policy: policy.name)
        except Exception as e:
            exceptions.handle(
                request,
                _('Unable to retrieve policy list (%(error)s).') % {
                    'error': str(e)})
            policies = []
        choices.extend((p.id, p.name_or_id) for p in policies)
        self.fields['firewall_policy_id'].choices = choices

    class Meta(object):
        name = _("Firewall")
        permissions = ('openstack.services.network',)
        help_text = _("Create a firewall based on a policy.\n\n"
                      "A firewall represents a logical firewall resource that "
                      "a tenant can instantiate and manage. A firewall must "
                      "be associated with one policy, all other fields are "
                      "optional.")
class AddFirewallStep(workflows.Step):
    """Workflow step collecting the firewall attributes."""

    action_class = AddFirewallAction
    contributes = ("name", "firewall_policy_id", "description",
                   "admin_state_up")

    def contribute(self, data, context):
        context = super(AddFirewallStep, self).contribute(data, context)
        # The form posts the admin state as the string 'True'/'False';
        # coerce it to a real boolean before handing it to the API.
        context['admin_state_up'] = (context['admin_state_up'] == 'True')
        return context
class AddFirewall(workflows.Workflow):
    """Single-step workflow that creates a firewall from a policy."""

    slug = "addfirewall"
    name = _("Add Firewall")
    finalize_button_name = _("Add")
    success_message = _('Added Firewall "%s".')
    failure_message = _('Unable to add Firewall "%s".')
    success_url = "horizon:project:firewalls:index"
    # fwaas is designed to support a wide range of vendor
    # firewalls. Considering the multitude of vendor firewall
    # features in place today, firewall definition can
    # involve more complex configuration over time. Hence,
    # a workflow instead of a single form is used for
    # firewall_rule add to be ready for future extension.
    default_steps = (AddFirewallStep, )

    def format_status_message(self, message):
        # Interpolate the firewall name into the success/failure template.
        return message % self.context.get('name')

    def handle(self, request, context):
        """Create the firewall via FWaaS; report success to the workflow."""
        try:
            api.fwaas.firewall_create(request, **context)
        except Exception as e:
            exceptions.handle(
                request,
                self.format_status_message(self.failure_message) + str(e))
            return False
        return True
| apache-2.0 |
tanmaykm/edx-platform | lms/djangoapps/support/tests/test_views.py | 17 | 15131 | # coding: UTF-8
"""
Tests for support views.
"""
from datetime import datetime, timedelta
import itertools
import json
import re
import ddt
from django.core.urlresolvers import reverse
from nose.plugins.attrib import attr
from pytz import UTC
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from lms.djangoapps.verify_student.models import VerificationDeadline
from student.models import CourseEnrollment, ManualEnrollmentAudit, ENROLLED_TO_ENROLLED
from student.roles import GlobalStaff, SupportStaffRole
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class SupportViewTestCase(ModuleStoreTestCase):
    """
    Base class for support view tests.
    """

    # Credentials shared by every support-staff test user in this module.
    USERNAME = "support"
    EMAIL = "support@example.com"
    PASSWORD = "support"

    def setUp(self):
        """Create a user and log in. """
        super(SupportViewTestCase, self).setUp()
        # A fresh user and course per test; subclasses grant roles as needed.
        self.user = UserFactory(username=self.USERNAME, email=self.EMAIL, password=self.PASSWORD)
        self.course = CourseFactory.create()
        success = self.client.login(username=self.USERNAME, password=self.PASSWORD)
        self.assertTrue(success, msg="Could not log in")
@attr(shard=3)
@ddt.ddt
class SupportViewAccessTests(SupportViewTestCase):
    """
    Tests for access control of support views.
    """

    # Cross product of every support URL with (role, expected access):
    # global staff and support staff get in, everyone else gets a 403.
    @ddt.data(*(
        (url_name, role, has_access)
        for (url_name, (role, has_access))
        in itertools.product((
            'support:index',
            'support:certificates',
            'support:refund',
            'support:enrollment',
            'support:enrollment_list'
        ), (
            (GlobalStaff, True),
            (SupportStaffRole, True),
            (None, False)
        ))
    ))
    @ddt.unpack
    def test_access(self, url_name, role, has_access):
        if role is not None:
            role().add_users(self.user)
        url = reverse(url_name)
        response = self.client.get(url)
        if has_access:
            self.assertEqual(response.status_code, 200)
        else:
            self.assertEqual(response.status_code, 403)

    @ddt.data(
        "support:index",
        "support:certificates",
        "support:refund",
        "support:enrollment",
        "support:enrollment_list"
    )
    def test_require_login(self, url_name):
        url = reverse(url_name)
        # Log out then try to retrieve the page
        self.client.logout()
        response = self.client.get(url)
        # Expect a redirect to the login page
        redirect_url = "{login_url}?next={original_url}".format(
            login_url=reverse("signin_user"),
            original_url=url,
        )
        self.assertRedirects(response, redirect_url)
class SupportViewIndexTests(SupportViewTestCase):
    """
    Tests for the support index view.
    """

    # Links that must appear on the index page.
    EXPECTED_URL_NAMES = [
        "support:certificates",
        "support:refund",
    ]

    def setUp(self):
        """Make the user support staff. """
        super(SupportViewIndexTests, self).setUp()
        SupportStaffRole().add_users(self.user)

    def test_index(self):
        response = self.client.get(reverse("support:index"))
        self.assertContains(response, "Support")
        # Check that all the expected links appear on the index page.
        for url_name in self.EXPECTED_URL_NAMES:
            self.assertContains(response, reverse(url_name))
class SupportViewCertificatesTests(SupportViewTestCase):
    """
    Tests for the certificates support view.

    The page embeds its initial user/course filters in the JavaScript
    configuration; these tests assert on that rendered configuration.
    """
    def setUp(self):
        """Make the user support staff. """
        super(SupportViewCertificatesTests, self).setUp()
        SupportStaffRole().add_users(self.user)

    def test_certificates_no_filter(self):
        # Check that an empty initial filter is passed to the JavaScript client correctly.
        response = self.client.get(reverse("support:certificates"))
        self.assertContains(response, "userFilter: ''")

    def test_certificates_with_user_filter(self):
        # Check that an initial filter is passed to the JavaScript client.
        url = reverse("support:certificates") + "?user=student@example.com"
        response = self.client.get(url)
        self.assertContains(response, "userFilter: 'student@example.com'")

    def test_certificates_along_with_course_filter(self):
        # Check that an initial filter is passed to the JavaScript client.
        url = reverse("support:certificates") + "?user=student@example.com&course_id=" + unicode(self.course.id)
        response = self.client.get(url)
        self.assertContains(response, "userFilter: 'student@example.com'")
        self.assertContains(response, "courseFilter: '" + unicode(self.course.id) + "'")
@ddt.ddt
class SupportViewEnrollmentsTests(SharedModuleStoreTestCase, SupportViewTestCase):
    """Tests for the enrollment support view."""

    def setUp(self):
        super(SupportViewEnrollmentsTests, self).setUp()
        # Grant the logged-in user support permissions.
        SupportStaffRole().add_users(self.user)

        # Course with a non-ASCII display name plus a student enrolled
        # in audit mode; every course mode (including credit) is created.
        self.course = CourseFactory(display_name=u'teꜱᴛ')
        self.student = UserFactory.create(username='student', email='test@example.com', password='test')

        for mode in (
                CourseMode.AUDIT, CourseMode.PROFESSIONAL, CourseMode.CREDIT_MODE,
                CourseMode.NO_ID_PROFESSIONAL_MODE, CourseMode.VERIFIED, CourseMode.HONOR
        ):
            CourseModeFactory.create(mode_slug=mode, course_id=self.course.id)  # pylint: disable=no-member

        # Verification deadline a year out so verified upgrades stay open.
        self.verification_deadline = VerificationDeadline(
            course_key=self.course.id,  # pylint: disable=no-member
            deadline=datetime.now(UTC) + timedelta(days=365)
        )
        self.verification_deadline.save()

        CourseEnrollmentFactory.create(mode=CourseMode.AUDIT, user=self.student, course_id=self.course.id)  # pylint: disable=no-member

        self.url = reverse('support:enrollment_list', kwargs={'username_or_email': self.student.username})

    def assert_enrollment(self, mode):
        """
        Assert that the student's enrollment has the correct mode.
        """
        enrollment = CourseEnrollment.get_enrollment(self.student, self.course.id)  # pylint: disable=no-member
        self.assertEqual(enrollment.mode, mode)

    @ddt.data('username', 'email')
    def test_get_enrollments(self, search_string_type):
        # The endpoint accepts either a username or an email to look up.
        url = reverse(
            'support:enrollment_list',
            kwargs={'username_or_email': getattr(self.student, search_string_type)}
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(len(data), 1)
        self.assertDictContainsSubset({
            'mode': CourseMode.AUDIT,
            'manual_enrollment': {},
            'user': self.student.username,
            'course_id': unicode(self.course.id),  # pylint: disable=no-member
            'is_active': True,
            'verified_upgrade_deadline': None,
        }, data[0])
        # Credit mode is deliberately absent from the selectable modes.
        self.assertEqual(
            {CourseMode.VERIFIED, CourseMode.AUDIT, CourseMode.HONOR,
             CourseMode.NO_ID_PROFESSIONAL_MODE, CourseMode.PROFESSIONAL},
            {mode['slug'] for mode in data[0]['course_modes']}
        )

    def test_get_manual_enrollment_history(self):
        # Seed an audit record, then check it is reflected in the response.
        ManualEnrollmentAudit.create_manual_enrollment_audit(
            self.user,
            self.student.email,
            ENROLLED_TO_ENROLLED,
            'Financial Assistance',
            CourseEnrollment.objects.get(course_id=self.course.id, user=self.student)  # pylint: disable=no-member
        )
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertDictContainsSubset({
            'enrolled_by': self.user.email,
            'reason': 'Financial Assistance',
        }, json.loads(response.content)[0]['manual_enrollment'])

    @ddt.data('username', 'email')
    def test_change_enrollment(self, search_string_type):
        self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
        url = reverse(
            'support:enrollment_list',
            kwargs={'username_or_email': getattr(self.student, search_string_type)}
        )
        response = self.client.post(url, data={
            'course_id': unicode(self.course.id),  # pylint: disable=no-member
            'old_mode': CourseMode.AUDIT,
            'new_mode': CourseMode.VERIFIED,
            'reason': 'Financial Assistance'
        })
        self.assertEqual(response.status_code, 200)
        # A successful change must leave a manual-enrollment audit trail.
        self.assertIsNotNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
        self.assert_enrollment(CourseMode.VERIFIED)

    # Each tuple: (POST payload, regex the error response must match).
    @ddt.data(
        ({}, r"The field '\w+' is required."),
        ({'course_id': 'bad course key'}, 'Could not parse course key.'),
        ({
            'course_id': 'course-v1:TestX+T101+2015',
            'old_mode': CourseMode.AUDIT,
            'new_mode': CourseMode.VERIFIED,
            'reason': ''
        }, 'Could not find enrollment for user'),
        ({
            'course_id': None,
            'old_mode': CourseMode.HONOR,
            'new_mode': CourseMode.VERIFIED,
            'reason': ''
        }, r'User \w+ is not enrolled with mode ' + CourseMode.HONOR),
        ({
            'course_id': 'course-v1:TestX+T101+2015',
            'old_mode': CourseMode.AUDIT,
            'new_mode': CourseMode.CREDIT_MODE,
            'reason': 'Enrollment cannot be changed to credit mode'
        }, '')
    )
    @ddt.unpack
    def test_change_enrollment_bad_data(self, data, error_message):
        # `self` isn't available from within the DDT declaration, so
        # assign the course ID here
        if 'course_id' in data and data['course_id'] is None:
            data['course_id'] = unicode(self.course.id)  # pylint: disable=no-member
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, 400)
        self.assertIsNotNone(re.match(error_message, response.content))
        # Bad requests must not change the enrollment or leave an audit row.
        self.assert_enrollment(CourseMode.AUDIT)
        self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))

    @ddt.data('honor', 'audit', 'verified', 'professional', 'no-id-professional')
    def test_update_enrollment_for_all_modes(self, new_mode):
        """ Verify support can changed the enrollment to all available modes
        except credit. """
        self.assert_update_enrollment('username', new_mode)

    @ddt.data('honor', 'audit', 'verified', 'professional', 'no-id-professional')
    def test_update_enrollment_for_ended_course(self, new_mode):
        """ Verify support can changed the enrollment of archived course. """
        self.set_course_end_date_and_expiry()
        self.assert_update_enrollment('username', new_mode)

    def test_update_enrollment_with_credit_mode_throws_error(self):
        """ Verify that enrollment cannot be changed to credit mode. """
        self.assert_update_enrollment('username', CourseMode.CREDIT_MODE)

    @ddt.data('username', 'email')
    def test_get_enrollments_with_expired_mode(self, search_string_type):
        """ Verify that page can get the all modes with archived course. """
        self.set_course_end_date_and_expiry()
        url = reverse(
            'support:enrollment_list',
            kwargs={'username_or_email': getattr(self.student, search_string_type)}
        )
        response = self.client.get(url)
        self._assert_generated_modes(response)

    @ddt.data('username', 'email')
    def test_update_enrollments_with_expired_mode(self, search_string_type):
        """ Verify that enrollment can be updated to verified mode. """
        self.set_course_end_date_and_expiry()
        self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
        self.assert_update_enrollment(search_string_type, CourseMode.VERIFIED)

    def _assert_generated_modes(self, response):
        """Dry method to generate course modes dict and test with response data."""
        modes = CourseMode.modes_for_course(self.course.id, include_expired=True)  # pylint: disable=no-member
        modes_data = []
        for mode in modes:
            expiry = mode.expiration_datetime.strftime('%Y-%m-%dT%H:%M:%SZ') if mode.expiration_datetime else None
            modes_data.append({
                'sku': mode.sku,
                'expiration_datetime': expiry,
                'name': mode.name,
                'currency': mode.currency,
                'bulk_sku': mode.bulk_sku,
                'min_price': mode.min_price,
                'suggested_prices': mode.suggested_prices,
                'slug': mode.slug,
                'description': mode.description
            })
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(len(data), 1)
        self.assertEqual(
            modes_data,
            data[0]['course_modes']
        )
        self.assertEqual(
            {CourseMode.VERIFIED, CourseMode.AUDIT, CourseMode.NO_ID_PROFESSIONAL_MODE,
             CourseMode.PROFESSIONAL, CourseMode.HONOR},
            {mode['slug'] for mode in data[0]['course_modes']}
        )

    def assert_update_enrollment(self, search_string_type, new_mode):
        """ Dry method to update the enrollment and assert response."""
        self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
        url = reverse(
            'support:enrollment_list',
            kwargs={'username_or_email': getattr(self.student, search_string_type)}
        )
        response = self.client.post(url, data={
            'course_id': unicode(self.course.id),  # pylint: disable=no-member
            'old_mode': CourseMode.AUDIT,
            'new_mode': new_mode,
            'reason': 'Financial Assistance'
        })
        # Enrollment cannot be changed to credit mode.
        if new_mode == CourseMode.CREDIT_MODE:
            self.assertEqual(response.status_code, 400)
        else:
            self.assertEqual(response.status_code, 200)
            self.assertIsNotNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
            self.assert_enrollment(new_mode)

    def set_course_end_date_and_expiry(self):
        """ Set the course-end date and expire its verified mode."""
        self.course.start = datetime(year=1970, month=1, day=1, tzinfo=UTC)
        self.course.end = datetime(year=1970, month=1, day=10, tzinfo=UTC)
        # change verified mode expiry.
        verified_mode = CourseMode.objects.get(
            course_id=self.course.id,  # pylint: disable=no-member
            mode_slug=CourseMode.VERIFIED
        )
        verified_mode.expiration_datetime = datetime(year=1970, month=1, day=9, tzinfo=UTC)
        verified_mode.save()
| agpl-3.0 |
salamer/jolla | example/app.py | 1 | 1317 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from jolla import server
from jolla import plugins
from jolla import session
from jolla import HTTP404Error
from jolla import SessionError
session = session()
def index(request):
    """Serve the rendered index template for the site root."""
    return plugins.render('index.html')
def chinese(request):
    """Return a special reply when the expected form value is posted.

    ``request`` is the jolla request dict; ``request['data']`` may be
    missing or lack the ``'ww'`` key, in which case the default reply
    is returned.

    Bug fix: the original used a bare ``except: pass``, which swallowed
    every exception (including SystemExit/KeyboardInterrupt); only the
    lookup failures are expected here.
    """
    try:
        matched = request['data']['ww'] == '海贼王'
    except (KeyError, TypeError):
        matched = False
    return 'great anime' if matched else 'yeah!'
def data(request):
    """Return the captured URL parameter ``id`` as a JSON payload."""
    payload = {'data': request['id']}
    return plugins.render_json(payload)
def add(request):
    """Store the value 'ww' under session key 'qq', then acknowledge."""
    session.add_value('qq', 'ww')
    return 'yes'
def get(request):
    """Fetch session key 'qq'; translate a missing value into a 404."""
    try:
        return session.get_value('qq')
    except SessionError:
        raise HTTP404Error
def blog(request):
    """GET: return the stored name as JSON; POST: store the submitted name."""
    if request['method'] == 'GET':
        return plugins.render_json({'name': session.get_value('name')})
    else:
        if request['method'] == 'POST':
            session.add_value('name', request['data']['name'])
            # NOTE(review): any method other than GET/POST falls through
            # and implicitly returns None -- confirm intended.
            return 'ok'
class app(server.WebApp):
    # URL routing table: (path pattern, handler function) pairs.
    # '<id>' captures a path segment into request['id'].
    urls = [
        (r'/', index),
        (r'/data/<id>', data),
        (r'/data', data),
        (r'/add', add),
        (r'/get', get),
        (r'/blog', blog),
        (r'/chinese', chinese)
    ]
if __name__ == '__main__':
    # NOTE(review): rebinding ``server`` shadows the imported module name;
    # harmless here because the module is not referenced again afterwards.
    server = server.jolla_server(app)
    server.run_server()
| apache-2.0 |
dou800/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/sqlite3/dbapi2.py | 55 | 2644 | # -*- coding: iso-8859-1 -*-
# pysqlite2/dbapi2.py: the DB-API 2.0 interface
#
# Copyright (C) 2004-2005 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import datetime
import time
from _sqlite3 import *
# DB-API 2.0 module-level attributes (PEP 249).
paramstyle = "qmark"    # SQL placeholders use the '?' question-mark style
threadsafety = 1        # threads may share the module, but not connections
apilevel = "2.0"

# DB-API type constructors map directly onto the ``datetime`` types.
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def DateFromTicks(ticks):
    """Construct a DB-API ``Date`` from a POSIX timestamp, in local time."""
    year, month, day = time.localtime(ticks)[:3]
    return Date(year, month, day)
def TimeFromTicks(ticks):
    """Construct a DB-API ``Time`` from a POSIX timestamp, in local time."""
    hour, minute, second = time.localtime(ticks)[3:6]
    return Time(hour, minute, second)
def TimestampFromTicks(ticks):
    """Construct a DB-API ``Timestamp`` from a POSIX timestamp, local time."""
    year, month, day, hour, minute, second = time.localtime(ticks)[:6]
    return Timestamp(year, month, day, hour, minute, second)
# Version tuples parsed from the strings exported by ``_sqlite3``.
version_info = tuple([int(x) for x in version.split(".")])
sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")])

# DB-API BLOB constructor (Python 2 ``buffer``).
Binary = buffer
def register_adapters_and_converters():
    """Register the default ``datetime`` adapters/converters with sqlite3."""
    def adapt_date(val):
        # date -> ISO-8601 'YYYY-MM-DD'
        return val.isoformat()

    def adapt_datetime(val):
        # datetime -> 'YYYY-MM-DD HH:MM:SS[.ffffff]' (space separator)
        return val.isoformat(" ")

    def convert_date(val):
        return datetime.date(*map(int, val.split("-")))

    def convert_timestamp(val):
        datepart, timepart = val.split(" ")
        year, month, day = map(int, datepart.split("-"))
        timepart_full = timepart.split(".")
        hours, minutes, seconds = map(int, timepart_full[0].split(":"))
        if len(timepart_full) == 2:
            # Pad/truncate the fractional part to exactly six digits
            # (left-aligned, zero-filled) so it reads as microseconds.
            microseconds = int('{:0<6.6}'.format(timepart_full[1].decode()))
        else:
            microseconds = 0

        val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds)
        return val

    register_adapter(datetime.date, adapt_date)
    register_adapter(datetime.datetime, adapt_datetime)
    register_converter("date", convert_date)
    register_converter("timestamp", convert_timestamp)
# Install the default date/timestamp adapters and converters on import.
register_adapters_and_converters()

# Clean up namespace
del(register_adapters_and_converters)
| mit |
speksofdust/BeyondDreams | beyonddreams/screens/screen.py | 1 | 6125 | # ---------------------------------------------------------------------------- #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# ---------------------------------------------------------------------------- #
from chareditor import CharEditorScreen
from Title import Title
from game import GameScreen
from game import GameSetupScreen
# Constant screen names -- the keys of the ``screens`` registry below.
TITLE = 'title'
CHAR_EDITOR = 'character editor'
GAME = 'game'
GAME_SETUP = 'game setup'
CONFIG = 'config'

# Registry mapping screen names to their screen classes.
# Bug fix: the original mapped TITLE to ``TitleScreen`` and GAME_SETUP to
# ``GameSetup`` -- neither name is imported; the imports provide ``Title``
# and ``GameSetupScreen``.
screens = {
    TITLE: Title,
    CHAR_EDITOR: CharEditorScreen,
    GAME: GameScreen,
    GAME_SETUP: GameSetupScreen,
    #CONFIG: ConfigScreen,
    #BESTIARY: BestiaryScreen,
    #CHAR_VIEWER: CharViewerScreen,
}

# Public API of this module. Bug fix: ``__all__`` was assigned twice, the
# second time as the plain string 'screens' (which made ``from module
# import *`` iterate its characters); the two are merged into one list.
__all__ = ["TITLE", "CHAR_EDITOR", "GAME", "GAME_SETUP", "CONFIG",
           "screens"]
class ScreenNav:
    """Handles the navagation between 'screen objects'.

    Bug fixes relative to the original: method references that were never
    called (``self.go_back``, ``end``, ``cleanup``, ``exit_to_title``,
    ``quit``) are now invoked; ``valueError`` -> ``ValueError``; the
    undefined name ``Screen`` -> ``BDScreen``; ``can_go_back`` no longer
    crashes when no screen is active; ``_change_screen`` keep-alive branch
    now actually installs the new screen.
    """

    def __init__(self):
        self._last = None
        self._current = None

    @property
    def current(self):
        """The current screen."""
        return self._current

    @property
    def last(self):
        """The last screen."""
        return self._last

    def can_go_back(self):
        """True if can return to the previous screen."""
        if self._current is None:
            return False
        return self._last is not None or self._current._can_go_back

    # ---- Handlers ------------------------------------------------------ #
    def go_back(self):
        """Go back to the previous screen, if the current screen permits it."""
        if self.can_go_back():
            self._change_screen(self._last, self._current._cleanup_on_go_back)

    def _goto(self, screen):
        """Goto given screen (a name from ``screens`` or a BDScreen)."""
        if screen != self._current:
            if screen == self._last:
                self.go_back()
                return
            if isinstance(screen, str) and screen in screens:
                screen = screens[screen]
            if isinstance(screen, BDScreen):
                self._change_screen(screen, self._current._cleanup_on_goto)
            else:
                raise ValueError("invalid screen: {}".format(screen))

    def exit_to_title(self):
        """Exit from the current screen and go back to the title screen."""
        if self._current.name != TITLE:
            self._current.exit_to_title()

    def quit_to_title(self):
        self._current.quit()

    def _change_screen(self, n, cleanup):
        # helper for go_back and _goto
        if cleanup:  # kill the current screen
            old = self._current
            old.end()
            self._current = n
            self._last = None
            old.cleanup()
        else:  # keep both screens alive
            self._last, self._current = self._current, n
class BDScreen:
    """Base class for Beyond Dreams "Screen" Objects.

    This defines what will be displayed when
    'session.screen' = a given screen object.

    Bug fixes relative to the original: ``__eq__``/``__ne__`` had
    unbalanced parentheses (SyntaxError); ``__ne__`` compared against the
    bare name ``name`` (NameError); comparing to a non-matching string
    raised TypeError instead of returning False; ``start`` accessed
    ``self.pre_run``/``self.run`` without calling them; the TypeError
    message used the nonexistent ``x.type`` attribute.
    """

    _name = ""  # Name must match key in 'screens'

    def __init__(self):
        # Bool States
        self._running = False
        self._can_go_back = False
        self._cleanup_on_go_back = True
        self._cleanup_on_goto = True

    # eq, ne -- identity first, then string name, then another BDScreen
    def __eq__(self, x):
        if x is self:
            return True
        if isinstance(x, str):
            return x == self._name
        if isinstance(x, BDScreen):
            return x._name == self._name
        raise TypeError("cannot compare type '{}' to BDScreen type.".format(
            type(x).__name__))

    def __ne__(self, x):
        return not self.__eq__(x)

    @property
    def name(self):
        """The name of this screen."""
        return self._name

    def is_running(self):
        """True if this screen is currently running."""
        return self._running

    def start(self):
        """Start this screen."""
        if session._screen != self:
            try:
                self.pre_run()
            except NotImplementedError:
                pass
            session._screen = self
            self.run()

    # Optional
    def pre_run(self):
        """Called before the screen becomes active."""
        raise NotImplementedError

    def has_unsaved_data(self):
        """Return True if there is unsaved data."""
        return False

    def run(self):
        raise NotImplementedError

    def end(self):
        """Called to end this screen."""
        pass

    # Subclasses must call these
    def exit_to_title(self):
        """Exit this screen and return to the title screen."""
        raise NotImplementedError

    def quit(self):
        """Quit the game and return to the desktop."""
        raise NotImplementedError

    def cleanup(self):
        """Called to kill this screen after screen transition."""
        pass
edensparkles/FIRSTAID | FIRST_AID/venv/Lib/site-packages/pip/utils/appdirs.py | 174 | 7896 | """
This code was taken from https://github.com/ActiveState/appdirs and modified
to suit our purposes.
"""
from __future__ import absolute_import
import os
import sys
from pip.compat import WINDOWS, expanduser
def user_cache_dir(appname):
    r"""Return the full path of the per-user cache dir for *appname*.

    Typical user cache directories are:
        Mac OS X: ~/Library/Caches/<AppName>
        Unix:     ~/.cache/<AppName> (XDG default)
        Windows:  C:\Users\<username>\AppData\Local\<AppName>\Cache

    On Windows the local (non-roaming) app-data dir is used, with "Cache"
    appended, following the MSDN guidance for CSIDL_LOCAL_APPDATA.
    """
    if WINDOWS:
        base = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        return os.path.join(base, appname, "Cache")
    if sys.platform == "darwin":
        return os.path.join(expanduser("~/Library/Caches"), appname)
    # XDG spec: honor $XDG_CACHE_HOME, defaulting to ~/.cache
    base = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache"))
    return os.path.join(base, appname)
def user_data_dir(appname, roaming=False):
    """
    Return full path to the user-specific data dir for this application.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "roaming" (boolean, default False) can be set True to use the Windows
        roaming appdata directory. That means that for users on a Windows
        network setup for roaming profiles, this user data will be
        sync'd on login. See
        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
        for a discussion of issues.

    Typical user data directories are:
        Mac OS X:               ~/Library/Application Support/<AppName>
        Unix:                   ~/.local/share/<AppName>    # or in
                                $XDG_DATA_HOME, if defined
        Win XP (not roaming):   C:\Documents and Settings\<username>\ ...
                                ...Application Data\<AppName>
        Win XP (roaming):       C:\Documents and Settings\<username>\Local ...
                                ...Settings\Application Data\<AppName>
        Win 7  (not roaming):   C:\\Users\<username>\AppData\Local\<AppName>
        Win 7  (roaming):       C:\\Users\<username>\AppData\Roaming\<AppName>

    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
    That means, by default "~/.local/share/<AppName>".
    """
    if WINDOWS:
        # py2-era and/or conditional: roaming selects CSIDL_APPDATA,
        # otherwise the local (non-synced) CSIDL_LOCAL_APPDATA.
        const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
        path = os.path.join(os.path.normpath(_get_win_folder(const)), appname)
    elif sys.platform == "darwin":
        path = os.path.join(
            expanduser('~/Library/Application Support/'),
            appname,
        )
    else:
        path = os.path.join(
            os.getenv('XDG_DATA_HOME', expanduser("~/.local/share")),
            appname,
        )

    return path
def user_config_dir(appname, roaming=True):
    """Return full path to the user-specific config dir for this application.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "roaming" (boolean, default True) can be set False to not use the
        Windows roaming appdata directory. That means that for users on a
        Windows network setup for roaming profiles, this user data will be
        sync'd on login. See
        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
        for a discussion of issues.

    Typical user data directories are:
        Mac OS X:               same as user_data_dir
        Unix:                   ~/.config/<AppName>
        Win *:                  same as user_data_dir

    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
    """
    if WINDOWS:
        path = user_data_dir(appname, roaming=roaming)
    elif sys.platform == "darwin":
        path = user_data_dir(appname)
    else:
        path = os.getenv('XDG_CONFIG_HOME', expanduser("~/.config"))
        path = os.path.join(path, appname)

    return path
# for the discussion regarding site_config_dirs locations
# see <https://github.com/pypa/pip/issues/1733>
def site_config_dirs(appname):
    r"""Return a list of potential user-shared config dirs for *appname*.

    Typical user config directories are:
        Mac OS X:  /Library/Application Support/<AppName>/
        Unix:      /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in
                   $XDG_CONFIG_DIRS
        Win XP:    C:\Documents and Settings\All Users\Application
                   Data\<AppName>\
        Vista:     (Fail! "C:\ProgramData" is a hidden *system* directory
                   on Vista.)
        Win 7:     Hidden, but writeable on Win 7: C:\ProgramData\<AppName>\
    """
    if WINDOWS:
        base = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        return [os.path.join(base, appname)]
    if sys.platform == 'darwin':
        return [os.path.join('/Library/Application Support', appname)]
    # Unix: try every entry of $XDG_CONFIG_DIRS ...
    xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
    if xdg_config_dirs:
        pathlist = [os.path.join(expanduser(x), appname)
                    for x in xdg_config_dirs.split(os.pathsep)]
    else:
        pathlist = []
    # ... and always look in /etc directly as well.
    pathlist.append('/etc')
    return pathlist
# -- Windows support functions --
def _get_win_folder_from_registry(csidl_name):
    """
    This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    # Python 2 winreg module; used only when ctypes is unavailable.
    import _winreg

    # Map the CSIDL constant name to the registry value name under
    # the per-user "Shell Folders" key.
    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]

    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    directory, _type = _winreg.QueryValueEx(key, shell_folder_name)
    return directory
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a CSIDL_* special folder via the Win32 shell API (ctypes).

    ``ctypes`` is imported at module bottom, guarded by the WINDOWS check.
    """
    # Numeric CSIDL constants for SHGetFolderPathW.
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in buf:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf2 = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2

    return buf.value
if WINDOWS:
    # Prefer the ctypes implementation; fall back to reading the registry
    # on interpreters without ctypes support.
    try:
        import ctypes
        _get_win_folder = _get_win_folder_with_ctypes
    except ImportError:
        _get_win_folder = _get_win_folder_from_registry
| mit |
cnsoft/kbengine-cocos2dx | kbe/res/scripts/common/Lib/importlib/test/abc.py | 86 | 2471 | import abc
import unittest
class FinderTests(unittest.TestCase, metaclass=abc.ABCMeta):

    """Basic tests for a finder to pass."""
    # Abstract test contract: concrete finder test suites subclass this
    # and implement each scenario for their particular finder.

    @abc.abstractmethod
    def test_module(self):
        # Test importing a top-level module.
        pass

    @abc.abstractmethod
    def test_package(self):
        # Test importing a package.
        pass

    @abc.abstractmethod
    def test_module_in_package(self):
        # Test importing a module contained within a package.
        # A value for 'path' should be used if for a meta_path finder.
        pass

    @abc.abstractmethod
    def test_package_in_package(self):
        # Test importing a subpackage.
        # A value for 'path' should be used if for a meta_path finder.
        pass

    @abc.abstractmethod
    def test_package_over_module(self):
        # Test that packages are chosen over modules.
        pass

    @abc.abstractmethod
    def test_failure(self):
        # Test trying to find a module that cannot be handled.
        pass
class LoaderTests(unittest.TestCase, metaclass=abc.ABCMeta):
    # Abstract test contract: concrete loader test suites subclass this
    # and implement each scenario for their particular loader.

    @abc.abstractmethod
    def test_module(self):
        """A module should load without issue.

        After the loader returns the module should be in sys.modules.

        Attributes to verify:

            * __file__
            * __loader__
            * __name__
            * No __path__

        """
        pass

    @abc.abstractmethod
    def test_package(self):
        """Loading a package should work.

        After the loader returns the module should be in sys.modules.

        Attributes to verify:

            * __name__
            * __file__
            * __package__
            * __path__
            * __loader__

        """
        pass

    @abc.abstractmethod
    def test_lacking_parent(self):
        """A loader should not be dependent on it's parent package being
        imported."""
        pass

    @abc.abstractmethod
    def test_module_reuse(self):
        """If a module is already in sys.modules, it should be reused."""
        pass

    @abc.abstractmethod
    def test_state_after_failure(self):
        """If a module is already in sys.modules and a reload fails
        (e.g. a SyntaxError), the module should be in the state it was before
        the reload began."""
        pass

    @abc.abstractmethod
    def test_unloadable(self):
        """Test ImportError is raised when the loader is asked to load a module
        it can't."""
        pass
| lgpl-3.0 |
jicruz/heroku-bot | lib/youtube_dl/extractor/spike.py | 34 | 2296 | from __future__ import unicode_literals
import re
from .mtv import MTVServicesInfoExtractor
class SpikeIE(MTVServicesInfoExtractor):
_VALID_URL = r'https?://(?:[^/]+\.)?spike\.com/[^/]+/[\da-z]{6}(?:[/?#&]|$)'
_TESTS = [{
'url': 'http://www.spike.com/video-clips/lhtu8m/auction-hunters-can-allen-ride-a-hundred-year-old-motorcycle',
'md5': '1a9265f32b0c375793d6c4ce45255256',
'info_dict': {
'id': 'b9c8221a-4e50-479a-b86d-3333323e38ba',
'ext': 'mp4',
'title': 'Auction Hunters|December 27, 2013|4|414|Can Allen Ride A Hundred Year-Old Motorcycle?',
'description': 'md5:fbed7e82ed5fad493615b3094a9499cb',
'timestamp': 1388120400,
'upload_date': '20131227',
},
}, {
'url': 'http://www.spike.com/full-episodes/j830qm/lip-sync-battle-joel-mchale-vs-jim-rash-season-2-ep-209',
'md5': 'b25c6f16418aefb9ad5a6cae2559321f',
'info_dict': {
'id': '37ace3a8-1df6-48be-85b8-38df8229e241',
'ext': 'mp4',
'title': 'Lip Sync Battle|April 28, 2016|2|209|Joel McHale Vs. Jim Rash|Act 1',
'description': 'md5:a739ca8f978a7802f67f8016d27ce114',
},
}, {
'url': 'http://www.spike.com/video-clips/lhtu8m/',
'only_matching': True,
}, {
'url': 'http://www.spike.com/video-clips/lhtu8m',
'only_matching': True,
}, {
'url': 'http://bellator.spike.com/fight/atwr7k/bellator-158-michael-page-vs-evangelista-cyborg',
'only_matching': True,
}, {
'url': 'http://bellator.spike.com/video-clips/bw6k7n/bellator-158-foundations-michael-venom-page',
'only_matching': True,
}]
_FEED_URL = 'http://www.spike.com/feeds/mrss/'
_MOBILE_TEMPLATE = 'http://m.spike.com/videos/video.rbml?id=%s'
_CUSTOM_URL_REGEX = re.compile(r'spikenetworkapp://([^/]+/[-a-fA-F0-9]+)')
def _extract_mgid(self, webpage):
mgid = super(SpikeIE, self)._extract_mgid(webpage)
if mgid is None:
url_parts = self._search_regex(self._CUSTOM_URL_REGEX, webpage, 'episode_id')
video_type, episode_id = url_parts.split('/', 1)
mgid = 'mgid:arc:{0}:spike.com:{1}'.format(video_type, episode_id)
return mgid
| gpl-3.0 |
bslatkin/8-bits | appengine-mapreduce/python/test/mapreduce/handlers_test.py | 1 | 68704 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Disable "Invalid method name"
# pylint: disable-msg=C6409
# os_compat must be first to ensure timezones are UTC.
# Disable "unused import" and "invalid import order"
# pylint: disable-msg=W0611
from google.appengine.tools import os_compat
# pylint: enable-msg=W0611
# pylint: disable=unused-argument
import base64
import cgi
import datetime
from testlib import mox
import os
from mapreduce.lib import simplejson
import time
import urllib
import unittest
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_file_stub
from google.appengine.api import files
from google.appengine.api import memcache
from google.appengine.api.memcache import memcache_stub
from google.appengine.api.taskqueue import taskqueue_stub
from google.appengine.ext import db
from mapreduce.lib import key_range
from mapreduce import context
from mapreduce import control
from mapreduce import errors
from mapreduce import handlers
from mapreduce import hooks
from mapreduce import input_readers
from mapreduce import operation
from mapreduce import output_writers
from mapreduce import model
from mapreduce import quota
from mapreduce import test_support
from testlib import testutil
from mapreduce import mock_webapp
MAPPER_PARAMS = {"batch_size": 50}
PARAM_DONE_CALLBACK = model.MapreduceSpec.PARAM_DONE_CALLBACK
PARAM_DONE_CALLBACK_QUEUE = model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE
class TestHooks(hooks.Hooks):
"""Test hooks class."""
def __init__(self, mapper):
super(TestHooks, self).__init__(mapper)
TestHooks.enqueue_worker_task_calls = []
TestHooks.enqueue_done_task_calls = []
TestHooks.enqueue_controller_task_calls = []
def enqueue_worker_task(self, task, queue_name):
self.enqueue_worker_task_calls.append((task, queue_name))
def enqueue_kickoff_task(self, task, queue_name):
# Tested by control_test.ControlTest.testStartMap_Hooks.
pass
def enqueue_done_task(self, task, queue_name):
self.enqueue_done_task_calls.append((task, queue_name))
def enqueue_controller_task(self, task, queue_name):
self.enqueue_controller_task_calls.append((task, queue_name))
class TestKind(db.Model):
"""Used for testing."""
foobar = db.StringProperty(default="meep")
def TestMap(entity):
"""Used for testing."""
pass
class TestException(Exception):
"""Test exception to use in test handlers."""
class MockTime(object):
"""Simple class to use for mocking time() funciton."""
now = time.time()
@staticmethod
def time():
"""Get current mock time."""
return MockTime.now
@staticmethod
def advance_time(delta):
"""Advance current mock time by delta."""
MockTime.now += delta
class TestEntity(db.Model):
"""Test entity class."""
class TestHandler(object):
"""Test handler which stores all processed entities keys.
Properties:
processed_keys: all keys of processed entities.
delay: advances mock time by this delay on every call.
"""
processed_keys = []
delay = 0
def __call__(self, entity):
"""Main handler process function.
Args:
entity: entity to process.
"""
TestHandler.processed_keys.append(str(entity.key()))
MockTime.advance_time(TestHandler.delay)
@staticmethod
def reset():
"""Clear processed_keys & reset delay to 0."""
TestHandler.processed_keys = []
TestHandler.delay = 0
class TestOperation(operation.Operation):
"""Test operation which records entity on execution."""
processed_keys = []
def __init__(self, entity):
self.entity = entity
def __call__(self, context):
TestOperation.processed_keys.append(str(self.entity.key()))
@classmethod
def reset(cls):
cls.processed_keys = []
def test_handler_raise_exception(entity):
"""Test handler function which always raises exception.
Raises:
TestException: always.
"""
raise TestException()
def test_handler_raise_fail_job_exception(entity):
"""Test handler function which always raises exception.
Raises:
FailJobError: always.
"""
raise errors.FailJobError()
def test_handler_raise_slice_retry_exception(entity):
"""Test handler function that always raises a fatal error.
Raises:
errors.RetrySliceError: always.
"""
raise errors.RetrySliceError("")
def test_handler_raise_shard_retry_exception(entity):
"""Test handler function that always raises a fatal error.
Raises:
files.ExistenceError: always.
"""
raise files.ExistenceError("")
def test_handler_yield_op(entity):
"""Test handler function which yields test operation twice for entity."""
yield TestOperation(entity)
yield TestOperation(entity)
def test_param_validator_success(params):
"""Test parameter validator that is successful."""
params["test"] = "good"
def test_param_validator_raise_exception(params):
"""Test parameter validator that fails."""
raise Exception("These params are bad")
def test_handler_yield_keys(entity):
"""Test handler which yeilds entity keys."""
yield entity.key()
class InputReader(input_readers.DatastoreInputReader):
"""Test input reader which records number of yields."""
yields = 0
def __iter__(self):
for entity in input_readers.DatastoreInputReader.__iter__(self):
InputReader.yields += 1
yield entity
@classmethod
def reset(cls):
cls.yields = 0
class TestOutputWriter(output_writers.OutputWriter):
"""Test output writer."""
# store lifecycle events.
events = []
@classmethod
def reset(cls):
cls.events = []
@classmethod
def validate(cls, mapper_spec):
assert isinstance(mapper_spec, model.MapperSpec)
if "fail_writer_validate" in mapper_spec.params:
raise Exception("Failed Validation")
@classmethod
def init_job(cls, mapreduce_state):
assert isinstance(mapreduce_state, model.MapreduceState)
cls.events.append("init_job")
@classmethod
def finalize_job(cls, mapreduce_state):
assert isinstance(mapreduce_state, model.MapreduceState)
cls.events.append("finalize_job")
@classmethod
def create(cls, mapreduce_state, shard_state):
assert isinstance(mapreduce_state, model.MapreduceState)
cls.events.append("create-" + str(shard_state.shard_number))
return cls()
def to_json(self):
return {}
@classmethod
def from_json(cls, json_dict):
return cls()
def write(self, data, ctx):
assert isinstance(ctx, context.Context)
self.events.append("write-" + str(data))
def finalize(self, ctx, shard_state):
assert isinstance(ctx, context.Context)
self.events.append("finalize-" + str(shard_state.shard_number))
class UnfinalizableTestOutputWriter(TestOutputWriter):
"""An output writer where all calls to finalize fail."""
def finalize(self, ctx, shard_state):
raise Exception("This will always break")
class MatchesContext(mox.Comparator):
"""Mox comparator to match context instances."""
def __init__(self, **kwargs):
self.kwargs = kwargs
def equals(self, ctx):
"""Check to see if ctx matches arguments."""
if self.kwargs.get("task_retry_count", 0) != ctx.task_retry_count:
return False
return True
def __repr__(self):
return "MatchesContext(%s)" % self.kwargs
class FixedShardSizeInputReader(input_readers.DatastoreInputReader):
"""Test reader which truncates the list of readers to specified size."""
readers_size = 3
@classmethod
def split_input(cls, mapper_spec):
readers = input_readers.DatastoreInputReader.split_input(mapper_spec)
return readers[:cls.readers_size]
ENTITY_KIND = "__main__.TestEntity"
MAPPER_HANDLER_SPEC = __name__ + "." + TestHandler.__name__
COUNTER_MAPPER_CALLS = context.COUNTER_MAPPER_CALLS
class MapreduceHandlerTestBase(testutil.HandlerTestBase):
"""Base class for all mapreduce handler tests.
Contains common fixture and utility methods.
"""
def setUp(self):
"""Sets up the test harness."""
testutil.HandlerTestBase.setUp(self)
TestHandler.reset()
TestOutputWriter.reset()
def find_task_by_name(self, tasks, name):
"""Find a task with given name.
Args:
tasks: iterable of tasks.
name: a name to look for.
Returns:
task or None
"""
for task in tasks:
if task["name"] == name:
return task
return None
def verify_input_reader_state(self, str_state, **kwargs):
"""Check that input reader state has expected values.
Args:
str_state: input reader state serialized into string.
"""
state = simplejson.loads(str_state)
self.assertEquals(ENTITY_KIND, state["entity_kind"])
self.assertTrue("key_range" in state or "current_key_range" in state,
"invalid state: %r" % str_state)
self.assertEquals(50, state["batch_size"])
def verify_shard_task(self, task, shard_id, slice_id=0, eta=None,
countdown=None, **kwargs):
"""Checks that all shard task properties have expected values.
Args:
task: task to check.
shard_id: expected shard id.
slice_id: expected slice_id.
eta: expected task eta.
countdown: expected task delay from now.
kwargs: Extra keyword arguments to pass to verify_mapreduce_spec.
"""
expected_task_name = handlers.MapperWorkerCallbackHandler.get_task_name(
shard_id, slice_id)
self.assertEquals(expected_task_name, task["name"])
self.assertEquals("POST", task["method"])
self.assertEquals("/mapreduce/worker_callback", task["url"])
if eta:
self.assertEquals(eta.strftime("%Y/%m/%d %H:%M:%S"), task["eta"])
if countdown:
expected_etc_sec = time.time() + countdown
eta_sec = time.mktime(time.strptime(task["eta"], "%Y/%m/%d %H:%M:%S"))
self.assertTrue(expected_etc_sec < eta_sec + 10)
payload = test_support.decode_task_payload(task)
self.assertEquals(str(shard_id), payload["shard_id"])
self.assertEquals(str(slice_id), payload["slice_id"])
self.assertTrue(payload["mapreduce_spec"])
mapreduce_spec = model.MapreduceSpec.from_json_str(
payload["mapreduce_spec"])
self.verify_mapreduce_spec(mapreduce_spec, **kwargs)
self.verify_input_reader_state(payload["input_reader_state"], **kwargs)
def verify_mapreduce_spec(self, mapreduce_spec, **kwargs):
"""Check all mapreduce spec properties to have expected values.
Args:
mapreduce_spec: mapreduce spec to check as MapreduceSpec.
kwargs: expected property values. Checks for default property value if
particular property is not specified.
"""
self.assertTrue(mapreduce_spec)
self.assertEquals(kwargs.get("mapper_handler_spec", MAPPER_HANDLER_SPEC),
mapreduce_spec.mapper.handler_spec)
self.assertEquals(kwargs.get("output_writer_spec", None),
mapreduce_spec.mapper.output_writer_spec)
self.assertEquals(ENTITY_KIND,
mapreduce_spec.mapper.params["entity_kind"])
self.assertEquals(kwargs.get("shard_count", 8),
mapreduce_spec.mapper.shard_count)
self.assertEquals(kwargs.get("hooks_class_name"),
mapreduce_spec.hooks_class_name)
def verify_shard_state(self, shard_state, **kwargs):
"""Checks that all shard state properties have expected values.
Args:
shard_state: shard state to check.
kwargs: expected property values. Checks for default property value if
particular property is not specified.
"""
self.assertTrue(shard_state)
self.assertEquals(kwargs.get("active", True), shard_state.active)
self.assertEquals(kwargs.get("processed", 0),
shard_state.counters_map.get(COUNTER_MAPPER_CALLS))
self.assertEquals(kwargs.get("result_status", None),
shard_state.result_status)
def verify_mapreduce_state(self, mapreduce_state, **kwargs):
"""Checks mapreduce state to have expected property values.
Args:
mapreduce_state: mapreduce state to check.
kwargs: expected property values. Checks for default property value if
particular property is not specified.
"""
self.assertTrue(mapreduce_state)
self.assertTrue(
mapreduce_state.chart_url.startswith("http://chart.apis.google.com/"),
"Wrong chart url: " + mapreduce_state.chart_url)
self.assertEquals(kwargs.get("active", True), mapreduce_state.active)
self.assertEquals(kwargs.get("processed", 0),
mapreduce_state.counters_map.get(COUNTER_MAPPER_CALLS))
self.assertEquals(kwargs.get("result_status", None),
mapreduce_state.result_status)
mapreduce_spec = mapreduce_state.mapreduce_spec
self.verify_mapreduce_spec(mapreduce_spec, **kwargs)
def verify_controller_task(self, task, **kwargs):
"""Checks that all update status task properties have expected values.
Args:
task: task to check.
kwargs: expected property values. Checks for default if property is not
specified.
"""
self.assertEquals("POST", task["method"])
self.assertEquals("/mapreduce/controller_callback", task["url"])
payload = test_support.decode_task_payload(task)
mapreduce_spec = model.MapreduceSpec.from_json_str(
payload["mapreduce_spec"])
self.verify_mapreduce_spec(mapreduce_spec, **kwargs)
def create_mapreduce_spec(self,
mapreduce_id,
shard_count=8,
mapper_handler_spec=MAPPER_HANDLER_SPEC,
mapper_parameters=None,
hooks_class_name=None,
output_writer_spec=None):
"""Create a new valid mapreduce_spec.
Args:
mapreduce_id: mapreduce id.
shard_count: number of shards in the handlers.
mapper_handler_spec: handler specification to use for handlers.
hooks_class_name: fully qualified name of the hooks class.
Returns:
new MapreduceSpec.
"""
params = {"entity_kind": __name__ + "." + TestEntity.__name__}
if mapper_parameters is not None:
params.update(mapper_parameters)
mapper_spec = model.MapperSpec(
mapper_handler_spec,
__name__ + ".InputReader",
params,
shard_count,
output_writer_spec=output_writer_spec)
mapreduce_spec = model.MapreduceSpec("my job",
mapreduce_id,
mapper_spec.to_json(),
hooks_class_name=hooks_class_name)
self.verify_mapreduce_spec(mapreduce_spec,
shard_count=shard_count,
mapper_handler_spec=mapper_handler_spec,
hooks_class_name=hooks_class_name,
output_writer_spec=output_writer_spec)
state = model.MapreduceState(
key_name=mapreduce_id,
last_poll_time=datetime.datetime.now())
state.mapreduce_spec = mapreduce_spec
state.active = True
state.shard_count = shard_count
state.active_shards = shard_count
state.put()
return mapreduce_spec
def create_shard_state(self, mapreduce_id, shard_number):
"""Creates a new valid shard state.
Args:
mapreduce_id: mapreduce id to create state for as string.
shard_number: shard number as int.
Returns:
new ShardState.
"""
shard_state = model.ShardState.create_new(mapreduce_id, shard_number)
self.verify_shard_state(shard_state)
return shard_state
def create_and_store_shard_state(self, mapreduce_id, shard_number):
"""Creates a new valid shard state and saves it into memcache.
Args:
mapreduce_id: mapreduce id to create state for as string.
shard_number: shard number as int.
Returns:
new ShardState.
"""
shard_state = self.create_shard_state(mapreduce_id, shard_number)
shard_state.put()
return shard_state
def key(self, entity_id):
"""Create a key for TestEntity with specified id.
Used to shorted expected data.
Args:
entity_id: entity id
Returns:
db.Key instance with specified id for TestEntity.
"""
return db.Key.from_path("TestEntity", entity_id)
class StartJobHandlerTest(MapreduceHandlerTestBase):
"""Test handlers.StartJobHandler."""
def setUp(self):
"""Sets up the test harness."""
MapreduceHandlerTestBase.setUp(self)
self.handler = handlers.StartJobHandler()
self.handler.initialize(mock_webapp.MockRequest(),
mock_webapp.MockResponse())
self.handler.request.path = "/mapreduce/command/start_job"
self.handler.request.set("name", "my job")
self.handler.request.set(
"mapper_input_reader",
"mapreduce.input_readers.DatastoreInputReader")
self.handler.request.set("mapper_handler", MAPPER_HANDLER_SPEC)
self.handler.request.set("mapper_params.entity_kind",
(__name__ + "." + TestEntity.__name__))
self.handler.request.headers["X-Requested-With"] = "XMLHttpRequest"
def get_mapreduce_spec(self, task):
"""Get mapreduce spec form kickoff task payload."""
payload = test_support.decode_task_payload(task)
return model.MapreduceSpec.from_json_str(payload["mapreduce_spec"])
def testCSRF(self):
"""Tests that that handler only accepts AJAX requests."""
del self.handler.request.headers["X-Requested-With"]
self.handler.post()
self.assertEquals(403, self.handler.response.status)
def testSmoke(self):
"""Verifies main execution path of starting scan over several entities."""
for _ in range(100):
TestEntity().put()
self.handler.post()
# Only kickoff task should be there.
tasks = self.taskqueue.GetTasks("default")
self.assertEquals(1, len(tasks))
mapreduce_spec = self.get_mapreduce_spec(tasks[0])
self.assertTrue(mapreduce_spec)
self.assertEquals(MAPPER_HANDLER_SPEC, mapreduce_spec.mapper.handler_spec)
def testSmokeOtherApp(self):
"""Verifies main execution path of starting scan over several entities."""
apiproxy_stub_map.apiproxy.GetStub("datastore_v3").SetTrusted(True)
self.handler.request.set("mapper_params._app", "otherapp")
TestEntity(_app="otherapp").put()
self.handler.post()
# Only kickoff task should be there.
tasks = self.taskqueue.GetTasks("default")
self.assertEquals(1, len(tasks))
payload = test_support.decode_task_payload(tasks[0])
self.assertEquals("otherapp", payload["app"])
self.assertTrue(self.get_mapreduce_spec(tasks[0]))
def testRequiredParams(self):
"""Tests that required parameters are enforced."""
TestEntity().put()
self.handler.post()
self.handler.request.set("name", None)
self.assertRaises(errors.NotEnoughArgumentsError, self.handler.handle)
self.handler.request.set("name", "my job")
self.handler.request.set("mapper_input_reader", None)
self.assertRaises(errors.NotEnoughArgumentsError, self.handler.handle)
self.handler.request.set(
"mapper_input_reader",
"mapreduce.input_readers.DatastoreInputReader")
self.handler.request.set("mapper_handler", None)
self.assertRaises(errors.NotEnoughArgumentsError, self.handler.handle)
self.handler.request.set("mapper_handler", MAPPER_HANDLER_SPEC)
self.handler.request.set("mapper_params.entity_kind", None)
self.assertRaises(input_readers.BadReaderParamsError, self.handler.handle)
self.handler.request.set("mapper_params.entity_kind",
(__name__ + "." + TestEntity.__name__))
self.handler.post()
def testParameterValidationSuccess(self):
"""Tests validating user-supplied parameters."""
TestEntity().put()
self.handler.request.set("mapper_params.one", ["red", "blue"])
self.handler.request.set("mapper_params.two", "green")
self.handler.request.set("mapper_params_validator",
__name__ + ".test_param_validator_success")
self.handler.post()
tasks = self.taskqueue.GetTasks("default")
self.assertEquals(1, len(tasks))
mapreduce_spec = self.get_mapreduce_spec(tasks[0])
params = mapreduce_spec.mapper.params
self.assertEquals(["red", "blue"], params["one"])
self.assertEquals("green", params["two"])
# From the validator function
self.assertEquals("good", params["test"])
# Defaults always present.
self.assertEquals(model._DEFAULT_PROCESSING_RATE_PER_SEC,
params["processing_rate"])
self.assertEquals("default", params["queue_name"])
def testMapreduceParameters(self):
"""Tests propagation of user-supplied mapreduce parameters."""
TestEntity().put()
self.handler.request.set("params.one", ["red", "blue"])
self.handler.request.set("params.two", "green")
self.handler.request.set("params_validator",
__name__ + ".test_param_validator_success")
self.handler.post()
kickoff_task = self.taskqueue.GetTasks("default")[0]
mapreduce_spec = self.get_mapreduce_spec(kickoff_task)
params = mapreduce_spec.params
self.assertEquals(["red", "blue"], params["one"])
self.assertEquals("green", params["two"])
# From the validator function
self.assertEquals("good", params["test"])
def testParameterValidationFailure(self):
"""Tests when validating user-supplied parameters fails."""
self.handler.request.set("mapper_params_validator",
__name__ + ".test_param_validator_raise_exception")
try:
self.handler.handle()
self.fail()
except Exception, e:
self.assertEquals("These params are bad", str(e))
def testParameterValidationUnknown(self):
"""Tests the user-supplied parameter validation function cannot be found."""
self.handler.request.set("mapper_params_validator", "does_not_exist")
self.assertRaises(ImportError, self.handler.handle)
def testHandlerUnknown(self):
"""Tests when the handler function cannot be found."""
self.handler.request.set("mapper_handler", "does_not_exist")
self.assertRaises(ImportError, self.handler.handle)
def testInputReaderUnknown(self):
"""Tests when the input reader function cannot be found."""
self.handler.request.set("mapper_input_reader", "does_not_exist")
self.assertRaises(ImportError, self.handler.handle)
def testQueueName(self):
"""Tests that the optional queue_name parameter is used."""
TestEntity().put()
self.handler.request.set("mapper_params.queue_name", "crazy-queue")
self.handler.post()
tasks = self.taskqueue.GetTasks("crazy-queue")
self.assertEquals(1, len(tasks))
mapreduce_spec = self.get_mapreduce_spec(tasks[0])
self.assertEquals(
"crazy-queue",
mapreduce_spec.mapper.params["queue_name"])
self.assertEquals(0, len(self.taskqueue.GetTasks("default")))
def testProcessingRate(self):
"""Tests that the optional processing rate parameter is used."""
TestEntity().put()
self.handler.request.set("mapper_params.processing_rate", "1234")
self.handler.post()
tasks = self.taskqueue.GetTasks("default")
self.assertEquals(1, len(tasks))
mapreduce_spec = self.get_mapreduce_spec(tasks[0])
self.assertEquals(
1234,
mapreduce_spec.mapper.params["processing_rate"])
def testShardCount(self):
"""Tests that the optional shard count parameter is used."""
TestEntity().put()
self.handler.request.set("mapper_params.shard_count", "9")
self.handler.post()
tasks = self.taskqueue.GetTasks("default")
self.assertEquals(1, len(tasks))
mapreduce_spec = self.get_mapreduce_spec(tasks[0])
self.assertEquals(9, mapreduce_spec.mapper.shard_count)
def testOutputWriter(self):
"""Tests setting output writer parameter."""
TestEntity().put()
self.handler.request.set("mapper_output_writer",
__name__ + ".TestOutputWriter")
self.handler.handle()
tasks = self.taskqueue.GetTasks("default")
self.assertEquals(1, len(tasks))
mapreduce_spec = self.get_mapreduce_spec(tasks[0])
self.assertEquals("__main__.TestOutputWriter",
mapreduce_spec.mapper.output_writer_spec)
def testOutputWriterValidateFails(self):
TestEntity().put()
self.handler.request.set("mapper_output_writer",
__name__ + ".TestOutputWriter")
self.handler.request.set("mapper_params.fail_writer_validate",
"true")
self.assertRaises(Exception, self.handler.handle)
def testInvalidOutputWriter(self):
"""Tests setting output writer parameter."""
TestEntity().put()
self.handler.request.set("mapper_output_writer", "Foo")
self.assertRaises(ImportError, self.handler.handle)
class KickOffJobHandlerTest(MapreduceHandlerTestBase):
"""Test handlers.StartJobHandler."""
def setUp(self):
"""Sets up the test harness."""
MapreduceHandlerTestBase.setUp(self)
self.mapreduce_id = "mapreduce0"
self.mapreduce_spec = self.create_mapreduce_spec(self.mapreduce_id)
self.handler = handlers.KickOffJobHandler()
self.handler.initialize(mock_webapp.MockRequest(),
mock_webapp.MockResponse())
self.handler.request.path = "/mapreduce/kickoffjob_callback"
self.handler.request.set(
"mapreduce_spec",
self.mapreduce_spec.to_json_str())
self.handler.request.headers["X-AppEngine-QueueName"] = "default"
def testCSRF(self):
"""Tests that that handler only accepts requests from the task queue."""
del self.handler.request.headers["X-AppEngine-QueueName"]
self.handler.post()
self.assertEquals(403, self.handler.response.status)
def testSmoke(self):
"""Verifies main execution path of starting scan over several entities."""
for i in range(100):
TestEntity().put()
self.handler.post()
shard_count = 8
state = model.MapreduceState.all()[0]
self.assertTrue(state)
self.assertTrue(state.active)
tasks = self.taskqueue.GetTasks("default")
self.assertEquals(shard_count + 1, len(tasks))
for i in xrange(shard_count):
shard_id = model.ShardState.shard_id_from_number(self.mapreduce_id, i)
task_name = handlers.MapperWorkerCallbackHandler.get_task_name(
shard_id, 0)
shard_task = self.find_task_by_name(tasks, task_name)
self.assertTrue(shard_task)
tasks.remove(shard_task)
self.verify_shard_task(shard_task, shard_id, shard_count=8)
self.verify_shard_state(
model.ShardState.get_by_shard_id(shard_id))
# only update task should be left in tasks array
self.assertEquals(1, len(tasks))
self.verify_controller_task(tasks[0], shard_count=8)
def testHooks(self):
"""Verifies main execution path with a hooks class installed."""
for i in range(100):
TestEntity().put()
self.mapreduce_spec.hooks_class_name = __name__ + "." + TestHooks.__name__
self.handler.request.set(
"mapreduce_spec",
self.mapreduce_spec.to_json_str())
self.handler.post()
self.assertEquals(8, len(TestHooks.enqueue_worker_task_calls))
self.assertEquals(1, len(TestHooks.enqueue_controller_task_calls))
task, queue_name = TestHooks.enqueue_controller_task_calls[0]
self.assertEquals("default", queue_name)
self.assertEquals("/mapreduce/controller_callback", task.url)
def testRequiredParams(self):
"""Tests that required parameters are enforced."""
self.handler.post()
self.handler.request.set("mapreduce_spec", None)
self.assertRaises(errors.NotEnoughArgumentsError, self.handler.post)
def testInputReaderUnknown(self):
"""Tests when the input reader function cannot be found."""
self.mapreduce_spec.mapper.input_reader_spec = "does_not_exist"
self.handler.request.set("mapreduce_spec",
self.mapreduce_spec.to_json_str())
self.assertRaises(ImportError, self.handler.post)
def testQueueName(self):
"""Tests that the optional queue_name parameter is used."""
os.environ["HTTP_X_APPENGINE_QUEUENAME"] = "crazy-queue"
TestEntity().put()
self.handler.post()
del os.environ["HTTP_X_APPENGINE_QUEUENAME"]
self.assertEquals(0, len(self.taskqueue.GetTasks("default")))
self.assertEquals(9, len(self.taskqueue.GetTasks("crazy-queue")))
def testNoData(self):
self.handler.post()
self.assertEquals(9, len(self.taskqueue.GetTasks("default")))
state = model.MapreduceState.get_by_job_id(self.mapreduce_id)
self.assertTrue(state.active)
self.assertEquals(8, state.active_shards)
def testDifferentShardCount(self):
"""Verifies the case when input reader created diffrent shard number."""
for _ in range(100):
TestEntity().put()
self.mapreduce_spec.mapper.input_reader_spec = (
__name__ + ".FixedShardSizeInputReader")
self.handler.request.set(
"mapreduce_spec",
self.mapreduce_spec.to_json_str())
self.handler.post()
shard_count = FixedShardSizeInputReader.readers_size
tasks = self.taskqueue.GetTasks("default")
self.assertEquals(shard_count + 1, len(tasks))
for i in xrange(shard_count):
shard_id = model.ShardState.shard_id_from_number(self.mapreduce_id, i)
task_name = handlers.MapperWorkerCallbackHandler.get_task_name(
shard_id, 0)
shard_task = self.find_task_by_name(tasks, task_name)
self.assertTrue(shard_task)
tasks.remove(shard_task)
self.verify_shard_task(shard_task, shard_id, shard_count=shard_count)
self.verify_shard_state(
model.ShardState.get_by_shard_id(shard_id))
# only update task should be left in tasks list
self.assertEquals(1, len(tasks))
self.verify_controller_task(tasks[0], shard_count=shard_count)
def testAppParam(self):
"""Tests that app parameter is correctly passed in the state."""
self.handler.request.set("app", "otherapp")
self.handler.post()
state = model.MapreduceState.all()[0]
self.assertTrue(state)
self.assertEquals("otherapp", state.app_id)
def testOutputWriter(self):
"""Test output writer initialization."""
for _ in range(100):
TestEntity().put()
self.mapreduce_spec.mapper.output_writer_spec = (
__name__ + ".TestOutputWriter")
self.handler.request.set(
"mapreduce_spec",
self.mapreduce_spec.to_json_str())
self.handler.post()
self.assertEquals(
["init_job", "create-0", "create-1", "create-2", "create-3",
"create-4", "create-5", "create-6", "create-7",],
TestOutputWriter.events)
class MapperWorkerCallbackHandlerTest(MapreduceHandlerTestBase):
"""Test handlers.MapperWorkerCallbackHandler."""
def setUp(self):
"""Sets up the test harness."""
MapreduceHandlerTestBase.setUp(self)
self.init()
def tearDown(self):
handlers._TEST_INJECTED_FAULTS.clear()
MapreduceHandlerTestBase.tearDown(self)
def init(self,
mapper_handler_spec=MAPPER_HANDLER_SPEC,
mapper_parameters=None,
hooks_class_name=None,
output_writer_spec=None):
"""Init everything needed for testing worker callbacks.
Args:
mapper_handler_spec: handler specification to use in test.
mapper_params: mapper specification to use in test.
hooks_class_name: fully qualified name of the hooks class to use in test.
"""
InputReader.reset()
self.handler = handlers.MapperWorkerCallbackHandler()
self.handler._time = MockTime.time
self.handler.initialize(mock_webapp.MockRequest(),
mock_webapp.MockResponse())
self.handler.request.path = "/mapreduce/worker_callback"
self.mapreduce_id = "mapreduce0"
self.mapreduce_spec = self.create_mapreduce_spec(
self.mapreduce_id,
mapper_handler_spec=mapper_handler_spec,
hooks_class_name=hooks_class_name,
output_writer_spec=output_writer_spec,
mapper_parameters=mapper_parameters)
self.shard_number = 1
self.slice_id = 3
self.shard_state = self.create_and_store_shard_state(
self.mapreduce_id, self.shard_number)
self.shard_id = self.shard_state.shard_id
output_writer = None
if self.mapreduce_spec.mapper.output_writer_class():
output_writer = self.mapreduce_spec.mapper.output_writer_class()()
self.transient_state = model.TransientShardState(
"/mapreduce",
self.mapreduce_spec,
self.shard_id,
self.slice_id,
InputReader(ENTITY_KIND, [key_range.KeyRange()]),
InputReader(ENTITY_KIND, [key_range.KeyRange()]),
output_writer=output_writer
)
worker_params = self.transient_state.to_dict()
for param_name in worker_params:
self.handler.request.set(param_name, worker_params[param_name])
self.quota_manager = quota.QuotaManager(memcache.Client())
self.initial_quota = 100000
self.quota_manager.set(self.shard_id, self.initial_quota)
self.handler.request.headers["X-AppEngine-QueueName"] = "default"
def testCSRF(self):
    """Tests that the handler only accepts requests from the task queue."""
    # Without the queue header the request looks like external CSRF traffic.
    del self.handler.request.headers["X-AppEngine-QueueName"]
    self.handler.post()
    self.assertEquals(403, self.handler.response.status)
def testSmoke(self):
    """Test main execution path of entity scanning."""
    e1 = TestEntity()
    e1.put()
    e2 = TestEntity()
    e2.put()
    self.handler.post()

    # Both entities should have been passed to the mapper handler.
    self.assertEquals([str(e1.key()), str(e2.key())],
                      TestHandler.processed_keys)

    # we should have finished
    self.verify_shard_state(
        model.ShardState.get_by_shard_id(self.shard_id),
        active=False, processed=2,
        result_status=model.ShardState.RESULT_SUCCESS)

    # quota should be reclaimed correctly
    self.assertEquals(self.initial_quota - len(TestHandler.processed_keys),
                      self.quota_manager.get(self.shard_id))
def testCompletedState(self):
    """An already-completed shard is a no-op: nothing processed, no tasks."""
    self.shard_state.active = False
    self.shard_state.put()

    e1 = TestEntity()
    e1.put()
    self.handler.post()

    # completed state => no data processed
    self.assertEquals([], TestHandler.processed_keys)
    self.verify_shard_state(
        model.ShardState.get_by_shard_id(self.shard_id), active=False)
    self.assertEquals(0, len(self.taskqueue.GetTasks("default")))
def testShardStateCollision(self):
    """A concurrent shard-state write must not be clobbered by this slice."""
    handlers._TEST_INJECTED_FAULTS.add("worker_active_state_collision")

    e1 = TestEntity()
    e1.put()
    self.handler.post()

    # Data will still be processed
    self.assertEquals([str(e1.key())], TestHandler.processed_keys)
    # Shard state should not be overridden, i.e. left active.
    self.verify_shard_state(
        model.ShardState.get_by_shard_id(self.shard_id), active=True)
    self.assertEquals(0, len(self.taskqueue.GetTasks("default")))
def testNoShardState(self):
    """Correct handling of missing shard state."""
    self.shard_state.delete()

    e1 = TestEntity()
    e1.put()
    self.handler.post()

    # no state => no data processed
    self.assertEquals([], TestHandler.processed_keys)
    # And no continuation task should be scheduled either.
    self.assertEquals(0, len(self.taskqueue.GetTasks("default")))
def testNoData(self):
    """Test no data to scan case."""
    self.verify_shard_state(
        model.ShardState.get_by_shard_id(self.shard_id), active=True)

    self.handler.post()

    self.assertEquals([], TestHandler.processed_keys)
    # An empty input range completes the shard successfully.
    self.verify_shard_state(
        model.ShardState.get_by_shard_id(self.shard_id),
        active=False,
        result_status=model.ShardState.RESULT_SUCCESS)
def testUserAbort(self):
    """Tests a user-initiated abort of the shard."""
    # Be sure to have an output writer for the abort step so we can confirm
    # that the finalize() method is never called.
    self.init(__name__ + ".test_handler_yield_keys",
              output_writer_spec=__name__ + ".UnfinalizableTestOutputWriter")

    model.MapreduceControl.abort(self.mapreduce_id, force_writes=True)
    self.handler.post()
    # The shard should end in ABORTED state without finalizing output.
    self.verify_shard_state(
        model.ShardState.get_by_shard_id(self.shard_id),
        active=False,
        result_status=model.ShardState.RESULT_ABORTED)
def testLongProcessingShouldStartAnotherSlice(self):
    """Long scan.

    If scanning takes too long, it should be paused, and new continuation task
    should be spawned.
    """
    e1 = TestEntity()
    e1.put()
    e2 = TestEntity()
    e2.put()

    # Make each mapper call exceed the slice time budget.
    TestHandler.delay = handlers._SLICE_DURATION_SEC + 10

    self.handler.post()

    # only first entity should be processed
    self.assertEquals([str(e1.key())], TestHandler.processed_keys)

    # quota should be reclaimed correctly
    self.assertEquals(self.initial_quota - len(TestHandler.processed_keys),
                      self.quota_manager.get(self.shard_id))

    # slice should be still active
    self.verify_shard_state(
        model.ShardState.get_by_shard_id(self.shard_id),
        processed=1)

    # new task should be spawned
    tasks = self.taskqueue.GetTasks("default")
    self.assertEquals(1, len(tasks))
    self.verify_shard_task(tasks[0], self.shard_id, self.slice_id + 1)
def testLongProcessDataWithAllowCheckpoint(self):
    """Tests that process_data works with input_readers.ALLOW_CHECKPOINT."""
    # A start time of 0 makes the slice look long overdue, so process_data
    # should return False (stop the slice) when it sees ALLOW_CHECKPOINT.
    self.handler._start_time = 0
    self.assertFalse(self.handler.process_data(input_readers.ALLOW_CHECKPOINT,
                                               None,
                                               None,
                                               None))
def testScheduleSlice(self):
    """_schedule_slice enqueues exactly one worker task for the slice."""
    reader = input_readers.DatastoreInputReader(
        ENTITY_KIND,
        [key_range.KeyRange(key_start=self.key(75),
                            key_end=self.key(100),
                            direction="ASC",
                            include_start=False,
                            include_end=True)])
    transient = model.TransientShardState(
        "/mapreduce", self.mapreduce_spec,
        self.shard_id, 123, reader, reader)
    self.handler._schedule_slice(self.shard_state, transient)

    tasks = self.taskqueue.GetTasks("default")
    self.assertEquals(1, len(tasks))
    self.verify_shard_task(tasks[0], self.shard_id, 123)
def testScheduleSlice_Eta(self):
    """_schedule_slice honors an explicit eta for the worker task."""
    eta = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
    reader = input_readers.DatastoreInputReader(
        ENTITY_KIND,
        [key_range.KeyRange(key_start=self.key(75),
                            key_end=self.key(100),
                            direction="ASC",
                            include_start=False,
                            include_end=True)])
    transient = model.TransientShardState(
        "/mapreduce", self.mapreduce_spec,
        self.shard_id, 123, reader, reader)
    self.handler._schedule_slice(self.shard_state, transient, eta=eta)

    tasks = self.taskqueue.GetTasks("default")
    self.assertEquals(1, len(tasks))
    self.verify_shard_task(tasks[0], self.shard_id, 123, eta=eta)
def testScheduleSlice_Countdown(self):
    """_schedule_slice honors an explicit countdown for the worker task."""
    countdown = 60 * 60
    reader = input_readers.DatastoreInputReader(
        ENTITY_KIND,
        [key_range.KeyRange(key_start=self.key(75),
                            key_end=self.key(100),
                            direction="ASC",
                            include_start=False,
                            include_end=True)])
    transient = model.TransientShardState(
        "/mapreduce", self.mapreduce_spec,
        self.shard_id, 123, reader, reader)
    self.handler._schedule_slice(self.shard_state, transient,
                                 countdown=countdown)

    tasks = self.taskqueue.GetTasks("default")
    self.assertEquals(1, len(tasks))
    self.verify_shard_task(tasks[0], self.shard_id, 123, countdown=countdown)
def testScheduleSlice_QueuePreserved(self):
    """Tests that _schedule_slice will enqueue tasks on the calling queue."""
    # The environment variable simulates being invoked from a custom queue.
    os.environ["HTTP_X_APPENGINE_QUEUENAME"] = "crazy-queue"
    try:
        query_range = input_readers.DatastoreInputReader(
            ENTITY_KIND,
            [key_range.KeyRange(key_start=self.key(75),
                                key_end=self.key(100),
                                direction="ASC",
                                include_start=False,
                                include_end=True)])
        self.handler._schedule_slice(
            self.shard_state,
            model.TransientShardState(
                "/mapreduce", self.mapreduce_spec,
                self.shard_id, 123, query_range, query_range))

        tasks = self.taskqueue.GetTasks("crazy-queue")
        self.assertEquals(1, len(tasks))
        self.verify_shard_task(tasks[0], self.shard_id, 123)
    finally:
        # Always restore the environment for subsequent tests.
        del os.environ["HTTP_X_APPENGINE_QUEUENAME"]
def testScheduleSlice_TombstoneErrors(self):
    """Tests when the scheduled slice already exists."""
    # Schedule the identical slice twice; the second call collides with the
    # tombstoned task name and the handler swallows the error.
    for _ in (0, 1):
        self.handler._schedule_slice(self.shard_state, self.transient_state)

    # The task won't re-enqueue because it has the same name.
    self.assertEquals(1, len(self.taskqueue.GetTasks("default")))
def testScheduleSlice_Hooks(self):
    """Test _schedule_slice method with a hooks class installed."""
    hooks_class_name = __name__ + '.' + TestHooks.__name__
    self.init(hooks_class_name=hooks_class_name)
    self.handler._schedule_slice(self.shard_state, self.transient_state)

    # The hook intercepts enqueueing, so nothing lands on the real queue.
    self.assertEquals(0, len(self.taskqueue.GetTasks("default")))
    self.assertEquals(1, len(TestHooks.enqueue_worker_task_calls))
    task, queue_name = TestHooks.enqueue_worker_task_calls[0]
    self.assertEquals("/mapreduce/worker_callback", task.url)
    self.assertEquals("default", queue_name)
def testScheduleSlice_RaisingHooks(self):
    """Test _schedule_slice method with an empty hooks class installed.

    The installed hooks class will raise NotImplementedError in response to
    all method calls, so scheduling must fall back to the default path.
    """
    hooks_class_name = hooks.__name__ + '.' + hooks.Hooks.__name__
    self.init(hooks_class_name=hooks_class_name)

    input_reader = input_readers.DatastoreInputReader(
        ENTITY_KIND,
        [key_range.KeyRange(key_start=self.key(75),
                            key_end=self.key(100),
                            direction="ASC",
                            include_start=False,
                            include_end=True)])
    self.handler._schedule_slice(
        self.shard_state,
        model.TransientShardState(
            "/mapreduce", self.mapreduce_spec,
            self.shard_id, 123, input_reader, input_reader))

    # Task ends up on the default queue despite the raising hooks.
    tasks = self.taskqueue.GetTasks("default")
    self.assertEquals(1, len(tasks))
    self.verify_shard_task(tasks[0], self.shard_id, 123,
                           hooks_class_name=hooks_class_name)
def testQuotaCanBeOptedOut(self):
    """Test that disabling quota lets the shard run with zero quota."""
    e1 = TestEntity()
    e1.put()

    self.init(mapper_parameters={"enable_quota": False})
    self.quota_manager.set(self.shard_id, 0)
    self.handler.post()

    # something should still be processed.
    self.assertEquals([str(e1.key())], TestHandler.processed_keys)
    self.assertEquals(1, InputReader.yields)

    # shard should have completed successfully despite zero quota
    self.verify_shard_state(
        model.ShardState.get_by_shard_id(self.shard_id),
        processed=1, active=False, result_status="success")

    # no continuation task is needed since the shard is done
    tasks = self.taskqueue.GetTasks("default")
    self.assertEquals(0, len(tasks))
def testNoQuotaAtAll(self):
    """Test work cycle if there was no quota at the very beginning."""
    TestEntity().put()
    self.quota_manager.set(self.shard_id, 0)
    self.handler.post()

    # nothing should be processed.
    self.assertEquals([], TestHandler.processed_keys)
    self.assertEquals(0, InputReader.yields)

    # slice should be still active
    self.verify_shard_state(
        model.ShardState.get_by_shard_id(self.shard_id),
        processed=0)

    # new task should be spawned to retry once quota is refilled
    tasks = self.taskqueue.GetTasks("default")
    self.assertEquals(1, len(tasks))
    self.verify_shard_task(tasks[0], self.shard_id, self.slice_id + 1)
def testQuotaForPartialBatchOnly(self):
    """Test work cycle if there was quota for less than a batch."""
    for i in range(handlers._QUOTA_BATCH_SIZE * 2):
        TestEntity().put()
    # NOTE(review): relies on Python 2 integer division; under Python 3
    # `/` would yield a float -- confirm before porting.
    quota = handlers._QUOTA_BATCH_SIZE / 2
    self.quota_manager.set(self.shard_id, quota)
    self.handler.post()

    # only quota size should be processed
    self.assertEquals(quota, len(TestHandler.processed_keys))
    self.assertEquals(0, self.quota_manager.get(self.shard_id))
    self.assertEquals(quota, InputReader.yields)

    # slice should be still active
    self.verify_shard_state(
        model.ShardState.get_by_shard_id(self.shard_id),
        processed=quota)

    # new task should be spawned
    tasks = self.taskqueue.GetTasks("default")
    self.assertEquals(1, len(tasks))
    self.verify_shard_task(tasks[0], self.shard_id, self.slice_id + 1)
def testQuotaForBatchAndAHalf(self):
    """Test work cycle if there was quota for batch and a half."""
    for i in range(handlers._QUOTA_BATCH_SIZE * 2):
        TestEntity().put()
    # NOTE(review): relies on Python 2 integer division (see sibling test).
    quota = 3 * handlers._QUOTA_BATCH_SIZE / 2
    self.quota_manager.set(self.shard_id, quota)
    self.handler.post()

    # only quota size should be processed
    self.assertEquals(quota, len(TestHandler.processed_keys))
    self.assertEquals(0, self.quota_manager.get(self.shard_id))
    self.assertEquals(quota, InputReader.yields)

    # slice should be still active
    self.verify_shard_state(
        model.ShardState.get_by_shard_id(self.shard_id),
        processed=quota)

    # new task should be spawned
    tasks = self.taskqueue.GetTasks("default")
    self.assertEquals(1, len(tasks))
    self.verify_shard_task(tasks[0], self.shard_id, self.slice_id + 1)
def testSlicRetryExceptionInHandler(self):
    """Slice is retried on RetrySliceError; job fails after too many retries.

    NOTE(review): the method name has a typo ("Slic"); it is kept as-is to
    preserve the public test identifier.
    """
    self.init(__name__ + ".test_handler_raise_slice_retry_exception")
    TestEntity().put()

    # First time, it gets re-raised so the task queue retries the slice;
    # the shard state stays active and untouched.
    self.assertRaises(errors.RetrySliceError, self.handler.post)
    self.verify_shard_state(
        model.ShardState.get_by_shard_id(self.shard_id),
        active=True,
        processed=0)

    # After the Nth attempt, we abort the whole job.
    self.handler.task_retry_count = lambda: 25
    # The original wrapped this call in a no-op `try/finally: pass`;
    # calling it directly is equivalent.
    self.handler.post()
    self.verify_shard_state(
        model.ShardState.get_by_shard_id(self.shard_id),
        active=False,
        result_status=model.ShardState.RESULT_FAILED,
        processed=1)
def testShardRetryExceptionInHandler(self):
    """Test that a shard-retry exception restarts the shard from scratch."""
    self.init(__name__ + ".test_handler_raise_shard_retry_exception")
    TestEntity().put()
    self.handler.post()

    shard_state = model.ShardState.get_by_shard_id(self.shard_id)
    self.verify_shard_state(shard_state)
    # Retry counter incremented and progress reset.
    self.assertEquals(1, shard_state.retries)
    self.assertEquals("", shard_state.last_work_item)
def testExceptionInHandler(self):
    """Test behavior when handler throws exception."""
    self.init(__name__ + ".test_handler_raise_exception")
    TestEntity().put()

    # Stub out context._set
    m = mox.Mox()
    m.StubOutWithMock(context.Context, "_set", use_mock_anything=True)

    # Record calls
    context.Context._set(mox.IsA(context.Context))
    # Context should not be flushed on error
    context.Context._set(None)

    m.ReplayAll()
    try:  # test, verify
        self.assertRaises(TestException, self.handler.post)

        # quota should be still consumed
        self.assertEquals(self.initial_quota - 1,
                          self.quota_manager.get(self.shard_id))

        # slice should be still active
        shard_state = model.ShardState.get_by_shard_id(self.shard_id)
        self.verify_shard_state(shard_state, processed=0)
        # mapper calls counter should not be incremented
        self.assertEquals(0, shard_state.counters_map.get(
            context.COUNTER_MAPPER_CALLS))

        # new task should not be spawned
        tasks = self.taskqueue.GetTasks("default")
        self.assertEquals(0, len(tasks))

        m.VerifyAll()
    finally:
        m.UnsetStubs()
def testFailJobExceptionInHandler(self):
    """Test that a fail-job exception marks the shard as failed."""
    self.init(__name__ + ".test_handler_raise_fail_job_exception")
    TestEntity().put()

    # Stub out context._set
    m = mox.Mox()
    m.StubOutWithMock(context.Context, "_set", use_mock_anything=True)

    # Record calls
    context.Context._set(mox.IsA(context.Context))
    # Context should not be flushed on error
    context.Context._set(None)

    m.ReplayAll()
    try:  # test, verify
        self.handler.post()

        # quota should be still consumed
        self.assertEquals(self.initial_quota - 1,
                          self.quota_manager.get(self.shard_id))

        # slice should not be active
        shard_state = model.ShardState.get_by_shard_id(self.shard_id)
        self.verify_shard_state(
            shard_state,
            processed=1,
            active=False,
            result_status = model.ShardState.RESULT_FAILED)
        self.assertEquals(1, shard_state.counters_map.get(
            context.COUNTER_MAPPER_CALLS))

        # new task should not be spawned
        tasks = self.taskqueue.GetTasks("default")
        self.assertEquals(0, len(tasks))

        m.VerifyAll()
    finally:
        m.UnsetStubs()
def testContext(self):
    """Test proper context initialization."""
    # The execution-count header should flow into the context's retry count.
    self.handler.request.headers["X-AppEngine-TaskExecutionCount"] = 5
    TestEntity().put()

    m = mox.Mox()
    m.StubOutWithMock(context.Context, "_set", use_mock_anything=True)

    context.Context._set(MatchesContext(task_retry_count=5))
    context.Context._set(None)

    m.ReplayAll()
    try:  # test, verify
        self.handler.post()
        m.VerifyAll()
    finally:
        m.UnsetStubs()
def testContextFlush(self):
    """Test context handling."""
    TestEntity().put()

    # Stub out context
    m = mox.Mox()
    m.StubOutWithMock(context.Context, "_set", use_mock_anything=True)
    m.StubOutWithMock(context.Context, "flush", use_mock_anything=True)

    # Record calls: set, flush on success, then clear.
    context.Context._set(mox.IsA(context.Context))
    context.Context.flush()
    context.Context._set(None)

    m.ReplayAll()
    try:  # test, verify
        self.handler.post()

        # 1 entity should be processed
        self.assertEquals(1, len(TestHandler.processed_keys))

        m.VerifyAll()
    finally:
        m.UnsetStubs()
def testOperationYield(self):
    """Test yielding operations from handler."""
    self.init(__name__ + ".test_handler_yield_op")
    e1 = TestEntity().put()
    e2 = TestEntity().put()

    self.handler.post()
    # Each yielded operation is executed twice per entity by the test op.
    self.assertEquals([str(e1), str(e1), str(e2), str(e2)],
                      TestOperation.processed_keys)
def testOutputWriter(self):
    """Test that yielded keys are written and the writer is finalized."""
    self.init(__name__ + ".test_handler_yield_keys",
              output_writer_spec=__name__ + ".TestOutputWriter")

    e1 = TestEntity().put()
    e2 = TestEntity().put()

    self.handler.post()

    # Both writes happen before the single finalize event.
    self.assertEquals(
        ["write-" + str(e1),
         "write-" + str(e2),
         "finalize-1",
         ], TestOutputWriter.events)
class ControllerCallbackHandlerTest(MapreduceHandlerTestBase):
    """Test handlers.ControllerCallbackHandler."""

    def setUp(self):
        """Sets up the test harness."""
        MapreduceHandlerTestBase.setUp(self)

        self.handler = handlers.ControllerCallbackHandler()
        # Use controllable mock time instead of the wall clock.
        self.handler._time = MockTime.time
        self.handler.initialize(mock_webapp.MockRequest(),
                                mock_webapp.MockResponse())
        self.handler.request.path = "/mapreduce/worker_callback"

        self.mapreduce_state = model.MapreduceState.create_new()
        self.mapreduce_state.put()

        self.mapreduce_id = self.mapreduce_state.key().name()
        mapreduce_spec = self.create_mapreduce_spec(self.mapreduce_id, 3)
        # Configure a done callback so verify_done_task() has work to check.
        mapreduce_spec.params[PARAM_DONE_CALLBACK] = "/fin"
        mapreduce_spec.params[PARAM_DONE_CALLBACK_QUEUE] = "crazy-queue"

        self.mapreduce_state.mapreduce_spec = mapreduce_spec
        self.mapreduce_state.chart_url = "http://chart.apis.google.com/chart?"
        self.mapreduce_state.active = True
        self.mapreduce_state.put()

        self.verify_mapreduce_state(self.mapreduce_state, shard_count=3)

        self.handler.request.set("mapreduce_spec", mapreduce_spec.to_json_str())
        self.handler.request.set("serial_id", "1234")

        self.quota_manager = quota.QuotaManager(memcache.Client())
        self.handler.request.headers["X-AppEngine-QueueName"] = "default"

    def verify_done_task(self):
        """Asserts that exactly one done-callback task was enqueued."""
        tasks = self.taskqueue.GetTasks("crazy-queue")
        self.assertEquals(1, len(tasks))
        task = tasks[0]
        self.assertTrue(task)

        self.assertEquals("/fin", task["url"])
        self.assertEquals("POST", task["method"])
        headers = dict(task["headers"])
        self.assertEquals(self.mapreduce_id, headers["Mapreduce-Id"])

    def testCSRF(self):
        """Tests that the handler only accepts requests from the task queue."""
        del self.handler.request.headers["X-AppEngine-QueueName"]

        self.handler.post()
        self.assertEquals(403, self.handler.response.status)

    def testSmoke(self):
        """Verify main execution path.

        Should aggregate all data from all shards correctly.
        """
        # check that chart_url is updated.
        self.mapreduce_state.chart_url = ""
        self.mapreduce_state.put()

        for i in range(3):
            shard_state = self.create_shard_state(self.mapreduce_id, i)
            shard_state.counters_map.increment(
                COUNTER_MAPPER_CALLS, i * 2 + 1)  # 1, 3, 5
            # We should have mapreduce active even some (not all)
            # shards are not active
            if i == 0:
                shard_state.active = False
            shard_state.put()

        self.handler.post()

        mapreduce_state = model.MapreduceState.get_by_key_name(self.mapreduce_id)
        # we should have 1 + 3 + 5 = 9 elements processed
        self.verify_mapreduce_state(mapreduce_state, processed=9, shard_count=3)
        self.assertEquals(0, mapreduce_state.failed_shards)
        self.assertEquals(0, mapreduce_state.aborted_shards)

        # new task should be spawned
        tasks = self.taskqueue.GetTasks("default")
        self.assertEquals(1, len(tasks))
        self.verify_controller_task(tasks[0], shard_count=3)

    def testMissingShardState(self):
        """Correct handling of missing shard state."""
        self.handler.post()

        mapreduce_state = model.MapreduceState.get_by_key_name(self.mapreduce_id)
        self.verify_mapreduce_state(mapreduce_state, active=False, shard_count=3,
                                    result_status=model.ShardState.RESULT_FAILED)
        self.assertEquals(0, mapreduce_state.failed_shards)
        self.assertEquals(0, mapreduce_state.aborted_shards)

        # Abort signal should be present.
        self.assertEquals(
            model.MapreduceControl.ABORT,
            db.get(model.MapreduceControl.get_key_by_job_id(
                self.mapreduce_id)).command)

        tasks = self.taskqueue.GetTasks("default")
        # Finalize task should be spawned.
        self.assertEquals(1, len(tasks))
        self.assertEquals("/mapreduce/finalizejob_callback", tasks[0]["url"])

        # Done Callback task should be spawned
        self.verify_done_task()

    def testAllShardsAreDone(self):
        """Mapreduce should become inactive when all shards have finished."""
        for i in range(3):
            shard_state = self.create_shard_state(self.mapreduce_id, i)
            shard_state.counters_map.increment(
                COUNTER_MAPPER_CALLS, i * 2 + 1)  # 1, 3, 5
            shard_state.active = False
            shard_state.result_status = model.ShardState.RESULT_SUCCESS
            shard_state.put()

        self.handler.post()

        mapreduce_state = model.MapreduceState.get_by_key_name(self.mapreduce_id)
        self.verify_mapreduce_state(
            mapreduce_state, processed=9, active=False, shard_count=3,
            result_status=model.MapreduceState.RESULT_SUCCESS)
        self.assertEquals(0, mapreduce_state.failed_shards)
        self.assertEquals(0, mapreduce_state.aborted_shards)

        tasks = self.taskqueue.GetTasks("default")
        # Finalize task should be spawned.
        self.assertEquals(1, len(tasks))
        self.assertEquals("/mapreduce/finalizejob_callback", tasks[0]["url"])

        # Done Callback task should be spawned
        self.verify_done_task()

        self.assertEquals(
            3, len(model.ShardState.find_by_mapreduce_state(mapreduce_state)))

    def testShardsDoneFinalizeOutputWriter(self):
        """The output writer's finalize_job hook runs once all shards finish."""
        self.mapreduce_state.mapreduce_spec.mapper.output_writer_spec = (
            __name__ + '.' + TestOutputWriter.__name__)
        self.handler.request.set("mapreduce_spec",
                                 self.mapreduce_state.mapreduce_spec.to_json_str())

        for i in range(3):
            shard_state = self.create_shard_state(self.mapreduce_id, i)
            shard_state.counters_map.increment(
                COUNTER_MAPPER_CALLS, i * 2 + 1)  # 1, 3, 5
            shard_state.active = False
            shard_state.result_status = model.ShardState.RESULT_SUCCESS
            shard_state.put()

        self.handler.post()

        self.assertEquals(["finalize_job"], TestOutputWriter.events)

    def testShardsDoneWithHooks(self):
        """With hooks installed, the done task is enqueued through the hook."""
        self.mapreduce_state.mapreduce_spec.hooks_class_name = (
            __name__ + '.' + TestHooks.__name__)
        self.handler.request.set("mapreduce_spec",
                                 self.mapreduce_state.mapreduce_spec.to_json_str())

        for i in range(3):
            shard_state = self.create_shard_state(self.mapreduce_id, i)
            shard_state.active = False
            shard_state.result_status = model.ShardState.RESULT_SUCCESS
            shard_state.put()

        self.handler.post()
        self.assertEquals(1, len(TestHooks.enqueue_done_task_calls))
        task, queue_name = TestHooks.enqueue_done_task_calls[0]
        self.assertEquals('crazy-queue', queue_name)
        self.assertEquals('/fin', task.url)

    def testShardFailure(self):
        """Tests that when one shard fails the job will be aborted."""
        for i in range(3):
            shard_state = self.create_shard_state(self.mapreduce_id, i)
            if i == 0:
                shard_state.result_status = model.ShardState.RESULT_FAILED
                shard_state.active = False
            else:
                shard_state.result_status = model.ShardState.RESULT_SUCCESS
                shard_state.active = True
            shard_state.put()

        self.handler.post()

        # The job stays active while the remaining shards wind down.
        mapreduce_state = model.MapreduceState.get_by_key_name(self.mapreduce_id)
        self.verify_mapreduce_state(
            mapreduce_state, active=True, shard_count=3)
        self.assertEquals(1, mapreduce_state.failed_shards)
        self.assertEquals(0, mapreduce_state.aborted_shards)

        # new task should be spawned
        tasks = self.taskqueue.GetTasks("default")
        self.assertEquals(1, len(tasks))
        self.verify_controller_task(tasks[0], shard_count=3)

        # Abort signal should be present.
        self.assertEquals(
            model.MapreduceControl.ABORT,
            db.get(model.MapreduceControl.get_key_by_job_id(
                self.mapreduce_id)).command)

    def testShardFailureAllDone(self):
        """Tests that individual shard failure affects the job outcome."""
        for i in range(3):
            shard_state = self.create_shard_state(self.mapreduce_id, i)
            shard_state.active = False
            if i == 0:
                shard_state.result_status = model.ShardState.RESULT_FAILED
            else:
                shard_state.result_status = model.ShardState.RESULT_SUCCESS
            shard_state.put()

        self.handler.post()

        mapreduce_state = model.MapreduceState.get_by_key_name(self.mapreduce_id)
        self.verify_mapreduce_state(
            mapreduce_state, active=False, shard_count=3,
            result_status=model.ShardState.RESULT_FAILED)
        self.assertEquals(1, mapreduce_state.failed_shards)
        self.assertEquals(0, mapreduce_state.aborted_shards)

        tasks = self.taskqueue.GetTasks("default")
        # Finalize task should be spawned.
        self.assertEquals(1, len(tasks))
        self.assertEquals("/mapreduce/finalizejob_callback", tasks[0]["url"])

        # Done Callback task should be spawned
        self.verify_done_task()

        self.assertEquals(
            3, len(model.ShardState.find_by_mapreduce_state(mapreduce_state)))

    def testUserAbort(self):
        """Tests that user abort will stop the job."""
        for i in range(3):
            shard_state = self.create_shard_state(self.mapreduce_id, i)
            shard_state.active = True
            shard_state.put()

        model.MapreduceControl.abort(self.mapreduce_id)
        self.handler.post()
        mapreduce_state = model.MapreduceState.get_by_key_name(self.mapreduce_id)
        self.verify_mapreduce_state(
            mapreduce_state, active=True, shard_count=3)
        self.assertEquals(0, mapreduce_state.failed_shards)
        self.assertEquals(0, mapreduce_state.aborted_shards)

        # new task should be spawned
        tasks = self.taskqueue.GetTasks("default")
        self.assertEquals(1, len(tasks))
        self.verify_controller_task(tasks[0], shard_count=3)
        self.taskqueue.FlushQueue("default")

        # Repeated calls to callback closure while the shards are active will
        # result in a no op. As the controller waits for the shards to finish.
        self.handler.post()
        mapreduce_state = model.MapreduceState.get_by_key_name(self.mapreduce_id)
        self.verify_mapreduce_state(
            mapreduce_state, active=True, shard_count=3)
        self.assertEquals(0, mapreduce_state.failed_shards)
        self.assertEquals(0, mapreduce_state.aborted_shards)

        tasks = self.taskqueue.GetTasks("default")
        self.assertEquals(1, len(tasks))
        self.verify_controller_task(tasks[0], shard_count=3)
        self.taskqueue.FlushQueue("default")

        # Force all shards to completion state (success, failure, or abort).
        shard_state_list = model.ShardState.find_by_mapreduce_state(mapreduce_state)
        self.assertEquals(3, len(shard_state_list))
        shard_state_list[0].active = False
        shard_state_list[0].result_status = model.ShardState.RESULT_SUCCESS
        shard_state_list[1].active = False
        shard_state_list[1].result_status = model.ShardState.RESULT_FAILED
        shard_state_list[2].active = False
        shard_state_list[2].result_status = model.ShardState.RESULT_ABORTED
        db.put(shard_state_list)

        self.handler.post()
        mapreduce_state = model.MapreduceState.get_by_key_name(self.mapreduce_id)
        self.verify_mapreduce_state(
            mapreduce_state, active=False, shard_count=3,
            result_status=model.ShardState.RESULT_ABORTED)
        self.assertEquals(1, mapreduce_state.failed_shards)
        self.assertEquals(1, mapreduce_state.aborted_shards)

        tasks = self.taskqueue.GetTasks("default")
        # Finalize task should be spawned.
        self.assertEquals(1, len(tasks))
        self.assertEquals("/mapreduce/finalizejob_callback", tasks[0]["url"])

        # Done Callback task should be spawned
        self.verify_done_task()

    def testInitialQuota(self):
        """Tests that the controller gives shards no quota to start."""
        shard_states = []
        for i in range(3):
            shard_state = self.create_shard_state(self.mapreduce_id, i)
            shard_states.append(shard_state)
            shard_state.put()

        self.handler.post()

        for shard_state in shard_states:
            self.assertEquals(0, self.quota_manager.get(shard_state.shard_id))

    def testQuotaRefill(self):
        """Test that controller refills quota after some time."""
        shard_states = []
        for i in range(3):
            shard_state = self.create_shard_state(self.mapreduce_id, i)
            shard_states.append(shard_state)
            shard_state.put()

        mapreduce_state = model.MapreduceState.get_by_key_name(self.mapreduce_id)
        # NOTE(review): this test uses utcfromtimestamp while
        # testQuotaIsSplitOnlyBetweenActiveShards uses fromtimestamp --
        # confirm whether the naive/UTC mismatch is intentional.
        mapreduce_state.last_poll_time = \
            datetime.datetime.utcfromtimestamp(int(MockTime.time()))
        mapreduce_state.put()

        self.handler.post()

        # 0 second passed. No quota should be filled
        for shard_state in shard_states:
            self.assertEquals(0, self.quota_manager.get(shard_state.shard_id))

        MockTime.advance_time(1)
        self.handler.post()

        # 1 second passed: each of the 3 shards is refilled with
        # ceil(1000000 / 3) = 333334 quota units.
        # NOTE(review): presumably the rate is 1,000,000 entities/sec split
        # evenly across shards -- the old "ceil(33.3) = 34" comment was stale.
        for shard_state in shard_states:
            self.assertEquals(333334, self.quota_manager.get(shard_state.shard_id))

    def testQuotaIsSplitOnlyBetweenActiveShards(self):
        """Test that quota is split only between active shards."""
        active_shard_states = []
        for i in range(3):
            shard_state = self.create_shard_state(self.mapreduce_id, i)
            if i == 1:
                shard_state.active = False
            else:
                active_shard_states.append(shard_state)
            shard_state.put()

        mapreduce_state = model.MapreduceState.get_by_key_name(self.mapreduce_id)
        mapreduce_state.last_poll_time = \
            datetime.datetime.fromtimestamp(int(MockTime.time()))
        mapreduce_state.put()

        MockTime.advance_time(1)
        self.handler.post()

        # Only the 2 active shards share the refill: 1000000 / 2 = 500000.
        for shard_state in active_shard_states:
            self.assertEquals(500000, self.quota_manager.get(shard_state.shard_id))

    def testScheduleQueueName(self):
        """Tests that the calling queue name is preserved on schedule calls."""
        os.environ["HTTP_X_APPENGINE_QUEUENAME"] = "crazy-queue"
        try:
            self.mapreduce_state.put()
            for i in range(3):
                shard_state = self.create_shard_state(self.mapreduce_id, i)
                shard_state.put()

            self.handler.post()

            # new task should be spawned on the calling queue
            tasks = self.taskqueue.GetTasks("crazy-queue")
            self.assertEquals(1, len(tasks))
            self.verify_controller_task(tasks[0], shard_count=3)
        finally:
            del os.environ["HTTP_X_APPENGINE_QUEUENAME"]
class CleanUpJobTest(testutil.HandlerTestBase):
    """Tests cleaning up jobs."""

    def setUp(self):
        """Sets up the test harness."""
        testutil.HandlerTestBase.setUp(self)

        TestKind().put()
        # Start a real map job whose state the handler under test will delete.
        self.mapreduce_id = control.start_map(
            "my job 1",
            "__main__.TestMap",
            "mapreduce.input_readers.DatastoreInputReader",
            {"entity_kind": "__main__.TestKind"},
            4)

        self.handler = handlers.CleanUpJobHandler()
        self.handler.initialize(mock_webapp.MockRequest(),
                                mock_webapp.MockResponse())
        self.handler.request.path = "/mapreduce/command/clean_up_job"

        self.handler.request.headers["X-Requested-With"] = "XMLHttpRequest"

    def KickOffMapreduce(self):
        """Executes pending kickoff task."""
        kickoff_task = self.taskqueue.GetTasks("default")[0]
        handler = handlers.KickOffJobHandler()
        handler.initialize(mock_webapp.MockRequest(), mock_webapp.MockResponse())
        handler.request.path = "/mapreduce/kickoffjob_callback"
        # Re-create the original POST parameters from the queued task body.
        handler.request.params.update(
            cgi.parse_qsl(base64.b64decode(kickoff_task["body"])))
        handler.request.headers["X-AppEngine-QueueName"] = "default"
        handler.post()
        self.taskqueue.DeleteTask("default", kickoff_task["name"])

    def testCSRF(self):
        """Test that we check the X-Requested-With header."""
        del self.handler.request.headers["X-Requested-With"]
        self.handler.post()
        self.assertEquals(403, self.handler.response.status)

    def testBasic(self):
        """Tests cleaning up the job.

        Note: This cleans up a running mapreduce, but that's okay because
        the prohibition against doing so is done on the client side.
        """
        self.KickOffMapreduce()
        key = model.MapreduceState.get_key_by_job_id(self.mapreduce_id)
        self.assertTrue(db.get(key))
        self.handler.request.set("mapreduce_id", self.mapreduce_id)
        self.handler.post()
        result = simplejson.loads(self.handler.response.out.getvalue())
        self.assertEquals({"status": ("Job %s successfully cleaned up." %
                                      self.mapreduce_id) },
                          result)
        # The persisted mapreduce state must be gone after cleanup.
        self.assertFalse(db.get(key))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
xcbat/vnpy | vnpy/api/sgit/pyscript/generate_struct.py | 15 | 1494 | # encoding: UTF-8
__author__ = 'CHENXY'
from sgit_data_type import *
def main():
    """Main function.

    Parses the C++ header SgitFtdcUserApiStruct.h line by line and generates
    sgit_struct.py, which maps each struct name to a dict of its member
    names and (Python) types.

    NOTE(review): this is Python 2 code (str.decode('gbk') on the output);
    it also assumes the header is well-formed -- a member line appearing
    before any struct declaration would reference `name` before assignment.
    """
    fcpp = open('SgitFtdcUserApiStruct.h', 'r')
    fpy = open('sgit_struct.py', 'w')
    fpy.write('# encoding: UTF-8\n')
    fpy.write('\n')
    fpy.write('structDict = {}\n')
    fpy.write('\n')

    for no, line in enumerate(fcpp):
        # Skip plain '//' comments (but not '///' doc comments).
        if '//' in line and '///' not in line:
            continue

        # Doc comment for a struct declaration
        if '///' in line and '\t' not in line:
            py_line = '#' + line[3:]

        # Doc comment for a struct member
        elif '\t///' in line:
            py_line = '#' + line[4:]

        # Struct declaration
        elif 'struct ' in line:
            content = line.split(' ')
            name = content[1].replace('\n','')
            py_line = '%s = {}\n' % name

        # Struct member: "\t<typedef>\t<variable>;"
        elif '\t' in line and '///' not in line:
            content = line.split('\t')
            typedef = content[1]
            type_ = typedefDict[typedef]
            variable = content[2].replace(';\n', "")
            py_line = '%s["%s"] = "%s"\n' % (name, variable, type_)

        # End of struct: register it in structDict
        elif '}' in line:
            py_line = "structDict['%s'] = %s\n\n" % (name, name)

        # Start of struct: nothing to emit
        elif '{' in line:
            py_line = ''

        # Anything else
        else:
            py_line = '\n'

        # Header comments are GBK-encoded; re-encode them as UTF-8.
        fpy.write(py_line.decode('gbk').encode('utf-8'))
# Run the generator when invoked as a script.
if __name__ == '__main__':
    main()
| mit |
hamogu/marxs | marxs/simulator/tests/test_simulator.py | 1 | 2812 | import numpy as np
from astropy.table import Table
import pytest
from ..simulator import KeepCol, Sequence, BaseContainer
from ...optics import FlatDetector
# Module-level fixtures: three detectors arranged in nested Sequences
# (mission contains f1 plus a sub-sequence holding f2 and f3).
f1 = FlatDetector()
f2 = FlatDetector()
f3 = FlatDetector()

s_l2 = Sequence(elements=[f2, f3])
mission = Sequence(elements=[f1, s_l2])
# NOTE(review): "seach" is a typo for "search"; the name is kept so test
# selection by name stays stable.
def test_seach_all():
    '''Check full search finding all elements.'''
    assert [mission, s_l2] == mission.elements_of_class(Sequence)
    # BaseContainer itself matches nothing unless subclasses are allowed.
    assert [] == mission.elements_of_class(BaseContainer)
    assert [mission, s_l2] == mission.elements_of_class(BaseContainer,
                                                       subclass_ok=True)
    assert [f1, f2, f3] == mission.elements_of_class(FlatDetector)
    assert [f1, f2, f3] == mission.elements_of_class(FlatDetector, subclass_ok=True)
def test_seach_top():
    '''Check top search finding all elements.

    With stop_at_first=True the search stops descending once a match is
    found, so only the top-most matching container is returned.
    '''
    assert [mission] == mission.elements_of_class(Sequence, stop_at_first=True)
    assert [] == mission.elements_of_class(BaseContainer, stop_at_first=True)
    assert [mission] == mission.elements_of_class(BaseContainer,
                                                  subclass_ok=True,
                                                  stop_at_first=True)
    # Detectors are leaves, so stop_at_first still yields all three.
    assert [f1, f2, f3] == mission.elements_of_class(FlatDetector,
                                                     stop_at_first=True)
    assert [f1, f2, f3] == mission.elements_of_class(FlatDetector,
                                                     subclass_ok=True,
                                                     stop_at_first=True)
def test_format_saved_positions():
    '''Reformat saved positions and drop nearly identical values.'''
    # Three saved position arrays; pos2 differs from pos1 by only 1e-4,
    # i.e. less than the atol used below, so it should be merged away.
    pos0 = np.arange(20).reshape(5,4)
    pos1 = pos0 + 1
    pos2 = pos1 + 1e-4
    pos = KeepCol('testcase')
    pos.data = [pos0, pos1, pos2]
    d = pos.format_positions(atol=1e-2)
    # 5 rays, 2 surviving positions, homogeneous coords reduced to xyz.
    assert d.shape == (5, 2, 3)
    # First row of pos0 is [0, 1, 2, 3]; dividing by the homogeneous
    # coordinate (3) gives [0, 1/3, 2/3].
    assert np.allclose(d[0, 0, :], [0, 1./3, 2./3])

    # Without atol no positions are dropped.
    d = pos.format_positions()
    assert d.shape == (5, 3, 3)
def test_empty_format_saved_positions():
    '''If the input contains no data, an error should be raised.'''
    keeper = KeepCol('testcase')
    with pytest.raises(ValueError) as excinfo:
        keeper.format_positions()
    assert 'contains no data' in str(excinfo.value)
def test_format_save_positions_fails():
    '''Can only be used when data is in homogeneous coordinates.'''
    a = KeepCol('testcase')
    # 1-D arrays of length 5 are not (n, 4) homogeneous position arrays.
    a.data = [np.arange(5), np.arange(5)]
    with pytest.raises(ValueError) as e:
        d = a.format_positions()
    assert 'homogeneous coordinates' in str(e.value)
def test_to_array():
    # Calling a KeepCol on a table appends that table's named column to the
    # collected data.
    a = KeepCol('testcase')
    col1 = Table([[1]], names=['testcase'])
    col2 = Table([[4]], names=['testcase'])
    a(col1)
    a(col2)
    # np.sqrt(a) coerces the KeepCol to an array -- presumably via an
    # __array__ hook on KeepCol (TODO confirm) -- giving sqrt([[1], [4]]).
    assert np.all(np.sqrt(a) == [[1.], [2.]])
| gpl-3.0 |
x111ong/django | tests/template_tests/filter_tests/test_truncatewords.py | 215 | 1705 | from django.template.defaultfilters import truncatewords
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class TruncatewordsTests(SimpleTestCase):
    # Template-level tests: truncatewords must leave autoescaping behaviour
    # intact (mark_safe input stays unescaped, plain input is escaped).

    @setup({'truncatewords01':
            '{% autoescape off %}{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}{% endautoescape %}'})
    def test_truncatewords01(self):
        # With autoescape off, neither value is escaped.
        output = self.engine.render_to_string('truncatewords01', {'a': 'alpha & bravo', 'b': mark_safe('alpha & bravo')})
        self.assertEqual(output, 'alpha & ... alpha & ...')

    @setup({'truncatewords02': '{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}'})
    def test_truncatewords02(self):
        # With default autoescaping, only the non-safe value is escaped.
        output = self.engine.render_to_string('truncatewords02', {'a': 'alpha & bravo', 'b': mark_safe('alpha & bravo')})
        self.assertEqual(output, 'alpha &amp; ... alpha & ...')
class FunctionTests(SimpleTestCase):
    # Direct unit tests of the truncatewords filter function.

    def test_truncate(self):
        self.assertEqual(truncatewords('A sentence with a few words in it', 1), 'A ...')

    def test_truncate2(self):
        self.assertEqual(
            truncatewords('A sentence with a few words in it', 5),
            'A sentence with a few ...',
        )

    def test_overtruncate(self):
        # Asking for more words than exist returns the string unchanged.
        self.assertEqual(
            truncatewords('A sentence with a few words in it', 100),
            'A sentence with a few words in it',
        )

    def test_invalid_number(self):
        # A non-numeric length is ignored rather than raising.
        self.assertEqual(
            truncatewords('A sentence with a few words in it', 'not a number'),
            'A sentence with a few words in it',
        )

    def test_non_string_input(self):
        # Non-string input is coerced to str first.
        self.assertEqual(truncatewords(123, 2), '123')
| bsd-3-clause |
kirmani/hlpr_cadence | third_party/hlpr_manipulation/hlpr_manipulation_utils/src/hlpr_manipulation_utils/manipulator.py | 1 | 17522 | import roslib; roslib.load_manifest('hlpr_manipulation_utils')
from sensor_msgs.msg import JointState
from vector_msgs.msg import JacoCartesianVelocityCmd, LinearActuatorCmd, GripperCmd, GripperStat
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from wpi_jaco_msgs.msg import AngularCommand, CartesianCommand
#from wpi_jaco_msgs.srv import GravComp
from hlpr_manipulation_utils.arm_moveit import *
import rospy
from math import pi, sqrt
from collections import namedtuple
from control_msgs.msg import FollowJointTrajectoryGoal, FollowJointTrajectoryAction
import actionlib
import time
class Manipulator:
    """Convenience aggregate of the arm, gripper and linear-actuator interfaces.

    Parameters
    ----------
    arm_prefix : str
        Side prefix for the arm/gripper topics ('right' or 'left').
    """

    def __init__(self, arm_prefix='right'):
        # Bug fix: arm_prefix was accepted but never forwarded, so a
        # Manipulator('left') still talked to the right arm and gripper.
        self.arm = Arm(arm_prefix)
        self.gripper = Gripper(arm_prefix)
        self.linear_actuator = LinearActuator()
class Gripper:
    """Thin wrapper around the Vector gripper command and status topics."""

    def __init__(self, prefix='right'):
        # Publisher for position/speed/force commands.
        self.pub_grp = rospy.Publisher('/vector/' + prefix + '_gripper/cmd', GripperCmd, queue_size = 10)
        self.cmd = GripperCmd()

        rospy.Subscriber('/vector/' + prefix + '_gripper/stat', GripperStat, self.st_cb)
        self.last_st_update = None
        # Latest status report. Starts as an empty message so the accessors
        # below work before the first stat message arrives.
        # Bug fix: the original initialized `gripper_stat` but the callback
        # and every accessor used `gripperStat`, so each accessor raised
        # AttributeError until a message was received.
        self.gripper_stat = GripperStat()

    def st_cb(self, inStat):
        """Status callback: cache the newest GripperStat and its arrival time."""
        self.gripper_stat = inStat
        # Bug fix: was reset to None, so the update time was never recorded.
        self.last_st_update = rospy.get_time()

    def is_ready(self):
        return self.gripper_stat.is_ready

    def is_reset(self):
        return self.gripper_stat.is_reset

    def is_moving(self):
        return self.gripper_stat.is_moving

    def object_detected(self):
        return self.gripper_stat.obj_detected

    def get_pos(self):
        return self.gripper_stat.position

    def get_commanded_pos(self):
        return self.gripper_stat.requested_position

    def get_applied_current(self):
        return self.gripper_stat.current

    def set_pos(self, position, speed = 0.02, force = 100, rate = 10, iterations = 5):
        """Publish a position command `iterations` times at `rate` Hz.

        position is the aperture in metres (0 closed .. 0.085 open).
        """
        self.cmd.position = position
        self.cmd.speed = speed
        self.cmd.force = force
        rrate = rospy.Rate(rate)
        for i in range(0, iterations):
            self.pub_grp.publish(self.cmd)
            rrate.sleep()

    def open(self, speed = 0.02, force = 100):
        """Fully open the gripper (0.085 m aperture)."""
        self.set_pos(0.085, speed, force)

    def close(self, speed = 0.02, force = 100):
        """Fully close the gripper."""
        self.set_pos(0, speed, force)
class LinearActuator:
    """Thin wrapper around the Vector linear-actuator (torso lift) command topic."""

    def __init__(self):
        self.pub_lin = rospy.Publisher('/vector/linear_actuator_cmd', LinearActuatorCmd, queue_size = 10)
        self.cmd = LinearActuatorCmd()
        # The actuator's joint state arrives on the generic vector topic.
        #i agree that the naming is weird
        rospy.Subscriber('/vector/joint_states', JointState, self.js_cb)
        self.last_js_update = None
        self.joint_state = None

    def js_cb(self, inState):
        # Cache the latest joint positions and when they were received.
        self.joint_state = inState.position
        self.last_js_update = rospy.get_time()

    def set_pos(self, position, vel = 0.):
        """Command an absolute actuator position in metres.

        Non-zero feed-forward velocities are deliberately rejected and reset
        to 0 as a safety measure.
        """
        self.cmd = LinearActuatorCmd()
        self.cmd.desired_position_m = position
        if not vel == 0:
            print 'What are you thinking? Setting the vel back to 0. If you are sure, change this line in the code'
            vel = 0.
        #probably feed forward velocity
        self.cmd.fdfwd_vel_mps = vel
        self.pub_lin.publish(self.cmd)
class Arm:
def __init__(self, arm_prefix = 'right'):
self.pub_jaco_ang = rospy.Publisher('/jaco_arm/angular_cmd', AngularCommand, queue_size = 10, latch=True)
self.pub_jaco_cart = rospy.Publisher('/jaco_arm/cartesian_cmd', CartesianCommand, queue_size = 10, latch=True)
self._arm_prefix = arm_prefix
self.arm_joint_names = [ self._arm_prefix + "_shoulder_pan_joint", self._arm_prefix + "_shoulder_lift_joint", self._arm_prefix + "_elbow_joint",
self._arm_prefix + "_wrist_1_joint", self._arm_prefix + "_wrist_2_joint", self._arm_prefix + "_wrist_3_joint"]
self.joint_states = [0 for i in range(0,len( self.arm_joint_names))]
rospy.Subscriber('/vector/right_arm/joint_states', JointState, self.js_cb)
self.last_js_update = None
self.smooth_joint_trajectory_client = actionlib.SimpleActionClient('/jaco_arm/joint_velocity_controller/trajectory', FollowJointTrajectoryAction)
#if(self.smooth_joint_trajectory_client.wait_for_server(rospy.Duration(5.0))):
if(self.smooth_joint_trajectory_client.wait_for_server()):
self.traj_connection = True
else:
self.traj_connection = False
print self.traj_connection
self.angular_cmd = AngularCommand()
self.angular_cmd.armCommand = True
self.angular_cmd.fingerCommand = False
self.angular_cmd.repeat = True
self.cartesian_cmd = CartesianCommand()
self.cartesian_cmd.armCommand = True
self.cartesian_cmd.fingerCommand = False
self.cartesian_cmd.repeat = True
self._init_tuck_poses()
# if(rospy.wait_for_service('/jaco_arm/grav_comp')):
# self.gc_connection = True
# else:
self.gc_connection = False
# self.grav_comp_client = rospy.ServiceProxy('/jaco_arm/grav_comp', GravComp)
self.arm_planner = ArmMoveIt()
def _get_arm_joint_values(self, msg):
# Cycle through the active joints and populate
# a dictionary for those values
joint_values = dict()
for joint_name in self._arm_joint_names:
# Find that joint name in msg
idx = msg.name.index(joint_name)
# Populate the joint message in a dictionary
joint_values[joint_name] = msg.position[idx]
return joint_values
def enableGravComp(self):
#if(not self.gc_connection):
# print 'GravComp Service not available'
print self.grav_comp_client(True)
def disableGravComp(self):
#if(not self.gc_connection):
# print 'GravComp Service not available'
print self.grav_comp_client(False)
def js_cb(self, inState):
for i in range(0,len(inState.position)):
self.joint_states[i] = inState.position[i]
self.last_js_update = rospy.get_time()
def get_pos(self):
return self.joint_states
def ang_pos_cmd(self, angles):
if not len(angles) == len(self.arm_joint_names):
print "Number of desired joint angles does not match the number of available joints"
return
self.angular_cmd.position = True
self.angular_cmd.joints = angles
self.pub_jaco_ang.publish(self.angular_cmd)
def ang_vel_cmd(self, velocities):
if not len(velocities) == len(self.arm_joint_names):
print "Number of desired joint velocities does not match the number of available joints"
return
self.angular_cmd.position = False
self.angular_cmd.joints = velocities
self.pub_jaco_ang.publish(self.angular_cmd)
def cart_pos_cmd(self, pose):
if not len(pose) == 6:
print "Not enough pose parameters specified"
return
self.cartesian_cmd.position = True
self.cartesian_cmd.arm.linear.x = pose[0]
self.cartesian_cmd.arm.linear.y = pose[1]
self.cartesian_cmd.arm.linear.z = pose[2]
self.cartesian_cmd.arm.angular.x = pose[3]
self.cartesian_cmd.arm.angular.y = pose[4]
self.cartesian_cmd.arm.angular.z = pose[5]
self.pub_jaco_cart.publish(self.cartesian_cmd)
def cart_pos_cmd(self, translation, rotation):
if not len(translation) == 3:
print "Not enough translations specified"
return
if not len(rotation) == 3:
print "Not enough rotations specified"
return
pose = translation + rotation
self.cart_pos_cmd(pose)
def cart_vel_cmd(self, vels):
if not len(vels) == 6:
print "Not enough velocities specified"
return
self.cartesian_cmd.position = False
self.cartesian_cmd.arm.linear.x = vels[0]
self.cartesian_cmd.arm.linear.y = vels[1]
self.cartesian_cmd.arm.linear.z = vels[2]
self.cartesian_cmd.arm.angular.x = vels[3]
self.cartesian_cmd.arm.angular.y = vels[4]
self.cartesian_cmd.arm.angular.z = vels[5]
self.pub_jaco_cart.publish(self.cartesian_cmd)
def cart_vel_cmd(self, translation, rotation):
if not len(translation) == 3:
print "Not enough translation velocities specified"
return
if not len(rotation) == 3:
print "Not enough rotation velocities specified"
return
vels = translation + rotation
self.cart_pos_cmd(vels)
def ang_cmd_loop(self,angles,rate=10,iterations=5):
rrate = rospy.Rate(rate)
for i in range(0,iterations):
self.ang_pos_cmd(angles)
rrate.sleep()
def ang_cmd_wait(self,angles,epsilon=0.05, maxIter=50, rate=10):
error = epsilon + 1;
epsilon=5
iterNum = 0;
#self.ang_cmd_loop(angles,rate)
self.ang_pos_cmd(angles)
rrate = rospy.Rate(rate)
while error > epsilon and iterNum < maxIter:
error = vectorDiff(self.joint_states,angles)
iterNum += 1
rrate.sleep()
if iterNum == maxIter:
return False
return True
#the full handling of vels, accs and effs will come later
# only the waypoints are needed for wpi jaco! the rest gets thrown away anyway so feel free to skip
def sendWaypointTrajectory(self, waypoints, durations = 0., vels = 0., accs = 0., effs = 0.):
if not self.ang_cmd_wait(waypoints[0]):
print 'Cannot go to the first point in the trajectory'
return None
# else:
# print 'Went to first'
if not self.traj_connection:
print 'Action server connection was not established'
return None
joint_traj = JointTrajectory()
joint_traj.joint_names = self.arm_joint_names;
if not durations == 0:
if not len(durations) == waypoints:
raise Exception('The number of duration points is not equal to the number of provided waypoints')
if not vels == 0:
if not len(vels) == waypoints:
raise Exception('The number velocity points is not equal to the number of provided waypoints')
if not accs == 0:
if not len(accs) == waypoints:
raise Exception('The number acceleration points is not equal to the number of provided waypoints')
if not effs == 0:
if not len(effs) == waypoints:
raise Exception('The number effort points is not equal to the number of provided waypoints')
if not effs == 0:
if not (vels == 0 and accs == 0):
raise Exception('Cannot specify efforts with velocities and accelerations at the same time')
if (not accs == 0) and vels == 0:
raise Exception('Cannot specify accelerations without velocities')
total_time_from_start = 0.5;
for t in range(0, len(waypoints)):
point = JointTrajectoryPoint()
waypoint = waypoints[t]
if not len(waypoint) == len(joint_traj.joint_names):
raise Exception('The number of provided joint positions is not equal to the number of available joints for index: ' + str(t))
point.positions = waypoint
if not vels == 0.:
velocity = vels[t]
if not len(velocity) == len(joint_traj.joint_names):
raise Exception('The number of provided joint velocities is not equal to the number of available joints for index: ' + str(t))
point.velocities = velocity
if not accs == 0.:
acceleration = accs[t]
if not len(acceleration) == len(joint_traj.joint_names):
raise Exception('The number of provided joint accelerations is not equal to the number of available joints for index: ' + str(t))
point.accelerations = accelerations
if not effs == 0.:
effort = effs[t]
if not len(effort) == len(joint_traj.joint_names):
raise Exception('The number of provided joint efforts is not equal to the number of available joints for index: ' + str(t))
point.effort = effort
if not durations == 0.:
point.duration = duration
# Deal with increasing time for each trajectory point
point.time_from_start = rospy.Duration(total_time_from_start)
total_time_from_start = total_time_from_start + 1.0
# Set the points
joint_traj.points.append(point)
traj_goal = FollowJointTrajectoryGoal()
traj_goal.trajectory = joint_traj
self.smooth_joint_trajectory_client.send_goal(traj_goal)
self.smooth_joint_trajectory_client.wait_for_result()
return self.smooth_joint_trajectory_client.get_result()
# Expects waypoints to be in joint space
def execute_traj_moveit(self, waypoints):
# Cycle through waypoints
for point in waypoints:
plannedTraj = self.arm_planner.plan_jointTargetInput(point)
if plannedTraj == None or len(plannedTraj.joint_trajectory.points) < 1:
print "Error: no plan found"
return -1
else:
traj_goal = FollowJointTrajectoryGoal()
traj_goal.trajectory = plannedTraj.joint_trajectory
self.smooth_joint_trajectory_client.send_goal(traj_goal)
self.smooth_joint_trajectory_client.wait_for_result()
self.smooth_joint_trajectory_client.get_result()
return 1
# Expects waypoints to be in end effector space
def execute_pose_traj_moveit(self, waypoints):
# Cycle through waypoints
for point in waypoints:
plannedTraj = self.arm_planner.plan_poseTargetInput(point)
if plannedTraj == None or len(plannedTraj.joint_trajectory.points) < 1:
print "Error: no plan found"
return -1
else:
self.execute_plan_traj(plannedTraj)
return 1
def execute_plan_traj(self, plannedTraj):
traj_goal = FollowJointTrajectoryGoal()
traj_goal.trajectory = plannedTraj.joint_trajectory
self.smooth_joint_trajectory_client.send_goal(traj_goal)
self.smooth_joint_trajectory_client.wait_for_result()
self.smooth_joint_trajectory_client.get_result()
#TODO: figure this out
def upper_tuck(self, use_moveit=True, vanilla = False):
if use_moveit:
# Just last point
return self.execute_traj_moveit([self.ut_wps[-1]])
elif vanilla:
self.sendWaypointTrajectory(self.ut_wps)
return 1
else:
self._ut_with_network()
return 1
def upper_untuck(self, use_moveit=True, vanilla = False):
if use_moveit:
# Just last point
self.execute_traj_moveit([self.un_ut_wps[-1]])
elif vanilla:
self.sendWaypointTrajectory(self.un_ut_wps)
else:
self.untuck()
def lower_tuck(self, use_moveit=True, vanilla = False):
if use_moveit:
# Just last point
self.execute_traj_moveit([self.lt_wps[-1]])
elif vanilla:
self.sendWaypointTrajectory(self.lt_wps)
else:
self._lt_with_network()
def lower_untuck(self, use_moveit=True, vanilla = False):
if use_moveit:
# Just last point
self.execute_traj_moveit([self.un_lt_wps[-1]])
elif vanilla:
self.sendWaypointTrajectory(self.un_lt_wps)
else:
self.untuck()
def untuck(self, use_moveit=True):
if use_moveit:
# Just last point
self.execute_traj_moveit([self.tuck_network[-1]])
else:
self._untuck_with_network()
def _init_tuck_poses(self):
self.mid_wp = [-1.57, 3.14, 1.05, -1.57, 1.05, 1.57]
lt_wp0 = [-1.65, 3.68, 1.12, -2.13, 1.48, 2.10]
lt_wp1 = [-1.49, 4.00, 1.47, -1.74, 1.25, 1.96]
lt_wp2 = [-1.23, 4.50, 0.95, -2.31, 1.82, 1.96]
lt_wp3 = [-1.21, 4.76, 0.83, -2.60, 2.56, 1.63]
self.lt_wps = [lt_wp0, lt_wp1, lt_wp2, lt_wp3]
self.un_lt_wps = self.lt_wps[::-1]
ut_wp0 = [-1.60, 2.20, 0.80, -2.20, 1.50, 1.20]
ut_wp1 = [-1.70, 2.00, 1.00, -2.20, 2.00, 0.90]
ut_wp2 = [-1.80, 1.80, 1.00, -2.10, 2.50, 0.72]
ut_wp3 = [-1.90, 1.50, 0.50, -2.00, 3.0, 0.72]
self.ut_wps = [ut_wp0,ut_wp1,ut_wp2,ut_wp3]
self.un_ut_wps = self.ut_wps[::-1]
self.tuck_network = self.un_lt_wps + [self.mid_wp] + self.ut_wps
self.reversed_tuck_network = self.tuck_network[::-1]
def _find_closest_tuck_wp(self, tuck_wps, max_allowed_dist = 8.0):
if self.last_js_update is not None:
if self.last_js_update + 2.0 < rospy.get_time():
print 'The newest joint state information is too old.'
return None
minDiff = 1000;
minInd = -1
for i in range(0,len(tuck_wps)):
diff = vectorDiff(tuck_wps[i],self.joint_states)
if diff < minDiff:
minDiff = diff
minInd = i
if minDiff > max_allowed_dist:
print 'Current arm configuration ' + str(self.joint_states) + ' is too far from the tuck network: ' + str(minDiff) + ' ' + str(minInd)
return None
return minInd
def _lt_with_network(self):
ind = self._find_closest_tuck_wp(self.reversed_tuck_network)
if ind is not None:
self.sendWaypointTrajectory(self.reversed_tuck_network[ind:])
def _ut_with_network(self):
ind = self._find_closest_tuck_wp(self.tuck_network)
if ind is not None:
self.sendWaypointTrajectory(self.tuck_network[ind:])
def _untuck_with_network(self):
ind = self._find_closest_tuck_wp(self.tuck_network)
if ind is not None:
midPoint = 4;
if ind == midPoint:
wps = self.tuck_network[midPoint]
elif ind < midPoint:
wps = self.tuck_network[ind:midPoint+1]
else:
wps = self.reversed_tuck_network[(len(self.reversed_tuck_network)-ind+1):midPoint+1]
self.sendWaypointTrajectory(wps)
def vectorDiff(v1, v2):
    """Return the Euclidean distance between v1 and v2.

    Only the overlapping prefix (the shorter length) is compared; empty
    overlap yields 0.0.
    """
    # zip truncates to the shorter sequence, matching the original's
    # min(len(v1), len(v2)) loop bound.
    squared = sum((a - b) * (a - b) for a, b in zip(v1, v2))
    return sqrt(squared)
| mit |
sahiljain/catapult | third_party/mox3/mox3/tests/test_mox.py | 26 | 84855 | # Unit tests for Mox.
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a fork of the pymox library intended to work with Python 3.
# The file was modified by quermit@gmail.com and dawid.fatyga@gmail.com
import io
import re
import sys
from mox3 import mox
from mox3.tests import mox_helper
import six
import testtools
OS_LISTDIR = mox_helper.os.listdir
class ExpectedMethodCallsErrorTest(testtools.TestCase):
    """Test creation and string conversion of ExpectedMethodCallsError."""

    def testAtLeastOneMethod(self):
        # Constructing the error with no expected methods is itself an error.
        self.assertRaises(ValueError, mox.ExpectedMethodCallsError, [])

    def testOneError(self):
        method = mox.MockMethod("testMethod", [], False)
        method(1, 2).AndReturn('output')
        e = mox.ExpectedMethodCallsError([method])
        self.assertEqual(
            "Verify: Expected methods never called:\n"
            " 0. testMethod(1, 2) -> 'output'",
            str(e))

    def testManyErrors(self):
        # Each uncalled expectation is listed with its index, args and
        # return value.
        method1 = mox.MockMethod("testMethod", [], False)
        method1(1, 2).AndReturn('output')
        method2 = mox.MockMethod("testMethod", [], False)
        method2(a=1, b=2, c="only named")
        method3 = mox.MockMethod("testMethod2", [], False)
        method3().AndReturn(44)
        method4 = mox.MockMethod("testMethod", [], False)
        method4(1, 2).AndReturn('output')
        e = mox.ExpectedMethodCallsError([method1, method2, method3, method4])
        self.assertEqual(
            "Verify: Expected methods never called:\n"
            " 0. testMethod(1, 2) -> 'output'\n"
            " 1. testMethod(a=1, b=2, c='only named') -> None\n"
            " 2. testMethod2() -> 44\n"
            " 3. testMethod(1, 2) -> 'output'",
            str(e))
class OrTest(testtools.TestCase):
    """Test Or correctly chains Comparators."""

    def testValidOr(self):
        """Or should be True if either Comparator returns True."""
        self.assertTrue(mox.Or(mox.IsA(dict), mox.IsA(str)) == {})
        self.assertTrue(mox.Or(mox.IsA(dict), mox.IsA(str)) == 'test')
        self.assertTrue(mox.Or(mox.IsA(str), mox.IsA(str)) == 'test')

    def testInvalidOr(self):
        """Or should be False if both Comparators return False."""
        self.assertFalse(mox.Or(mox.IsA(dict), mox.IsA(str)) == 0)
class AndTest(testtools.TestCase):
    """Test And correctly chains Comparators."""

    def testValidAnd(self):
        """And should be True if both Comparators return True."""
        self.assertTrue(mox.And(mox.IsA(str), mox.IsA(str)) == '1')

    def testClauseOneFails(self):
        """And should be False if the first Comparator returns False."""
        self.assertFalse(mox.And(mox.IsA(dict), mox.IsA(str)) == '1')

    def testAdvancedUsage(self):
        """And should work with other Comparators.

        Note: this test is reliant on In and ContainsKeyValue.
        """
        test_dict = {"mock": "obj", "testing": "isCOOL"}
        self.assertTrue(mox.And(mox.In("testing"),
                                mox.ContainsKeyValue("mock", "obj")) == test_dict)

    def testAdvancedUsageFails(self):
        """Note: this test is reliant on In and ContainsKeyValue."""
        test_dict = {"mock": "obj", "testing": "isCOOL"}
        self.assertFalse(mox.And(mox.In("NOTFOUND"),
                                 mox.ContainsKeyValue("mock", "obj")) == test_dict)
class FuncTest(testtools.TestCase):
    """Test Func correctly evaluates based upon true-false return."""

    def testFuncTrueFalseEvaluation(self):
        """Should return True if the validating function returns True."""
        equals_one = lambda x: x == 1
        always_none = lambda x: None

        self.assertTrue(mox.Func(equals_one) == 1)
        self.assertFalse(mox.Func(equals_one) == 0)

        # A None return is treated as a failed validation, never a match.
        self.assertFalse(mox.Func(always_none) == 1)
        self.assertFalse(mox.Func(always_none) == 0)
        self.assertFalse(mox.Func(always_none) is None)

    def testFuncExceptionPropagation(self):
        """Exceptions within the validating function should propagate."""
        class TestException(Exception):
            pass

        def raiseExceptionOnNotOne(value):
            if value != 1:
                raise TestException
            else:
                return True

        self.assertTrue(mox.Func(raiseExceptionOnNotOne) == 1)
        self.assertRaises(
            TestException, mox.Func(raiseExceptionOnNotOne).__eq__, 2)
class SameElementsAsTest(testtools.TestCase):
    """SameElementsAs correctly identifies sequences with same elements."""

    def testSortedLists(self):
        """Should return True if two lists are exactly equal."""
        self.assertTrue(mox.SameElementsAs([1, 2.0, 'c']) == [1, 2.0, 'c'])

    def testUnsortedLists(self):
        """Should return True if lists are unequal but have same elements."""
        self.assertTrue(mox.SameElementsAs([1, 2.0, 'c']) == [2.0, 'c', 1])

    def testUnhashableLists(self):
        """Should return True if lists have the same unhashable elements."""
        self.assertTrue(mox.SameElementsAs([{'a': 1}, {2: 'b'}]) ==
                        [{2: 'b'}, {'a': 1}])

    def testEmptyLists(self):
        """Should return True for two empty lists."""
        self.assertTrue(mox.SameElementsAs([]) == [])

    def testUnequalLists(self):
        """Should return False if the lists are not equal."""
        self.assertFalse(mox.SameElementsAs([1, 2.0, 'c']) == [2.0, 'c'])

    def testUnequalUnhashableLists(self):
        """Should return False if lists with unhashable items are unequal."""
        self.assertFalse(mox.SameElementsAs(
            [{'a': 1}, {2: 'b'}]) == [{2: 'b'}])

    def testActualIsNotASequence(self):
        """Should return False if the actual object is not a sequence."""
        self.assertFalse(mox.SameElementsAs([1]) == object())

    def testOneUnhashableObjectInActual(self):
        """Store the entire iterator for a correct comparison.

        In a previous version of SameElementsAs, iteration stopped when an
        unhashable object was encountered and then was restarted, so the actual
        list appeared smaller than it was.
        """
        self.assertFalse(mox.SameElementsAs([1, 2]) == iter([{}, 1, 2]))
class ContainsKeyValueTest(testtools.TestCase):
    """Test ContainsKeyValue correctly identifies key/value pairs in a dict.
    """

    def testValidPair(self):
        """Should return True if the key value is in the dict."""
        self.assertTrue(mox.ContainsKeyValue("key", 1) == {"key": 1})

    def testInvalidValue(self):
        """Should return False if the value is not correct."""
        self.assertFalse(mox.ContainsKeyValue("key", 1) == {"key": 2})

    def testInvalidKey(self):
        """Should return False if the key is not in the dict."""
        self.assertFalse(mox.ContainsKeyValue("qux", 1) == {"key": 2})
class ContainsAttributeValueTest(testtools.TestCase):
    """Test ContainsAttributeValue identifies properties in an object."""

    def setUp(self):
        """Create an object to test with."""
        class TestObject(object):
            key = 1

        super(ContainsAttributeValueTest, self).setUp()
        self.test_object = TestObject()

    def testValidPair(self):
        """Return True if the object has the key attribute that matches."""
        self.assertTrue(mox.ContainsAttributeValue("key", 1)
                        == self.test_object)

    def testInvalidValue(self):
        """Should return False if the value is not correct."""
        # Bug fix: this test (and the next) exercised ContainsKeyValue -- a
        # dict comparator that trivially fails on any non-dict -- instead of
        # ContainsAttributeValue, the class actually under test.
        self.assertFalse(
            mox.ContainsAttributeValue("key", 2) == self.test_object)

    def testInvalidKey(self):
        """Should return False if the object doesn't have the property."""
        self.assertFalse(
            mox.ContainsAttributeValue("qux", 1) == self.test_object)
class InTest(testtools.TestCase):
    """Test In correctly identifies a key in a list/dict."""

    def testItemInList(self):
        """Should return True if the item is in the list."""
        self.assertTrue(mox.In(1) == [1, 2, 3])

    def testKeyInDict(self):
        """Should return True if the item is a key in a dict."""
        self.assertTrue(mox.In("test") == {"test": "module"})

    def testItemInTuple(self):
        """Should return True if the item is in the tuple."""
        self.assertTrue(mox.In(1) == (1, 2, 3))

    def testTupleInTupleOfTuples(self):
        self.assertTrue(mox.In((1, 2, 3)) == ((1, 2, 3), (1, 2)))

    def testItemNotInList(self):
        self.assertFalse(mox.In(1) == [2, 3])

    def testTupleNotInTupleOfTuples(self):
        self.assertFalse(mox.In((1, 2)) == ((1, 2, 3), (4, 5)))
class NotTest(testtools.TestCase):
    """Test Not correctly identifies False predicates."""

    def testItemInList(self):
        """Should return True if the item is NOT in the list."""
        self.assertTrue(mox.Not(mox.In(42)) == [1, 2, 3])

    def testKeyInDict(self):
        """Should return True if the item is NOT a key in a dict."""
        self.assertTrue(mox.Not(mox.In("foo")) == {"key": 42})

    def testInvalidKeyWithNot(self):
        """Not should be True when the wrapped comparator fails to match."""
        self.assertTrue(mox.Not(mox.ContainsKeyValue("qux", 1)) == {"key": 2})
class StrContainsTest(testtools.TestCase):
    """Test StrContains checks for substring occurrence of a parameter."""

    def testValidSubstringAtStart(self):
        """Should return True if substring is at the start of the string."""
        self.assertTrue(mox.StrContains("hello") == "hello world")

    def testValidSubstringInMiddle(self):
        """Should return True if substring is in the middle of the string."""
        self.assertTrue(mox.StrContains("lo wo") == "hello world")

    def testValidSubstringAtEnd(self):
        """Should return True if the substring is at the end of the string."""
        self.assertTrue(mox.StrContains("ld") == "hello world")

    def testInvaildSubstring(self):
        """Should return False if the substring is not in the string."""
        # NOTE(review): method name has a typo ("Invaild"); kept to avoid
        # breaking test selection by name.
        self.assertFalse(mox.StrContains("AAA") == "hello world")

    def testMultipleMatches(self):
        """Should return True if there are multiple occurrences of substring."""
        self.assertTrue(mox.StrContains("abc") == "ababcabcabcababc")
class RegexTest(testtools.TestCase):
    """Test Regex correctly matches regular expressions."""

    def testIdentifyBadSyntaxDuringInit(self):
        """The user should know immediately if a regex has bad syntax."""
        self.assertRaises(re.error, mox.Regex, '(a|b')

    def testPatternInMiddle(self):
        """Return True if the pattern matches at the middle of the string.

        This ensures that re.search is used (instead of re.find).
        """
        self.assertTrue(mox.Regex(r"a\s+b") == "x y z a b c")

    def testNonMatchPattern(self):
        """Should return False if the pattern does not match the string."""
        self.assertFalse(mox.Regex(r"a\s+b") == "x y z")

    def testFlagsPassedCorrectly(self):
        """Should return True as we pass IGNORECASE flag."""
        self.assertTrue(mox.Regex(r"A", re.IGNORECASE) == "a")

    def testReprWithoutFlags(self):
        """repr should return the regular expression pattern."""
        self.assertTrue(
            repr(mox.Regex(r"a\s+b")) == "<regular expression 'a\s+b'>")

    def testReprWithFlags(self):
        """repr should return the regular expression pattern and flags."""
        self.assertTrue(repr(mox.Regex(r"a\s+b", flags=4)) ==
                        "<regular expression 'a\s+b', flags=4>")
class IsTest(testtools.TestCase):
    """Verify Is correctly checks equality based upon identity, not value."""

    class AlwaysComparesTrue(object):
        # Pathological class whose instances compare equal to everything;
        # Is must still distinguish them by identity.
        def __eq__(self, other):
            return True

        def __cmp__(self, other):
            return 0

        def __ne__(self, other):
            return False

    def testEqualityValid(self):
        o1 = self.AlwaysComparesTrue()
        self.assertTrue(mox.Is(o1), o1)

    def testEqualityInvalid(self):
        o1 = self.AlwaysComparesTrue()
        o2 = self.AlwaysComparesTrue()
        self.assertTrue(o1 == o2)
        # but...
        self.assertFalse(mox.Is(o1) == o2)

    def testInequalityValid(self):
        o1 = self.AlwaysComparesTrue()
        o2 = self.AlwaysComparesTrue()
        self.assertTrue(mox.Is(o1) != o2)

    def testInequalityInvalid(self):
        o1 = self.AlwaysComparesTrue()
        self.assertFalse(mox.Is(o1) != o1)

    def testEqualityInListValid(self):
        o1 = self.AlwaysComparesTrue()
        o2 = self.AlwaysComparesTrue()
        isa_list = [mox.Is(o1), mox.Is(o2)]
        str_list = [o1, o2]
        self.assertTrue(isa_list == str_list)

    def testEquailtyInListInvalid(self):
        o1 = self.AlwaysComparesTrue()
        o2 = self.AlwaysComparesTrue()
        isa_list = [mox.Is(o1), mox.Is(o2)]
        mixed_list = [o2, o1]
        self.assertFalse(isa_list == mixed_list)
class IsATest(testtools.TestCase):
    """Verify IsA correctly checks equality based upon class type not value."""

    def testEqualityValid(self):
        """Verify that == correctly identifies objects of the same type."""
        self.assertTrue(mox.IsA(str) == 'test')

    def testEqualityInvalid(self):
        """Verify that == correctly identifies objects of different types."""
        self.assertFalse(mox.IsA(str) == 10)

    def testInequalityValid(self):
        """Verify that != identifies objects of different type."""
        self.assertTrue(mox.IsA(str) != 10)

    def testInequalityInvalid(self):
        """Verify that != correctly identifies objects of the same type."""
        self.assertFalse(mox.IsA(str) != "test")

    def testEqualityInListValid(self):
        """Verify list contents are properly compared."""
        isa_list = [mox.IsA(str), mox.IsA(str)]
        str_list = ["abc", "def"]
        self.assertTrue(isa_list == str_list)

    def testEquailtyInListInvalid(self):
        """Verify list contents are properly compared."""
        isa_list = [mox.IsA(str), mox.IsA(str)]
        mixed_list = ["abc", 123]
        self.assertFalse(isa_list == mixed_list)

    def testSpecialTypes(self):
        """Verify that IsA can handle objects like io.StringIO."""
        # IsA accepts an *instance* here and matches on its type.
        isA = mox.IsA(io.StringIO())
        stringIO = io.StringIO()
        self.assertTrue(isA == stringIO)
class IsAlmostTest(testtools.TestCase):
    """Verify IsAlmost correctly checks equality of floating point numbers."""

    def testEqualityValid(self):
        """Verify that == correctly identifies nearly equivalent floats."""
        self.assertEqual(mox.IsAlmost(1.8999999999), 1.9)

    def testEqualityInvalid(self):
        """Verify that == correctly identifies non-equivalent floats."""
        self.assertNotEqual(mox.IsAlmost(1.899), 1.9)

    def testEqualityWithPlaces(self):
        """Verify that specifying places has the desired effect."""
        self.assertNotEqual(mox.IsAlmost(1.899), 1.9)
        self.assertEqual(mox.IsAlmost(1.899, places=2), 1.9)

    def testNonNumericTypes(self):
        """Verify that IsAlmost handles non-numeric types properly."""
        # Strings never compare almost-equal to floats, in either position.
        self.assertNotEqual(mox.IsAlmost(1.8999999999), '1.9')
        self.assertNotEqual(mox.IsAlmost('1.8999999999'), 1.9)
        self.assertNotEqual(mox.IsAlmost('1.8999999999'), '1.9')
class ValueRememberTest(testtools.TestCase):
    """Exercise Value/Remember, which capture and later compare arguments."""

    def testValueEquals(self):
        """A stored value should compare equal to the original."""
        holder = mox.Value()
        holder.store_value('hello world')
        self.assertEqual(holder, 'hello world')

    def testNoValue(self):
        """An uninitialized Value should not match empty or fresh objects."""
        holder = mox.Value()
        for other in (None, False, 0, '', (), [], {}, object(), set()):
            self.assertNotEqual(holder, other)

    def testRememberValue(self):
        """Comparing against Remember should store the argument in Value."""
        holder = mox.Value()
        capture = mox.Remember(holder)
        self.assertNotEqual(holder, 'hello world')  # value not yet stored.
        self.assertEqual(capture, 'hello world')    # store value here.
        self.assertEqual(holder, 'hello world')     # compare against stored.
class MockMethodTest(testtools.TestCase):
    """Test class to verify that the MockMethod class is working correctly."""

    def setUp(self):
        super(MockMethodTest, self).setUp()
        # A recorded expectation: MockMethod is constructed, then the call
        # with ['original'] records the expected argument list.
        self.expected_method = mox.MockMethod(
            "testMethod", [], False)(['original'])
        # A replay-mode method whose call queue contains that expectation.
        self.mock_method = mox.MockMethod(
            "testMethod", [self.expected_method], True)

    def testNameAttribute(self):
        """Should provide a __name__ attribute."""
        self.assertEqual('testMethod', self.mock_method.__name__)

    def testAndReturnNoneByDefault(self):
        """Should return None by default."""
        return_value = self.mock_method(['original'])
        self.assertTrue(return_value is None)

    def testAndReturnValue(self):
        """Should return a specified return value."""
        expected_return_value = "test"
        self.expected_method.AndReturn(expected_return_value)
        return_value = self.mock_method(['original'])
        self.assertTrue(return_value == expected_return_value)

    def testAndRaiseException(self):
        """Should raise a specified exception."""
        class TestException(Exception):
            pass

        expected_exception = TestException('test exception')
        self.expected_method.AndRaise(expected_exception)
        self.assertRaises(TestException, self.mock_method, ['original'])

    def testWithSideEffects(self):
        """Should call state modifier."""
        local_list = ['original']

        def modifier(mutable_list):
            # The mock must pass the very same object, not a copy.
            self.assertTrue(local_list is mutable_list)
            mutable_list[0] = 'mutation'
        self.expected_method.WithSideEffects(modifier).AndReturn(1)
        self.mock_method(local_list)
        self.assertEqual('mutation', local_list[0])

    def testWithReturningSideEffects(self):
        """Should call state modifier and propagate its return value."""
        local_list = ['original']
        expected_return = 'expected_return'

        def modifier_with_return(mutable_list):
            self.assertTrue(local_list is mutable_list)
            mutable_list[0] = 'mutation'
            return expected_return
        self.expected_method.WithSideEffects(modifier_with_return)
        actual_return = self.mock_method(local_list)
        self.assertEqual('mutation', local_list[0])
        self.assertEqual(expected_return, actual_return)

    def testWithReturningSideEffectsWithAndReturn(self):
        """Should call state modifier and ignore its return value."""
        local_list = ['original']
        expected_return = 'expected_return'
        unexpected_return = 'unexpected_return'

        def modifier_with_return(mutable_list):
            self.assertTrue(local_list is mutable_list)
            mutable_list[0] = 'mutation'
            return unexpected_return
        # AndReturn takes precedence over the side-effect's return value.
        self.expected_method.WithSideEffects(modifier_with_return).AndReturn(
            expected_return)
        actual_return = self.mock_method(local_list)
        self.assertEqual('mutation', local_list[0])
        self.assertEqual(expected_return, actual_return)

    def testEqualityNoParamsEqual(self):
        """Methods with the same name and without params should be equal."""
        expected_method = mox.MockMethod("testMethod", [], False)
        self.assertEqual(self.mock_method, expected_method)

    def testEqualityNoParamsNotEqual(self):
        """Methods with different names without params should not be equal."""
        expected_method = mox.MockMethod("otherMethod", [], False)
        self.assertNotEqual(self.mock_method, expected_method)

    def testEqualityParamsEqual(self):
        """Methods with the same name and parameters should be equal."""
        params = [1, 2, 3]
        expected_method = mox.MockMethod("testMethod", [], False)
        expected_method._params = params
        self.mock_method._params = params
        self.assertEqual(self.mock_method, expected_method)

    def testEqualityParamsNotEqual(self):
        """Methods with same name and different params should not be equal."""
        expected_method = mox.MockMethod("testMethod", [], False)
        expected_method._params = [1, 2, 3]
        self.mock_method._params = ['a', 'b', 'c']
        self.assertNotEqual(self.mock_method, expected_method)

    def testEqualityNamedParamsEqual(self):
        """Methods with the same name and same named params should be equal."""
        named_params = {"input1": "test", "input2": "params"}
        expected_method = mox.MockMethod("testMethod", [], False)
        expected_method._named_params = named_params
        self.mock_method._named_params = named_params
        self.assertEqual(self.mock_method, expected_method)

    def testEqualityNamedParamsNotEqual(self):
        """Methods with same name, different named params, are not equal."""
        expected_method = mox.MockMethod("testMethod", [], False)
        expected_method._named_params = {"input1": "test", "input2": "params"}
        self.mock_method._named_params = {
            "input1": "test2", "input2": "params2"}
        self.assertNotEqual(self.mock_method, expected_method)

    def testEqualityWrongType(self):
        """Method should not be equal to an object of a different type."""
        self.assertNotEqual(self.mock_method, "string?")

    def testObjectEquality(self):
        """Equality of objects should work without a Comparator."""
        # Two distinct TestClass instances that compare equal by value.
        instA = TestClass()
        instB = TestClass()
        params = [instA, ]
        expected_method = mox.MockMethod("testMethod", [], False)
        expected_method._params = params
        self.mock_method._params = [instB, ]
        self.assertEqual(self.mock_method, expected_method)

    def testStrConversion(self):
        """str() of a recorded call should render its args and return value."""
        method = mox.MockMethod("f", [], False)
        method(1, 2, "st", n1=8, n2="st2")
        self.assertEqual(str(method),
                         ("f(1, 2, 'st', n1=8, n2='st2') -> None"))

        method = mox.MockMethod("testMethod", [], False)
        method(1, 2, "only positional")
        self.assertEqual(str(method),
                         "testMethod(1, 2, 'only positional') -> None")

        method = mox.MockMethod("testMethod", [], False)
        method(a=1, b=2, c="only named")
        self.assertEqual(str(method),
                         "testMethod(a=1, b=2, c='only named') -> None")

        method = mox.MockMethod("testMethod", [], False)
        method()
        self.assertEqual(str(method), "testMethod() -> None")

        method = mox.MockMethod("testMethod", [], False)
        method(x="only 1 parameter")
        self.assertEqual(str(method),
                         "testMethod(x='only 1 parameter') -> None")

        method = mox.MockMethod("testMethod", [], False)
        method().AndReturn('return_value')
        self.assertEqual(str(method), "testMethod() -> 'return_value'")

        method = mox.MockMethod("testMethod", [], False)
        method().AndReturn(('a', {1: 2}))
        self.assertEqual(str(method), "testMethod() -> ('a', {1: 2})")
class MockAnythingTest(testtools.TestCase):
    """Verify that the MockAnything class works as expected."""

    def setUp(self):
        super(MockAnythingTest, self).setUp()
        self.mock_object = mox.MockAnything()

    def testRepr(self):
        """Calling repr on a MockAnything instance must work."""
        self.assertEqual('<MockAnything instance>', repr(self.mock_object))

    def testCanMockStr(self):
        """__str__ should be mockable like any other method."""
        self.mock_object.__str__().AndReturn("foo")
        self.mock_object._Replay()
        actual = str(self.mock_object)
        self.mock_object._Verify()
        self.assertEqual("foo", actual)

    def testSetupMode(self):
        """Verify the mock will accept any call."""
        self.mock_object.NonsenseCall()
        self.assertTrue(len(self.mock_object._expected_calls_queue) == 1)

    def testReplayWithExpectedCall(self):
        """Verify the mock replays method calls as expected."""
        self.mock_object.ValidCall()  # setup method call
        self.mock_object._Replay()  # start replay mode
        self.mock_object.ValidCall()  # make method call

    def testReplayWithUnexpectedCall(self):
        """Unexpected method calls should raise UnexpectedMethodCallError."""
        self.mock_object.ValidCall()  # setup method call
        self.mock_object._Replay()  # start replay mode
        self.assertRaises(mox.UnexpectedMethodCallError,
                          self.mock_object.OtherValidCall)

    def testVerifyWithCompleteReplay(self):
        """Verify should not raise an exception for a valid replay."""
        self.mock_object.ValidCall()  # setup method call
        self.mock_object._Replay()  # start replay mode
        self.mock_object.ValidCall()  # make method call
        self.mock_object._Verify()

    def testVerifyWithIncompleteReplay(self):
        """Verify should raise an exception if the replay was not complete."""
        self.mock_object.ValidCall()  # setup method call
        self.mock_object._Replay()  # start replay mode
        # ValidCall() is never made, so verification must fail.
        self.assertRaises(
            mox.ExpectedMethodCallsError, self.mock_object._Verify)

    def testSpecialClassMethod(self):
        """Verify should not raise exception when special methods are used."""
        self.mock_object[1].AndReturn(True)
        self.mock_object._Replay()
        returned_val = self.mock_object[1]
        self.assertTrue(returned_val)
        self.mock_object._Verify()

    def testNonzero(self):
        """You should be able to use the mock object in an if."""
        self.mock_object._Replay()
        if self.mock_object:
            pass

    def testNotNone(self):
        """Mock should be comparable to None."""
        self.mock_object._Replay()
        if self.mock_object is not None:
            pass

        if self.mock_object is None:
            pass

    def testEquals(self):
        """A mock should be able to compare itself to another object."""
        self.mock_object._Replay()
        self.assertEqual(self.mock_object, self.mock_object)

    def testEqualsMockFailure(self):
        """Verify equals identifies unequal objects."""
        self.mock_object.SillyCall()
        self.mock_object._Replay()
        self.assertNotEqual(self.mock_object, mox.MockAnything())

    def testEqualsInstanceFailure(self):
        """Verify equals identifies that objects are different instances."""
        self.mock_object._Replay()
        self.assertNotEqual(self.mock_object, TestClass())

    def testNotEquals(self):
        """Verify not equals works."""
        self.mock_object._Replay()
        self.assertFalse(self.mock_object != self.mock_object)

    def testNestedMockCallsRecordedSerially(self):
        """Test that nested calls work when recorded serially."""
        self.mock_object.CallInner().AndReturn(1)
        self.mock_object.CallOuter(1)
        self.mock_object._Replay()

        self.mock_object.CallOuter(self.mock_object.CallInner())

        self.mock_object._Verify()

    def testNestedMockCallsRecordedNested(self):
        """Test that nested calls work when recorded in a nested fashion."""
        self.mock_object.CallOuter(self.mock_object.CallInner().AndReturn(1))
        self.mock_object._Replay()

        self.mock_object.CallOuter(self.mock_object.CallInner())

        self.mock_object._Verify()

    def testIsCallable(self):
        """Test that MockAnything can even mock a simple callable.

        This is handy for "stubbing out" a method in a module with a mock, and
        verifying that it was called.
        """
        self.mock_object().AndReturn('mox0rd')
        self.mock_object._Replay()
        self.assertEqual('mox0rd', self.mock_object())
        self.mock_object._Verify()

    def testIsReprable(self):
        """Test that MockAnythings can be repr'd without causing a failure."""
        self.assertTrue('MockAnything' in repr(self.mock_object))
class MethodCheckerTest(testtools.TestCase):
    """Tests MockMethod's use of MethodChecker method."""

    def testUnboundMethodsRequiresInstance(self):
        """An unbound method (Py3) needs the instance as first argument."""
        # SKIP TEST IN PYTHON 2.x (Ugly hack for python 2.6)
        # REASON: semantics for unbound methods has changed only in Python 3
        # so this test in earlier versions is invalid
        if sys.version_info < (3, 0):
            return

        instance = CheckCallTestClass()
        # No class_to_bind: the checker treats the function as unbound and
        # expects the instance to be supplied explicitly.
        method = mox.MockMethod('NoParameters', [], False,
                                CheckCallTestClass.NoParameters)
        self.assertRaises(AttributeError, method)
        method(instance)
        self.assertRaises(AttributeError, method, instance, 1)

    def testNoParameters(self):
        """Only a bare call should match a zero-argument signature."""
        method = mox.MockMethod('NoParameters', [], False,
                                CheckCallTestClass.NoParameters,
                                class_to_bind=CheckCallTestClass)
        method()
        self.assertRaises(AttributeError, method, 1)
        self.assertRaises(AttributeError, method, 1, 2)
        self.assertRaises(AttributeError, method, a=1)
        self.assertRaises(AttributeError, method, 1, b=2)

    def testOneParameter(self):
        """Exactly one argument, positional or named 'a', should match."""
        method = mox.MockMethod('OneParameter', [], False,
                                CheckCallTestClass.OneParameter,
                                class_to_bind=CheckCallTestClass)
        self.assertRaises(AttributeError, method)
        method(1)
        method(a=1)
        self.assertRaises(AttributeError, method, b=1)
        self.assertRaises(AttributeError, method, 1, 2)
        self.assertRaises(AttributeError, method, 1, a=2)
        self.assertRaises(AttributeError, method, 1, b=2)

    def testTwoParameters(self):
        """Both 'a' and 'b' must be supplied, in any valid mix."""
        method = mox.MockMethod('TwoParameters', [], False,
                                CheckCallTestClass.TwoParameters,
                                class_to_bind=CheckCallTestClass)
        self.assertRaises(AttributeError, method)
        self.assertRaises(AttributeError, method, 1)
        self.assertRaises(AttributeError, method, a=1)
        self.assertRaises(AttributeError, method, b=1)
        method(1, 2)
        method(1, b=2)
        method(a=1, b=2)
        method(b=2, a=1)
        self.assertRaises(AttributeError, method, b=2, c=3)
        self.assertRaises(AttributeError, method, a=1, b=2, c=3)
        self.assertRaises(AttributeError, method, 1, 2, 3)
        self.assertRaises(AttributeError, method, 1, 2, 3, 4)
        self.assertRaises(AttributeError, method, 3, a=1, b=2)

    def testOneDefaultValue(self):
        """'a' is optional thanks to its default value."""
        method = mox.MockMethod('OneDefaultValue', [], False,
                                CheckCallTestClass.OneDefaultValue,
                                class_to_bind=CheckCallTestClass)
        method()
        method(1)
        method(a=1)
        self.assertRaises(AttributeError, method, b=1)
        self.assertRaises(AttributeError, method, 1, 2)
        self.assertRaises(AttributeError, method, 1, a=2)
        self.assertRaises(AttributeError, method, 1, b=2)

    def testTwoDefaultValues(self):
        """'a' and 'b' are required; 'c' and 'd' are optional."""
        method = mox.MockMethod('TwoDefaultValues', [], False,
                                CheckCallTestClass.TwoDefaultValues,
                                class_to_bind=CheckCallTestClass)
        self.assertRaises(AttributeError, method)
        self.assertRaises(AttributeError, method, c=3)
        self.assertRaises(AttributeError, method, 1)
        self.assertRaises(AttributeError, method, 1, d=4)
        self.assertRaises(AttributeError, method, 1, d=4, c=3)
        method(1, 2)
        method(a=1, b=2)
        method(1, 2, 3)
        method(1, 2, 3, 4)
        method(1, 2, c=3)
        method(1, 2, c=3, d=4)
        method(1, 2, d=4, c=3)
        method(d=4, c=3, a=1, b=2)
        self.assertRaises(AttributeError, method, 1, 2, 3, 4, 5)
        self.assertRaises(AttributeError, method, 1, 2, e=9)
        self.assertRaises(AttributeError, method, a=1, b=2, e=9)

    def testArgs(self):
        """Extra positionals flow into *args; unknown keywords do not."""
        method = mox.MockMethod('Args', [], False, CheckCallTestClass.Args,
                                class_to_bind=CheckCallTestClass)
        self.assertRaises(AttributeError, method)
        self.assertRaises(AttributeError, method, 1)
        method(1, 2)
        method(a=1, b=2)
        method(1, 2, 3)
        method(1, 2, 3, 4)
        self.assertRaises(AttributeError, method, 1, 2, a=3)
        self.assertRaises(AttributeError, method, 1, 2, c=3)

    def testKwargs(self):
        """Extra keywords flow into **kwargs; extra positionals do not."""
        method = mox.MockMethod('Kwargs', [], False, CheckCallTestClass.Kwargs,
                                class_to_bind=CheckCallTestClass)
        self.assertRaises(AttributeError, method)
        method(1)
        method(1, 2)
        method(a=1, b=2)
        method(b=2, a=1)
        self.assertRaises(AttributeError, method, 1, 2, 3)
        self.assertRaises(AttributeError, method, 1, 2, a=3)
        method(1, 2, c=3)
        method(a=1, b=2, c=3)
        method(c=3, a=1, b=2)
        method(a=1, b=2, c=3, d=4)
        self.assertRaises(AttributeError, method, 1, 2, 3, 4)

    def testArgsAndKwargs(self):
        """Both *args and **kwargs should be accepted where valid."""
        method = mox.MockMethod('ArgsAndKwargs', [], False,
                                CheckCallTestClass.ArgsAndKwargs,
                                class_to_bind=CheckCallTestClass)
        self.assertRaises(AttributeError, method)
        method(1)
        method(1, 2)
        method(1, 2, 3)
        method(a=1)
        method(1, b=2)
        self.assertRaises(AttributeError, method, 1, a=2)
        method(b=2, a=1)
        method(c=3, b=2, a=1)
        method(1, 2, c=3)
class CheckCallTestClass(object):
    """Signature fixtures for MethodCheckerTest.

    The bodies are intentionally empty; only the parameter lists matter,
    because MockMethod's checker validates recorded calls against these
    signatures. Do not rename parameters — the names are asserted on.
    """

    def NoParameters(self):
        pass

    def OneParameter(self, a):
        pass

    def TwoParameters(self, a, b):
        pass

    def OneDefaultValue(self, a=1):
        pass

    def TwoDefaultValues(self, a, b, c=1, d=2):
        pass

    def Args(self, a, b, *args):
        pass

    def Kwargs(self, a, b=2, **kwargs):
        pass

    def ArgsAndKwargs(self, a, *args, **kwargs):
        pass
class MockObjectTest(testtools.TestCase):
    """Verify that the MockObject class works as expected."""

    def setUp(self):
        super(MockObjectTest, self).setUp()
        self.mock_object = mox.MockObject(TestClass)

    def testSetupModeWithValidCall(self):
        """Verify the mock object properly mocks a basic method call."""
        self.mock_object.ValidCall()
        self.assertTrue(len(self.mock_object._expected_calls_queue) == 1)

    def testSetupModeWithInvalidCall(self):
        """Raise UnknownMethodCallError for a non-member method call."""
        # Note: assertRaises does not catch exceptions thrown by MockObject's
        # __getattr__
        try:
            self.mock_object.InvalidCall()
            self.fail("No exception thrown, expected UnknownMethodCallError")
        except mox.UnknownMethodCallError:
            pass
        except Exception:
            self.fail("Wrong exception type thrown,"
                      " expected UnknownMethodCallError")

    def testReplayWithInvalidCall(self):
        """Raise UnknownMethodCallError for a non-member method call."""
        self.mock_object.ValidCall()  # setup method call
        self.mock_object._Replay()  # start replay mode
        # Note: assertRaises does not catch exceptions thrown by MockObject's
        # __getattr__
        try:
            self.mock_object.InvalidCall()
            self.fail("No exception thrown, expected UnknownMethodCallError")
        except mox.UnknownMethodCallError:
            pass
        except Exception:
            self.fail("Wrong exception type thrown,"
                      " expected UnknownMethodCallError")

    def testIsInstance(self):
        """Mock should be able to pass as an instance of the mocked class."""
        self.assertTrue(isinstance(self.mock_object, TestClass))

    def testFindValidMethods(self):
        """Mock should be able to mock all public methods."""
        self.assertTrue('ValidCall' in self.mock_object._known_methods)
        self.assertTrue('OtherValidCall' in self.mock_object._known_methods)
        self.assertTrue('MyClassMethod' in self.mock_object._known_methods)
        self.assertTrue('MyStaticMethod' in self.mock_object._known_methods)
        self.assertTrue('_ProtectedCall' in self.mock_object._known_methods)
        self.assertTrue('__PrivateCall' not in self.mock_object._known_methods)
        # Private methods are discovered under their name-mangled form.
        self.assertTrue(
            '_TestClass__PrivateCall' in self.mock_object._known_methods)

    def testFindsSuperclassMethods(self):
        """Mock should be able to mock superclasses methods."""
        self.mock_object = mox.MockObject(ChildClass)
        self.assertTrue('ValidCall' in self.mock_object._known_methods)
        self.assertTrue('OtherValidCall' in self.mock_object._known_methods)
        self.assertTrue('MyClassMethod' in self.mock_object._known_methods)
        self.assertTrue('ChildValidCall' in self.mock_object._known_methods)

    def testAccessClassVariables(self):
        """Class variables should be accessible through the mock."""
        self.assertTrue('SOME_CLASS_VAR' in self.mock_object._known_vars)
        self.assertTrue('_PROTECTED_CLASS_VAR' in self.mock_object._known_vars)
        self.assertEqual('test_value', self.mock_object.SOME_CLASS_VAR)

    def testEquals(self):
        """A mock should be able to compare itself to another object."""
        self.mock_object._Replay()
        self.assertEqual(self.mock_object, self.mock_object)

    def testEqualsMockFailure(self):
        """Verify equals identifies unequal objects."""
        self.mock_object.ValidCall()
        self.mock_object._Replay()
        self.assertNotEqual(self.mock_object, mox.MockObject(TestClass))

    def testEqualsInstanceFailure(self):
        """Verify equals identifies that objects are different instances."""
        self.mock_object._Replay()
        self.assertNotEqual(self.mock_object, TestClass())

    def testNotEquals(self):
        """Verify not equals works."""
        self.mock_object._Replay()
        self.assertFalse(self.mock_object != self.mock_object)

    def testMockSetItem_ExpectedSetItem_Success(self):
        """Test that __setitem__() gets mocked in Dummy.

        In this test, _Verify() succeeds.
        """
        dummy = mox.MockObject(TestClass)
        dummy['X'] = 'Y'

        dummy._Replay()

        dummy['X'] = 'Y'

        dummy._Verify()

    def testMockSetItem_ExpectedSetItem_NoSuccess(self):
        """Test that __setitem__() gets mocked in Dummy.

        In this test, _Verify() fails.
        """
        dummy = mox.MockObject(TestClass)
        dummy['X'] = 'Y'

        dummy._Replay()

        # NOT doing dummy['X'] = 'Y'

        self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)

    def testMockSetItem_ExpectedNoSetItem_Success(self):
        """Test that __setitem__() gets mocked in Dummy."""
        dummy = mox.MockObject(TestClass)
        # NOT doing dummy['X'] = 'Y'

        dummy._Replay()

        def call():
            dummy['X'] = 'Y'
        self.assertRaises(mox.UnexpectedMethodCallError, call)

    def testMockSetItem_ExpectedNoSetItem_NoSuccess(self):
        """Test that __setitem__() gets mocked in Dummy.

        In this test, _Verify() succeeds (nothing recorded, nothing called).
        """
        dummy = mox.MockObject(TestClass)
        # NOT doing dummy['X'] = 'Y'

        dummy._Replay()

        # NOT doing dummy['X'] = 'Y'

        dummy._Verify()

    def testMockSetItem_ExpectedSetItem_NonmatchingParameters(self):
        """Test that __setitem__() fails if other parameters are expected."""
        dummy = mox.MockObject(TestClass)
        dummy['X'] = 'Y'

        dummy._Replay()

        def call():
            dummy['wrong'] = 'Y'
        self.assertRaises(mox.UnexpectedMethodCallError, call)

        dummy._Verify()

    def testMockSetItem_WithSubClassOfNewStyleClass(self):
        class NewStyleTestClass(object):
            def __init__(self):
                self.my_dict = {}

            def __setitem__(self, key, value):
                # Fixed: the original read "self.my_dict[key], value" — a
                # no-op tuple expression (and a KeyError if ever executed).
                # The intent is clearly an assignment. The body is never run
                # by the mock, but the typo was an obvious bug.
                self.my_dict[key] = value

        class TestSubClass(NewStyleTestClass):
            pass

        dummy = mox.MockObject(TestSubClass)
        dummy[1] = 2
        dummy._Replay()
        dummy[1] = 2
        dummy._Verify()

    def testMockGetItem_ExpectedGetItem_Success(self):
        """Test that __getitem__() gets mocked in Dummy.

        In this test, _Verify() succeeds.
        """
        dummy = mox.MockObject(TestClass)
        dummy['X'].AndReturn('value')

        dummy._Replay()

        self.assertEqual(dummy['X'], 'value')

        dummy._Verify()

    def testMockGetItem_ExpectedGetItem_NoSuccess(self):
        """Test that __getitem__() gets mocked in Dummy.

        In this test, _Verify() fails.
        """
        dummy = mox.MockObject(TestClass)
        dummy['X'].AndReturn('value')

        dummy._Replay()

        # NOT doing dummy['X']

        self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)

    def testMockGetItem_ExpectedNoGetItem_NoSuccess(self):
        """Test that __getitem__() gets mocked in Dummy."""
        dummy = mox.MockObject(TestClass)
        # NOT doing dummy['X']

        dummy._Replay()

        def call():
            return dummy['X']
        self.assertRaises(mox.UnexpectedMethodCallError, call)

    def testMockGetItem_ExpectedGetItem_NonmatchingParameters(self):
        """Test that __getitem__() fails if other parameters are expected."""
        dummy = mox.MockObject(TestClass)
        dummy['X'].AndReturn('value')

        dummy._Replay()

        def call():
            return dummy['wrong']
        self.assertRaises(mox.UnexpectedMethodCallError, call)

        dummy._Verify()

    def testMockGetItem_WithSubClassOfNewStyleClass(self):
        class NewStyleTestClass(object):
            def __getitem__(self, key):
                return {1: '1', 2: '2'}[key]

        class TestSubClass(NewStyleTestClass):
            pass

        dummy = mox.MockObject(TestSubClass)
        dummy[1].AndReturn('3')

        dummy._Replay()
        self.assertEqual('3', dummy.__getitem__(1))
        dummy._Verify()

    def testMockIter_ExpectedIter_Success(self):
        """Test that __iter__() gets mocked in Dummy.

        In this test, _Verify() succeeds.
        """
        dummy = mox.MockObject(TestClass)
        iter(dummy).AndReturn(iter(['X', 'Y']))

        dummy._Replay()

        self.assertEqual([x for x in dummy], ['X', 'Y'])

        dummy._Verify()

    def testMockContains_ExpectedContains_Success(self):
        """Test that __contains__ gets mocked in Dummy.

        In this test, _Verify() succeeds.
        """
        dummy = mox.MockObject(TestClass)
        dummy.__contains__('X').AndReturn(True)

        dummy._Replay()

        self.assertTrue('X' in dummy)

        dummy._Verify()

    def testMockContains_ExpectedContains_NoSuccess(self):
        """Test that __contains__() gets mocked in Dummy.

        In this test, _Verify() fails.
        """
        dummy = mox.MockObject(TestClass)
        # NOTE(review): 'True' (a string) looks unintended, but the value is
        # never consumed because the call is never replayed — left as-is.
        dummy.__contains__('X').AndReturn('True')

        dummy._Replay()

        # NOT doing 'X' in dummy

        self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)

    def testMockContains_ExpectedContains_NonmatchingParameter(self):
        """Test that __contains__ fails if other parameters are expected."""
        dummy = mox.MockObject(TestClass)
        dummy.__contains__('X').AndReturn(True)

        dummy._Replay()

        def call():
            return 'Y' in dummy
        self.assertRaises(mox.UnexpectedMethodCallError, call)

        dummy._Verify()

    def testMockIter_ExpectedIter_NoSuccess(self):
        """Test that __iter__() gets mocked in Dummy.

        In this test, _Verify() fails.
        """
        dummy = mox.MockObject(TestClass)
        iter(dummy).AndReturn(iter(['X', 'Y']))

        dummy._Replay()

        # NOT doing self.assertEqual([x for x in dummy], ['X', 'Y'])

        self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)

    def testMockIter_ExpectedNoIter_NoSuccess(self):
        """Test that __iter__() gets mocked in Dummy."""
        dummy = mox.MockObject(TestClass)
        # NOT doing iter(dummy)

        dummy._Replay()

        def call():
            return [x for x in dummy]
        self.assertRaises(mox.UnexpectedMethodCallError, call)

    def testMockIter_ExpectedGetItem_Success(self):
        """Test that __iter__() gets mocked in Dummy using getitem."""
        dummy = mox.MockObject(SubscribtableNonIterableClass)
        dummy[0].AndReturn('a')
        dummy[1].AndReturn('b')
        # Iteration via __getitem__ stops at the first IndexError.
        dummy[2].AndRaise(IndexError)

        dummy._Replay()

        self.assertEqual(['a', 'b'], [x for x in dummy])

        dummy._Verify()

    def testMockIter_ExpectedNoGetItem_NoSuccess(self):
        """Test that __iter__() gets mocked in Dummy using getitem."""
        dummy = mox.MockObject(SubscribtableNonIterableClass)
        # NOT doing dummy[index]

        dummy._Replay()

        # Changed from a lambda assignment to a def for consistency with
        # the sibling tests above (and PEP 8 E731).
        def call():
            return [x for x in dummy]
        self.assertRaises(mox.UnexpectedMethodCallError, call)

    def testMockGetIter_WithSubClassOfNewStyleClass(self):
        class NewStyleTestClass(object):
            def __iter__(self):
                return iter([1, 2, 3])

        class TestSubClass(NewStyleTestClass):
            pass

        dummy = mox.MockObject(TestSubClass)
        iter(dummy).AndReturn(iter(['a', 'b']))
        dummy._Replay()
        self.assertEqual(['a', 'b'], [x for x in dummy])
        dummy._Verify()

    def testInstantiationWithAdditionalAttributes(self):
        """Extra attrs passed at construction should appear on the mock."""
        mock_object = mox.MockObject(TestClass, attrs={"attr1": "value"})
        self.assertEqual(mock_object.attr1, "value")

    def testCantOverrideMethodsWithAttributes(self):
        """attrs must not shadow real methods of the mocked class."""
        self.assertRaises(ValueError, mox.MockObject, TestClass,
                          attrs={"ValidCall": "value"})

    def testCantMockNonPublicAttributes(self):
        """attrs must not introduce protected or private names."""
        self.assertRaises(mox.PrivateAttributeError, mox.MockObject, TestClass,
                          attrs={"_protected": "value"})
        self.assertRaises(mox.PrivateAttributeError, mox.MockObject, TestClass,
                          attrs={"__private": "value"})
class MoxTest(testtools.TestCase):
"""Verify Mox works correctly."""
def setUp(self):
    super(MoxTest, self).setUp()
    # Fresh Mox factory per test; mocks it creates are tracked so
    # ReplayAll()/VerifyAll() cover all of them at once.
    self.mox = mox.Mox()
def testCreateObject(self):
    """Mox should create a mock object."""
    # Success is simply not raising.
    self.mox.CreateMock(TestClass)
def testVerifyObjectWithCompleteReplay(self):
    """Mox should replay and verify all objects it created."""
    mock_obj = self.mox.CreateMock(TestClass)
    mock_obj.ValidCall()
    # IsA matches any TestClass instance passed during replay.
    mock_obj.ValidCallWithArgs(mox.IsA(TestClass))
    self.mox.ReplayAll()
    mock_obj.ValidCall()
    mock_obj.ValidCallWithArgs(TestClass("some_value"))
    self.mox.VerifyAll()
def testVerifyObjectWithIncompleteReplay(self):
    """Mox should raise an exception if a mock didn't replay completely."""
    mock_obj = self.mox.CreateMock(TestClass)
    mock_obj.ValidCall()
    self.mox.ReplayAll()
    # ValidCall() is never made, so verification must fail.
    self.assertRaises(mox.ExpectedMethodCallsError, self.mox.VerifyAll)
def testEntireWorkflow(self):
    """Record, replay, call and verify a single expectation end to end."""
    mock_obj = self.mox.CreateMock(TestClass)
    mock_obj.ValidCall().AndReturn("yes")
    self.mox.ReplayAll()

    result = mock_obj.ValidCall()

    self.assertEqual("yes", result)
    self.mox.VerifyAll()
def testSignatureMatchingWithComparatorAsFirstArg(self):
    """Test that the first argument can be a comparator."""
    def VerifyLen(val):
        """This will raise an exception when not given a list.

        This exception will be raised when trying to infer/validate the
        method signature.
        """
        return len(val) != 1

    mock_obj = self.mox.CreateMock(TestClass)
    # This intentionally does not name the 'nine' param so it triggers
    # deeper inspection.
    mock_obj.MethodWithArgs(mox.Func(VerifyLen), mox.IgnoreArg(), None)
    self.mox.ReplayAll()

    # [1, 2] has length 2, so VerifyLen returns True and the call matches.
    mock_obj.MethodWithArgs([1, 2], "foo", None)

    self.mox.VerifyAll()
def testCallableObject(self):
    """Recording calls to a callable object should work."""
    mock_obj = self.mox.CreateMock(CallableClass)
    mock_obj("foo").AndReturn("qux")
    self.mox.ReplayAll()

    result = mock_obj("foo")

    self.assertEqual("qux", result)
    self.mox.VerifyAll()
def testInheritedCallableObject(self):
    """Recording calls to an object inheriting from a callable object."""
    mock_obj = self.mox.CreateMock(InheritsFromCallable)
    mock_obj("foo").AndReturn("qux")
    self.mox.ReplayAll()

    result = mock_obj("foo")

    self.assertEqual("qux", result)
    self.mox.VerifyAll()
def testCallOnNonCallableObject(self):
    """Test that you cannot call a non-callable object."""
    # A str instance has no __call__, so the mock rejects being called.
    mock_obj = self.mox.CreateMock("string is not callable")
    self.assertRaises(TypeError, mock_obj)
def testCallableObjectWithBadCall(self):
    """Test verifying calls to a callable object works."""
    mock_obj = self.mox.CreateMock(CallableClass)
    mock_obj("foo").AndReturn("qux")
    self.mox.ReplayAll()

    # "ZOOBAZ" does not match the recorded "foo" argument.
    self.assertRaises(mox.UnexpectedMethodCallError, mock_obj, "ZOOBAZ")
def testCallableObjectVerifiesSignature(self):
    """Recording a call with too many args should fail the signature check."""
    mock_obj = self.mox.CreateMock(CallableClass)
    # Too many arguments
    self.assertRaises(AttributeError, mock_obj, "foo", "bar")
def testUnorderedGroup(self):
    """A single unordered group should match calls in any order."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Method(1).InAnyOrder()
    mock_obj.Method(2).InAnyOrder()
    self.mox.ReplayAll()

    # Call in the reverse of the recorded order; the group allows it.
    mock_obj.Method(2)
    mock_obj.Method(1)

    self.mox.VerifyAll()
def testUnorderedGroupsInline(self):
    """Unordered groups should work in the context of ordered calls."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(1).InAnyOrder()
    mock_obj.Method(2).InAnyOrder()
    mock_obj.Close()
    self.mox.ReplayAll()

    # Open/Close stay ordered; only the group in between is free.
    mock_obj.Open()
    mock_obj.Method(2)
    mock_obj.Method(1)
    mock_obj.Close()

    self.mox.VerifyAll()
def testMultipleUnorderdGroups(self):
    """Multiple unordered groups should work."""
    # NOTE: 'Unorderd' typo in the method name is long-standing; kept so
    # the test keeps its public name.
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Method(1).InAnyOrder()
    mock_obj.Method(2).InAnyOrder()
    # A named group forms a second, separate unordered group.
    mock_obj.Foo().InAnyOrder('group2')
    mock_obj.Bar().InAnyOrder('group2')
    self.mox.ReplayAll()

    mock_obj.Method(2)
    mock_obj.Method(1)
    mock_obj.Bar()
    mock_obj.Foo()

    self.mox.VerifyAll()
def testMultipleUnorderdGroupsOutOfOrder(self):
    """Multiple unordered groups should maintain external order"""
    # NOTE: 'Unorderd' typo in the method name is long-standing; kept.
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Method(1).InAnyOrder()
    mock_obj.Method(2).InAnyOrder()
    mock_obj.Foo().InAnyOrder('group2')
    mock_obj.Bar().InAnyOrder('group2')
    self.mox.ReplayAll()

    mock_obj.Method(2)
    # 'group2' may not start until the first group is exhausted.
    self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Bar)
def testUnorderedGroupWithReturnValue(self):
    """Unordered groups should work with return values."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(1).InAnyOrder().AndReturn(9)
    mock_obj.Method(2).InAnyOrder().AndReturn(10)
    mock_obj.Close()
    self.mox.ReplayAll()

    mock_obj.Open()
    # Called out of recorded order; each call must still get the return
    # value recorded for its own argument.
    actual_two = mock_obj.Method(2)
    actual_one = mock_obj.Method(1)
    mock_obj.Close()

    self.assertEqual(9, actual_one)
    self.assertEqual(10, actual_two)

    self.mox.VerifyAll()
def testUnorderedGroupWithComparator(self):
    """Unordered groups should work with comparators."""
    def VerifyOne(cmd):
        if not isinstance(cmd, str):
            self.fail('Unexpected type passed to comparator: ' + str(cmd))
        return cmd == 'test'

    def VerifyTwo(cmd):
        return True

    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Foo(['test'], mox.Func(VerifyOne), bar=1).InAnyOrder().\
        AndReturn('yes test')
    mock_obj.Foo(['test'], mox.Func(VerifyTwo), bar=1).InAnyOrder().\
        AndReturn('anything')
    self.mox.ReplayAll()

    # The Func comparators route each call to the matching expectation,
    # regardless of the order the calls arrive in.
    mock_obj.Foo(['test'], 'anything', bar=1)
    mock_obj.Foo(['test'], 'test', bar=1)

    self.mox.VerifyAll()
def testMultipleTimes(self):
    """Test if MultipleTimesGroup works."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Method(1).MultipleTimes().AndReturn(9)
    mock_obj.Method(2).AndReturn(10)
    mock_obj.Method(3).MultipleTimes().AndReturn(42)
    self.mox.ReplayAll()

    actual_one = mock_obj.Method(1)
    second_one = mock_obj.Method(1)  # This tests MultipleTimes.
    actual_two = mock_obj.Method(2)
    actual_three = mock_obj.Method(3)
    # Additional Method(3) calls are absorbed by MultipleTimes.
    mock_obj.Method(3)
    mock_obj.Method(3)

    self.mox.VerifyAll()

    self.assertEqual(9, actual_one)
    # Repeated calls should return same number.
    self.assertEqual(9, second_one)
    self.assertEqual(10, actual_two)
    self.assertEqual(42, actual_three)
def testMultipleTimesUsingIsAParameter(self):
    """Test if MultipleTimesGroup works with a IsA parameter."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    # IsA(str) matches any string argument, any number of times.
    mock_obj.Method(mox.IsA(str)).MultipleTimes("IsA").AndReturn(9)
    mock_obj.Close()
    self.mox.ReplayAll()
    mock_obj.Open()
    actual_one = mock_obj.Method("1")
    second_one = mock_obj.Method("2")  # This tests MultipleTimes.
    mock_obj.Close()
    self.mox.VerifyAll()
    self.assertEqual(9, actual_one)
    # Repeated calls should return same number.
    self.assertEqual(9, second_one)
def testMultipleTimesUsingFunc(self):
    """Test that the Func is not evaluated more times than necessary.

    If a Func() has side effects, it can cause a passing test to fail.
    """
    # Fix: renamed from testMutlipleTimesUsingFunc ("Mutliple" typo).
    # Test runners discover methods by the 'test' prefix, so discovery
    # is unaffected by the rename.
    self.counter = 0

    def MyFunc(actual_str):
        """Increment the counter if actual_str == 'foo'."""
        if actual_str == 'foo':
            self.counter += 1
        return True

    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(mox.Func(MyFunc)).MultipleTimes()
    mock_obj.Close()
    self.mox.ReplayAll()
    mock_obj.Open()
    mock_obj.Method('foo')
    mock_obj.Method('foo')
    mock_obj.Method('not-foo')
    mock_obj.Close()
    self.mox.VerifyAll()
    # The comparator must have run exactly once per 'foo' call.
    self.assertEqual(2, self.counter)
def testMultipleTimesThreeMethods(self):
    """Test if MultipleTimesGroup works with three or more methods."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(1).MultipleTimes().AndReturn(9)
    mock_obj.Method(2).MultipleTimes().AndReturn(8)
    mock_obj.Method(3).MultipleTimes().AndReturn(7)
    mock_obj.Method(4).AndReturn(10)
    mock_obj.Close()
    self.mox.ReplayAll()
    mock_obj.Open()
    # Interleave the three MultipleTimes methods in arbitrary order.
    actual_three = mock_obj.Method(3)
    mock_obj.Method(1)
    actual_two = mock_obj.Method(2)
    mock_obj.Method(3)
    actual_one = mock_obj.Method(1)
    actual_four = mock_obj.Method(4)
    mock_obj.Close()
    self.assertEqual(9, actual_one)
    self.assertEqual(8, actual_two)
    self.assertEqual(7, actual_three)
    self.assertEqual(10, actual_four)
    self.mox.VerifyAll()
def testMultipleTimesMissingOne(self):
    """Test if MultipleTimesGroup fails if one method is missing."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(1).MultipleTimes().AndReturn(9)
    mock_obj.Method(2).MultipleTimes().AndReturn(8)
    mock_obj.Method(3).MultipleTimes().AndReturn(7)
    mock_obj.Method(4).AndReturn(10)
    mock_obj.Close()
    self.mox.ReplayAll()
    mock_obj.Open()
    # Method(1) is never called, so the group is not satisfied and the
    # out-of-group Method(4) must be rejected.
    mock_obj.Method(3)
    mock_obj.Method(2)
    mock_obj.Method(3)
    mock_obj.Method(3)
    mock_obj.Method(2)
    self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Method, 4)
def testMultipleTimesTwoGroups(self):
    """Test if MultipleTimesGroup works with a group after a
    MultipleTimesGroup.
    """
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(1).MultipleTimes().AndReturn(9)
    # Explicit group name starts a second, separate group.
    mock_obj.Method(3).MultipleTimes("nr2").AndReturn(42)
    mock_obj.Close()
    self.mox.ReplayAll()
    mock_obj.Open()
    actual_one = mock_obj.Method(1)
    mock_obj.Method(1)
    actual_three = mock_obj.Method(3)
    mock_obj.Method(3)
    mock_obj.Close()
    self.assertEqual(9, actual_one)
    self.assertEqual(42, actual_three)
    self.mox.VerifyAll()
def testMultipleTimesTwoGroupsFailure(self):
    """Test if MultipleTimesGroup fails with a group after a
    MultipleTimesGroup.
    """
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(1).MultipleTimes().AndReturn(9)
    mock_obj.Method(3).MultipleTimes("nr2").AndReturn(42)
    mock_obj.Close()
    self.mox.ReplayAll()
    mock_obj.Open()
    mock_obj.Method(1)
    mock_obj.Method(1)
    mock_obj.Method(3)
    # Once group "nr2" has started, going back to the first group must
    # fail: the groups are ordered relative to each other.
    self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Method, 1)
def testWithSideEffects(self):
    """Test side effect operations actually modify their target objects."""

    def modifier(mutable_list):
        # Mutates the caller's list in place.
        mutable_list[0] = 'mutated'

    mock_obj = self.mox.CreateMockAnything()
    mock_obj.ConfigureInOutParameter(
        ['original']).WithSideEffects(modifier)
    mock_obj.WorkWithParameter(['mutated'])
    self.mox.ReplayAll()
    local_list = ['original']
    mock_obj.ConfigureInOutParameter(local_list)
    # The side effect must have rewritten local_list to ['mutated'].
    mock_obj.WorkWithParameter(local_list)
    self.mox.VerifyAll()
def testWithSideEffectsException(self):
    """Side effects must run even when the call also raises an exception."""

    class TestException(Exception):
        pass

    def modifier(mutable_list):
        mutable_list[0] = 'mutated'

    mock_obj = self.mox.CreateMockAnything()
    method = mock_obj.ConfigureInOutParameter(['original'])
    method.WithSideEffects(modifier).AndRaise(TestException('exception'))
    mock_obj.WorkWithParameter(['mutated'])
    self.mox.ReplayAll()
    local_list = ['original']
    # The call raises, but the modifier side effect still mutates the list.
    self.assertRaises(TestException,
                      mock_obj.ConfigureInOutParameter,
                      local_list)
    mock_obj.WorkWithParameter(local_list)
    self.mox.VerifyAll()
def testStubOutMethod(self):
    """Test that a method is replaced with a MockObject."""
    test_obj = TestClass()
    method_type = type(test_obj.OtherValidCall)
    # Replace OtherValidCall with a mock.
    self.mox.StubOutWithMock(test_obj, 'OtherValidCall')
    self.assertTrue(isinstance(test_obj.OtherValidCall, mox.MockObject))
    self.assertFalse(type(test_obj.OtherValidCall) is method_type)
    test_obj.OtherValidCall().AndReturn('foo')
    self.mox.ReplayAll()
    actual = test_obj.OtherValidCall()
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
    self.assertEqual('foo', actual)
    # UnsetStubs must restore the original bound method.
    self.assertTrue(type(test_obj.OtherValidCall) is method_type)
def testStubOutMethod_Unbound_Comparator(self):
    """An unbound stubbed method accepts a comparator for 'self'."""
    instance = TestClass()
    self.mox.StubOutWithMock(TestClass, 'OtherValidCall')
    # IgnoreArg() stands in for whichever instance the call is made on.
    TestClass.OtherValidCall(mox.IgnoreArg()).AndReturn('foo')
    self.mox.ReplayAll()
    actual = TestClass.OtherValidCall(instance)
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
    self.assertEqual('foo', actual)
def testStubOutMethod_Unbound_Subclass_Comparator(self):
    """IsA on the 'self' slot should match subclass instances too."""
    self.mox.StubOutWithMock(
        mox_helper.TestClassFromAnotherModule, 'Value')
    mox_helper.TestClassFromAnotherModule.Value(
        mox.IsA(mox_helper.ChildClassFromAnotherModule)).AndReturn('foo')
    self.mox.ReplayAll()
    # Call with a child-class instance; IsA must accept it.
    instance = mox_helper.ChildClassFromAnotherModule()
    actual = mox_helper.TestClassFromAnotherModule.Value(instance)
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
    self.assertEqual('foo', actual)
def testStubOutMethod_Unbound_WithOptionalParams(self):
    """Unbound stubbed methods should accept optional keyword params."""
    # Fix: renamed from testStubOuMethod_... ("StubOu" typo); discovery
    # by the 'test' prefix is unaffected.
    # NOTE(review): this test replaces self.mox with a fresh Mox rather
    # than using the one from setUp — preserved as-is.
    self.mox = mox.Mox()
    self.mox.StubOutWithMock(TestClass, 'OptionalArgs')
    TestClass.OptionalArgs(mox.IgnoreArg(), foo=2)
    self.mox.ReplayAll()
    t = TestClass()
    TestClass.OptionalArgs(t, foo=2)
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
def testStubOutMethod_Unbound_ActualInstance(self):
    """An unbound stub can require a specific instance as 'self'."""
    instance = TestClass()
    self.mox.StubOutWithMock(TestClass, 'OtherValidCall')
    TestClass.OtherValidCall(instance).AndReturn('foo')
    self.mox.ReplayAll()
    actual = TestClass.OtherValidCall(instance)
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
    self.assertEqual('foo', actual)
def testStubOutMethod_Unbound_DifferentInstance(self):
    """Calling with a different 'self' than recorded must be rejected."""
    instance = TestClass()
    self.mox.StubOutWithMock(TestClass, 'OtherValidCall')
    TestClass.OtherValidCall(instance).AndReturn('foo')
    self.mox.ReplayAll()
    # This should fail, since the instances are different
    self.assertRaises(mox.UnexpectedMethodCallError,
                      TestClass.OtherValidCall, "wrong self")
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
def testStubOutMethod_Unbound_NamedUsingPositional(self):
    """Check positional parameters can be matched to keyword arguments."""
    self.mox.StubOutWithMock(mox_helper.ExampleClass, 'NamedParams')
    instance = mox_helper.ExampleClass()
    mox_helper.ExampleClass.NamedParams(instance, 'foo', baz=None)
    self.mox.ReplayAll()
    mox_helper.ExampleClass.NamedParams(instance, 'foo', baz=None)
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
def testStubOutMethod_Unbound_NamedUsingPositional_SomePositional(self):
    """Check positional parameters can be matched to keyword arguments."""
    self.mox.StubOutWithMock(mox_helper.ExampleClass, 'TestMethod')
    instance = mox_helper.ExampleClass()
    mox_helper.ExampleClass.TestMethod(instance, 'one', 'two', 'nine')
    self.mox.ReplayAll()
    mox_helper.ExampleClass.TestMethod(instance, 'one', 'two', 'nine')
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
def testStubOutMethod_Unbound_SpecialArgs(self):
    """Unbound stubs should handle *args/**kwargs-style signatures."""
    self.mox.StubOutWithMock(mox_helper.ExampleClass, 'SpecialArgs')
    instance = mox_helper.ExampleClass()
    mox_helper.ExampleClass.SpecialArgs(instance, 'foo', None, bar='bar')
    self.mox.ReplayAll()
    mox_helper.ExampleClass.SpecialArgs(instance, 'foo', None, bar='bar')
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
def testStubOutMethod_Bound_SimpleTest(self):
    """A method mocked via CreateMock matches IgnoreArg parameters."""
    t = self.mox.CreateMock(TestClass)
    t.MethodWithArgs(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('foo')
    self.mox.ReplayAll()
    actual = t.MethodWithArgs(None, None)
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
    self.assertEqual('foo', actual)
def testStubOutMethod_Bound_NamedUsingPositional(self):
    """Check positional parameters can be matched to keyword arguments."""
    self.mox.StubOutWithMock(mox_helper.ExampleClass, 'NamedParams')
    instance = mox_helper.ExampleClass()
    # Record and replay through the bound method this time.
    instance.NamedParams('foo', baz=None)
    self.mox.ReplayAll()
    instance.NamedParams('foo', baz=None)
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
def testStubOutMethod_Bound_NamedUsingPositional_SomePositional(self):
    """Check positional parameters can be matched to keyword arguments."""
    self.mox.StubOutWithMock(mox_helper.ExampleClass, 'TestMethod')
    instance = mox_helper.ExampleClass()
    # NOTE(review): 'instance' is also passed as the first positional
    # argument here; the replay call matches it literally.
    instance.TestMethod(instance, 'one', 'two', 'nine')
    self.mox.ReplayAll()
    instance.TestMethod(instance, 'one', 'two', 'nine')
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
def testStubOutMethod_Bound_SpecialArgs(self):
    """Bound stubs should handle *args/**kwargs-style signatures."""
    self.mox.StubOutWithMock(mox_helper.ExampleClass, 'SpecialArgs')
    instance = mox_helper.ExampleClass()
    instance.SpecialArgs(instance, 'foo', None, bar='bar')
    self.mox.ReplayAll()
    instance.SpecialArgs(instance, 'foo', None, bar='bar')
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
def testStubOutMethod_Func_PropagatesExceptions(self):
    """Errors in Func comparator should propagate to the calling method."""
    # Fix: renamed from ..._Func_PropgatesExceptions ("Propgates" typo);
    # discovery by the 'test' prefix is unaffected.

    class TestException(Exception):
        pass

    def raiseExceptionOnNotOne(value):
        # Matches 1; any other value raises, and that exception must
        # surface at the call site instead of being swallowed.
        if value == 1:
            return True
        else:
            raise TestException

    test_obj = TestClass()
    self.mox.StubOutWithMock(test_obj, 'MethodWithArgs')
    test_obj.MethodWithArgs(
        mox.IgnoreArg(), mox.Func(raiseExceptionOnNotOne)).AndReturn(1)
    test_obj.MethodWithArgs(
        mox.IgnoreArg(), mox.Func(raiseExceptionOnNotOne)).AndReturn(1)
    self.mox.ReplayAll()
    self.assertEqual(test_obj.MethodWithArgs('ignored', 1), 1)
    self.assertRaises(TestException,
                      test_obj.MethodWithArgs, 'ignored', 2)
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
def testStubOut_SignatureMatching_init_(self):
    """A stubbed __init__ should match calls made via a subclass."""
    self.mox.StubOutWithMock(mox_helper.ExampleClass, '__init__')
    mox_helper.ExampleClass.__init__(mox.IgnoreArg())
    self.mox.ReplayAll()
    # Create an instance of a child class, which calls the parent
    # __init__
    mox_helper.ChildExampleClass()
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
# FIXME(dhellmann): Skip this test until someone can debug why it
# fails on python 3.4.
@testtools.skipIf(six.PY3, "This test needs to be fixed for python 3")
def testStubOutClass_OldStyle(self):
    """Test a mocked class whose __init__ returns a Mock."""
    self.mox.StubOutWithMock(mox_helper, 'TestClassFromAnotherModule')
    self.assertTrue(isinstance(mox_helper.TestClassFromAnotherModule,
                               mox.MockObject))
    # Recording: constructing the class returns a mock instance with
    # its own expectations.
    mock_instance = self.mox.CreateMock(
        mox_helper.TestClassFromAnotherModule)
    mox_helper.TestClassFromAnotherModule().AndReturn(mock_instance)
    mock_instance.Value().AndReturn('mock instance')
    self.mox.ReplayAll()
    a_mock = mox_helper.TestClassFromAnotherModule()
    actual = a_mock.Value()
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
    self.assertEqual('mock instance', actual)
def testStubOutClass(self):
    """StubOutClassWithMocks returns the recorded mock per construction."""
    self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
    # Instance one
    mock_one = mox_helper.CallableClass(1, 2)
    mock_one.Value().AndReturn('mock')
    # Instance two
    mock_two = mox_helper.CallableClass(8, 9)
    mock_two('one').AndReturn('called mock')
    self.mox.ReplayAll()
    one = mox_helper.CallableClass(1, 2)
    actual_one = one.Value()
    two = mox_helper.CallableClass(8, 9)
    actual_two = two('one')
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
    # Verify the correct mocks were returned
    self.assertEqual(mock_one, one)
    self.assertEqual(mock_two, two)
    # Verify
    self.assertEqual('mock', actual_one)
    self.assertEqual('called mock', actual_two)
def testStubOutClass_NotAClass(self):
    """Stubbing a plain function with StubOutClassWithMocks must fail."""
    self.assertRaises(TypeError, self.mox.StubOutClassWithMocks,
                      mox_helper, 'MyTestFunction')
def testStubOutClassNotEnoughCreated(self):
    """Fewer constructions than recorded must fail verification."""
    self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
    mox_helper.CallableClass(1, 2)
    mox_helper.CallableClass(8, 9)
    self.mox.ReplayAll()
    # Only one of the two recorded constructions actually happens.
    mox_helper.CallableClass(1, 2)
    self.assertRaises(mox.ExpectedMockCreationError, self.mox.VerifyAll)
    self.mox.UnsetStubs()
def testStubOutClassWrongSignature(self):
    """Constructing with too few arguments must raise at record time."""
    self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
    self.assertRaises(AttributeError, mox_helper.CallableClass)
    self.mox.UnsetStubs()
def testStubOutClassWrongParameters(self):
    """Constructing with different arguments than recorded must fail."""
    self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
    mox_helper.CallableClass(1, 2)
    self.mox.ReplayAll()
    self.assertRaises(mox.UnexpectedMethodCallError,
                      mox_helper.CallableClass, 8, 9)
    self.mox.UnsetStubs()
def testStubOutClassTooManyCreated(self):
    """More constructions than recorded must fail immediately."""
    self.mox.StubOutClassWithMocks(mox_helper, 'CallableClass')
    mox_helper.CallableClass(1, 2)
    self.mox.ReplayAll()
    mox_helper.CallableClass(1, 2)
    self.assertRaises(mox.UnexpectedMockCreationError,
                      mox_helper.CallableClass, 8, 9)
    self.mox.UnsetStubs()
def testWarnsUserIfMockingMock(self):
    """Stubbing out something that is already a mock must raise."""
    self.mox.StubOutWithMock(TestClass, 'MyStaticMethod')
    # The second stub-out targets the mock installed above.
    self.assertRaises(TypeError, self.mox.StubOutWithMock, TestClass,
                      'MyStaticMethod')
def testStubOutFirstClassMethodVerifiesSignature(self):
    """A stubbed module-level function must enforce its real signature."""
    self.mox.StubOutWithMock(mox_helper, 'MyTestFunction')
    # Wrong number of arguments
    self.assertRaises(AttributeError, mox_helper.MyTestFunction, 1)
    self.mox.UnsetStubs()
def _testMethodSignatureVerification(self, stubClass):
    """Shared driver: signature checks for a stubbed ExampleClass.TestMethod.

    If stubClass is true, the test is run against a stubbed out class,
    else the test is run against a stubbed out instance (the stub is
    installed after the instance is created).
    """
    if stubClass:
        self.mox.StubOutWithMock(mox_helper.ExampleClass, "TestMethod")
        obj = mox_helper.ExampleClass()
    else:
        obj = mox_helper.ExampleClass()
        self.mox.StubOutWithMock(mox_helper.ExampleClass, "TestMethod")
    # TestMethod(self, one, two, nine=None): too few / unknown-kwarg
    # calls must be rejected even at record time.
    self.assertRaises(AttributeError, obj.TestMethod)
    self.assertRaises(AttributeError, obj.TestMethod, 1)
    self.assertRaises(AttributeError, obj.TestMethod, nine=2)
    obj.TestMethod(1, 2)
    obj.TestMethod(1, 2, 3)
    obj.TestMethod(1, 2, nine=3)
    self.assertRaises(AttributeError, obj.TestMethod, 1, 2, 3, 4)
    self.mox.UnsetStubs()
def testStubOutClassMethodVerifiesSignature(self):
    """Signature verification when the class itself is stubbed."""
    self._testMethodSignatureVerification(stubClass=True)
def testStubOutObjectMethodVerifiesSignature(self):
    """Signature verification when an existing instance is stubbed."""
    self._testMethodSignatureVerification(stubClass=False)
def testStubOutObject(self):
    """Test that an object attribute is replaced with a Mock."""

    class Foo(object):
        def __init__(self):
            self.obj = TestClass()

    foo = Foo()
    self.mox.StubOutWithMock(foo, "obj")
    self.assertTrue(isinstance(foo.obj, mox.MockObject))
    foo.obj.ValidCall()
    self.mox.ReplayAll()
    foo.obj.ValidCall()
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
    # UnsetStubs must put the original TestClass instance back.
    self.assertFalse(isinstance(foo.obj, mox.MockObject))
def testForgotReplayHelpfulMessage(self):
    """If there is an AttributeError on a MockMethod, give helpful msg."""
    foo = self.mox.CreateMockAnything()
    bar = self.mox.CreateMockAnything()
    foo.GetBar().AndReturn(bar)
    bar.ShowMeTheMoney()
    # Forgot to replay!
    try:
        foo.GetBar().ShowMeTheMoney()
    except AttributeError as e:
        self.assertEqual(
            'MockMethod has no attribute "ShowMeTheMoney". '
            'Did you remember to put your mocks in replay mode?', str(e))
    else:
        # Bug fix: previously the test silently passed when no
        # AttributeError was raised at all, so it could never detect a
        # regression in the helpful-message behavior.
        self.fail('AttributeError with helpful message not raised')
class ReplayTest(testtools.TestCase):
    """Verify Replay works properly."""

    def testReplay(self):
        """Replay should put objects into replay mode."""
        mock_obj = mox.MockObject(TestClass)
        # Mocks start in record mode and switch on Replay().
        self.assertFalse(mock_obj._replay_mode)
        mox.Replay(mock_obj)
        self.assertTrue(mock_obj._replay_mode)
class MoxTestBaseTest(testtools.TestCase):
    """Verify that all tests in class derived from MoxTestBase are wrapped."""

    def setUp(self):
        super(MoxTestBaseTest, self).setUp()
        # self.mox drives this test; self.test_mox / self.test_stubs are
        # injected into the test-under-test via _setUpTestClass().
        self.mox = mox.Mox()
        self.addCleanup(self.mox.UnsetStubs)
        self.test_mox = mox.Mox()
        self.addCleanup(self.test_mox.UnsetStubs)
        self.test_stubs = mox.stubout.StubOutForTesting()
        self.addCleanup(self.test_stubs.UnsetAll)
        self.addCleanup(self.test_stubs.SmartUnsetAll)
        self.result = testtools.TestResult()

    def _setUpTestClass(self):
        """Replacement for setUp in the test class instance.

        Assigns a mox.Mox instance as the mox attribute of the test
        instance. Replacement Mox instance is under our control before
        setUp is called in the test class instance.
        """
        self.test.mox = self.test_mox
        self.test.stubs = self.test_stubs

    def _CreateTest(self, test_name):
        """Create a test from our example mox class.

        The created test instance is assigned to this instance's test
        attribute.
        """
        self.test = mox_helper.ExampleMoxTest(test_name)
        self.mox.stubs.Set(self.test, 'setUp', self._setUpTestClass)

    def _VerifySuccess(self):
        """Run the checks to confirm test method completed successfully."""
        self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
        self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
        self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
        self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
        # MoxTestBase must call all four of these after a passing test.
        self.test_mox.UnsetStubs()
        self.test_mox.VerifyAll()
        self.test_stubs.UnsetAll()
        self.test_stubs.SmartUnsetAll()
        self.mox.ReplayAll()
        self.test.run(result=self.result)
        self.assertTrue(self.result.wasSuccessful())
        self.mox.VerifyAll()
        self.mox.UnsetStubs()  # Needed to call the real VerifyAll() below.
        self.test_mox.VerifyAll()

    def testSuccess(self):
        """Successful test method execution test."""
        self._CreateTest('testSuccess')
        self._VerifySuccess()

    def testSuccessNoMocks(self):
        """testSuccess() unsets all the mocks. Verify they've been unset."""
        self._CreateTest('testSuccess')
        self.test.run(result=self.result)
        self.assertTrue(self.result.wasSuccessful())
        # OS_LISTDIR is the saved original; it must have been restored.
        self.assertEqual(OS_LISTDIR, mox_helper.os.listdir)

    def testStubs(self):
        """Test that "self.stubs" is provided as is useful."""
        self._CreateTest('testHasStubs')
        self._VerifySuccess()

    def testStubsNoMocks(self):
        """Let testHasStubs() unset the stubs by itself."""
        self._CreateTest('testHasStubs')
        self.test.run(result=self.result)
        self.assertTrue(self.result.wasSuccessful())
        self.assertEqual(OS_LISTDIR, mox_helper.os.listdir)

    def testExpectedNotCalled(self):
        """Stubbed out method is not called."""
        self._CreateTest('testExpectedNotCalled')
        self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
        self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
        self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
        # Don't stub out VerifyAll - that's what causes the test to fail
        self.test_mox.UnsetStubs()
        self.test_stubs.UnsetAll()
        self.test_stubs.SmartUnsetAll()
        self.mox.ReplayAll()
        self.test.run(result=self.result)
        self.assertFalse(self.result.wasSuccessful())
        self.mox.VerifyAll()

    def testExpectedNotCalledNoMocks(self):
        """Let testExpectedNotCalled() unset all the mocks by itself."""
        self._CreateTest('testExpectedNotCalled')
        self.test.run(result=self.result)
        self.assertFalse(self.result.wasSuccessful())
        self.assertEqual(OS_LISTDIR, mox_helper.os.listdir)

    def testUnexpectedCall(self):
        """Stubbed out method is called with unexpected arguments."""
        self._CreateTest('testUnexpectedCall')
        self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
        self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
        self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
        # Ensure no calls are made to VerifyAll()
        self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
        self.test_mox.UnsetStubs()
        self.test_stubs.UnsetAll()
        self.test_stubs.SmartUnsetAll()
        self.mox.ReplayAll()
        self.test.run(result=self.result)
        self.assertFalse(self.result.wasSuccessful())
        self.mox.VerifyAll()

    def testFailure(self):
        """Failing assertion in test method."""
        self._CreateTest('testFailure')
        self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
        self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
        self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
        # Ensure no calls are made to VerifyAll()
        self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
        self.test_mox.UnsetStubs()
        self.test_stubs.UnsetAll()
        self.test_stubs.SmartUnsetAll()
        self.mox.ReplayAll()
        self.test.run(result=self.result)
        self.assertFalse(self.result.wasSuccessful())
        self.mox.VerifyAll()

    def testMixin(self):
        """Run test from mix-in test class, ensure it passes."""
        self._CreateTest('testStat')
        self._VerifySuccess()

    def testMixinAgain(self):
        """Run same test as above but from the current test class.

        Ensures metaclass properly wrapped test methods from all base
        classes. If unsetting of stubs doesn't happen, this will fail.
        """
        self._CreateTest('testStatOther')
        self._VerifySuccess()
class VerifyTest(testtools.TestCase):
    """Verify Verify works properly."""

    def testVerify(self):
        """Verify should be called for all objects.

        Should throw an exception because the expected behavior did not
        occur.
        """
        mock_obj = mox.MockObject(TestClass)
        mock_obj.ValidCall()
        mock_obj._Replay()
        # ValidCall was recorded but never made, so Verify must raise.
        self.assertRaises(mox.ExpectedMethodCallsError, mox.Verify, mock_obj)
class ResetTest(testtools.TestCase):
    """Verify Reset works properly."""

    def testReset(self):
        """Should empty all queues and put mocks in record mode."""
        mock_obj = mox.MockObject(TestClass)
        mock_obj.ValidCall()
        self.assertFalse(mock_obj._replay_mode)
        mock_obj._Replay()
        self.assertTrue(mock_obj._replay_mode)
        self.assertEqual(1, len(mock_obj._expected_calls_queue))
        # Reset must drop the pending expectation and leave record mode.
        mox.Reset(mock_obj)
        self.assertFalse(mock_obj._replay_mode)
        self.assertEqual(0, len(mock_obj._expected_calls_queue))
class MyTestCase(testtools.TestCase):
    """Simulate the use of a fake wrapper around Python's unittest library."""

    def setUp(self):
        super(MyTestCase, self).setUp()
        self.critical_variable = 42
        self.another_critical_variable = 42

    def testMethodOverride(self):
        """Should be properly overridden in a derived class."""
        self.assertEqual(42, self.another_critical_variable)
        self.another_critical_variable += 1
class MoxTestBaseMultipleInheritanceTest(mox.MoxTestBase, MyTestCase):
    """Test that multiple inheritance can be used with MoxTestBase."""

    def setUp(self):
        super(MoxTestBaseMultipleInheritanceTest, self).setUp()
        self.another_critical_variable = 99

    def testMultipleInheritance(self):
        """Should be able to access members created by all parent setUp()."""
        self.assertTrue(isinstance(self.mox, mox.Mox))
        self.assertEqual(42, self.critical_variable)

    def testMethodOverride(self):
        """Should run before MyTestCase.testMethodOverride."""
        self.assertEqual(99, self.another_critical_variable)
        self.another_critical_variable = 42
        # Delegate to the base implementation, which increments to 43.
        super(MoxTestBaseMultipleInheritanceTest, self).testMethodOverride()
        self.assertEqual(43, self.another_critical_variable)
class MoxTestDontMockProperties(MoxTestBaseTest):
    """Verify that class properties are not turned into mock methods."""

    def testPropertiesArentMocked(self):
        mock_class = self.mox.CreateMock(ClassWithProperties)
        # Accessing the property on the mock must raise rather than
        # record a method call.
        self.assertRaises(mox.UnknownMethodCallError,
                          lambda: mock_class.prop_attr)
class TestClass(object):
    """This class is used only for testing the mock framework.

    The exact set of method names (public, protected, private, dunder,
    classmethod, staticmethod) is significant: the mox tests above
    introspect and stub these attributes by name.
    """

    SOME_CLASS_VAR = "test_value"
    _PROTECTED_CLASS_VAR = "protected value"

    def __init__(self, ivar=None):
        # Name-mangled instance variable; compared against in __eq__.
        self.__ivar = ivar

    def __eq__(self, rhs):
        # Instances compare equal to whatever their ivar equals.
        return self.__ivar == rhs

    def __ne__(self, rhs):
        return not self.__eq__(rhs)

    def ValidCall(self):
        pass

    def MethodWithArgs(self, one, two, nine=None):
        pass

    def OtherValidCall(self):
        pass

    def OptionalArgs(self, foo='boom'):
        pass

    def ValidCallWithArgs(self, *args, **kwargs):
        pass

    @classmethod
    def MyClassMethod(cls):
        pass

    @staticmethod
    def MyStaticMethod():
        pass

    def _ProtectedCall(self):
        pass

    def __PrivateCall(self):
        pass

    def __DoNotMock(self):
        pass

    def __getitem__(self, key):
        """Return the value for key."""
        # NOTE(review): self.d is never initialised anywhere in this
        # class; callers appear to rely on mocking, not real access.
        return self.d[key]

    def __setitem__(self, key, value):
        """Set the value for key to value."""
        self.d[key] = value

    def __contains__(self, key):
        """Returns True if d contains the key."""
        return key in self.d

    def __iter__(self):
        pass
class ChildClass(TestClass):
    """This inherits from TestClass."""

    def __init__(self):
        # Explicit parent initialisation (no ivar of its own).
        TestClass.__init__(self)

    def ChildValidCall(self):
        # Defined only on the child; used to test subclass mocking.
        pass
class CallableClass(object):
    """A callable object; calling its instances should be mockable."""

    def __init__(self):
        """Nothing to initialise; instances carry no state."""

    def __call__(self, param):
        """Echo back the single positional argument unchanged."""
        return param
class ClassWithProperties(object):
    """Test fixture with a property; mox must not mock property access."""

    def setter_attr(self, value):
        pass

    def getter_attr(self):
        pass

    # Plain methods are kept (not @property decorators) so they remain
    # visible as ordinary attributes alongside the property object.
    prop_attr = property(getter_attr, setter_attr)
class SubscribtableNonIterableClass(object):
    """Subscriptable but not iterable: __getitem__ without __iter__.

    NOTE(review): the class name is misspelled ("Subscribtable") but is
    kept as-is since other tests may reference it by name.
    """

    def __getitem__(self, index):
        # Always raise so iteration via the sequence protocol stops
        # immediately.
        raise IndexError
class InheritsFromCallable(CallableClass):
    """This class should be mockable; it inherits from a callable class."""
    pass
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    testtools.main()
| bsd-3-clause |
miguelparaiso/OdooAccessible | addons/stock/wizard/__init__.py | 323 | 1149 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_move
import stock_return_picking
import stock_change_product_qty
import make_procurement_product
import orderpoint_procurement
import stock_transfer_details
| agpl-3.0 |
nckx/dstat | plugins/dstat_disk_svctm.py | 4 | 2496 | ### Author: David Nicklay <david-d$nicklay,com>
### Modified from disk-util: Dag Wieers <dag$wieers,com>
# NOTE: Python 2 only ('raise Exception, msg' syntax, long()). The names
# dstat, re, os, glob, op and step are provided by dstat's plugin loader,
# which executes this file inside its own namespace.
class dstat_plugin(dstat):
    """
    The average service time (in milliseconds) for I/O requests that were
    issued to the device.

    Warning! Do not trust this field any more.
    """
    def __init__(self):
        self.version = 2
        self.nick = ('svctm',)
        self.type = 'f'
        self.width = 4
        self.scale = 1
        # Devices matching this pattern (partitions, dm/md/VxVM nodes)
        # are excluded so only whole disks are reported.
        # NOTE(review): 'mmcblk\d+p\d0' looks like a typo for
        # 'mmcblk\d+p\d+' -- confirm against upstream dstat plugins.
        self.diskfilter = re.compile('^([hsv]d[a-z]+\d+|cciss/c\d+d\d+p\d+|dm-\d+|md\d+|mmcblk\d+p\d0|VxVM\d+)$')
        self.open('/proc/diskstats')
        self.cols = 1
        # Per-device counters: number of completed I/Os and total time
        # (ms) spent doing I/O, as read from /proc/diskstats.
        self.struct = dict( nr_ios=0, tot_ticks=0 )

    def discover(self, *objlist):
        """Return the names of all block devices with any activity."""
        ret = []
        for l in self.splitlines():
            if len(l) < 13: continue
            # Skip devices whose 11 counter fields are all zero.
            if l[3:] == ['0',] * 11: continue
            name = l[2]
            ret.append(name)
        for item in objlist: ret.append(item)
        if not ret:
            raise Exception, "No suitable block devices found to monitor"
        return ret

    def vars(self):
        """Select the devices to display: -D list or all whole disks."""
        ret = []
        if op.disklist:
            varlist = op.disklist
        else:
            varlist = []
            blockdevices = [os.path.basename(filename) for filename in glob.glob('/sys/block/*')]
            for name in self.discover:
                # Drop partitions and anything not under /sys/block.
                if self.diskfilter.match(name): continue
                if name not in blockdevices: continue
                varlist.append(name)
            varlist.sort()
        for name in varlist:
            if name in self.discover:
                ret.append(name)
        return ret

    def name(self):
        return self.vars

    def extract(self):
        """Compute svctm = delta(tot_ticks) / delta(nr_ios) per device."""
        for l in self.splitlines():
            if len(l) < 13: continue
            if l[3:] == ['0',] * 11: continue
            if l[3] == '0' and l[7] == '0': continue
            name = l[2]
            if name not in self.vars or name == 'total': continue
            self.set2[name] = dict(
                # Fields 3 and 7 are reads and writes completed;
                # field 12 is total time spent doing I/O (ms).
                nr_ios = long(l[3])+long(l[7]),
                tot_ticks = long(l[12]),
            )
        for name in self.vars:
            tput = ( self.set2[name]['nr_ios'] - self.set1[name]['nr_ios'] )
            if tput:
                util = ( self.set2[name]['tot_ticks'] - self.set1[name]['tot_ticks'] )
                self.val[name] = ( util * 1.0 / tput, )
            else:
                self.val[name] = ( 0.0, )
        # Roll the window forward once per display interval.
        if step == op.delay:
            self.set1.update(self.set2)
lampeh/FooPing | server/FooPingReceiver.py | 1 | 3008 | ##
# FooPing Demo Receiver
# Copyright 2014 Hauke Lampe
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##
import sys
import socket
import json
import os
import hashlib
import hmac
import tempfile
import time
import base64
from StringIO import StringIO
from Crypto.Cipher import AES
from gzip import GzipFile
# TODO: dual-stack
UDP_IP = "0.0.0.0"   # listen on all IPv4 interfaces
UDP_PORT = 23042

# TODO: support unique keys per client
# AES-256 key derived by hashing a hard-coded shared passphrase.
key = hashlib.sha256(b'm!ToSC]vb=:<b&XL.|Yq#LYE{V+$Mc~y').digest()
block_size = 16      # AES block size; the packet's first 16 bytes are the IV
# Separate key for the HMAC-SHA1 packet authentication tag.
mac_key = hashlib.sha256(b'sM[N9+l8~N7Ox_7^EI>s|vLkiVXo-[T').digest()
mac_size = 20        # SHA-1 digest length in bytes
def compare_digest(x, y):
    """Constant-time equality check for two byte strings.

    Returns True only if x and y are byte-for-byte equal. The comparison
    time depends only on the input length, never on where the inputs
    first differ, so it is safe for comparing MAC tags (timing-attack
    resistant). Raises TypeError if either input is not bytes.

    NOTE: hmac.compare_digest (Python 2.7.7+/3.3+) does the same job in
    C; this fallback is kept for older interpreters.
    """
    ## return early if type or length don't match
    if not (isinstance(x, bytes) and isinstance(y, bytes)):
        raise TypeError("both inputs should be instances of bytes")
    if len(x) != len(y):
        # Length is not secret; an early return here leaks nothing.
        return False
    ## don't return early when comparing. timing is independent of result
    ## xor all bytes. result == 0 if x == y
    result = 0
    for a, b in zip(x, y):
        # Fix: iterating bytes yields 1-char strings on Python 2 but
        # ints on Python 3; normalise so the function works on both.
        if not isinstance(a, int):
            a, b = ord(a), ord(b)
        result |= a ^ b
    return result == 0
# Bind a reusable UDP socket on the configured address/port.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((UDP_IP, UDP_PORT))

# Last-known data per client name; re-serialised to data.json on every
# accepted packet.
clientData = {}
# Main receive loop (Python 2: print statements). Packet layout:
#   [16-byte AES-CFB IV | ciphertext of gzipped JSON | 20-byte HMAC-SHA1]
while True:
    try:
        # TODO: support fragmented packets
        raw, addr = sock.recvfrom(1500)
        print "packet from: " + addr[0] + ":" + str(addr[1])
        # Split authenticated portion (IV + ciphertext) from the MAC tag.
        data = raw[:-mac_size]
        mac = raw[-mac_size:]
        mac2 = hmac.new(mac_key, data, hashlib.sha1).digest()
        # Constant-time tag check; drop the packet on mismatch.
        if (not compare_digest(mac, mac2)):
            print "MAC mismatch!"
            print base64.b64encode(mac)
            print base64.b64encode(mac2)
            continue
        # Decrypt (AES-CFB, IV = first block) and gunzip the JSON payload.
        msg = GzipFile(fileobj = StringIO(AES.new(key, AES.MODE_CFB, data[:block_size]).decrypt(data[block_size:]))).read()
        print msg
        print
        packet = json.loads(msg)
        # Merge receiver-side metadata into the client's ping record.
        pingData = {}
        pingData.update(packet[0])
        pingData.update({ 'ipaddr': addr[0], 'ts_rcvd': int(time.time()*1000) })
        # TODO: clients can overwrite each others data
        client = pingData["client"]
        if client in clientData:
            clientData[client].update(pingData)
        else:
            clientData[client] = pingData
        # Atomic publish: write to a tempfile in the same directory,
        # then rename over data.json.
        # TODO: clean up tempfile if write/rename fails
        fd, fn = tempfile.mkstemp(prefix="data.json.", dir=".", text=True)
        f = os.fdopen(fd, "w")
        f.write(json.dumps([clientData]))
        os.fchmod(fd, 0444)
        f.close()
        os.rename(fn, 'data.json');
    except Exception:
        # Best-effort server: log and keep receiving.
        print "*** exception in packet from: " + addr[0] + ":" + str(addr[1])
        print sys.exc_info()
        pass
52ai/django-ccsds | tests/m2m_through/tests.py | 1 | 15770 | from __future__ import unicode_literals
from datetime import datetime
from operator import attrgetter
from django.test import TestCase
from .models import (
CustomMembership, Employee, Event, Friendship, Group, Ingredient,
Invitation, Membership, Person, PersonSelfRefM2M, Recipe, RecipeIngredient,
Relationship,
)
class M2mThroughTests(TestCase):
def setUp(self):
    # Shared fixtures: three people and two groups, related through the
    # explicit Membership model in the tests below.
    self.bob = Person.objects.create(name='Bob')
    self.jim = Person.objects.create(name='Jim')
    self.jane = Person.objects.create(name='Jane')
    self.rock = Group.objects.create(name='Rock')
    self.roll = Group.objects.create(name='Roll')
def test_retrieve_intermediate_items(self):
    """Members related via the through model are visible on the m2m."""
    Membership.objects.create(person=self.jim, group=self.rock)
    Membership.objects.create(person=self.jane, group=self.rock)
    # Default Person ordering yields alphabetical names.
    expected = ['Jane', 'Jim']
    self.assertQuerysetEqual(
        self.rock.members.all(),
        expected,
        attrgetter("name")
    )
def test_get_on_intermediate_model(self):
    """The through model can be queried directly with get()."""
    Membership.objects.create(person=self.jane, group=self.rock)
    queryset = Membership.objects.get(person=self.jane, group=self.rock)
    self.assertEqual(
        repr(queryset),
        '<Membership: Jane is a member of Rock>'
    )
def test_filter_on_intermediate_model(self):
    """The through model can be filtered directly."""
    Membership.objects.create(person=self.jim, group=self.rock)
    Membership.objects.create(person=self.jane, group=self.rock)
    queryset = Membership.objects.filter(group=self.rock)
    expected = [
        '<Membership: Jim is a member of Rock>',
        '<Membership: Jane is a member of Rock>',
    ]
    self.assertQuerysetEqual(
        queryset,
        expected
    )
def test_cannot_use_add_on_m2m_with_intermediary_model(self):
msg = 'Cannot use add() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.rock.members.add(self.bob)
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_cannot_use_create_on_m2m_with_intermediary_model(self):
msg = 'Cannot use create() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.rock.members.create(name='Annie')
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_cannot_use_remove_on_m2m_with_intermediary_model(self):
Membership.objects.create(person=self.jim, group=self.rock)
msg = 'Cannot use remove() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.rock.members.remove(self.jim)
self.assertQuerysetEqual(
self.rock.members.all(),
['Jim', ],
attrgetter("name")
)
def test_cannot_use_setattr_on_m2m_with_intermediary_model(self):
msg = 'Cannot set values on a ManyToManyField which specifies an intermediary model'
members = list(Person.objects.filter(name__in=['Bob', 'Jim']))
with self.assertRaisesMessage(AttributeError, msg):
setattr(self.rock, 'members', members)
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_clear_removes_all_the_m2m_relationships(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jane, group=self.rock)
self.rock.members.clear()
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_retrieve_reverse_intermediate_items(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jim, group=self.roll)
expected = ['Rock', 'Roll']
self.assertQuerysetEqual(
self.jim.group_set.all(),
expected,
attrgetter("name")
)
def test_cannot_use_add_on_reverse_m2m_with_intermediary_model(self):
msg = 'Cannot use add() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.add(self.bob)
self.assertQuerysetEqual(
self.bob.group_set.all(),
[]
)
def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self):
msg = 'Cannot use create() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.create(name='Funk')
self.assertQuerysetEqual(
self.bob.group_set.all(),
[]
)
def test_cannot_use_remove_on_reverse_m2m_with_intermediary_model(self):
Membership.objects.create(person=self.bob, group=self.rock)
msg = 'Cannot use remove() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.remove(self.rock)
self.assertQuerysetEqual(
self.bob.group_set.all(),
['Rock', ],
attrgetter('name')
)
def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self):
msg = 'Cannot set values on a ManyToManyField which specifies an intermediary model'
members = list(Group.objects.filter(name__in=['Rock', 'Roll']))
with self.assertRaisesMessage(AttributeError, msg):
setattr(self.bob, 'group_set', members)
self.assertQuerysetEqual(
self.bob.group_set.all(),
[]
)
def test_clear_on_reverse_removes_all_the_m2m_relationships(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jim, group=self.roll)
self.jim.group_set.clear()
self.assertQuerysetEqual(
self.jim.group_set.all(),
[]
)
def test_query_model_by_attribute_name_of_related_model(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jane, group=self.rock)
Membership.objects.create(person=self.bob, group=self.roll)
Membership.objects.create(person=self.jim, group=self.roll)
Membership.objects.create(person=self.jane, group=self.roll)
self.assertQuerysetEqual(
Group.objects.filter(members__name='Bob'),
['Roll', ],
attrgetter("name")
)
def test_query_first_model_by_intermediate_model_attribute(self):
Membership.objects.create(
person=self.jane, group=self.roll,
invite_reason="She was just awesome."
)
Membership.objects.create(
person=self.jim, group=self.roll,
invite_reason="He is good."
)
Membership.objects.create(person=self.bob, group=self.roll)
qs = Group.objects.filter(
membership__invite_reason="She was just awesome."
)
self.assertQuerysetEqual(
qs,
['Roll'],
attrgetter("name")
)
def test_query_second_model_by_intermediate_model_attribute(self):
Membership.objects.create(
person=self.jane, group=self.roll,
invite_reason="She was just awesome."
)
Membership.objects.create(
person=self.jim, group=self.roll,
invite_reason="He is good."
)
Membership.objects.create(person=self.bob, group=self.roll)
qs = Person.objects.filter(
membership__invite_reason="She was just awesome."
)
self.assertQuerysetEqual(
qs,
['Jane'],
attrgetter("name")
)
def test_query_model_by_related_model_name(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jane, group=self.rock)
Membership.objects.create(person=self.bob, group=self.roll)
Membership.objects.create(person=self.jim, group=self.roll)
Membership.objects.create(person=self.jane, group=self.roll)
self.assertQuerysetEqual(
Person.objects.filter(group__name="Rock"),
['Jane', 'Jim'],
attrgetter("name")
)
def test_query_model_by_custom_related_name(self):
CustomMembership.objects.create(person=self.bob, group=self.rock)
CustomMembership.objects.create(person=self.jim, group=self.rock)
self.assertQuerysetEqual(
Person.objects.filter(custom__name="Rock"),
['Bob', 'Jim'],
attrgetter("name")
)
def test_query_model_by_intermediate_can_return_non_unique_queryset(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(
person=self.jane, group=self.rock,
date_joined=datetime(2006, 1, 1)
)
Membership.objects.create(
person=self.bob, group=self.roll,
date_joined=datetime(2004, 1, 1))
Membership.objects.create(person=self.jim, group=self.roll)
Membership.objects.create(
person=self.jane, group=self.roll,
date_joined=datetime(2004, 1, 1))
qs = Person.objects.filter(
membership__date_joined__gt=datetime(2004, 1, 1)
)
self.assertQuerysetEqual(
qs,
['Jane', 'Jim', 'Jim'],
attrgetter("name")
)
def test_custom_related_name_forward_empty_qs(self):
self.assertQuerysetEqual(
self.rock.custom_members.all(),
[]
)
def test_custom_related_name_reverse_empty_qs(self):
self.assertQuerysetEqual(
self.bob.custom.all(),
[]
)
def test_custom_related_name_forward_non_empty_qs(self):
CustomMembership.objects.create(person=self.bob, group=self.rock)
CustomMembership.objects.create(person=self.jim, group=self.rock)
self.assertQuerysetEqual(
self.rock.custom_members.all(),
['Bob', 'Jim'],
attrgetter("name")
)
def test_custom_related_name_reverse_non_empty_qs(self):
CustomMembership.objects.create(person=self.bob, group=self.rock)
CustomMembership.objects.create(person=self.jim, group=self.rock)
self.assertQuerysetEqual(
self.bob.custom.all(),
['Rock'],
attrgetter("name")
)
def test_custom_related_name_doesnt_conflict_with_fky_related_name(self):
CustomMembership.objects.create(person=self.bob, group=self.rock)
self.assertQuerysetEqual(
self.bob.custom_person_related_name.all(),
['<CustomMembership: Bob is a member of Rock>']
)
def test_through_fields(self):
"""
Tests that relations with intermediary tables with multiple FKs
to the M2M's ``to`` model are possible.
"""
event = Event.objects.create(title='Rockwhale 2014')
Invitation.objects.create(event=event, inviter=self.bob, invitee=self.jim)
Invitation.objects.create(event=event, inviter=self.bob, invitee=self.jane)
self.assertQuerysetEqual(
event.invitees.all(),
['Jane', 'Jim'],
attrgetter('name')
)
class M2mThroughReferentialTests(TestCase):
    """Tests for self-referential ManyToManyFields with a through model
    (``PersonSelfRefM2M``/``Friendship`` and ``Employee``/``Relationship``).

    Such relations are non-symmetrical: a Friendship row only creates a link
    from ``first`` to ``second``, not the other way around.
    """
    def test_self_referential_empty_qs(self):
        tony = PersonSelfRefM2M.objects.create(name="Tony")
        self.assertQuerysetEqual(
            tony.friends.all(),
            []
        )
    def test_self_referential_non_symmetrical_first_side(self):
        tony = PersonSelfRefM2M.objects.create(name="Tony")
        chris = PersonSelfRefM2M.objects.create(name="Chris")
        Friendship.objects.create(
            first=tony, second=chris, date_friended=datetime.now()
        )
        self.assertQuerysetEqual(
            tony.friends.all(),
            ['Chris'],
            attrgetter("name")
        )
    def test_self_referential_non_symmetrical_second_side(self):
        # The reverse direction is NOT implied by a single Friendship row.
        tony = PersonSelfRefM2M.objects.create(name="Tony")
        chris = PersonSelfRefM2M.objects.create(name="Chris")
        Friendship.objects.create(
            first=tony, second=chris, date_friended=datetime.now()
        )
        self.assertQuerysetEqual(
            chris.friends.all(),
            []
        )
    def test_self_referential_non_symmetrical_clear_first_side(self):
        tony = PersonSelfRefM2M.objects.create(name="Tony")
        chris = PersonSelfRefM2M.objects.create(name="Chris")
        Friendship.objects.create(
            first=tony, second=chris, date_friended=datetime.now()
        )
        chris.friends.clear()
        self.assertQuerysetEqual(
            chris.friends.all(),
            []
        )
        # Since this isn't a symmetrical relation, Tony's friend link still exists.
        self.assertQuerysetEqual(
            tony.friends.all(),
            ['Chris'],
            attrgetter("name")
        )
    def test_self_referential_symmetrical(self):
        # Symmetry must be modelled explicitly with two Friendship rows.
        tony = PersonSelfRefM2M.objects.create(name="Tony")
        chris = PersonSelfRefM2M.objects.create(name="Chris")
        Friendship.objects.create(
            first=tony, second=chris, date_friended=datetime.now()
        )
        Friendship.objects.create(
            first=chris, second=tony, date_friended=datetime.now()
        )
        self.assertQuerysetEqual(
            tony.friends.all(),
            ['Chris'],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            chris.friends.all(),
            ['Tony'],
            attrgetter("name")
        )
    def test_through_fields_self_referential(self):
        # Through model with several FKs to Employee; `through_fields`
        # selects which two define the relation.
        john = Employee.objects.create(name='john')
        peter = Employee.objects.create(name='peter')
        mary = Employee.objects.create(name='mary')
        harry = Employee.objects.create(name='harry')
        Relationship.objects.create(source=john, target=peter, another=None)
        Relationship.objects.create(source=john, target=mary, another=None)
        Relationship.objects.create(source=john, target=harry, another=peter)
        self.assertQuerysetEqual(
            john.subordinates.all(),
            ['peter', 'mary', 'harry'],
            attrgetter('name')
        )
class M2mThroughToFieldsTests(TestCase):
    """Tests for through-model m2m relations that join on non-PK columns
    (``to_field``), using the Recipe/Ingredient models.
    """
    def setUp(self):
        # One recipe with three ingredients, linked via RecipeIngredient.
        self.pea = Ingredient.objects.create(iname='pea')
        self.potato = Ingredient.objects.create(iname='potato')
        self.tomato = Ingredient.objects.create(iname='tomato')
        self.curry = Recipe.objects.create(rname='curry')
        RecipeIngredient.objects.create(recipe=self.curry, ingredient=self.potato)
        RecipeIngredient.objects.create(recipe=self.curry, ingredient=self.pea)
        RecipeIngredient.objects.create(recipe=self.curry, ingredient=self.tomato)
    def test_retrieval(self):
        # Forward retrieval
        self.assertQuerysetEqual(
            self.curry.ingredients.all(),
            [self.pea, self.potato, self.tomato], lambda x: x
        )
        # Backward retrieval
        self.assertEqual(self.tomato.recipes.get(), self.curry)
    def test_choices(self):
        # get_choices() should key on the to_field value (iname), not the PK.
        field = Recipe._meta.get_field('ingredients')
        self.assertEqual(
            [choice[0] for choice in field.get_choices(include_blank=False)],
            ['pea', 'potato', 'tomato']
        )
| bsd-3-clause |
nokute78/fluent-bit | plugins/out_kafka/librdkafka-1.6.0/tests/performance_plot.py | 3 | 2902 | #!/usr/bin/env python3
#
import sys, json
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
def semver2int(semver):
    """Map a dotted version string (e.g. '0.10.1.0') to a sortable integer.

    'trunk' is treated as an alias for '0.10.0.0'.  Each component is
    weighted by a positional base of 100, so versions compare correctly as
    long as every component is < 100.

    Bug fix: the previous weight was ``i * 10`` (linear, and 0 for the
    least-significant component), which made distinct versions collide,
    e.g. semver2int('0.8.2.1') == semver2int('0.9.0.0'), corrupting the
    sort order of the plot's x axis.
    """
    if semver == 'trunk':
        semver = '0.10.0.0'
    vi = 0
    for i, v in enumerate(reversed(semver.split('.'))):
        vi += int(v) * (100 ** i)
    return vi
def get_perf_data (perfname, stats):
    """Aggregate *stats* for counter *perfname* into plottable arrays.

    Returns [labels, x, y, errs] as numpy arrays, where labels are the
    broker versions (sorted by semver2int), x is 0..len-1, y is the per
    version average of the counter, and errs is max - average.
    """
    # Bucket the raw samples by broker version.
    by_version = defaultdict(list)
    for entry in stats:
        by_version[str(entry[0])].append(entry[1][perfname])
    print('%s is %s' % (perfname, by_version))
    # Version order drives the x axis.
    ordered = sorted(by_version.keys(), key=semver2int)
    averages = []
    spreads = []
    for version in ordered:
        samples = by_version[version]
        mean = sum(samples) / float(len(samples))
        averages.append(mean)
        # Error bar: distance from the average to the best sample.
        spreads.append(max(samples) - mean)
    return [np.array(ordered),
            np.array(range(0, len(ordered))),
            np.array(averages),
            np.array(spreads)]
def plot(description, name, stats, perfname, outfile=None):
    """Plot counter *perfname* against broker version.

    *description* and *name* go into the title; *stats* is the list of
    (broker_version, counters) samples consumed by get_perf_data().  When
    *outfile* is None the plot is shown interactively, otherwise it is
    written to that path.

    Fix: removed an unused ``colors = np.random.rand(len(labels))`` local —
    it was never passed to any matplotlib call.
    """
    labels, x, y, errs = get_perf_data(perfname, stats)
    plt.title('%s: %s %s' % (description, name, perfname))
    plt.xlabel('Kafka version')
    plt.ylabel(perfname)
    plt.errorbar(x, y, yerr=errs, alpha=0.5)
    plt.xticks(x, labels, rotation='vertical')
    plt.margins(0.2)
    plt.subplots_adjust(bottom=0.2)
    if outfile is None:
        plt.show()
    else:
        plt.savefig(outfile, bbox_inches='tight')
    return
if __name__ == '__main__':
    # Usage: performance_plot.py <out-prefix> <report.json> [report.json ...]
    outfile = sys.argv[1]
    reports = []
    for rf in sys.argv[2:]:
        with open(rf) as f:
            reports.append(json.load(f))
    stats = defaultdict(list)
    # Extract performance test data
    for rep in reports:
        # Bug fix: the fallback for a missing '0038_performance' key used to
        # be the `list` *class* (not an instance), so the chained
        # .get('report', None) raised AttributeError for any report that
        # lacked that test.  Empty-dict defaults make such reports skip
        # cleanly via the `perfs is None` check below.
        perfs = rep.get('tests', {}).get('0038_performance', {}).get('report', None)
        if perfs is None:
            continue
        for perf in perfs:
            for n in ['producer', 'consumer']:
                o = perf.get(n, None)
                if o is None:
                    print('no %s in %s' % (n, perf))
                    continue
                stats[n].append((rep.get('broker_version', 'unknown'), o))
    # One PNG per (role, counter) combination.
    for t in ['producer', 'consumer']:
        for perfname in ['mb_per_sec', 'records_per_sec']:
            plot('librdkafka 0038_performance test: %s (%d samples)' % \
                 (outfile, len(reports)),
                 t, stats[t], perfname, outfile='%s_%s_%s.png' % (outfile, t, perfname))
| apache-2.0 |
toshywoshy/ansible | test/units/utils/test_shlex.py | 197 | 1290 | # (c) 2015, Marius Gedminas <marius@gedmin.as>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ansible.utils.shlex import shlex_split
class TestSplit(unittest.TestCase):
    """Unit tests for ansible.utils.shlex.shlex_split."""
    def test_trivial(self):
        result = shlex_split("a b c")
        self.assertEqual(result, ["a", "b", "c"])
    def test_unicode(self):
        result = shlex_split(u"a b \u010D")
        self.assertEqual(result, [u"a", u"b", u"\u010D"])
    def test_quoted(self):
        result = shlex_split('"a b" c')
        self.assertEqual(result, ["a b", "c"])
    def test_comments(self):
        result = shlex_split('"a b" c # d', comments=True)
        self.assertEqual(result, ["a b", "c"])
    def test_error(self):
        # An unterminated quote must raise ValueError.
        with self.assertRaises(ValueError):
            shlex_split('a "b')
| gpl-3.0 |
godfather1103/WeiboRobot | python27/1.0/lib/site-packages/pip/commands/list.py | 269 | 7251 | from __future__ import absolute_import
import logging
from pip._vendor import pkg_resources
from pip.basecommand import Command
from pip.exceptions import DistributionNotFound
from pip.index import FormatControl, fmt_ctl_formats, PackageFinder, Search
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions, dist_is_editable
from pip.wheel import WheelCache
from pip.cmdoptions import make_option_group, index_group
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
class ListCommand(Command):
    """
    List installed packages, including editables.
    Packages are listed in a case-insensitive sorted order.
    """
    name = 'list'
    usage = """
      %prog [options]"""
    summary = 'List installed packages.'
    def __init__(self, *args, **kw):
        """Register the command-line options understood by `pip list`."""
        super(ListCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts
        cmd_opts.add_option(
            '-o', '--outdated',
            action='store_true',
            default=False,
            help='List outdated packages (excluding editables)')
        cmd_opts.add_option(
            '-u', '--uptodate',
            action='store_true',
            default=False,
            help='List uptodate packages (excluding editables)')
        cmd_opts.add_option(
            '-e', '--editable',
            action='store_true',
            default=False,
            help='List editable projects.')
        cmd_opts.add_option(
            '-l', '--local',
            action='store_true',
            default=False,
            help=('If in a virtualenv that has global access, do not list '
                  'globally-installed packages.'),
        )
        self.cmd_opts.add_option(
            '--user',
            dest='user',
            action='store_true',
            default=False,
            help='Only output packages installed in user-site.')
        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help=("Include pre-release and development versions. By default, "
                  "pip only finds stable versions."),
        )
        # Index-related options (--index-url, --find-links, ...) are shared
        # across commands and inserted ahead of this command's own group.
        index_opts = make_option_group(index_group, self.parser)
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)
    def _build_package_finder(self, options, index_urls, session):
        """
        Create a package finder appropriate to this list command.
        """
        return PackageFinder(
            find_links=options.find_links,
            index_urls=index_urls,
            allow_external=options.allow_external,
            allow_unverified=options.allow_unverified,
            allow_all_external=options.allow_all_external,
            allow_all_prereleases=options.pre,
            trusted_hosts=options.trusted_hosts,
            process_dependency_links=options.process_dependency_links,
            session=session,
        )
    def run(self, options, args):
        """Dispatch to the listing mode selected on the command line.

        The mode flags are mutually exclusive in effect: --outdated wins
        over --uptodate, which wins over --editable; the default is a plain
        listing of everything installed.
        """
        if options.outdated:
            self.run_outdated(options)
        elif options.uptodate:
            self.run_uptodate(options)
        elif options.editable:
            self.run_editables(options)
        else:
            self.run_listing(options)
    def run_outdated(self, options):
        """Print each installed package with a newer version on the index."""
        for dist, version, typ in self.find_packages_latest_versions(options):
            if version > dist.parsed_version:
                logger.info(
                    '%s (Current: %s Latest: %s [%s])',
                    dist.project_name, dist.version, version, typ,
                )
    def find_packages_latest_versions(self, options):
        """Yield (dist, latest_version, typ) for each installed package.

        *typ* is 'wheel' or 'sdist' depending on the best remote file found,
        or 'unknown' if none.  Packages whose installed version is already
        the newest are skipped, as are editables.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.info('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []
        # Collect dependency links advertised by installed distributions so
        # the finder can consider them as candidate sources too.
        dependency_links = []
        for dist in get_installed_distributions(local_only=options.local,
                                                user_only=options.user):
            if dist.has_metadata('dependency_links.txt'):
                dependency_links.extend(
                    dist.get_metadata_lines('dependency_links.txt'),
                )
        with self._build_session(options) as session:
            finder = self._build_package_finder(options, index_urls, session)
            finder.add_dependency_links(dependency_links)
            installed_packages = get_installed_distributions(
                local_only=options.local,
                user_only=options.user,
                include_editables=False,
            )
            format_control = FormatControl(set(), set())
            wheel_cache = WheelCache(options.cache_dir, format_control)
            for dist in installed_packages:
                req = InstallRequirement.from_line(
                    dist.key, None, isolated=options.isolated_mode,
                    wheel_cache=wheel_cache
                )
                typ = 'unknown'
                try:
                    link = finder.find_requirement(req, True)
                    # If link is None, means installed version is most
                    # up-to-date
                    if link is None:
                        continue
                except DistributionNotFound:
                    continue
                else:
                    canonical_name = pkg_resources.safe_name(req.name).lower()
                    formats = fmt_ctl_formats(format_control, canonical_name)
                    search = Search(
                        req.name,
                        canonical_name,
                        formats)
                    remote_version = finder._link_package_versions(
                        link, search).version
                    if link.is_wheel:
                        typ = 'wheel'
                    else:
                        typ = 'sdist'
                yield dist, remote_version, typ
    def run_listing(self, options):
        """List every installed distribution (the default mode)."""
        installed_packages = get_installed_distributions(
            local_only=options.local,
            user_only=options.user,
        )
        self.output_package_listing(installed_packages)
    def run_editables(self, options):
        """List only editable (development-mode) installs."""
        installed_packages = get_installed_distributions(
            local_only=options.local,
            user_only=options.user,
            editables_only=True,
        )
        self.output_package_listing(installed_packages)
    def output_package_listing(self, installed_packages):
        """Log one line per package, sorted case-insensitively by name.

        Editable installs additionally show their source location.
        """
        installed_packages = sorted(
            installed_packages,
            key=lambda dist: dist.project_name.lower(),
        )
        for dist in installed_packages:
            if dist_is_editable(dist):
                line = '%s (%s, %s)' % (
                    dist.project_name,
                    dist.version,
                    dist.location,
                )
            else:
                line = '%s (%s)' % (dist.project_name, dist.version)
            logger.info(line)
    def run_uptodate(self, options):
        """List packages whose installed version matches the newest remote."""
        uptodate = []
        for dist, version, typ in self.find_packages_latest_versions(options):
            if dist.parsed_version == version:
                uptodate.append(dist)
        self.output_package_listing(uptodate)
| gpl-3.0 |
MoisesTedeschi/python | Scripts-Python/Modulos-Diversos/python-com-scrapy/Lib/tokenize.py | 9 | 27030 | """Tokenization help for Python programs.
tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens. It decodes the bytes according to PEP-0263 for
determining source file encoding.
It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF). It generates 5-tuples with these
members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators. Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
'Michael Foord')
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
from itertools import chain
import itertools as _itertools
import re
import sys
from token import *
# PEP 263: matches a "coding: <name>" declaration in a comment, and lines
# (blank or comment-only) that may legally precede it.
cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
import token
__all__ = token.__all__ + ["tokenize", "detect_encoding",
"untokenize", "TokenInfo"]
del token
# Maps each operator/delimiter spelling to its specific token type; used by
# TokenInfo.exact_type to refine generic OP tokens.
EXACT_TOKEN_TYPES = {
    '(': LPAR,
    ')': RPAR,
    '[': LSQB,
    ']': RSQB,
    ':': COLON,
    ',': COMMA,
    ';': SEMI,
    '+': PLUS,
    '-': MINUS,
    '*': STAR,
    '/': SLASH,
    '|': VBAR,
    '&': AMPER,
    '<': LESS,
    '>': GREATER,
    '=': EQUAL,
    '.': DOT,
    '%': PERCENT,
    '{': LBRACE,
    '}': RBRACE,
    '==': EQEQUAL,
    '!=': NOTEQUAL,
    '<=': LESSEQUAL,
    '>=': GREATEREQUAL,
    '~': TILDE,
    '^': CIRCUMFLEX,
    '<<': LEFTSHIFT,
    '>>': RIGHTSHIFT,
    '**': DOUBLESTAR,
    '+=': PLUSEQUAL,
    '-=': MINEQUAL,
    '*=': STAREQUAL,
    '/=': SLASHEQUAL,
    '%=': PERCENTEQUAL,
    '&=': AMPEREQUAL,
    '|=': VBAREQUAL,
    '^=': CIRCUMFLEXEQUAL,
    '<<=': LEFTSHIFTEQUAL,
    '>>=': RIGHTSHIFTEQUAL,
    '**=': DOUBLESTAREQUAL,
    '//': DOUBLESLASH,
    '//=': DOUBLESLASHEQUAL,
    '...': ELLIPSIS,
    '->': RARROW,
    '@': AT,
    '@=': ATEQUAL,
}
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    """A single token: (type, string, start, end, line).

    start/end are (row, col) pairs; line is the physical source line the
    token was found on.
    """
    def __repr__(self):
        # Show the numeric type alongside its symbolic name, e.g. "1 (NAME)".
        described = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)'
                % self._replace(type=described))
    @property
    def exact_type(self):
        # Refine a generic OP token to its operator-specific type when the
        # spelling is a known operator/delimiter.
        if self.type != OP:
            return self.type
        return EXACT_TOKEN_TYPES.get(self.string, self.type)
def group(*choices):
    """Join the alternatives into one parenthesised regex group."""
    return '({})'.format('|'.join(choices))
def any(*choices):
    """A group of alternatives repeated zero or more times."""
    return group(*choices) + '*'
def maybe(*choices):
    """An optional group of alternatives."""
    return group(*choices) + '?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'
# Number literal pieces; (?:_?...) allows PEP 515 underscore separators.
Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
Binnumber = r'0[bB](?:_?[01])+'
Octnumber = r'0[oO](?:_?[0-7])+'
Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
                   r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
# Order matters: imaginary before float before int, so the longest literal
# form is tried first.
Number = group(Imagnumber, Floatnumber, Intnumber)
# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes():
# The valid string prefixes. Only contain the lower case versions,
# and don't contain any permuations (include 'fr', but not
# 'rf'). The various permutations will be generated.
_valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
# if we add binary f-strings, add: ['fb', 'fbr']
result = {''}
for prefix in _valid_string_prefixes:
for t in _itertools.permutations(prefix):
# create a list with upper and lower versions of each
# character
for u in _itertools.product(*[(c, c.upper()) for c in t]):
result.add(''.join(u))
return result
def _compile(expr):
return re.compile(expr, re.UNICODE)
# Note that since _all_string_prefixes includes the empty string,
# StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&@|^=<>]=?",
                 r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
# PseudoToken is the master pattern used by the tokenizer's main loop; it
# also matches comments, line continuations and string openers.
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
# For a given string prefix plus quotes, endpats maps it to a regex
# to match the remainder of that string. _prefix can be empty, for
# a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3
# A set of all of the single and triple quoted string prefixes,
# including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)
# Number of columns a tab advances to (used for indentation bookkeeping).
tabsize = 8
# Raised when tokenization cannot continue, e.g. EOF inside an unterminated
# string or expression.
class TokenError(Exception): pass
# NOTE(review): not raised anywhere in this chunk; presumably used by callers
# of the legacy tokeneater interface to abort tokenization early — confirm.
class StopTokenizing(Exception): pass
class Untokenizer:
    """Rebuild source text from a stream of tokens.

    Tracks the (row, col) position of the end of the last emitted token so
    that inter-token whitespace can be reconstructed.  Full 5-tuple tokens
    round-trip positions exactly; bare (type, string) 2-tuples fall back to
    the approximate `compat` mode.
    """
    def __init__(self):
        # Accumulated output fragments; joined in untokenize().
        self.tokens = []
        # End position of the previously emitted token (1-based row).
        self.prev_row = 1
        self.prev_col = 0
        # Set from an ENCODING token, if one is seen.
        self.encoding = None
    def add_whitespace(self, start):
        """Emit filler so the next token begins at position *start*."""
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        # Bridge skipped rows with backslash-continued blank lines.
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)
    def untokenize(self, iterable):
        """Consume *iterable* of tokens and return the reconstructed source."""
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            # A 2-tuple means position info is unavailable: switch to the
            # approximate compatibility mode for the rest of the stream.
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                # Re-emit the current indentation at the start of a line.
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)
    def compat(self, token, iterable):
        """Best-effort untokenization for position-less (type, string) pairs."""
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False
        for tok in chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue
            # Pad names/numbers so adjacent tokens don't fuse together.
            if toknum in (NAME, NUMBER):
                tokval += ' '
            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False
            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
def untokenize(iterable):
    """Transform a token stream back into Python source code.

    Each element of *iterable* must be a token sequence with at least two
    items: a token number and a token value.  Full 5-tuples reproduce the
    input source exactly; 2-tuples yield approximate (but re-tokenizable)
    output.  If the stream begins with an ENCODING token — as streams from
    tokenize() do — the result is returned as bytes encoded with that
    encoding, otherwise as a str.

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    converter = Untokenizer()
    source = converter.untokenize(iterable)
    if converter.encoding is None:
        return source
    return source.encode(converter.encoding)
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.
    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.
    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.
    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    # Best effort: a filename for error messages, if readline is a bound
    # method of a file object.
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        # Treat exhausted input as an empty line (EOF).
        try:
            return readline()
        except StopIteration:
            return b''
    def find_cookie(line):
        # Return the cookie's normalized encoding, or None if *line* has no
        # valid PEP 263 declaration.  Raises SyntaxError for undecodable
        # lines, unknown codecs, or a cookie that contradicts a UTF-8 BOM.
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)
        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                                                             encoding)
            raise SyntaxError(msg)
        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding
    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        # Strip the BOM before cookie matching; remember it so a conflicting
        # cookie can be rejected.
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []
    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    # PEP 263 allows the cookie on line two only if line one is blank or a
    # comment.
    if not blank_re.match(first):
        return default, [first]
    second = read_or_stop()
    if not second:
        return default, [first]
    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]
    return default, [first, second]
def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    # Open in binary first so detect_encoding() can inspect the raw bytes
    # (BOM / coding cookie) before any text decoding happens.
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        # Rewind: detect_encoding() consumed up to two lines of the stream.
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        # Present the wrapper as an ordinary read-mode text file to callers.
        text.mode = 'r'
        return text
    except:
        # Close the underlying binary file before propagating any failure
        # so the descriptor is not leaked.
        buffer.close()
        raise
def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as bytes. Alternatively, readline
    can be a callable function terminating with StopIteration:
    readline = open(myfile, 'rb').__next__ # Example of alternate readline
    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    # Re-feed the line(s) consumed during encoding detection ahead of the
    # remaining input; pad with an infinite supply of b"" so _tokenize sees
    # a clean EOF instead of StopIteration from the exhausted chain.
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
def _tokenize(readline, encoding):
    """Core tokenizer generator: yield TokenInfo 5-tuples for the lines
    produced by *readline*, decoded with *encoding* (lines are treated
    as already-decoded str when encoding is None, as generate_tokens()
    arranges)."""
    # lnum: current line number; parenlev: bracket nesting depth;
    # continued: set when the previous line ended with a backslash.
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    # contstr/contline accumulate a string literal that spans lines;
    # needcont flags a single-quoted string that must continue via '\'.
    contstr, needcont = '', 0
    contline = None
    # Stack of indentation column widths; base level is 0.
    indents = [0]
    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    last_line = b''
    line = b''
    while True: # loop over lines in stream
        try:
            # We capture the value of the line variable here because
            # readline uses the empty string '' to signal end of input,
            # hence `line` itself will always be overwritten at the end
            # of this loop.
            last_line = line
            line = readline()
        except StopIteration:
            line = b''
        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)
        if contstr: # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                # A single-quoted string failed to continue with a
                # backslash: emit what we have as an ERRORTOKEN and resync.
                yield TokenInfo(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue
        elif parenlev == 0 and not continued: # new statement
            if not line: break
            column = 0
            while pos < max: # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    # Tabs advance to the next multiple of tabsize.
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break
            if line[pos] in '#\r\n': # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    yield TokenInfo(COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    pos += len(comment_token)
                yield TokenInfo(NL, line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue
            if column > indents[-1]: # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
        else: # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0
        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch: # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]
                if (initial in numchars or # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    # Inside brackets a newline is non-logical (NL);
                    # otherwise it ends the logical line (NEWLINE).
                    if parenlev > 0:
                        yield TokenInfo(NL, token, spos, epos, line)
                    else:
                        yield TokenInfo(NEWLINE, token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch: # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start) # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                # Check up to the first 3 chars of the token to see if
                # they're in the single_quoted set. If so, they start
                # a string.
                # We're using the first 3, because we're looking for
                # "rb'" (for example) at the start of the token. If
                # we switch to longer prefixes, this needs to be
                # adjusted.
                # Note that initial == token[:1].
                # Also note that single quote checking must come after
                # triple quote checking (above).
                elif (initial in single_quoted or
                      token[:2] in single_quoted or
                      token[:3] in single_quoted):
                    if token[-1] == '\n': # continued string
                        strstart = (lnum, start)
                        # Again, using the first 3 chars of the
                        # token. This is looking for the matching end
                        # regex for the correct type of quote
                        # character. So it's really looking for
                        # endpats["'"] or endpats['"'], by trying to
                        # skip string prefix characters, if any.
                        endprog = _compile(endpats.get(initial) or
                                           endpats.get(token[1]) or
                                           endpats.get(token[2]))
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else: # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier(): # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\': # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                # No pseudo-token matched here: emit the single offending
                # character as an ERRORTOKEN and step past it.
                yield TokenInfo(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1
    # Add an implicit NEWLINE if the input doesn't end in one
    if last_line and last_line[-1] not in '\r\n':
        yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
    for indent in indents[1:]: # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    """Tokenize a source whose readline yields str (not bytes) lines.
    Same as tokenize() but skips encoding detection, so no ENCODING
    token is produced."""
    return _tokenize(readline, None)
def main():
    """Command-line entry point: tokenize a named file (or stdin) and
    print one token per line, using exact operator types with -e."""
    import argparse
    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)
    def error(message, filename=None, location=None):
        # Print a compiler-style "file:line:col: error: msg" and exit 1.
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)
    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()
    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            # stdin is already text, so use the str-based tokenizer
            # (lazy generator, no ENCODING token).
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)
        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise
if __name__ == "__main__":
main()
| gpl-3.0 |
sdmathis/LitecoinNEW | contrib/spendfrom/spendfrom.py | 792 | 10053 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    # Round-trip a value with 8 decimal places through the JSON layer and
    # verify the satoshi amount survives intact.
    probe = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(probe)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # Anything else (Linux, BSD, ...) uses the dotfile convention.
    return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser
    class FakeSecHead(object):
        """Wrap the config file, first yielding a fake '[all]' section
        header so SafeConfigParser can parse bitcoin.conf (which has no
        sections); also strips trailing '#' comments from each line."""
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    # BUG FIX: the original opened the file inline and never closed it,
    # leaking the descriptor; close it deterministically instead.
    fp = open(os.path.join(dbdir, "bitcoin.conf"))
    try:
        config_parser.readfp(FakeSecHead(fp))
    finally:
        fp.close()
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server"""
    # 'testnet' is stored as "0"/"1" in the config file.
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    # 19332/9332 are the testnet/mainnet RPC ports used by this tool
    # (presumably Litecoin defaults -- confirm against the daemon config).
    if not 'rpcport' in config:
        config['rpcport'] = 19332 if testnet else 9332
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except:
        # Any failure (auth, refused connection, bad response) is fatal
        # for this command-line tool.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(bitcoind):
    """Prompt for the wallet passphrase if needed; return True when the
    wallet is unencrypted or currently unlocked."""
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            # Unlock for 5 seconds -- just long enough to sign a transaction.
            bitcoind.walletpassphrase(passphrase, 5)
        except:
            # Wrong passphrase raises an RPC error; report and fall through
            # so the caller can re-prompt.
            sys.stderr.write("Wrong passphrase\n")
    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    """Return a dict mapping each address with unspent outputs to
    {"total": Decimal, "outputs": [listunspent entries], "account": str}."""
    address_summary = dict()
    address_to_account = dict()
    for info in bitcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]
    # minconf=0: include unconfirmed outputs as spendable.
    unspent = bitcoind.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick unspent inputs until their combined amount covers
    ``needed``; return (selected_outpoints, change)."""
    # Simple first-fit selection; good enough for this tool's purposes.
    selected = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        selected.append({"txid": candidate["txid"], "vout": candidate["vout"]})
        gathered += candidate["amount"]
    return (selected, gathered - needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction sending *amount* (+ *fee*) from
    *fromaddresses* to *toaddress*; any change above BASE_FEE goes back
    to the last from-address. Returns the signed transaction hex."""
    all_coins = list_available(bitcoind)
    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to bitcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)
    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        # Signing can fail e.g. when the wallet lacks a required key.
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]
    return txdata
def compute_amount_in(bitcoind, txinfo):
    """Sum the values of all inputs of a decoded transaction by looking
    up each referenced previous output over the RPC connection."""
    total = Decimal("0.0")
    for tx_input in txinfo['vin']:
        prev_tx = bitcoind.getrawtransaction(tx_input['txid'], 1)
        prev_out = prev_tx['vout'][tx_input['vout']]
        total += prev_out['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of a decoded transaction."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    """Decode the signed transaction and abort (exit status 1) when its
    implied fee looks unreasonable: larger than *max_fee*, or absent on
    a large (>1000 byte) or tiny-amount transaction."""
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # The fee is whatever input value is not claimed by the outputs.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        # BUG FIX: the original referenced an undefined name `fee` on the
        # next two checks (a NameError at runtime); use the computed fee.
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point.  With no --amount, list spendable funds
    per address; otherwise build, sanity-check and (unless --dry_run)
    broadcast a transaction."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of bitcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)
    if options.amount is None:
        # No amount given: just report what is spendable.
        address_summary = list_available(bitcoind)
        # NOTE: iteritems() is Python 2 only, as is the rest of this script.
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Refuse to broadcast if the implied fee exceeds 1% of the amount.
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)
| mit |
danielfrg/jupyterhub-kubernetes_spawner | kubernetes_spawner/kube.py | 1 | 6588 |
import os
from . import swagger_client as swagger
from .swagger_client.models.v1_pod import V1Pod
from .swagger_client.models.v1_pod_spec import V1PodSpec
from .swagger_client.models.v1_object_meta import V1ObjectMeta
from .swagger_client.models.v1_container import V1Container
from .swagger_client.models.v1_container_port import V1ContainerPort
from .swagger_client.models.v1_env_var import V1EnvVar
from .swagger_client.models.v1_env_var_source import V1EnvVarSource
from .swagger_client.models.v1_volume import V1Volume
from .swagger_client.models.v1_volume_mount import V1VolumeMount
from .swagger_client.models.v1_persistent_volume_claim_volume_source import V1PersistentVolumeClaimVolumeSource
from .swagger_client.models.v1_nfs_volume_source import V1NFSVolumeSource
from .swagger_client.models.v1_object_field_selector import V1ObjectFieldSelector
from .swagger_client.models.v1_resource_requirements import V1ResourceRequirements
from .swagger_client.models.v1_glusterfs_volume_source import V1GlusterfsVolumeSource
class KubernetesClient(object):
    """Thin wrapper around the swagger-generated Kubernetes API client.

    Sets the authorization/content-type headers once and defaults the
    namespace to "default" for pod and service operations.
    """
    def __init__(self, host, token, verify_ssl=True, ssl_ca_cert=None):
        # `token` is the full Authorization header value (e.g. "Bearer x"
        # or a basic-auth token); ssl_ca_cert is accepted but currently
        # unused (see the commented line below).
        swagger.Configuration().verify_ssl = verify_ssl
        # swagger.Configuration().ssl_ca_cert = ssl_ca_cert
        self.client = swagger.ApiClient(host)
        self.client.default_headers["Authorization"] = token
        self.client.default_headers["Content-Type"] = "application/json"
        self.api = swagger.ApivApi(self.client)
        self.default_namespace = "default"
    @classmethod
    def from_username_password(cls, host, username, password, *args, **kwargs):
        """Alternate constructor using HTTP basic-auth credentials."""
        swagger.Configuration().username = username
        swagger.Configuration().password = password
        token = swagger.Configuration().get_basic_auth_token()
        return cls(host, token, *args, **kwargs)
    @classmethod
    def from_service_account(cls, host, *args, **kwargs):
        """Alternate constructor reading the in-cluster service-account
        token mounted by Kubernetes; raises if the token file is absent."""
        fpath = "/var/run/secrets/kubernetes.io/serviceaccount/token"
        if not os.path.exists(fpath):
            raise Exception("Token file '{}' not found".format(fpath))
        with open(fpath, "r") as f:
            token = "Bearer {}".format(f.read().strip())
        return cls(host, token, *args, **kwargs)
    def launch_pod(self, pod, namespace=None):
        """Create *pod* in *namespace* (default namespace when None)."""
        namespace = namespace or self.default_namespace
        self.api.create_namespaced_pod(pod, namespace=namespace)
    def get_pod(self, name, namespace=None):
        """Return the pod named *name*, or None if the API call fails."""
        namespace = namespace or self.default_namespace
        try:
            return self.api.read_namespaced_pod(name=name, namespace=namespace)
        except swagger.rest.ApiException:
            return None
    def delete_pod(self, name, namespace=None):
        """Delete the pod named *name*."""
        namespace = namespace or self.default_namespace
        self.api.delete_namespaced_pod(name=name, namespace=namespace, body={})
    def get_service(self, name, namespace=None):
        """Return the service named *name*."""
        namespace = namespace or self.default_namespace
        return self.api.read_namespaced_service(name=name, namespace=namespace)
class Pod(V1Pod):
    """Convenience subclass of the swagger V1Pod model that pre-fills
    kind/apiVersion/metadata and offers helpers for labels, containers
    and the supported volume source types (PVC, NFS, GlusterFS)."""
    def __init__(self, name, *args, **kwargs):
        super(Pod, self).__init__(*args, **kwargs)
        self.kind = "Pod"
        self.api_version = "v1"
        self.metadata = V1ObjectMeta()
        self.metadata.name = None
        self.metadata.labels = {}
        self.spec = V1PodSpec()
        self.spec.containers = []
        self.spec.volumes = []
        self._name = None
        self.name = name
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, name):
        # Keep metadata.name and the "name" label in sync with the pod name.
        self._name = name
        self.metadata.name = self._name
        self.add_label("name", name)
    def add_label(self, name, value):
        """Set (or overwrite) a metadata label."""
        self.metadata.labels.update({name: value})
    def add_container(self, container):
        """Append a container spec to the pod."""
        self.spec.containers.append(container)
    def add_pvc_volume(self, name, claim_name):
        """Attach a PersistentVolumeClaim-backed volume."""
        volume = V1Volume()
        volume.name = name
        pvc_source = V1PersistentVolumeClaimVolumeSource()
        pvc_source.claim_name = claim_name
        volume.persistent_volume_claim = pvc_source
        self.spec.volumes.append(volume)
    def add_nfs_volume(self, name, nfs_server_ip, nfs_server_share):
        """Attach an NFS-backed volume."""
        volume = V1Volume()
        volume.name = name
        nfs_source = V1NFSVolumeSource()
        nfs_source.server = nfs_server_ip
        nfs_source.path = nfs_server_share
        volume.nfs = nfs_source
        self.spec.volumes.append(volume)
    def add_glusterfs_volume(self, name, gluster_endpoint, gluster_path):
        """Attach a writable GlusterFS-backed volume."""
        volume = V1Volume()
        volume.name = name
        gfs_source = V1GlusterfsVolumeSource()
        gfs_source.read_only = False
        gfs_source.endpoints = gluster_endpoint
        gfs_source.path = gluster_path
        volume.glusterfs = gfs_source
        self.spec.volumes.append(volume)
class Container(V1Container):
    """Convenience subclass of the swagger V1Container model.

    Starts with a "{name}" placeholder name, a POD_IP downward-API env
    var, and default CPU/memory requests and limits; helpers add ports,
    env vars, volume mounts and the command."""
    def __init__(self, *args, **kwargs):
        super(Container, self).__init__(*args, **kwargs)
        self.name = "{name}"
        self.ports = []
        self.env = []
        self.volume_mounts = []
        self.add_pod_ip_env()
        self.add_default_resources()
    def add_port(self, port):
        """Expose *port* on the container."""
        port_ = V1ContainerPort()
        port_.container_port = port
        self.ports.append(port_)
    def add_env(self, name, value):
        """Add a literal environment variable."""
        env_ = V1EnvVar()
        env_.name = name
        env_.value = value
        self.env.append(env_)
    def add_pod_ip_env(self):
        """Expose the pod's IP as $POD_IP via the downward API
        (status.podIP field reference)."""
        env_ = V1EnvVar()
        env_.name = "POD_IP"
        field_selector = V1ObjectFieldSelector()
        field_selector.field_path = "status.podIP"
        env_source = V1EnvVarSource()
        env_source.field_ref = field_selector
        env_.value_from = env_source
        self.env.append(env_)
    def add_default_resources(self):
        # Requests and limits are identical, so pods get a guaranteed
        # quarter CPU and 1Gi of memory.
        self.resources = V1ResourceRequirements()
        self.resources.requests = {"cpu": 0.25, "memory": "1Gi"}
        self.resources.limits = {"cpu": 0.25, "memory": "1Gi"}
    def add_volume(self, name, path):
        """Mount the pod volume *name* at *path* inside the container."""
        volume_mount = V1VolumeMount()
        volume_mount.name = name
        volume_mount.mount_path = path
        self.volume_mounts.append(volume_mount)
    def set_command(self, command):
        # A plain string is split on spaces; a list is used verbatim.
        if isinstance(command, str):
            self.command = command.split(" ")
        if isinstance(command, list):
            self.command = command
class BaseContainer(Container):
    """Container with an explicit name and image (plain Container keeps
    the "{name}" placeholder until configured)."""
    def __init__(self, name, image, *args, **kwargs):
        super(BaseContainer, self).__init__(*args, **kwargs)
        self.name = name
        self.image = image
| apache-2.0 |
aio-libs/multidict | tests/test_abc.py | 1 | 3176 | from collections.abc import Mapping, MutableMapping
import pytest
from multidict import MultiMapping, MutableMultiMapping
from multidict._compat import USE_CYTHON
from multidict._multidict_py import CIMultiDict as PyCIMultiDict
from multidict._multidict_py import CIMultiDictProxy as PyCIMultiDictProxy
from multidict._multidict_py import MultiDict as PyMultiDict # noqa: E402
from multidict._multidict_py import MultiDictProxy as PyMultiDictProxy
if USE_CYTHON:
from multidict._multidict import ( # type: ignore
CIMultiDict,
CIMultiDictProxy,
MultiDict,
MultiDictProxy,
)
@pytest.fixture(
    params=([MultiDict, CIMultiDict] if USE_CYTHON else [])
    + [PyMultiDict, PyCIMultiDict],
    ids=(["MultiDict", "CIMultiDict"] if USE_CYTHON else [])
    + ["PyMultiDict", "PyCIMultiDict"],
)
def cls(request):
    # Parametrize over every available MultiDict implementation: the
    # pure-Python classes always, plus the C versions when compiled.
    return request.param
@pytest.fixture(
    params=(
        [(MultiDictProxy, MultiDict), (CIMultiDictProxy, CIMultiDict)]
        if USE_CYTHON
        else []
    )
    + [(PyMultiDictProxy, PyMultiDict), (PyCIMultiDictProxy, PyCIMultiDict)],
    ids=(["MultiDictProxy", "CIMultiDictProxy"] if USE_CYTHON else [])
    + ["PyMultiDictProxy", "PyCIMultiDictProxy"],
)
def proxy_classes(request):
    # Each param is a (proxy_class, dict_class) pair, covering both the
    # pure-Python and (when compiled) C implementations.
    return request.param
def test_abc_inheritance():
    # MultiMapping is read-only; MutableMultiMapping adds mutation.
    assert issubclass(MultiMapping, Mapping)
    assert not issubclass(MultiMapping, MutableMapping)
    assert issubclass(MutableMultiMapping, Mapping)
    assert issubclass(MutableMultiMapping, MutableMapping)
class A(MultiMapping):
    """Minimal concrete MultiMapping used to exercise the ABC's default
    getall()/getone() implementations (which raise KeyError)."""
    def __getitem__(self, key):
        pass
    def __iter__(self):
        pass
    def __len__(self):
        pass
    def getall(self, key, default=None):
        # Delegate to the ABC default to test its behaviour.
        super().getall(key, default)
    def getone(self, key, default=None):
        super().getone(key, default)
def test_abc_getall():
    # The ABC's default getall() raises KeyError for missing keys.
    with pytest.raises(KeyError):
        A().getall("key")
def test_abc_getone():
    # The ABC's default getone() raises KeyError for missing keys.
    with pytest.raises(KeyError):
        A().getone("key")
class B(A, MutableMultiMapping):
    """Minimal concrete MutableMultiMapping used to exercise the ABC's
    default add()/extend()/popall()/popone() implementations."""
    def __setitem__(self, key, value):
        pass
    def __delitem__(self, key):
        pass
    def add(self, key, value):
        # Delegate to the ABC default to test its behaviour.
        super().add(key, value)
    def extend(self, *args, **kwargs):
        super().extend(*args, **kwargs)
    def popall(self, key, default=None):
        super().popall(key, default)
    def popone(self, key, default=None):
        super().popone(key, default)
def test_abc_add():
    # add() has no default implementation in the ABC.
    with pytest.raises(NotImplementedError):
        B().add("key", "val")
def test_abc_extend():
    # extend() has no default implementation in the ABC.
    with pytest.raises(NotImplementedError):
        B().extend()
def test_abc_popone():
    # The ABC's default popone() raises KeyError for missing keys.
    with pytest.raises(KeyError):
        B().popone("key")
def test_abc_popall():
    # The ABC's default popall() raises KeyError for missing keys.
    with pytest.raises(KeyError):
        B().popall("key")
def test_multidict_inheritance(cls):
    # Every concrete MultiDict implementation registers as both ABCs.
    assert issubclass(cls, MultiMapping)
    assert issubclass(cls, MutableMultiMapping)
def test_proxy_inheritance(proxy_classes):
    # Proxies are read-only views: MultiMapping but never mutable.
    proxy, _ = proxy_classes
    assert issubclass(proxy, MultiMapping)
    assert not issubclass(proxy, MutableMultiMapping)
def test_generic_type_in_runtime():
    # Subscripting the ABCs must work at runtime (PEP 560 support).
    MultiMapping[str]
    MutableMultiMapping[str]
| apache-2.0 |
taedori81/gentlecoffee | saleor/registration/views.py | 13 | 5525 | try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login as auth_login, logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import (
login as django_login_view, password_change)
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.utils import timezone
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from . import forms
from .models import EmailConfirmationRequest, EmailChangeRequest
from . import utils
now = timezone.now
def login(request):
    """Render the login page, adding OAuth2 (Facebook/Google) login URLs
    to the standard Django login view's context."""
    local_host = utils.get_local_host(request)
    ctx = {
        'facebook_login_url': utils.get_facebook_login_url(local_host),
        'google_login_url': utils.get_google_login_url(local_host)}
    return django_login_view(request, authentication_form=forms.LoginForm,
                             extra_context=ctx)
def logout(request):
    """Log the user out and redirect to the post-login landing page."""
    auth_logout(request)
    messages.success(request, _('You have been successfully logged out.'))
    return redirect(settings.LOGIN_REDIRECT_URL)
def oauth_callback(request, service):
    """Handle the OAuth2 redirect from *service*: exchange the callback
    data for an authenticated user and log them in, or surface the form
    errors and return to the login page."""
    local_host = utils.get_local_host(request)
    form = forms.OAuth2CallbackForm(service=service, local_host=local_host,
                                    data=request.GET)
    if form.is_valid():
        try:
            user = form.get_authenticated_user()
        except ValueError as e:
            messages.error(request, smart_text(e))
        else:
            auth_login(request, user=user)
            messages.success(request, _('You are now logged in.'))
            return redirect(settings.LOGIN_REDIRECT_URL)
    else:
        # `_field` (not `_`) avoids shadowing the ugettext_lazy alias.
        for _field, errors in form.errors.items():
            for error in errors:
                messages.error(request, error)
    return redirect('registration:login')
def request_email_confirmation(request):
    """Show/process the form that e-mails a confirmation link for a new
    account's address."""
    local_host = utils.get_local_host(request)
    form = forms.RequestEmailConfirmationForm(local_host=local_host,
                                              data=request.POST or None)
    if form.is_valid():
        form.send()
        msg = _('Confirmation email has been sent. '
                'Please check your inbox.')
        messages.success(request, msg)
        return redirect(settings.LOGIN_REDIRECT_URL)
    return TemplateResponse(request,
                            'registration/request_email_confirmation.html',
                            {'form': form})
@login_required
def request_email_change(request):
    """Show/process the form that e-mails a confirmation link to change
    the logged-in user's address."""
    form = forms.RequestEmailChangeForm(
        local_host=utils.get_local_host(request), user=request.user,
        data=request.POST or None)
    if form.is_valid():
        form.send()
        msg = _('Confirmation email has been sent. '
                'Please check your inbox.')
        messages.success(request, msg)
        return redirect(settings.LOGIN_REDIRECT_URL)
    return TemplateResponse(
        request, 'registration/request_email_confirmation.html',
        {'form': form})
def confirm_email(request, token):
    """Confirm an e-mail address via *token*: on GET, validate the token,
    log the user in and show a set/remove-password form; on POST, apply
    the password choice."""
    if not request.POST:
        try:
            email_confirmation_request = EmailConfirmationRequest.objects.get(
                token=token, valid_until__gte=now())
            # TODO: cronjob (celery task) to delete stale tokens
        except EmailConfirmationRequest.DoesNotExist:
            return TemplateResponse(request, 'registration/invalid_token.html')
        user = email_confirmation_request.get_authenticated_user()
        # The token is single-use: delete it as soon as it is consumed.
        email_confirmation_request.delete()
        auth_login(request, user)
        messages.success(request, _('You are now logged in.'))
    # NOTE(review): on POST the token is not re-validated; the user is
    # expected to already be authenticated from the GET step -- confirm.
    form = forms.SetOrRemovePasswordForm(user=request.user,
                                         data=request.POST or None)
    if form.is_valid():
        form.save()
        messages.success(request, _('Password has been successfully changed.'))
        return redirect(settings.LOGIN_REDIRECT_URL)
    return TemplateResponse(
        request, 'registration/set_password.html', {'form': form})
def change_email(request, token):
    """Apply an e-mail change via *token*, requiring the token's owner to
    be the logged-in user (logging out anyone else first)."""
    try:
        email_change_request = EmailChangeRequest.objects.get(
            token=token, valid_until__gte=now())
        # TODO: cronjob (celery task) to delete stale tokens
    except EmailChangeRequest.DoesNotExist:
        return TemplateResponse(request, 'registration/invalid_token.html')
    # if another user is logged in, we need to log him out, to allow the email
    # owner confirm his identity
    if (request.user.is_authenticated() and
            request.user != email_change_request.user):
        auth_logout(request)
    if not request.user.is_authenticated():
        # Send the owner through login and back here, pre-filling the
        # e-mail address the change request belongs to.
        query = urlencode({
            'next': request.get_full_path(),
            'email': email_change_request.user.email})
        login_url = utils.url(path=settings.LOGIN_URL, query=query)
        return redirect(login_url)
    request.user.email = email_change_request.email
    request.user.save()
    # The token is single-use: delete it once the change is applied.
    email_change_request.delete()
    messages.success(request, _('Your email has been successfully changed'))
    return redirect(settings.LOGIN_REDIRECT_URL)
def change_password(request):
    """Delegate to Django's password_change view with this app's template,
    redirecting to the profile page on success."""
    return password_change(
        request, template_name='registration/change_password.html',
        post_change_redirect=reverse('profile:details'))
| bsd-3-clause |
obimod/taiga-back | tests/integration/test_custom_attributes_tasks.py | 20 | 7862 | # Copyright (C) 2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2015 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from .. import factories as f
import pytest
pytestmark = pytest.mark.django_db
#########################################################
# Task Custom Attributes
#########################################################
def test_task_custom_attribute_duplicate_name_error_on_create(client):
    """Creating a custom attribute whose name already exists in the same
    project must be rejected with HTTP 400."""
    custom_attr_1 = f.TaskCustomAttributeFactory()
    member = f.MembershipFactory(user=custom_attr_1.project.owner,
                                 project=custom_attr_1.project,
                                 is_owner=True)
    url = reverse("task-custom-attributes-list")
    data = {"name": custom_attr_1.name,
            "project": custom_attr_1.project.pk}
    client.login(member.user)
    response = client.json.post(url, json.dumps(data))
    assert response.status_code == 400
def test_task_custom_attribute_duplicate_name_error_on_update(client):
    """Renaming a custom attribute to a name already used by another
    attribute of the same project must be rejected with HTTP 400."""
    attr_a = f.TaskCustomAttributeFactory()
    attr_b = f.TaskCustomAttributeFactory(project=attr_a.project)
    membership = f.MembershipFactory(user=attr_a.project.owner,
                                     project=attr_a.project,
                                     is_owner=True)
    client.login(membership.user)
    url = reverse("task-custom-attributes-detail", kwargs={"pk": attr_b.pk})
    response = client.json.patch(url, json.dumps({"name": attr_a.name}))
    assert response.status_code == 400
def test_task_custom_attribute_duplicate_name_error_on_move_between_projects(client):
    """Moving a custom attribute into a project that already has one with
    the same name must be rejected with HTTP 400."""
    attr_a = f.TaskCustomAttributeFactory()
    attr_b = f.TaskCustomAttributeFactory(name=attr_a.name)
    membership = f.MembershipFactory(user=attr_a.project.owner,
                                     project=attr_a.project,
                                     is_owner=True)
    # The same user must also be an owner of the target project.
    f.MembershipFactory(user=attr_a.project.owner,
                        project=attr_b.project,
                        is_owner=True)
    client.login(membership.user)
    url = reverse("task-custom-attributes-detail", kwargs={"pk": attr_b.pk})
    response = client.json.patch(url, json.dumps({"project": attr_a.project.pk}))
    assert response.status_code == 400
#########################################################
# Task Custom Attributes Values
#########################################################
def test_task_custom_attributes_values_when_create_us(client):
    """A freshly created task starts with an empty custom-attributes mapping."""
    new_task = f.TaskFactory()
    assert new_task.custom_attributes_values.attributes_values == {}
def test_task_custom_attributes_values_update(client):
    """Patching the custom-attributes-values endpoint stores the new
    values and reports them both in the response and in the DB."""
    task = f.TaskFactory()
    member = f.MembershipFactory(user=task.project.owner,
                                 project=task.project,
                                 is_owner=True)
    custom_attr_1 = f.TaskCustomAttributeFactory(project=task.project)
    ct1_id = "{}".format(custom_attr_1.id)
    custom_attr_2 = f.TaskCustomAttributeFactory(project=task.project)
    ct2_id = "{}".format(custom_attr_2.id)
    custom_attrs_val = task.custom_attributes_values
    url = reverse("task-custom-attributes-values-detail", args=[task.id])
    data = {
        "attributes_values": {
            ct1_id: "test_1_updated",
            ct2_id: "test_2_updated"
        },
        # Optimistic-concurrency token: the PATCH is rejected on mismatch.
        "version": custom_attrs_val.version
    }
    assert task.custom_attributes_values.attributes_values == {}
    client.login(member.user)
    response = client.json.patch(url, json.dumps(data))
    assert response.status_code == 200
    assert response.data["attributes_values"] == data["attributes_values"]
    # Reload from the DB to make sure the values were actually persisted.
    task = task.__class__.objects.get(id=task.id)
    assert task.custom_attributes_values.attributes_values == data["attributes_values"]
def test_task_custom_attributes_values_update_with_error_invalid_key(client):
    """A key that matches no custom attribute of the project ("123456")
    must make the whole PATCH fail with HTTP 400."""
    task = f.TaskFactory()
    member = f.MembershipFactory(user=task.project.owner,
                                 project=task.project,
                                 is_owner=True)
    custom_attr_1 = f.TaskCustomAttributeFactory(project=task.project)
    ct1_id = "{}".format(custom_attr_1.id)
    # Created on purpose, but its id is never sent in the payload.
    custom_attr_2 = f.TaskCustomAttributeFactory(project=task.project)
    custom_attrs_val = task.custom_attributes_values
    url = reverse("task-custom-attributes-values-detail", args=[task.id])
    data = {
        "attributes_values": {
            ct1_id: "test_1_updated",
            "123456": "test_2_updated"
        },
        "version": custom_attrs_val.version
    }
    assert task.custom_attributes_values.attributes_values == {}
    client.login(member.user)
    response = client.json.patch(url, json.dumps(data))
    assert response.status_code == 400
def test_task_custom_attributes_values_delete_task(client):
    """Deleting a task must also delete its custom-attributes-values row."""
    task = f.TaskFactory()
    member = f.MembershipFactory(user=task.project.owner,
                                 project=task.project,
                                 is_owner=True)
    custom_attr_1 = f.TaskCustomAttributeFactory(project=task.project)
    ct1_id = "{}".format(custom_attr_1.id)
    custom_attr_2 = f.TaskCustomAttributeFactory(project=task.project)
    ct2_id = "{}".format(custom_attr_2.id)
    custom_attrs_val = task.custom_attributes_values
    url = reverse("tasks-detail", args=[task.id])
    client.login(member.user)
    response = client.json.delete(url)
    assert response.status_code == 204
    # Both the task and its values object must be gone.
    assert not task.__class__.objects.filter(id=task.id).exists()
    assert not custom_attrs_val.__class__.objects.filter(id=custom_attrs_val.id).exists()
#########################################################
# Triggers: keep stored custom values in sync with attributes
#########################################################
def test_trigger_update_taskcustomvalues_afeter_remove_taskcustomattribute(client):
    # ("afeter" [sic] -- renaming would change the test id, so it is kept.)
    # Deleting a custom attribute must strip the matching key from every
    # stored attributes_values mapping, leaving the other keys intact.
    task = f.TaskFactory()
    member = f.MembershipFactory(user=task.project.owner,
                                 project=task.project,
                                 is_owner=True)
    custom_attr_1 = f.TaskCustomAttributeFactory(project=task.project)
    ct1_id = "{}".format(custom_attr_1.id)
    custom_attr_2 = f.TaskCustomAttributeFactory(project=task.project)
    ct2_id = "{}".format(custom_attr_2.id)
    custom_attrs_val = task.custom_attributes_values
    custom_attrs_val.attributes_values = {ct1_id: "test_1", ct2_id: "test_2"}
    custom_attrs_val.save()
    assert ct1_id in custom_attrs_val.attributes_values.keys()
    assert ct2_id in custom_attrs_val.attributes_values.keys()
    url = reverse("task-custom-attributes-detail", kwargs={"pk": custom_attr_2.pk})
    client.login(member.user)
    response = client.json.delete(url)
    assert response.status_code == 204
    # Reload: the deleted attribute's key must be gone, the other kept.
    custom_attrs_val = custom_attrs_val.__class__.objects.get(id=custom_attrs_val.id)
    assert not custom_attr_2.__class__.objects.filter(pk=custom_attr_2.pk).exists()
    assert ct1_id in custom_attrs_val.attributes_values.keys()
    assert ct2_id not in custom_attrs_val.attributes_values.keys()
| agpl-3.0 |
dmilith/SublimeText3-dmilith | Package Storage/lsp_utils/node-runtime/12.20.2/node/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py | 11 | 1333 | # Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies so large structure that small copy overhead ends up
taking seconds in a project the size of Chromium."""
# Raised when deepcopy() is asked to copy an unsupported type.
class Error(Exception):
  pass
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
  """Deep copy operation on gyp objects such as strings, ints, dicts
  and lists. More than twice as fast as copy.deepcopy but much less
  generic.

  Raises Error for types not registered in _deepcopy_dispatch.
  """
  try:
    return _deepcopy_dispatch[type(x)](x)
  except KeyError:
    # Bug fix: '%' binds tighter than '+', so the original applied the
    # format argument to only the second literal (which has no '%s'),
    # raising TypeError instead of this intended Error.  Adjacent string
    # literals concatenate before '%' is applied.
    raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy '
                'or expand simple_copy support.' % type(x))
# Dispatch table mapping a concrete type to its copy routine; 'd' is a
# short alias used only while the table is populated below.
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x):
  # Immutable scalars can be shared rather than copied.
  return x
try:
  # Python 2: 'long' and 'unicode' exist as distinct atomic types.
  types = bool, float, int, str, type, type(None), long, unicode
except NameError: # Python 3
  types = bool, float, int, str, type, type(None)
for x in types:
  d[x] = _deepcopy_atomic
def _deepcopy_list(x):
  # Recursively copy each element into a brand-new list.
  return list(map(deepcopy, x))
d[list] = _deepcopy_list
def _deepcopy_dict(x):
  # Rebuild the dict, copying keys as well as values.
  copied = {}
  for key, value in x.items():
    copied[deepcopy(key)] = deepcopy(value)
  return copied
d[dict] = _deepcopy_dict
# Drop the short-lived alias now that the table is fully populated.
del d
| mit |
Austin503/pyglet | contrib/layout/layout/gl/__init__.py | 29 | 6125 | #!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from pyglet.gl import *
from pyglet.event import *
from layout.css import *
from layout.content import *
from layout.frame import *
from layout.locator import *
from layout.view import *
from layout.gl.device import *
from layout.gl.event import *
from layout.gl.image import *
from layout.builders.htmlbuilder import *
from layout.builders.xmlbuilder import *
from layout.builders.xhtmlbuilder import *
__all__ = ['Layout', 'select']
class GLLayout(LayoutEventDispatcher):
    """OpenGL-backed layout: owns a document and the view that renders
    it, and routes window/mouse events to document elements."""

    # Disable this if you don't want the layout to resize with the window
    # and position itself to cover the entire window automatically.
    size_to_window = True

    def __init__(self, render_device=None, locator=None):
        super(GLLayout, self).__init__()
        self.replaced_element_factories = []
        if not locator:
            locator = LocalFileLocator()
        self.locator = locator
        if not render_device:
            render_device = GLRenderDevice(self.locator)
        self.render_device = render_device
        self.document = Document()
        self.view = DocumentView(self.render_device, self.document)
        self.add_replaced_element_factory(ImageReplacedElementFactory(locator))
        # Bug fix: the hover-tracking set used to be created only in
        # set_data(), so mouse events delivered before any document was
        # loaded raised AttributeError.  Initialise it here as well.
        self._mouse_over_elements = set()
        # If the layout is added to a window event stack, the following
        # variables are taken care of automatically (x, y, viewport).
        # Position of layout within projection (window, +ve Y is up)
        self.x = 0
        self.y = 0

    def add_replaced_element_factory(self, factory):
        # XXX duplication
        self.replaced_element_factories.append(factory)
        self.view.frame_builder.add_replaced_element_factory(factory)

    def set_data(self, data, builder_class):
        """Replace the current document by parsing `data` with the given
        builder class (see set_html / set_xhtml convenience wrappers)."""
        self.document = Document()
        self.view.set_document(self.document)
        # Re-register the factories on the freshly created frame builder.
        for factory in self.replaced_element_factories:
            self.view.frame_builder.add_replaced_element_factory(factory)
        builder = builder_class(self.document)
        builder.feed(data)
        builder.close()
        self._mouse_over_elements = set()

    def set_xhtml(self, data):
        self.set_data(data, XHTMLBuilder)

    def set_html(self, data):
        self.set_data(data, HTMLBuilder)

    # Duplicate the public properties of DocumentView here for convenience
    def set_viewport_x(self, x):
        self.view.viewport_x = x
    viewport_x = property(lambda self: self.view.viewport_x,
                          set_viewport_x)

    def set_viewport_y(self, y):
        self.view.viewport_y = y
    viewport_y = property(lambda self: self.view.viewport_y,
                          set_viewport_y)

    def set_viewport_width(self, width):
        self.view.viewport_width = width
    viewport_width = property(lambda self: self.view.viewport_width,
                              set_viewport_width)

    def set_viewport_height(self, height):
        self.view.viewport_height = height
    viewport_height = property(lambda self: self.view.viewport_height,
                               set_viewport_height)

    canvas_width = property(lambda self: self.view.canvas_width)
    canvas_height = property(lambda self: self.view.canvas_height)

    def draw(self):
        # Draw relative to (self.x, self.y) in window coordinates.
        glPushMatrix()
        glLoadIdentity()
        glTranslatef(self.x, self.y, 0)
        self.view.draw()
        glPopMatrix()

    def constrain_viewport(self):
        '''Ensure the viewport is not showing anything that's not the
        canvas, unless the canvas is smaller than the viewport, in which
        case it will be aligned top/left.
        '''
        if self.canvas_width < self.viewport_width:
            self.viewport_x = 0
        else:
            self.viewport_x = min(max(0, self.viewport_x),
                                  self.canvas_width - self.viewport_width)
        if self.canvas_height < self.viewport_height:
            self.viewport_y = 0
        else:
            self.viewport_y = min(max(0, self.viewport_y),
                                  self.canvas_height - self.viewport_height)

    # Window event handlers.
    def on_resize(self, width, height):
        if self.size_to_window:
            self.view.viewport_width = width
            self.view.viewport_height = height
            self.x = 0
            self.y = height
        self.constrain_viewport()
        # Let other handlers on the stack see the resize as well.
        return EVENT_UNHANDLED

    def on_mouse_scroll(self, x, y, dx, dy):
        # 30 px per scroll unit; vertical scroll delta is inverted.
        self.viewport_x += dx * 30
        self.viewport_y -= dy * 30
        self.constrain_viewport()

    def on_mouse_press(self, x, y, button, modifiers):
        x -= self.x
        y -= self.y
        elements = self.view.get_elements_for_point(x, y)
        # Dispatch innermost element first; stop at the first handler.
        for element in elements[::-1]:
            handled = self.dispatch_event(element, 'on_mouse_press',
                                          x, y, button, modifiers)
            if handled:
                return EVENT_HANDLED
        return EVENT_UNHANDLED

    def on_mouse_motion(self, x, y, dx, dy):
        x -= self.x
        y -= self.y
        elements = self.view.get_elements_for_point(x, y)
        elements_set = set(elements)
        # Elements the pointer just left lose :hover ...
        for element in self._mouse_over_elements - elements_set:
            self.dispatch_event(element, 'on_mouse_leave', x, y)
            element.remove_pseudo_class('hover')
            self.document.element_style_modified(element)
        # ... and newly entered elements gain it.
        for element in elements_set - self._mouse_over_elements:
            self.dispatch_event(element, 'on_mouse_enter', x, y)
            element.add_pseudo_class('hover')
            self.document.element_style_modified(element)
        self._mouse_over_elements = elements_set

    def on_mouse_leave(self, x, y):
        x -= self.x
        y -= self.y
        for element in self._mouse_over_elements:
            self.dispatch_event(element, 'on_mouse_leave', x, y)
            element.remove_pseudo_class('hover')
            self.document.element_style_modified(element)
        self._mouse_over_elements = set()

# As long as there's not going to be any other render device around, call
# this one by a nicer name.
Layout = GLLayout
| bsd-3-clause |
wuhengzhi/chromium-crosswalk | tools/perf/benchmarks/media.py | 3 | 4626 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from telemetry import benchmark
from telemetry.page import legacy_page_test
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
from measurements import media
import page_sets
class _MSEMeasurement(legacy_page_test.LegacyPageTest):
  # Collects Media Source Extensions timing metrics that the test pages
  # publish via the window.__testMetrics JavaScript object.
  def __init__(self):
    super(_MSEMeasurement, self).__init__()
  def ValidateAndMeasurePage(self, page, tab, results):
    # 'id' labels the measured element; 'metrics' maps metric names to
    # millisecond values (presumably a dict -- it is indexed by key
    # below, so the [] fallback would only work when it is empty).
    media_metric = tab.EvaluateJavaScript('window.__testMetrics')
    trace = media_metric['id'] if 'id' in media_metric else None
    metrics = media_metric['metrics'] if 'metrics' in media_metric else []
    for m in metrics:
      trace_name = '%s.%s' % (m, trace)
      if isinstance(metrics[m], list):
        # A list of samples becomes a ListOfScalarValues result.
        results.AddValue(list_of_scalar_values.ListOfScalarValues(
            results.current_page, trace_name, units='ms',
            values=[float(v) for v in metrics[m]],
            important=True))
      else:
        results.AddValue(scalar.ScalarValue(
            results.current_page, trace_name, units='ms',
            value=float(metrics[m]), important=True))
# android: See media.android.tough_video_cases below
# win8: crbug.com/531618
# crbug.com/565180: Only include cases that report time_to_play
@benchmark.Disabled('android', 'win8')
class Media(perf_benchmark.PerfBenchmark):
  """Obtains media metrics for key user scenarios."""
  # Measurement class Telemetry runs against each page in page_set.
  test = media.Media
  page_set = page_sets.ToughVideoCasesPageSet
  @classmethod
  def Name(cls):
    # Benchmark id shown on the perf dashboard.
    return 'media.tough_video_cases'
# crbug.com/565180: Only include cases that don't report time_to_play
@benchmark.Disabled('android', 'win8')
class MediaExtra(perf_benchmark.PerfBenchmark):
  """Obtains extra media metrics for key user scenarios."""
  test = media.Media
  # Same measurement as Media, different page set.
  page_set = page_sets.ToughVideoCasesExtraPageSet
  @classmethod
  def Name(cls):
    return 'media.tough_video_cases_extra'
@benchmark.Disabled('android', 'mac')
class MediaNetworkSimulation(perf_benchmark.PerfBenchmark):
  """Obtains media metrics under different network simulations."""
  test = media.Media
  # CNS = constrained network simulation page set.
  page_set = page_sets.MediaCnsCasesPageSet
  @classmethod
  def Name(cls):
    return 'media.media_cns_cases'
@benchmark.Enabled('android')
@benchmark.Disabled('l', 'android-webview') # WebView: crbug.com/419689
class MediaAndroid(perf_benchmark.PerfBenchmark):
  """Obtains media metrics for key user scenarios on Android."""
  test = media.Media
  tag = 'android'
  page_set = page_sets.ToughVideoCasesPageSet
  # Exclude is_4k and 50 fps media files (garden* & crowd*).
  options = {'story_label_filter_exclude': 'is_4k,is_50fps'}
  @classmethod
  def ShouldDisable(cls, possible_browser): # crbug.com/448092
    """Disable test for Android One device."""
    return cls.IsSvelte(possible_browser)
  @classmethod
  def Name(cls):
    return 'media.android.tough_video_cases'
@benchmark.Enabled('chromeos')
class MediaChromeOS4kOnly(perf_benchmark.PerfBenchmark):
  """Benchmark for media performance on ChromeOS using only is_4k test content.
  """
  test = media.Media
  tag = 'chromeOS4kOnly'
  page_set = page_sets.ToughVideoCasesPageSet
  # Restrict the page set to the 4k stories only.
  options = {
      'story_label_filter': 'is_4k',
      # Exclude is_50fps test files: crbug/331816
      'story_label_filter_exclude': 'is_50fps'
  }
  @classmethod
  def Name(cls):
    return 'media.chromeOS4kOnly.tough_video_cases'
@benchmark.Enabled('chromeos')
class MediaChromeOS(perf_benchmark.PerfBenchmark):
  """Benchmark for media performance on all ChromeOS platforms.
  This benchmark does not run is_4k content, there's a separate benchmark for
  that.
  """
  test = media.Media
  tag = 'chromeOS'
  page_set = page_sets.ToughVideoCasesPageSet
  # Exclude is_50fps test files: crbug/331816
  options = {'story_label_filter_exclude': 'is_4k,is_50fps'}
  @classmethod
  def Name(cls):
    return 'media.chromeOS.tough_video_cases'
@benchmark.Disabled('android-webview') # crbug.com/419689
class MediaSourceExtensions(perf_benchmark.PerfBenchmark):
  """Obtains media metrics for key media source extensions functions."""
  # Uses the custom MSE measurement defined above.
  test = _MSEMeasurement
  page_set = page_sets.MseCasesPageSet
  @classmethod
  def Name(cls):
    return 'media.mse_cases'
  def SetExtraBrowserOptions(self, options):
    # Needed to allow XHR requests to return stream objects.
    options.AppendExtraBrowserArgs(
        ['--enable-experimental-web-platform-features',
         '--disable-gesture-requirement-for-media-playback'])
| bsd-3-clause |
moolitayer/cockpit | test-avocado/libnetwork.py | 26 | 3621 | # -*- coding: utf-8 -*-
# This file is part of Cockpit.
#
# Copyright (C) 2015 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
# This library have to be run under root or other user who has rights
# for modprobe, brctl, ip tools
# !!!!!! at the end, please call "clear" function for destory created ifaces and bridge
# usecase:
# a=Network('brname'); a.addiface('x1'); DOWHATEWERYOUWANTWITHx1 ;a.clear()
import re
from avocado.utils import process
class Network():
def __init__(self, brname=None):
self.brname = brname
self.interfaces=[]
process.run("modprobe veth", shell=True)
if self.brname and self.checkifbridgeexist():
print "adding bridge " + self.brname
self.createbridge()
def clear(self):
self.deleteallinterfaces()
if self.brname and not self.checkifbridgeexist():
print "deleting bridge " + self.brname
self.delbridge()
def checkifbridgeexist(self):
out = process.run("brctl show", shell=True)
if re.search('%s\s+' % self.brname ,out.stdout) is None:
return True
else:
return False
def createbridge(self):
process.run("brctl addbr %s" % self.brname, shell=True)
process.run("brctl stp %s off" % self.brname, shell=True)
process.run("ip link set dev %s up" % self.brname, shell=True)
def delbridge(self):
process.run("ip link set dev %s down" % self.brname, shell=True)
process.run("brctl delbr %s" % self.brname, shell=True)
def addiface(self, ifname, bridge=True):
if ifname in self.interfaces:
raise("Unable to add network interface %s (already exit)" % ifname)
process.run("ip link add name %sbr type veth peer name %s" % (ifname, ifname), shell=True)
process.run("ip link set dev %sbr up" % ifname, shell=True)
process.run("ip link set dev %s up" % ifname, shell=True)
if self.brname and bridge:
process.run("brctl addif %s %s" % (self.brname,ifname), shell=True)
self.interfaces.append(ifname)
def isifinbridge(self,ifname):
if self.brname:
out = process.run("brctl show %s" % self.brname, shell=True)
if re.search('\s+%s$' % ifname ,out.stdout):
return True
return False
def deliface(self, ifname):
if ifname in self.interfaces:
if self.isifinbridge(ifname):
process.run("brctl delif %s %s" % (self.brname,ifname), shell=True)
process.run("ip link set dev %s down" % ifname, shell=True)
process.run("ip link set dev %sbr down" % ifname, shell=True)
process.run("ip link del dev %sbr type veth" % ifname, shell=True)
self.interfaces.remove(ifname)
else:
raise ("Unable to remove interface %s (does not exist)" % ifname)
def deleteallinterfaces(self):
for interface in self.interfaces:
self.deliface(interface)
| lgpl-2.1 |
cmvac/demagorgon.repository | plugin.audio.m80/jscrypto.py | 77 | 4201 | import hashlib
import json
import base64
import pyaes
from pkcs7 import PKCS7Encoder
import os, urllib2,urllib
import cookielib
def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"):
    """Port of OpenSSL's EVP_BytesToKey derivation (as used by CryptoJS
    and "openssl enc"): repeatedly hash previous-digest + passwd + salt
    and concatenate digests until key_size + iv_size 32-bit words exist.

    Returns {'key': first key_size*4 bytes, 'iv': the rest}.  Sizes are
    in 32-bit words.  NOTE: Python 2 code -- digests are concatenated as
    str and len(block)/4 relies on integer division.
    """
    target_key_size = key_size + iv_size
    derived_bytes = ""
    number_of_derived_words = 0
    block = None
    hasher = hashlib.new(hash_algorithm)
    while number_of_derived_words < target_key_size:
        if block is not None:
            hasher.update(block)
        hasher.update(passwd)
        hasher.update(salt)
        block = hasher.digest()
        hasher = hashlib.new(hash_algorithm)
        # Optional key stretching: iterations > 1 re-hashes the block.
        for i in range(1, iterations):
            hasher.update(block)
            block = hasher.digest()
            hasher = hashlib.new(hash_algorithm)
        # Take at most the number of bytes still missing.
        derived_bytes += block[0: min(len(block), (target_key_size - number_of_derived_words) * 4)]
        number_of_derived_words += len(block)/4
    return {
        "key": derived_bytes[0: key_size * 4],
        "iv": derived_bytes[key_size * 4:]
    }
def encode(plaintext,passphrase,saltsize=8):
    """Encrypt plaintext with AES-CBC in the OpenSSL/CryptoJS container
    format: base64("Salted__" + salt + ciphertext), key/IV derived from
    the passphrase and a random salt via evpKDF."""
    salt= os.urandom(saltsize)
    data = evpKDF(passphrase,salt)
    decryptor = pyaes.new(data['key'], pyaes.MODE_CBC, IV=data['iv'])
    # PKCS#7-pad to the AES block size before encrypting.
    plaintext = PKCS7Encoder().encode(plaintext)
    enctext= decryptor.encrypt(plaintext)
    return base64.b64encode("Salted__"+salt+enctext)
# If salt is provided it should be a str; otherwise it is read from the
# "Salted__" container header.  ciphertext is base64, passphrase is str.
def decode(ciphertext,passphrase,salt=None):
    """Decrypt an OpenSSL/CryptoJS "Salted__" AES-CBC container and
    strip the PKCS#7 padding."""
    ciphertext=base64.b64decode(ciphertext)
    if not salt:
        # Container layout: "Salted__" (8 bytes) + salt (8) + ciphertext.
        salt=ciphertext[8:16]
        ciphertext=ciphertext[16:]
    data = evpKDF(passphrase, salt)
    decryptor = pyaes.new(data['key'], pyaes.MODE_CBC, IV=data['iv'])
    d= decryptor.decrypt(ciphertext)
    return PKCS7Encoder().decode(d)
def getUrl(url, cookieJar=None, post=None, timeout=20, headers=None):
    """Fetch a URL (POSTing `post` when given) with cookie support and a
    browser-like User-Agent; return the response body.

    headers, if given, is an iterable of (name, value) pairs.
    """
    cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
    opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
    req = urllib2.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
    if headers:
        for h, hv in headers:
            req.add_header(h, hv)
    response = opener.open(req, post, timeout=timeout)
    try:
        link = response.read()
    finally:
        # Bug fix: close the response even when read() raises, so the
        # connection is not leaked.
        response.close()
    return link
def gettvnDecryptedURL(cookiejar=None,globalkey="XXX",passphrase="turbo", videoid="835", ref="http://www.moje-filmy.tk/tv/tvn", pubkeyurl='http://www.moje-filmy.tk/film/cryption/getPublicKey', handshakeurl="http://www.moje-filmy.tk/film/cryption/handshake", getvideourl="http://www.moje-filmy.tk/tv/get"):
    """Perform the site's crypto handshake and return the final video URL.

    Flow: fetch the server's RSA public key, hand over our AES session
    key (globalkey encrypted with passphrase, then RSA-encrypted), then
    trade the video id for an AES-encrypted URL decrypted locally.
    Requires PyCrypto (imported lazily below).
    """
    from Crypto.PublicKey import RSA
    from Crypto.Cipher import PKCS1_v1_5
    if cookiejar==None:
        jw=cookielib.LWPCookieJar()
    else:
        jw=cookiejar
    # 1) Fetch the server's RSA public key.
    pubkey=getUrl(pubkeyurl,cookieJar=jw,headers=[('Referer',ref)])
    # NOTE(review): eval() on server-provided data is unsafe; json.loads
    # would be the safe replacement (applies to every eval below).
    pubkey=eval(pubkey)["publickey"]
    key=encode(globalkey, passphrase)
    key2 = RSA.importKey(pubkey)
    cipher = PKCS1_v1_5.new(key2)
    ciphertext = cipher.encrypt(key)
    getpart=base64.b64encode(ciphertext)
    post={'key':getpart}
    post = urllib.urlencode(post)
    # 2) Handshake: hand the encrypted session key to the server.  The
    # challenge value itself is unused; presumably the request sets
    # server-side session state -- verify against the site's API.
    challenge=getUrl(handshakeurl,post=post,cookieJar=jw,headers=[('Referer',ref)])
    challenge=eval(challenge)["challenge"]
    # 3) Ask for the video URL, AES-encrypted with the session key.
    cc=encode( videoid, key)
    post={'key':cc}
    post = urllib.urlencode(post)
    url=getUrl(getvideourl,post=post,cookieJar=jw,headers=[('Referer',ref)])
    url=eval(url)["url"]
    finalurl=decode(url, key)
    print finalurl
    finalurl=eval(finalurl)["url"]
    finalurl=finalurl.replace('\\/','/')
    return finalurl
#import binascii
#import hashlib
#key, iv = EVP_BytesToKey(hashlib.md5, pp, salt, 32, 16, 1)
#print key,iv
#print 1/0
#print 'salt=%s' % binascii.b2a_hex(salt)
#print 'key=%s' % binascii.b2a_hex(key)
#print 'iv =%s' % binascii.b2a_hex(iv)
#print decode(ct,pp,salt)
#decryptor = pyaes.new(key, pyaes.MODE_CBC, IV=iv)
#d= decryptor.decrypt(ct)
#print d
#print pubkey
#c=cookielib.LWPCookieJar()
#print gettvnDecryptedURL(c)
| gpl-2.0 |
lanbing510/GTDWeb | django/utils/six.py | 408 | 30194 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.9.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# Type aliases papering over the py2/py3 split, plus MAXSIZE (the
# largest value a container index can take on this interpreter).
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            # len() overflows on 32-bit builds, where Py_ssize_t < 2**31.
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
    """Import module, returning the module after the last dot."""
    # __import__ returns the top-level package for dotted names, so the
    # leaf module must be fetched from sys.modules afterwards.
    __import__(name)
    return sys.modules[name]
class _LazyDescr(object):
    # Descriptor that resolves its value on first access, caches it on
    # the owning object, then deletes itself from the class.
    def __init__(self, name):
        self.name = name
    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result) # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    # Lazy reference to a module that was renamed between py2 and py3;
    # 'old' is the py2 name, 'new' the py3 name (defaults to 'name').
    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old
    def _resolve(self):
        return _import_module(self.mod)
    def __getattr__(self, attr):
        _module = self._resolve()
        value = getattr(_module, attr)
        # Cache so the real module is resolved at most once per name.
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    # Module whose attributes are _LazyDescr instances resolved on demand.
    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__
    def __dir__(self):
        # Advertise the lazy attributes so dir()/tab-completion work.
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs
    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    # Lazy reference to an attribute (function/class) that moved between
    # modules from py2 to py3; attribute names default to 'name'.
    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr
    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):
    """
    A meta path importer to import six.moves and its submodules.
    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """
    def __init__(self, six_module_name):
        self.name = six_module_name
        # Maps fully-qualified names ("<six>.moves.x") to module objects.
        self.known_modules = {}
    def _add_module(self, mod, *fullnames):
        # Register the same module under one or more "moves.*" aliases.
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod
    def _get_module(self, fullname):
        return self.known_modules[self.name + "." + fullname]
    def find_module(self, fullname, path=None):
        # PEP302 finder: claim only modules we registered.
        if fullname in self.known_modules:
            return self
        return None
    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)
    def load_module(self, fullname):
        # PEP302 loader.
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod
    def is_package(self, fullname):
        """
        Return true, if the named module is a package.
        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")
    def get_code(self, fullname):
        """Return None
        Required, if is_package is implemented"""
        self.__get_module(fullname) # eventually raises ImportError
        return None
    get_source = get_code # same as get_code
# Singleton importer instance; registered on sys.meta_path elsewhere.
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
    """Lazy loading of moved objects"""
    __path__ = [] # mark as package
# Table of py2 -> py3 renames; each entry is attached to _MovedItems
# below and (for modules) registered with the meta-path importer.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("intern", "__builtin__", "sys"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserDict", "UserDict", "collections"),
    MovedAttribute("UserList", "UserList", "collections"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
    MovedModule("winreg", "_winreg"),
]
# Attach every entry as a lazy attribute of _MovedItems; modules also
# become importable submodules of six.moves.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        _importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
# The public six.moves pseudo-module.
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
    """Add an item to six.moves.

    ``move`` is a MovedAttribute or MovedModule instance; its ``name``
    becomes the attribute name under ``six.moves``.
    """
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Not a registered move class attribute; it may have been set
        # directly on the moves module instance instead.
        if name not in moves.__dict__:
            raise AttributeError("no such move, %r" % (name,))
        del moves.__dict__[name]
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
    # Dispatch to the version-appropriate unittest method name
    # (_assertCountEqual is "assertItemsEqual" on PY2, "assertCountEqual" on PY3).
    return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
    # PY2 spells this "assertRaisesRegexp".
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
    # PY2 spells this "assertRegexpMatches".
    return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # A throwaway metaclass: the first time it is used to create a class
    # body, it discards itself and hands that body straight to the real
    # metaclass, so the resulting class is built by ``meta`` with ``bases``
    # and no temporary parent left in its MRO.
    class _Shim(meta):
        def __new__(mcs, name, this_bases, d):
            return meta(name, bases, d)
    return type.__new__(_Shim, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        # Rebuild the class from its own namespace so the metaclass gets a
        # chance to process it, stripping out the descriptors Python
        # creates automatically (slot members, __dict__, __weakref__).
        body = dict(cls.__dict__)
        declared_slots = body.get('__slots__')
        if declared_slots is not None:
            if isinstance(declared_slots, str):
                declared_slots = [declared_slots]
            for slot_name in declared_slots:
                body.pop(slot_name)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.
    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if PY2:
        # Require __str__ on the class itself (not inherited) so the
        # decorator does not silently alias an unrelated base implementation.
        if '__str__' not in klass.__dict__:
            raise ValueError("@python_2_unicode_compatible cannot be applied "
                             "to %s because it doesn't define __str__()." %
                             klass.__name__)
        # On PY2 the text-returning __str__ becomes __unicode__, and
        # __str__ is redefined to return UTF-8 encoded bytes.
        klass.__unicode__ = klass.__str__
        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
### Additional customizations for Django ###
if PY3:
memoryview = memoryview
buffer_types = (bytes, bytearray, memoryview)
else:
# memoryview and buffer are not strictly equivalent, but should be fine for
# django core usage (mainly BinaryField). However, Jython doesn't support
# buffer (see http://bugs.jython.org/issue1521), so we have to be careful.
if sys.platform.startswith('java'):
memoryview = memoryview
else:
memoryview = buffer
buffer_types = (bytearray, memoryview)
| gpl-2.0 |
xianian/qt-creator | share/qtcreator/debugger/boosttypes.py | 2 | 5200 | ############################################################################
#
# Copyright (C) 2015 The Qt Company Ltd.
# Contact: http://www.qt.io/licensing
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms and
# conditions see http://www.qt.io/terms-conditions. For further information
# use the contact form at http://www.qt.io/contact-us.
#
# GNU Lesser General Public License Usage
# Alternatively, this file may be used under the terms of the GNU Lesser
# General Public License version 2.1 or version 3 as published by the Free
# Software Foundation and appearing in the file LICENSE.LGPLv21 and
# LICENSE.LGPLv3 included in the packaging of this file. Please review the
# following information to ensure the GNU Lesser General Public License
# requirements will be met: https://www.gnu.org/licenses/lgpl.html and
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
#
# In addition, as a special exception, The Qt Company gives you certain additional
# rights. These rights are described in The Qt Company LGPL Exception
# version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
#
#############################################################################
from dumper import *
def qdump__boost__bimaps__bimap(d, value):
    """Dumper for boost::bimaps::bimap: shows the element count and, when
    expanded in the debugger view, the raw member children."""
    #leftType = d.templateArgument(value.type, 0)
    #rightType = d.templateArgument(value.type, 1)
    size = int(value["core"]["node_count"])
    d.putItemCount(size)
    if d.isExpanded():
        d.putPlainChildren(value)
def qdump__boost__optional(d, value):
    """Dumper for boost::optional<T>: shows an "uninitialized" marker when
    empty, otherwise dumps the contained value under the optional's type."""
    if int(value["m_initialized"]) == 0:
        d.putSpecialValue(SpecialUninitializedValue)
        d.putNumChild(0)
    else:
        type = d.templateArgument(value.type, 0)
        storage = value["m_storage"]
        if d.isReferenceType(type):
            # optional<T&>: the storage holds a pointer to T, so cast to
            # T* and dereference it.
            d.putItem(storage.cast(type.target().pointer()).dereference())
        else:
            # Plain optional<T>: the storage buffer holds a T in place.
            d.putItem(storage.cast(type))
        d.putBetterType(value.type)
def qdump__boost__shared_ptr(d, value):
    """Dumper for boost::shared_ptr<T>: shows "(null)" for empty pointers,
    shows simple payloads inline, and exposes data/weakcount/usecount as
    children when expanded."""
    # s boost::shared_ptr<int>
    # pn boost::detail::shared_count
    # pi_ 0x0 boost::detail::sp_counted_base *
    # px 0x0 int *
    if d.isNull(value["pn"]["pi_"]):
        d.putValue("(null)")
        d.putNumChild(0)
        return
    if d.isNull(value["px"]):
        d.putValue("(null)")
        d.putNumChild(0)
        return
    countedbase = value["pn"]["pi_"].dereference()
    weakcount = int(countedbase["weak_count_"])
    usecount = int(countedbase["use_count_"])
    # Sanity checks on the control block: a negative weak count or an
    # implausibly large use count means we are reading garbage memory.
    d.check(weakcount >= 0)
    d.check(usecount <= 10*1000*1000)
    val = value["px"].dereference()
    type = val.type
    # handle boost::shared_ptr<int>::element_type as int
    if str(type).endswith(">::element_type"):
        type = type.strip_typedefs()
    if d.isSimpleType(type):
        # Simple payload: show the value inline but keep the three children.
        d.putNumChild(3)
        d.putItem(val)
        d.putBetterType(value.type)
    else:
        d.putEmptyValue()
        d.putNumChild(3)
    if d.isExpanded():
        with Children(d, 3):
            d.putSubItem("data", val)
            d.putIntItem("weakcount", weakcount)
            d.putIntItem("usecount", usecount)
def qdump__boost__container__list(d, value):
    """Dumper for boost::container::list<T>, which is backed by an
    intrusive doubly-linked list."""
    r = value["members_"]["m_icont"]["data_"]["root_plus_size_"]
    n = toInteger(r["size_"])
    d.putItemCount(n)
    if d.isExpanded():
        innerType = d.templateArgument(value.type, 0)
        # Payload is assumed to sit after two pointer-sized intrusive hook
        # fields at the start of each node -- confirm against the
        # boost::intrusive node layout if this ever misreads.
        offset = 2 * d.ptrSize()
        with Children(d, n):
            # Walk the circular list starting from the node after the root.
            p = r["root_"]["next_"]
            for i in xrange(n):
                d.putSubItem("%s" % i, d.createValue(d.pointerValue(p) + offset, innerType))
                p = p["next_"]
def qdump__boost__gregorian__date(d, value):
    # days_ is a day number that the JulianDate formatter renders as a date.
    d.putValue(int(value["days_"]), JulianDate)
    d.putNumChild(0)
def qdump__boost__posix_time__ptime(d, value):
    # time_count_ appears to be in microseconds (divided by 1000 to get
    # milliseconds -- confirm against the boost time_resolution in use);
    # divmod splits it into (days, milliseconds-since-midnight) for the
    # formatter.
    ms = int(int(value["time_"]["time_count_"]["value_"]) / 1000)
    d.putValue("%s/%s" % divmod(ms, 86400000), JulianDateAndMillisecondsSinceMidnight)
    d.putNumChild(0)
def qdump__boost__posix_time__time_duration(d, value):
    # ticks_ is presumably in microseconds (matches the ptime dumper above);
    # shown as milliseconds since midnight.
    d.putValue(int(int(value["ticks_"]["value_"]) / 1000), MillisecondsSinceMidnight)
    d.putNumChild(0)
def qdump__boost__unordered__unordered_set(d, value):
    """Dumper for boost::unordered_set<T>, reading the container's memory
    directly.  The pointer offsets below assume a specific boost
    implementation layout -- confirm against the boost::unordered headers
    if the display ever misreads."""
    base = d.addressOf(value)
    ptrSize = d.ptrSize()
    # Element count is stored two pointer-widths into the object.
    size = d.extractInt(base + 2 * ptrSize)
    d.putItemCount(size)
    if d.isExpanded():
        innerType = d.templateArgument(value.type, 0)
        bucketCount = d.extractInt(base + ptrSize)
        # Per-node payload offset: element size rounded up to pointer size.
        offset = int((innerType.sizeof + ptrSize - 1) / ptrSize) * ptrSize
        with Children(d, size, maxNumChild=10000):
            afterBuckets = d.extractPointer(base + 5 * ptrSize)
            afterBuckets += bucketCount * ptrSize
            # Follow the singly-linked chain of nodes; each node stores the
            # next pointer first and the payload `offset` bytes before it.
            item = d.extractPointer(afterBuckets)
            for j in d.childRange():
                d.putSubItem(j, d.createValue(item - offset, innerType))
                item = d.extractPointer(item)
| lgpl-2.1 |
koharjidan/litecoin | contrib/seeds/makeseeds.py | 1 | 4297 | #!/usr/bin/env python
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = set([
"130.211.129.106", "178.63.107.226",
"83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
"54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
"54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
"54.94.195.96", "54.94.200.247"
])
import re
import sys
import dns.resolver
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):9333$")
PATTERN_AGENT = re.compile(r"^(\/Satoshi:0.8.6\/|\/Satoshi:0.9.(2|3)\/|\/Satoshi:0.10.\d{1,2}\/)$")
def parseline(line):
    """Parse one line of a dnsseed.dump into a stats dict, or return None.

    None is returned for malformed lines, non-IPv4 addresses, addresses not
    on port 9333, and lines with too few columns.  The column layout
    implied by the indices below is:
    address good last-success %2h %8h %1d %7d %30d blocks services version "agent"
    """
    sline = line.split()
    # sline[11] (the quoted user agent) is read below, so 12 columns are
    # required; the previous `< 11` check let 11-column lines through and
    # crashed with an IndexError on sline[11].
    if len(sline) < 12:
        return None
    # Match only IPv4
    m = PATTERN_IPV4.match(sline[0])
    if m is None:
        return None
    # Do IPv4 sanity check and pack the octets into a 32-bit integer.
    ip = 0
    for i in range(0, 4):
        if int(m.group(i + 2)) < 0 or int(m.group(i + 2)) > 255:
            return None
        ip = ip + (int(m.group(i + 2)) << (8 * (3 - i)))
    if ip == 0:
        return None
    # Skip bad results.
    # NOTE(review): sline[1] is a string, so `== 0` is never true and this
    # check is currently a no-op.  If the intent is to skip never-good
    # entries it should compare int(sline[1]) -- confirm the seeder dump
    # format before changing behaviour.
    if sline[1] == 0:
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent (strip the surrounding quote characters).
    agent = sline[11][1:-1]
    # Extract service flags (hex bitmask).
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'ip': m.group(1),
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
    }
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
    """Limit `ips` to at most `max_per_asn` entries per origin ASN and
    `max_total` entries overall, preserving input order.

    Based on Greg Maxwell's seed_filter.py.  Entries whose ASN cannot be
    resolved are skipped with a message on stderr.
    """
    result = []
    asn_count = {}
    for ip in ips:
        if len(result) == max_total:
            break
        try:
            # Reverse the octets and query Team Cymru's IP-to-ASN DNS zone:
            # d.c.b.a.origin.asn.cymru.com TXT -> "ASN | prefix | ...".
            asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
            if asn not in asn_count:
                asn_count[asn] = 0
            if asn_count[asn] == max_per_asn:
                continue
            asn_count[asn] += 1
            result.append(ip)
        except Exception:
            # Narrowed from a bare `except:`: a failed or garbled DNS lookup
            # should skip the entry, but KeyboardInterrupt/SystemExit must
            # still propagate.
            sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
    return result
def main():
    """Read a dnsseed.dump on stdin, filter it, and print one good IPv4
    address per line (the seeds.txt format).

    Resolves the unresolved git merge conflict that was left in this
    function: the pre-merge hex-array emitter is dropped in favour of the
    post-merge plain-address output, which is what the filtering pipeline
    above it feeds.
    """
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]
    # Keep only entries that parsed as valid IPv4 records.
    ips = [ip for ip in ips if ip is not None]
    # Skip entries from suspicious hosts.
    ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
    # Enforce minimal number of blocks.
    ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
    # Require service bit 1.
    ips = [ip for ip in ips if (ip['service'] & 1) == 1]
    # Require at least 50% 30-day uptime.
    ips = [ip for ip in ips if ip['uptime'] > 50]
    # Require a known and recent user agent.
    ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
    # Sort by availability (and use last success as tie breaker)
    ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Look up ASNs and limit results, both per ASN and globally.
    ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    # Sort the results by IP address (for deterministic output).
    ips.sort(key=lambda x: (x['ipnum']))
    # print(x) is the py2-compatible single-argument form, so this file
    # stays runnable under both Python 2 and 3.
    for ip in ips:
        print(ip['ip'])
if __name__ == '__main__':
    main()
| mit |
bczmufrn/frequencia | frequencia/justificativas/rules.py | 1 | 1401 | import rules
from frequencia.accounts.rules import is_gestor, is_bolsista
from frequencia.vinculos.utils import get_setores
#Predicates
@rules.predicate
def is_justificativa_author(user, justificativa):
    """True when the justificativa was filed under one of the user's vinculos."""
    try:
        return justificativa.vinculo in user.vinculos.all()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate; any data problem still just denies the permission.
        return None


@rules.predicate
def is_justificativa_chefe(user, justificativa):
    """True for coordenador/chefe users whose setores include the
    justificativa's setor."""
    try:
        user_setores = get_setores(user)
        return user.has_perm('accounts.is_coordenador_chefe') and justificativa.vinculo.setor in user_setores
    except Exception:
        return None


@rules.predicate
def can_reabrir(user, justificativa):
    """Gestores may reopen a justificativa that already has a decision
    (status is truthy)."""
    try:
        return user.has_perm('accounts.is_gestor') and justificativa.status
    except Exception:
        return None


@rules.predicate
def can_delete(user, justificativa):
    """Authors may delete their own justificativa while it is still pending
    (status is falsy)."""
    try:
        return is_justificativa_author(user, justificativa) and not justificativa.status
    except Exception:
        return None
#Custom predicates
justificativa_viewer = is_justificativa_author | is_justificativa_chefe | is_gestor
#Rules
rules.add_perm('tipo_justificativa.can_manage', is_gestor)
rules.add_perm('justificativa.justificativa_author', is_justificativa_author)
rules.add_perm('justificativa.can_create', is_bolsista)
rules.add_perm('justificativa.can_view', justificativa_viewer)
rules.add_perm('justificativa.can_analyse', is_justificativa_chefe)
rules.add_perm('justificativa.can_reabrir', can_reabrir)
rules.add_perm('justificativa.can_delete', can_delete) | mit |
mlaitinen/odoo | addons/account_anglo_saxon/purchase.py | 427 | 2043 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class purchase_order(osv.osv):
    _name = "purchase.order"
    _inherit = "purchase.order"
    _description = "Purchase Order"
    def _choose_account_from_po_line(self, cr, uid, order_line, context=None):
        """Anglo-saxon accounting override: for stocked (non-service)
        products, post the PO line on the product's stock input account,
        falling back to the product category's stock input account, and map
        the result through the order's fiscal position.  Service products
        keep the account chosen by the parent implementation."""
        account_id = super(purchase_order, self)._choose_account_from_po_line(cr, uid, order_line, context=context)
        if order_line.product_id and not order_line.product_id.type == 'service':
            # Prefer the per-product stock input account...
            acc_id = order_line.product_id.property_stock_account_input and order_line.product_id.property_stock_account_input.id
            if not acc_id:
                # ...then fall back to the product category's account.
                acc_id = order_line.product_id.categ_id.property_stock_account_input_categ and order_line.product_id.categ_id.property_stock_account_input_categ.id
            if acc_id:
                fpos = order_line.order_id.fiscal_position or False
                account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, acc_id)
        return account_id
| agpl-3.0 |
pf4d/dolfin-adjoint | tests_dolfin/heat/heat.py | 1 | 1357 | import sys
from dolfin import *
from dolfin_adjoint import *
# Source term and P1 ("CG" degree 1) function space on a 4x4 unit square,
# shared by the forward model below and the adjoint test driver.
f = Expression("x[0]*(x[0]-1)*x[1]*(x[1]-1)")
mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, "CG", 1)
def main(ic, annotate=True):
    """Time-step the heat problem from initial condition `ic`.

    Performs backward-Euler steps of fixed size dt = 0.1 up to T = 1.0 of
    the variational problem assembled in F, with Dirichlet value 1.0 on the
    whole boundary and a CG/ILU Krylov solve per step.  When `annotate` is
    True the solves are recorded on the dolfin-adjoint tape.  Returns the
    Function holding the final state.
    """
    u = TrialFunction(V)
    v = TestFunction(V)
    u_0 = Function(V, name="Solution")
    # Copy the initial condition without recording the assignment.
    u_0.assign(ic, annotate=False)
    # NOTE(review): u_1 and n below are never used.
    u_1 = Function(V, name="NextSolution")
    dt = Constant(0.1)
    # Implicit (backward) Euler: the unknown u appears in both the time
    # derivative and the stiffness term.
    F = ( (u - u_0)/dt*v + inner(grad(u), grad(v)) + f*v)*dx
    bc = DirichletBC(V, 1.0, "on_boundary")
    a, L = lhs(F), rhs(F)
    t = float(dt)
    T = 1.0
    n = 1
    # Each solve overwrites u_0 in place, so it always holds the latest state.
    while t <= T:
        solve(a == L, u_0, bc, annotate=annotate, solver_parameters={"linear_solver": "cg", "preconditioner": "ilu", "krylov_solver": {"absolute_tolerance": 1.0e-16, "relative_tolerance": 1.0e-200}})
        t += float(dt)
    return u_0
if __name__ == "__main__":
    ic = Function(V, name="InitialCondition")
    u = main(ic)
    # Dump the recorded forward/adjoint tapes to HTML for inspection.
    adj_html("forward.html", "forward")
    adj_html("adjoint.html", "adjoint")
    # Functional J = integral of u^4 evaluated at the final time, with the
    # recorded final-state Function as the control.
    J = Functional(u*u*u*u*dx*dt[FINISH_TIME])
    m = Control(u)
    Jm = assemble(u*u*u*u*dx)
    dJdm = compute_gradient(J, m, forget=False)
    HJm = hessian(J, m, warn=False)
    # Plain (un-annotated) re-evaluation of the functional, used by the
    # Taylor remainder convergence test below.
    def J(ic):
        u = main(ic, annotate=False)
        return assemble(u*u*u*u*dx)
    # With gradient and Hessian information the Taylor remainders should
    # converge at roughly second order, hence the 1.9 threshold.
    minconv = taylor_test(J, m, Jm, dJdm, HJm=HJm, seed=100)
    assert minconv > 1.9
| lgpl-3.0 |
darkryder/django | django/core/management/utils.py | 67 | 3739 | from __future__ import unicode_literals
import os
import sys
from subprocess import PIPE, Popen
from django.apps import apps as installed_apps
from django.utils import six
from django.utils.crypto import get_random_string
from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_text
from .base import CommandError
def popen_wrapper(args, os_err_exc_type=CommandError, stdout_encoding='utf-8'):
    """
    Friendly wrapper around Popen.
    Returns stdout output, stderr output and OS status code.
    """
    try:
        # close_fds is disabled on Windows (os.name == 'nt'), presumably
        # because it cannot be combined with redirected std handles there
        # on Python 2 -- confirm if this ever changes.
        p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt')
    except OSError as e:
        # strerror may be locale-encoded bytes on Python 2; normalize to text.
        strerror = force_text(e.strerror, DEFAULT_LOCALE_ENCODING, strings_only=True)
        # Re-raise as os_err_exc_type while preserving the original traceback.
        six.reraise(os_err_exc_type, os_err_exc_type('Error executing %s: %s' %
            (args[0], strerror)), sys.exc_info()[2])
    output, errors = p.communicate()
    return (
        # stdout is decoded strictly in the caller-specified encoding;
        # stderr leniently in the locale encoding.
        force_text(output, stdout_encoding, strings_only=True, errors='strict'),
        force_text(errors, DEFAULT_LOCALE_ENCODING, strings_only=True, errors='replace'),
        p.returncode
    )
def handle_extensions(extensions):
    """
    Normalize a list of extension specifications into a set of dotted
    extensions.  Each entry may contain several comma-separated extensions
    (optionally with spaces), with or without the leading dot, coming from
    repeated --extension/-e options.  For example
    ['.html', 'html,js,py,py,py,.py', 'py,.py'] yields {'.html', '.js', '.py'}
    and ['.html, txt,.tpl'] yields {'.html', '.tpl', '.txt'}.
    """
    split_out = []
    for spec in extensions:
        split_out.extend(spec.replace(' ', '').split(','))
    # Prepend the dot where it is missing and deduplicate.
    return {ext if ext.startswith('.') else '.' + ext for ext in split_out}
def find_command(cmd, path=None, pathext=None):
    """Search `path` (default: $PATH) for `cmd`, also trying Windows-style
    executable suffixes from `pathext` (default: $PATHEXT).  Return the
    first existing file, or None when the command cannot be found."""
    if path is None:
        path = os.environ.get('PATH', '').split(os.pathsep)
    if isinstance(path, six.string_types):
        path = [path]
    if pathext is None:
        # Windows-style executable suffixes, e.g. ".COM;.EXE;.BAT;.CMD".
        pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD').split(os.pathsep)
    # If cmd already ends with one of the suffixes, don't append another.
    if any(cmd.endswith(ext) for ext in pathext):
        pathext = ['']
    for directory in path:
        base = os.path.join(directory, cmd)
        # Try the bare name first, then each suffixed variant.
        for candidate in [base] + [base + ext for ext in pathext]:
            if os.path.isfile(candidate):
                return candidate
    return None
def get_random_secret_key():
    """
    Return a 50 character random string usable as a SECRET_KEY setting value.
    """
    # Alphabet deliberately excludes characters that are awkward in settings
    # files (quotes, backslashes); 50 chars over 50 symbols is ample entropy.
    chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
    return get_random_string(50, chars)
def parse_apps_and_model_labels(labels):
    """
    Parse a list of "app_label.ModelName" or "app_label" strings into actual
    objects and return a two-element tuple:
        (set of model classes, set of app_configs).
    Raise a CommandError if some specified models or apps don't exist.
    """
    apps = set()
    models = set()
    for label in labels:
        if '.' in label:
            # "app_label.ModelName" names a single model.
            try:
                model = installed_apps.get_model(label)
            except LookupError:
                raise CommandError('Unknown model: %s' % label)
            models.add(model)
        else:
            # A bare "app_label" names a whole application.
            try:
                app_config = installed_apps.get_app_config(label)
            except LookupError as e:
                raise CommandError(str(e))
            apps.add(app_config)
    return models, apps
| bsd-3-clause |
lduarte1991/edx-platform | common/djangoapps/entitlements/api/v1/filters.py | 18 | 1215 | from django_filters import rest_framework as filters
from entitlements.models import CourseEntitlement
class CharListFilter(filters.CharFilter):
    """ Filters a field via a comma-delimited list of values. """

    def filter(self, qs, value):  # pylint: disable=method-hidden
        # Split "a,b,c" into ['a', 'b', 'c'] so the parent filter can apply
        # it with a list-style lookup (e.g. __in). Empty/None pass through.
        if value not in (None, ''):
            value = value.split(',')
        return super(CharListFilter, self).filter(qs, value)
class UUIDListFilter(CharListFilter):
    """ Filters a field via a comma-delimited list of UUIDs.

    Defaults to filtering the ``uuid`` field with an ``in`` lookup, so a
    query string like ``?uuid=a,b,c`` matches any of the listed UUIDs.
    """

    def __init__(self, name='uuid', label=None, widget=None, method=None, lookup_expr='in', required=False,
                 distinct=False, exclude=False, **kwargs):
        # Pass everything straight through; only the defaults differ from
        # the parent (name='uuid', lookup_expr='in').
        super(UUIDListFilter, self).__init__(
            name=name,
            label=label,
            widget=widget,
            method=method,
            lookup_expr=lookup_expr,
            required=required,
            distinct=distinct,
            exclude=exclude,
            **kwargs
        )
class CourseEntitlementFilter(filters.FilterSet):
    """ FilterSet for CourseEntitlement: by UUID list and/or username. """

    # ?uuid=a,b,c -> entitlements whose uuid is in the list.
    uuid = UUIDListFilter()
    # ?user=<username> -> entitlements belonging to that user.
    user = filters.CharFilter(name='user__username')

    class Meta:
        model = CourseEntitlement
        fields = ('uuid', 'user')
| agpl-3.0 |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/nnvm/python/nnvm/compiler/param_dict.py | 2 | 2538 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Helper utility to save parameter dict"""
import tvm
_save_param_dict = tvm.get_global_func("nnvm.compiler._save_param_dict")
_load_param_dict = tvm.get_global_func("nnvm.compiler._load_param_dict")
def save_param_dict(params):
    """Save parameter dictionary to binary bytes.

    The result binary bytes can be loaded by the
    GraphModule with API "load_params".

    Parameters
    ----------
    params : dict of str to NDArray
        The parameter dictionary.

    Returns
    -------
    param_bytes: bytearray
        Serialized parameters.

    Examples
    --------
    .. code-block:: python

       # compile and save the modules to file.
       graph, lib, params = nnvm.compiler.build(
          graph, target, shape={"data", data_shape}, params=params)
       module = graph_runtime.create(graph, lib, tvm.gpu(0))
       # save the parameters as byte array
       param_bytes = nnvm.compiler.save_param_dict(params)
       # We can serialize the param_bytes and load it back later.
       # Pass in byte array to module to directly set parameters
       module["load_params"](param_bytes)
    """
    # Flatten the dict into [key0, value0, key1, value1, ...] because the
    # packed C++ function expects alternating name/NDArray arguments.
    args = []
    for k, v in params.items():
        args.append(k)
        args.append(tvm.nd.array(v))  # coerce value to a tvm NDArray
    return _save_param_dict(*args)
def load_param_dict(param_bytes):
    """Load parameter dictionary from binary bytes.

    Parameters
    ----------
    param_bytes: bytearray
        Serialized parameters.

    Returns
    -------
    params : dict of str to NDArray
        The parameter dictionary.
    """
    # The C++ loader expects a bytearray; accept bytes/str for convenience.
    if isinstance(param_bytes, (bytes, str)):
        param_bytes = bytearray(param_bytes)
    load_arr = _load_param_dict(param_bytes)
    # Re-assemble the flat (name, array) records into a dict.
    return {v.name : v.array for v in load_arr}
| apache-2.0 |
64studio/pdk | pdk/xml_legacy/sax/expatreader.py | 11 | 15122 | """
SAX driver for the pyexpat C module. This driver works with
pyexpat.__version__ == '2.22'.
"""
version = "0.20"
from xml.sax._exceptions import *
from xml.sax.handler import feature_validation, feature_namespaces
from xml.sax.handler import feature_namespace_prefixes
from xml.sax.handler import feature_external_ges, feature_external_pes
from xml.sax.handler import feature_string_interning
from xml.sax.handler import property_xml_string, property_interning_dict
# xml.parsers.expat does not raise ImportError in Jython
import sys
if sys.platform[:4] == "java":
raise SAXReaderNotAvailable("expat not available in Java", None)
del sys
try:
from xml.parsers import expat
except ImportError:
raise SAXReaderNotAvailable("expat not supported", None)
else:
if not hasattr(expat, "ParserCreate"):
raise SAXReaderNotAvailable("expat not supported", None)
from xml.sax import xmlreader, saxutils, handler
AttributesImpl = xmlreader.AttributesImpl
AttributesNSImpl = xmlreader.AttributesNSImpl
# If we're using a sufficiently recent version of Python, we can use
# weak references to avoid cycles between the parser and content
# handler, otherwise we'll just have to pretend.
try:
    import _weakref
except ImportError:
    # No weakref support available: fall back to returning the object
    # itself (a strong reference), accepting the potential cycle.
    def _mkproxy(o):
        return o
else:
    import weakref
    _mkproxy = weakref.proxy
    # Clean up module namespace; only _mkproxy is needed from here on.
    del weakref, _weakref
# --- ExpatLocator
class ExpatLocator(xmlreader.Locator):
    """Locator for use with the ExpatParser class.

    This uses a weak reference to the parser object to avoid creating
    a circular reference between the parser and the content handler.
    """
    def __init__(self, parser):
        self._ref = _mkproxy(parser)

    def getColumnNumber(self):
        parser = self._ref
        # The parser drops its expat instance once parsing has finished.
        if parser._parser is None:
            return None
        return parser._parser.ErrorColumnNumber

    def getLineNumber(self):
        parser = self._ref
        if parser._parser is None:
            return 1
        return parser._parser.ErrorLineNumber

    def getPublicId(self):
        parser = self._ref
        if parser is None:
            return None
        return parser._source.getPublicId()

    def getSystemId(self):
        parser = self._ref
        if parser is None:
            return None
        return parser._source.getSystemId()
# --- ExpatParser
class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
    """SAX driver for the pyexpat C module.

    Implements the IncrementalParser interface (parse/feed/close/reset) on
    top of ``xml.parsers.expat`` and doubles as its own Locator.
    """

    def __init__(self, namespaceHandling=0, bufsize=2**16-20):
        xmlreader.IncrementalParser.__init__(self, bufsize)
        self._source = xmlreader.InputSource()
        self._parser = None
        self._namespaces = namespaceHandling
        self._lex_handler_prop = None
        self._parsing = 0
        self._entity_stack = []
        self._external_ges = 1
        self._interning = None
        self._namespace_prefixes = 1

    # XMLReader methods

    def parse(self, source):
        "Parse an XML document from a URL or an InputSource."
        source = saxutils.prepare_input_source(source)

        self._source = source
        self.reset()
        self._cont_handler.setDocumentLocator(ExpatLocator(self))
        try:
            xmlreader.IncrementalParser.parse(self, source)
        finally:
            # Drop reference to Expat parser, but read potential
            # error state before that. Also, if close has completed,
            # we don't have a parser anymore, anyway.
            if self._parser:
                self._ColumnNumber = self._parser.ErrorColumnNumber
                self._LineNumber = self._parser.ErrorLineNumber
                self._parser = None

    def prepareParser(self, source):
        # 'is not None' (identity) is the correct test here, not '!= None'.
        if source.getSystemId() is not None:
            self._parser.SetBase(source.getSystemId())

    # Redefined setContentHandler to allow changing handlers during parsing
    def setContentHandler(self, handler):
        xmlreader.IncrementalParser.setContentHandler(self, handler)
        if self._parsing:
            self._reset_cont_handler()

    def getFeature(self, name):
        """Return the current value of the SAX feature *name*."""
        if name == feature_namespaces:
            return self._namespaces
        elif name == feature_string_interning:
            return self._interning is not None
        elif name == feature_namespace_prefixes:
            return self._namespace_prefixes
        elif name in (feature_validation, feature_external_pes):
            # expat is non-validating and never reads external param entities.
            return 0
        elif name == feature_external_ges:
            return self._external_ges
        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)

    def setFeature(self, name, state):
        """Set the SAX feature *name*; only allowed before parsing starts."""
        if self._parsing:
            raise SAXNotSupportedException("Cannot set features while parsing")

        if name == feature_namespaces:
            self._namespaces = state
        elif name == feature_external_ges:
            self._external_ges = state
        elif name == feature_string_interning:
            if state:
                if self._interning is None:
                    self._interning = {}
            else:
                self._interning = None
        elif name == feature_namespace_prefixes:
            self._namespace_prefixes = state
        elif name == feature_validation:
            if state:
                raise SAXNotSupportedException(
                    "expat does not support validation")
        elif name == feature_external_pes:
            if state:
                raise SAXNotSupportedException(
                    "expat does not read external parameter entities")
        else:
            raise SAXNotRecognizedException(
                "Feature '%s' not recognized" % name)

    def getProperty(self, name):
        """Return the current value of the SAX property *name*."""
        if name == handler.property_lexical_handler:
            return self._lex_handler_prop
        elif name == property_interning_dict:
            return self._interning
        elif name == property_xml_string:
            if self._parser:
                if hasattr(self._parser, "GetInputContext"):
                    return self._parser.GetInputContext()
                else:
                    raise SAXNotRecognizedException(
                        "This version of expat does not support getting"
                        " the XML string")
            else:
                raise SAXNotSupportedException(
                    "XML string cannot be returned when not parsing")
        raise SAXNotRecognizedException("Property '%s' not recognized" % name)

    def setProperty(self, name, value):
        """Set the SAX property *name* to *value*."""
        if name == handler.property_lexical_handler:
            self._lex_handler_prop = value
            if self._parsing:
                self._reset_lex_handler_prop()
        elif name == property_interning_dict:
            self._interning = value
        elif name == property_xml_string:
            raise SAXNotSupportedException("Property '%s' cannot be set" %
                                           name)
        else:
            raise SAXNotRecognizedException("Property '%s' not recognized" %
                                            name)

    # IncrementalParser methods

    def feed(self, data, isFinal = 0):
        if not self._parsing:
            self.reset()
            self._parsing = 1
            self._cont_handler.startDocument()

        try:
            # The isFinal parameter is internal to the expat reader.
            # If it is set to true, expat will check validity of the entire
            # document. When feeding chunks, they are not normally final -
            # except when invoked from close.
            self._parser.Parse(data, isFinal)
        except expat.error as e:
            # 'except ... as' replaces the Python-2-only 'except X, e' form
            # and works on Python 2.6+ and 3.x alike.
            exc = SAXParseException(expat.ErrorString(e.code), e, self)
            # FIXME: when to invoke error()?
            self._err_handler.fatalError(exc)

    def close(self):
        if self._entity_stack:
            # If we are completing an external entity, do nothing here
            return
        self.feed("", isFinal = 1)
        self._cont_handler.endDocument()
        self._parsing = 0
        # break cycle created by expat handlers pointing to our methods
        self._parser = None

    def _reset_cont_handler(self):
        # Rebind the expat callbacks to the (possibly new) content handler.
        self._parser.ProcessingInstructionHandler = \
            self._cont_handler.processingInstruction
        self._parser.CharacterDataHandler = self._cont_handler.characters

    def _reset_lex_handler_prop(self):
        lex = self._lex_handler_prop
        parser = self._parser
        if lex is None:
            parser.CommentHandler = None
            parser.StartCdataSectionHandler = None
            parser.EndCdataSectionHandler = None
            parser.StartDoctypeDeclHandler = None
            parser.EndDoctypeDeclHandler = None
        else:
            parser.CommentHandler = lex.comment
            parser.StartCdataSectionHandler = lex.startCDATA
            parser.EndCdataSectionHandler = lex.endCDATA
            parser.StartDoctypeDeclHandler = self.start_doctype_decl
            parser.EndDoctypeDeclHandler = lex.endDTD

    def reset(self):
        """(Re)create the expat parser and wire up all event handlers."""
        if self._namespaces:
            # Namespace-aware parsing: expat reports names as
            # "uri localname [prefix]" separated by spaces.
            self._parser = expat.ParserCreate(None, " ",
                                              intern=self._interning)
            self._parser.namespace_prefixes = 1
            self._parser.StartElementHandler = self.start_element_ns
            self._parser.EndElementHandler = self.end_element_ns
        else:
            self._parser = expat.ParserCreate(intern = self._interning)
            self._parser.StartElementHandler = self.start_element
            self._parser.EndElementHandler = self.end_element

        self._reset_cont_handler()
        self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
        self._parser.NotationDeclHandler = self.notation_decl
        self._parser.StartNamespaceDeclHandler = self.start_namespace_decl
        self._parser.EndNamespaceDeclHandler = self.end_namespace_decl

        self._decl_handler_prop = None
        if self._lex_handler_prop:
            self._reset_lex_handler_prop()
        # self._parser.DefaultHandler =
        # self._parser.DefaultHandlerExpand =
        # self._parser.NotStandaloneHandler =
        self._parser.ExternalEntityRefHandler = self.external_entity_ref
        try:
            self._parser.SkippedEntityHandler = self.skipped_entity_handler
        except AttributeError:
            # This pyexpat does not support SkippedEntity
            pass
        self._parser.SetParamEntityParsing(
            expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)

        self._parsing = 0
        self._entity_stack = []
        # default values when _parser goes away
        self._ColumnNumber = None
        self._LineNumber = 1

    # Locator methods

    def getColumnNumber(self):
        if self._parser is None:
            return self._ColumnNumber
        return self._parser.ErrorColumnNumber

    def getLineNumber(self):
        if self._parser is None:
            return self._LineNumber
        return self._parser.ErrorLineNumber

    def getPublicId(self):
        return self._source.getPublicId()

    def getSystemId(self):
        return self._source.getSystemId()

    # event handlers

    def start_element(self, name, attrs):
        self._cont_handler.startElement(name, AttributesImpl(attrs))

    def end_element(self, name):
        self._cont_handler.endElement(name)

    def start_element_ns(self, name, attrs):
        # expat hands us "uri localname [prefix]"; split into the SAX
        # (uri, localname) pair plus a prefixed qname where available.
        pair = name.split()
        if len(pair) == 1:
            # no namespace
            elem_qname = name
            pair = (None, name)
        elif len(pair) == 3:
            # namespace plus prefix
            elem_qname = "%s:%s" % (pair[2], pair[1])
            pair = pair[0], pair[1]
        else:
            # default namespace
            elem_qname = pair[1]
            pair = tuple(pair)

        newattrs = {}
        qnames = {}
        for (aname, value) in attrs.items():
            parts = aname.split()
            length = len(parts)
            if length == 1:
                # no namespace
                qname = aname
                apair = (None, aname)
            elif length == 3:
                qname = "%s:%s" % (parts[2], parts[1])
                apair = parts[0], parts[1]
            else:
                # default namespace
                qname = parts[1]
                apair = tuple(parts)

            newattrs[apair] = value
            qnames[apair] = qname

        self._cont_handler.startElementNS(pair, elem_qname,
                                          AttributesNSImpl(newattrs, qnames))

    def end_element_ns(self, name):
        pair = name.split()
        if len(pair) == 1:
            pair = (None, name)
        elif len(pair) == 3:
            pair = pair[0], pair[1]
        else:
            pair = tuple(pair)

        self._cont_handler.endElementNS(pair, None)

    # this is not used (call directly to ContentHandler)
    def processing_instruction(self, target, data):
        self._cont_handler.processingInstruction(target, data)

    # this is not used (call directly to ContentHandler)
    def character_data(self, data):
        self._cont_handler.characters(data)

    def start_namespace_decl(self, prefix, uri):
        self._cont_handler.startPrefixMapping(prefix, uri)

    def end_namespace_decl(self, prefix):
        self._cont_handler.endPrefixMapping(prefix)

    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        self._lex_handler_prop.startDTD(name, pubid, sysid)

    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name)

    def notation_decl(self, name, base, sysid, pubid):
        self._dtd_handler.notationDecl(name, pubid, sysid)

    def external_entity_ref(self, context, base, sysid, pubid):
        if not self._external_ges:
            return 1

        source = self._ent_handler.resolveEntity(pubid, sysid)
        source = saxutils.prepare_input_source(source,
                                               self._source.getSystemId() or
                                               "")
        self._entity_stack.append((self._parser, self._source))
        self._parser = self._parser.ExternalEntityParserCreate(context)
        self._source = source

        try:
            xmlreader.IncrementalParser.parse(self, source)
        except:
            # Deliberately broad: any failure while parsing the external
            # entity is reported to expat as failure (0), not raised.
            return 0  # FIXME: save error info here?

        (self._parser, self._source) = self._entity_stack[-1]
        del self._entity_stack[-1]
        return 1

    def skipped_entity_handler(self, name, is_pe):
        if is_pe:
            # The SAX spec requires to report skipped PEs with a '%'
            name = '%'+name
        self._cont_handler.skippedEntity(name)
# ---
def create_parser(*args, **kwargs):
    """Factory returning a new ExpatParser.

    Uses direct argument unpacking instead of the built-in ``apply``,
    which was removed in Python 3 (unpacking works on Python 2.6+ too).
    """
    return ExpatParser(*args, **kwargs)
# ---
if __name__ == "__main__":
    # Ad-hoc smoke test: echo a local document through an XMLGenerator.
    import xml.sax
    p = create_parser()
    p.setContentHandler(xml.sax.XMLGenerator())
    p.setErrorHandler(xml.sax.ErrorHandler())
    p.parse("../../../hamlet.xml")
| gpl-2.0 |
BassantMorsi/finderApp | lib/python2.7/site-packages/numpy/doc/structured_arrays.py | 56 | 11442 | """
=================
Structured Arrays
=================
Introduction
============
NumPy provides powerful capabilities to create arrays of structured datatype.
These arrays permit one to manipulate the data by named fields. A simple
example will show what is meant.: ::
>>> x = np.array([(1,2.,'Hello'), (2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> x
array([(1, 2.0, 'Hello'), (2, 3.0, 'World')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
Here we have created a one-dimensional array of length 2. Each element of
this array is a structure that contains three items, a 32-bit integer, a 32-bit
float, and a string of length 10 or less. If we index this array at the second
position we get the second structure: ::
>>> x[1]
(2,3.,"World")
Conveniently, one can access any field of the array by indexing using the
string that names that field. ::
>>> y = x['bar']
>>> y
array([ 2., 3.], dtype=float32)
>>> y[:] = 2*y
>>> y
array([ 4., 6.], dtype=float32)
>>> x
array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
In these examples, y is a simple float array consisting of the 2nd field
in the structured type. But, rather than being a copy of the data in the structured
array, it is a view, i.e., it shares exactly the same memory locations.
Thus, when we updated this array by doubling its values, the structured
array shows the corresponding values as doubled as well. Likewise, if one
changes the structured array, the field view also changes: ::
>>> x[1] = (-1,-1.,"Master")
>>> x
array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
>>> y
array([ 4., -1.], dtype=float32)
Defining Structured Arrays
==========================
One defines a structured array through the dtype object. There are
**several** alternative ways to define the fields of a record. Some of
these variants provide backward compatibility with Numeric, numarray, or
another module, and should not be used except for such purposes. These
will be so noted. One specifies record structure in
one of four alternative ways, using an argument (as supplied to a dtype
function keyword or a dtype object constructor itself). This
argument must be one of the following: 1) string, 2) tuple, 3) list, or
4) dictionary. Each of these is briefly described below.
1) String argument.
In this case, the constructor expects a comma-separated list of type
specifiers, optionally with extra shape information. The fields are
given the default names 'f0', 'f1', 'f2' and so on.
The type specifiers can take 4 different forms: ::
a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f2, f4, f8, c8, c16, a<n>
(representing bytes, ints, unsigned ints, floats, complex and
fixed length strings of specified byte lengths)
b) int8,...,uint8,...,float16, float32, float64, complex64, complex128
(this time with bit sizes)
c) older Numeric/numarray type specifications (e.g. Float32).
Don't use these in new code!
d) Single character type specifiers (e.g H for unsigned short ints).
Avoid using these unless you must. Details can be found in the
NumPy book
These different styles can be mixed within the same string (but why would you
want to do that?). Furthermore, each type specifier can be prefixed
with a repetition number, or a shape. In these cases an array
element is created, i.e., an array within a record. That array
is still referred to as a single field. An example: ::
>>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64')
>>> x
array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])],
dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))])
By using strings to define the record structure, it precludes being
able to name the fields in the original definition. The names can
be changed as shown later, however.
2) Tuple argument: The only relevant tuple case that applies to record
structures is when a structure is mapped to an existing data type. This
is done by pairing in a tuple, the existing data type with a matching
dtype definition (using any of the variants being described here). As
an example (using a definition using a list, so see 3) for further
details): ::
>>> x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
>>> x
array([0, 0, 0])
>>> x['r']
array([0, 0, 0], dtype=uint8)
In this case, an array is produced that looks and acts like a simple int32 array,
but also has definitions for fields that use only one byte of the int32 (a bit
like Fortran equivalencing).
3) List argument: In this case the record structure is defined with a list of
tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field
('' is permitted), 2) the type of the field, and 3) the shape (optional).
For example::
>>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
>>> x
array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])],
dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))])
4) Dictionary argument: two different forms are permitted. The first consists
of a dictionary with two required keys ('names' and 'formats'), each having an
equal sized list of values. The format list contains any type/shape specifier
allowed in other contexts. The names must be strings. There are two optional
keys: 'offsets' and 'titles'. Each must be a correspondingly matching list to
the required two where offsets contain integer offsets for each field, and
titles are objects containing metadata for each field (these do not have
to be strings), where the value of None is permitted. As an example: ::
>>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[('col1', '>i4'), ('col2', '>f4')])
The other dictionary form permitted is a dictionary of name keys with tuple
values specifying type, offset, and an optional title. ::
>>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')])
Accessing and modifying field names
===================================
The field names are an attribute of the dtype object defining the structure.
For the last example: ::
>>> x.dtype.names
('col1', 'col2')
>>> x.dtype.names = ('x', 'y')
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')])
>>> x.dtype.names = ('x', 'y', 'z') # wrong number of names
<type 'exceptions.ValueError'>: must replace all names at once with a sequence of length 2
Accessing field titles
====================================
The field titles provide a standard place to put associated info for fields.
They do not have to be strings. ::
>>> x.dtype.fields['x'][2]
'title 1'
Accessing multiple fields at once
====================================
You can access multiple fields at once using a list of field names: ::
>>> x = np.array([(1.5,2.5,(1.0,2.0)),(3.,4.,(4.,5.)),(1.,3.,(2.,6.))],
dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
Notice that `x` is created with a list of tuples. ::
>>> x[['x','y']]
array([(1.5, 2.5), (3.0, 4.0), (1.0, 3.0)],
dtype=[('x', '<f4'), ('y', '<f4')])
>>> x[['x','value']]
array([(1.5, [[1.0, 2.0], [1.0, 2.0]]), (3.0, [[4.0, 5.0], [4.0, 5.0]]),
(1.0, [[2.0, 6.0], [2.0, 6.0]])],
dtype=[('x', '<f4'), ('value', '<f4', (2, 2))])
The fields are returned in the order they are asked for.::
>>> x[['y','x']]
array([(2.5, 1.5), (4.0, 3.0), (3.0, 1.0)],
dtype=[('y', '<f4'), ('x', '<f4')])
Filling structured arrays
=========================
Structured arrays can be filled by field or row by row. ::
>>> arr = np.zeros((5,), dtype=[('var1','f8'),('var2','f8')])
>>> arr['var1'] = np.arange(5)
If you fill it in row by row, it takes a tuple
(but not a list or array!)::
>>> arr[0] = (10,20)
>>> arr
array([(10.0, 20.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)],
dtype=[('var1', '<f8'), ('var2', '<f8')])
Record Arrays
=============
For convenience, numpy provides "record arrays" which allow one to access
fields of structured arrays by attribute rather than by index. Record arrays
are structured arrays wrapped using a subclass of ndarray,
:class:`numpy.recarray`, which allows field access by attribute on the array
object, and record arrays also use a special datatype, :class:`numpy.record`,
which allows field access by attribute on the individual elements of the array.
The simplest way to create a record array is with :func:`numpy.rec.array`: ::
>>> recordarr = np.rec.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> recordarr.bar
array([ 2., 3.], dtype=float32)
>>> recordarr[1:2]
rec.array([(2, 3.0, 'World')],
dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])
>>> recordarr[1:2].foo
array([2], dtype=int32)
>>> recordarr.foo[1:2]
array([2], dtype=int32)
>>> recordarr[1].baz
'World'
numpy.rec.array can convert a wide variety of arguments into record arrays,
including normal structured arrays: ::
>>> arr = array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
>>> recordarr = np.rec.array(arr)
The numpy.rec module provides a number of other convenience functions for
creating record arrays, see :ref:`record array creation routines
<routines.array-creation.rec>`.
A record array representation of a structured array can be obtained using the
appropriate :ref:`view`: ::
>>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')])
>>> recordarr = arr.view(dtype=dtype((np.record, arr.dtype)),
... type=np.recarray)
For convenience, viewing an ndarray as type `np.recarray` will automatically
convert to `np.record` datatype, so the dtype can be left out of the view: ::
>>> recordarr = arr.view(np.recarray)
>>> recordarr.dtype
dtype((numpy.record, [('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')]))
To get back to a plain ndarray both the dtype and type must be reset. The
following view does so, taking into account the unusual case that the
recordarr was not a structured type: ::
>>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray)
Record array fields accessed by index or by attribute are returned as a record
array if the field has a structured type but as a plain ndarray otherwise. ::
>>> recordarr = np.rec.array([('Hello', (1,2)),("World", (3,4))],
... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])])
>>> type(recordarr.foo)
<type 'numpy.ndarray'>
>>> type(recordarr.bar)
<class 'numpy.core.records.recarray'>
Note that if a field has the same name as an ndarray attribute, the ndarray
attribute takes precedence. Such fields will be inaccessible by attribute but
may still be accessed by index.
"""
from __future__ import division, absolute_import, print_function
| mit |
MikeAmy/django | tests/template_tests/test_custom.py | 116 | 19622 | from __future__ import unicode_literals
import os
from unittest import skipUnless
from django.template import Context, Engine, TemplateSyntaxError
from django.template.base import Node
from django.template.library import InvalidTemplateLibrary
from django.test import SimpleTestCase
from django.test.utils import extend_sys_path
from django.utils import six
from .templatetags import custom, inclusion
from .utils import ROOT
LIBRARIES = {
'custom': 'template_tests.templatetags.custom',
'inclusion': 'template_tests.templatetags.inclusion',
}
class CustomFilterTests(SimpleTestCase):
    """Tests for custom template filters registered via a library."""

    def test_filter(self):
        # The 'trim' filter from the custom library truncates its input to
        # the given number of characters.
        engine = Engine(libraries=LIBRARIES)
        t = engine.from_string("{% load custom %}{{ string|trim:5 }}")
        self.assertEqual(
            t.render(Context({"string": "abcdefghijklmnopqrstuvwxyz"})),
            "abcde"
        )
class TagTestCase(SimpleTestCase):
    """Base class: shared Engine plus a tag-registration sanity check."""

    @classmethod
    def setUpClass(cls):
        cls.engine = Engine(app_dirs=True, libraries=LIBRARIES)
        super(TagTestCase, cls).setUpClass()

    def verify_tag(self, tag, name):
        # The registration decorators must preserve the wrapped function's
        # __name__, __doc__ and custom attributes.
        self.assertEqual(tag.__name__, name)
        self.assertEqual(tag.__doc__, 'Expected %s __doc__' % name)
        self.assertEqual(tag.__dict__['anything'], 'Expected %s __dict__' % name)
class SimpleTagTests(TagTestCase):
    """Tests for @register.simple_tag: argument handling, 'as var'
    assignment, error messages, escaping and registration metadata."""

    def test_simple_tags(self):
        c = Context({'value': 42})
        # (template, expected-render) pairs covering positional args,
        # keyword args, defaults and *args/**kwargs tags.
        templates = [
            ('{% load custom %}{% no_params %}', 'no_params - Expected result'),
            ('{% load custom %}{% one_param 37 %}', 'one_param - Expected result: 37'),
            ('{% load custom %}{% explicit_no_context 37 %}', 'explicit_no_context - Expected result: 37'),
            ('{% load custom %}{% no_params_with_context %}',
             'no_params_with_context - Expected result (context value: 42)'),
            ('{% load custom %}{% params_and_context 37 %}',
             'params_and_context - Expected result (context value: 42): 37'),
            ('{% load custom %}{% simple_two_params 37 42 %}', 'simple_two_params - Expected result: 37, 42'),
            ('{% load custom %}{% simple_one_default 37 %}', 'simple_one_default - Expected result: 37, hi'),
            ('{% load custom %}{% simple_one_default 37 two="hello" %}',
             'simple_one_default - Expected result: 37, hello'),
            ('{% load custom %}{% simple_one_default one=99 two="hello" %}',
             'simple_one_default - Expected result: 99, hello'),
            ('{% load custom %}{% simple_one_default 37 42 %}',
             'simple_one_default - Expected result: 37, 42'),
            ('{% load custom %}{% simple_unlimited_args 37 %}', 'simple_unlimited_args - Expected result: 37, hi'),
            ('{% load custom %}{% simple_unlimited_args 37 42 56 89 %}',
             'simple_unlimited_args - Expected result: 37, 42, 56, 89'),
            ('{% load custom %}{% simple_only_unlimited_args %}', 'simple_only_unlimited_args - Expected result: '),
            ('{% load custom %}{% simple_only_unlimited_args 37 42 56 89 %}',
             'simple_only_unlimited_args - Expected result: 37, 42, 56, 89'),
            ('{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}',
             'simple_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4'),
        ]

        for entry in templates:
            t = self.engine.from_string(entry[0])
            self.assertEqual(t.render(c), entry[1])

        # Every tag must also support "{% tag ... as var %}" assignment;
        # entry[0][0:-2] strips the trailing "%}" so " as var %}" can be
        # appended.
        for entry in templates:
            t = self.engine.from_string("%s as var %%}Result: {{ var }}" % entry[0][0:-2])
            self.assertEqual(t.render(c), "Result: %s" % entry[1])

    def test_simple_tag_errors(self):
        # (expected error message, template) pairs for invalid invocations.
        errors = [
            ("'simple_one_default' received unexpected keyword argument 'three'",
             '{% load custom %}{% simple_one_default 99 two="hello" three="foo" %}'),
            ("'simple_two_params' received too many positional arguments",
             '{% load custom %}{% simple_two_params 37 42 56 %}'),
            ("'simple_one_default' received too many positional arguments",
             '{% load custom %}{% simple_one_default 37 42 56 %}'),
            ("'simple_unlimited_args_kwargs' received some positional argument(s) after some keyword argument(s)",
             '{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 eggs="scrambled" 56 four=1|add:3 %}'),
            ("'simple_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'",
             '{% load custom %}{% simple_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}'),
        ]

        for entry in errors:
            with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
                self.engine.from_string(entry[1])

        # The same errors must be raised in the "as var" form.
        for entry in errors:
            with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
                self.engine.from_string("%s as var %%}" % entry[1][0:-2])

    def test_simple_tag_escaping_autoescape_off(self):
        c = Context({'name': "Jack & Jill"}, autoescape=False)
        t = self.engine.from_string("{% load custom %}{% escape_naive %}")
        self.assertEqual(t.render(c), "Hello Jack & Jill!")

    def test_simple_tag_naive_escaping(self):
        c = Context({'name': "Jack & Jill"})
        t = self.engine.from_string("{% load custom %}{% escape_naive %}")
        self.assertEqual(t.render(c), "Hello Jack &amp; Jill!")

    def test_simple_tag_explicit_escaping(self):
        # Check we don't double escape
        c = Context({'name': "Jack & Jill"})
        t = self.engine.from_string("{% load custom %}{% escape_explicit %}")
        self.assertEqual(t.render(c), "Hello Jack &amp; Jill!")

    def test_simple_tag_format_html_escaping(self):
        # Check we don't double escape
        c = Context({'name': "Jack & Jill"})
        t = self.engine.from_string("{% load custom %}{% escape_format_html %}")
        self.assertEqual(t.render(c), "Hello Jack &amp; Jill!")

    def test_simple_tag_registration(self):
        # Test that the decorators preserve the decorated function's docstring, name and attributes.
        self.verify_tag(custom.no_params, 'no_params')
        self.verify_tag(custom.one_param, 'one_param')
        self.verify_tag(custom.explicit_no_context, 'explicit_no_context')
        self.verify_tag(custom.no_params_with_context, 'no_params_with_context')
        self.verify_tag(custom.params_and_context, 'params_and_context')
        self.verify_tag(custom.simple_unlimited_args_kwargs, 'simple_unlimited_args_kwargs')
        self.verify_tag(custom.simple_tag_without_context_parameter, 'simple_tag_without_context_parameter')

    def test_simple_tag_missing_context(self):
        # The 'context' parameter must be present when takes_context is True
        msg = (
            "'simple_tag_without_context_parameter' is decorated with "
            "takes_context=True so it must have a first argument of 'context'"
        )
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.from_string('{% load custom %}{% simple_tag_without_context_parameter 123 %}')
class InclusionTagTests(TagTestCase):
def test_inclusion_tags(self):
c = Context({'value': 42})
templates = [
('{% load inclusion %}{% inclusion_no_params %}', 'inclusion_no_params - Expected result\n'),
('{% load inclusion %}{% inclusion_one_param 37 %}', 'inclusion_one_param - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_explicit_no_context 37 %}',
'inclusion_explicit_no_context - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_no_params_with_context %}',
'inclusion_no_params_with_context - Expected result (context value: 42)\n'),
('{% load inclusion %}{% inclusion_params_and_context 37 %}',
'inclusion_params_and_context - Expected result (context value: 42): 37\n'),
('{% load inclusion %}{% inclusion_two_params 37 42 %}',
'inclusion_two_params - Expected result: 37, 42\n'),
(
'{% load inclusion %}{% inclusion_one_default 37 %}',
'inclusion_one_default - Expected result: 37, hi\n'
),
('{% load inclusion %}{% inclusion_one_default 37 two="hello" %}',
'inclusion_one_default - Expected result: 37, hello\n'),
('{% load inclusion %}{% inclusion_one_default one=99 two="hello" %}',
'inclusion_one_default - Expected result: 99, hello\n'),
('{% load inclusion %}{% inclusion_one_default 37 42 %}',
'inclusion_one_default - Expected result: 37, 42\n'),
('{% load inclusion %}{% inclusion_unlimited_args 37 %}',
'inclusion_unlimited_args - Expected result: 37, hi\n'),
('{% load inclusion %}{% inclusion_unlimited_args 37 42 56 89 %}',
'inclusion_unlimited_args - Expected result: 37, 42, 56, 89\n'),
('{% load inclusion %}{% inclusion_only_unlimited_args %}',
'inclusion_only_unlimited_args - Expected result: \n'),
('{% load inclusion %}{% inclusion_only_unlimited_args 37 42 56 89 %}',
'inclusion_only_unlimited_args - Expected result: 37, 42, 56, 89\n'),
('{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}',
'inclusion_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4\n'),
]
for entry in templates:
t = self.engine.from_string(entry[0])
self.assertEqual(t.render(c), entry[1])
def test_inclusion_tag_errors(self):
errors = [
("'inclusion_one_default' received unexpected keyword argument 'three'",
'{% load inclusion %}{% inclusion_one_default 99 two="hello" three="foo" %}'),
("'inclusion_two_params' received too many positional arguments",
'{% load inclusion %}{% inclusion_two_params 37 42 56 %}'),
("'inclusion_one_default' received too many positional arguments",
'{% load inclusion %}{% inclusion_one_default 37 42 56 %}'),
("'inclusion_one_default' did not receive value(s) for the argument(s): 'one'",
'{% load inclusion %}{% inclusion_one_default %}'),
("'inclusion_unlimited_args' did not receive value(s) for the argument(s): 'one'",
'{% load inclusion %}{% inclusion_unlimited_args %}'),
(
"'inclusion_unlimited_args_kwargs' received some positional argument(s) "
"after some keyword argument(s)",
'{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 40|add:2 eggs="boiled" 56 four=1|add:3 %}',
),
("'inclusion_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'",
'{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}'),
]
for entry in errors:
with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
self.engine.from_string(entry[1])
def test_include_tag_missing_context(self):
# The 'context' parameter must be present when takes_context is True
msg = (
"'inclusion_tag_without_context_parameter' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.from_string('{% load inclusion %}{% inclusion_tag_without_context_parameter 123 %}')
def test_inclusion_tags_from_template(self):
c = Context({'value': 42})
templates = [
('{% load inclusion %}{% inclusion_no_params_from_template %}',
'inclusion_no_params_from_template - Expected result\n'),
('{% load inclusion %}{% inclusion_one_param_from_template 37 %}',
'inclusion_one_param_from_template - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_explicit_no_context_from_template 37 %}',
'inclusion_explicit_no_context_from_template - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_no_params_with_context_from_template %}',
'inclusion_no_params_with_context_from_template - Expected result (context value: 42)\n'),
('{% load inclusion %}{% inclusion_params_and_context_from_template 37 %}',
'inclusion_params_and_context_from_template - Expected result (context value: 42): 37\n'),
('{% load inclusion %}{% inclusion_two_params_from_template 37 42 %}',
'inclusion_two_params_from_template - Expected result: 37, 42\n'),
('{% load inclusion %}{% inclusion_one_default_from_template 37 %}',
'inclusion_one_default_from_template - Expected result: 37, hi\n'),
('{% load inclusion %}{% inclusion_one_default_from_template 37 42 %}',
'inclusion_one_default_from_template - Expected result: 37, 42\n'),
('{% load inclusion %}{% inclusion_unlimited_args_from_template 37 %}',
'inclusion_unlimited_args_from_template - Expected result: 37, hi\n'),
('{% load inclusion %}{% inclusion_unlimited_args_from_template 37 42 56 89 %}',
'inclusion_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n'),
('{% load inclusion %}{% inclusion_only_unlimited_args_from_template %}',
'inclusion_only_unlimited_args_from_template - Expected result: \n'),
('{% load inclusion %}{% inclusion_only_unlimited_args_from_template 37 42 56 89 %}',
'inclusion_only_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n'),
]
for entry in templates:
t = self.engine.from_string(entry[0])
self.assertEqual(t.render(c), entry[1])
def test_inclusion_tag_registration(self):
# Test that the decorators preserve the decorated function's docstring, name and attributes.
self.verify_tag(inclusion.inclusion_no_params, 'inclusion_no_params')
self.verify_tag(inclusion.inclusion_one_param, 'inclusion_one_param')
self.verify_tag(inclusion.inclusion_explicit_no_context, 'inclusion_explicit_no_context')
self.verify_tag(inclusion.inclusion_no_params_with_context, 'inclusion_no_params_with_context')
self.verify_tag(inclusion.inclusion_params_and_context, 'inclusion_params_and_context')
self.verify_tag(inclusion.inclusion_two_params, 'inclusion_two_params')
self.verify_tag(inclusion.inclusion_one_default, 'inclusion_one_default')
self.verify_tag(inclusion.inclusion_unlimited_args, 'inclusion_unlimited_args')
self.verify_tag(inclusion.inclusion_only_unlimited_args, 'inclusion_only_unlimited_args')
self.verify_tag(inclusion.inclusion_tag_without_context_parameter, 'inclusion_tag_without_context_parameter')
self.verify_tag(inclusion.inclusion_tag_use_l10n, 'inclusion_tag_use_l10n')
self.verify_tag(inclusion.inclusion_unlimited_args_kwargs, 'inclusion_unlimited_args_kwargs')
def test_15070_use_l10n(self):
"""
Test that inclusion tag passes down `use_l10n` of context to the
Context of the included/rendered template as well.
"""
c = Context({})
t = self.engine.from_string('{% load inclusion %}{% inclusion_tag_use_l10n %}')
self.assertEqual(t.render(c).strip(), 'None')
c.use_l10n = True
self.assertEqual(t.render(c).strip(), 'True')
def test_no_render_side_effect(self):
"""
#23441 -- InclusionNode shouldn't modify its nodelist at render time.
"""
engine = Engine(app_dirs=True, libraries=LIBRARIES)
template = engine.from_string('{% load inclusion %}{% inclusion_no_params %}')
count = template.nodelist.get_nodes_by_type(Node)
template.render(Context({}))
self.assertEqual(template.nodelist.get_nodes_by_type(Node), count)
def test_render_context_is_cleared(self):
"""
#24555 -- InclusionNode should push and pop the render_context stack
when rendering. Otherwise, leftover values such as blocks from
extending can interfere with subsequent rendering.
"""
engine = Engine(app_dirs=True, libraries=LIBRARIES)
template = engine.from_string('{% load inclusion %}{% inclusion_extends1 %}{% inclusion_extends2 %}')
self.assertEqual(template.render(Context({})).strip(), 'one\ntwo')
class AssignmentTagTests(TagTestCase):
def test_assignment_tags(self):
c = Context({'value': 42})
t = self.engine.from_string('{% load custom %}{% assignment_no_params as var %}The result is: {{ var }}')
self.assertEqual(t.render(c), 'The result is: assignment_no_params - Expected result')
def test_assignment_tag_registration(self):
# Test that the decorators preserve the decorated function's docstring, name and attributes.
self.verify_tag(custom.assignment_no_params, 'assignment_no_params')
def test_assignment_tag_missing_context(self):
# The 'context' parameter must be present when takes_context is True
msg = (
"'assignment_tag_without_context_parameter' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.from_string('{% load custom %}{% assignment_tag_without_context_parameter 123 as var %}')
class TemplateTagLoadingTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.egg_dir = os.path.join(ROOT, 'eggs')
super(TemplateTagLoadingTests, cls).setUpClass()
def test_load_error(self):
msg = (
"Invalid template library specified. ImportError raised when "
"trying to load 'template_tests.broken_tag': cannot import name "
"'?Xtemplate'?"
)
with six.assertRaisesRegex(self, InvalidTemplateLibrary, msg):
Engine(libraries={
'broken_tag': 'template_tests.broken_tag',
})
def test_load_error_egg(self):
egg_name = '%s/tagsegg.egg' % self.egg_dir
msg = (
"Invalid template library specified. ImportError raised when "
"trying to load 'tagsegg.templatetags.broken_egg': cannot "
"import name '?Xtemplate'?"
)
with extend_sys_path(egg_name):
with six.assertRaisesRegex(self, InvalidTemplateLibrary, msg):
Engine(libraries={
'broken_egg': 'tagsegg.templatetags.broken_egg',
})
def test_load_working_egg(self):
ttext = "{% load working_egg %}"
egg_name = '%s/tagsegg.egg' % self.egg_dir
with extend_sys_path(egg_name):
engine = Engine(libraries={
'working_egg': 'tagsegg.templatetags.working_egg',
})
engine.from_string(ttext)
@skipUnless(six.PY3, "Python 3 only -- Python 2 doesn't have annotations.")
def test_load_annotated_function(self):
Engine(libraries={
'annotated_tag_function': 'template_tests.annotated_tag_function',
})
| bsd-3-clause |
Bridgewater/appetite | src/modules/conn_manager.py | 1 | 31814 | #!/usr/bin/env python
# pylint: disable=too-complex,relative-import,no-member,invalid-name,too-many-arguments,import-error,too-many-nested-blocks
"""Command Manger
Module to handle ssh communication. Commands are either dictated though
functions or restricted configuration files.
"""
import os
import json
import time
import re
import datetime
import uuid
import paramiko
from scp import SCPClient
import consts
import logger
import helpers
# Creating global object to share
CREDS = helpers.create_obj(
{
"SSH_USER": "",
"SSH_KEYFILE": "",
"APP_DIR": "",
"APP_BIN": "",
"DRY_RUN": False,
"PK": ""
}
)
COMMAND_RESTART_NAME = 'restart'
COMMAND_MODULE_INIT = 'initization'
COMMAND_MODULE_CUSTOM = 'custom_command'
COMMAND_MODULE_BUILTIN = 'buildin_function'
# Used to check configuration for mandatory stanzas
MANDATORY_COMMAND_STANZAS = [
COMMAND_RESTART_NAME
]
# Retry limits
MAX_SSH_RETRIES = 3
MAX_SCP_RETRIES = 4
SESSION_TIMEOUT = 30
SESSION_SHELL_TIMEOUT = 3600
SESSION_RESPONSE_TIMEOUT = 300
CONNECTION_TIMEOUT = 10
SESSION_SHELL_EXIT = uuid.uuid4().hex
# Filtering for error ssh messasge.
ERROR_MESSAGES = [
'No such file or directory',
'Permission denied',
'No such file'
]
def set_globals(user, keyfile, port, app_dir, app_bin, dry_run=False):
"""Set global vars"""
CREDS.SSH_USER = user
CREDS.SSH_KEYFILE = os.path.expanduser(keyfile)
CREDS.SSH_PORT = port
CREDS.PK = None
CREDS.APP_DIR = app_dir
CREDS.APP_BIN = app_bin
CREDS.DRY_RUN = dry_run
if len(keyfile) < 1 or len(CREDS.SSH_USER) < 1:
CREDS.DRY_RUN = True
else:
CREDS.PK = paramiko.RSAKey.from_private_key_file(CREDS.SSH_KEYFILE)
class SshAppCommand(object): # pylint: disable=too-many-instance-attributes
"""Class to store single stored command"""
user = None
password = None
def __init__(self, config, name, options, index=-1): # pylint: disable=too-many-branches
"""Init single ssh command"""
self.limit_to_hosts = []
self.exclude_hosts = []
self.limited_host_list = []
self.limit_sites = []
self.limit_indexes = []
self.suppress_limit_to_hosts_warnings = True
self.use_auth = []
self.name = name
self.cmd = None
self.use_root = False
self.use_app_binary = True
self.pre_install = False
self.index = index
self.only_run_on_init = False
self.delay = consts.REMOTE_CMD_RUN_SLEEP_TIMER
for option in options:
try:
config_option = config.get(name, option).strip('"\'')
if option == 'limit_to_hosts':
self.limit_to_hosts = json.loads(config_option)
elif option == 'exclude_hosts':
self.exclude_hosts = json.loads(config_option)
elif option == 'limit_sites':
self.limit_sites = json.loads(config_option)
elif option == 'limit_indexes':
self.limit_indexes = json.loads(config_option)
elif option == 'use_root':
self.use_root = config.getboolean(name, option)
elif option == 'use_app_binary':
self.use_app_binary = config.getboolean(name, option)
elif option == 'suppress_limit_to_hosts_warnings':
self.suppress_limit_to_hosts_warnings = config.getboolean(name, option)
elif option == 'use_auth':
self.use_auth = json.loads(config_option)
elif option == 'pre_install':
self.pre_install = config.getboolean(name, option)
elif option == 'only_run_on_init':
self.only_run_on_init = config.getboolean(name, option)
elif option == 'cmd':
self.cmd = config_option
elif option == 'delay':
self.delay = int(config_option)
except Exception as e:
logger.errorout("Problem getting option from command conf",
name=name,
option=option,
module=COMMAND_MODULE_INIT,
error_msg=str(e))
def generate_limited_hosts(self, template_values):
host_groups = template_values["host_groups"]
exclude_hosts = [host for host_class in self.exclude_hosts
if host_class in host_groups["app_class"]
for host in host_groups["app_class"][host_class]]
limited_hosts = [host for host_class in self.limit_to_hosts
if host_class in host_groups["app_class"]
for host in host_groups["app_class"][host_class]] \
if len(self.limit_to_hosts) > 0 else host_groups["all"]
tvalue = dict(template_values)
tvalue["host_groups"]["limited_hosts"] = list(set(limited_hosts) - set(exclude_hosts))
return tvalue
def can_host_use(self, host):
"""Checks if command can be used by host"""
if host.app_class in self.exclude_hosts:
return False
valid = False
# Is host valid to run command
if len(self.limit_to_hosts) < 1 or host.app_class in self.limit_to_hosts:
valid = True
# Limit host to a site
if valid and len(self.limit_sites) > 0 and str(host.site) not in self.limit_sites:
valid = False
# Limit host to a index
if valid and len(self.limit_indexes) > 0 and str(host.host_index) not in self.limit_indexes:
valid = False
return valid
class SshAppAuth(object):
def __init__(self, config, name, options, template_values):
"""Auth handling"""
self.__postfix = ""
self.__postfix_filtered = ""
self.__postfix_reads = {}
self.__inputs = []
self.__delay = consts.REMOTE_AUTH_RUN_SLEEP_TIMER
self.__template_values = template_values
for option in options:
try:
config_option = config.get(name, option)
if option == 'postfix':
self.__prefix = config_option.strip('"\'')
self.__postfix_filtered = self.__prefix
# Find template values using regex
template_groups = re.findall("\\{\\{\\s*\\w+\\s*\\}\\}", self.__prefix, re.DOTALL)
if template_groups:
auth_kv = {}
for template_key in template_groups:
field = template_key.strip("{} ")
if field not in auth_kv:
auth_kv[field] = {"fields": []}
if template_key not in auth_kv[field]["fields"]:
auth_kv[field]["fields"].append(template_key)
# Replace cmd vars with locally generated vars which hide values
for k, v in auth_kv.items():
for j2_value in v["fields"]:
self.__postfix_filtered = self.__postfix_filtered.replace(j2_value, "$%s" % k)
self.__postfix_reads[k] = "{{ %s }}" % k
except Exception as e:
logger.errorout("Problem getting option from auth",
name=name,
module=COMMAND_MODULE_INIT,
error_msg=str(e))
@property
def postfix(self):
"""Return prefix with rendered values"""
return self.__render_values(self.__prefix)
@property
def postfix_filtered(self):
"""Return prefix with rendered values are rendered outside the prefix"""
return {"prefix": self.__postfix_filtered, "reads": {k: self.__render_values(v)
for k, v in self.__postfix_reads.items()}}
def __render_values(self, value):
"""Render values based on templates"""
return helpers.render_template(value.strip('"\''), self.__template_values)
class SshAppCommands(object):
"""Class to run stored cmd commands
Commands are loaded in from a file and stored for use.
These commands are reference from the deploymentmethods.conf.
"""
_commands = {}
__auth = {}
def __init__(self, commands_config, template_values):
"""Init Shh App Commands"""
self.template_values = template_values
# set xform for config otherwise text will be normalized to lowercase
self.config = helpers.get_config(commands_config)
self.load_commands()
def load_commands(self):
"""Load restricted cmd command"""
sections = self.config.sections()
index = 0
for section in sections:
options = self.config.options(section)
if section.startswith('auth'):
self.__auth[section] = SshAppAuth(self.config, section, options, self.template_values)
continue
self._commands[section] = SshAppCommand(self.config, section, options, index)
index += 1
if not set(MANDATORY_COMMAND_STANZAS) <= set(self._commands):
logger.errorout("Missing command stanza",
needed=MANDATORY_COMMAND_STANZAS,
module=COMMAND_MODULE_INIT)
def find_commands(self, command_name):
"""Basic command to find command"""
return self._commands[command_name] if command_name in self._commands \
else None
def enhance_commands(self, host, commands_list, templating_values):
"""Checks and enhances cmd with known listed commands and extra variables"""
tvalues = helpers.merge_templates(templating_values)
# preserve order while getting rid of dup entries
unique_list = []
[unique_list.append(single_command) for single_command in commands_list if single_command not in unique_list] # pylint: disable=expression-not-assigned
invalid_commands = [command for command in list(unique_list)
if command not in self._commands]
if len(invalid_commands) > 0:
logger.warning("Invalid command(s) found",
commands=invalid_commands,
module=COMMAND_MODULE_INIT)
return []
enhance_list = [self._commands[command] for command in unique_list]
filtered_commands = [{"cmd": helpers.render_template(command.cmd, command.generate_limited_hosts(tvalues)),
"command": command}
for command in enhance_list if not command.only_run_on_init or not host.manifest_found]
return filtered_commands
def get_cmd(self, ecommand, is_clean=False):
"""Generate cmd with full path"""
app_cmd = ecommand['cmd']
command = ecommand['command']
# Can only use auth with app binary
if command.use_app_binary:
app_cmd = "%s %s" % (os.path.join(CREDS.APP_DIR, CREDS.APP_BIN), app_cmd)
helpers.cmd_check(app_cmd)
reads = {}
if not is_clean:
if command.use_auth:
add_auth, reads = self.filtered_auth(command.use_auth)
app_cmd = "%s %s" % (app_cmd, add_auth)
if command.use_root:
app_cmd = "sudo %s" % app_cmd
return {"cmd": app_cmd, "reads": reads}
def filtered_auth(self, use_auth):
"""Return auths where reads are seperate"""
auth_prefixes = [self.__auth[auth].postfix_filtered for auth in use_auth]
prefixes = " ".join([auth["prefix"] for auth in auth_prefixes])
reads = {k: v for auth in auth_prefixes for k, v in auth["reads"].items()}
return prefixes, reads
def get_cmd_clean(self, ecommand):
"""Returns clean cmd command"""
return self.get_cmd(ecommand, is_clean=True)["cmd"]
def run_command(self, ecommand, host):
"""Run single stored command"""
command = ecommand['command']
# Check to see if host can run command
if not command.can_host_use(host):
if not command.suppress_limit_to_hosts_warnings:
logger.warn("Invalid host for command",
command=command.name,
hostname=host.hostname,
module=COMMAND_MODULE_INIT,
allowed_hosts=command.limit_to_hosts)
return False
# Call root is already taken applied in get_cmd
ssh_run = SshRun(host.hostname, host.ssh_hostname, "",
helpers.get_function_name(), False)
logger.info("SSH Started",
state=0,
hostname=host.hostname,
command=command.name,
module=COMMAND_MODULE_CUSTOM)
results = ssh_run.run_single(self.get_cmd(ecommand))
ssh_run.close_ssh_channel()
_log_rc(results,
"SSH Finished",
state=1,
auth=command.use_auth,
app_binary=command.use_app_binary,
hostname=host.hostname,
command=command.name,
cmd=self.get_cmd_clean(ecommand),
output=results,
module=COMMAND_MODULE_CUSTOM)
return True
class SshRun(object):
"""Class wraps ssh command to allow detailed logging and extendability"""
def __init__(self, hostname, ssh_hostname, filepath, function_name, is_root):
"""Init for ssh run class"""
self.ssh_hostname = ssh_hostname
self.hostname = hostname
self.filepath = filepath
self.function_name = function_name
self.is_root = is_root
self.ssh = None
self._ssh_cmds = []
@staticmethod
def get_ssh_client(hostname, ssh_hostname):
"""Tries to create ssh client
Create ssh client based on the username and ssh key
"""
if not CREDS.SSH_KEYFILE:
logger.errorout("ssh_keyfile not set",
module=COMMAND_MODULE_CUSTOM)
retries = 0
while retries < MAX_SSH_RETRIES:
try:
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname=ssh_hostname,
username=CREDS.SSH_USER,
port=CREDS.SSH_PORT,
pkey=CREDS.PK,
timeout=CONNECTION_TIMEOUT)
return ssh
except paramiko.BadAuthenticationType:
logger.error("BadAuthenticationType",
hostname=hostname,
module=COMMAND_MODULE_CUSTOM)
return
except paramiko.AuthenticationException:
logger.error("Authentication failed",
hostname=hostname,
module=COMMAND_MODULE_CUSTOM)
return
except paramiko.BadHostKeyException:
logger.error("BadHostKeyException",
fix="Edit known_hosts file to remove the entry",
hostname=hostname,
module=COMMAND_MODULE_CUSTOM)
return
except paramiko.SSHException:
logger.error("SSHException",
hostname=hostname,
module=COMMAND_MODULE_CUSTOM)
return
except Exception as e:
if retries == 0:
logger.error("Problems connecting to host",
hostname=hostname,
module=COMMAND_MODULE_CUSTOM,
error=e.message)
retries += 1
time.sleep(1)
logger.error("Can not connect to host",
hostname=hostname,
module=COMMAND_MODULE_CUSTOM)
return None
def add_cmd(self, cmd):
"""Adds a single command to the list"""
self._ssh_cmds.append(cmd)
def create_ssh_channel(self):
"""Crete a ssh channel for running command"""
if CREDS.DRY_RUN:
return True
if not self.ssh:
self.ssh = SshRun.get_ssh_client(self.hostname, self.ssh_hostname)
return self.ssh
def close_ssh_channel(self):
"""Close ssh channel if already open"""
if self.ssh and not CREDS.DRY_RUN:
self.ssh.close()
self.ssh = None
def run(self):
"""Runs the list of commands in order
This does not run a single session, each command is a seperate connection
"""
outputs = []
if not self.create_ssh_channel():
return
for cmd in self._ssh_cmds:
outputs.append(self.run_single(cmd, self.ssh))
self.close_ssh_channel()
run_obj = {
"rc": next((1 for output in outputs if output['rc'] > 0), 0),
"outputs": outputs
}
return run_obj
def _add_root(self, cmd):
"""If root is given, add to command"""
return "sudo %s" % cmd if self.is_root else cmd
def run_single(self, command, ssh=None):
"""Runs a single cmd command on the remote host
"""
if not ssh:
if not self.create_ssh_channel():
return {"rc": 1,
"stderror": "Error creating ssh channel",
"stdout": "",
"function": self.function_name}
ssh = self.ssh
reads = None
cmd = command
if isinstance(command, dict):
cmd = command['cmd']
reads = command['reads']
rc = 0
std_out = ""
std_error = ""
if not CREDS.DRY_RUN:
# Dangerous, only use if commands are filtered/protected
# Only commands either defined here or in the command.conf should
# run here.
if reads:
# Only use invoke shell if needed
channel = ssh.invoke_shell() # nosec
channel.settimeout(SESSION_SHELL_TIMEOUT)
# Remove any ssh login messages
send_command(channel, "")
read_commands = []
for param, value in reads.items():
read_commands.append("read -s %s" % param)
read_commands.append(value)
# Don't want to log any read commands
send_command(channel, read_commands)
std_out, std_error, rc = send_command(channel, self._add_root(cmd))
else:
stdin, stdout, stderr = ssh.exec_command(self._add_root(cmd), get_pty=True, timeout=SESSION_TIMEOUT) # nosec
rc = stdout.channel.recv_exit_status()
std_out = stdout.read()
std_error = stderr.read()
stdin.flush()
return {"stdout": std_out,
"stderror": std_error,
"function": self.function_name,
"rc": rc}
# Helper ssh function
def copy_to_host(host, remote_file, local_file, is_root=False):
"""Copy file to remote host
Function first copies file to remote user directory and then moving.
Final move depends to right to directory.
"""
_lpath, lfilename = os.path.split(local_file)
rpath, rfilename = os.path.split(remote_file)
local_filepath = os.path.join('./', lfilename)
if len(rfilename) < 1:
rfilename = lfilename
remote_file = os.path.join(rpath, rfilename)
if not os.path.isfile(local_file):
logger.error("File to copy does not exist",
file=local_file,
module=COMMAND_MODULE_CUSTOM)
return False
ssh_run = SshRun(host.hostname, host.ssh_hostname, local_file,
helpers.get_function_name(), is_root)
if not ssh_run.create_ssh_channel():
return False
success = True
if not CREDS.DRY_RUN:
retries = 0
success = False
while retries < MAX_SCP_RETRIES:
# Copies file to remote users directory
with SCPClient(ssh_run.ssh.get_transport()) as scp:
try:
scp.put(local_file, lfilename)
success = True
break
except Exception as e:
if _error_check(e.message, remote_file, host.hostname,
"copy_to_host"):
return
retries += 1
time.sleep(2)
if not success:
logger.error("Problem using scp",
hostname=host.hostname,
local_file=local_file,
module=COMMAND_MODULE_CUSTOM)
return False
if local_filepath != remote_file:
# Only copy file if not the same location
ssh_run.add_cmd("mkdir -p %s" % rpath)
ssh_run.add_cmd("mv %s %s" % (lfilename, remote_file))
if is_root:
# Make sure root permission are set if needed
ssh_run.add_cmd("chown root:root %s" % remote_file)
ssh_run.add_cmd("restorecon %s" % remote_file)
results = ssh_run.run()
_log_rc(results,
ssh_run.function_name,
hostname=host.hostname,
remote_file=remote_file,
local_file=local_file,
outputs=results['outputs'],
module=COMMAND_MODULE_BUILTIN)
return results['rc'] < 1
def untar(host, location, is_root):
"""Copy and untar file on remote host"""
_path, tar = os.path.split(host.tar_file)
func_name = helpers.get_function_name()
outcome = {'rc': 1}
tar_cmd = "tar -zxvf %s -C %s" % (tar, location)
if copy_to_host(host, "./", host.tar_file, False):
ssh_run = SshRun(host.hostname, host.ssh_hostname, host.tar_file, func_name, is_root)
# Untar bundle
ssh_run.add_cmd(tar_cmd)
# Remove old tar file
ssh_run.add_cmd("rm -rf ./%s" % tar)
outcome = ssh_run.run()
_log_rc(outcome,
func_name,
cmd=tar_cmd,
hostname=host.hostname,
content=outcome['outputs'][0]['stdout'].split('\r\n') if outcome['rc'] < 1 else "",
location=location,
module=COMMAND_MODULE_BUILTIN)
return True
def delete(host, remote_object, is_root=False, app_path_check=True):
"""Delete file/folder on remote host
Function limits deleting to only files in the application directory
"""
if not check_path(remote_object, app_path_check):
return False
results = run_cmd(host, "rm -rf %s" % remote_object, remote_object,
helpers.get_function_name(), is_root)
return results['rc'] < 1
def check_connection(host):
"""Check to see if the connection to the host is valid"""
results = run_cmd(host, "echo checking_connection")
return results['rc'] < 1
def rotate_logs(host, log_path, retention, is_root=False):
"""Function to rotate appetite logs"""
if not check_path(log_path):
return False
results = run_cmd(host, "find %s -type f -mtime +%d -delete" % (log_path, retention),
helpers.get_function_name(), log_path, is_root)
return results['rc'] < 1
def clear_files(host, app_path, file_regex, is_root=False):
"""Removes files from server"""
if not check_path(app_path):
return False
results = run_cmd(host, "rm -f %s " % os.path.join(app_path, file_regex),
helpers.get_function_name(), app_path, is_root)
return results['rc'] < 1
def get_file_content(host, remote_file, local_file, is_root=False):
"""Get content of file from remote host"""
helpers.create_path(local_file)
return run_cmd(host, "cat %s" % remote_file,
helpers.get_function_name(), remote_file, is_root, False)
def get_json_file(host, remote_file, local_file, is_root=False):
"""Get json file from remote host"""
if CREDS.DRY_RUN:
return False
file_content = get_file_content(host, remote_file, local_file, is_root)
if file_content['rc'] > 0:
return False
if not CREDS.DRY_RUN:
try:
json.loads(file_content['stdout'])
except ValueError as e:
if _error_check(e.message, remote_file, host.hostname,
"get_json_file"):
return False
with open(local_file, 'w') as f:
f.write(file_content['stdout'])
return True
def run_cmd(host, cmd, path="", func_name=None, is_root=False, show_stdout=True):
"""Generic function to run single commands"""
func_name = func_name if func_name else helpers.get_function_name()
ssh_run = SshRun(host.hostname, host.ssh_hostname, path,
func_name, is_root)
output = ssh_run.run_single(cmd)
ssh_run.close_ssh_channel()
# Clear stdout if needed
updated_output = output
if not show_stdout:
updated_output = output.copy()
updated_output['stdout'] = ""
_log_rc(output,
ssh_run.function_name,
hostname=host.hostname,
cmd=cmd,
module=COMMAND_MODULE_BUILTIN,
output=updated_output
)
return output
def check_path(remote_object, app_path_check=True):
"""Check if path is with in application directory"""
# Should only be allow to delete things the app directory
if app_path_check:
if not remote_object.startswith(CREDS.APP_DIR):
logger.warn("Can only delete files with in the app dir",
path=remote_object,
module=COMMAND_MODULE_CUSTOM,
path_check=app_path_check)
return False
return True
def send_command(channel, send_cmds, std_out=None, std_err=None):
    """Execute commands in an interactive shell"""
    # Prime the channel so the first prompt line can be captured and used
    # to filter shell noise out of the output.
    send_to_channel(channel, "\r")
    # Normalise to a list and send every command in order.
    cmds = send_cmds if isinstance(send_cmds, list) else [send_cmds]
    for cmd in cmds:
        send_to_channel(channel, "%s" % cmd)
    # Sentinel command marks the end of execution and exposes $?.
    send_to_channel(channel, "echo %s $?" % SESSION_SHELL_EXIT)
    # Wait for the full execution and collect output.
    stdout, stderr, rc = get_std_out_from_channel(channel)
    stderr += get_std_error_from_channel(channel)
    # Append to caller-supplied accumulators when provided (mutates them
    # in place only when they are mutable, e.g. lists).
    if std_out is not None:
        std_out += stdout
    if std_err is not None:
        std_err += stderr
    return stdout, stderr, rc
def send_to_channel(channel, cmd):
    """Send commands to an existing channel"""
    # Block until the channel is ready to accept data.
    while True:
        if channel.send_ready():
            break
        time.sleep(1)
    channel.send("%s\n" % cmd)
    # Give the remote shell a moment to process the line.
    time.sleep(1)
def get_std_error_from_channel(channel):
    """Get std Error from an existing channel"""
    chunks = []
    # Drain the stderr buffer completely, 1 KiB at a time.
    if channel.recv_stderr_ready():
        chunk = channel.recv_stderr(1024)
        while chunk:
            chunks.append(chunk)
            chunk = channel.recv_stderr(1024)
    return "".join(chunks)
def get_std_out_from_channel(channel):  # pylint: disable=too-many-branches,too-many-locals
    """Read all std out and filter content

    Reads an interactive shell session until the SESSION_SHELL_EXIT
    sentinel (sent by send_command) is seen, an overall timeout expires,
    or the channel stops responding.  Returns (stdout, stderr, rc).
    """
    stdout = ""
    stderr = ""
    # Two independent timers: total wall-clock time and time since the
    # channel last produced data.
    overall_time = {"secs": 0, "start_dt": datetime.datetime.now()}
    no_response_time = {"secs": 0, "start_dt": datetime.datetime.now()}
    rc = 0
    re_prompt_compiled = None
    all_cmd_parsed = False
    # Limit time exec can run
    while (overall_time["secs"] < SESSION_SHELL_TIMEOUT and
           no_response_time["secs"] < SESSION_RESPONSE_TIMEOUT and not all_cmd_parsed):
        # Timers to exit if response takes too long or unresponsive
        overall_time["secs"] = (datetime.datetime.now() - overall_time["start_dt"]).seconds
        no_response_time["secs"] = (datetime.datetime.now() - no_response_time["start_dt"]).seconds
        if channel.recv_ready():
            no_response_time["start_dt"] = datetime.datetime.now()
            # Lots of filtering since it is using an interactive shell:
            # strip ANSI escape sequences, backspaces and carriage returns.
            std_buff = re.sub(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]', '', channel.recv(9999)).replace('\b', '').replace('\r', '')
            lines = std_buff.split("\n")
            if not re_prompt_compiled:
                # The first non-empty line is assumed to be the shell prompt;
                # build a regex from it so it can be stripped from output.
                first_valid_line = next((line for line in lines if len(line) > 0), None)
                if first_valid_line:
                    # Exit out characters for regex and insert wildcard for path
                    re_prompt = re.sub(r'([\.\\\+\*\?\[\^\]\$\(\)\{\}\!\<\>\|\:\-])', r'\\\1', first_valid_line).replace("~", ".*")
                    # Compiled regex to remove bash prefix from commandline
                    re_prompt_compiled = re.compile(re_prompt)
            new_out = []
            if re_prompt_compiled:
                for line in lines:
                    # Remove bash prefix
                    bash_found = re_prompt_compiled.search(line)
                    new_line = re_prompt_compiled.sub('', line)
                    # Look for the exit token
                    if SESSION_SHELL_EXIT in new_line:
                        if 'echo' not in new_line:
                            # Found end of command
                            # NOTE(review): only the final character is parsed,
                            # so return codes > 9 are truncated (127 -> 7) —
                            # confirm whether this is intended.
                            rc = int(new_line[-1])
                            stdout += "\n".join(new_out)
                            all_cmd_parsed = True
                            break
                    elif not bash_found and len(new_line) > 0:
                        # Incase theres a continuation of the line like a load bar, might make output messy but better
                        # then having a huge amount of lines
                        if len(lines) == 1:
                            stdout += new_line
                        else:
                            new_out.append(new_line)
            if all_cmd_parsed:
                break
            stdout += "\n".join(new_out)
        time.sleep(1)
    # Report which limit was hit, if any.
    if overall_time["secs"] >= SESSION_SHELL_TIMEOUT:
        stderr += "Shell session timed out.\n"
    if no_response_time["secs"] >= SESSION_RESPONSE_TIMEOUT:
        stderr += "Shell session no response, could be waiting for input.\n"
    return stdout, stderr, rc
# Helper error checking
def _error_check(err_msg, remote_file, hostname, function_name):
    """Log a communication error and report whether one was found.

    Returns True when ``err_msg`` is non-empty (an error was logged),
    False otherwise.  The original implementation always fell through and
    returned None, so callers using ``if _error_check(...)`` never saw a
    truthy result even when an error had been logged.
    """
    if not err_msg:
        return False
    # Map the raw message onto a known error, defaulting to a generic one.
    error_msg = next((err for err in ERROR_MESSAGES if err in err_msg), "Communication Error")
    logger.error(error_msg,
                 function=function_name,
                 filename=remote_file,
                 hostname=hostname,
                 module=COMMAND_MODULE_CUSTOM)
    return True
def _log_rc(cmd_output, funct_name, **kvarg):
    """Generic logger that picks correct log type based on return code"""
    # ``cmd_output`` is either a result dict carrying 'rc' or a bare code.
    if 'rc' in cmd_output:
        rc = cmd_output['rc']
    else:
        rc = cmd_output
    level = logger.decide_level(rc)
    logger.log(level, funct_name, **kvarg)
| apache-2.0 |
bhenne/MoSP | mosp_examples/pause_wiggler.py | 1 | 1479 | #!/bin/env python
""" Pausing example: person is paused at every node
- random movement
- at every node the person stops for 20 ticks
- uses pause_movement and Simulation.person_alarm_clock for waking up
(could alternativly be implemented using a special Location at every node)
- output to visual player, which is executed as child process
"""
import sys
sys.path.append("..")
from mosp.core import Simulation, Person
from mosp.geo import osm
from mosp.impl import movement
from mosp.monitors import ChildprocessPlayerChamplainMonitor, SocketPlayerMonitor
__author__ = "B. Henne"
__contact__ = "henne@dcsec.uni-hannover.de"
__copyright__ = "(c) 2011, DCSec, Leibniz Universitaet Hannover, Germany"
__license__ = "GPLv3"
class PauseWiggler(Person):
    """Implements a person with random movement pausing at any node.
    @author: B. Henne"""
    # Movement strategy: pick the next target at random.
    next_target = movement.person_next_target_random
    def act_at_node(self, node):
        """Implementation of act_at_node: person paused at any node for 20 ticks."""
        # pause_movement() suspends this person; Simulation.person_alarm_clock
        # wakes it up again after 20 ticks (see module docstring).
        self.pause_movement(20)
def main():
    """Defines the simulation, map, monitors, persons."""
    simulation = Simulation(geo=osm.OSMModel('../data/minimap1.osm'), rel_speed=20)
    # Alternative visual output:
    #monitor = simulation.add_monitor(ChildprocessPlayerChamplainMonitor, 2)
    monitor = simulation.add_monitor(SocketPlayerMonitor, 2)
    simulation.add_persons(PauseWiggler, 1, monitor=monitor)
    simulation.run(until=500000, real_time=True, monitor=True)
if __name__ == '__main__':
main()
| gpl-3.0 |
Knotis/django-oauth-toolkit | oauth2_provider/tests/settings.py | 14 | 2859 | import os
# Test-only Django settings for the oauth2_provider test suite.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = ()
MANAGERS = ADMINS
# Throwaway on-disk SQLite database.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'example.sqlite',
    }
}
ALLOWED_HOSTS = []
# Localisation settings.
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Media / static file settings (unused by the tests, kept minimal).
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
# (Hard-coded here because this settings module is for tests only.)
SECRET_KEY = "1234567890evonove"
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'oauth2_provider.tests.urls'
TEMPLATE_DIRS = ()
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'oauth2_provider',
    'oauth2_provider.tests',
)
# Route request errors to admins by mail, silence oauth2_provider's own
# logging during the test run.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'oauth2_provider': {
            'handlers': ['null'],
            'level': 'DEBUG',
            'propagate': True,
        },
    }
}
OAUTH2_PROVIDER = {
    '_SCOPES': ['example']
}
# Django < 1.6 needs the external discover runner; newer versions ship it.
import django
if django.VERSION[:2] < (1, 6):
    TEST_RUNNER = 'discover_runner.DiscoverRunner'
    INSTALLED_APPS += ('discover_runner',)
else:
    TEST_RUNNER = 'django.test.runner.DiscoverRunner'
| bsd-2-clause |
mouradmourafiq/django-calendar | calendars/models/cals.py | 1 | 20998 | # -*- coding: utf-8 -*-
'''
Created on Mar 20, 2011
@author: Mourad Mourafiq
@copyright: Copyright © 2011
other contributers:
'''
import os
import heapq
from django.db import models
from django.conf import settings
from django.template.defaultfilters import truncatewords
from django.contrib.auth.models import User
from datetime import datetime, timedelta
from django.core.urlresolvers import reverse
from dateutil import rrule
from django.utils.translation import ugettext, ugettext_lazy as _
from django.template.defaultfilters import date
from django.db.models import signals
from django.template.defaultfilters import timesince
from calendars.managers import ActiveManager
from calendars.settings import *
from calendars.files import get_attachment_path
from calendars.models.recursions import Recursion
if "djangosphinx" in settings.INSTALLED_APPS:
from djangosphinx.models import SphinxSearch
else:
SphinxSearch = None
class Event(models.Model):
    """
    Event model
    Contains all the basics about events,
    including optional recursion via a Recursion rule and an
    end_recurring_period.  Occurrences are generated on the fly from the
    recursion rule and merged with persisted Occurrence rows.
    """
    author = models.ForeignKey(User, blank=True, null=True, related_name="created_events")
    title = models.CharField(max_length=140, verbose_name=_('Title'),
                             blank=False)
    slug = models.SlugField(max_length=140, verbose_name=_('slug'),
                            help_text=_('Letters, numbers, underscore and hyphen.'
                                        ' Do not use reserved words \'create\','
                                        ' \'history\' and \'edit\'.'), blank=True)
    created_at = models.DateTimeField(_('created at'), default=datetime.now)
    modified_on = models.DateTimeField(_('modified on'), default=datetime.now)
    is_active = models.BooleanField(default=True)
    # Attendees, linked through Calendar (which carries RSVP status).
    users = models.ManyToManyField(User, related_name="events", through='Calendar')
    start = models.DateTimeField(_("start"))
    end = models.DateTimeField(_("end"), blank=True, null=True, help_text=_("The end time must be later than the start time."))
    allDay = models.BooleanField(default=False)
    category = models.CharField(max_length=1, choices=EVENT_CATEGORY)
    priority = models.CharField(max_length=1, choices=EVENT_PRIORITY)
    # Optional recursion rule; None means a one-time event.
    recursion = models.ForeignKey(Recursion, null=True, blank=True, verbose_name=_("recursion"))
    end_recurring_period = models.DateTimeField(_("end recurring period"),
                                                null=True, blank=True,
                                                help_text=_("This date is ignored for one time only events."))
    active = ActiveManager()
    objects = models.Manager()
    # Optional Sphinx full-text search over title/slug (only when the
    # djangosphinx app is installed).
    if SphinxSearch:
        search_events = SphinxSearch(
            index='event event_delta',
            weights={
                'title': 100,
                'slug': 100,
            },
        )
    class Meta:
        verbose_name = _('Event')
        verbose_name_plural = _('Events')
        app_label = 'calendars'
    def __unicode__(self):
        return self.title
    def get_indiv_cal(self):
        # Renders an HTML snippet linking the author profile and the event.
        return '<a href= "%s" data-analytic="profile" data-tooltip="user"> \
                %s %s </a>:\
                <h3><a class="cal_title" href= "%s" > %s </a></h3>' % (
            reverse('profiles_profile_detail', args=[self.author.username]),
            self.author.first_name,
            self.author.last_name,
            self.get_absolute_url(),
            self.title,)
    def attachments(self):
        # All attachments for this event.
        return AttachmentEvent.objects.filter(event__exact=self)
    def attachment_profile(self):
        # URL of the most recently uploaded attachment's thumbnail, or
        # False when the event has no attachments.
        attachments = AttachmentEvent.objects.filter(event__exact=self).order_by('-uploaded_on')
        if attachments.count() > 0:
            return attachments[0].thumbnail.url
        else:
            return False
    def get_url(self):
        """Return the cal URL for an article"""
        return self.slug
    @models.permalink
    def get_absolute_url(self):
        url = 'event_view'
        return (url, [self.get_url()])
    @models.permalink
    def get_edit_url(self):
        url = 'event_edit'
        return (url, [self.get_url()])
    @models.permalink
    def get_upload_photo_url(self):
        url = 'event_upload_photo'
        return (url, [self.get_url()])
    @models.permalink
    def get_cancel_url(self):
        url = 'event_cancel'
        return (url, [self.get_url()])
    @models.permalink
    def get_reactivate_url(self):
        url = 'event_reactivate'
        return (url, [self.get_url()])
    def get_occurrences(self, start, end):
        """
        >>> recursion = Recursion(frequency = "MONTHLY", name = "Monthly")
        >>> recursion.save()
        >>> event = Event(recursion=recursion, start=datetime.datetime(2008,1,1), end=datetime.datetime(2008,1,2))
        >>> event.recursion
        <recursion: Monthly>
        >>> occurrences = event.get_occurrences(datetime.datetime(2008,1,24), datetime.datetime(2008,3,2))
        >>> ["%s to %s" %(o.start, o.end) for o in occurrences]
        ['2008-02-01 00:00:00 to 2008-02-02 00:00:00', '2008-03-01 00:00:00 to 2008-03-02 00:00:00']
        Ensure that if an event has no recursion, that it appears only once.
        >>> event = Event(start=datetime.datetime(2008,1,1,8,0), end=datetime.datetime(2008,1,1,9,0))
        >>> occurrences = event.get_occurrences(datetime.datetime(2008,1,24), datetime.datetime(2008,3,2))
        >>> ["%s to %s" %(o.start, o.end) for o in occurrences]
        []
        """
        persisted_occurrences = self.occurrence_set.all()
        occ_replacer = OccurrenceReplacer(persisted_occurrences)
        occurrences = self._get_occurrence_list(start, end)
        final_occurrences = []
        for occ in occurrences:
            # replace occurrences with their persisted counterparts
            if occ_replacer.has_occurrence(occ):
                p_occ = occ_replacer.get_occurrence(
                    occ)
                # ...but only if they are within this period
                if p_occ.start < end and p_occ.end >= start:
                    final_occurrences.append(p_occ)
            else:
                final_occurrences.append(occ)
        # then add persisted occurrences which originated outside of this period but now
        # fall within it
        final_occurrences += occ_replacer.get_additional_occurrences(start, end)
        return final_occurrences
    def get_rrule_object(self):
        # Build a dateutil rrule from the event's Recursion row; None when
        # the event does not recur.
        if self.recursion is not None:
            params = self.recursion.get_params()
            frequency = 'rrule.%s' % self.recursion.frequency
            return rrule.rrule(eval(frequency), dtstart=self.start, **params)
    def _create_occurrence(self, start, end=None):
        # Build an unpersisted Occurrence; end defaults to the event's
        # duration added to ``start``.
        if end is None:
            end = start + (self.end - self.start)
        return Occurrence(event=self, start=start, end=end, original_start=start, original_end=end)
    def get_occurrence(self, date):
        # Return the persisted or generated occurrence starting exactly at
        # ``date``; implicitly returns None when no occurrence starts then.
        rule = self.get_rrule_object()
        if rule:
            next_occurrence = rule.after(date, inc=True)
        else:
            next_occurrence = self.start
        if next_occurrence == date:
            try:
                return Occurrence.objects.get(event=self, original_start=date)
            except Occurrence.DoesNotExist:
                return self._create_occurrence(next_occurrence)
    def has_occurrence(self, date):
        # Persisted occurrence starting at ``date``, or None.
        try:
            return Occurrence.objects.get(event=self, original_start=date)
        except Occurrence.DoesNotExist:
            return None
    def _get_occurrence_list(self, start, end):
        """
        returns a list of occurrences for this event from start to end.
        """
        difference = (self.end - self.start)
        if self.recursion is not None:
            occurrences = []
            if self.end_recurring_period and self.end_recurring_period < end:
                end = self.end_recurring_period
            rule = self.get_rrule_object()
            # Widen the window by the event duration so occurrences that
            # started before ``start`` but overlap it are included.
            o_starts = rule.between(start - difference, end, inc=True)
            # #check if the first occurrence doesn't much the original event, if so append the original
            # if not self.start in o_starts:
            #     # check if event is in the period
            #     if self.start < end and self.end >= start:
            #         return [self._create_occurrence(self.start)]
            #continue with normal occurrences
            for o_start in o_starts:
                o_end = o_start + difference
                occurrences.append(self._create_occurrence(o_start, o_end))
            return occurrences
        else:
            # check if event is in the period
            if self.start < end and self.end >= start:
                return [self._create_occurrence(self.start)]
            else:
                return []
    def _occurrences_after_generator(self, after=None):
        """
        returns a generator that produces unpresisted occurrences after the
        datetime ``after``.
        """
        if after is None:
            after = datetime.now()
        rule = self.get_rrule_object()
        if rule is None:
            if self.end > after:
                yield self._create_occurrence(self.start, self.end)
            raise StopIteration
        date_iter = iter(rule)
        difference = self.end - self.start
        while True:
            # NOTE: ``.next()`` / raise StopIteration inside a generator is
            # Python-2 style (would break under PEP 479 on Python 3.7+).
            o_start = date_iter.next()
            if o_start > self.end_recurring_period:
                raise StopIteration
            o_end = o_start + difference
            if o_end > after:
                yield self._create_occurrence(o_start, o_end)
    def occurrences_after(self, after=None):
        """
        returns a generator that produces occurrences after the datetime
        ``after``. Includes all of the persisted Occurrences.
        """
        occ_replacer = OccurrenceReplacer(self.occurrence_set.all())
        generator = self._occurrences_after_generator(after)
        while True:
            next = generator.next()
            yield occ_replacer.get_occurrence(next)
class AttachmentEvent(models.Model):
    """An image attached to an Event, with an auto-generated thumbnail."""
    event = models.ForeignKey(Event, verbose_name=_('Event'))
    picture = models.ImageField(upload_to=get_attachment_path, default=DEFAULT_PICTURE, blank=True, null=True)
    # Derived from ``picture`` in save(); never edited directly.
    thumbnail = models.ImageField(upload_to='uploads/thumbs/cals/', blank=True, null=True,
                                  editable=False)
    uploaded_by = models.ForeignKey(User, blank=True, verbose_name=_('Uploaded by'), null=True)
    uploaded_on = models.DateTimeField(default=datetime.now, verbose_name=_('Upload date'))
    class Meta:
        app_label = 'calendars'
    def save(self, force_insert=False, force_update=False):
        """Save the attachment, (re)building the thumbnail when needed.

        The thumbnail is rebuilt when it is missing, when either file is
        missing on disk, or when the picture file is newer than the
        existing thumbnail file.
        """
        thumb_update = False
        if self.thumbnail:
            try:
                if self.picture:
                    # Compare modification times explicitly.  The original
                    # code compared whole stat_result tuples, which start
                    # with st_mode and so do not reflect file age.
                    if os.stat(self.picture.path).st_mtime > os.stat(self.thumbnail.path).st_mtime:
                        thumb_update = True
                else:
                    self.picture = DEFAULT_PICTURE
                    thumb_update = True
            except OSError:
                # One of the files is missing on disk; force a rebuild.
                thumb_update = True
        if (self.picture and not self.thumbnail) or thumb_update:
            from PIL import Image
            THUMB_SIZE = (200, 200)
            image = Image.open(self.picture)
            if image.mode not in ('L', 'RGB'):
                image = image.convert('RGB')
            image.thumbnail(THUMB_SIZE, Image.ANTIALIAS)
            (head, tail) = os.path.split(self.picture.path)
            (a, b) = os.path.split(self.picture.name)
            if not os.path.isdir(head + '/uploads/thumbs/cals'):
                os.mkdir(head + '/uploads/thumbs/cals')
            image.save(head + '/uploads/thumbs/cals/' + tail)
            self.thumbnail = 'uploads/thumbs/cals/' + b
        # Forward the force flags instead of silently dropping them.
        super(AttachmentEvent, self).save(force_insert, force_update)
class Stat(models.Model):
    """
    A cal to be planned with friends

    Tracks invitation/acceptance state for an Event: how many guests have
    accepted (acception_bar), the allowed guest range, and whether the
    event is closed, stopped (full) or valid (minimum reached).
    """
    event = models.ForeignKey(Event)
    # Users still allowed to join.
    can_join = models.ManyToManyField(User, blank=True, null=True, related_name='join')
    # Current number of accepted guests.
    acception_bar = models.IntegerField(default=0)
    min_number_guests = models.IntegerField(default=0)
    max_number_guests = models.IntegerField(default=0)
    close = models.BooleanField(default=False)
    # True once max_number_guests is reached (no further acceptances).
    stopped = models.BooleanField(default=False)
    # True once min_number_guests is reached.
    valid = models.BooleanField(default=False)
    class Meta:
        verbose_name = _('Stat')
        verbose_name_plural = _('Stats')
        app_label = 'calendars'
class Calendar(models.Model):
    """
    A manytomany relationship between the Event and
    the Calender

    Through-model linking a User to an Event, carrying the user's RSVP
    status and updating the shared Stat counters on accept/refuse/cancel.
    """
    event = models.ForeignKey(Event)
    user = models.ForeignKey(User)
    status = models.CharField(max_length=1, verbose_name=_('RSPV status'),
                              choices=RSPV_STATUS, null=True, blank=True)
    is_guest = models.BooleanField(default=False)
    stats = models.ForeignKey(Stat)
    class Meta:
        verbose_name = _('Calendar')
        verbose_name_plural = _('Calendars')
        app_label = 'calendars'
    def accept(self):
        """when a user accept a cal invitation"""
        # Only counts when acceptance is still open and not already accepted.
        if not self.stats.stopped and self.status != RSPV_YES:
            self.stats.acception_bar = self.stats.acception_bar + 1
            # Reaching the minimum makes the event valid; reaching the
            # maximum stops further acceptances.
            if self.stats.acception_bar >= self.stats.min_number_guests and not self.stats.valid:
                self.stats.valid = True
            if self.stats.acception_bar == self.stats.max_number_guests and not self.stats.stopped:
                self.stats.stopped = True
            self.status = RSPV_YES
            self.stats.save()
            self.save()
            return True
        else:
            return False
    def maybe_accept(self):
        """when a user is may be attending an invitation"""
        # A previous firm acceptance must be cancelled first so the
        # counters stay consistent.
        if self.status == RSPV_YES:
            return self.cancel(RSPV_MAYBE)
        else:
            self.status = RSPV_MAYBE
            self.stats.save()
            self.save()
    def refuse(self):
        """when a user decline an invitation"""
        if self.status == RSPV_YES:
            return self.cancel(RSPV_NO)
        else:
            self.status = RSPV_NO
            self.stats.save()
            self.save()
    def cancel(self, status=RSPV_NO):
        """cancel a confirmed invitation

        Decrements the acceptance counter and re-evaluates the valid /
        stopped flags before recording the new status.
        """
        if self.stats.acception_bar > 0:
            self.stats.acception_bar = self.stats.acception_bar - 1
        if self.stats.acception_bar < self.stats.min_number_guests and self.stats.valid:
            self.stats.valid = False
        if self.stats.acception_bar < self.stats.max_number_guests and self.stats.stopped:
            self.stats.stopped = False
        self.status = status
        self.stats.save()
        self.save()
class Occurrence(models.Model):
    """A single (possibly moved or cancelled) occurrence of an Event.

    ``original_start``/``original_end`` record where the recursion rule
    generated the occurrence; ``start``/``end`` may differ after move().
    """
    event = models.ForeignKey(Event, verbose_name=_("event"))
    start = models.DateTimeField(_("start"))
    end = models.DateTimeField(_("end"))
    cancelled = models.BooleanField(_("cancelled"), default=False)
    original_start = models.DateTimeField(_("original start"))
    original_end = models.DateTimeField(_("original end"))
    class Meta:
        verbose_name = _("occurrence")
        verbose_name_plural = _("occurrences")
        app_label = 'calendars'
    def moved(self):
        # True when the occurrence was rescheduled away from its
        # rule-generated slot.
        return self.original_start != self.start or self.original_end != self.end
    moved = property(moved)
    def move(self, new_start, new_end):
        """Reschedule this occurrence and persist the change."""
        self.start = new_start
        self.end = new_end
        self.save()
    def cancel(self):
        """Mark this occurrence as cancelled."""
        self.cancelled = True
        self.save()
    def uncancel(self):
        """Re-activate a previously cancelled occurrence."""
        self.cancelled = False
        self.save()
    def get_absolute_url(self):
        # Persisted occurrences resolve by pk; unpersisted ones encode
        # their start datetime in the query string.
        if self.pk is not None:
            return reverse('occurrence_view', kwargs={'occurrence_id': self.pk})
        query_string = '?'
        qs_parts = ['year=%d', 'month=%d', 'day=%d', 'hour=%d', 'minute=%d', 'second=%d']
        qs_vars = (self.start.year, self.start.month, self.start.day,
                   self.start.hour, self.start.minute, self.start.second)
        query_string += '&'.join(qs_parts[:6]) % qs_vars[:6]
        return '/occurrence/%(url)s/%(start)s' % {'url': self.event.get_url(),
                                                  'start': query_string, }
    def get_cancel_url(self):
        if self.pk is not None:
            return reverse('occurrence_cancel', kwargs={'occurrence_id': self.pk})
        query_string = '?'
        qs_parts = ['year=%d', 'month=%d', 'day=%d', 'hour=%d', 'minute=%d', 'second=%d']
        qs_vars = (self.start.year, self.start.month, self.start.day,
                   self.start.hour, self.start.minute, self.start.second)
        query_string += '&'.join(qs_parts[:6]) % qs_vars[:6]
        return '/occurrence/%(url)s/_cancel/%(start)s' % {'url': self.event.get_url(),
                                                          'start': query_string, }
    def get_reactivate_url(self):
        # Only persisted occurrences can be reactivated (returns None
        # otherwise).
        if self.pk is not None:
            return reverse('occurrence_reactivate', kwargs={'occurrence_id': self.pk})
    def get_edit_url(self):
        if self.pk is not None:
            return reverse('occurrence_edit', kwargs={'occurrence_id': self.pk, })
        query_string = '?'
        qs_parts = ['year=%d', 'month=%d', 'day=%d', 'hour=%d', 'minute=%d', 'second=%d']
        qs_vars = (self.start.year, self.start.month, self.start.day,
                   self.start.hour, self.start.minute, self.start.second)
        query_string += '&'.join(qs_parts[:6]) % qs_vars[:6]
        return '/occurrence/%(url)s/_edit/%(start)s' % {'url': self.event.get_url(),
                                                        'start': query_string, }
    def __unicode__(self):
        return ugettext("%(start)s to %(end)s") % {
            'start': self.start,
            'end': self.end,
        }
    def __cmp__(self, other):
        # Python-2-only ordering hook: sort by start, then end.
        rank = cmp(self.start, other.start)
        if rank == 0:
            return cmp(self.end, other.end)
        return rank
    def __eq__(self, other):
        # NOTE(review): __eq__ without a matching __hash__ — on Python 3
        # this would make instances unhashable; confirm if ever ported.
        return self.event == other.event and self.original_start == other.original_start and self.original_end == other.original_end
class OccurrenceReplacer(object):
    """
    Swap generated occurrences for their persisted counterparts.

    When getting a list of occurrences, any occurrence that has been
    stored in the database must replace the equivalent generated one in
    the list being returned.  The lookup is keyed on
    (event, original_start, original_end).
    """
    def __init__(self, persisted_occurrences):
        self.lookup = dict(
            ((occ.event, occ.original_start, occ.original_end), occ)
            for occ in persisted_occurrences)
    def get_occurrence(self, occ):
        """
        Return the persisted occurrence matching ``occ`` — removing it
        from the lookup since it has already been matched — or ``occ``
        itself when no persisted counterpart exists.
        """
        key = (occ.event, occ.original_start, occ.original_end)
        return self.lookup.pop(key, occ)
    def has_occurrence(self, occ):
        """Report whether a persisted counterpart of ``occ`` is pending."""
        key = (occ.event, occ.original_start, occ.original_end)
        return key in self.lookup
    def get_additional_occurrences(self, start, end):
        """
        Return persisted occurrences which are now in the period
        """
        additional = []
        for occ in self.lookup.values():
            if occ.start < end and occ.end >= start and not occ.cancelled:
                additional.append(occ)
        return additional
class EventListManager(object):
    """
    This class is responsible for doing functions on a list of events. It is
    used to when one has a list of events and wants to access the occurrences
    from these events in as a group
    """
    def __init__(self, events):
        self.events = events
    def occurrences_after(self, after=None):
        """
        It is often useful to know what the next occurrence is given a list of
        events. This function produces a generator that yields the
        the most recent occurrence after the date ``after`` from any of the
        events in ``self.events``

        Implemented as a k-way merge: one generator per event, kept in a
        heap ordered by the next occurrence of each.
        """
        if after is None:
            after = datetime.now()
        # NOTE(review): the filter field 'eventcal__in' does not match the
        # Occurrence.event FK defined above — verify against the schema
        # (looks like it should be event__in).
        occ_replacer = OccurrenceReplacer(
            Occurrence.objects.filter(eventcal__in=self.events))
        generators = [event._occurrences_after_generator(after) for event in self.events]
        occurrences = []
        # Seed the heap with the first occurrence of each event.
        for generator in generators:
            try:
                heapq.heappush(occurrences, (generator.next(), generator))
            except StopIteration:
                pass
        while True:
            if len(occurrences) == 0: raise StopIteration
            generator = occurrences[0][1]
            try:
                # Pop the earliest occurrence and refill from its generator.
                next = heapq.heapreplace(occurrences, (generator.next(), generator))[0]
            except StopIteration:
                next = heapq.heappop(occurrences)[0]
            yield occ_replacer.get_occurrence(next)
| bsd-2-clause |
ezralanglois/arachnid | arachnid/core/gui/property/ButtonDelegate.py | 1 | 10446 | ''' Defines a set of delegates to display widgets in a table/tree cell
.. Created on Dec 11, 2010
.. codeauthor:: Robert Langlois <rl2528@columbia.edu>
'''
#from ..dialogs.WorkflowDialog import Dialog as WorkflowDialog
from ..util.qt4_loader import QtGui,QtCore, qtSignal
import os, logging
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
class CheckboxWidget(QtGui.QWidget):
    ''' Display a checkbox in a table/tree cell
    '''
    def __init__(self, parent=None):
        '''Initialize a checkbox cell widget

        Lays out a QCheckBox followed by an expanding spacer so the box
        stays left-aligned inside the cell.

        :Parameters:

        parent : QObject
                 Parent of the checkbox widget
        '''
        QtGui.QWidget.__init__(self, parent)
        #self.setStyleSheet("QWidget { background-color: White }")
        self.button = QtGui.QCheckBox(self)
        self.spacer = QtGui.QSpacerItem(10, 14, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.formLayout = QtGui.QFormLayout(self)
        self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.ExpandingFieldsGrow)#FieldsStayAtSizeHint)
        self.formLayout.setLabelAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)#QtCore.Qt.AlignLeading|
        self.formLayout.setFormAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)#QtCore.Qt.AlignLeading|
        self.formLayout.setContentsMargins(0, 0, 0, 0)
        self.formLayout.setMargin(0)
        self.formLayout.setObjectName("formLayout")
        self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.button)
        self.formLayout.setItem(0, QtGui.QFormLayout.FieldRole, self.spacer)
        # StrongFocus so the checkbox is reachable by keyboard and click.
        self.button.setFocusPolicy(QtCore.Qt.StrongFocus)
class DialogWidget(QtGui.QWidget):
    ''' Abtract class to create a button, which displays a dialog

    Subclasses override showDialog(); the button's action triggers it.
    With ``keep_editor`` a QLineEdit is shown next to the button,
    otherwise the button is pushed right by an expanding spacer.
    '''
    editFinished = qtSignal()
    def __init__(self, parent=None, icon=None, keep_editor=False):
        '''Initialize a dialog-button cell widget

        :Parameters:

        parent : QObject
                 Parent of the checkbox widget
        icon : QIcon
               Icon for the button
        keep_editor : bool
                      Keep the text editor
        '''
        QtGui.QWidget.__init__(self, parent)
        if icon is None: icon = ":/mini/mini/folder.png"
        self.button = QtGui.QToolButton(self)
        self.action = QtGui.QAction(QtGui.QIcon(icon), "", self)
        self.button.setDefaultAction(self.action)
        if keep_editor:
            # Editable text field plus the dialog button.
            self.layout = QtGui.QHBoxLayout(self)
            self.layout.setObjectName("dialogLayout")
            self.layout.setContentsMargins(0, 0, 5, 0)
            self.field = QtGui.QLineEdit(self)
            self.layout.addWidget(self.field)
            self.layout.addWidget(self.button)
        else:
            # Button only, right-aligned via an expanding spacer.
            self.spacer = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
            self.formLayout = QtGui.QFormLayout(self)
            self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.ExpandingFieldsGrow)#FieldsStayAtSizeHint)
            self.formLayout.setLabelAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
            self.formLayout.setFormAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignRight|QtCore.Qt.AlignTop)
            self.formLayout.setContentsMargins(0, 0, 5, 0)
            self.formLayout.setObjectName("formLayout")
            self.formLayout.setItem(0, QtGui.QFormLayout.LabelRole, self.spacer)
            self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.button)
        self.button.setFocusPolicy(QtCore.Qt.StrongFocus)
        # Old-style signal connection: clicking the button opens the dialog.
        self.connect(self.action, QtCore.SIGNAL("triggered()"), self.showDialog)
    def showDialog(self):
        ''' Display a dialog (abstract)
        '''
        pass
"""
class WorkflowWidget(DialogWidget):
''' Create a button and display a workflow dialog on press
:Parameters:
operations : list
List of options
parent : QObject
Parent of the checkbox widget
'''
operationsUpdated = qtSignal('PyQt_PyObject')
def __init__(self, operations, parent=None):
"Initialize a font dialog"
DialogWidget.__init__(self, parent, ':/mini/mini/script_edit.png')
self.workflow_ops = []
self.dialog = WorkflowDialog(operations, parent)
self.connect(self.dialog, QtCore.SIGNAL('operationsUpdated(PyQt_PyObject)'), self.onOperationsUpdated)
def showDialog(self):
''' Display a workflow dialog
'''
self.dialog.open(self.workflow_ops)
def onOperationsUpdated(self, items):
'''Emit operationsUpdated signal whem the operations dialog
is closed.
:Parameters:
items : list
List of operations
'''
self.setWorkflow(items)
self.operationsUpdated.emit(items)
def setWorkflow(self, items):
''' Set the list of current operations
:Parameters:
items : list
List of operations
'''
self.workflow_ops = items
def workflow(self):
''' Get the list of current operations
:Returns:
items : list
List of operations
'''
return self.workflow_ops
"""
class FontDialogWidget(DialogWidget):
    ''' Create a button and display font dialog on press
    '''
    fontChanged = qtSignal(QtGui.QFont)
    def __init__(self, parent=None):
        '''Initialize a font dialog

        :Parameters:

        parent : QObject
                 Parent of the checkbox widget
        '''
        DialogWidget.__init__(self, parent)
        # Currently selected font; seeds the dialog and is updated on accept.
        self.font = QtGui.QFont()
    def showDialog(self):
        ''' Display a QFontDialog
        '''
        _logger.debug("Show dialog")
        # NOTE(review): getFont here unpacks (font, ok); PyQt/PySide
        # versions differ in the tuple order — confirm against the bound
        # Qt wrapper (qt4_loader).
        curfont, ok = QtGui.QFontDialog.getFont(self.font, None, "Label Font")
        if ok:
            self.font = curfont
            self.fontChanged.emit(self.font)
            self.editFinished.emit()
    def setCurrentFont(self, font):
        ''' Set the current font

        :Parameters:

        font : QFont
               Font to display
        '''
        self.font = font
    def selectedFont(self):
        ''' Get the current font

        :Returns:

        font : QFont
               Selected font
        '''
        return self.font
class FileDialogWidget(DialogWidget):
''' Create a button and display file dialog on press
'''
fileChanged = qtSignal(object)
def __init__(self, stype, filter="", path="", parent=None):
'''Initialize a font dialog
:Parameters:
stype : str
Type of file dialog: open or save
filter : str
Semi-colon separated list of file filters
path : str
Starting directory for file dialog
parent : QObject
Parent of the checkbox widget
'''
DialogWidget.__init__(self, parent, keep_editor=True)
self.filename = ""
self.filter = filter
self.path = path
self.filetype = stype
self.field.setText(self.filename)
self.field_text = None
self.field.editingFinished.connect(self.updateFilename)
def showDialog(self):
''' Display a file dialog
'''
_logger.debug("Show dialog %s"%self.filetype)
if self.filetype == 'file-list':
filenames = QtGui.QFileDialog.getOpenFileNames(None, 'Open files', self.path, self.filter)
if isinstance(filenames, tuple): filenames = filenames[0]
if hasattr(self.filename, 'make'):
filename = self.filename.make(filenames)
else:
filename = self.filename.__class__(filenames)
elif self.filetype == 'open':
filename = QtGui.QFileDialog.getOpenFileName(None, 'Open file', self.path, self.filter)
if isinstance(filename, tuple): filename = filename[0]
else:
filename = QtGui.QFileDialog.getSaveFileName(None, 'Save file', self.path, self.filter)
if isinstance(filename, tuple): filename = filename[0]
if filename:
self.filename = filename
self.fileChanged.emit(self.filename)
self.editFinished.emit()
def updateFilename(self):
''' Update the filename from the line edit
'''
filename = self.field.text()
if filename == self.field_text: return
if self.filetype == 'file-list' and filename.find(',') != -1:
filename = filename.split(",")[0]
if not os.path.isdir(filename):
self.path = os.path.dirname(str(filename))
else: self.path = filename
if self.filetype == 'open' and not os.path.exists(filename) and filename != "" and 1 == 0:
self.field.setText("")
self.showDialog()
elif self.field.text() != "":
filename = self.field.text()
if hasattr(self.filename, 'make'):
filename = self.filename.make(filename)
else:
filename = self.filename.__class__(filename)
self.filename = filename
self.fileChanged.emit(self.filename)
self.editFinished.emit()
def setCurrentFilename(self, filename):
    ''' Set the current filename

    :Parameters:

    filename : str
               Filename (or list of filenames) to display
    '''
    self.filename = filename
    # A list of files is represented in the widget by its first entry.
    if isinstance(filename, list):
        shown = filename[0]
    else:
        shown = filename
    if os.path.isdir(shown):
        self.path = shown
    else:
        self.path = os.path.dirname(shown)
    self.field.setText(shown)
    self.field_text = shown
def selectedFilename(self):
    ''' Get the current filename

    :Returns:

    filename : str
               Selected filename
    '''
    current = self.filename
    return current
| gpl-2.0 |
ZhangXinNan/tensorflow | tensorflow/contrib/boosted_trees/estimator_batch/trainer_hooks.py | 28 | 9303 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hooks for use with GTFlow Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.session_run_hook import SessionRunArgs
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.training.summary_io import SummaryWriterCache
class FeatureImportanceSummarySaver(session_run_hook.SessionRunHook):
  """Hook to save feature importance summaries.

  For every feature the gbdt graph exposes, writes four scalar summaries
  (usage count, usage fraction, gain, gain fraction) into a per-feature
  subdirectory of ``model_dir`` so each feature appears as its own run in
  TensorBoard.
  """

  def __init__(self, model_dir, every_n_steps=1):
    """Create a FeatureImportanceSummarySaver Hook.

    This hook creates scalar summaries representing feature importance
    for each feature column during training.

    Args:
      model_dir: model base output directory.
      every_n_steps: frequency, in number of steps, for logging summaries.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    if model_dir is None:
      raise ValueError("model dir must be specified.")
    self._model_dir = model_dir
    self._every_n_steps = every_n_steps
    self._last_triggered_step = None

  def begin(self):
    """Looks up the global step and the gbdt feature tensors by name.

    Raises:
      RuntimeError: If no global step tensor has been created.
    """
    self._global_step_tensor = training_util.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use FeatureImportanceSummarySaver.")
    graph = ops.get_default_graph()
    self._feature_names_tensor = graph.get_tensor_by_name(
        "gbdt/feature_names:0")
    self._feature_usage_counts_tensor = graph.get_tensor_by_name(
        "gbdt/feature_usage_counts:0")
    self._feature_gains_tensor = graph.get_tensor_by_name(
        "gbdt/feature_gains:0")

  def before_run(self, run_context):
    """Requests the step counter and the feature importance tensors."""
    del run_context  # Unused by feature importance summary saver hook.
    requests = {
        "global_step": self._global_step_tensor,
        "feature_names": self._feature_names_tensor,
        "feature_usage_counts": self._feature_usage_counts_tensor,
        "feature_gains": self._feature_gains_tensor
    }
    return SessionRunArgs(requests)

  def after_run(self, run_context, run_values):
    """Writes per-feature summaries, at most once per `every_n_steps`.

    Raises:
      RuntimeError: If the names and importance tensors disagree in length.
    """
    del run_context  # Unused by feature importance summary saver hook.
    # Read result tensors.
    global_step = run_values.results["global_step"]
    feature_names = run_values.results["feature_names"]
    feature_usage_counts = run_values.results["feature_usage_counts"]
    feature_gains = run_values.results["feature_gains"]
    # Ensure summaries are logged at desired frequency.
    if (self._last_triggered_step is not None and
        global_step < self._last_triggered_step + self._every_n_steps):
      return
    # Validate tensors.
    if (len(feature_names) != len(feature_usage_counts) or
        len(feature_names) != len(feature_gains)):
      raise RuntimeError(
          "Feature names and importance measures have inconsistent lengths.")
    # Normalizers for the fraction summaries; the builtin sum() replaces
    # the original manual accumulation loops. A zero total falls back to
    # a norm of 1.0 to avoid dividing by zero.
    total_usage_count = sum(feature_usage_counts)
    usage_count_norm = 1.0 / total_usage_count if total_usage_count else 1.0
    total_gain = sum(feature_gains)
    gain_norm = 1.0 / total_gain if total_gain else 1.0
    # Output summary for each feature.
    self._last_triggered_step = global_step
    for (name, usage_count, gain) in zip(feature_names, feature_usage_counts,
                                         feature_gains):
      # One writer per feature, keyed by a per-feature subdirectory.
      output_dir = os.path.join(self._model_dir, name.decode("utf-8"))
      summary_writer = SummaryWriterCache.get(output_dir)
      usage_count_summary = Summary(value=[
          Summary.Value(
              tag="feature_importance/usage_counts", simple_value=usage_count)
      ])
      usage_fraction_summary = Summary(value=[
          Summary.Value(
              tag="feature_importance/usage_fraction",
              simple_value=usage_count * usage_count_norm)
      ])
      summary_writer.add_summary(usage_count_summary, global_step)
      summary_writer.add_summary(usage_fraction_summary, global_step)
      gains_summary = Summary(value=[
          Summary.Value(tag="feature_importance/gains", simple_value=gain)
      ])
      gains_fraction_summary = Summary(value=[
          Summary.Value(
              tag="feature_importance/gains_fraction",
              simple_value=gain * gain_norm)
      ])
      summary_writer.add_summary(gains_summary, global_step)
      summary_writer.add_summary(gains_fraction_summary, global_step)
class FeedFnHook(session_run_hook.SessionRunHook):
  """Runs feed_fn and sets the feed_dict accordingly."""

  def __init__(self, feed_fn):
    """Stores the callable that produces the feed_dict for each run."""
    self.feed_fn = feed_fn

  def before_run(self, run_context):
    """Attaches the feed callable to the next session.run call."""
    del run_context  # unused by FeedFnHook.
    run_args = session_run_hook.SessionRunArgs(
        fetches=None, feed_dict=self.feed_fn)
    return run_args
class StopAfterNTrees(session_run_hook.SessionRunHook):
  """Stop training after building N full trees."""

  def __init__(self, n, num_attempted_trees_tensor, num_finalized_trees_tensor,
               override_global_step_value=None):
    # n: number of finalized trees after which a stop is requested.
    # override_global_step_value: if not None, the global step is
    # overwritten with this value right before stopping (see after_run).
    self._num_trees = n
    # num_attempted_trees_tensor and num_finalized_trees_tensor are both
    # tensors.
    self._num_attempted_trees_tensor = num_attempted_trees_tensor
    self._num_finalized_trees_tensor = num_finalized_trees_tensor
    self._override_global_step_value = override_global_step_value

  def begin(self):
    # Build the optional override assign op here, at graph-construction
    # time, so after_run only has to session.run the prebuilt op.
    self._global_step_tensor = training_util.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError("Global step should be created.")
    if self._override_global_step_value is not None:
      self._override_global_step_op = state_ops.assign(
          self._global_step_tensor, self._override_global_step_value)

  def before_run(self, run_context):
    # Fetch the two tree counters alongside every training step.
    del run_context  # unused by StopTrainingAfterNTrees.
    return session_run_hook.SessionRunArgs({
        "num_attempted_trees": self._num_attempted_trees_tensor,
        "num_finalized_trees": self._num_finalized_trees_tensor,
    })

  def after_run(self, run_context, run_values):
    num_attempted_trees = run_values.results["num_attempted_trees"]
    num_finalized_trees = run_values.results["num_finalized_trees"]
    assert num_attempted_trees is not None
    assert num_finalized_trees is not None
    # Stop when the required number of finalized trees is reached, or when we
    # try enough times to build a tree but keep failing (more than twice as
    # many attempts as requested trees).
    if (num_finalized_trees >= self._num_trees or
        num_attempted_trees > 2 * self._num_trees):
      logging.info("Requesting stop since we have reached %d trees.",
                   num_finalized_trees)
      if self._override_global_step_value is not None:
        logging.info("Overriding global steps value.")
        run_context.session.run(self._override_global_step_op)
      run_context.request_stop()
class SwitchTrainOp(session_run_hook.SessionRunHook):
  """Hook that switches the train op after specified number of steps.

  Hook that replaces the train op depending on the number of steps of
  training that have taken place. The first_train_op is used till
  train_steps steps are reached. Thereafter the second_train_op is used.
  """

  def __init__(self, first_train_op, train_steps, second_train_op):
    """Initializes a `SwitchTrainOp`."""
    self._first_train_op = first_train_op
    self._second_train_op = second_train_op
    self._train_steps = train_steps

  def _get_train_op_for_global_step(self, current_step):
    """Gets train_op for current global step."""
    return (self._first_train_op if current_step < self._train_steps
            else self._second_train_op)

  def begin(self):
    self._global_step_tensor = training_util.get_global_step()
    # Until the first after_run fires, running the "train op" is a no-op.
    self._current_train_op = control_flow_ops.no_op()
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use SwitchTrainOp.")

  def before_run(self, run_context):  # pylint: disable=unused-argument
    fetches = {
        "global_step": self._global_step_tensor,
        "train_op": self._current_train_op,
    }
    return session_run_hook.SessionRunArgs(fetches)

  def after_run(self, run_context, run_values):
    # Re-select the op for the *next* run based on the step just observed.
    observed_step = run_values.results["global_step"]
    self._current_train_op = self._get_train_op_for_global_step(observed_step)
| apache-2.0 |
gvb/odoo | addons/point_of_sale/report/__init__.py | 381 | 1238 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pos_users_product
import account_statement
import pos_receipt
import pos_invoice
import pos_lines
import pos_details
import pos_payment_report
import pos_report
import pos_order_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
makinacorpus/django | django/core/files/locks.py | 114 | 1789 | """
Portable file locking utilities.
Based partially on example by Jonathan Feignberg <jdf@pobox.com> in the Python
Cookbook, licensed under the Python Software License.
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
Example Usage::
>>> from django.core.files import locks
>>> with open('./file', 'wb') as f:
... locks.lock(f, locks.LOCK_EX)
... f.write('Django')
"""
__all__ = ('LOCK_EX','LOCK_SH','LOCK_NB','lock','unlock')

# Which locking backend was detected: 'nt' (pywin32 available), 'posix'
# (fcntl available), or None when neither import succeeds -- in which
# case lock()/unlock() below degrade to no-ops.
system_type = None

try:
    import win32con
    import win32file
    import pywintypes
    LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
    LOCK_SH = 0  # win32 has no explicit shared-lock flag; 0 means shared
    LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
    __overlapped = pywintypes.OVERLAPPED()
    system_type = 'nt'
except (ImportError, AttributeError):
    pass

try:
    import fcntl
    LOCK_EX = fcntl.LOCK_EX
    LOCK_SH = fcntl.LOCK_SH
    LOCK_NB = fcntl.LOCK_NB
    system_type = 'posix'
except (ImportError, AttributeError):
    pass
def fd(f):
    """Get a filedescriptor from something which could be a file or an fd."""
    if hasattr(f, 'fileno'):
        return f.fileno()
    return f
if system_type == 'nt':
    def lock(file, flags):
        # Translate the Python file object to an OS handle, then lock via
        # the win32 API. NOTE(review): the (0, -0x10000) arguments define
        # the byte range being locked, matching the UnlockFileEx call
        # below -- confirm against the LockFileEx documentation.
        hfile = win32file._get_osfhandle(fd(file))
        win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped)

    def unlock(file):
        hfile = win32file._get_osfhandle(fd(file))
        win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped)
elif system_type == 'posix':
    def lock(file, flags):
        # POSIX advisory locking on the whole file.
        fcntl.lockf(fd(file), flags)

    def unlock(file):
        fcntl.lockf(fd(file), fcntl.LOCK_UN)
else:
    # File locking is not supported.
    LOCK_EX = LOCK_SH = LOCK_NB = None

    # Dummy functions that don't do anything.
    def lock(file, flags):
        pass

    def unlock(file):
        pass
| bsd-3-clause |
ConstantineLignos/Codeswitchador | tools/filter_cs.py | 1 | 2151 | #!/usr/bin/env python
"""
Remove non-codeswitched tweets.
Constantine Lignos
February 2013
"""
# Copyright (c) 2013 Constantine Lignos
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import codecs

# Hack to allow import of split_token, forces this to be run from subdir.
sys.path.append('..')
from eval_codeswitch import split_token

# Every one of these language tags must appear among a tweet's token
# annotations for the tweet to be kept (presumably 'e' = English and
# 's' = Spanish -- verify against eval_codeswitch's tag set).
LANGS_NEEDED = ('e', 's')

# Python 2 stream handling: wrap stdin/stdout so text is decoded from /
# encoded to UTF-8 transparently.
output = codecs.getwriter('utf_8')(sys.stdout)
for line in codecs.getreader('utf_8')(sys.stdin):
    line = line.rstrip()
    try:
        # split_token presumably yields (word, tag) pairs; keep the tags.
        _, tags = zip(*[split_token(token) for token in line.split()])
    except ValueError as err:
        # Malformed token or empty line: report on stderr and keep going.
        print >> sys.stderr, err
        print >> sys.stderr, "From line:", repr(line)
        continue
    # Skip any tags with multiple annotations
    unique_tags = set(tags)
    if not all(lang in unique_tags for lang in LANGS_NEEDED):
        continue
    print >> output, line
| bsd-2-clause |
cgstudiomap/cgstudiomap | main/eggs/phonenumbers-7.1.1-py2.7.egg/phonenumbers/shortdata/region_AU.py | 11 | 1309 | """Auto-generated file, do not edit by hand. AU metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_AU = PhoneMetadata(id='AU', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[027]\\d{2}|1\\d{2,7}', possible_number_pattern='\\d{3,8}'),
toll_free=PhoneNumberDesc(national_number_pattern='1(?:258885|555)|733', possible_number_pattern='\\d{3,7}', example_number='733'),
premium_rate=PhoneNumberDesc(national_number_pattern='1(?:2(?:34|456)|9\\d{4,6})', possible_number_pattern='\\d{4,8}', example_number='191123'),
emergency=PhoneNumberDesc(national_number_pattern='000|1(?:06|12)', possible_number_pattern='\\d{3}', example_number='112'),
short_code=PhoneNumberDesc(national_number_pattern='000|1(?:06|1(?:00|2|9[46])|2(?:[23]\\d|4\\d{2,3}|5\\d{3,4}|8(?:2|[013-9]\\d))|555|9(?:[13-5]\\d{3}|[679]\\d{5}))|225|7(?:33|67)', possible_number_pattern='\\d{3,8}', example_number='112'),
standard_rate=PhoneNumberDesc(national_number_pattern='1(?:1\\d{2}|24733)|225|767', possible_number_pattern='\\d{3,6}', example_number='225'),
carrier_specific=PhoneNumberDesc(national_number_pattern='1(?:258885|555)', possible_number_pattern='\\d{4,7}', example_number='1555'),
short_data=True)
| agpl-3.0 |
fritsvanveen/QGIS | python/ext-libs/requests/__init__.py | 149 | 2215 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
Requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2016 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.10.0'
__build__ = 0x021000
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2016 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from .packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass
import warnings
# urllib3's DependencyWarnings should be silenced.
from .packages.urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError,
FileModeWarning, ConnectTimeout, ReadTimeout
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Older Pythons lack logging.NullHandler; provide an equivalent no-op
    # handler with the same interface.
    class NullHandler(logging.Handler):
        def emit(self, record):
            # Deliberately discard every record.
            pass

logging.getLogger(__name__).addHandler(NullHandler())

import warnings

# FileModeWarnings go off per the default.
warnings.simplefilter('default', FileModeWarning, append=True)
| gpl-2.0 |
WildGenie/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/generator-output/gyptest-subdir2-deep.py | 216 | 1034 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target from a .gyp file a few subdirectories
deep when the --generator-output= option is used to put the build
configuration files in a separate directory tree.
"""
import TestGyp

# Android doesn't support --generator-output.
test = TestGyp.TestGyp(formats=['!android'])

# The source tree is read-only for this test; only the deep build
# directory may be written, so all generated files must land under the
# --generator-output tree.
test.writable(test.workpath('src'), False)
test.writable(test.workpath('src/subdir2/deeper/build'), True)

test.run_gyp('deeper.gyp',
             '-Dset_symroot=1',
             '--generator-output=' + test.workpath('gypfiles'),
             chdir='src/subdir2/deeper')

test.build('deeper.gyp', test.ALL, chdir='gypfiles')

chdir = 'gypfiles'
if test.format == 'xcode':
    # NOTE(review): xcode apparently locates build products relative to
    # the original project directory rather than the generator output
    # tree -- confirm if this test is touched.
    chdir = 'src/subdir2/deeper'
test.run_built_executable('deeper',
                          chdir=chdir,
                          stdout="Hello from deeper.c\n")

test.pass_test()
| gpl-3.0 |
jmesteve/asterisk | openerp/addons/analytic/analytic.py | 8 | 17640 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_analytic_account(osv.osv):
_name = 'account.analytic.account'
_inherit = ['mail.thread']
_description = 'Analytic Account'
_track = {
'state': {
'analytic.mt_account_pending': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'pending',
'analytic.mt_account_closed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'close',
'analytic.mt_account_opened': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'open',
},
}
def _compute_level_tree(self, cr, uid, ids, child_ids, res, field_names, context=None):
    """Consolidate the per-account values in ``res`` bottom-up over the
    account hierarchy.

    :param child_ids: account ids for which ``res`` holds entries;
        requested accounts outside this set are skipped.
    :param res: dict id -> {field_name: value} with each account's own
        (non-consolidated) figures.
    :return: dict id -> {field_name: value} where each requested
        account's values include the totals of all its descendants,
        currency-converted where needed.
    """
    currency_obj = self.pool.get('res.currency')
    recres = {}
    def recursive_computation(account):
        # Start from the account's own figures, then add each child's
        # consolidated figures. Monetary fields are converted into the
        # parent's currency when the currencies differ; 'quantity' is a
        # unit count and is never converted.
        result2 = res[account.id].copy()
        for son in account.child_ids:
            result = recursive_computation(son)
            for field in field_names:
                if (account.currency_id.id != son.currency_id.id) and (field!='quantity'):
                    result[field] = currency_obj.compute(cr, uid, son.currency_id.id, account.currency_id.id, result[field], context=context)
                result2[field] += result[field]
        return result2
    for account in self.browse(cr, uid, ids, context=context):
        if account.id not in child_ids:
            continue
        recres[account.id] = recursive_computation(account)
    return recres
def _debit_credit_bal_qtty(self, cr, uid, ids, fields, arg, context=None):
    """Function field: compute debit / credit / balance / quantity for
    the accounts in ``ids``, consolidated over their whole subtrees.

    ``context['from_date']`` / ``context['to_date']`` optionally restrict
    which analytic lines are taken into account.
    """
    res = {}
    if context is None:
        context = {}
    # Fetch the full subtree of every requested account so parents can
    # be consolidated from their children afterwards.
    child_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)]))
    # Initialize every account to zero so accounts without analytic
    # lines still get complete result dicts.
    for i in child_ids:
        res[i] = {}
        for n in fields:
            res[i][n] = 0.0
    if not child_ids:
        return res
    # Optional date-range filter appended to the WHERE clause; values
    # are passed as query parameters (no SQL injection via the dates).
    where_date = ''
    where_clause_args = [tuple(child_ids)]
    if context.get('from_date', False):
        where_date += " AND l.date >= %s"
        where_clause_args += [context['from_date']]
    if context.get('to_date', False):
        where_date += " AND l.date <= %s"
        where_clause_args += [context['to_date']]
    # Positive line amounts count as debit, negative ones as credit;
    # balance and quantity are straight sums.
    cr.execute("""
          SELECT a.id,
                 sum(
                     CASE WHEN l.amount > 0
                     THEN l.amount
                     ELSE 0.0
                     END
                      ) as debit,
                 sum(
                     CASE WHEN l.amount < 0
                     THEN -l.amount
                     ELSE 0.0
                     END
                      ) as credit,
                 COALESCE(SUM(l.amount),0) AS balance,
                 COALESCE(SUM(l.unit_amount),0) AS quantity
          FROM account_analytic_account a
              LEFT JOIN account_analytic_line l ON (a.id = l.account_id)
          WHERE a.id IN %s
          """ + where_date + """
          GROUP BY a.id""", where_clause_args)
    for row in cr.dictfetchall():
        res[row['id']] = {}
        for field in fields:
            res[row['id']][field] = row[field]
    # Roll the per-account figures up the hierarchy.
    return self._compute_level_tree(cr, uid, ids, child_ids, res, fields, context)
def name_get(self, cr, uid, ids, context=None):
    """Return ``(id, display name)`` pairs, where the display name is
    the slash-separated hierarchical path built by _get_one_full_name.
    """
    if not ids:
        return []
    if isinstance(ids, (int, long)):
        ids = [ids]
    # Browse all records in a single call instead of one browse() per
    # id (same pattern as _get_full_name above); browse preserves the
    # order of the ids it is given.
    return [(elmt.id, self._get_one_full_name(elmt))
            for elmt in self.browse(cr, uid, ids, context=context)]
def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
    """Function field: map each account id to its slash-separated
    hierarchical name (see _get_one_full_name).

    ``name`` and ``args`` are unused but required by the function-field
    calling convention.
    """
    # Fixed: was `context == None`; identity comparison with None is the
    # correct idiom (and what the rest of this file uses).
    if context is None:
        context = {}
    res = {}
    for elmt in self.browse(cr, uid, ids, context=context):
        res[elmt.id] = self._get_one_full_name(elmt)
    return res
def _get_one_full_name(self, elmt, level=6):
    """Build "ancestor / ... / name" for ``elmt``, walking at most
    ``level`` ancestors; deeper chains are abbreviated to '...'.
    Template accounts never include their parent path.
    """
    if level <= 0:
        return '...'
    include_parent = elmt.parent_id and elmt.type != 'template'
    if not include_parent:
        return elmt.name
    prefix = self._get_one_full_name(elmt.parent_id, level - 1)
    return prefix + " / " + elmt.name
def _child_compute(self, cr, uid, ids, name, arg, context=None):
    """Function field: ids of each account's direct children, excluding
    accounts in the 'template' state.
    """
    result = {}
    if context is None:
        context = {}
    for account in self.browse(cr, uid, ids, context=context):
        # Single list comprehension instead of map(lambda ...) over a
        # pre-filtered list: one pass, no throwaway intermediate list.
        result[account.id] = [child.id for child in account.child_ids
                              if child.state != 'template']
    return result
def _get_analytic_account(self, cr, uid, ids, context=None):
company_obj = self.pool.get('res.company')
analytic_obj = self.pool.get('account.analytic.account')
accounts = []
for company in company_obj.browse(cr, uid, ids, context=context):
accounts += analytic_obj.search(cr, uid, [('company_id', '=', company.id)])
return accounts
def _set_company_currency(self, cr, uid, ids, name, value, arg, context=None):
if isinstance(ids, (int, long)):
ids=[ids]
for account in self.browse(cr, uid, ids, context=context):
if account.company_id:
if account.company_id.currency_id.id != value:
raise osv.except_osv(_('Error!'), _("If you set a company, the currency selected has to be the same as it's currency. \nYou can remove the company belonging, and thus change the currency, only on analytic account of type 'view'. This can be really useful for consolidation purposes of several companies charts with different currencies, for example."))
if value:
return cr.execute("""update account_analytic_account set currency_id=%s where id=%s""", (value, account.id, ))
def _currency(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for rec in self.browse(cr, uid, ids, context=context):
if rec.company_id:
result[rec.id] = rec.company_id.currency_id.id
else:
result[rec.id] = rec.currency_id.id
return result
_columns = {
'name': fields.char('Account/Contract Name', size=128, required=True, track_visibility='onchange'),
'complete_name': fields.function(_get_full_name, type='char', string='Full Name'),
'code': fields.char('Reference', select=True, track_visibility='onchange'),
'type': fields.selection([('view','Analytic View'), ('normal','Analytic Account'),('contract','Contract or Project'),('template','Template of Contract')], 'Type of Account', required=True,
help="If you select the View Type, it means you won\'t allow to create journal entries using that account.\n"\
"The type 'Analytic account' stands for usual accounts that you only want to use in accounting.\n"\
"If you select Contract or Project, it offers you the possibility to manage the validity and the invoicing options for this account.\n"\
"The special type 'Template of Contract' allows you to define a template with default data that you can reuse easily."),
'template_id': fields.many2one('account.analytic.account', 'Template of Contract'),
'description': fields.text('Description'),
'parent_id': fields.many2one('account.analytic.account', 'Parent Analytic Account', select=2),
'child_ids': fields.one2many('account.analytic.account', 'parent_id', 'Child Accounts'),
'child_complete_ids': fields.function(_child_compute, relation='account.analytic.account', string="Account Hierarchy", type='many2many'),
'line_ids': fields.one2many('account.analytic.line', 'account_id', 'Analytic Entries'),
'balance': fields.function(_debit_credit_bal_qtty, type='float', string='Balance', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'debit': fields.function(_debit_credit_bal_qtty, type='float', string='Debit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'credit': fields.function(_debit_credit_bal_qtty, type='float', string='Credit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'quantity': fields.function(_debit_credit_bal_qtty, type='float', string='Quantity', multi='debit_credit_bal_qtty'),
'quantity_max': fields.float('Prepaid Service Units', help='Sets the higher limit of time to work on the contract, based on the timesheet. (for instance, number of hours in a limited support contract.)'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'user_id': fields.many2one('res.users', 'Project Manager', track_visibility='onchange'),
'manager_id': fields.many2one('res.users', 'Account Manager', track_visibility='onchange'),
'date_start': fields.date('Start Date'),
'date': fields.date('End Date', select=True, track_visibility='onchange'),
'company_id': fields.many2one('res.company', 'Company', required=False), #not required because we want to allow different companies to use the same chart of account, except for leaf accounts.
'state': fields.selection([('template', 'Template'),('draft','New'),('open','In Progress'),('pending','To Renew'),('close','Closed'),('cancelled', 'Cancelled')], 'Status', required=True, track_visibility='onchange'),
'currency_id': fields.function(_currency, fnct_inv=_set_company_currency, #the currency_id field is readonly except if it's a view account and if there is no company
store = {
'res.company': (_get_analytic_account, ['currency_id'], 10),
}, string='Currency', type='many2one', relation='res.currency'),
}
def on_change_template(self, cr, uid, ids, template_id, context=None):
if not template_id:
return {}
res = {'value':{}}
template = self.browse(cr, uid, template_id, context=context)
if template.date_start and template.date:
from_dt = datetime.strptime(template.date_start, tools.DEFAULT_SERVER_DATE_FORMAT)
to_dt = datetime.strptime(template.date, tools.DEFAULT_SERVER_DATE_FORMAT)
timedelta = to_dt - from_dt
res['value']['date'] = datetime.strftime(datetime.now() + timedelta, tools.DEFAULT_SERVER_DATE_FORMAT)
res['value']['date_start'] = fields.date.today()
res['value']['quantity_max'] = template.quantity_max
res['value']['parent_id'] = template.parent_id and template.parent_id.id or False
res['value']['description'] = template.description
return res
def on_change_partner_id(self, cr, uid, ids, partner_id, name, context=None):
    """Propose an account manager and a default contract name when the
    customer changes on the form.

    Fixed: ``context`` previously defaulted to a mutable ``{}``, which
    Python evaluates once and shares across every call; it now defaults
    to None and is normalized inside the body.
    """
    if context is None:
        context = {}
    res = {}
    if partner_id:
        partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
        if partner.user_id:
            res['manager_id'] = partner.user_id.id
        # Only suggest a name when the user has not typed one already.
        if not name:
            res['name'] = _('Contract: ') + partner.name
    return {'value': res}
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
def _get_default_currency(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.currency_id.id
_defaults = {
'type': 'normal',
'company_id': _default_company,
'code' : lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'account.analytic.account'),
'state': 'open',
'user_id': lambda self, cr, uid, ctx: uid,
'partner_id': lambda self, cr, uid, ctx: ctx.get('partner_id', False),
'date_start': lambda *a: time.strftime('%Y-%m-%d'),
'currency_id': _get_default_currency,
}
def check_recursion(self, cr, uid, ids, context=None, parent=None):
return super(account_analytic_account, self)._check_recursion(cr, uid, ids, context=context, parent=parent)
_order = 'name asc'
_constraints = [
(check_recursion, 'Error! You cannot create recursive analytic accounts.', ['parent_id']),
]
def name_create(self, cr, uid, name, context=None):
raise osv.except_osv(_('Warning'), _("Quick account creation disallowed."))
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
analytic = self.browse(cr, uid, id, context=context)
default.update(
code=False,
line_ids=[],
name=_("%s (copy)") % (analytic['name']))
return super(account_analytic_account, self).copy(cr, uid, id, default, context=context)
def on_change_company(self, cr, uid, id, company_id):
if not company_id:
return {}
currency = self.pool.get('res.company').read(cr, uid, [company_id], ['currency_id'])[0]['currency_id']
return {'value': {'currency_id': currency}}
def on_change_parent(self, cr, uid, id, parent_id):
if not parent_id:
return {}
parent = self.read(cr, uid, [parent_id], ['partner_id','code'])[0]
if parent['partner_id']:
partner = parent['partner_id'][0]
else:
partner = False
res = {'value': {}}
if partner:
res['value']['partner_id'] = partner
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args=[]
if context is None:
context={}
if name:
account_ids = self.search(cr, uid, [('code', '=', name)] + args, limit=limit, context=context)
if not account_ids:
dom = []
for name2 in name.split('/'):
name = name2.strip()
account_ids = self.search(cr, uid, dom + [('name', 'ilike', name)] + args, limit=limit, context=context)
if not account_ids: break
dom = [('parent_id','in',account_ids)]
else:
account_ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, account_ids, context=context)
class account_analytic_line(osv.osv):
    """An analytic (cost/revenue) line booked against an analytic account."""
    _name = 'account.analytic.line'
    _description = 'Analytic Line'

    _columns = {
        'name': fields.char('Description', size=256, required=True),
        'date': fields.date('Date', required=True, select=True),
        'amount': fields.float('Amount', required=True, help='Calculated by multiplying the quantity and the price given in the Product\'s cost price. Always expressed in the company main currency.', digits_compute=dp.get_precision('Account')),
        'unit_amount': fields.float('Quantity', help='Specifies the amount of quantity to count.'),
        'account_id': fields.many2one('account.analytic.account', 'Analytic Account', required=True, ondelete='restrict', select=True, domain=[('type','<>','view')]),
        'user_id': fields.many2one('res.users', 'User'),
        # Mirrors the account's company; stored so it is searchable.
        'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
    }

    def _get_default_date(self, cr, uid, context=None):
        # Default line date: "today" in the requesting user's timezone.
        return fields.date.context_today(self, cr, uid, context=context)

    def __get_default_date(self, cr, uid, context=None):
        # NOTE(review): name-mangled wrapper referenced from _defaults;
        # presumably it pins the _defaults entry to this class while
        # still dispatching through the overridable _get_default_date --
        # confirm before simplifying.
        return self._get_default_date(cr, uid, context=context)

    _defaults = {
        'date': __get_default_date,
        'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
        'amount': 0.00
    }

    _order = 'date desc'

    def _check_no_view(self, cr, uid, ids, context=None):
        # Constraint helper: lines may not be booked on 'view' accounts,
        # which are pure aggregation nodes.
        analytic_lines = self.browse(cr, uid, ids, context=context)
        for line in analytic_lines:
            if line.account_id.type == 'view':
                return False
        return True

    _constraints = [
        (_check_no_view, 'You cannot create analytic line on view account.', ['account_id']),
    ]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
arokem/nipype | nipype/algorithms/tests/test_overlap.py | 7 | 1388 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
from shutil import rmtree
from tempfile import mkdtemp
from nipype.testing import (assert_equal, assert_raises,
assert_almost_equal, example_data)
import numpy as np
import nibabel as nb
def test_overlap():
    """Generator test for the Overlap metric (identical, differing, and
    mm-unit volume pairs).

    Fix over the original: the working directory is restored before the
    temporary directory is removed.  Previously the process cwd was left
    pointing at the deleted tempdir, which could break later tests.
    """
    from nipype.algorithms.metrics import Overlap

    def check_close(val1, val2):
        import numpy.testing as npt
        return npt.assert_almost_equal(val1, val2, decimal=3)

    tempdir = mkdtemp()
    in1 = example_data('segmentation0.nii.gz')
    in2 = example_data('segmentation1.nii.gz')
    origdir = os.getcwd()
    os.chdir(tempdir)
    try:
        # Identical volumes: Jaccard must be exactly 1.
        overlap = Overlap()
        overlap.inputs.volume1 = in1
        overlap.inputs.volume2 = in1
        res = overlap.run()
        yield check_close, res.outputs.jaccard, 1.0
        # Slightly differing volumes.
        overlap = Overlap()
        overlap.inputs.volume1 = in1
        overlap.inputs.volume2 = in2
        res = overlap.run()
        yield check_close, res.outputs.jaccard, 0.99705
        # Same pair, measured in mm: Jaccard unchanged, voldiff checked too.
        overlap = Overlap()
        overlap.inputs.volume1 = in1
        overlap.inputs.volume2 = in2
        overlap.inputs.vol_units = 'mm'
        res = overlap.run()
        yield check_close, res.outputs.jaccard, 0.99705
        yield (check_close, res.outputs.roi_voldiff,
               np.array([0.0063086, -0.0025506, 0.0]))
    finally:
        # Restore cwd before deleting the directory we are standing in.
        os.chdir(origdir)
        rmtree(tempdir)
| bsd-3-clause |
Emercoin/emcweb | engine/emcweb/emcweb_webapi/views/wallet.py | 1 | 2709 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import time
from pathlib import Path
from flask import current_app
from flask_login import current_user
from flask_restful import reqparse
from emcweb.emcweb_webapi.login_resource import LoginResource
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
from emcweb.exts import connection
from emcweb.tasks import create_empty_wallet
from emcweb.emcweb_webapi.models import Wallets
from emcweb.emcweb_webapi.views import api
class WalletAPI(LoginResource):
    """REST resource for creating (POST) and opening (GET) wallet files."""

    @staticmethod
    def post():
        """
        Create new wallet.

        Queues a Celery task that creates an empty wallet file, polls it for
        up to 200 seconds, and on success records the wallet in the database.
        Returns (payload, 201) on success, (payload, 500) on any failure.
        """
        result = {'result_status': False,
                  'result': False,
                  'message': ''}
        parser = reqparse.RequestParser()
        parser.add_argument('name')
        args = parser.parse_args()
        s_name = secure_filename(args['name'])
        file_path = os.path.join(current_app.config['UPLOAD_FOLDER'], s_name)
        # Reject names already registered in the DB or already present on disk.
        existing_names = Wallets.query.filter(Wallets.name == s_name).all()
        if existing_names or Path(file_path).is_file():
            result['result'] = False
            result['message'] = 'The name already exists!'
        else:
            try:
                create_res = create_empty_wallet.delay(s_name)
            except Exception:
                # The task could not even be queued (broker unreachable).
                result['result_status'] = False
                result['message'] = 'Celery transport connection refused'
            else:
                # Poll the async result once per second, up to 200 seconds.
                seconds = 200
                while seconds > 0:
                    if create_res.ready():
                        result['result_status'] = True
                        result['result'] = True
                        result['message'] = 'The wallet has been created'
                        break
                    time.sleep(1)
                    seconds -= 1
                if not result['result_status']:
                    result['result_status'] = False
                    result['result'] = False
                    result['message'] = 'Celery hasn\'t reported about finish'
            if result['result_status'] and result['result']:
                # Persist the new wallet record for the current user.
                new_wallet = Wallets(user_id=current_user.id,
                                     name=s_name,
                                     path=file_path)
                connection.session.add(new_wallet)
                connection.session.commit()
        if result['result_status']:
            return result, 201
        else:
            return result, 500

    @staticmethod
    def get():
        """
        Open wallet.

        NOTE(review): currently a stub that always reports success; a dead
        ``pass`` statement before the return was removed (no behavior change).
        """
        return {'result_status': True, 'result': 'Opened'}, 201
# Expose WalletAPI at both the collection URL and the per-wallet URL.
api.add_resource(WalletAPI, '/wallet', '/wallet/<string:name>')
| gpl-3.0 |
velorientc/git_test7 | contrib/thgdebugtools/widgets.py | 1 | 6451 | # widgets.py - menu to find invisible widgets and gc issues
#
# Copyright 2013 Yuya Nishihara <yuya@tcha.org>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.
import cgi, gc, pprint, re, weakref
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import dbgutil
def invisibleWindows():
    """List of invisible top-level widgets excluding menus"""
    # (the docstring above is displayed as a status tip by WidgetsMenuActions,
    # so its exact text is part of runtime behavior)
    hidden = []
    for w in QApplication.topLevelWidgets():
        if w.isHidden() and not isinstance(w, QMenu):
            hidden.append(w)
    return hidden
def orphanedWidgets():
    """List of invisible widgets of no parent"""
    # (the docstring above is displayed as a status tip by WidgetsMenuActions)
    orphans = []
    for w in QApplication.allWidgets():
        if w.parent() or not w.isHidden():
            continue
        if isinstance(w, QDesktopWidget):
            continue
        orphans.append(w)
    return orphans
def zombieWidgets():
    """List of possibly-deleted widgets but referenced from Python"""
    live = set(QApplication.allWidgets())
    tracked = {w for w in gc.get_objects() if isinstance(w, QWidget)}
    return tracked - live
class WidgetsMenuActions(dbgutil.BaseMenuActions):
    """Set up menu to find unused widgets"""
    def _setupMenu(self, menu):
        """Populate the debug menu with finder submenus and the GC-info action."""
        # (label, collector function, action to run on the chosen widget)
        findtypes = [
            ('&Invisible Windows', invisibleWindows, self.showWidget),
            ('&Orphaned Widgets', orphanedWidgets, self.showWidget),
            ('&Zombie Widgets', zombieWidgets, self.openGcInfoOfWidget),
            ]
        for name, collect, action in findtypes:
            m = menu.addMenu(name)
            # The collector's docstring doubles as the menu status tip.
            m.menuAction().setStatusTip(collect.__doc__ or '')
            f = WidgetsFinder(m, collect, parent=self)
            f.triggered.connect(action)
        menu.addSeparator()
        a = menu.addAction('&GC Info of Active Window')
        a.triggered.connect(self.openGcInfoOfActiveWindow)
        # Lazily created, shared GcInfoDialog instance.
        self._gcInfoDialog = None
    @pyqtSlot(object)
    def showWidget(self, w):
        """Make the given widget visible and bring it to the front."""
        w.show()
        w.raise_()
        w.activateWindow()
    def _openGcInfoDialog(self):
        """Return the shared GcInfoDialog, creating it on first use."""
        if self._gcInfoDialog:
            dlg = self._gcInfoDialog
        else:
            dlg = self._gcInfoDialog = GcInfoDialog()
        dlg.show()
        dlg.raise_()
        dlg.activateWindow()
        return dlg
    @pyqtSlot(object)
    def openGcInfoOfWidget(self, w):
        """Show GC referrer info for the given widget."""
        dlg = self._openGcInfoDialog()
        dlg.update(w)
    @pyqtSlot()
    def openGcInfoOfActiveWindow(self):
        """Show GC referrer info for the currently active window."""
        dlg = self._openGcInfoDialog()
        dlg.update(QApplication.activeWindow())
class WidgetsFinder(QObject):
    """Keeps a QMenu in sync with a collector of (possibly dead) widgets."""
    # not QWidget because C++ part may be deleted
    triggered = pyqtSignal(object)
    def __init__(self, menu, collect, parent=None):
        super(WidgetsFinder, self).__init__(parent)
        self._menu = menu
        self._menu.aboutToShow.connect(self.rebuild)
        self._menu.triggered.connect(self._emitTriggered)
        self._collect = collect
        # While the menu is open, periodically disable entries whose
        # underlying widget has been garbage-collected.
        self._refreshTimer = QTimer(self, interval=100)
        self._refreshTimer.timeout.connect(self.refresh)
        self._menu.aboutToShow.connect(self._refreshTimer.start)
        self._menu.aboutToHide.connect(self._refreshTimer.stop)
    @pyqtSlot()
    def rebuild(self):
        """Repopulate the menu from the collector's current result."""
        widgets = self._collect()
        self._menu.clear()
        if not widgets:
            self._menu.addAction('(none)').setEnabled(False)
            return
        for i, w in enumerate(sorted(widgets, key=repr)):
            # Shorten repr() output for readability in the menu.
            s = re.sub(r'^(tortoisehg\.hgqt|PyQt4\.QtGui)\.', '',
                       repr(w)[1:-1])
            s = s.replace(' object at ', ' at ')
            if i < 10:
                # Give the first ten entries keyboard accelerators 1..9,0.
                s = '&%d %s' % ((i + 1) % 10, s)
            a = self._menu.addAction(s)
            # Store a weakref so the menu does not keep the widget alive.
            a.setData(weakref.ref(w))
    @pyqtSlot()
    def refresh(self):
        """Disable menu entries whose weakly-referenced widget is gone."""
        for a in self._menu.actions():
            wref = a.data().toPyObject()
            if not wref:
                continue
            w = wref()
            a.setEnabled(bool(w))
    @pyqtSlot(QAction)
    def _emitTriggered(self, action):
        # Re-emit with the dereferenced widget, if it is still alive.
        wref = action.data().toPyObject()
        w = wref()
        if w:
            self.triggered.emit(w)
class GcInfoDialog(QDialog):
    """Dialog showing gc.get_referrers() info for a weakly-held target widget."""
    def __init__(self, parent=None):
        super(GcInfoDialog, self).__init__(parent)
        self.setLayout(QVBoxLayout(self))
        self._infoEdit = QTextBrowser(self)
        self.layout().addWidget(self._infoEdit)
        self._followActiveCheck = QCheckBox('&Follow active window', self)
        self._followActiveCheck.setChecked(True)
        self.layout().addWidget(self._followActiveCheck)
        self._buttonBox = bbox = QDialogButtonBox(self)
        self.layout().addWidget(bbox)
        b = bbox.addButton('&Show Widget', QDialogButtonBox.ActionRole)
        b.clicked.connect(self.showWidget)
        b = bbox.addButton('&Destroy', QDialogButtonBox.ResetRole)
        b.clicked.connect(self.deleteWidget)
        b.setAutoDefault(False)
        # Weakref to the widget under inspection; None when nothing selected.
        self._targetWidgetRef = None
        QApplication.instance().focusChanged.connect(self._updateByFocusChange)
        self._updateButtons()
        self.resize(600, 400)
    def targetWidget(self):
        """Return the inspected widget, or None if unset or already deleted."""
        if not self._targetWidgetRef:
            return
        return self._targetWidgetRef()
    @pyqtSlot()
    def showWidget(self):
        """Show and raise the inspected widget."""
        w = self.targetWidget()
        if not w:
            self._updateButtons()
            return
        w.show()
        w.raise_()
        w.activateWindow()
    @pyqtSlot()
    def deleteWidget(self):
        """Schedule deletion of the inspected widget."""
        w = self.targetWidget()
        if not w:
            self._updateButtons()
            return
        w.deleteLater()
    @pyqtSlot(QWidget, QWidget)
    def _updateByFocusChange(self, old, now):
        # Retarget to the newly focused window, unless following is off,
        # focus stayed within the same window, or focus moved to this dialog.
        if (not self._followActiveCheck.isChecked()
            or not old or not now or old.window() is now.window()
            or now.window() is self):
            return
        self.update(now.window())
    def update(self, w):
        """Set *w* as the inspection target and render its GC referrers.

        NOTE(review): this shadows QWidget.update() (the repaint slot) with a
        different signature; callers in this module always pass a widget.
        """
        if not w:
            self._targetWidgetRef = None
            self._updateButtons()
            return
        referrers = gc.get_referrers(w)
        self.setWindowTitle('GC Info - %r' % w)
        self._infoEdit.clear()
        self._infoEdit.append('<h1>Referrers</h1>')
        self._infoEdit.append('<pre>%s</pre>'
                              % cgi.escape(pprint.pformat(referrers)))
        # Drop our own strong reference so we do not appear as a referrer
        # (and do not keep the widget alive).
        del referrers
        self._targetWidgetRef = weakref.ref(w)
        self._updateButtons()
    @pyqtSlot()
    def _updateButtons(self):
        self._buttonBox.setEnabled(bool(self.targetWidget()))
| gpl-2.0 |
jonathan-beard/edx-platform | common/djangoapps/third_party_auth/models.py | 16 | 18301 | # -*- coding: utf-8 -*-
"""
Models used to implement SAML SSO support in third_party_auth
(inlcuding Shibboleth support)
"""
from config_models.models import ConfigurationModel, cache
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
import json
import logging
from social.backends.base import BaseAuth
from social.backends.oauth import BaseOAuth2
from social.backends.saml import SAMLAuth, SAMLIdentityProvider
from social.exceptions import SocialAuthBaseException
from social.utils import module_member
log = logging.getLogger(__name__)
# A dictionary of {name: class} entries for each python-social-auth backend available.
# Because this setting can specify arbitrary code to load and execute, it is set via
# normal Django settings only and cannot be changed at runtime:
def _load_backend_classes(base_class=BaseAuth):
    """ Load the list of python-social-auth backend classes from Django settings """
    for class_path in settings.AUTHENTICATION_BACKENDS:
        auth_class = module_member(class_path)
        if issubclass(auth_class, base_class):
            yield auth_class
# Registry of all backends, plus name lists filtered by protocol family;
# these feed the `choices` of the backend_name model fields below.
_PSA_BACKENDS = {backend_class.name: backend_class for backend_class in _load_backend_classes()}
_PSA_OAUTH2_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(BaseOAuth2)]
_PSA_SAML_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(SAMLAuth)]
def clean_json(value, of_type):
    """ Simple helper method to parse and clean JSON.

    :param value: JSON string to validate (may be empty or whitespace).
    :param of_type: expected top-level Python type (e.g. ``dict`` or ``list``).
    :returns: normalized, pretty-printed JSON string.
    :raises ValidationError: if ``value`` is invalid JSON or not ``of_type``.
    """
    if not value.strip():
        # Treat an empty field as an empty instance of the expected type.
        return json.dumps(of_type())
    try:
        value_python = json.loads(value)
    except ValueError as err:
        # Fix: format the exception itself rather than err.message --
        # BaseException.message was deprecated in Python 2.6 and removed in 3.
        raise ValidationError("Invalid JSON: {}".format(err))
    if not isinstance(value_python, of_type):
        raise ValidationError("Expected a JSON {}".format(of_type))
    return json.dumps(value_python, indent=4)
class AuthNotConfigured(SocialAuthBaseException):
    """ Exception when SAMLProviderData or other required info is missing """
    def __init__(self, provider_name):
        super(AuthNotConfigured, self).__init__()
        self.provider_name = provider_name

    def __str__(self):
        message = _('Authentication with {} is currently unavailable.')  # pylint: disable=no-member
        return message.format(self.provider_name)
class ProviderConfig(ConfigurationModel):
    """
    Abstract Base Class for configuring a third_party_auth provider
    """
    icon_class = models.CharField(
        max_length=50, default='fa-sign-in',
        help_text=(
            'The Font Awesome (or custom) icon class to use on the login button for this provider. '
            'Examples: fa-google-plus, fa-facebook, fa-linkedin, fa-sign-in, fa-university'
        ),
    )
    name = models.CharField(max_length=50, blank=False, help_text="Name of this provider (shown to users)")
    secondary = models.BooleanField(
        default=False,
        help_text=_(
            'Secondary providers are displayed less prominently, '
            'in a separate list of "Institution" login providers.'
        ),
    )
    skip_registration_form = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is enabled, users will not be asked to confirm their details "
            "(name, email, etc.) during the registration process. Only select this option "
            "for trusted providers that are known to provide accurate user information."
        ),
    )
    skip_email_verification = models.BooleanField(
        default=False,
        help_text=_(
            "If this option is selected, users will not be required to confirm their "
            "email, and their account will be activated immediately upon registration."
        ),
    )
    prefix = None # used for provider_id. Set to a string value in subclass
    backend_name = None # Set to a field or fixed value in subclass
    # "enabled" field is inherited from ConfigurationModel
    class Meta(object): # pylint: disable=missing-docstring
        abstract = True
    @property
    def provider_id(self):
        """ Unique string key identifying this provider. Must be URL and css class friendly. """
        # KEY_FIELDS is declared by concrete subclasses (see the OAuth2 and
        # SAML config classes below).
        assert self.prefix is not None
        return "-".join((self.prefix, ) + tuple(getattr(self, field) for field in self.KEY_FIELDS))
    @property
    def backend_class(self):
        """ Get the python-social-auth backend class used for this provider """
        return _PSA_BACKENDS[self.backend_name]
    def get_url_params(self):
        """ Get a dict of GET parameters to append to login links for this provider """
        return {}
    def is_active_for_pipeline(self, pipeline):
        """ Is this provider being used for the specified pipeline? """
        return self.backend_name == pipeline['backend']
    def match_social_auth(self, social_auth):
        """ Is this provider being used for this UserSocialAuth entry? """
        return self.backend_name == social_auth.provider
    @classmethod
    def get_register_form_data(cls, pipeline_kwargs):
        """Gets dict of data to display on the register form.

        common.djangoapps.student.views.register_user uses this to populate the
        new account creation form with values supplied by the user's chosen
        provider, preventing duplicate data entry.

        Args:
            pipeline_kwargs: dict of string -> object. Keyword arguments
                accumulated by the pipeline thus far.

        Returns:
            Dict of string -> string. Keys are names of form fields; values are
            values for that field. Where there is no value, the empty string
            must be used.
        """
        # Details about the user sent back from the provider.
        details = pipeline_kwargs.get('details')
        # Get the username separately to take advantage of the de-duping logic
        # built into the pipeline. The provider cannot de-dupe because it can't
        # check the state of taken usernames in our system. Note that there is
        # technically a data race between the creation of this value and the
        # creation of the user object, so it is still possible for users to get
        # an error on submit.
        suggested_username = pipeline_kwargs.get('username')
        return {
            'email': details.get('email', ''),
            'name': details.get('fullname', ''),
            'username': suggested_username,
        }
    def get_authentication_backend(self):
        """Gets associated Django settings.AUTHENTICATION_BACKEND string."""
        return '{}.{}'.format(self.backend_class.__module__, self.backend_class.__name__)
class OAuth2ProviderConfig(ProviderConfig):
    """
    Configuration Entry for an OAuth2 based provider.
    """
    prefix = 'oa2'
    KEY_FIELDS = ('backend_name', ) # Backend name is unique
    backend_name = models.CharField(
        max_length=50, choices=[(name, name) for name in _PSA_OAUTH2_BACKENDS], blank=False, db_index=True,
        help_text=(
            "Which python-social-auth OAuth2 provider backend to use. "
            "The list of backend choices is determined by the THIRD_PARTY_AUTH_BACKENDS setting."
            # To be precise, it's set by AUTHENTICATION_BACKENDS - which aws.py sets from THIRD_PARTY_AUTH_BACKENDS
        )
    )
    key = models.TextField(blank=True, verbose_name="Client ID")
    secret = models.TextField(blank=True, verbose_name="Client Secret")
    other_settings = models.TextField(blank=True, help_text="Optional JSON object with advanced settings, if any.")
    class Meta(object): # pylint: disable=missing-docstring
        verbose_name = "Provider Configuration (OAuth2)"
        verbose_name_plural = verbose_name
    def clean(self):
        """ Standardize and validate fields """
        super(OAuth2ProviderConfig, self).clean()
        self.other_settings = clean_json(self.other_settings, dict)
    def get_setting(self, name):
        """ Get the value of a setting, or raise KeyError """
        # "KEY"/"SECRET" map to the key/secret model fields above; anything
        # else is looked up in the other_settings JSON blob.
        if name in ("KEY", "SECRET"):
            return getattr(self, name.lower())
        if self.other_settings:
            other_settings = json.loads(self.other_settings)
            assert isinstance(other_settings, dict), "other_settings should be a JSON object (dictionary)"
            return other_settings[name]
        raise KeyError
class SAMLProviderConfig(ProviderConfig):
    """
    Configuration Entry for a SAML/Shibboleth provider.
    """
    prefix = 'saml'
    KEY_FIELDS = ('idp_slug', )
    backend_name = models.CharField(
        max_length=50, default='tpa-saml', choices=[(name, name) for name in _PSA_SAML_BACKENDS], blank=False,
        help_text="Which python-social-auth provider backend to use. 'tpa-saml' is the standard edX SAML backend.")
    idp_slug = models.SlugField(
        max_length=30, db_index=True,
        help_text=(
            'A short string uniquely identifying this provider. '
            'Cannot contain spaces and should be a usable as a CSS class. Examples: "ubc", "mit-staging"'
        ))
    entity_id = models.CharField(
        max_length=255, verbose_name="Entity ID", help_text="Example: https://idp.testshib.org/idp/shibboleth")
    metadata_source = models.CharField(
        max_length=255,
        help_text=(
            "URL to this provider's XML metadata. Should be an HTTPS URL. "
            "Example: https://www.testshib.org/metadata/testshib-providers.xml"
        ))
    # The attr_* fields below override the SAML attribute URNs used to map
    # IdP assertions onto user fields; blank means use the backend default.
    attr_user_permanent_id = models.CharField(
        max_length=128, blank=True, verbose_name="User ID Attribute",
        help_text="URN of the SAML attribute that we can use as a unique, persistent user ID. Leave blank for default.")
    attr_full_name = models.CharField(
        max_length=128, blank=True, verbose_name="Full Name Attribute",
        help_text="URN of SAML attribute containing the user's full name. Leave blank for default.")
    attr_first_name = models.CharField(
        max_length=128, blank=True, verbose_name="First Name Attribute",
        help_text="URN of SAML attribute containing the user's first name. Leave blank for default.")
    attr_last_name = models.CharField(
        max_length=128, blank=True, verbose_name="Last Name Attribute",
        help_text="URN of SAML attribute containing the user's last name. Leave blank for default.")
    attr_username = models.CharField(
        max_length=128, blank=True, verbose_name="Username Hint Attribute",
        help_text="URN of SAML attribute to use as a suggested username for this user. Leave blank for default.")
    attr_email = models.CharField(
        max_length=128, blank=True, verbose_name="Email Attribute",
        help_text="URN of SAML attribute containing the user's email address[es]. Leave blank for default.")
    other_settings = models.TextField(
        verbose_name="Advanced settings", blank=True,
        help_text=(
            'For advanced use cases, enter a JSON object with addtional configuration. '
            'The tpa-saml backend supports only {"requiredEntitlements": ["urn:..."]} '
            'which can be used to require the presence of a specific eduPersonEntitlement.'
        ))
    def clean(self):
        """ Standardize and validate fields """
        super(SAMLProviderConfig, self).clean()
        self.other_settings = clean_json(self.other_settings, dict)
    class Meta(object): # pylint: disable=missing-docstring
        verbose_name = "Provider Configuration (SAML IdP)"
        verbose_name_plural = "Provider Configuration (SAML IdPs)"
    def get_url_params(self):
        """ Get a dict of GET parameters to append to login links for this provider """
        return {'idp': self.idp_slug}
    def is_active_for_pipeline(self, pipeline):
        """ Is this provider being used for the specified pipeline? """
        return self.backend_name == pipeline['backend'] and self.idp_slug == pipeline['kwargs']['response']['idp_name']
    def match_social_auth(self, social_auth):
        """ Is this provider being used for this UserSocialAuth entry? """
        # SAML UserSocialAuth uids are namespaced as "<idp_slug>:<uid>".
        prefix = self.idp_slug + ":"
        return self.backend_name == social_auth.provider and social_auth.uid.startswith(prefix)
    def get_config(self):
        """
        Return a SAMLIdentityProvider instance for use by SAMLAuthBackend.

        Essentially this just returns the values of this object and its
        associated 'SAMLProviderData' entry.
        """
        if self.other_settings:
            conf = json.loads(self.other_settings)
        else:
            conf = {}
        attrs = (
            'attr_user_permanent_id', 'attr_full_name', 'attr_first_name',
            'attr_last_name', 'attr_username', 'attr_email', 'entity_id')
        for field in attrs:
            val = getattr(self, field)
            if val:
                conf[field] = val
        # Now get the data fetched automatically from the metadata.xml:
        data = SAMLProviderData.current(self.entity_id)
        if not data or not data.is_valid():
            log.error("No SAMLProviderData found for %s. Run 'manage.py saml pull' to fix or debug.", self.entity_id)
            raise AuthNotConfigured(provider_name=self.name)
        conf['x509cert'] = data.public_key
        conf['url'] = data.sso_url
        return SAMLIdentityProvider(self.idp_slug, **conf)
class SAMLConfiguration(ConfigurationModel):
    """
    General configuration required for this edX instance to act as a SAML
    Service Provider and allow users to authenticate via third party SAML
    Identity Providers (IdPs)
    """
    private_key = models.TextField(
        help_text=(
            'To generate a key pair as two files, run '
            '"openssl req -new -x509 -days 3652 -nodes -out saml.crt -keyout saml.key". '
            'Paste the contents of saml.key here.'
        )
    )
    public_key = models.TextField(help_text="Public key certificate.")
    entity_id = models.CharField(max_length=255, default="http://saml.example.com", verbose_name="Entity ID")
    org_info_str = models.TextField(
        verbose_name="Organization Info",
        default='{"en-US": {"url": "http://www.example.com", "displayname": "Example Inc.", "name": "example"}}',
        help_text="JSON dictionary of 'url', 'displayname', and 'name' for each language",
    )
    other_config_str = models.TextField(
        default='{\n"SECURITY_CONFIG": {"metadataCacheDuration": 604800, "signMetadata": false}\n}',
        help_text=(
            "JSON object defining advanced settings that are passed on to python-saml. "
            "Valid keys that can be set here include: SECURITY_CONFIG and SP_EXTRA"
        ),
    )
    class Meta(object): # pylint: disable=missing-docstring
        verbose_name = "SAML Configuration"
        verbose_name_plural = verbose_name
    def clean(self):
        """ Standardize and validate fields """
        super(SAMLConfiguration, self).clean()
        self.org_info_str = clean_json(self.org_info_str, dict)
        self.other_config_str = clean_json(self.other_config_str, dict)
        # Strip PEM armor from the key/cert; only the base64 body is stored.
        self.private_key = (
            self.private_key
            .replace("-----BEGIN RSA PRIVATE KEY-----", "")
            .replace("-----BEGIN PRIVATE KEY-----", "")
            .replace("-----END RSA PRIVATE KEY-----", "")
            .replace("-----END PRIVATE KEY-----", "")
            .strip()
        )
        self.public_key = (
            self.public_key
            .replace("-----BEGIN CERTIFICATE-----", "")
            .replace("-----END CERTIFICATE-----", "")
            .strip()
        )
    def get_setting(self, name):
        """ Get the value of a setting, or raise KeyError """
        if name == "ORG_INFO":
            return json.loads(self.org_info_str)
        if name == "SP_ENTITY_ID":
            return self.entity_id
        if name == "SP_PUBLIC_CERT":
            return self.public_key
        if name == "SP_PRIVATE_KEY":
            return self.private_key
        other_config = json.loads(self.other_config_str)
        if name in ("TECHNICAL_CONTACT", "SUPPORT_CONTACT"):
            # Platform-wide defaults, overridable per-key in other_config_str.
            contact = {
                "givenName": "{} Support".format(settings.PLATFORM_NAME),
                "emailAddress": settings.TECH_SUPPORT_EMAIL
            }
            contact.update(other_config.get(name, {}))
            return contact
        return other_config[name] # SECURITY_CONFIG, SP_EXTRA, or similar extra settings
class SAMLProviderData(models.Model):
    """
    Data about a SAML IdP that is fetched automatically by 'manage.py saml pull'

    This data is only required during the actual authentication process.
    """
    # TTL passed to cache.set() below -- presumably seconds, as with Django
    # caches; confirm against the config_models cache implementation.
    cache_timeout = 600
    fetched_at = models.DateTimeField(db_index=True, null=False)
    expires_at = models.DateTimeField(db_index=True, null=True)
    entity_id = models.CharField(max_length=255, db_index=True) # This is the key for lookups in this table
    sso_url = models.URLField(verbose_name="SSO URL")
    public_key = models.TextField()
    class Meta(object): # pylint: disable=missing-docstring
        verbose_name = "SAML Provider Data"
        verbose_name_plural = verbose_name
        ordering = ('-fetched_at', )
    def is_valid(self):
        """ Is this data valid? """
        if self.expires_at and timezone.now() > self.expires_at:
            return False
        return bool(self.entity_id and self.sso_url and self.public_key)
    # Render is_valid as a boolean icon in the Django admin list display.
    is_valid.boolean = True
    @classmethod
    def cache_key_name(cls, entity_id):
        """ Return the name of the key to use to cache the current data """
        return 'configuration/{}/current/{}'.format(cls.__name__, entity_id)
    @classmethod
    def current(cls, entity_id):
        """
        Return the active data entry, if any, otherwise None
        """
        cached = cache.get(cls.cache_key_name(entity_id))
        if cached is not None:
            return cached
        # Cache miss: take the most recently fetched row (or None) and cache it.
        try:
            current = cls.objects.filter(entity_id=entity_id).order_by('-fetched_at')[0]
        except IndexError:
            current = None
        cache.set(cls.cache_key_name(entity_id), current, cls.cache_timeout)
        return current
| agpl-3.0 |
jwalgran/otm-core | opentreemap/manage_treemap/tests/test_instance_invitations.py | 3 | 7088 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from registration.models import RegistrationProfile
from django.core import mail
from django.http import HttpRequest
from django.test import override_settings
from treemap.models import User, InstanceUser
from treemap.tests import make_user, make_instance
from treemap.tests.test_urls import UrlTestCase
from registration_backend.views import (RegistrationView, ActivationView,
RegistrationForm)
from manage_treemap.models import InstanceInvitation
class InstanceInvitationTest(UrlTestCase):
    """Tests the registration flow with and without an InstanceInvitation:
    an invited user whose email and activation key match is activated
    immediately and attached to the invited instance; otherwise the normal
    email-confirmation loop applies."""
    class MockSession(dict):
        # Minimal stand-in for a Django session; cycle_key is called by the
        # auth login machinery and can be a no-op here.
        def cycle_key(self):
            pass
    def setUp(self):
        self.user = make_user(username='test', password='password')
        self.instance = make_instance()
        # Create an admin user to verify that not all admins get notifications
        self.admin = make_user(instance=self.instance, username='admin',
                               admin=True)
    def _login(self):
        self.client.post('/accounts/login/',
                         {'username': 'test',
                          'password': 'password'})
    def _make_registration_view(self):
        """Build a RegistrationView wired to a minimal fake request."""
        rv = RegistrationView()
        rv.request = self._make_request()
        return rv
    def _make_request(self):
        request = HttpRequest()
        request.META = {'SERVER_NAME': 'localhost',
                        'SERVER_PORT': '80'}
        request.session = self.MockSession()
        return request
    def test_normal_registration_without_invite(self):
        mail.outbox = []
        email = "arst@neio.com"
        rv = self._make_registration_view()
        form = RegistrationForm(data={
            'email': email,
            'email2': email,
            'username': "u1",
            'password1': "pass",
            'password2': "pass"
        })
        self.assertTrue(form.is_valid())
        rv.register(form)
        users = User.objects.filter(email=email)
        self.assertTrue(users.exists())
        user = users[0]
        # No invite: the account stays inactive pending email confirmation.
        self.assertFalse(user.is_active)
        self.assertEquals(len(InstanceUser.objects.filter(user=user)), 0)
        success_url = rv.get_success_url(user)
        self.assertEqual(success_url, 'registration_complete')
    def _invite_and_register(self, invite_email, user_email=None,
                             key_matches=True):
        """Create an invitation for invite_email, then register user_email
        (defaults to invite_email), optionally presenting the invite key."""
        if user_email is None:
            user_email = invite_email
        invite = InstanceInvitation.objects.create(
            created_by=self.user,
            instance=self.instance,
            email=invite_email,
            role=self.instance.default_role)
        # Clear the outbox after creating the instance so that emails
        # triggered by instance creation do not affect this test
        mail.outbox = []
        rv = self._make_registration_view()
        if key_matches:
            rv.request.GET = {'key': invite.activation_key}
        form = RegistrationForm(data={
            'email': user_email,
            'email2': user_email,
            'username': "u1",
            'password1': "pass",
            'password2': "pass"
        })
        self.assertTrue(form.is_valid())
        rv.register(form)
        return rv
    def assert_user_was_invited(self, view, new_user):
        """Assert the invited-user outcome: active account, instance
        membership, redirect to the instance map, and exactly one
        notification email to the inviter."""
        self.assertTrue(new_user.is_active)
        self.assertIsNotNone(new_user.get_instance_user(self.instance))
        success_url, __, __ = view.get_success_url(new_user)
        self.assertEqual(success_url, '/%s/map/' % self.instance.url_name)
        self.assertEquals(len(mail.outbox), 1)
        msg = mail.outbox[0]
        # Make sure we have some chars and the correct receivers
        self.assertGreater(len(msg.subject), 10)
        self.assertGreater(len(msg.body), 10)
        to = set(msg.to)
        expected_to = {self.user.email}
        self.assertEquals(to, expected_to)
    # Disable plug-in function to ensure we are testing core behavior
    @override_settings(INVITATION_ACCEPTED_NOTIFICATION_EMAILS=None)
    def test_adds_to_invited_instances_and_redirects(self):
        rv = self._invite_and_register("some@email.com")
        users = User.objects.filter(email="some@email.com")
        self.assertTrue(users.exists())
        new_user = users[0]
        self.assert_user_was_invited(rv, new_user)
    def test_does_not_redirect_when_email_different(self):
        rv = self._invite_and_register("some@email.com", "different@other.com")
        users = User.objects.filter(email="different@other.com")
        self.assertTrue(users.exists())
        new_user = users[0]
        # The email did not match the invite email, so the user should not be
        # activated (yet)
        self.assertFalse(new_user.is_active)
        self.assertIsNone(new_user.get_instance_user(self.instance))
        success_url = rv.get_success_url(new_user)
        self.assertEqual(success_url, 'registration_complete')
        # We should get an activation email, and no others, because the emails
        # did not match
        self.assertEquals(len(mail.outbox), 1)
        msg = mail.outbox[0]
        self.assertEquals(tuple(msg.to), (new_user.email,))
    def test_does_not_redirect_when_key_does_not_match(self):
        rv = self._invite_and_register("some@email.com", key_matches=False)
        users = User.objects.filter(email="some@email.com")
        self.assertTrue(users.exists())
        new_user = users[0]
        # The activation key did not match the invite key, so the user should
        # not be activated (yet)
        self.assertFalse(new_user.is_active)
        self.assertIsNone(new_user.get_instance_user(self.instance))
        success_url = rv.get_success_url(new_user)
        self.assertEqual(success_url, 'registration_complete')
        # We should get an activation email, and no others, because the emails
        # did not match
        self.assertEquals(len(mail.outbox), 1)
        msg = mail.outbox[0]
        self.assertEquals(tuple(msg.to), (new_user.email,))
    # Disable plug-in function to ensure we are testing core behavior
    @override_settings(INVITATION_ACCEPTED_NOTIFICATION_EMAILS=None)
    def test_adds_to_invited_instances_after_activation(self):
        self._invite_and_register("some@email.com", "different@other.com")
        users = User.objects.filter(email="different@other.com")
        self.assertTrue(users.exists())
        new_user = users[0]
        reg_profile = RegistrationProfile.objects.get(user=new_user)
        av = ActivationView()
        av.request = self._make_request()
        mail.outbox = []
        activated_user = av.activate(activation_key=reg_profile.activation_key)
        # All the things that were true for the same email case should be true
        # now that we have activated via email loop, even though the user's
        # email is different from the original invite (since the keys match)
        self.assert_user_was_invited(av, activated_user)
| gpl-3.0 |
framon/samba | python/samba/ndr.py | 45 | 1563 | # -*- coding: utf-8 -*-
# Unix SMB/CIFS implementation.
# Copyright © Jelmer Vernooij <jelmer@samba.org> 2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Network Data Representation (NDR) marshalling and unmarshalling."""
def ndr_pack(object):
    """Pack a NDR object.

    :param object: Object to pack
    :return: String object with marshalled object.
    """
    # NDR-generated classes expose __ndr_pack__; anything else is rejected.
    pack_method = getattr(object, "__ndr_pack__", None)
    if pack_method is None:
        raise TypeError("%r is not a NDR object" % object)
    return pack_method()
def ndr_unpack(cls, data, allow_remaining=False):
    """NDR unpack an object.

    :param cls: Class of the object to unpack
    :param data: Buffer to unpack
    :param allow_remaining: allows remaining data at the end (default=False)
    :return: Unpacked object
    :raises TypeError: if `cls` is not an NDR-generated class
    """
    object = cls()
    # Consistency fix: mirror ndr_pack and raise a descriptive TypeError
    # (instead of a bare AttributeError) when the class is not an NDR one.
    unpack_method = getattr(object, "__ndr_unpack__", None)
    if unpack_method is None:
        raise TypeError("%r is not a NDR object" % object)
    unpack_method(data, allow_remaining=allow_remaining)
    return object
def ndr_print(object):
    """Return the NDR text representation of *object*.

    :param object: NDR object providing __ndr_print__
    :return: string representation produced by the object itself
    """
    return object.__ndr_print__()
| gpl-3.0 |
jbedorf/tensorflow | tensorflow/python/framework/proto_test.py | 178 | 1704 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Protobuf related tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class ProtoTest(test.TestCase):

  # TODO(vrv): re-enable this test once we figure out how this can
  # pass the pip install test (where the user is expected to have
  # protobuf installed).
  def _testLargeProto(self):
    """Round-trip a >64MB GraphDef through protobuf serialization."""
    # A constant this large forces the serialized graph past 64MB.
    big_constant = constant_op.constant(np.zeros([1024, 1024, 17]))
    graph_def = big_constant.op.graph.as_graph_def()
    wire_bytes = graph_def.SerializeToString()
    round_tripped = ops.Graph().as_graph_def()
    # The protobuf python library must handle protos larger than 64MB.
    round_tripped.ParseFromString(wire_bytes)
    self.assertProtoEquals(round_tripped, graph_def)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
yanchen036/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops.py | 7 | 46402 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
            tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
  """Gather along one axis: move it to the front, gather, restore order."""
  with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
    remaining = list(labeled_tensor.axes.remove(axis.name).values())
    front_axes = core.Axes([axis] + remaining)
    # tf.gather operates on dimension 0, so transpose the target axis there.
    fronted = core.transpose(labeled_tensor, front_axes.keys())
    gathered = core.LabeledTensor(
        array_ops.gather(fronted.tensor, indexer), front_axes)
    # Put the axes back in the caller's original order.
    return core.transpose(gathered, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(string_types,
                       tc.Union(slice, collections.Hashable, list)),
            tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
  """Slice out a subset of the tensor.

  Args:
    labeled_tensor: The input tensor.
    selection: A dictionary mapping an axis name to a scalar, slice or list of
      values to select. Currently supports two types of selections:
        (a) Any number of scalar and/or slice selections.
        (b) Exactly one list selection, without any scalars or slices.
    name: Optional op name.

  Returns:
    The selection as a `LabeledTensor`.

  Raises:
    ValueError: If the tensor doesn't have an axis in the selection or if
      that axis lacks labels.
    KeyError: If any labels in a selection are not found in the original axis.
    NotImplementedError: If you attempt to combine a list selection with
      scalar selection or another list selection.
  """
  with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    # Partition the selection into positional slices/scalars (`slices`) and
    # 1-D integer indexers for list selections (`indexers`).
    slices = {}
    indexers = {}
    for axis_name, value in selection.items():
      if axis_name not in labeled_tensor.axes:
        raise ValueError(
            'The tensor does not have an axis named %s. Its axes are: %r' %
            (axis_name, labeled_tensor.axes.keys()))
      axis = labeled_tensor.axes[axis_name]
      if axis.labels is None:
        raise ValueError(
            'The axis named %s does not have labels. The axis is: %r' %
            (axis_name, axis))
      if isinstance(value, slice):
        # TODO(shoyer): consider deprecating using slices in favor of lists
        if value.start is None:
          start = None
        else:
          start = axis.index(value.start)
        if value.stop is None:
          stop = None
        else:
          # For now, follow the pandas convention of making labeled slices
          # inclusive of both bounds.
          stop = axis.index(value.stop) + 1
        if value.step is not None:
          raise NotImplementedError('slicing with a step is not yet supported')
        slices[axis_name] = slice(start, stop)
      # Needs to be after checking for slices, since slice objects claim to be
      # instances of collections.Hashable but hash() on them fails.
      elif isinstance(value, collections.Hashable):
        slices[axis_name] = axis.index(value)
      elif isinstance(value, list):
        if indexers:
          raise NotImplementedError(
              'select does not yet support more than one list selection at '
              'the same time')
        # Translate labels to integer positions for tf.gather.
        indexer = [axis.index(v) for v in value]
        indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)
      else:
        # If type checking is working properly, this shouldn't be possible.
        raise TypeError('cannot handle arbitrary types')
    if indexers and slices:
      raise NotImplementedError(
          'select does not yet support combined scalar and list selection')
    # For now, handle array selection separately, because tf.gather_nd does
    # not support gradients yet. Later, using gather_nd will let us combine
    # these paths.
    if indexers:
      (axis_name, indexer), = indexers.items()
      axis = core.Axis(axis_name, selection[axis_name])
      return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
    else:
      return core.slice_function(labeled_tensor, slices, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), string_types,
    tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
  """Concatenate tensors along a dimension.

  See tf.concat.

  Args:
    labeled_tensors: A list of input LabeledTensors.
    axis_name: The name of the axis along which to concatenate.
    name: Optional op name.

  Returns:
    The concatenated tensor.
    The coordinate labels for the concatenation dimension are also concatenated,
    if they are available for every tensor.

  Raises:
    ValueError: If fewer than one tensor inputs is provided, if the tensors
      have incompatible axes, or if `axis_name` isn't the name of an axis.
  """
  with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]
    if len(labeled_tensors) < 1:
      raise ValueError('concat expects at least 1 tensor, but received %s' %
                       labeled_tensors)
    # All tensors must have these axes.
    axes_0 = labeled_tensors[0].axes
    axis_names = list(axes_0.keys())
    if axis_name not in axis_names:
      raise ValueError('%s not in %s' % (axis_name, axis_names))
    shared_axes = axes_0.remove(axis_name)
    tensors = [labeled_tensors[0].tensor]
    concat_axis_list = [axes_0[axis_name]]
    # Check that every remaining input agrees on the non-concatenated axes,
    # and collect its tensor plus its labels along the concatenation axis.
    for labeled_tensor in labeled_tensors[1:]:
      current_shared_axes = labeled_tensor.axes.remove(axis_name)
      if current_shared_axes != shared_axes:
        # TODO(shoyer): add more specific checks about what went wrong,
        # including raising AxisOrderError when appropriate
        raise ValueError('Mismatched shared axes: the first tensor '
                         'had axes %r but this tensor has axes %r.' %
                         (shared_axes, current_shared_axes))
      # Accumulate the axis labels, if they're available.
      concat_axis_list.append(labeled_tensor.axes[axis_name])
      tensors.append(labeled_tensor.tensor)
    concat_axis = core.concat_axes(concat_axis_list)
    concat_dimension = axis_names.index(axis_name)
    concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
    values = list(axes_0.values())
    # Rebuild the axis list with the merged concatenation axis in place.
    concat_axes = (values[:concat_dimension] + [concat_axis] +
                   values[concat_dimension + 1:])
    return core.LabeledTensor(concat_tensor, concat_axes)
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Collection(core.LabeledTensorLike),
    tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
  """Pack tensors along a new axis.

  See tf.pack.

  Args:
    labeled_tensors: The input tensors, which must have identical axes.
    new_axis: The name of the new axis, or a tuple containing the name
      and coordinate labels.
    axis_position: Optional integer position at which to insert the new axis.
    name: Optional op name.

  Returns:
    The packed tensors as a single LabeledTensor, with `new_axis` in the given
    `axis_position`.

  Raises:
    ValueError: If fewer than one input tensors is provided, or if the tensors
      don't have identical axes.
  """
  with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
    converted = [core.convert_to_labeled_tensor(lt) for lt in labeled_tensors]
    if len(converted) < 1:
      raise ValueError('pack expects at least 1 tensors, but received %s' %
                       converted)
    # Every input must carry exactly the same axes as the first one.
    reference_axes = converted[0].axes
    for candidate in converted:
      if candidate.axes != reference_axes:
        raise ValueError('Non-identical axes. Expected %s but got %s' %
                         (reference_axes, candidate.axes))
    stacked = array_ops.stack(
        [candidate.tensor for candidate in converted],
        axis=axis_position,
        name=scope)
    result_axes = list(reference_axes.values())
    result_axes.insert(axis_position, new_axis)
    return core.LabeledTensor(stacked, result_axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
  """Unpack the tensor.

  See tf.unpack.

  Args:
    labeled_tensor: The input tensor.
    axis_name: Optional name of axis to unpack. By default, the first axis is
      used.
    name: Optional op name.

  Returns:
    The list of unpacked LabeledTensors.

  Raises:
    ValueError: If `axis_name` is not an axis on the input.
  """
  with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    all_names = list(labeled_tensor.axes.keys())
    # Default to unpacking along the leading axis.
    target_name = all_names[0] if axis_name is None else axis_name
    if target_name not in all_names:
      raise ValueError('%s not in %s' % (target_name, all_names))
    dimension = all_names.index(target_name)
    tensors = array_ops.unstack(
        labeled_tensor.tensor, axis=dimension, name=scope)
    # The unpacked results keep every axis except the one we split on.
    remaining_axes = [
        axis for position, axis in enumerate(labeled_tensor.axes.values())
        if position != dimension
    ]
    return [core.LabeledTensor(t, remaining_axes) for t in tensors]
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Collection(string_types),
            tc.Collection(tc.Union(string_types, core.AxisLike)),
            tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
  """Reshape specific axes of a LabeledTensor.

  Non-indicated axes remain in their original locations.

  Args:
    labeled_tensor: The input tensor.
    existing_axes: List of axis names found on the input tensor. These must
      appear sequentially in the list of axis names on the input. In other
      words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
    new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
      providing new axes with which to replace `existing_axes` in the reshaped
      result. At most one element of `new_axes` may be a string, indicating an
      axis with unknown size.
    name: Optional op name.

  Returns:
    The reshaped LabeledTensor.

  Raises:
    ValueError: If `existing_axes` are not all axes on the input, or if more
      than one of `new_axes` has unknown size.
    AxisOrderError: If `existing_axes` are not a slice of axis names on the
      input.
  """
  with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    original_axis_names = list(labeled_tensor.axes.keys())
    existing_axes = list(existing_axes)
    if not set(existing_axes) <= set(original_axis_names):
      raise ValueError('existing_axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (existing_axes, original_axis_names))
    # The axes being replaced must form a contiguous run [start, stop) in the
    # input's axis order.
    start = original_axis_names.index(existing_axes[0])
    stop = original_axis_names.index(existing_axes[-1]) + 1
    if existing_axes != original_axis_names[start:stop]:
      # We could support existing_axes that aren't a slice by using transpose,
      # but that could lead to unpredictable performance consequences because
      # transposes are not free in TensorFlow. If we did transpose
      # automatically, the user might never realize that their data is being
      # produced with the wrong order. (The later will occur with some frequency
      # because of how broadcasting automatically choose axis order.)
      # So for now we've taken the strict approach.
      raise core.AxisOrderError(
          'existing_axes %r are not a slice of axis names %r on the input '
          'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
          'axes on the input explicitly.' %
          (existing_axes, original_axis_names))
    if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
      raise ValueError(
          'at most one axis in new_axes can have unknown size. All other '
          'axes must have an indicated integer size or labels: %r' % new_axes)
    original_values = list(labeled_tensor.axes.values())
    # Unknown sizes become -1 so tf.reshape can infer them.
    axis_size = lambda axis: -1 if axis.size is None else axis.size
    shape = [axis_size(axis) for axis in original_values[:start]]
    for axis_ref in new_axes:
      if isinstance(axis_ref, string_types):
        shape.append(-1)
      else:
        axis = core.as_axis(axis_ref)
        shape.append(axis_size(axis))
    shape.extend(axis_size(axis) for axis in original_values[stop:])
    reshaped_tensor = array_ops.reshape(
        labeled_tensor.tensor, shape, name=scope)
    # Splice the new axes into the [start, stop) window of the original list.
    axes = original_values[:start] + list(new_axes) + original_values[stop:]
    return core.LabeledTensor(reshaped_tensor, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
            tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
  """Rename an axis of LabeledTensor.

  Args:
    labeled_tensor: The input tensor.
    existing_name: Name for an existing axis on the input.
    new_name: Desired replacement name.
    name: Optional op name.

  Returns:
    LabeledTensor with renamed axis.

  Raises:
    ValueError: If `existing_name` is not an axis on the input.
  """
  with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
    if existing_name not in labeled_tensor.axes:
      raise ValueError('existing_name %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (existing_name, labeled_tensor.axes.keys()))
    # Renaming is a reshape that swaps in an identically-valued axis under
    # the new name.
    replacement = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
    return reshape(labeled_tensor, [existing_name], [replacement], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
            tc.Collection(core.LabeledTensorLike), bool,
            tc.Optional(string_types))
def _batch_helper(default_name,
                  batch_fn,
                  batch_size,
                  enqueue_many,
                  labeled_tensors,
                  allow_smaller_final_batch,
                  name=None):
  """Shared implementation behind `batch` and `shuffle_batch`.

  Calls `batch_fn(tensors, scope)` on the raw tensors, then relabels each
  output with a leading 'batch' axis of size `batch_size` (or unknown size
  when `allow_smaller_final_batch` is set).
  """
  with ops.name_scope(name, default_name, labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]
    batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
    # TODO(shoyer): Remove this when they sanitize the TF API.
    if not isinstance(batch_ops, list):
      assert isinstance(batch_ops, ops.Tensor)
      batch_ops = [batch_ops]
    if allow_smaller_final_batch:
      # The last batch may be short, so the batch axis has unknown size.
      batch_size = None
    @tc.returns(core.Axes)
    @tc.accepts(core.Axes)
    def output_axes(axes):
      # Map an input's axes to the output's axes: replace (or prepend) the
      # leading 'batch' axis with one of the (possibly unknown) batch size.
      if enqueue_many:
        if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
          raise ValueError(
              'When enqueue_many is True, input tensors must have an axis '
              'called "batch" as their first dimension, '
              'but axes were %s' % axes)
        culled_axes = axes.remove('batch')
        return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
      else:
        return core.Axes([('batch', batch_size)] + list(axes.values()))
    output_labeled_tensors = []
    for i, tensor in enumerate(batch_ops):
      axes = output_axes(labeled_tensors[i].axes)
      output_labeled_tensors.append(core.LabeledTensor(tensor, axes))
    return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
    tc.Optional(string_types))
def batch(labeled_tensors,
          batch_size,
          num_threads=1,
          capacity=32,
          enqueue_many=False,
          allow_smaller_final_batch=False,
          name=None):
  """Rebatch a tensor.

  See tf.batch.

  Args:
    labeled_tensors: The input tensors.
    batch_size: The output batch size.
    num_threads: See tf.batch.
    capacity: See tf.batch.
    enqueue_many: If true, the input tensors must contain a 'batch' axis as
      their first axis.
      If false, the input tensors must not contain a 'batch' axis.
      See tf.batch.
    allow_smaller_final_batch: See tf.batch.
    name: Optional op name.

  Returns:
    The rebatched tensors.
    If enqueue_many is false, the output tensors will have a new 'batch' axis
      as their first axis.

  Raises:
    ValueError: If enqueue_many is True and the first axis of the tensors
      isn't "batch".
  """

  def _run_batch(tensors, scope):
    # Delegate queue construction to the core tf batching op.
    return input.batch(
        tensors,
        batch_size=batch_size,
        num_threads=num_threads,
        capacity=capacity,
        enqueue_many=enqueue_many,
        allow_smaller_final_batch=allow_smaller_final_batch,
        name=scope)

  return _batch_helper('lt_batch', _run_batch, batch_size, enqueue_many,
                       labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
    tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
                  batch_size,
                  num_threads=1,
                  capacity=32,
                  enqueue_many=False,
                  min_after_dequeue=0,
                  seed=None,
                  allow_smaller_final_batch=False,
                  name=None):
  """Rebatch a tensor, with shuffling.

  See tf.batch.

  Args:
    labeled_tensors: The input tensors.
    batch_size: The output batch size.
    num_threads: See tf.batch.
    capacity: See tf.batch.
    enqueue_many: If true, the input tensors must contain a 'batch' axis as
      their first axis.
      If false, the input tensors must not contain a 'batch' axis.
      See tf.batch.
    min_after_dequeue: Minimum number of elements in the queue after a dequeue,
      used to ensure mixing.
    seed: Optional random seed.
    allow_smaller_final_batch: See tf.batch.
    name: Optional op name.

  Returns:
    The rebatched tensors.
    If enqueue_many is false, the output tensors will have a new 'batch' axis
      as their first axis.

  Raises:
    ValueError: If enqueue_many is True and the first axis of the tensors
      isn't "batch".
  """

  def _run_shuffle_batch(tensors, scope):
    # Delegate queue construction to the core tf shuffling batch op.
    return input.shuffle_batch(
        tensors,
        batch_size=batch_size,
        num_threads=num_threads,
        capacity=capacity,
        enqueue_many=enqueue_many,
        min_after_dequeue=min_after_dequeue,
        seed=seed,
        allow_smaller_final_batch=allow_smaller_final_batch,
        name=scope)

  return _batch_helper('lt_shuffle_batch', _run_shuffle_batch, batch_size,
                       enqueue_many, labeled_tensors,
                       allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(string_types, int),
            tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
  """Randomly crops a tensor to a given size.

  See tf.random_crop.

  Args:
    labeled_tensor: The input tensor.
    shape_map: A dictionary mapping axis names to the size of the random crop
      for that dimension.
    seed: An optional random seed.
    name: An optional op name.

  Returns:
    A tensor of the same rank as `labeled_tensor`, cropped randomly in the
    selected dimensions.

  Raises:
    ValueError: If the shape map contains an axis name not in the input tensor.
  """
  with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    # Every axis named in the shape map must exist on the input.
    for cropped_name in shape_map:
      if cropped_name not in labeled_tensor.axes:
        raise ValueError('Selection axis %s not in axes %s' %
                         (cropped_name, labeled_tensor.axes))
    crop_shape = []
    result_axes = []
    for axis in labeled_tensor.axes.values():
      if axis.name in shape_map:
        target_size = shape_map[axis.name]
        crop_shape.append(target_size)
        # We lose labels for the axes we crop, leaving just the size.
        result_axes.append((axis.name, target_size))
      else:
        crop_shape.append(len(axis))
        result_axes.append(axis)
    cropped = random_ops.random_crop(
        labeled_tensor.tensor, crop_shape, seed=seed, name=scope)
    return core.LabeledTensor(cropped, result_axes)
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
            tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
  """Map on the list of tensors unpacked from labeled_tensor.

  See tf.map_fn.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type LabeledTensor -> LabeledTensor.
    labeled_tensor: The input tensor.
    name: Optional op name.

  Returns:
    A tensor that packs the results of applying fn to the list of tensors
    unpacked from labeled_tensor.
  """
  with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    unpack_lts = unpack(labeled_tensor)
    # TODO(ericmc): Fix this upstream.
    if labeled_tensor.dtype == dtypes.string:
      # We must construct the full graph here, because functional_ops.map_fn
      # doesn't work for string-valued tensors.
      # Constructing the full graph may be slow.
      map_lts = [fn(t) for t in unpack_lts]
      return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
    else:
      # Figure out what the axis labels should be, but use tf.map_fn to
      # construct the graph because it's efficient.
      # It may be slow to construct the full graph, so we infer the labels from
      # the first element.
      # TODO(ericmc): This builds a subgraph which then gets thrown away.
      # Find a more elegant solution.
      first_map_lt = fn(unpack_lts[0])
      final_axes = list(labeled_tensor.axes.values())[:1] + list(
          first_map_lt.axes.values())
      # Adapter: tf.map_fn passes plain Tensors, so rewrap them as
      # LabeledTensors before invoking the user's fn.
      @tc.returns(ops.Tensor)
      @tc.accepts(ops.Tensor)
      def tf_fn(tensor):
        original_axes = list(labeled_tensor.axes.values())[1:]
        tensor_lt = core.LabeledTensor(tensor, original_axes)
        return fn(tensor_lt).tensor
      map_op = functional_ops.map_fn(tf_fn, labeled_tensor.tensor)
      map_lt = core.LabeledTensor(map_op, final_axes)
      return core.identity(map_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
            core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
  """Left fold on the list of tensors unpacked from labeled_tensor.

  See tf.foldl.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
      Its arguments are (accumulated_value, next_value).
    labeled_tensor: The input tensor.
    initial_value: The initial value of the accumulator.
    name: Optional op name.

  Returns:
    The accumulated value.
  """
  with ops.name_scope(name, 'lt_foldl',
                      [labeled_tensor, initial_value]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    initial_value = core.convert_to_labeled_tensor(initial_value)
    # Adapter: tf.foldl passes plain Tensors, so rewrap them as
    # LabeledTensors before invoking the user's fn.
    @tc.returns(ops.Tensor)
    @tc.accepts(ops.Tensor, ops.Tensor)
    def tf_fn(accumulator, next_element):
      accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
      # Each folded element has the input's axes minus the leading one.
      next_element_lt = core.LabeledTensor(
          next_element, list(labeled_tensor.axes.values())[1:])
      return fn(accumulator_lt, next_element_lt).tensor
    foldl_op = functional_ops.foldl(
        tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
    foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)
    return core.identity(foldl_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
  """Remove size-1 dimensions.

  See tf.squeeze.

  Args:
    labeled_tensor: The input tensor.
    axis_names: The names of the dimensions to remove, or None to remove
      all size-1 dimensions.
    name: Optional op name.

  Returns:
    A tensor with the specified dimensions removed.

  Raises:
    ValueError: If the named axes are not in the tensor, or if they are
      not size-1.
  """
  with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    if axis_names is None:
      # Default: squeeze every axis whose length is exactly one.
      axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]
    # Validate the requested axes before building any ops.
    for requested in axis_names:
      if requested not in labeled_tensor.axes:
        raise ValueError('axis %s is not in tensor axes %s' %
                         (requested, labeled_tensor.axes))
      elif len(labeled_tensor.axes[requested]) != 1:
        raise ValueError(
            'cannot squeeze axis with size greater than 1: (%s, %s)' %
            (requested, labeled_tensor.axes[requested]))
    dims_to_drop = []
    kept_axes = []
    for position, axis in enumerate(labeled_tensor.axes.values()):
      if axis.name in axis_names:
        dims_to_drop.append(position)
      else:
        kept_axes.append(axis)
    if dims_to_drop:
      result_op = array_ops.squeeze(
          labeled_tensor.tensor, dims_to_drop, name=scope)
    else:
      # Nothing to squeeze; emit an identity so the name scope still applies.
      result_op = array_ops.identity(labeled_tensor.tensor, name=scope)
    return core.LabeledTensor(result_op, kept_axes)
# pylint: disable=invalid-name
# Typecheck aliases for the `axes` argument of the reduction ops built by
# `define_reduce_op`: either an axis name (that axis is squeezed out) or a
# (name, label) pair (that axis is kept), singly or in a collection, or None.
ReduceAxis = tc.Union(string_types,
                      tc.Tuple(string_types, collections.Hashable))
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            tc.Optional(string_types))
def matmul(a, b, name=None):
  """Matrix multiply two tensors with rank 1 or 2.

  If both tensors have rank 2, a matrix-matrix product is performed.
  If one tensor has rank 1 and the other has rank 2, then a matrix-vector
  product is performed.
  If both tensors have rank 1, then a vector dot-product is performed.
  (This behavior matches that of `numpy.dot`.)

  Both tensors must share exactly one dimension in common, which is the
  dimension the operation is summed along. The inputs will be automatically
  transposed if necessary as part of the matmul op.

  We intend to eventually support `matmul` on higher rank input, and also
  eventually support summing over any number shared dimensions (via an `axis`
  argument), but neither of these features has been implemented yet.

  Args:
    a: First LabeledTensor.
    b: Second LabeledTensor.
    name: Optional op name.

  Returns:
    LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in or order of appearance on the
    inputs.

  Raises:
    NotImplementedError: If inputs have rank >2 or share multiple axes.
    ValueError: If the inputs have rank 0 or do not share any axes.
  """
  with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
    a = core.convert_to_labeled_tensor(a)
    b = core.convert_to_labeled_tensor(b)
    if len(a.axes) > 2 or len(b.axes) > 2:
      # We could pass batched inputs to tf.matmul to make this work, but we
      # would also need to use tf.tile and/or tf.transpose. These are more
      # expensive than doing reshapes, so it's not clear if it's a good idea to
      # do this automatically.
      raise NotImplementedError(
          'matmul currently requires inputs with rank 2 or less, but '
          'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
    if not a.axes or not b.axes:
      raise ValueError(
          'matmul currently requires inputs with at least rank 1, but '
          'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
    # The single shared axis is the one summed over.
    shared_axes = set(a.axes) & set(b.axes)
    if len(shared_axes) > 1:
      raise NotImplementedError(
          'matmul does not yet support summing over multiple shared axes: %r. '
          'Use transpose and reshape to create a single shared axis to sum '
          'over.' % shared_axes)
    if not shared_axes:
      raise ValueError('there must have exactly one axis in common between '
                       'input to matmul: %r, %r' %
                       (a.axes.keys(), b.axes.keys()))
    shared_axis, = shared_axes
    if a.axes[shared_axis] != b.axes[shared_axis]:
      raise ValueError('axis %r does not match on input arguments: %r vs %r' %
                       (shared_axis, a.axes[shared_axis].value,
                        b.axes[shared_axis].value))
    # Result axes: all non-shared axes, in order of appearance on the inputs.
    result_axes = []
    for axes in [a.axes, b.axes]:
      for axis in axes.values():
        if axis.name != shared_axis:
          result_axes.append(axis)
    # If an axis-order scope is active and disagrees with the natural order,
    # swap the operands so the output matches the scope's order.
    axis_scope_order = core.get_axis_order()
    if axis_scope_order is not None:
      result_axis_names = [axis.name for axis in result_axes]
      new_axis_names = [
          name for name in axis_scope_order if name in result_axis_names
      ]
      if new_axis_names != result_axis_names:
        # switch a and b
        b, a = a, b
        # result_axes is a list of length 1 or 2
        result_axes = result_axes[::-1]
    # Promote rank-1 operands to rank 2 (row/column vectors) for tf.matmul,
    # remembering which inserted dimensions to squeeze back out afterwards.
    squeeze_dims = []
    if len(a.axes) == 1:
      a_tensor = array_ops.reshape(a.tensor, (1, -1))
      squeeze_dims.append(0)
      transpose_a = False
    else:
      a_tensor = a.tensor
      transpose_a = list(a.axes.keys()).index(shared_axis) == 0
    if len(b.axes) == 1:
      b_tensor = array_ops.reshape(b.tensor, (-1, 1))
      squeeze_dims.append(1)
      transpose_b = False
    else:
      b_tensor = b.tensor
      transpose_b = list(b.axes.keys()).index(shared_axis) == 1
    result_op = math_ops.matmul(
        a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)
    if squeeze_dims:
      result_op = array_ops.squeeze(result_op, squeeze_dims)
    result_op = array_ops.identity(result_op, name=scope)
    return core.LabeledTensor(result_op, result_axes)
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
  """Define a reduction op for labeled tensors.

  Args:
    op_name: string name of the TensorFlow op.
    reduce_fn: function to call to evaluate the op on a tf.Tensor.

  Returns:
    Function defining the given reduction op that acts on a LabeledTensor.
  """
  default_name = 'lt_%s' % op_name
  @tc.returns(core.LabeledTensor)
  @tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
  def op(labeled_tensor, axes=None, name=None):
    """Computes the given reduction across the given axes of a LabeledTensor.

    See `tf.{op_name}` for full details.

    Args:
      labeled_tensor: The input tensor.
      axes: A set of axes or None.
        If None, all axes will be reduced.
        Axes must all be strings, in which case those dimensions will be
        removed, or pairs of (name, None) or (name, label), in which case those
        dimensions will be kept.
      name: Optional op name.

    Returns:
      The reduced LabeledTensor.

    Raises:
      ValueError: if any of the axes to reduce over are not found on
        `labeled_tensor`.
    """
    with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
      labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
      if axes is None:
        axes = labeled_tensor.axes.keys()
      # Normalize a single axis spec (name or (name, label) tuple) to a list.
      if isinstance(axes, (string_types, tuple)):
        axes = [axes]
      reduction_axes = {}
      axes_to_squeeze = []
      for a in axes:
        if isinstance(a, string_types):
          # We squeeze out this axis.
          reduction_axes[a] = a
          axes_to_squeeze.append(a)
        else:
          # We keep this axis, with the user-provided labels.
          (axis_name, label) = a
          if label is not None:
            # The input was a single label, so make it a list so it can be
            # turned into an Axis.
            label = [label]
          reduction_axes[axis_name] = (axis_name, label)
      for axis_name in reduction_axes:
        if axis_name not in labeled_tensor.axes:
          raise ValueError('Axis %s not in axes %s' %
                           (axis_name, labeled_tensor.axes))
      intermediate_axes = []
      reduction_dimensions = []
      for i, axis in enumerate(labeled_tensor.axes.values()):
        if axis.name in reduction_axes:
          intermediate_axes.append(reduction_axes[axis.name])
          reduction_dimensions.append(i)
        else:
          intermediate_axes.append(axis)
      # Reduce with keepdims=True, then squeeze away only the axes the
      # caller asked to remove entirely.
      reduce_op = reduce_fn(
          labeled_tensor.tensor, reduction_dimensions, keepdims=True)
      reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
      return squeeze(reduce_lt, axes_to_squeeze, name=scope)
  # The inner docstring is a template; substitute the concrete op name.
  op.__doc__ = op.__doc__.format(op_name=op_name)
  op.__name__ = op_name
  return op
# Concrete reduction ops generated from the factory above; each mirrors the
# corresponding `tf.reduce_*` function but operates on LabeledTensors.
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
                                    math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(str, tc.Union(int, ops.Tensor)),
            tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
  """Constructs a tensor by tiling a given tensor.

  Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
  tensors would no longer be unique.)

  See `tf.tile`.

  Args:
    labeled_tensor: The input tensor.
    multiples: A mapping where the keys are axis names and the values are the
      integer number of times to tile along that axis. Only axes with a multiple
      different than 1 need be included.
    name: Optional op name.

  Returns:
    A tensor with the indicated axes tiled.

  Raises:
    ValueError: If the tiled axes are not axes in the input tensor, or if any
      axes in multiples have tick labels.
  """
  with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
      raise ValueError('tile axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (multiples.keys(), labeled_tensor.axes))

    # Tiling a labeled axis would duplicate its tick labels, so reject those.
    labeled_axes = [
        name for name in multiples
        if labeled_tensor.axes[name].labels is not None
    ]
    if labeled_axes:
      raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)

    # tf.tile needs a multiple for every dimension; default to 1 (no tiling).
    multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
    tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)

    # Tiled axes lose their size information, so keep only the name for any
    # unlabeled axis; labeled axes (which were not tiled) are kept whole.
    new_axes = [
        axis.name if axis.labels is None else axis
        for axis in labeled_tensor.axes.values()
    ]
    return core.LabeledTensor(tile_op, new_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
            string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
  """Pads a tensor.

  See tf.pad.

  Args:
    labeled_tensor: The input tensor.
    paddings: A mapping where the keys are axis names and the values are
      tuples where the first element is the padding to insert at the beginning
      of the axis and the second is the padding to insert at the end of the
      axis.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
    name: Optional op name.

  Returns:
    A tensor with the indicated axes padded, optionally with those axes extended
    with the provided labels.

  Raises:
    ValueError: If the padded axes are not axes in the input tensor.
  """
  with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
      raise ValueError('pad axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (paddings.keys(), labeled_tensor.axes))

    new_axes = []
    padding_pairs = []
    # Loop variable renamed from `name` to `axis_name`: the original shadowed
    # this function's `name` parameter, which is an accident waiting to happen
    # for any code added below the loop.
    for axis_name, axis in labeled_tensor.axes.items():
      if axis_name in paddings:
        padding_before, padding_after = paddings[axis_name]
        # Build explicit Axis objects for the padding so the padded axis can
        # carry concatenated labels.
        axis_before = core.Axis(axis_name, padding_before)
        axis_after = core.Axis(axis_name, padding_after)
        new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
        padding_pairs.append((len(axis_before), len(axis_after)))
      else:
        # Axis not mentioned in `paddings`: keep it untouched.
        new_axes.append(axis)
        padding_pairs.append((0, 0))

    pad_op = array_ops.pad(labeled_tensor.tensor,
                           padding_pairs,
                           mode,
                           name=scope)

    return core.LabeledTensor(pad_op, new_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Union(np.ndarray, list, tuple, core.Scalar),
    tc.Optional(dtypes.DType),
    tc.Optional(
        tc.Union(core.Axes, tc.Collection(
            tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
  """Creates a constant tensor.

  If `axes` includes any strings, shape is inferred from `value`. Otherwise,
  the sizes of the given `axes` are used to set `shape` for `tf.constant`.

  See tf.constant for more details.

  Args:
    value: The input tensor.
    dtype: The type of the returned tensor.
    axes: Optional Axes, list of strings or list of objects coercible to Axis
      objects. By default, axes are assumed to be an empty list (i.e., `value`
      is treated as a scalar).
    name: Optional op name.

  Returns:
    The tensor with elements set to zero.
  """
  with ops.name_scope(name, 'lt_constant', [value]) as scope:

    if axes is None:
      axes = []

    if isinstance(axes, core.Axes):
      axes = axes.values()

    if any(isinstance(ax, string_types) for ax in axes):
      # need to infer shape: a bare string carries no size information, so
      # let tf.constant derive the shape from `value`.
      shape = None
    else:
      # axes already indicate shape
      axes = [core.as_axis(a) for a in axes]
      shape = [a.size for a in axes]

    op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
    return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
  """Creates an identical tensor with all elements set to zero.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    The tensor with elements set to zero.
  """
  with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
    lt = core.convert_to_labeled_tensor(labeled_tensor)
    # Wrap tf.zeros_like and re-attach the source tensor's axes unchanged.
    zeros_op = array_ops.zeros_like(lt.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(zeros_op, lt.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
  """Creates an identical tensor with all elements set to one.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    The tensor with elements set to one.
  """
  with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
    lt = core.convert_to_labeled_tensor(labeled_tensor)
    # Wrap tf.ones_like and re-attach the source tensor's axes unchanged.
    ones_op = array_ops.ones_like(lt.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(ones_op, lt.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
  """Casts a labeled tensor to a new type.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    A labeled tensor with the new dtype.
  """
  with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
    lt = core.convert_to_labeled_tensor(labeled_tensor)
    # Casting changes only the dtype; the axes carry over unchanged.
    cast_op = math_ops.cast(lt.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(cast_op, lt.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
  """Asserts a tensor doesn't contain NaNs or Infs.

  See tf.verify_tensor_all_finite.

  Args:
    labeled_tensor: The input tensor.
    message: Message to log on failure.
    name: Optional op name.

  Returns:
    The input tensor.
  """
  with ops.name_scope(name, 'lt_verify_tensor_all_finite',
                      [labeled_tensor]) as scope:
    lt = core.convert_to_labeled_tensor(labeled_tensor)
    # The verification op passes values through untouched, so the result
    # keeps the input's axes.
    checked = numerics.verify_tensor_all_finite(
        lt.tensor, msg=message, name=scope)
    return core.LabeledTensor(checked, lt.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
  """Apply a boolean mask to a labeled tensor.

  Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
  The mask is applied to the first axis of `labeled_tensor`. Labels on the first
  axis are removed, because True indices in `mask` may not be known dynamically.

  Args:
    labeled_tensor: The input tensor.
    mask: The type of the returned tensor.
    name: Optional op name.

  Returns:
    The masked labeled tensor.

  Raises:
    NotImplementedError: if the mask has more than one dimension.
    ValueError: if the first axis of `labeled_tensor` does not equal the
      mask's single axis.
  """
  with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    mask = core.convert_to_labeled_tensor(mask)

    if len(mask.axes) > 1:
      raise NotImplementedError(
          "LabeledTensor's boolean_mask currently only supports 1D masks")
    mask_axis = list(mask.axes.values())[0]
    lt_axis = list(labeled_tensor.axes.values())[0]
    if mask_axis != lt_axis:
      raise ValueError('the first axis of the labeled tensor and the mask '
                       'are not equal:\n%r\n%r' % (lt_axis, mask_axis))

    op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
    # TODO(shoyer): attempt to infer labels for the masked values, by calling
    # tf.contrib.util.constant_value on the mask?
    # The masked axis keeps only its name (size/labels unknown after masking).
    axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
    return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
  """Return elements from x or y depending on condition.

  See `tf.where` for more details. This function currently only implements the
  three argument version of where.

  Args:
    condition: LabeledTensor of type `bool`.
    x: LabeledTensor for values where condition is true.
    y: LabeledTensor for values where condition is false.
    name: Optional op name.

  Returns:
    The labeled tensor with values according to condition.

  Raises:
    ValueError: if `condition`, `x` and `y` do not all have exactly the same
      axes.
  """
  with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
    condition = core.convert_to_labeled_tensor(condition)
    x = core.convert_to_labeled_tensor(x)
    y = core.convert_to_labeled_tensor(y)

    # All three inputs must agree on axes for elementwise selection.
    if not condition.axes == x.axes == y.axes:
      raise ValueError('all inputs to `where` must have equal axes')

    op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
    return core.LabeledTensor(op, x.axes)
| apache-2.0 |
mfrlin/zemcounters | zemcounters/tailer.py | 1 | 3196 | import time
import bson
import bson.objectid
import bson.errors
import tornado.escape
from tornado.websocket import WebSocketHandler
from tornado.web import RequestHandler
from tornado.ioloop import IOLoop
from tornado import gen
class TailHandler(WebSocketHandler):
    """WebSocket handler that pushes counter updates to subscribed clients.

    Class-level ``listeners`` maps a counter id (str) to the set of open
    sockets subscribed to that counter; it is shared by all connections.
    """

    listeners = {}

    def open(self, counter_id):
        # Per-connection list of counter ids this socket is subscribed to.
        self.subscriptions = []
        if counter_id:
            # NOTE(review): decode() implies the URL argument arrives as
            # bytes; modern Tornado delivers str -- confirm the Tornado
            # version in use.
            cid = counter_id.decode('utf-8')
            self.subscriptions.append(cid)
            TailHandler.listeners.setdefault(cid, set()).add(self)

    def on_message(self, message):
        """Sockets can subscribe to more than one object_id.

        Sending {'<counter_id>': 's'} subscribes and {'<counter_id>': 'u'}
        un subscribes.
        """
        try:
            parsed = tornado.escape.json_decode(message)
        except Exception:
            # Malformed JSON from the client: treat as an empty request.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit.)
            parsed = {}
        actions = {
            's': self.subscribe,
            'u': self.un_subscribe,
        }
        for key in parsed:
            # Unknown action codes are ignored.
            actions.get(parsed[key], lambda x: None)(key)

    def on_close(self):
        # Drop this socket from every counter it was subscribed to.
        for sub in self.subscriptions:
            self.un_subscribe(sub)

    def subscribe(self, counter_id):
        # Only accept well-formed MongoDB ObjectIds; silently ignore others.
        try:
            bson.objectid.ObjectId(counter_id)
        except bson.errors.InvalidId:
            return
        self.subscriptions.append(counter_id)
        TailHandler.listeners.setdefault(counter_id, set()).add(self)

    def un_subscribe(self, counter_id):
        # Remove this socket; drop the key entirely once the last
        # subscriber is gone so `listeners` does not leak empty sets.
        listeners = TailHandler.listeners.get(counter_id)
        if listeners is None:
            return
        listeners.discard(self)
        if not listeners:
            del TailHandler.listeners[counter_id]
def handle_update(obj):
    """Push an oplog update entry to every socket watching that counter.

    `obj` is a MongoDB oplog document for an update: `o2` holds the query
    (including the document `_id`) and `o['$set']['n']` the new value.
    Entries without those fields are logged and skipped.
    """
    try:
        object_id = str(obj['o2']['_id'])
        n = int(obj['o']['$set']['n'])
    except KeyError:
        # TODO: real logging
        print(obj)
        return
    # Snapshot the set: a failed send may close a socket, which would
    # mutate the set while we iterate it.
    for socket in list(TailHandler.listeners.get(object_id, [])):
        # WebSocketHandler requires write_message(); plain write() raises
        # RuntimeError on websocket connections.
        socket.write_message({'id': object_id, 'n': n})
def handle_delete(obj):
    """Notify and unsubscribe every socket watching a deleted counter."""
    try:
        # Oplog delete entries carry the removed document's id in o['_id'].
        object_id = str(obj['o']['_id'])
    except KeyError:
        # TODO: real logging
        print(obj)
        return
    for socket in list(TailHandler.listeners.get(object_id, [])):
        # Original code did socket.write(socket.write(...)) -- sending the
        # payload and then sending None; send exactly once, and use
        # write_message() as WebSocketHandler requires.
        socket.write_message({'id': object_id, 'd': 1})
        # `subscriptions` is a list (see TailHandler.open), which has no
        # discard(); remove the id if present instead.
        if object_id in socket.subscriptions:
            socket.subscriptions.remove(object_id)
    # pop() instead of del: avoids a KeyError when nobody was subscribed.
    TailHandler.listeners.pop(object_id, None)
@gen.coroutine
def start_stream(db):
    """Tail the MongoDB oplog forever, dispatching update/delete entries.

    Creates a tailable cursor on `oplog.rs` starting at "now" and re-creates
    it whenever it dies (a tailable cursor on an empty result set dies
    immediately), backing off half a second between attempts.
    """
    print("start stream")
    oplog = db['oplog.rs']
    # Only entries from this moment on; bson.Timestamp(seconds, ordinal).
    now = bson.Timestamp(int(time.time()), 1)
    cursor = oplog.find({'ts': {'$gte': now}}, tailable=True, await_data=True)
    while True:
        if not cursor.alive:
            # While collection is empty, tailable cursor dies immediately
            loop = IOLoop.instance()
            now = bson.Timestamp(int(time.time()), 1)
            # Sleep ~0.5s on the IOLoop without blocking other handlers.
            yield gen.Task(loop.add_timeout, time.time() + 0.5)
            cursor = oplog.find({'ts': {'$gte': now}}, tailable=True, await_data=True)
        if (yield cursor.fetch_next):
            obj = cursor.next_object()
            # 'u' = update, 'd' = delete; any other op code is ignored.
            actions = {
                'u': handle_update,
                'd': handle_delete,
            }
            actions.get(obj['op'], lambda x: None)(obj)
class TestSocketHandler(RequestHandler):
    # Serves the demo page used to exercise the websocket endpoint manually.
    def get(self):
        self.render("index.html")
| mit |
jreback/pandas | pandas/tests/series/apply/test_series_apply.py | 1 | 29906 | from collections import Counter, defaultdict
from itertools import chain
import numpy as np
import pytest
from pandas.core.dtypes.common import is_number
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, isna, timedelta_range
import pandas._testing as tm
from pandas.core.base import SpecificationError
class TestSeriesApply:
    """Tests for Series.apply: value boxing, dtype handling and metadata."""

    def test_series_map_box_timedelta(self):
        # GH#11349
        ser = Series(timedelta_range("1 day 1 s", periods=5, freq="h"))

        def f(x):
            return x.total_seconds()

        ser.map(f)
        ser.apply(f)
        DataFrame(ser).applymap(f)

    def test_apply(self, datetime_series):
        with np.errstate(all="ignore"):
            tm.assert_series_equal(
                datetime_series.apply(np.sqrt), np.sqrt(datetime_series)
            )

            # element-wise apply
            import math

            tm.assert_series_equal(
                datetime_series.apply(math.exp), np.exp(datetime_series)
            )

        # empty series
        s = Series(dtype=object, name="foo", index=Index([], name="bar"))
        rs = s.apply(lambda x: x)
        tm.assert_series_equal(s, rs)

        # check all metadata (GH 9322)
        assert s is not rs
        assert s.index is rs.index
        assert s.dtype == rs.dtype
        assert s.name == rs.name

        # index but no data
        s = Series(index=[1, 2, 3], dtype=np.float64)
        rs = s.apply(lambda x: x)
        tm.assert_series_equal(s, rs)

    def test_apply_same_length_inference_bug(self):
        s = Series([1, 2])

        def f(x):
            return (x, x + 1)

        result = s.apply(f)
        expected = s.map(f)
        tm.assert_series_equal(result, expected)

        s = Series([1, 2, 3])
        result = s.apply(f)
        expected = s.map(f)
        tm.assert_series_equal(result, expected)

    def test_apply_dont_convert_dtype(self):
        s = Series(np.random.randn(10))

        def f(x):
            return x if x > 0 else np.nan

        result = s.apply(f, convert_dtype=False)
        assert result.dtype == object

    def test_with_string_args(self, datetime_series):
        # string args resolve to the matching Series reduction method
        for arg in ["sum", "mean", "min", "max", "std"]:
            result = datetime_series.apply(arg)
            expected = getattr(datetime_series, arg)()
            assert result == expected

    def test_apply_args(self):
        s = Series(["foo,bar"])

        result = s.apply(str.split, args=(",",))
        assert result[0] == ["foo", "bar"]
        assert isinstance(result[0], list)

    def test_series_map_box_timestamps(self):
        # GH#2689, GH#2627
        ser = Series(pd.date_range("1/1/2000", periods=10))

        def func(x):
            return (x.hour, x.day, x.month)

        # it works!
        ser.map(func)
        ser.apply(func)

    def test_apply_box(self):
        # ufunc will not be boxed. Same test cases as the test_map_box
        vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
        s = Series(vals)
        assert s.dtype == "datetime64[ns]"
        # boxed value must be Timestamp instance
        res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
        exp = Series(["Timestamp_1_None", "Timestamp_2_None"])
        tm.assert_series_equal(res, exp)

        vals = [
            pd.Timestamp("2011-01-01", tz="US/Eastern"),
            pd.Timestamp("2011-01-02", tz="US/Eastern"),
        ]
        s = Series(vals)
        assert s.dtype == "datetime64[ns, US/Eastern]"
        res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
        exp = Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
        tm.assert_series_equal(res, exp)

        # timedelta
        vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
        s = Series(vals)
        assert s.dtype == "timedelta64[ns]"
        res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
        exp = Series(["Timedelta_1", "Timedelta_2"])
        tm.assert_series_equal(res, exp)

        # period
        vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
        s = Series(vals)
        assert s.dtype == "Period[M]"
        res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
        exp = Series(["Period_M", "Period_M"])
        tm.assert_series_equal(res, exp)

    def test_apply_datetimetz(self):
        values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
            "Asia/Tokyo"
        )
        s = Series(values, name="XX")

        result = s.apply(lambda x: x + pd.offsets.Day())
        exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
            "Asia/Tokyo"
        )
        exp = Series(exp_values, name="XX")
        tm.assert_series_equal(result, exp)

        # change dtype
        # GH 14506 : Returned dtype changed from int32 to int64
        result = s.apply(lambda x: x.hour)
        exp = Series(list(range(24)) + [0], name="XX", dtype=np.int64)
        tm.assert_series_equal(result, exp)

        # not vectorized
        def f(x):
            if not isinstance(x, pd.Timestamp):
                raise ValueError
            return str(x.tz)

        result = s.map(f)
        exp = Series(["Asia/Tokyo"] * 25, name="XX")
        tm.assert_series_equal(result, exp)

    def test_apply_dict_depr(self):
        # nested dict renamers were deprecated and now raise
        tsdf = DataFrame(
            np.random.randn(10, 3),
            columns=["A", "B", "C"],
            index=pd.date_range("1/1/2000", periods=10),
        )
        msg = "nested renamer is not supported"
        with pytest.raises(SpecificationError, match=msg):
            tsdf.A.agg({"foo": ["sum", "mean"]})

    def test_apply_categorical(self):
        values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
        ser = Series(values, name="XX", index=list("abcdefg"))
        result = ser.apply(lambda x: x.lower())

        # should be categorical dtype when the number of categories are
        # the same
        values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True)
        exp = Series(values, name="XX", index=list("abcdefg"))
        tm.assert_series_equal(result, exp)
        tm.assert_categorical_equal(result.values, exp.values)

        result = ser.apply(lambda x: "A")
        exp = Series(["A"] * 7, name="XX", index=list("abcdefg"))
        tm.assert_series_equal(result, exp)
        assert result.dtype == object

    @pytest.mark.parametrize("series", [["1-1", "1-1", np.NaN], ["1-1", "1-2", np.NaN]])
    def test_apply_categorical_with_nan_values(self, series):
        # GH 20714 bug fixed in: GH 24275
        s = Series(series, dtype="category")
        result = s.apply(lambda x: x.split("-")[0])
        result = result.astype(object)
        expected = Series(["1", "1", np.NaN], dtype="category")
        expected = expected.astype(object)
        tm.assert_series_equal(result, expected)

    def test_apply_empty_integer_series_with_datetime_index(self):
        # GH 21245
        s = Series([], index=pd.date_range(start="2018-01-01", periods=0), dtype=int)
        result = s.apply(lambda x: x)
        tm.assert_series_equal(result, s)
class TestSeriesAggregate:
    """Tests for Series.agg / transform with callables, strings and dicts."""

    def test_transform(self, string_series):
        # transforming functions

        with np.errstate(all="ignore"):

            f_sqrt = np.sqrt(string_series)
            f_abs = np.abs(string_series)

            # ufunc
            result = string_series.apply(np.sqrt)
            expected = f_sqrt.copy()
            tm.assert_series_equal(result, expected)

            # list-like
            result = string_series.apply([np.sqrt])
            expected = f_sqrt.to_frame().copy()
            expected.columns = ["sqrt"]
            tm.assert_frame_equal(result, expected)

            result = string_series.apply(["sqrt"])
            tm.assert_frame_equal(result, expected)

            # multiple items in list
            # these are in the order as if we are applying both functions per
            # series and then concatting
            expected = pd.concat([f_sqrt, f_abs], axis=1)
            expected.columns = ["sqrt", "absolute"]
            result = string_series.apply([np.sqrt, np.abs])
            tm.assert_frame_equal(result, expected)

            # dict, provide renaming
            expected = pd.concat([f_sqrt, f_abs], axis=1)
            expected.columns = ["foo", "bar"]
            expected = expected.unstack().rename("series")

            result = string_series.apply({"foo": np.sqrt, "bar": np.abs})
            tm.assert_series_equal(result.reindex_like(expected), expected)

    def test_transform_and_agg_error(self, string_series):
        # we are trying to transform with an aggregator
        msg = "cannot combine transform and aggregation"
        with pytest.raises(ValueError, match=msg):
            with np.errstate(all="ignore"):
                string_series.agg(["sqrt", "max"])

        msg = "cannot perform both aggregation and transformation"
        with pytest.raises(ValueError, match=msg):
            with np.errstate(all="ignore"):
                string_series.agg({"foo": np.sqrt, "bar": "sum"})

    def test_demo(self):
        # demonstration tests
        s = Series(range(6), dtype="int64", name="series")

        result = s.agg(["min", "max"])
        expected = Series([0, 5], index=["min", "max"], name="series")
        tm.assert_series_equal(result, expected)

        result = s.agg({"foo": "min"})
        expected = Series([0], index=["foo"], name="series")
        tm.assert_series_equal(result, expected)

        # nested renaming
        msg = "nested renamer is not supported"
        with pytest.raises(SpecificationError, match=msg):
            s.agg({"foo": ["min", "max"]})

    def test_multiple_aggregators_with_dict_api(self):

        s = Series(range(6), dtype="int64", name="series")
        # nested renaming
        msg = "nested renamer is not supported"
        with pytest.raises(SpecificationError, match=msg):
            s.agg({"foo": ["min", "max"], "bar": ["sum", "mean"]})

    def test_agg_apply_evaluate_lambdas_the_same(self, string_series):
        # test that we are evaluating row-by-row first
        # before vectorized evaluation
        result = string_series.apply(lambda x: str(x))
        expected = string_series.agg(lambda x: str(x))
        tm.assert_series_equal(result, expected)

        result = string_series.apply(str)
        expected = string_series.agg(str)
        tm.assert_series_equal(result, expected)

    def test_with_nested_series(self, datetime_series):
        # GH 2316
        # .agg with a reducer and a transform, what to do
        result = datetime_series.apply(
            lambda x: Series([x, x ** 2], index=["x", "x^2"])
        )
        expected = DataFrame({"x": datetime_series, "x^2": datetime_series ** 2})
        tm.assert_frame_equal(result, expected)

        result = datetime_series.agg(lambda x: Series([x, x ** 2], index=["x", "x^2"]))
        tm.assert_frame_equal(result, expected)

    def test_replicate_describe(self, string_series):
        # this also tests a result set that is all scalars
        expected = string_series.describe()
        result = string_series.apply(
            {
                "count": "count",
                "mean": "mean",
                "std": "std",
                "min": "min",
                "25%": lambda x: x.quantile(0.25),
                "50%": "median",
                "75%": lambda x: x.quantile(0.75),
                "max": "max",
            }
        )
        tm.assert_series_equal(result, expected)

    def test_reduce(self, string_series):
        # reductions with named functions
        result = string_series.agg(["sum", "mean"])
        expected = Series(
            [string_series.sum(), string_series.mean()],
            ["sum", "mean"],
            name=string_series.name,
        )
        tm.assert_series_equal(result, expected)

    def test_non_callable_aggregates(self):
        # test agg using non-callable series attributes
        s = Series([1, 2, None])

        # Calling agg w/ just a string arg same as calling s.arg
        result = s.agg("size")
        expected = s.size
        assert result == expected

        # test when mixed w/ callable reducers
        result = s.agg(["size", "count", "mean"])
        expected = Series({"size": 3.0, "count": 2.0, "mean": 1.5})
        tm.assert_series_equal(result[expected.index], expected)

    @pytest.mark.parametrize(
        "series, func, expected",
        chain(
            tm.get_cython_table_params(
                Series(dtype=np.float64),
                [
                    ("sum", 0),
                    ("max", np.nan),
                    ("min", np.nan),
                    ("all", True),
                    ("any", False),
                    ("mean", np.nan),
                    ("prod", 1),
                    ("std", np.nan),
                    ("var", np.nan),
                    ("median", np.nan),
                ],
            ),
            tm.get_cython_table_params(
                Series([np.nan, 1, 2, 3]),
                [
                    ("sum", 6),
                    ("max", 3),
                    ("min", 1),
                    ("all", True),
                    ("any", True),
                    ("mean", 2),
                    ("prod", 6),
                    ("std", 1),
                    ("var", 1),
                    ("median", 2),
                ],
            ),
            tm.get_cython_table_params(
                Series("a b c".split()),
                [
                    ("sum", "abc"),
                    ("max", "c"),
                    ("min", "a"),
                    ("all", "c"),  # see GH12863
                    ("any", "a"),
                ],
            ),
        ),
    )
    def test_agg_cython_table(self, series, func, expected):
        # GH21224
        # test reducing functions in
        # pandas.core.base.SelectionMixin._cython_table
        result = series.agg(func)
        if is_number(expected):
            assert np.isclose(result, expected, equal_nan=True)
        else:
            assert result == expected

    @pytest.mark.parametrize(
        "series, func, expected",
        chain(
            tm.get_cython_table_params(
                Series(dtype=np.float64),
                [
                    ("cumprod", Series([], Index([]), dtype=np.float64)),
                    ("cumsum", Series([], Index([]), dtype=np.float64)),
                ],
            ),
            tm.get_cython_table_params(
                Series([np.nan, 1, 2, 3]),
                [
                    ("cumprod", Series([np.nan, 1, 2, 6])),
                    ("cumsum", Series([np.nan, 1, 3, 6])),
                ],
            ),
            tm.get_cython_table_params(
                Series("a b c".split()), [("cumsum", Series(["a", "ab", "abc"]))]
            ),
        ),
    )
    def test_agg_cython_table_transform(self, series, func, expected):
        # GH21224
        # test transforming functions in
        # pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
        result = series.agg(func)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "series, func, expected",
        chain(
            tm.get_cython_table_params(
                Series("a b c".split()),
                [
                    ("mean", TypeError),  # mean raises TypeError
                    ("prod", TypeError),
                    ("std", TypeError),
                    ("var", TypeError),
                    ("median", TypeError),
                    ("cumprod", TypeError),
                ],
            )
        ),
    )
    def test_agg_cython_table_raises(self, series, func, expected):
        # GH21224
        msg = r"[Cc]ould not convert|can't multiply sequence by non-int of type"
        with pytest.raises(expected, match=msg):
            # e.g. Series('a b'.split()).cumprod() will raise
            series.agg(func)

    def test_series_apply_no_suffix_index(self):
        # GH36189
        s = Series([4] * 3)
        result = s.apply(["sum", lambda x: x.sum(), lambda x: x.sum()])
        expected = Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"])

        tm.assert_series_equal(result, expected)
class TestSeriesMap:
    def test_map(self, datetime_series):
        """Series.map against Series/dict sources, a callable, and categoricals."""
        index, data = tm.getMixedTypeDict()

        source = Series(data["B"], index=data["C"])
        target = Series(data["C"][:4], index=data["D"][:4])

        merged = target.map(source)

        for k, v in merged.items():
            assert v == source[target[k]]

        # input could be a dict
        merged = target.map(source.to_dict())

        for k, v in merged.items():
            assert v == source[target[k]]

        # function
        result = datetime_series.map(lambda x: x * 2)
        tm.assert_series_equal(result, datetime_series * 2)

        # GH 10324
        a = Series([1, 2, 3, 4])
        b = Series(["even", "odd", "even", "odd"], dtype="category")
        c = Series(["even", "odd", "even", "odd"])

        exp = Series(["odd", "even", "odd", np.nan], dtype="category")
        tm.assert_series_equal(a.map(b), exp)
        exp = Series(["odd", "even", "odd", np.nan])
        tm.assert_series_equal(a.map(c), exp)

        a = Series(["a", "b", "c", "d"])
        b = Series([1, 2, 3, 4], index=pd.CategoricalIndex(["b", "c", "d", "e"]))
        c = Series([1, 2, 3, 4], index=Index(["b", "c", "d", "e"]))

        exp = Series([np.nan, 1, 2, 3])
        tm.assert_series_equal(a.map(b), exp)
        exp = Series([np.nan, 1, 2, 3])
        tm.assert_series_equal(a.map(c), exp)

        a = Series(["a", "b", "c", "d"])
        b = Series(
            ["B", "C", "D", "E"],
            dtype="category",
            index=pd.CategoricalIndex(["b", "c", "d", "e"]),
        )
        c = Series(["B", "C", "D", "E"], index=Index(["b", "c", "d", "e"]))

        exp = Series(
            pd.Categorical([np.nan, "B", "C", "D"], categories=["B", "C", "D", "E"])
        )
        tm.assert_series_equal(a.map(b), exp)
        exp = Series([np.nan, "B", "C", "D"])
        tm.assert_series_equal(a.map(c), exp)
    def test_map_empty(self, index):
        """Mapping with an empty dict yields all-NaN with the same index."""
        if isinstance(index, MultiIndex):
            pytest.skip("Initializing a Series from a MultiIndex is not supported")

        s = Series(index)
        result = s.map({})

        expected = Series(np.nan, index=s.index)
        tm.assert_series_equal(result, expected)
    def test_map_compat(self):
        """Boolean values map through a {True: ..., False: ...} dict."""
        # related GH 8024
        s = Series([True, True, False], index=[1, 2, 3])
        result = s.map({True: "foo", False: "bar"})
        expected = Series(["foo", "foo", "bar"], index=[1, 2, 3])
        tm.assert_series_equal(result, expected)
    def test_map_int(self):
        """Mapping float keys onto an int-valued Series keeps float result dtype."""
        left = Series({"a": 1.0, "b": 2.0, "c": 3.0, "d": 4})
        right = Series({1: 11, 2: 22, 3: 33})

        assert left.dtype == np.float_
        assert issubclass(right.dtype.type, np.integer)

        merged = left.map(right)
        assert merged.dtype == np.float_
        assert isna(merged["d"])
        assert not isna(merged["c"])
    def test_map_type_inference(self):
        """Result dtype is inferred from mapped values (here: integer)."""
        s = Series(range(3))
        s2 = s.map(lambda x: np.where(x == 0, 0, 1))
        assert issubclass(s2.dtype.type, np.integer)
    def test_map_decimal(self, string_series):
        """Mapping to Decimal produces object dtype holding Decimal values."""
        from decimal import Decimal

        result = string_series.map(lambda x: Decimal(str(x)))
        assert result.dtype == np.object_
        assert isinstance(result[0], Decimal)
    def test_map_na_exclusion(self):
        """na_action='ignore' skips NaN entries, leaving them NaN."""
        s = Series([1.5, np.nan, 3, np.nan, 5])

        result = s.map(lambda x: x * 2, na_action="ignore")
        exp = s * 2
        tm.assert_series_equal(result, exp)
    def test_map_dict_with_tuple_keys(self):
        """
        Due to new MultiIndex-ing behaviour in v0.14.0,
        dicts with tuple keys passed to map were being
        converted to a multi-index, preventing tuple values
        from being mapped properly.
        """
        # GH 18496
        df = DataFrame({"a": [(1,), (2,), (3, 4), (5, 6)]})
        label_mappings = {(1,): "A", (2,): "B", (3, 4): "A", (5, 6): "B"}

        df["labels"] = df["a"].map(label_mappings)
        df["expected_labels"] = Series(["A", "B", "A", "B"], index=df.index)
        # All labels should be filled now
        tm.assert_series_equal(df["labels"], df["expected_labels"], check_names=False)
    def test_map_counter(self):
        """A Counter works as a mapper; missing keys map to its default 0."""
        s = Series(["a", "b", "c"], index=[1, 2, 3])
        counter = Counter()
        counter["b"] = 5
        counter["c"] += 1
        result = s.map(counter)
        expected = Series([0, 5, 1], index=[1, 2, 3])
        tm.assert_series_equal(result, expected)
    def test_map_defaultdict(self):
        """A defaultdict mapper supplies its default for missing keys."""
        s = Series([1, 2, 3], index=["a", "b", "c"])
        default_dict = defaultdict(lambda: "blank")
        default_dict[1] = "stuff"
        result = s.map(default_dict)
        expected = Series(["stuff", "blank", "blank"], index=["a", "b", "c"])
        tm.assert_series_equal(result, expected)
    def test_map_dict_na_key(self):
        # https://github.com/pandas-dev/pandas/issues/17648
        # Checks that np.nan key is appropriately mapped
        s = Series([1, 2, np.nan])
        expected = Series(["a", "b", "c"])
        result = s.map({1: "a", 2: "b", np.nan: "c"})
        tm.assert_series_equal(result, expected)
    def test_map_dict_subclass_with_missing(self):
        """
        Test Series.map with a dictionary subclass that defines __missing__,
        i.e. sets a default value (GH #15999).
        """

        class DictWithMissing(dict):
            def __missing__(self, key):
                return "missing"

        s = Series([1, 2, 3])
        dictionary = DictWithMissing({3: "three"})
        result = s.map(dictionary)
        expected = Series(["missing", "missing", "three"])
        tm.assert_series_equal(result, expected)
def test_map_dict_subclass_without_missing(self):
class DictWithoutMissing(dict):
pass
s = Series([1, 2, 3])
dictionary = DictWithoutMissing({3: "three"})
result = s.map(dictionary)
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_abc_mapping(self, non_dict_mapping_subclass):
    """collections.abc.Mapping instances are accepted as map() mappers (GH 29733)."""
    # https://github.com/pandas-dev/pandas/issues/29733
    # Check collections.abc.Mapping support as mapper for Series.map
    mapper = non_dict_mapping_subclass({3: "three"})
    mapped = Series([1, 2, 3]).map(mapper)
    tm.assert_series_equal(mapped, Series([np.nan, np.nan, "three"]))
def test_map_abc_mapping_with_missing(self, non_dict_mapping_subclass):
    """__missing__ on a non-dict Mapping must NOT be consulted by map (GH 29733)."""
    # https://github.com/pandas-dev/pandas/issues/29733
    # Check collections.abc.Mapping support as mapper for Series.map

    class NonDictMappingWithMissing(non_dict_mapping_subclass):
        def __missing__(self, key):
            return "missing"

    mapper = NonDictMappingWithMissing({3: "three"})
    mapped = Series([1, 2, 3]).map(mapper)
    # __missing__ is a dict concept, not a Mapping concept,
    # so it should not change the result!
    tm.assert_series_equal(mapped, Series([np.nan, np.nan, "three"]))
def test_map_box(self):
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
# boxed value must be Timestamp instance
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
vals = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
s = Series(vals)
assert s.dtype == "timedelta64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
exp = Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = Series(vals)
assert s.dtype == "Period[M]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
exp = Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
def test_map_categorical(self):
    """map preserves the categorical dtype for 1:1 mappers, else falls back to object."""
    cats = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
    ser = Series(cats, name="XX", index=list("abcdefg"))

    lowered = ser.map(lambda value: value.lower())
    expected_cats = pd.Categorical(
        list("abbabcd"), categories=list("dcba"), ordered=True
    )
    expected = Series(expected_cats, name="XX", index=list("abcdefg"))
    tm.assert_series_equal(lowered, expected)
    tm.assert_categorical_equal(lowered.values, expected_cats)

    # many-to-one mapping collapses to object dtype
    constant = ser.map(lambda value: "A")
    tm.assert_series_equal(constant, Series(["A"] * 7, name="XX", index=list("abcdefg")))
    assert constant.dtype == object

    with pytest.raises(NotImplementedError, match=tm.EMPTY_STRING_PATTERN):
        ser.map(lambda value: value, na_action="ignore")
def test_map_datetimetz(self):
    """map on tz-aware datetimes keeps the tz; non-datetime results change dtype."""
    stamps = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
        "Asia/Tokyo"
    )
    ser = Series(stamps, name="XX")

    # keep tz
    shifted = ser.map(lambda ts: ts + pd.offsets.Day())
    expected_stamps = pd.date_range(
        "2011-01-02", "2011-01-03", freq="H"
    ).tz_localize("Asia/Tokyo")
    tm.assert_series_equal(shifted, Series(expected_stamps, name="XX"))

    # change dtype
    # GH 14506 : Returned dtype changed from int32 to int64
    hours = ser.map(lambda ts: ts.hour)
    tm.assert_series_equal(
        hours, Series(list(range(24)) + [0], name="XX", dtype=np.int64)
    )

    with pytest.raises(NotImplementedError, match=tm.EMPTY_STRING_PATTERN):
        ser.map(lambda ts: ts, na_action="ignore")

    # not vectorized
    def tz_name(ts):
        if not isinstance(ts, pd.Timestamp):
            raise ValueError
        return str(ts.tz)

    tm.assert_series_equal(ser.map(tz_name), Series(["Asia/Tokyo"] * 25, name="XX"))
@pytest.mark.parametrize(
    "vals,mapping,exp",
    [
        (list("abc"), {np.nan: "not NaN"}, [np.nan] * 3 + ["not NaN"]),
        (list("abc"), {"a": "a letter"}, ["a letter"] + [np.nan] * 3),
        (list(range(3)), {0: 42}, [42] + [np.nan] * 3),
    ],
)
def test_map_missing_mixed(self, vals, mapping, exp):
    """Mapping a Series ending in NaN maps each entry independently (GH 20495)."""
    # GH20495
    ser = Series(vals + [np.nan])
    tm.assert_series_equal(ser.map(mapping), Series(exp))
@pytest.mark.parametrize(
    "dti,exp",
    [
        (
            Series([1, 2], index=pd.DatetimeIndex([0, 31536000000])),
            DataFrame(np.repeat([[1, 2]], 2, axis=0), dtype="int64"),
        ),
        (
            tm.makeTimeSeries(nper=30),
            DataFrame(np.repeat([[1, 2]], 30, axis=0), dtype="int64"),
        ),
    ],
)
@pytest.mark.parametrize("aware", [True, False])
def test_apply_series_on_date_time_index_aware_series(self, dti, exp, aware):
    """apply over a (possibly tz-localized) DatetimeIndex must not error (GH 25959)."""
    # GH 25959
    # Calling apply on a localized time series should not cause an error
    index = dti.tz_localize("UTC").index if aware else dti.index
    result = Series(index).apply(lambda x: Series([1, 2]))
    tm.assert_frame_equal(result, exp)
def test_apply_scaler_on_date_time_index_aware_series(self):
    """Scalar-returning apply over a tz-aware index must not error (GH 25959)."""
    # GH 25959
    # Calling apply on a localized time series should not cause an error
    tz_series = tm.makeTimeSeries(nper=30).tz_localize("UTC")
    result = Series(tz_series.index).apply(lambda x: 1)
    tm.assert_series_equal(result, Series(np.ones(30), dtype="int64"))
def test_map_float_to_string_precision(self):
# GH 13228
ser = Series(1 / 3)
result = ser.map(lambda val: str(val)).to_dict()
expected = {0: "0.3333333333333333"}
assert result == expected
def test_map_with_invalid_na_action_raises(self):
    """An unknown na_action value raises ValueError (GH 32815)."""
    # https://github.com/pandas-dev/pandas/issues/32815
    msg = "na_action must either be 'ignore' or None"
    with pytest.raises(ValueError, match=msg):
        Series([1, 2, 3]).map(lambda x: x, na_action="____")
def test_apply_to_timedelta(self):
list_of_valid_strings = ["00:00:01", "00:00:02"]
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# FIXME: dont leave commented-out
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT]
a = pd.to_timedelta(list_of_strings) # noqa
b = Series(list_of_strings).apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
| bsd-3-clause |
gr33ndata/irlib | irlib/matrix.py | 1 | 17160 | '''
Information Retrieval Library
=============================
Matrix is an index for documents, terms, and their classes.
'''
# Author: Tarek Amr <@gr33ndata>
import sys, math, random
from superlist import SuperList
from progress import Progress
class MatrixDocs(list):
    """List of document records.

    Each record is a dict carrying at least the keys
    'id' (unique document id), 'class' (label) and 'terms' (term weights).
    """

    def doc_fields(self):
        """Return the set of keys every document record must carry."""
        return set(['id', 'class', 'terms'])

    def is_valid_doc(self, doc):
        """Return True if doc has at least the mandatory fields."""
        doc_fields = set(doc.keys())
        return self.doc_fields().issubset(doc_fields)

    def index(self, doc_id):
        """Return the position of the document whose 'id' equals doc_id.

        Raises IndexError when no document has that id.
        """
        for i, doc in enumerate(self):
            if doc['id'] == doc_id:
                return i
        raise IndexError

    def __contains__(self, doc_id):
        """Membership test by document id (not by full record equality)."""
        # Bug fix: the original called the unbound global name index(doc_id),
        # which raised NameError and was swallowed by a bare except, so the
        # test always returned False. Call self.index() and catch only the
        # IndexError it is documented to raise.
        try:
            self.index(doc_id)
            return True
        except IndexError:
            return False

    def add_unique(self, doc):
        """Append doc; if a doc with the same id exists, merge term counts into it."""
        if not self.is_valid_doc(doc):
            raise ValueError
        try:
            idx = self.index(doc['id'])
            # 'terms' is expected to provide an elementwise add() (SuperList)
            self[idx]['terms'].add(doc['terms'])
        except IndexError:
            self.append(doc)

    def shuffle(self):
        """Shuffle the documents in place (uses the random module's global RNG)."""
        random.shuffle(self)

    def split(self):
        """Split the documents into two halves; returns (left, right).

        The right half gets the extra document when the count is odd.
        """
        # Floor division keeps the split point an int on Python 3 as well
        # (the original used "/", which yields a float there).
        split_point = len(self) // 2
        left = MatrixDocs(self[:split_point])
        right = MatrixDocs(self[split_point:])
        return (left, right)
class Matrix:
    """Term/document matrix: a shared vocabulary plus one row of term
    weights per document, with dump/load and tf-idf helpers.
    """

    def __init__(self, whitelist=[], blacklist=[]):
        ''' Initialize our matrix.

        whitelist: If not empty, discard any terms not in whitelist,
                   when adding new terms via add_doc()
        blacklist: If not empty, discard any terms in blacklist,
                   when adding new terms via add_doc()
                   Anything in the blacklist will be discarded,
                   even if it is in the whitelist.
        terms: We will populate this with our vocabulary of terms
        docs: This is our actual 2D matrix terms/docs.
              A list of the following dictionary,
              { 'id': Unique ID to each document,
                'class': In case of labeled data, doc class label,
                'terms': list of 1's and 0's, i.e. term Frequencies.
              }
        '''
        # NOTE: the mutable default arguments are only read, never mutated,
        # so the shared-default pitfall does not apply here.
        # List of unique terms (vocabulary)
        self.terms = SuperList()
        # List of document records (id, class, aligned term weights)
        self.docs = MatrixDocs()
        self.whitelist = whitelist
        self.blacklist = blacklist

    def __len__(self):
        'Returns number of loaded documents'
        return len(self.docs)

    def vocabulary(self, threshold_map=[]):
        '''Returns list of all unique terms if threshold_map not given.
        Otherwise, only return terms whose threshold_map entry is 1.
        threshold_map must be the same length as the vocabulary,
        else an empty list is returned.
        '''
        if not threshold_map:
            return self.terms
        elif len(threshold_map) == len(self.terms):
            vlist = []
            for i in range(len(self.terms)):
                if threshold_map[i] == 1:
                    vlist.append(self.terms[i])
            return vlist
        else:
            return []

    def __str__(self):
        s = 'Matrix:'
        s += '\n * Vocabulary read: %d' % len(self.terms)
        s += '\n * Documents read: %d' % len(self.docs)
        return s

    def dump_tf(self, filename, freqs, delimiter='\t', header=True):
        ''' Dumps term frequencies: one "term<delimiter>freq" line per term.
        freqs must be aligned with self.vocabulary().
        '''
        # "with" guarantees the file is closed even if a write fails.
        with open(filename, 'w') as fd:
            if header:
                fd.write('term%sfreq\n' % delimiter)
            terms = self.vocabulary()
            for i in range(len(terms)):
                fd.write('%s%s%s\n' % (terms[i], delimiter, str(freqs[i])))

    def dump(self, filename, delimiter='\t', header=True):
        ''' Dumps matrix to CSV/TSV file.
        Columns are: id, class, then one column per vocabulary term.
        '''
        with open(filename, 'w') as fd:
            if header:
                columns = ['id', 'class'] + list(self.terms)
                fd.write('%s\n' % delimiter.join(columns))
            for doc in self.docs:
                fields = [doc['id'], doc['class']]
                fields += [str(term) for term in doc['terms']]
                fd.write('%s\n' % delimiter.join(fields))

    def load(self, filename, delimiter='\t', header=True):
        ''' Loads matrix from a CSV/TSV file previously written by dump().
        The first two columns are the document id and class; the remaining
        columns are term weights aligned with the header's term names.
        '''
        with open(filename, 'r') as fd:
            header_line = fd.readline()
            header_data = header_line.strip().split(delimiter)
            # First 2 columns are id and class
            self.terms = SuperList(header_data[2:])
            for line in fd.readlines():
                # Bug fix: the line must be split on the delimiter first.
                # The original indexed the raw string (line[0], line[2:]),
                # which loaded single characters instead of fields.
                fields = line.strip().split(delimiter)
                doc_data = {
                    'id': fields[0],
                    'class': fields[1],
                    # Values are kept as strings, exactly as read.
                    # TODO(review): confirm whether downstream code expects
                    # numeric term weights here and convert if so.
                    'terms': SuperList(fields[2:])
                }
                self.docs.append(doc_data)

    def dump_arff(self, filename, delimiter=',', clstype='NUMERIC'):
        ''' Dumps matrix to ARFF (Weka) file.
        clstype: ARFF type of the class attribute (e.g. NUMERIC or a nominal spec).
        '''
        with open(filename, 'w') as fd:
            header = '@RELATION %s\n\n' % filename.split('.')[0]
            header = header + '@ATTRIBUTE \'ARFFID\' NUMERIC\n'
            for term in self.terms:
                header = header + '@ATTRIBUTE \'' + term + '\' NUMERIC\n'
            header = header + '@ATTRIBUTE \'ClassLabel\' ' + clstype + '\n'
            fd.write('%s\n' % header)
            # Now we print data lines
            fd.write('@DATA\n')
            for doc in self.docs:
                line = doc['id']
                for term in doc['terms']:
                    line = line + delimiter + str(term)
                line = line + delimiter + str(doc['class'])
                fd.write('%s\n' % line)

    def dump_transposed(self, filename, delimiter='\t', header=True):
        ''' Dumps the transposed matrix: one row per term, one column per doc. '''
        with open(filename, 'w') as fd:
            if header:
                columns = ['terms'] + [doc['id'] for doc in self.docs]
                fd.write('%s\n' % delimiter.join(columns))
            for idx, term in enumerate(self.terms):
                fields = [term] + [str(doc['terms'][idx]) for doc in self.docs]
                fd.write('%s\n' % delimiter.join(fields))

    def dump_transposed_arff(self, filename):
        ''' Dumps the transposed matrix to an ARFF (Weka) file. '''
        with open(filename, 'w') as fd:
            header = '@RELATION %s\n\n' % filename.split('.')[0]
            header = header + '@ATTRIBUTE terms STRING\n'
            for doc in self.docs:
                header = header + '@ATTRIBUTE "%s" NUMERIC\n' % doc['id']
            fd.write('%s\n' % header)
            fd.write('@DATA\n')
            delimiter = ','
            for idx, term in enumerate(self.terms):
                line = '"%s"' % term
                for doc in self.docs:
                    line = line + delimiter + str(doc['terms'][idx])
                fd.write('%s\n' % line)

    def prune(self, prune_map, show_progress=True):
        ''' Helper method to remove terms (columns) of our matrix.
        prune_map is a list of 0's and 1's of same length as self.terms.
        For each term, if 0, then remove it, otherwise keep it.
        Returns False (and does nothing) if prune_map is missing or misaligned.
        NOTE: columns end up in reverse order relative to the original
        vocabulary; terms and document rows are reversed consistently,
        so term/column alignment is preserved.
        '''
        if not(prune_map) or len(prune_map) != len(self.terms):
            return False
        if show_progress:
            # print(...) with a single argument behaves identically on
            # Python 2 (parenthesized expression) and Python 3.
            print(' Pruning terms list ...')
        new_terms = SuperList()
        for i in range(len(prune_map) - 1, -1, -1):
            if prune_map[i] == 1:
                new_terms.append(self.terms[i])
        self.terms = new_terms
        if show_progress:
            print(' Pruning documents ...')
        p = Progress(n=len(self), percent=10)
        for doc in self.docs:
            new_doc_terms = SuperList()
            for i in range(len(prune_map) - 1, -1, -1):
                if prune_map[i] == 1:
                    new_doc_terms.append(doc['terms'][i])
            doc['terms'] = new_doc_terms
            if show_progress:
                p.show(message=' Pruning progress:')

    def freq_levels(self, threshold=3):
        ''' Creates two lists:
        threshold_map is a list of 0's and 1's,
            where 1 means term's total freq >= threshold
        freq_map is a list of term frequencies (summed over all docs)
        '''
        threshold_map = [0] * len(self.terms)
        freq_map = [0] * len(self.terms)
        for i in range(0, len(self.terms)):
            val = 0
            for doc in self.docs:
                if doc['terms'][i] != 0:
                    val += doc['terms'][i]
            if val >= threshold:
                threshold_map[i] = 1
            freq_map[i] = val
        return (threshold_map, freq_map)

    def __contains__(self, term):
        'Checks if a certain term is in the vocabulary'
        return self.terms.__contains__(term)

    def __getitem__(self, term):
        ''' If term exists in terms, returns its position in the list,
        otherwise, returns -1.
        '''
        if not term in self:
            return -1
        else:
            return self.terms.index(term)

    def do_padding(self):
        ''' Align the length of all rows in matrix.
        Each time we see a new term, the list of terms is expanded,
        and the matrix row for that document matches the new length.
        Rows added earlier for previous documents stay short, so this
        method expands all rows to the current vocabulary length.
        '''
        # Cheap early exit: if the last row already matches the first,
        # everything is assumed aligned.
        if len(self.docs[-1]['terms']) == len(self.docs[0]['terms']):
            return
        for doc in self.docs:
            doc['terms'].expand(new_len=len(self.terms))

    def _log_tf(self, value, log_base=10):
        '''Log-scaled term frequency: 1 + log(tf), or 0 when tf == 0.'''
        val = float(value)
        val = 1 + math.log(val, log_base) if val != 0 else float(0)
        return val

    def tf_idf(self, do_idf=True, log_base=10):
        ''' Converts matrix cells to tf.idf values in place.
        do_idf: if False, convert to (log-scaled) tf only.
        NOTE(review): a vocabulary term that appears in no document gives
        df == 0 and a ZeroDivisionError here -- confirm upstream guarantees.
        '''
        N = len(self.docs)
        # Document frequency: number of docs containing each term.
        df = SuperList([0] * len(self.terms))
        for doc in self.docs:
            row = SuperList([0] * len(self.terms))
            for idx in range(len(self.terms)):
                if doc['terms'][idx] > 0:
                    row[idx] = 1
            df.add(row)
        for doc in self.docs:
            for idx in range(len(self.terms)):
                tf = self._log_tf(doc['terms'][idx])
                idf = math.log(float(N) / df[idx], log_base)
                if do_idf:
                    doc['terms'][idx] = tf * idf
                else:
                    doc['terms'][idx] = tf

    def add_doc(self, doc_id='',
                doc_class='',
                doc_terms=[],
                frequency=False,
                do_padding=False,
                unique_ids=False,
                meta_data={}):
        ''' Add new document to our matrix:
        doc_id: Identifier for the document, eg. file name, url, etc.
        doc_class: You might need this in classification.
        doc_terms: List of terms you got after tokenizing the document.
                   Terms can be tuples; (string, frequency).
        frequency: If true, term occurrences are accumulated.
                   Else, occurrences are only 0 or 1 (a la Bernoulli).
        do_padding: Boolean. Check do_padding() for more info.
        unique_ids: When true, if two documents are added with same id,
                    then their terms are summed up into only one record.
        meta_data: More fields to add to the document, for your own use.
        Raises ValueError when doc_terms is empty.
        '''
        if not doc_terms:
            raise ValueError('doc_terms cannot be empty')
        # Update list of terms if new term seen,
        # and the document (row) with its associated data.
        my_doc_terms = SuperList()
        # Discard anything not in whitelist if it is not empty
        if self.whitelist:
            doc_terms = [t for t in doc_terms if t in self.whitelist]
        # Discard anything in blacklist if not empty
        if self.blacklist:
            doc_terms = [t for t in doc_terms if t not in self.blacklist]
        for term in doc_terms:
            if isinstance(term, tuple):
                # (term, frequency) pair: use the supplied frequency.
                term_idx = self.terms.unique_append(term[0])
                my_doc_terms.increment_after_padding(term_idx, term[1])
            else:
                term_idx = self.terms.unique_append(term)
                if frequency:
                    my_doc_terms.increment_after_padding(term_idx, 1)
                else:
                    my_doc_terms.insert_after_padding(term_idx, 1)
        # In the rare event when whitelisting causes an empty doc_terms list
        # we add at least one zero in the list of my_doc_terms.
        if not my_doc_terms:
            zeros = [float(0)] * len(self.vocabulary())
            my_doc_terms = SuperList(zeros)
        doc_data = {
            'id': doc_id,
            'class': doc_class,
            'terms': my_doc_terms
        }
        for key in meta_data:
            doc_data[key] = meta_data[key]
        if unique_ids:
            self.docs.add_unique(doc_data)
        else:
            self.docs.append(doc_data)
        if do_padding:
            self.do_padding()

    def query_to_vector(self, q_terms, frequency=False,):
        ''' Converts query to a list aligned with our self.terms.
        Terms not seen before will be ignored.
        q_terms: list of query terms
        frequency: return a multinomial or multivariate list?
        '''
        my_query_vector = SuperList()
        my_query_vector.expand(new_len=len(self.terms))
        for term in q_terms:
            try:
                term_idx = self.terms.index(term)
            except ValueError:
                # list.index raises ValueError for unseen terms; skip them.
                continue
            if frequency:
                my_query_vector.increment_after_padding(term_idx, 1)
            else:
                my_query_vector.insert_after_padding(term_idx, 1)
        return my_query_vector

    def get_stats(self):
        '''Return a Stats object computed over this matrix.'''
        return Stats(self)
'''
classes = {
'class1': {'terms': [1,2,0,3], 'totel': 6}
}
terms = []
'''
class Stats:
    """Corpus statistics (term/class probabilities, mutual information)
    computed over a Matrix.

    NOTE(review): the wiring that would populate self.classes, self.terms
    and self.mi_terms is commented out in __init__ (it depended on a
    Matrix.classes attribute that is itself commented out), so __str__,
    get_terms_freq, pr_term, pr_class, pr_joint and mi reference
    attributes that are never initialized -- confirm before use.
    """
    def __init__(self, matrix):
        # Keep a handle on the source matrix; N is meant to hold the total
        # number of term occurrences across all classes (see commented code).
        self.mx = matrix
        self.N = 0
        self.classes = {}
        #self.terms = SuperList()
        #for c in self.mx.classes:
        #    self.classes[c] = {}
        #    self.classes[c]['terms'] = self.mx.classes[c]
        #    self.terms.add(self.classes[c]['terms'])
        #    self.classes[c]['total'] = sum(self.classes[c]['terms'])
        #    self.N += self.classes[c]['total']
        #self.mi_terms = []
    def __str__(self):
        # NOTE(review): uses self.terms, which __init__ never sets (see class note).
        s = 'Matrix Stats:'
        s += '\n * Vocabulary/Terms: %d/%d' % (len(self.terms), self.N)
        return s
    def getN(self):
        ''' Get total number of terms, counting their frequencies too.
        Notice: This is not the same as len(vocabulary).
        '''
        return self.N
    def get_terms_freq(self, normalized=False):
        ''' Returns a [terms, freqs] pair of vocabulary terms and their occurrences.
        If normalized is True, divide each frequency by the total number of terms.
        NOTE(review): relies on the uninitialized self.terms attribute.
        '''
        terms = self.mx.terms
        freq = self.terms.div(self.N) if normalized else self.terms
        return [terms, freq]
    def pr_term(self, t):
        ' Get probability of term t (0 if t is not in the vocabulary) '
        i = self.mx[t]
        if i == -1:
            return 0
        return float(self.terms[i]) / self.N
    def pr_class(self, c):
        ' Get probability of class c '
        return float(self.classes[c]['total']) / self.N
    def pr_joint(self, t, c):
        'Get joint probability between term t and class c (0 for unseen terms)'
        i = self.mx[t]
        if i == -1:
            return 0
        return float(self.classes[c]['terms'][i]) / self.N
    def mi(self):
        ''' Accumulate pointwise mutual information per vocabulary term.
        NOTE(review): appends to self.mi_terms, which is never initialized
        (its initialization is commented out in __init__); the bare except
        below also hides any error, not just log-of-zero.
        '''
        for t in self.mx.vocabulary():
            mi = 0
            for c in self.classes:
                try:
                    mi += self.pr_joint(t,c) * math.log10( self.pr_joint(t,c) / ( self.pr_term(t) * self.pr_class(c) ))
                except:
                    # Oh, log(0), let's set mi = 0
                    mi = 0
            self.mi_terms.append(mi)
        print self.classes
        print self.mi_terms
# Library module: nothing to execute when run directly.
if __name__ == '__main__':
    pass
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.