gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in math_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def _safe_shape_div(x, y):
  """Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`.

  Clamping the divisor to at least 1 avoids a division by zero when a
  reduced dimension has size 0 (in which case `x` is 0 as well).
  """
  return x // math_ops.maximum(y, 1)
@ops.RegisterGradient("Sum")
def _SumGrad(op, grad):
  """Gradient for Sum: broadcast `grad` back over the reduced dimensions."""
  # Fast path for when reducing to a scalar and ndims is known: adds only
  # Reshape and Tile ops (and possibly a Shape).
  if (op.inputs[0].get_shape().ndims is not None and op.inputs[1].op.type ==
      "Const"):
    rank = op.inputs[0].get_shape().ndims
    axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr("value"))
    if np.array_equal(axes, np.arange(rank)):  # Reduce all dims.
      grad = array_ops.reshape(grad, [1] * rank)
      # If shape is not fully defined (but rank is), we use Shape.
      if op.inputs[0].get_shape().is_fully_defined():
        input_shape = op.inputs[0].get_shape().as_list()
      else:
        input_shape = array_ops.shape(op.inputs[0])
      return [array_ops.tile(grad, input_shape), None]
  # General case: reshape grad to the input shape with reduced dims kept as
  # size 1, then tile it back up to the full input shape.
  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  # No gradient flows to the reduction indices (second input).
  return [array_ops.tile(grad, tile_scaling), None]
def _MinOrMaxGrad(op, grad):
  """Gradient for Min or Max. Amazingly it's precisely the same code."""
  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  # Reshape output and grad so they broadcast against the input.
  y = op.outputs[0]
  y = array_ops.reshape(y, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  # Compute the number of selected (maximum or minimum) elements in each
  # reduction dimension. If there are multiple minimum or maximum elements
  # then the gradient will be divided between them.
  indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)
  num_selected = array_ops.reshape(
      math_ops.reduce_sum(indicators, op.inputs[1]),
      output_shape_kept_dims)
  # Each selected element gets an equal share of its reduction's gradient.
  return [math_ops.div(indicators, num_selected) * grad, None]
@ops.RegisterGradient("Max")
def _MaxGrad(op, grad):
  """Gradient for Max; delegates to the shared min/max implementation."""
  grads = _MinOrMaxGrad(op, grad)
  return grads
@ops.RegisterGradient("Min")
def _MinGrad(op, grad):
  """Gradient for Min."""
  return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Mean")
def _MeanGrad(op, grad):
  """Gradient for Mean: the Sum gradient divided by the element count."""
  sum_grad = _SumGrad(op, grad)[0]
  input_shape = array_ops.shape(op.inputs[0])
  output_shape = array_ops.shape(op.outputs[0])
  # factor = number of input elements contributing to each output element.
  factor = _safe_shape_div(math_ops.reduce_prod(input_shape),
                           math_ops.reduce_prod(output_shape))
  return sum_grad / math_ops.cast(factor, sum_grad.dtype), None
@ops.RegisterGradient("Prod")
def _ProdGrad(op, grad):
  """Gradient for Prod."""
  # The gradient can be expressed by dividing the product by each entry of the
  # input tensor, but this approach can't deal with zeros in the input.
  # Here, we avoid this problem by composing the output as a product of two
  # cumprod operations.
  input_shape = array_ops.shape(op.inputs[0])
  # Reshape reduction indices for the case where the parameter is a scalar.
  reduction_indices = array_ops.reshape(op.inputs[1], [-1])
  # Expand grad to full input shape.
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  grad = array_ops.tile(grad, tile_scaling)
  # Pack all reduced dimensions into a single one, so we can perform the
  # cumprod ops. If the reduction dims list is empty, it defaults to float32,
  # so we need to cast here. We put all the shape-related ops on CPU to avoid
  # copying back and forth, and since listdiff is CPU only.
  with ops.device("/cpu:0"):
    reduced = math_ops.cast(reduction_indices, dtypes.int32)
    idx = math_ops.range(0, array_ops.rank(op.inputs[0]))
    # `other` holds the non-reduced axes; `perm` moves reduced axes first.
    other, _ = array_ops.setdiff1d(idx, reduced)
    perm = array_ops.concat(0, [reduced, other])
    reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
    other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
  permuted = array_ops.transpose(op.inputs[0], perm)
  permuted_shape = array_ops.shape(permuted)
  reshaped = array_ops.reshape(permuted, (reduced_num, other_num))
  # Calculate product, leaving out the current entry:
  # prod(x_except_i) = cumprod(before i) * cumprod(after i).
  left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
  right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
  y = array_ops.reshape(left * right, permuted_shape)
  # Invert the transpose and reshape operations.
  # Make sure to set the statically known shape information through a reshape.
  out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
  return array_ops.reshape(out, input_shape), None
@ops.RegisterGradient("SegmentSum")
def _SegmentSumGrad(op, grad):
  """Gradient for SegmentSum."""
  # Each input row receives the gradient of its segment unchanged.
  return array_ops.gather(grad, op.inputs[1]), None
@ops.RegisterGradient("SegmentMean")
def _SegmentMeanGrad(op, grad):
  """Gradient for SegmentMean."""
  input_rank = array_ops.rank(op.inputs[0])
  # Build a ones tensor of shape [num_segment_ids, 1, ..., 1] so that
  # segment_sum over it yields each segment's row count (broadcastable).
  ones_shape = array_ops.concat(
      0, [array_ops.shape(op.inputs[1]),
          array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)])
  ones = array_ops.fill(ones_shape,
                        constant_op.constant(1, dtype=grad.dtype))
  # Divide each segment's gradient by the segment size, then scatter back.
  scaled_grad = math_ops.div(grad, math_ops.segment_sum(ones, op.inputs[1]))
  return array_ops.gather(scaled_grad, op.inputs[1]), None
@ops.RegisterGradient("SparseSegmentSum")
def _SparseSegmentSumGrad(op, grad):
  """Gradient for SparseSegmentSum."""
  input_rows = array_ops.shape(op.inputs[0])[0]
  # Gather each row's segment gradient, then scatter-add back to the rows
  # selected by the indices input. Indices and segment ids get no gradient.
  return (math_ops.unsorted_segment_sum(
      array_ops.gather(grad, op.inputs[2]),
      op.inputs[1], input_rows), None, None)
@ops.RegisterGradient("SparseSegmentMean")
def _SparseSegmentMeanGrad(op, grad):
  """Gradient for SparseSegmentMean."""
  dim0 = array_ops.shape(op.inputs[0])[0]
  # Delegates to a dedicated kernel; indices/segment ids get no gradient.
  return (math_ops.sparse_segment_mean_grad(grad,
                                            op.inputs[1],
                                            op.inputs[2],
                                            dim0),
          None, None)
@ops.RegisterGradient("SparseSegmentSqrtN")
def _SparseSegmentSqrtNGrad(op, grad):
  """Gradient for SparseSegmentSqrtN."""
  dim0 = array_ops.shape(op.inputs[0])[0]
  # Delegates to a dedicated kernel; indices/segment ids get no gradient.
  return (math_ops.sparse_segment_sqrt_n_grad(grad,
                                              op.inputs[1],
                                              op.inputs[2],
                                              dim0),
          None, None)
def _SegmentMinOrMaxGrad(op, grad):
  """Gradient for SegmentMin and SegmentMax. Both share the same code."""
  zeros = array_ops.zeros(array_ops.shape(op.inputs[0]),
                          dtype=op.inputs[0].dtype)
  # Get the number of selected (minimum or maximum) elements in each segment.
  gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])
  is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
  num_selected = math_ops.segment_sum(math_ops.cast(is_selected, grad.dtype),
                                      op.inputs[1])
  # Compute the gradient for each segment. The gradient for the ith segment is
  # divided evenly among the selected elements in that segment.
  weighted_grads = math_ops.div(grad, num_selected)
  gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])
  # Non-selected elements receive zero gradient.
  return math_ops.select(is_selected, gathered_grads, zeros), None
@ops.RegisterGradient("SegmentMin")
def _SegmentMinGrad(op, grad):
  """Gradient for SegmentMin; shares its implementation with SegmentMax."""
  grads = _SegmentMinOrMaxGrad(op, grad)
  return grads
@ops.RegisterGradient("SegmentMax")
def _SegmentMaxGrad(op, grad):
  """Gradient for SegmentMax; shares its implementation with SegmentMin."""
  grads = _SegmentMinOrMaxGrad(op, grad)
  return grads
@ops.RegisterGradient("UnsortedSegmentSum")
def _UnsortedSegmentSumGrad(op, grad):
  """Gradient for UnsortedSegmentSum."""
  # Each input element receives the gradient of its segment; the segment ids
  # and num_segments inputs get no gradient.
  return array_ops.gather(grad, op.inputs[1]), None, None
@ops.RegisterGradient("Abs")
def _AbsGrad(op, grad):
  """Returns grad * sign(x)."""
  x = op.inputs[0]
  return grad * math_ops.sign(x)
@ops.RegisterGradient("Neg")
def _NegGrad(_, grad):
  """Gradient for Neg: d(-x)/dx == -1, so just negate the incoming grad."""
  neg_grad = -grad
  return neg_grad
@ops.RegisterGradient("Inv")
def _InvGrad(op, grad):
  """Returns -grad * (1 / x^2)."""
  y = op.outputs[0]  # y = 1 / x
  # pylint: disable=protected-access
  return gen_math_ops._inv_grad(y, grad)
@ops.RegisterGradient("InvGrad")
def _InvGradGrad(op, grad):
  """Second-order gradient for Inv, i.e. gradient of InvGrad itself."""
  b = op.inputs[1]
  # op.output[0]: y = -b * conj(a)^2
  # Control dependency delays the conj ops until grad is available.
  with ops.control_dependencies([grad.op]):
    ca = math_ops.conj(op.inputs[0])
    cg = math_ops.conj(grad)
    # pylint: disable=protected-access
    return cg * -2.0 * b * ca, gen_math_ops._inv_grad(ca, grad)
@ops.RegisterGradient("Square")
def _SquareGrad(op, grad):
  """Returns grad * 2 * conj(x)."""
  x = op.inputs[0]
  # Added control dependencies to prevent 2*x from being computed too early.
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * (2.0 * x)
@ops.RegisterGradient("Sqrt")
def _SqrtGrad(op, grad):
  """Returns grad * 0.5 / y, computed by a dedicated kernel."""
  y = op.outputs[0]  # y = x^(1/2)
  # pylint: disable=protected-access
  return gen_math_ops._sqrt_grad(y, grad)
@ops.RegisterGradient("SqrtGrad")
def _SqrtGradGrad(op, grad):
  """Second-order gradient for Sqrt, i.e. gradient of SqrtGrad itself."""
  a = op.inputs[0]
  y = op.outputs[0]  # y = 0.5 * b / conj(a)
  with ops.control_dependencies([grad.op]):
    ga = grad / a
    return -math_ops.conj(ga) * y, 0.5 * ga
@ops.RegisterGradient("Rsqrt")
def _RsqrtGrad(op, grad):
  """Returns -0.5 * grad * conj(y)^3."""
  y = op.outputs[0]  # y = x^(-1/2)
  # pylint: disable=protected-access
  return gen_math_ops._rsqrt_grad(y, grad)
@ops.RegisterGradient("RsqrtGrad")
def _RsqrtGradGrad(op, grad):
  """Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3."""
  a = op.inputs[0]  # a = x^{-1/2}
  b = op.inputs[1]  # backprop gradient for a
  with ops.control_dependencies([grad.op]):
    ca = math_ops.conj(a)
    cg = math_ops.conj(grad)
    grad_a = -1.5 * cg * b * math_ops.square(ca)
    # pylint: disable=protected-access
    grad_b = gen_math_ops._rsqrt_grad(ca, grad)
    return grad_a, grad_b
@ops.RegisterGradient("Exp")
def _ExpGrad(op, grad):
  """Returns grad * exp(x)."""
  y = op.outputs[0]  # y = e^x
  with ops.control_dependencies([grad.op]):
    y = math_ops.conj(y)
    return grad * y
@ops.RegisterGradient("Log")
def _LogGrad(op, grad):
  """Returns grad * (1/x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.inv(x)
@ops.RegisterGradient("Log1p")
def _Log1pGrad(op, grad):
  """Returns grad * (1/(1 + x))."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.inv(1 + x)
@ops.RegisterGradient("Tanh")
def _TanhGrad(op, grad):
  """Returns grad * (1 - tanh(x) * tanh(x))."""
  y = op.outputs[0]  # y = tanh(x)
  with ops.control_dependencies([grad.op]):
    y = math_ops.conj(y)
    # pylint: disable=protected-access
    return gen_math_ops._tanh_grad(y, grad)
@ops.RegisterGradient("TanhGrad")
def _TanhGradGrad(op, grad):
  """Second-order gradient for Tanh, i.e. gradient of TanhGrad itself."""
  with ops.control_dependencies([grad.op]):
    a = math_ops.conj(op.inputs[0])
    b = math_ops.conj(op.inputs[1])
    # pylint: disable=protected-access
    return grad * -2.0 * b * a, gen_math_ops._tanh_grad(a, grad)
@ops.RegisterGradient("Erf")
def _ErfGrad(op, grad):
  """Returns grad * 2/sqrt(pi) * exp(-x**2)."""
  x = op.inputs[0]
  two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Erfc")
def _ErfcGrad(op, grad):
  """Returns -grad * 2/sqrt(pi) * exp(-x**2)."""
  x = op.inputs[0]
  minus_two_over_root_pi = constant_op.constant(-2 / np.sqrt(np.pi),
                                                dtype=grad.dtype)
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Lgamma")
def _LgammaGrad(op, grad):
  """Returns grad * digamma(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.digamma(x)
@ops.RegisterGradient("Digamma")
def _DigammaGrad(op, grad):
  """Compute gradient of the digamma function with respect to its argument."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    # digamma' = polygamma of order 1 (the trigamma function).
    return grad * math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x)
@ops.RegisterGradient("Igamma")
def _IgammaGrad(op, grad):
  """Returns gradient of igamma(a, x) with respect to a and x."""
  # TODO(ebrevdo): Perhaps add the derivative w.r.t. a
  a = op.inputs[0]
  x = op.inputs[1]
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  unused_ra, rx = gen_array_ops._broadcast_gradient_args(sa, sx)
  # Perform operations in log space before summing, because Gamma(a)
  # and Gamma'(a) can grow large.
  partial_x = math_ops.exp(-x + (a-1) * math_ops.log(x) - math_ops.lgamma(a))
  # Sum over broadcast axes so the gradient matches x's shape.
  return (None,
          array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Igammac")
def _IgammacGrad(op, grad):
  """Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. a and x."""
  # igammac is one minus igamma, so each defined partial is simply negated.
  negated = []
  for g in _IgammaGrad(op, grad):
    negated.append(None if g is None else -1 * g)
  return negated
@ops.RegisterGradient("Zeta")
def _ZetaGrad(op, grad):
  """Returns gradient of zeta(x, q) with respect to x and q."""
  # TODO(tillahoffmann): Add derivative with respect to x
  x = op.inputs[0]
  q = op.inputs[1]
  # Broadcast gradients
  sx = array_ops.shape(x)
  sq = array_ops.shape(q)
  unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    q = math_ops.conj(q)
    # d/dq zeta(x, q) = -x * zeta(x + 1, q)
    partial_q = -x * math_ops.zeta(x + 1, q)
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))
@ops.RegisterGradient("Polygamma")
def _PolygammaGrad(op, grad):
  """Returns gradient of psi(n, x) with respect to n and x."""
  # TODO(tillahoffmann): Add derivative with respect to n
  n = op.inputs[0]
  x = op.inputs[1]
  # Broadcast gradients
  sn = array_ops.shape(n)
  sx = array_ops.shape(x)
  unused_rn, rx = gen_array_ops._broadcast_gradient_args(sn, sx)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    n = math_ops.conj(n)
    x = math_ops.conj(x)
    # d/dx psi(n, x) = psi(n + 1, x)
    partial_x = math_ops.polygamma(n + 1, x)
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Sigmoid")
def _SigmoidGrad(op, grad):
  """Returns grad * sigmoid(x) * (1 - sigmoid(x))."""
  y = op.outputs[0]  # y = sigmoid(x)
  with ops.control_dependencies([grad.op]):
    y = math_ops.conj(y)
    # pylint: disable=protected-access
    return gen_math_ops._sigmoid_grad(y, grad)
@ops.RegisterGradient("SigmoidGrad")
def _SigmoidGradGrad(op, grad):
  """Second-order gradient for Sigmoid, i.e. gradient of SigmoidGrad."""
  with ops.control_dependencies([grad.op]):
    a = math_ops.conj(op.inputs[0])
    b = math_ops.conj(op.inputs[1])
    gb = grad * b
    # pylint: disable=protected-access
    return gb - 2.0 * gb * a, gen_math_ops._sigmoid_grad(a, grad)
@ops.RegisterGradient("Sign")
def _SignGrad(op, _):
  """The derivative of sign(x) is zero wherever it is defined."""
  x = op.inputs[0]
  zero_grad = array_ops.zeros(array_ops.shape(x), dtype=x.dtype)
  return zero_grad
@ops.RegisterGradient("Sin")
def _SinGrad(op, grad):
  """Returns grad * cos(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.cos(x)
@ops.RegisterGradient("Cos")
def _CosGrad(op, grad):
  """Returns grad * -sin(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return -grad * math_ops.sin(x)
@ops.RegisterGradient("Tan")
def _TanGrad(op, grad):
  """Returns grad * sec^2(x) (i.e. 1/cos^2(x))."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    secx = math_ops.inv(math_ops.cos(x))
    secx2 = math_ops.square(secx)
    return grad * secx2
@ops.RegisterGradient("Asin")
def _AsinGrad(op, grad):
  """Returns grad * 1/sqrt(1-x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    den = math_ops.sqrt(math_ops.sub(one, x2))
    inv = math_ops.inv(den)
    return grad * inv
@ops.RegisterGradient("Acos")
def _AcosGrad(op, grad):
  """Returns grad * -1/sqrt(1-x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    den = math_ops.sqrt(math_ops.sub(one, x2))
    inv = math_ops.inv(den)
    return -grad * inv
@ops.RegisterGradient("Atan")
def _AtanGrad(op, grad):
  """Returns grad * 1/(1 + x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    inv = math_ops.inv(math_ops.add(one, x2))
    return grad * inv
@ops.RegisterGradient("AddN")
def _AddNGrad(op, grad):
  """Copies the gradient to all inputs."""
  # AddN performs no broadcasting, so every input receives grad unchanged.
  return [grad for _ in op.inputs]
@ops.RegisterGradient("Add")
def _AddGrad(op, grad):
  """Gradient for Add: grad flows to both inputs, reduced over broadcasts."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),
          array_ops.reshape(math_ops.reduce_sum(grad, ry), sy))
@ops.RegisterGradient("Sub")
def _SubGrad(op, grad):
  """Gradient for Sub: +grad to x, -grad to y, reduced over broadcasts."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),
          array_ops.reshape(-math_ops.reduce_sum(grad, ry), sy))
@ops.RegisterGradient("Mul")
def _MulGrad(op, grad):
  """The gradient of scalar multiplication."""
  x = op.inputs[0]
  y = op.inputs[1]
  assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  # d/dx (x*y) = y, d/dy (x*y) = x; reduce over broadcast axes.
  return (array_ops.reshape(math_ops.reduce_sum(grad * y, rx), sx),
          array_ops.reshape(math_ops.reduce_sum(x * grad, ry), sy))
@ops.RegisterGradient("Div")
def _DivGrad(op, grad):
  """Gradient for Div: grad/y to x, -grad*x/y^2 to y."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)  # pylint: disable=protected-access
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(math_ops.reduce_sum(grad / y, rx), sx),
          array_ops.reshape(math_ops.reduce_sum(grad *
                                                (-x / math_ops.square(y)), ry), sy))
@ops.RegisterGradient("Pow")
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  # Gradient w.r.t. the base, reduced over broadcast axes.
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = math_ops.select(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = math_ops.select(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  # Gradient w.r.t. the exponent.
  gy = array_ops.reshape(
      math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
def _MaximumMinimumGrad(op, grad, selector_op):
  """Factor out the code for the gradient of Maximum or Minimum.

  `selector_op` picks which input "won" elementwise; the winner receives
  the gradient and the other input receives zero.
  """
  x = op.inputs[0]
  y = op.inputs[1]
  gdtype = grad.dtype
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  gradshape = array_ops.shape(grad)
  zeros = array_ops.zeros(gradshape, gdtype)
  xmask = selector_op(x, y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  xgrad = math_ops.select(xmask, grad, zeros)
  ygrad = math_ops.select(math_ops.logical_not(xmask), grad, zeros)
  # Reduce over broadcast axes so each gradient matches its input's shape.
  gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
  gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
  return (gx, gy)
@ops.RegisterGradient("Maximum")
def _MaximumGrad(op, grad):
  """Returns grad*(x > y, x <= y) with type of grad."""
  selector = math_ops.greater_equal
  return _MaximumMinimumGrad(op, grad, selector)
@ops.RegisterGradient("Minimum")
def _MinimumGrad(op, grad):
  """Returns grad*(x < y, x >= y) with type of grad."""
  selector = math_ops.less_equal
  return _MaximumMinimumGrad(op, grad, selector)
@ops.RegisterGradient("SquaredDifference")
def _SquaredDifferenceGrad(op, grad):
  """Returns the gradient for (x-y)^2."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  # .op works with Tensors or IndexedSlices
  with ops.control_dependencies([grad.op]):
    # The parens ensure that if grad is IndexedSlices, it'll get multiplied by
    # Tensor (not a number like 2.0) which causes it to convert to Tensor.
    x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
  # y gets the negated gradient; both reduced over broadcast axes.
  return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
          -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))
# Logical operations have no gradients: their outputs are boolean /
# piecewise-constant, so no gradient flows through them.
ops.NotDifferentiable("Less")
ops.NotDifferentiable("LessEqual")
ops.NotDifferentiable("Greater")
ops.NotDifferentiable("GreaterEqual")
ops.NotDifferentiable("Equal")
ops.NotDifferentiable("NotEqual")
ops.NotDifferentiable("LogicalAnd")
ops.NotDifferentiable("LogicalOr")
ops.NotDifferentiable("LogicalNot")
@ops.RegisterGradient("Select")
def _SelectGrad(op, grad):
  """Gradient for Select: route grad to whichever branch was selected."""
  c = op.inputs[0]
  x = op.inputs[1]
  zeros = array_ops.zeros_like(x)
  # The condition gets no gradient; each branch gets grad where it was chosen.
  return (None, math_ops.select(c, grad, zeros),
          math_ops.select(c, zeros, grad))
@ops.RegisterGradient("MatMul")
def _MatMulGrad(op, grad):
  """Gradient for MatMul, covering all four transpose_a/transpose_b cases."""
  t_a = op.get_attr("transpose_a")
  t_b = op.get_attr("transpose_b")
  if not t_a and not t_b:
    return (math_ops.matmul(grad, op.inputs[1], transpose_b=True),
            math_ops.matmul(op.inputs[0], grad, transpose_a=True))
  elif not t_a and t_b:
    return (math_ops.matmul(grad, op.inputs[1]),
            math_ops.matmul(grad, op.inputs[0], transpose_a=True))
  elif t_a and not t_b:
    return (math_ops.matmul(op.inputs[1], grad, transpose_b=True),
            math_ops.matmul(op.inputs[0], grad))
  elif t_a and t_b:
    return (math_ops.matmul(op.inputs[1], grad, transpose_a=True,
                            transpose_b=True),
            math_ops.matmul(grad, op.inputs[0], transpose_a=True,
                            transpose_b=True))
@ops.RegisterGradient("SparseMatMul")
def _SparseMatMulGrad(op, grad):
  """Gradient for SparseMatMul."""
  t_a = op.get_attr("transpose_a")
  t_b = op.get_attr("transpose_b")
  # Map each operand to whether it should be treated as sparse in the
  # backward matmuls.
  is_sparse = {
      op.inputs[0]: op.get_attr("a_is_sparse"),
      op.inputs[1]: op.get_attr("b_is_sparse"),
      # Use heuristic to figure out if grad might be sparse
      grad: (grad.op.type == "ReluGrad")
  }
  def _SparseMatMul(t1, t2, out_dtype,
                    transpose_a=False, transpose_b=False):
    """Helper function to create SparseMatMul op."""
    assert t1 in is_sparse and t2 in is_sparse
    t1_sparse = is_sparse[t1]
    t2_sparse = is_sparse[t2]
    # Materialize the transpose of t2 because the sparse kernel only
    # supports transpose_b=False here.
    if transpose_b:
      t2 = array_ops.transpose(t2)
      transpose_b = False
    prod = math_ops.matmul(t1, t2,
                           transpose_a=transpose_a,
                           transpose_b=transpose_b,
                           a_is_sparse=t1_sparse,
                           b_is_sparse=t2_sparse)
    if prod.dtype != out_dtype:
      prod = math_ops.cast(prod, out_dtype)
    return prod
  dtype_a = op.inputs[0].dtype
  dtype_b = op.inputs[1].dtype
  # Same four transpose cases as _MatMulGrad, using the sparse helper.
  if not t_a and not t_b:
    return (_SparseMatMul(grad, op.inputs[1], dtype_a, transpose_b=True),
            _SparseMatMul(op.inputs[0], grad, dtype_b, transpose_a=True))
  elif not t_a and t_b:
    return (_SparseMatMul(grad, op.inputs[1], dtype_a),
            _SparseMatMul(grad, op.inputs[0], dtype_b, transpose_a=True))
  elif t_a and not t_b:
    return (_SparseMatMul(op.inputs[1], grad, dtype_a, transpose_b=True),
            _SparseMatMul(op.inputs[0], grad, dtype_b))
  elif t_a and t_b:
    return (_SparseMatMul(op.inputs[1], grad, dtype_a,
                          transpose_a=True, transpose_b=True),
            _SparseMatMul(grad, op.inputs[0], dtype_b,
                          transpose_a=True, transpose_b=True))
@ops.RegisterGradient("Floor")
def _FloorGrad(_, unused_grad):
  """Floor is piecewise constant, so its input receives no gradient."""
  return [None]
@ops.RegisterGradient("BatchMatMul")
def _BatchMatMul(op, grad):
  """Returns the gradient of x and y given the gradient of x * y."""
  x = op.inputs[0]
  y = op.inputs[1]
  adj_x = op.get_attr("adj_x")
  adj_y = op.get_attr("adj_y")
  # One case per adj_x/adj_y combination; arguments to batch_matmul are
  # (a, b, adj_a, adj_b).
  if not adj_x:
    if not adj_y:
      grad_x = math_ops.batch_matmul(grad, y, False, True)
      grad_y = math_ops.batch_matmul(x, grad, True, False)
    else:
      grad_x = math_ops.batch_matmul(grad, y, False, False)
      grad_y = math_ops.batch_matmul(grad, x, True, False)
  else:
    if not adj_y:
      grad_x = math_ops.batch_matmul(y, grad, False, True)
      grad_y = math_ops.batch_matmul(x, grad, False, False)
    else:
      grad_x = math_ops.batch_matmul(y, grad, True, True)
      grad_y = math_ops.batch_matmul(grad, x, True, True)
  return grad_x, grad_y
# Sequence-generating ops have no gradients with respect to their parameters.
ops.NotDifferentiable("Range")
ops.NotDifferentiable("LinSpace")
@ops.RegisterGradient("Complex")
def _ComplexGrad(op, grad):
  """Returns the real and imaginary components of 'grad', respectively."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),
          array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))
@ops.RegisterGradient("Real")
def _RealGrad(_, grad):
  """Returns 'grad' as the real part and set the imaginary part 0."""
  zero = constant_op.constant(0, dtype=grad.dtype)
  return math_ops.complex(grad, zero)
@ops.RegisterGradient("Imag")
def _ImagGrad(_, grad):
  """Returns 'grad' as the imaginary part and set the real part 0."""
  zero = constant_op.constant(0, dtype=grad.dtype)
  return math_ops.complex(zero, grad)
@ops.RegisterGradient("Conj")
def _ConjGrad(_, grad):
  """Returns the complex conjugate of grad."""
  return math_ops.conj(grad)
@ops.RegisterGradient("ComplexAbs")
def _ComplexAbsGrad(op, grad):
  """Returns the gradient of ComplexAbs."""
  # TODO(b/27786104): The cast to complex could be removed once arithmetic
  # supports mixtures of complex64 and real values.
  return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
          math_ops.sign(op.inputs[0]))
@ops.RegisterGradient("Cast")
def _CastGrad(op, grad):
  """Gradient for Cast: cast grad back to the source dtype.

  Only float/complex conversions are differentiable; any other cast
  produces no gradient.
  """
  differentiable = [dtypes.float16, dtypes.float32, dtypes.float64,
                    dtypes.bfloat16, dtypes.complex64, dtypes.complex128]
  src_type = op.inputs[0].dtype.base_dtype
  dst_type = grad.dtype.base_dtype
  if src_type not in differentiable or dst_type not in differentiable:
    return None
  return math_ops.cast(grad, src_type)
def _FFTSizeForGrad(grad, rank):
  """Returns the product of the innermost `rank` dimensions of `grad`."""
  return math_ops.reduce_prod(
      array_ops.slice(
          array_ops.reverse(array_ops.shape(grad), (True,)), (0,), (rank,)))
@ops.RegisterGradient("FFT")
def _FFTGrad(_, grad):
  """Gradient for FFT: size * ifft(grad) (ifft includes a 1/size factor)."""
  size = math_ops.cast(_FFTSizeForGrad(grad, 1), dtypes.float32)
  return math_ops.ifft(grad) * math_ops.complex(size, 0.)
@ops.RegisterGradient("IFFT")
def _IFFTGrad(_, grad):
  """Gradient for IFFT: fft(grad) / size."""
  rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 1), dtypes.float32)
  return math_ops.fft(grad) * math_ops.complex(rsize, 0.)
@ops.RegisterGradient("FFT2D")
def _FFT2DGrad(_, grad):
  """Gradient for FFT2D: size * ifft2d(grad)."""
  size = math_ops.cast(_FFTSizeForGrad(grad, 2), dtypes.float32)
  return math_ops.ifft2d(grad) * math_ops.complex(size, 0.)
@ops.RegisterGradient("IFFT2D")
def _IFFT2DGrad(_, grad):
  """Gradient for IFFT2D: fft2d(grad) / size."""
  rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 2), dtypes.float32)
  return math_ops.fft2d(grad) * math_ops.complex(rsize, 0.)
@ops.RegisterGradient("FFT3D")
def _FFT3DGrad(_, grad):
  """Gradient for FFT3D: size * ifft3d(grad)."""
  size = math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)
  return math_ops.ifft3d(grad) * math_ops.complex(size, 0.)
@ops.RegisterGradient("IFFT3D")
def _IFFT3DGrad(_, grad):
  """Gradient for IFFT3D: fft3d(grad) / size."""
  rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)
  return math_ops.fft3d(grad) * math_ops.complex(rsize, 0.)
@ops.RegisterGradient("Cross")
def _CrossGrad(op, grad):
  """Gradient for Cross: (v x grad, grad x u)."""
  u = op.inputs[0]
  v = op.inputs[1]
  return (math_ops.cross(v, grad), math_ops.cross(grad, u))
@ops.RegisterGradient("Cumsum")
def _CumsumGrad(op, grad):
  """Gradient for Cumsum: a cumsum of grad in the opposite direction."""
  axis = op.inputs[1]
  exclusive = op.get_attr("exclusive")
  reverse = op.get_attr("reverse")
  # The axis input gets no gradient.
  return [math_ops.cumsum(grad, axis, exclusive=exclusive,
                          reverse=not reverse), None]
@ops.RegisterGradient("Cumprod")
def _CumprodGrad(op, grad):
  """Gradient for Cumprod."""
  x = op.inputs[0]
  axis = op.inputs[1]
  exclusive = op.get_attr("exclusive")
  reverse = op.get_attr("reverse")
  # TODO This fails when x contains 0 and should be fixed: the final
  # division `out / x` produces inf/nan for zero-valued entries.
  prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)
  out = math_ops.cumsum(prod * grad, axis, exclusive=exclusive,
                        reverse=not reverse)
  # The axis input gets no gradient.
  return [out / x, None]
|
|
from pyswagger import App
from pyswagger.getter import UrlGetter, DictGetter, SimpleGetter
from pyswagger.resolve import Resolver
from pyswagger.utils import _diff_
from .utils import get_test_data_folder
import unittest
import os
import json
class _MyCustomException(Exception):
    # Sentinel exception type used to prove the custom getter callback ran.
    pass
def _my_custom_load(path):
    """Getter callback that always fails with _MyCustomException."""
    raise _MyCustomException('a testing exception')
class _MyCustomGetter(SimpleGetter):
    # SimpleGetter invokes this callback to fetch spec content.
    __simple_getter_callback__ = _my_custom_load
class GetterTestCase(unittest.TestCase):
""" test getter """
def test_random_name_v2_0(self):
"""
"""
path = get_test_data_folder(
version='2.0',
which='random_file_name'
)
path = os.path.join(path, 'test_random.json')
# should not raise ValueError
app = App.create(path)
def test_random_name_v1_2(self):
"""
"""
path = get_test_data_folder(
version='1.2',
which='random_file_name'
)
path = os.path.join(path, 'test_random.json')
# should not raise ValueError
app = App.create(path)
def test_local_path_with_custome_getter(self):
""" make sure path would be assigned when
passing a getter class
"""
cls = UrlGetter
path = get_test_data_folder(
version='2.0',
which='random_file_name'
)
path = os.path.join(path, 'test_random.json')
# should not raise errors
app = App.load(path, getter=cls)
def test_dict_getter_v1_2(self):
""" make sure 'DictGetter' works the same as 'LocalGetter'
for Swagger 1.2
"""
#
# loading via DictGetter
#
path = get_test_data_folder(
version='1.2',
which='wordnik'
)
path_resource_list = os.path.join(path, 'resource_list.json')
path_pet = os.path.join(path, 'pet.json')
path_store = os.path.join(path, 'store.json')
path_user = os.path.join(path, 'user.json')
with open(path_resource_list, 'r') as f:
resource_list = json.loads(f.read())
with open(path_pet, 'r') as f:
pet = json.loads(f.read())
with open(path_store, 'r') as f:
store = json.loads(f.read())
with open(path_user, 'r') as f:
user = json.loads(f.read())
getter = DictGetter([
path_resource_list,
path_pet,
path_user,
path_store,
], {
path_resource_list: resource_list,
path_pet: pet,
path_store: store,
path_user: user,
})
app = App.load(path, resolver=Resolver(default_getter=getter))
app.prepare()
# make sure it produce the same App in default way
self.assertEqual(sorted(_diff_(app.dump(), App.create(path).dump())), [])
#
# different path, mocking an url
#
getter = DictGetter([
'http://petstore.com',
'http://petstore.com/pet.json',
'http://petstore.com/user.json',
'http://petstore.com/store.json',
], {
'http://petstore.com': resource_list,
'http://petstore.com/pet.json': pet,
'http://petstore.com/store.json': store,
'http://petstore.com/user.json': user
})
app = App.load('http://petstore.com', resolver=Resolver(default_getter=getter))
app.prepare()
# make sure it produce the same App in default way
self.assertEqual(sorted(_diff_(app.dump(), App.create(path).dump(), exclude=['$ref'])), [])
#
# provide empty path
#
getter = DictGetter([
'',
'pet.json',
'user.json',
'store.json',
], {
'': resource_list,
'pet.json': pet,
'store.json': store,
'user.json': user
})
app = App.load('http://petstore.com', resolver=Resolver(default_getter=getter))
app.prepare()
# make sure it produce the same App in default way
self.assertEqual(sorted(_diff_(app.dump(), App.create(path).dump(), exclude=['$ref'])), [])
def test_dict_getter_v2_0(self):
    """ make sure 'DictGetter' works the same as 'LocalGetter'
    for Swagger 2.0
    """
    folder = get_test_data_folder(version='2.0', which='wordnik')
    origin_app = App.create(folder)

    # read the single-file Swagger 2.0 spec once
    with open(os.path.join(folder, 'swagger.json'), 'r') as f:
        spec = json.loads(f.read())

    #
    # loading via DictGetter keyed by the real file path
    #
    getter = DictGetter([folder], {os.path.join(folder, 'swagger.json'): spec})
    app = App.load(folder, resolver=Resolver(default_getter=getter))
    app.prepare()
    # should produce the same App as the default loading path
    self.assertEqual(sorted(_diff_(app.dump(), origin_app.dump())), [])

    #
    # loading via a wrong (empty) path: fine as long as all internal
    # $ref values are relative, so exclude $ref from the diff
    #
    getter = DictGetter([''], {'': spec})
    app = App.load('', resolver=Resolver(default_getter=getter))
    app.prepare()
    self.assertEqual(sorted(_diff_(app.dump(), origin_app.dump(), exclude=['$ref'])), [])

    #
    # faking an http url
    #
    url = 'https://petstore.com'
    getter = DictGetter([url], {url: spec})
    app = App.load(url, resolver=Resolver(default_getter=getter))
    app.prepare()
    self.assertEqual(sorted(_diff_(app.dump(), origin_app.dump(), exclude=['$ref'])), [])
def test_simple_getter_callback(self):
    """ make sure __simple_getter_callback__ is called """
    spec_file = os.path.join(
        get_test_data_folder(version='2.0', which='random_file_name'),
        'test_random.json')
    # the custom getter raises its own exception type from the callback,
    # which should propagate out of App.load unchanged
    self.assertRaises(_MyCustomException, App.load, spec_file, getter=_MyCustomGetter)
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gym
import os
os.environ['MUJOCO_GL'] = 'osmesa'
# import roboverse
# from rlkit.data_management.awr_env_replay_buffer import AWREnvReplayBuffer
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.data_management.split_buffer import SplitReplayBuffer
from rlkit.envs.wrappers import NormalizedBoxEnv, StackObservationEnv, RewardWrapperEnv
import rlkit.torch.pytorch_util as ptu
from rlkit.samplers.data_collector import MdpPathCollector, ObsDictPathCollector
from rlkit.samplers.data_collector.step_collector import MdpStepCollector
from rlkit.torch.networks import ConcatMlp, Mlp
from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic, GaussianMixturePolicy, GaussianPolicy
from rlkit.torch.sac.awac_trainer import AWACTrainer
from rlkit.torch.torch_rl_algorithm import (
TorchBatchRLAlgorithm,
TorchOnlineRLAlgorithm,
)
import gym_point
import gym
import adept_envs
from rlkit.demos.source.hdf5_path_loader import HDF5PathLoader
from rlkit.demos.source.mdp_path_loader import MDPPathLoader
# from rlkit.visualization.video import save_paths, VideoSaveFunction
from multiworld.core.flat_goal_env import FlatGoalEnv
from multiworld.core.image_env import ImageEnv
from multiworld.core.gym_to_multi_env import GymToMultiEnv
import torch
import numpy as np
from torchvision.utils import save_image
from rlkit.exploration_strategies.base import \
PolicyWrappedWithExplorationStrategy
from rlkit.exploration_strategies.gaussian_and_epsilon_strategy import GaussianAndEpsilonStrategy
from rlkit.exploration_strategies.ou_strategy import OUStrategy
import os.path as osp
from rlkit.core import logger
from rlkit.util.io import load_local_or_remote_file
import pickle
from rlkit.samplers.rollout_functions import rollout
# from rlkit.envs.images import Renderer, InsertImageEnv, EnvRenderer
from rlkit.envs.make_env import make
# Per-environment configuration merged into the experiment `variant` by
# experiment(): rollout limits, exploration step counts, and descriptors of
# demonstration / off-policy datasets (consumed when the variant sets
# 'add_env_demos' / 'add_env_offpolicy_data').
ENV_PARAMS = {
    'HalfCheetah-v2': {
        'num_expl_steps_per_train_loop': 1000,
        'max_path_length': 1000,
        # expert demonstrations
        'env_demo_path': dict(
            path="demos/icml2020/mujoco/hc_action_noise_15.npy",
            obs_dict=False,
            is_demo=True,
        ),
        # suboptimal off-policy data, 90/10 train/validation split
        'env_offpolicy_data_path': dict(
            path="demos/icml2020/mujoco/hc_off_policy_15_demos_100.npy",
            obs_dict=False,
            is_demo=False,
            train_split=0.9,
        ),
    },
    'Ant-v2': {
        'num_expl_steps_per_train_loop': 1000,
        'max_path_length': 1000,
        'env_demo_path': dict(
            path="demos/icml2020/mujoco/ant_action_noise_15.npy",
            obs_dict=False,
            is_demo=True,
        ),
        'env_offpolicy_data_path': dict(
            path="demos/icml2020/mujoco/ant_off_policy_15_demos_100.npy",
            obs_dict=False,
            is_demo=False,
            train_split=0.9,
        ),
    },
    'Walker2d-v2': {
        'num_expl_steps_per_train_loop': 1000,
        'max_path_length': 1000,
        'env_demo_path': dict(
            path="demos/icml2020/mujoco/walker_action_noise_15.npy",
            obs_dict=False,
            is_demo=True,
        ),
        'env_offpolicy_data_path': dict(
            path="demos/icml2020/mujoco/walker_off_policy_15_demos_100.npy",
            obs_dict=False,
            is_demo=False,
            train_split=0.9,
        ),
    },
    'SawyerRigGrasp-v0': {
        'env_id': 'SawyerRigGrasp-v0',
        # 'num_expl_steps_per_train_loop': 1000,
        'max_path_length': 50,
        # 'num_epochs': 1000,
    },
    # Adroit dexterous-hand tasks with sparse (binary) rewards; demos use
    # dict observations (obs_dict=True).
    'pen-binary-v0': {
        'env_id': 'pen-binary-v0',
        'max_path_length': 200,
        'env_demo_path': dict(
            path="demos/icml2020/hand/pen2_sparse.npy",
            obs_dict=True,
            is_demo=True,
        ),
        'env_offpolicy_data_path': dict(
            path="demos/icml2020/hand/pen_bc_sparse4.npy",
            obs_dict=False,
            is_demo=False,
            train_split=0.9,
        ),
    },
    'door-binary-v0': {
        'env_id': 'door-binary-v0',
        'max_path_length': 200,
        'env_demo_path': dict(
            path="demos/icml2020/hand/door2_sparse.npy",
            obs_dict=True,
            is_demo=True,
        ),
        'env_offpolicy_data_path': dict(
            path="demos/icml2020/hand/door_bc_sparse4.npy",
            obs_dict=False,
            is_demo=False,
            train_split=0.9,
        ),
    },
    'relocate-binary-v0': {
        'env_id': 'relocate-binary-v0',
        'max_path_length': 200,
        'env_demo_path': dict(
            path="demos/icml2020/hand/relocate2_sparse.npy",
            obs_dict=True,
            is_demo=True,
        ),
        'env_offpolicy_data_path': dict(
            path="demos/icml2020/hand/relocate_bc_sparse4.npy",
            obs_dict=False,
            is_demo=False,
            train_split=0.9,
        ),
    },
    'PointPlayEnv-v0': {
        'env_id': 'PointPlayEnv-v0',
        'max_path_length': 100,
    },
    # Franka kitchen variants: only the env id is overridden.
    'franka_slide-v1': {
        'env_id': 'franka_slide-v1',
    },
    'franka_microwave_cabinet_slider_resetfree-v1': {
        'env_id': 'franka_microwave_cabinet_slider_resetfree-v1',
    },
    'SliderResetFree-v0': {
        'env_id': 'SliderResetFree-v0'
    },
    'SliderCabinetResetFree-v0': {
        'env_id': 'SliderCabinetResetFree-v0'
    },
    'SliderCabinetKnobResetFree-v0': {
        'env_id': 'SliderCabinetKnobResetFree-v0'
    },
    'SliderCabinetResetFreeNew-v0': {
        'env_id': 'SliderCabinetResetFreeNew-v0'
    }
}
def resume(variant):
    """Load a saved algorithm snapshot for resuming a run.

    Reads the checkpoint named by variant['pretrained_algorithm_path'];
    tensors are mapped onto CUDA (assumes a GPU is available).
    """
    checkpoint_path = variant.get("pretrained_algorithm_path")
    return load_local_or_remote_file(checkpoint_path, map_location="cuda")
def process_args(variant):
    """Shrink run sizes in-place when variant['debug'] is truthy.

    Caps path length, batch size, epoch/train-loop counts, and the three
    pretraining step counts in variant['trainer_kwargs'] so a debug run
    finishes quickly. No-op when debug is off. Returns None.
    """
    if not variant.get("debug", False):
        return
    variant['max_path_length'] = 50
    variant['batch_size'] = 5
    variant['num_epochs'] = 5
    variant['num_trains_per_train_loop'] = 10
    # Cap each pretraining phase at 10 steps (missing keys count as 0).
    trainer_kwargs = variant['trainer_kwargs']
    for key in ('bc_num_pretrain_steps',
                'q_num_pretrain1_steps',
                'q_num_pretrain2_steps'):
        trainer_kwargs[key] = min(10, trainer_kwargs.get(key, 0))
def experiment(variant):
    """Set up and run an AWAC-style RL experiment configured by `variant`.

    Builds exploration/evaluation envs, Q-networks, policies, replay
    buffers and the trainer; optionally loads demonstrations and/or
    pretrained weights, runs pretraining phases and post-hoc evaluation
    passes, then (unless variant['train_rl'] is False) trains.
    """
    # if variant.get("pretrained_algorithm_path", False):
    #     resume(variant)
    #     return

    # ---- environments ----------------------------------------------------
    normalize_env = variant.get('normalize_env', True)
    env_id = variant.get('env_id', None)
    env_params = ENV_PARAMS.get(env_id, {})
    # Per-env defaults (max_path_length, demo paths, ...) override `variant`.
    variant.update(env_params)
    env_class = variant.get('env_class', None)
    env_kwargs = variant.get('env_kwargs', {})
    eval_env_kwargs = variant.get('eval_env_kwargs', env_kwargs)
    expl_env = make(env_id, env_class, env_kwargs, normalize_env)
    if variant.get('expl_eval_same', True):
        # Share a single env instance between exploration and evaluation.
        eval_env = expl_env
    else:
        eval_env = make(env_id, env_class, eval_env_kwargs, normalize_env)

    # Append per-env demo / off-policy datasets to the path-loader config.
    if variant.get('add_env_demos', False):
        variant["path_loader_kwargs"]["demo_paths"].append(variant["env_demo_path"])
    if variant.get('add_env_offpolicy_data', False):
        variant["path_loader_kwargs"]["demo_paths"].append(variant["env_offpolicy_data_path"])

    path_loader_kwargs = variant.get("path_loader_kwargs", {})
    stack_obs = path_loader_kwargs.get("stack_obs", 1)
    if stack_obs > 1:
        # Frame-stacking wrapper applied to both envs.
        expl_env = StackObservationEnv(expl_env, stack_obs=stack_obs)
        eval_env = StackObservationEnv(eval_env, stack_obs=stack_obs)

    obs_dim = expl_env.observation_space.low.size
    action_dim = eval_env.action_space.low.size
    # NOTE(review): env_info_sizes is computed but never used below.
    if hasattr(expl_env, 'info_sizes'):
        env_info_sizes = expl_env.info_sizes
    else:
        env_info_sizes = dict()

    # ---- networks --------------------------------------------------------
    qf_kwargs = variant.get("qf_kwargs", {})
    rnd_kwargs = variant.get("rnd_kwargs", qf_kwargs)
    rnd_size = variant.get("rnd_size", 32)
    qf1 = ConcatMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        **qf_kwargs
    )
    qf2 = ConcatMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        **qf_kwargs
    )
    target_qf1 = ConcatMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        **qf_kwargs
    )
    target_qf2 = ConcatMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        **qf_kwargs
    )
    # RND (random network distillation) nets take half the observation —
    # assumes the observation concatenates state and goal halves; TODO confirm.
    rnd_net1 = Mlp(
        input_size=obs_dim // 2,
        output_size=rnd_size,
        **rnd_kwargs
    )
    rnd_net2 = Mlp(
        input_size=obs_dim // 2,
        output_size=rnd_size,
        **rnd_kwargs
    )

    # ---- policies --------------------------------------------------------
    # policy_class may be given as a class or as one of these string names.
    policy_class = variant.get("policy_class", TanhGaussianPolicy)
    if policy_class == 'GaussianMixturePolicy':
        policy_class = GaussianMixturePolicy
    elif policy_class == 'TanhGaussianPolicy':
        policy_class = TanhGaussianPolicy
    elif policy_class == 'GaussianPolicy':
        policy_class = GaussianPolicy
    policy_kwargs = variant['policy_kwargs']
    policy_path = variant.get("policy_path", False)
    if policy_path:
        policy = load_local_or_remote_file(policy_path)
    else:
        policy = policy_class(
            obs_dim=obs_dim,
            action_dim=action_dim,
            **policy_kwargs,
        )
    # Goal proposer maps half-observation -> half-observation (state -> goal);
    # presumably matches the RND nets' half-obs convention — TODO confirm.
    goal_model = policy_class(
        obs_dim=int(obs_dim // 2),
        action_dim=int(obs_dim // 2),
        **policy_kwargs,
    )
    buffer_policy_path = variant.get("buffer_policy_path", False)
    if buffer_policy_path:
        buffer_policy = load_local_or_remote_file(buffer_policy_path)
    else:
        buffer_policy_class = variant.get("buffer_policy_class", policy_class)
        buffer_policy = buffer_policy_class(
            obs_dim=obs_dim,
            action_dim=action_dim,
            **variant.get("buffer_policy_kwargs", policy_kwargs),
        )

    # ---- data collectors -------------------------------------------------
    eval_policy = MakeDeterministic(policy)
    eval_path_collector = MdpPathCollector(
        eval_env,
        eval_policy,
        name='eval',
        render=variant.get('render_eval', False)
    )

    expl_policy = policy
    exploration_kwargs = variant.get('exploration_kwargs', {})
    if exploration_kwargs:
        if exploration_kwargs.get("deterministic_exploration", False):
            expl_policy = MakeDeterministic(policy)
        exploration_strategy = exploration_kwargs.get("strategy", None)
        if exploration_strategy is None:
            pass
        elif exploration_strategy == 'ou':
            es = OUStrategy(
                action_space=expl_env.action_space,
                max_sigma=exploration_kwargs['noise'],
                min_sigma=exploration_kwargs['noise'],
            )
            expl_policy = PolicyWrappedWithExplorationStrategy(
                exploration_strategy=es,
                policy=expl_policy,
            )
        elif exploration_strategy == 'gauss_eps':
            es = GaussianAndEpsilonStrategy(
                action_space=expl_env.action_space,
                max_sigma=exploration_kwargs['noise'],
                min_sigma=exploration_kwargs['noise'],  # constant sigma
                epsilon=0,
            )
            expl_policy = PolicyWrappedWithExplorationStrategy(
                exploration_strategy=es,
                policy=expl_policy,
            )
        else:
            # NOTE(review): bare name — raises NameError for unknown
            # strategies; `raise ValueError(exploration_strategy)` would be
            # clearer.
            error

    # ---- replay buffers --------------------------------------------------
    main_replay_buffer_kwargs=dict(
        max_replay_buffer_size=variant['replay_buffer_size'],
        env=expl_env,
    )
    replay_buffer_kwargs = dict(
        max_replay_buffer_size=variant['replay_buffer_size'],
        env=expl_env,
    )
    replay_buffer = variant.get('replay_buffer_class', EnvReplayBuffer)(
        **main_replay_buffer_kwargs,
    )
    if variant.get('use_validation_buffer', False):
        # Wrap train + validation buffers in a 90/10 splitter.
        train_replay_buffer = replay_buffer
        validation_replay_buffer = variant.get('replay_buffer_class', EnvReplayBuffer)(
            **main_replay_buffer_kwargs,
        )
        replay_buffer = SplitReplayBuffer(train_replay_buffer, validation_replay_buffer, 0.9)

    # ---- trainer and algorithm -------------------------------------------
    trainer_class = variant.get("trainer_class", AWACTrainer)
    trainer = trainer_class(
        env=eval_env,
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        target_qf1=target_qf1,
        target_qf2=target_qf2,
        buffer_policy=buffer_policy,
        rnd_net1=rnd_net1,
        rnd_net2=rnd_net2,
        goal_model=goal_model,
        **variant['trainer_kwargs']
    )
    if variant['collection_mode'] == 'online':
        # Step-based collection interleaved with training.
        expl_path_collector = MdpStepCollector(
            expl_env,
            policy,
        )
        algorithm = TorchOnlineRLAlgorithm(
            trainer=trainer,
            exploration_env=expl_env,
            evaluation_env=eval_env,
            exploration_data_collector=expl_path_collector,
            evaluation_data_collector=eval_path_collector,
            replay_buffer=replay_buffer,
            max_path_length=variant['max_path_length'],
            batch_size=variant['batch_size'],
            num_epochs=variant['num_epochs'],
            num_eval_steps_per_epoch=variant['num_eval_steps_per_epoch'],
            num_expl_steps_per_train_loop=variant['num_expl_steps_per_train_loop'],
            num_trains_per_train_loop=variant['num_trains_per_train_loop'],
            min_num_steps_before_training=variant['min_num_steps_before_training'],
        )
    else:
        # Whole-path (batch) collection.
        expl_path_collector = MdpPathCollector(
            expl_env,
            expl_policy,
            name='expl',
            render=variant.get('render_expl', False)
        )
        algorithm = TorchBatchRLAlgorithm(
            trainer=trainer,
            exploration_env=expl_env,
            evaluation_env=eval_env,
            exploration_data_collector=expl_path_collector,
            evaluation_data_collector=eval_path_collector,
            replay_buffer=replay_buffer,
            max_path_length=variant['max_path_length'],
            batch_size=variant['batch_size'],
            num_epochs=variant['num_epochs'],
            num_eval_steps_per_epoch=variant['num_eval_steps_per_epoch'],
            num_expl_steps_per_train_loop=variant['num_expl_steps_per_train_loop'],
            num_trains_per_train_loop=variant['num_trains_per_train_loop'],
            min_num_steps_before_training=variant['min_num_steps_before_training'],
        )
    algorithm.to(ptu.device)

    # Optional flags poked directly onto the exploration collector.
    if variant.get('goal_select', False):
        expl_path_collector.goal_select = True
    if variant.get('random_goal', False):
        expl_path_collector.random_goal = True
    if variant.get('use_densities', False):
        expl_path_collector.use_densities = True
    expl_path_collector._replay_buffer=replay_buffer
    expl_path_collector._algo = algorithm

    demo_train_buffer = EnvReplayBuffer(
        **replay_buffer_kwargs,
    )
    demo_test_buffer = EnvReplayBuffer(
        **replay_buffer_kwargs,
    )

    # ---- optional video saving -------------------------------------------
    # NOTE(review): VideoSaveFunction / ImageEnv path — the import of
    # VideoSaveFunction is commented out at the top of this file, so
    # enabling save_video raises NameError as written.
    if variant.get("save_video", False):
        if variant.get("presampled_goals", None):
            variant['image_env_kwargs']['presampled_goals'] = load_local_or_remote_file(variant['presampled_goals']).item()

        # NOTE(review): unused helper; it builds img_env but returns None,
        # and EnvRenderer/InsertImageEnv imports are commented out above.
        def get_img_env(env):
            renderer = EnvRenderer(**variant["renderer_kwargs"])
            img_env = InsertImageEnv(GymToMultiEnv(env), renderer=renderer)

        image_eval_env = ImageEnv(GymToMultiEnv(eval_env), **variant["image_env_kwargs"])
        # image_eval_env = get_img_env(eval_env)
        image_eval_path_collector = ObsDictPathCollector(
            image_eval_env,
            eval_policy,
            observation_key="state_observation",
        )
        image_expl_env = ImageEnv(GymToMultiEnv(expl_env), **variant["image_env_kwargs"])
        # image_expl_env = get_img_env(expl_env)
        image_expl_path_collector = ObsDictPathCollector(
            image_expl_env,
            expl_policy,
            observation_key="state_observation",
        )
        video_func = VideoSaveFunction(
            image_eval_env,
            variant,
            image_expl_path_collector,
            image_eval_path_collector,
        )
        algorithm.post_train_funcs.append(video_func)

    # NOTE(review): save_paths import is also commented out at the top of
    # this file, so this branch raises NameError if enabled.
    if variant.get('save_paths', False):
        algorithm.post_train_funcs.append(save_paths)

    # ---- demo loading ----------------------------------------------------
    if variant.get('load_demos', False):
        path_loader_class = variant.get('path_loader_class', MDPPathLoader)
        path_loader = path_loader_class(trainer,
            replay_buffer=replay_buffer,
            demo_train_buffer=demo_train_buffer,
            demo_test_buffer=demo_test_buffer,
            **path_loader_kwargs
        )
        path_loader.load_demos()
    if variant.get('load_env_dataset_demos', False):
        # d4rl-style: pull the dataset straight from the environment.
        path_loader_class = variant.get('path_loader_class', HDF5PathLoader)
        path_loader = path_loader_class(trainer,
            replay_buffer=replay_buffer,
            demo_train_buffer=demo_train_buffer,
            demo_test_buffer=demo_test_buffer,
            **path_loader_kwargs
        )
        path_loader.load_demos(expl_env.get_dataset())
    if variant.get('save_initial_buffers', False):
        buffers = dict(
            replay_buffer=replay_buffer,
            demo_train_buffer=demo_train_buffer,
            demo_test_buffer=demo_test_buffer,
        )
        buffer_path = osp.join(logger.get_snapshot_dir(), 'buffers.p')
        pickle.dump(buffers, open(buffer_path, "wb"))

    # ---- load pretrained weights -----------------------------------------
    if variant.get('pretrained_rl_path', False):
        data = torch.load(variant.get('pretrained_rl_path', False))
        state_dict = data['trainer/policy'].state_dict()
        algorithm.trainer.policy.load_state_dict(state_dict)
        state_dict = data['trainer/qf1'].state_dict()
        algorithm.trainer.qf1.load_state_dict(state_dict)
        state_dict = data['trainer/qf2'].state_dict()
        algorithm.trainer.qf2.load_state_dict(state_dict)
        state_dict = data['trainer/target_qf1'].state_dict()
        algorithm.trainer.target_qf1.load_state_dict(state_dict)
        state_dict = data['trainer/target_qf2'].state_dict()
        algorithm.trainer.target_qf2.load_state_dict(state_dict)
        state_dict = data['trainer/buffer_policy'].state_dict()
        algorithm.trainer.buffer_policy.load_state_dict(state_dict)
        state_dict = data['exploration/policy'].state_dict()
        algorithm.expl_data_collector._policy.load_state_dict(state_dict)
        state_dict = data['evaluation/policy'].state_dict()
        algorithm.eval_data_collector._policy.load_state_dict(state_dict)
        state_dict = data['trainer/goal_model'].state_dict()
        algorithm.trainer.goal_model.load_state_dict(state_dict)

        # Loading optimizers
        algorithm.trainer.optimizers[algorithm.trainer.policy].load_state_dict(
            data['trainer/optimizers'][data['trainer/policy']].state_dict())
        algorithm.trainer.optimizers[algorithm.trainer.goal_model].load_state_dict(
            data['trainer/optimizers'][data['trainer/goal_model']].state_dict())
        print("LOADED IN PRETRAINED PATH")

    # ---- pretraining phases ----------------------------------------------
    if variant.get('pretrain_buffer_policy', False):
        trainer.pretrain_policy_with_bc(
            buffer_policy,
            replay_buffer.train_replay_buffer,
            replay_buffer.validation_replay_buffer,
            10000,
            label="buffer",
        )
    if variant.get('pretrain_policy', False):
        trainer.pretrain_policy_with_bc(
            policy,
            demo_train_buffer,
            demo_test_buffer,
            trainer.bc_num_pretrain_steps,
        )
    if variant.get('pretrain_goal_model', False):
        trainer.pretrain_goalproposer_with_bc(
            goal_model,
            demo_train_buffer,
            demo_test_buffer,
            trainer.bc_goal_num_pretrain_steps,
        )
    if variant.get('pretrain_rl', False):
        trainer.pretrain_q_with_bc_data()
        # Optionally widen the policy's std after pretraining by bumping
        # its log-std logits.
        std_bump = variant.get('std_bump', 0.)
        state_dict = algorithm.trainer.policy.state_dict()
        state_dict['log_std_logits'] += std_bump
        algorithm.trainer.policy.load_state_dict(state_dict)

    if variant.get('save_pretrained_algorithm', False):
        p_path = osp.join(logger.get_snapshot_dir(), 'pretrain_algorithm.p')
        pt_path = osp.join(logger.get_snapshot_dir(), 'pretrain_algorithm.pt')
        data = algorithm._get_snapshot()
        # NOTE(review): 'algorithm' is added then immediately deleted, so it
        # never ends up in the saved snapshot; env entries are stripped too.
        data['algorithm'] = algorithm
        del data['algorithm']
        del data['exploration/env']
        del data['evaluation/env']
        # NOTE(review): the open() handles are never closed explicitly.
        torch.save(data, open(pt_path, "wb"))
        torch.save(data, open(p_path, "wb"))

    # ---- post-hoc evaluation: per-goal-pair rollouts ----------------------
    # Code for post-hoc evaluation
    if variant.get('post_eval', False):
        load_directory = variant.get('load_directory', None)
        import os
        from pathlib import Path
        files = sorted(Path(load_directory).iterdir(), key=os.path.getmtime)
        diagnostics = []
        num_eval_paths = variant.get('num_eval_paths', 1)
        eval_env.wrapped_env._eval_mode = True
        from rlkit.samplers.rollout_functions import rollout
        import numpy as np
        curr_idx_f = 0
        eval_skip = variant.get('eval_skip', 10)
        starting_idx = variant.get('starting_idx', 0)
        for f in files:
            # Only evaluate itr_*.pkl checkpoints, every `eval_skip`-th one.
            if ('pkl' not in f.name) or ('itr' not in f.name) or ('diagnostics' in f.name):
                continue
            if curr_idx_f % eval_skip != 0:
                curr_idx_f += 1
                continue
            print("LOADING %s"%(f.name))
            # Load directory
            data = torch.load(f)
            state_dict = data['trainer/policy'].state_dict()
            algorithm.trainer.policy.load_state_dict(state_dict)
            state_dict = data['trainer/qf1'].state_dict()
            algorithm.trainer.qf1.load_state_dict(state_dict)
            state_dict = data['trainer/qf2'].state_dict()
            algorithm.trainer.qf2.load_state_dict(state_dict)
            state_dict = data['trainer/target_qf1'].state_dict()
            algorithm.trainer.target_qf1.load_state_dict(state_dict)
            state_dict = data['trainer/target_qf2'].state_dict()
            algorithm.trainer.target_qf2.load_state_dict(state_dict)
            state_dict = data['trainer/buffer_policy'].state_dict()
            algorithm.trainer.buffer_policy.load_state_dict(state_dict)
            state_dict = data['exploration/policy'].state_dict()
            algorithm.expl_data_collector._policy.load_state_dict(state_dict)
            state_dict = data['evaluation/policy'].state_dict()
            algorithm.eval_data_collector._policy.load_state_dict(state_dict)

            # Loading optimizers
            algorithm.trainer.optimizers[algorithm.trainer.policy].load_state_dict(
                data['trainer/optimizers'][data['trainer/policy']].state_dict())
            algorithm.trainer.optimizers[algorithm.trainer.goal_model].load_state_dict(
                data['trainer/optimizers'][data['trainer/goal_model']].state_dict())

            # Roll out between every (start, goal) pair allowed by the env's
            # goal adjacency matrix.
            paths = [[[] for _ in range(eval_env.wrapped_env.goal_matrix.shape[0])] for _ in range(eval_env.wrapped_env.goal_matrix.shape[0])]
            for start_idx in range(eval_env.wrapped_env.goal_matrix.shape[0]):
                eval_env.wrapped_env.commanded_start = start_idx
                viable_goals = np.where(eval_env.wrapped_env.goal_matrix[start_idx] > 0)[0]
                for end_idx in viable_goals:
                    eval_env.wrapped_env.commanded_goal = end_idx
                    for _ in range(num_eval_paths):
                        p = rollout(eval_env, algorithm.trainer.policy, max_path_length=variant.get("max_path_length", 200))
                        paths[start_idx][end_idx].append(p)
            diagnostics.append(paths)
            new_path = os.path.join(load_directory, 'stochastic_diagnostics_itr_%d.pkl'%(curr_idx_f + starting_idx))
            pickle.dump(paths, open(new_path, 'wb'))
            curr_idx_f += 1
        new_path = os.path.join(load_directory, 'stochastic_diagnostics_overall.pkl')
        pickle.dump(diagnostics, open(new_path, 'wb'))

    # ---- post-hoc evaluation: goal-reaching stats -------------------------
    # Code for post-hoc evaluation
    if variant.get('post_eval_reachability', False):
        load_directory = variant.get('load_directory', None)
        import os
        from pathlib import Path
        files = sorted(Path(load_directory).iterdir(), key=os.path.getmtime)
        diagnostics = []
        import numpy as np
        curr_idx_f = 0
        eval_skip = variant.get('eval_skip', 10)
        # import IPython
        # IPython.embed()
        for f in files:
            if ('pkl' not in f.name) or ('itr' not in f.name) or ('diagnostics' in f.name):
                continue
            if curr_idx_f % eval_skip != 0:
                curr_idx_f += 1
                continue
            print("LOADING %s"%(f.name))
            # Load directory
            data = torch.load(f)
            state_dict = data['trainer/policy'].state_dict()
            algorithm.trainer.policy.load_state_dict(state_dict)
            state_dict = data['trainer/qf1'].state_dict()
            algorithm.trainer.qf1.load_state_dict(state_dict)
            state_dict = data['trainer/qf2'].state_dict()
            algorithm.trainer.qf2.load_state_dict(state_dict)
            state_dict = data['trainer/target_qf1'].state_dict()
            algorithm.trainer.target_qf1.load_state_dict(state_dict)
            state_dict = data['trainer/target_qf2'].state_dict()
            algorithm.trainer.target_qf2.load_state_dict(state_dict)
            state_dict = data['trainer/buffer_policy'].state_dict()
            algorithm.trainer.buffer_policy.load_state_dict(state_dict)
            state_dict = data['exploration/policy'].state_dict()
            algorithm.expl_data_collector._policy.load_state_dict(state_dict)
            state_dict = data['evaluation/policy'].state_dict()
            algorithm.eval_data_collector._policy.load_state_dict(state_dict)

            # Loading optimizers
            algorithm.trainer.optimizers[algorithm.trainer.policy].load_state_dict(
                data['trainer/optimizers'][data['trainer/policy']].state_dict())
            algorithm.trainer.optimizers[algorithm.trainer.goal_model].load_state_dict(
                data['trainer/optimizers'][data['trainer/goal_model']].state_dict())

            stats = algorithm.eval_policy_goalreaching()
            diagnostics.append(stats)
            curr_idx_f += 1
        # new_path = os.path.join(load_directory, 'eval_goalreaching.pkl')
        if variant.get('cloud_launch', False):
            new_path = os.path.join(load_directory, 'eval_goalreaching_CORLPAPER.pkl')
        else:
            new_path = os.path.join(logger.get_snapshot_dir(), 'eval_goalreaching_CORLPAPER.pkl')
        pickle.dump(diagnostics, open(new_path, 'wb'))
        logger.save_itr_params(0, {'paths': diagnostics})

    # ---- train ------------------------------------------------------------
    if variant.get('train_rl', True):
        algorithm.train()
|
|
#!/usr/bin/env python
#
# Copyright 2016 The Android Open Source Project. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generate a C++ data table containing locale data."""
import collections
import glob
import os.path
import sys
def get_locale_parts(locale):
    """Split a locale into a (language, script, region) 3-tuple.

    Missing parts are returned as None; a 4-character second part is
    treated as a script, anything else as a region.
    """
    pieces = locale.split('_')
    if len(pieces) == 1:
        return (pieces[0], None, None)
    if len(pieces) == 2:
        if len(pieces[1]) == 4:  # 4 chars -> ISO 15924 script code
            return (pieces[0], pieces[1], None)
        return (pieces[0], None, pieces[1])
    assert len(pieces) == 3
    return tuple(pieces)
def read_likely_subtags(input_file_name):
    """Read and parse ICU's likelySubtags.txt.

    Returns a pair (likely_script_dict, representative_locales):
    likely_script_dict maps a script-less locale to its likely script,
    and representative_locales is a frozenset of locales chosen to
    represent their macro-regions.

    NOTE(review): uses the Python 2-only `unicode` builtin below; this
    function fails with NameError on Python 3.
    """
    with open(input_file_name) as input_file:
        likely_script_dict = {
            # Android's additions for pseudo-locales. These internal codes make
            # sure that the pseudo-locales would not match other English or
            # Arabic locales. (We can't use private-use ISO 15924 codes, since
            # they may be used by apps for other purposes.)
            "en_XA": "~~~A",
            "ar_XB": "~~~B",
        }
        representative_locales = {
            # Android's additions
            "en_Latn_GB",  # representative for en_Latn_001
            "es_Latn_MX",  # representative for es_Latn_419
            "es_Latn_US",  # representative for es_Latn_419 (not the best idea,
                           # but Android has been shipping with it for quite a
                           # while. Fortunately, MX < US, so if both exist, MX
                           # would be chosen.)
        }
        for line in input_file:
            # Strip the UTF-8 BOM and surrounding whitespace (Python 2 only).
            line = unicode(line, 'UTF-8').strip(u' \n\uFEFF').encode('UTF-8')
            if line.startswith('//'):
                continue
            # Data lines look like: from_locale{"to_locale"}
            if '{' in line and '}' in line:
                from_locale = line[:line.index('{')]
                to_locale = line[line.index('"')+1:line.rindex('"')]
                from_lang, from_scr, from_region = get_locale_parts(from_locale)
                _, to_scr, to_region = get_locale_parts(to_locale)
                if from_lang == 'und':
                    continue  # not very useful for our purposes
                if from_region is None and to_region != '001':
                    representative_locales.add(to_locale)
                if from_scr is None:
                    likely_script_dict[from_locale] = to_scr
    return likely_script_dict, frozenset(representative_locales)
# From packLanguageOrRegion() in ResourceTypes.cpp
def pack_language_or_region(inp, base):
    """Pack a language or region code into a two-byte tuple.

    Two-letter codes are stored as their raw character values; three-letter
    codes are packed into 15 bits (5 bits per letter relative to `base`)
    with the high bit of the first byte set as a marker.
    """
    if inp is None:
        return (0, 0)
    if len(inp) == 2:
        return ord(inp[0]), ord(inp[1])
    assert len(inp) == 3
    offset = ord(base)
    first, second, third = (ord(ch) - offset for ch in inp)
    return (0x80 | (third << 2) | (second >> 3),
            ((second << 5) | first) & 0xFF)
# From packLanguage() in ResourceTypes.cpp
def pack_language(language):
    """Pack a language code into a two-byte tuple (letters based at 'a')."""
    base_char = 'a'
    return pack_language_or_region(language, base_char)
# From packRegion() in ResourceTypes.cpp
def pack_region(region):
    """Pack a region code into a two-byte tuple (digits based at '0')."""
    base_char = '0'
    return pack_language_or_region(region, base_char)
def pack_to_uint32(locale):
    """Pack the language+region of a locale into a 32-bit unsigned integer.

    Layout (big-endian): language byte 0, language byte 1, region byte 0,
    region byte 1. The script part of the locale is ignored here.
    """
    lang, _, region = get_locale_parts(locale)
    lang_hi, lang_lo = pack_language(lang)
    region_hi, region_lo = pack_region(region)
    return (lang_hi << 24) | (lang_lo << 16) | (region_hi << 8) | region_lo
def dump_script_codes(all_scripts):
    """Dump the SCRIPT_CODES table.

    Emits a C++ array of 4-char script codes; LIKELY_SCRIPTS values index
    into this array. (Python 2 print statements.)
    """
    print 'const char SCRIPT_CODES[][4] = {'
    for index, script in enumerate(all_scripts):
        print " /* %-2d */ {'%c', '%c', '%c', '%c'}," % (
            index, script[0], script[1], script[2], script[3])
    print '};'
    print
def dump_script_data(likely_script_dict, all_scripts):
    """Dump the script data.

    Emits LIKELY_SCRIPTS, mapping a packed language+region (uint32) to an
    index into SCRIPT_CODES (uint8). (Python 2 print statements.)
    """
    print
    print 'const std::unordered_map<uint32_t, uint8_t> LIKELY_SCRIPTS({'
    for locale in sorted(likely_script_dict.keys()):
        script = likely_script_dict[locale]
        # Key: packed language+region; value: index into SCRIPT_CODES.
        print ' {0x%08Xu, %2du}, // %s -> %s' % (
            pack_to_uint32(locale),
            all_scripts.index(script),
            locale.replace('_', '-'),
            script)
    print '});'
def pack_to_uint64(locale):
    """Pack a full locale into a 64-bit unsigned integer.

    High 32 bits: packed language+region (see pack_to_uint32);
    low 32 bits: the four raw characters of the script code.
    """
    _, script, _ = get_locale_parts(locale)
    packed = pack_to_uint32(locale) << 32
    for shift, ch in zip((24, 16, 8, 0), script):
        packed |= ord(ch) << shift
    return packed
def dump_representative_locales(representative_locales):
    """Dump the set of representative locales.

    Emits REPRESENTATIVE_LOCALES as packed 64-bit values (see
    pack_to_uint64). (Python 2 print statements.)
    """
    print
    print 'std::unordered_set<uint64_t> REPRESENTATIVE_LOCALES({'
    for locale in sorted(representative_locales):
        # NOTE(review): the value is 64-bit but the format width is %08X;
        # values wider than 8 hex digits still print in full (width is a
        # minimum), they are just not zero-padded to 16 digits.
        print ' 0x%08Xllu, // %s' % (
            pack_to_uint64(locale),
            locale)
    print '});'
def read_and_dump_likely_data(icu_data_dir):
    """Read ICU's likely-subtags data and dump the derived C++ tables.

    Returns the likely-script dictionary so the caller can reuse it for
    the parent-data pass.
    """
    likely_subtags_txt = os.path.join(icu_data_dir, 'misc', 'likelySubtags.txt')
    likely_script_dict, representative_locales = read_likely_subtags(
        likely_subtags_txt)

    all_scripts = sorted(set(likely_script_dict.values()))
    # Script indices are stored in a uint8_t, so at most 256 scripts fit.
    assert len(all_scripts) <= 256

    dump_script_codes(all_scripts)
    dump_script_data(likely_script_dict, all_scripts)
    dump_representative_locales(representative_locales)
    return likely_script_dict
def read_parent_data(icu_data_dir):
    """Read locale parent data from ICU data files.

    Scans every */*.txt data file for a %%Parent declaration and returns a
    dict mapping locale -> parent locale.
    """
    parent_dict = {}
    for data_file in glob.glob(os.path.join(icu_data_dir, '*', '*.txt')):
        # The locale is the data file's base name.
        locale = os.path.splitext(os.path.basename(data_file))[0]
        with open(data_file) as input_file:
            for line in input_file:
                if '%%Parent' in line:
                    parent = line[line.index('"')+1:line.rindex('"')]
                    if locale in parent_dict:
                        # Different files shouldn't have different parent info
                        assert parent_dict[locale] == parent
                    else:
                        parent_dict[locale] = parent
                elif locale.startswith('ar_') and 'default{"latn"}' in line:
                    # Arabic parent overrides for ASCII digits. Since
                    # Unicode extensions are not supported in ResourceTypes,
                    # we will use ar-015 (Arabic, Northern Africa) instead
                    # of the more correct ar-u-nu-latn.
                    parent_dict[locale] = 'ar_015'
    return parent_dict
def get_likely_script(locale, likely_script_dict):
    """Return the likely script for a locale.

    A fully-qualified locale already names its script; otherwise the
    likely-script table is consulted for the full locale first, then for
    the bare language.
    """
    parts = locale.split('_')
    if len(parts) == 3:
        # lang_Script_Region: the script is explicit.
        return parts[1]
    if locale in likely_script_dict:
        return likely_script_dict[locale]
    return likely_script_dict[parts[0]]
def dump_parent_data(script_organized_dict):
    """Dump information for parents of locales.

    Emits one <SCRIPT>_PARENTS map (packed locale -> packed parent) per
    script, then the SCRIPT_PARENTS index table tying each script code to
    its map. (Python 2 print statements.)
    """
    sorted_scripts = sorted(script_organized_dict.keys())
    print
    for script in sorted_scripts:
        parent_dict = script_organized_dict[script]
        print ('const std::unordered_map<uint32_t, uint32_t> %s_PARENTS({'
            % script.upper())
        for locale in sorted(parent_dict.keys()):
            parent = parent_dict[locale]
            print ' {0x%08Xu, 0x%08Xu}, // %s -> %s' % (
                pack_to_uint32(locale),
                pack_to_uint32(parent),
                locale.replace('_', '-'),
                parent.replace('_', '-'))
        print '});'
        print
    # Index table: script code -> pointer to that script's parent map.
    print 'const struct {'
    print ' const char script[4];'
    print ' const std::unordered_map<uint32_t, uint32_t>* map;'
    print '} SCRIPT_PARENTS[] = {'
    for script in sorted_scripts:
        print " {{'%c', '%c', '%c', '%c'}, &%s_PARENTS}," % (
            script[0], script[1], script[2], script[3],
            script.upper())
    print '};'
def dump_parent_tree_depth(parent_dict):
    """Find and dump the depth of the parent tree.

    Emits MAX_PARENT_DEPTH: the length of the longest locale -> parent ->
    ... chain. (Python 2 print statements.)
    """
    max_depth = 1
    for locale, _ in parent_dict.items():
        depth = 1
        # Walk up the parent chain from this locale, counting levels.
        while locale in parent_dict:
            locale = parent_dict[locale]
            depth += 1
        max_depth = max(max_depth, depth)
    assert max_depth < 5  # Our algorithms assume small max_depth
    print
    print 'const size_t MAX_PARENT_DEPTH = %d;' % max_depth
def read_and_dump_parent_data(icu_data_dir, likely_script_dict):
    """Read ICU parent data, group it by likely script, and dump it."""
    parent_dict = read_parent_data(icu_data_dir)

    # Group locale -> parent entries by the locale's (likely) script;
    # entries whose parent is 'root' are dropped.
    script_organized_dict = collections.defaultdict(dict)
    for locale, parent in parent_dict.items():
        if parent == 'root':
            continue
        script = get_likely_script(locale, likely_script_dict)
        script_organized_dict[script][locale] = parent

    dump_parent_data(script_organized_dict)
    dump_parent_tree_depth(parent_dict)
def main():
    """Read the data files from ICU and dump the output to a C++ file."""
    # First command-line argument is the root of the Android source tree.
    source_root = sys.argv[1]
    icu_data_dir = os.path.join(
        source_root,
        'external', 'icu', 'icu4c', 'source', 'data')
    print '// Auto-generated by %s' % sys.argv[0]
    print
    # The likely-script data is needed to group the parent data by script.
    likely_script_dict = read_and_dump_likely_data(icu_data_dir)
    read_and_dump_parent_data(icu_data_dir, likely_script_dict)


if __name__ == '__main__':
    main()
|
|
#!/usr/bin/env python3
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'Simone Campagna'
import abc
class Expression(metaclass=abc.ABCMeta):
    """Abstract base for lazily-evaluated expression trees.

    Applying Python operators to Expression objects builds new Expression
    nodes instead of computing a result immediately.  Call ``bind(instance)``
    to attach the object the tree should be evaluated against, then
    ``get_value()`` to evaluate it.
    """

    def __init__(self):
        pass

    def _coerce_operand(self, other):
        """Return *other* unchanged if it is an Expression, else wrap it in
        a ConstExpression leaf."""
        if isinstance(other, Expression):
            return other
        else:
            return ConstExpression(other)

    def bind(self, instance):
        """Attach *instance* as the evaluation target.

        No-op here; overridden by nodes that hold operands or read
        instance state.
        """
        pass

    def __hash__(self):
        # __eq__ below is overridden to build an Eq node instead of
        # comparing, which would implicitly make the class unhashable;
        # hash on the string form instead.
        return hash(str(self))

    @abc.abstractmethod
    def get_value(self):
        """Evaluate this expression and return the resulting value."""
        pass

    @abc.abstractmethod
    def __str__(self):
        pass

    # --- operator overloads: each builds and returns a new tree node ---

    def __and__(self, other):
        return And(self, other)

    def __rand__(self, other):
        return And(other, self)

    def __or__(self, other):
        return Or(self, other)

    def __ror__(self, other):
        return Or(other, self)

    def __add__(self, other):
        return Add(self, other)

    def __radd__(self, other):
        return Add(other, self)

    def __sub__(self, other):
        return Sub(self, other)

    def __rsub__(self, other):
        return Sub(other, self)

    def __mul__(self, other):
        return Mul(self, other)

    def __rmul__(self, other):
        return Mul(other, self)

    # __div__/__rdiv__ belong to the Python 2 division protocol; they are
    # never invoked by Python 3 itself but are kept for compatibility.
    def __div__(self, other):
        return Div(self, other)

    def __rdiv__(self, other):
        return Div(other, self)

    def __truediv__(self, other):
        return TrueDiv(self, other)

    def __rtruediv__(self, other):
        return TrueDiv(other, self)

    def __floordiv__(self, other):
        # NOTE: "Flooriv" is the (misspelled) name of the '//' operator
        # class defined later in this module.
        return Flooriv(self, other)

    def __rfloordiv__(self, other):
        # BUG FIX: this hook was misnamed "__rflooriv__" (a name Python
        # never calls) and returned the undefined class "FloorDiv".
        return Flooriv(other, self)

    def __pow__(self, other):
        return Pow(self, other)

    def __rpow__(self, other):
        return Pow(other, self)

    def __mod__(self, other):
        return Mod(self, other)

    def __rmod__(self, other):
        return Mod(other, self)

    def __eq__(self, other):
        return Eq(self, other)

    def __ne__(self, other):
        return Ne(self, other)

    def __lt__(self, other):
        return Lt(self, other)

    def __le__(self, other):
        return Le(self, other)

    def __gt__(self, other):
        return Gt(self, other)

    def __ge__(self, other):
        return Ge(self, other)

    def __pos__(self):
        return Pos(self)

    def __neg__(self):
        return Neg(self)

    def __abs__(self):
        return Abs(self)

    def __invert__(self):
        return Not(self)

    def __repr__(self):
        return self.__str__()

    def __bool__(self):
        # Truth-testing a lazy expression would silently misbehave inside
        # `and`/`or`/`if`; force callers to use get_value() instead.
        raise NotImplementedError("Expression objects cannot be converted to bool")
class _Instance(Expression):
    """Base class for expressions that are evaluated against a bound object.

    `symbol` is the display name used by __str__; `instance` is the object
    supplied via bind() that subclass get_value() implementations read.
    """
    def __init__(self, symbol=None):
        self.symbol = symbol
        # Evaluation target; set by bind(), None until then.
        self.instance = None
    def bind(self, instance):
        # Overrides the no-op in Expression: remember the target object.
        self.instance = instance
    def __str__(self):
        # NOTE(review): returns None when symbol was not provided —
        # presumably callers always pass a symbol; confirm at call sites.
        return self.symbol
class AttributeGetter(_Instance):
    """Expression that reads a named attribute off the bound instance."""

    def __init__(self, attribute_name, symbol=None):
        super().__init__(symbol)
        self.attribute_name = attribute_name

    def get_value(self):
        # Raises AttributeError if the bound object lacks the attribute.
        return getattr(self.instance, self.attribute_name)
class InstanceGetter(_Instance):
    """Expression that evaluates to the bound instance itself."""

    def __init__(self, symbol=None):
        # BUG FIX: previously assigned self.symbol directly and skipped
        # _Instance.__init__, so self.instance stayed undefined until
        # bind() was called and get_value() raised AttributeError instead
        # of returning None on an unbound expression.
        super().__init__(symbol)

    def get_value(self):
        """Return the object supplied via bind() (None if unbound)."""
        return self.instance
class MethodCaller(_Instance):
    """Expression that calls a named method on the bound instance.

    Positional and keyword arguments for the call are fixed at
    construction time.
    """

    def __init__(self, method_name, method_p_args=None, method_n_args=None, symbol=None):
        super().__init__(symbol=symbol)
        self.method_name = method_name
        # Avoid mutable default arguments: materialize empty containers here.
        self.method_p_args = () if method_p_args is None else method_p_args
        self.method_n_args = {} if method_n_args is None else method_n_args

    def get_value(self):
        bound_method = getattr(self.instance, self.method_name)
        return bound_method(*self.method_p_args, **self.method_n_args)
class ConstExpression(Expression):
    """Leaf expression wrapping a constant value.

    Used by Expression._coerce_operand to lift plain Python values into
    the expression tree; bind() is inherited as a no-op.
    """
    def __init__(self, const_value):
        self.const_value = const_value
    def get_value(self):
        # Constants evaluate to themselves regardless of any bound instance.
        return self.const_value
    def __str__(self):
        return str(self.const_value)
class BinaryOperator(Expression):
    """Abstract expression node combining two sub-expressions.

    Subclasses set __symbol__ (used for display) and implement compute().
    """

    __symbol__ = '?'

    def __init__(self, left_operand, right_operand):
        self.left_operand = self._coerce_operand(left_operand)
        self.right_operand = self._coerce_operand(right_operand)

    def bind(self, instance):
        # Propagate the binding down both branches of the tree.
        for operand in (self.left_operand, self.right_operand):
            operand.bind(instance)

    def get_value(self):
        # Evaluate left before right, then combine.
        lhs = self.left_operand.get_value()
        rhs = self.right_operand.get_value()
        return self.compute(lhs, rhs)

    @abc.abstractmethod
    def compute(self, l, r):
        pass

    def __str__(self):
        return "({l} {s} {r})".format(l=self.left_operand, s=self.__symbol__, r=self.right_operand)
class UnaryOperator(Expression):
    """Abstract expression node wrapping a single sub-expression.

    Subclasses set __symbol__ (used for display) and implement compute().
    """

    __symbol__ = '?'

    def __init__(self, operand):
        self.operand = self._coerce_operand(operand)

    def bind(self, instance):
        # Propagate the binding into the wrapped sub-expression.
        self.operand.bind(instance)

    def get_value(self):
        inner = self.operand.get_value()
        return self.compute(inner)

    @abc.abstractmethod
    def compute(self, o):
        pass

    def __str__(self):
        return "({s} {o})".format(s=self.__symbol__, o=self.operand)
class And(BinaryOperator):
    """'&' expression; both operands are evaluated, result follows Python
    `and` value semantics (left if falsy, else right)."""

    __symbol__ = "&"

    def compute(self, l, r):
        if l:
            return r
        return l
class Or(BinaryOperator):
    """'|' expression; both operands are evaluated, result follows Python
    `or` value semantics (left if truthy, else right)."""

    __symbol__ = "|"

    def compute(self, l, r):
        if l:
            return l
        return r
class Add(BinaryOperator):
    """Binary '+' operator expression."""

    __symbol__ = "+"

    def compute(self, l, r):
        total = l + r
        return total
class Mul(BinaryOperator):
    """Binary '*' operator expression."""

    __symbol__ = "*"

    def compute(self, l, r):
        product = l * r
        return product
class Sub(BinaryOperator):
    """Binary '-' operator expression."""

    __symbol__ = "-"

    def compute(self, l, r):
        difference = l - r
        return difference
class Div(BinaryOperator):
    """Binary '/' operator expression (built by the legacy __div__ hooks)."""

    __symbol__ = "/"

    def compute(self, l, r):
        quotient = l / r
        return quotient
class TrueDiv(BinaryOperator):
    """Binary '/' (true division) operator expression."""

    __symbol__ = "/"

    def compute(self, l, r):
        quotient = l / r
        return quotient
class Flooriv(BinaryOperator):
    """Binary '//' (floor division) operator expression.

    NOTE: the class name is missing the "D" of "FloorDiv"; it is kept
    unchanged because Expression.__floordiv__ refers to it by this name.
    A correctly spelled alias is provided below.
    """

    __symbol__ = "//"

    def compute(self, l, r):
        return l // r


# Correctly spelled, backward-compatible alias.  The reflected-floordiv hook
# in Expression historically referenced "FloorDiv", which did not exist and
# would have raised NameError if ever invoked.
FloorDiv = Flooriv
class Pow(BinaryOperator):
    """Binary '**' (exponentiation) operator expression."""

    __symbol__ = "**"

    def compute(self, l, r):
        # BUG FIX: previously computed l * r (multiplication), so every
        # Pow expression silently evaluated to the wrong value.
        return l ** r
class Mod(BinaryOperator):
    """Binary '%' (modulo) operator expression."""

    __symbol__ = "%"

    def compute(self, l, r):
        remainder = l % r
        return remainder
class Eq(BinaryOperator):
    """Binary '==' comparison expression."""

    __symbol__ = "=="

    def compute(self, l, r):
        outcome = l == r
        return outcome
class Ne(BinaryOperator):
    """Binary '!=' comparison expression."""

    __symbol__ = "!="

    def compute(self, l, r):
        outcome = l != r
        return outcome
class Lt(BinaryOperator):
    """Binary '<' comparison expression."""

    __symbol__ = "<"

    def compute(self, l, r):
        outcome = l < r
        return outcome
class Le(BinaryOperator):
    """Binary '<=' comparison expression."""

    __symbol__ = "<="

    def compute(self, l, r):
        outcome = l <= r
        return outcome
class Gt(BinaryOperator):
    """Binary '>' comparison expression."""

    __symbol__ = ">"

    def compute(self, l, r):
        outcome = l > r
        return outcome
class Ge(BinaryOperator):
    """Binary '>=' comparison expression."""

    __symbol__ = ">="

    def compute(self, l, r):
        outcome = l >= r
        return outcome
class Pos(UnaryOperator):
    """Unary '+' operator expression."""

    __symbol__ = "+"

    def compute(self, o):
        value = +o
        return value
class Abs(UnaryOperator):
    """abs() operator expression (built by Expression.__abs__)."""

    def compute(self, o):
        # BUG FIX: previously returned +o (unary plus, apparently copied
        # from Pos), so negative operands were not made positive.
        return abs(o)

    def __str__(self):
        # Rendered as a function call rather than an infix symbol.
        return "abs({0})".format(self.operand)
class Neg(UnaryOperator):
    """Unary '-' (negation) operator expression."""

    __symbol__ = "-"

    def compute(self, o):
        value = -o
        return value
class Not(UnaryOperator):
    """Logical negation expression ('~' syntax, Python `not` semantics)."""

    __symbol__ = "~"

    def compute(self, o):
        negated = not o
        return negated
if __name__ == "__main__":
    # Smoke-test demo: build a deferred predicate over two attributes and
    # evaluate it against several objects.
    class MyClass(object):
        def __init__(self, a, b):
            self.alfa = a
            self.beta = b
    ALFA = AttributeGetter('alfa', 'ALFA')
    BETA = AttributeGetter('beta', 'BETA')
    # Only builds the expression tree; nothing is evaluated yet.
    e = (ALFA > 10) & (BETA < 3)
    print(e)
    x = MyClass(100, 100)
    y = MyClass(100, 0)
    z = MyClass(1, 1)
    e.bind(x)
    print(e.get_value())  # False: beta == 100 is not < 3
    e.bind(y)
    print(e.get_value())  # True: alfa > 10 and beta < 3
    e.bind(z)
    print(e.get_value())  # False: alfa == 1 is not > 10
|
|
import logging
from typing import List
from unittest import mock
import pandas as pd
import pytest
import great_expectations.exceptions as ge_exceptions
from great_expectations import DataContext
from great_expectations.core import (
ExpectationConfiguration,
ExpectationSuite,
ExpectationSuiteValidationResult,
ExpectationValidationResult,
)
from great_expectations.core.batch import RuntimeBatchRequest
from great_expectations.core.usage_statistics.usage_statistics import (
UsageStatisticsHandler,
)
from great_expectations.data_context import BaseDataContext
from great_expectations.data_context.types.base import (
DataContextConfig,
InMemoryStoreBackendDefaults,
)
from great_expectations.validator.validator import Validator
logger = logging.getLogger(__name__)

# pyspark is an optional dependency: fall back to DataFrame = None so the
# module still imports; Spark-dependent fixtures fail/skip at use time.
try:
    from pyspark.sql import DataFrame
except ImportError:
    DataFrame = None
    logger.debug(
        "Unable to load pyspark; install optional spark dependency for support."
    )
def build_in_memory_runtime_context():
    """Create a BaseDataContext backed entirely by in-memory stores.

    The context exposes two runtime datasources ("pandas_datasource" and
    "spark_datasource"), each with a RuntimeDataConnector whose batch
    identifiers are "id_key_0" and "id_key_1".
    """

    def _runtime_datasource_config(execution_engine_class_name):
        # The two datasources differ only in their execution engine.
        return {
            "execution_engine": {
                "class_name": execution_engine_class_name,
                "module_name": "great_expectations.execution_engine",
            },
            "class_name": "Datasource",
            "module_name": "great_expectations.datasource",
            "data_connectors": {
                "runtime_data_connector": {
                    "class_name": "RuntimeDataConnector",
                    "batch_identifiers": [
                        "id_key_0",
                        "id_key_1",
                    ],
                }
            },
        }

    data_context_config = DataContextConfig(
        datasources={
            "pandas_datasource": _runtime_datasource_config(
                "PandasExecutionEngine"
            ),
            "spark_datasource": _runtime_datasource_config(
                "SparkDFExecutionEngine"
            ),
        },
        expectations_store_name="expectations_store",
        validations_store_name="validations_store",
        evaluation_parameter_store_name="evaluation_parameter_store",
        checkpoint_store_name="checkpoint_store",
        store_backend_defaults=InMemoryStoreBackendDefaults(),
    )
    return BaseDataContext(project_config=data_context_config)
@pytest.fixture
def in_memory_runtime_context():
    """Fresh in-memory DataContext per test (nothing written to disk)."""
    return build_in_memory_runtime_context()
@pytest.fixture
def test_pandas_df():
    """Four-row, single-column ("Name") pandas DataFrame fixture."""
    names = ["Scott", "Jeff", "Thomas", "Ann"]
    return pd.DataFrame(data=[[name] for name in names], columns=["Name"])
@pytest.fixture
def test_spark_df(test_pandas_df, spark_session):
    """Spark DataFrame mirroring the pandas fixture (requires pyspark)."""
    df: DataFrame = spark_session.createDataFrame(data=test_pandas_df)
    return df
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_catch_exceptions_no_exceptions(
    mock_emit, in_memory_runtime_context, test_spark_df
):
    """With catch_exceptions=False and a valid suite, both expectations
    succeed and no exception info is recorded; no usage stats are emitted.
    """
    catch_exceptions: bool = False  # expect exceptions to be raised
    result_format: dict = {
        "result_format": "SUMMARY",
    }
    runtime_environment_arguments = {
        "catch_exceptions": catch_exceptions,
        "result_format": result_format,
    }
    suite: ExpectationSuite = in_memory_runtime_context.create_expectation_suite(
        "test_suite", overwrite_existing=True
    )
    expectation_configuration: ExpectationConfiguration
    expectation_meta: dict = {"Notes": "Some notes"}
    expectation_arguments_without_meta: dict
    expectation_arguments_column: dict = {
        "include_config": True,
        "column": "Name",  # use correct column to avoid error
    }
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_column
    )
    expectation_configuration = ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs=expectation_arguments_without_meta,
        meta=expectation_meta,
    )
    suite.add_expectation(expectation_configuration=expectation_configuration)
    expectation_arguments_table: dict = {
        "include_config": True,
        "value": 4,
    }
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_table
    )
    expectation_configuration = ExpectationConfiguration(
        expectation_type="expect_table_row_count_to_equal",
        kwargs=expectation_arguments_without_meta,
        meta=expectation_meta,
    )
    suite.add_expectation(expectation_configuration=expectation_configuration)
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name="spark_datasource",
        data_connector_name="runtime_data_connector",
        data_asset_name="insert_your_data_asset_name_here",
        runtime_parameters={"batch_data": test_spark_df},
        batch_identifiers={
            "id_key_0": "id_value_0",
            "id_key_1": "id_value_1",
        },
    )
    validator: Validator = in_memory_runtime_context.get_validator(
        batch_request=runtime_batch_request,
        expectation_suite=suite,
    )
    # Test calling "validator.validate()" explicitly.
    validator_validation: ExpectationSuiteValidationResult = validator.validate(
        **runtime_environment_arguments
    )
    results: List[ExpectationValidationResult] = validator_validation.results
    assert len(results) == 2
    result: ExpectationValidationResult
    for result in results:
        assert result.success
        # exception_info must be absent or empty on the success path.
        assert (
            "exception_traceback" not in result.exception_info
        ) or not result.exception_info["exception_traceback"]
        assert (
            "exception_message" not in result.exception_info
        ) or not result.exception_info["exception_message"]
    # Test calling "validator.expect_*" through "validator.validate_expectation()".
    expectation_parameters: dict
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_column
    )
    expectation_parameters = dict(
        **expectation_arguments_without_meta, **expectation_meta
    )
    result = validator.expect_column_values_to_not_be_null(**expectation_parameters)
    assert result.success
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_table
    )
    expectation_parameters = dict(
        **expectation_arguments_without_meta, **expectation_meta
    )
    result = validator.expect_table_row_count_to_equal(**expectation_parameters)
    assert result.success
    # In-Memory DataContext does not have UsageStatisticsHandler configured
    assert mock_emit.call_count == 0
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_catch_exceptions_exception_occurred_catch_exceptions_false(
    mock_emit, in_memory_runtime_context, test_spark_df
):
    """With catch_exceptions=False, a nonexistent column makes validation
    raise MetricResolutionError, while unrelated expectations on the same
    validator still validate successfully.
    """
    catch_exceptions: bool = False  # expect exceptions to be raised
    result_format: dict = {
        "result_format": "SUMMARY",
    }
    runtime_environment_arguments = {
        "catch_exceptions": catch_exceptions,
        "result_format": result_format,
    }
    suite: ExpectationSuite = in_memory_runtime_context.create_expectation_suite(
        "test_suite", overwrite_existing=True
    )
    expectation_configuration: ExpectationConfiguration
    expectation_meta: dict = {"Notes": "Some notes"}
    expectation_arguments_without_meta: dict
    expectation_arguments_column: dict = {
        "include_config": True,
        "column": "unknown_column",  # use intentionally incorrect column to force error in "MetricProvider" evaluations
    }
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_column
    )
    expectation_configuration = ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs=expectation_arguments_without_meta,
        meta=expectation_meta,
    )
    suite.add_expectation(expectation_configuration=expectation_configuration)
    expectation_arguments_table: dict = {
        "include_config": True,
        "value": 4,
    }
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_table
    )
    expectation_configuration = ExpectationConfiguration(
        expectation_type="expect_table_row_count_to_equal",
        kwargs=expectation_arguments_without_meta,
        meta=expectation_meta,
    )
    suite.add_expectation(expectation_configuration=expectation_configuration)
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name="spark_datasource",
        data_connector_name="runtime_data_connector",
        data_asset_name="insert_your_data_asset_name_here",
        runtime_parameters={"batch_data": test_spark_df},
        batch_identifiers={
            "id_key_0": "id_value_0",
            "id_key_1": "id_value_1",
        },
    )
    validator: Validator = in_memory_runtime_context.get_validator(
        batch_request=runtime_batch_request,
        expectation_suite=suite,
    )
    expected_exception_message: str = (
        'Error: The column "unknown_column" in BatchData does not exist.'
    )
    # Test calling "validator.validate()" explicitly.
    with pytest.raises(ge_exceptions.MetricResolutionError) as e:
        # noinspection PyUnusedLocal
        validator_validation: ExpectationSuiteValidationResult = validator.validate(
            **runtime_environment_arguments
        )
    assert e.value.message == expected_exception_message
    # Test calling "validator.expect_*" through "validator.validate_expectation()".
    expectation_parameters: dict
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_column
    )
    expectation_parameters = dict(
        **expectation_arguments_without_meta, **expectation_meta
    )
    with pytest.raises(ge_exceptions.MetricResolutionError) as e:
        # noinspection PyUnusedLocal
        result: ExpectationValidationResult = (
            validator.expect_column_values_to_not_be_null(**expectation_parameters)
        )
    assert e.value.message == expected_exception_message
    # Confirm that even though exceptions may occur in some expectations, other expectations can be validated properly.
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_table
    )
    expectation_parameters = dict(
        **expectation_arguments_without_meta, **expectation_meta
    )
    result: ExpectationValidationResult = validator.expect_table_row_count_to_equal(
        **expectation_parameters
    )
    assert result.success
    # In-Memory DataContext does not have UsageStatisticsHandler configured
    assert mock_emit.call_count == 0
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_catch_exceptions_exception_occurred_catch_exceptions_true(
    mock_emit, in_memory_runtime_context, test_spark_df
):
    """With catch_exceptions=True, the bad-column expectation fails and
    records exception info instead of raising; the other expectation
    still succeeds.
    """
    catch_exceptions: bool = True  # expect exceptions to be caught
    result_format: dict = {
        "result_format": "SUMMARY",
    }
    runtime_environment_arguments = {
        "catch_exceptions": catch_exceptions,
        "result_format": result_format,
    }
    suite: ExpectationSuite = in_memory_runtime_context.create_expectation_suite(
        "test_suite", overwrite_existing=True
    )
    expectation_configuration: ExpectationConfiguration
    expectation_meta: dict = {"Notes": "Some notes"}
    expectation_arguments_without_meta: dict
    expectation_arguments_column: dict = {
        "include_config": True,
        "column": "unknown_column",  # use intentionally incorrect column to force error in "MetricProvider" evaluations
    }
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_column
    )
    expectation_configuration = ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs=expectation_arguments_without_meta,
        meta=expectation_meta,
    )
    suite.add_expectation(expectation_configuration=expectation_configuration)
    expectation_arguments_table: dict = {
        "include_config": True,
        "value": 4,
    }
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_table
    )
    expectation_configuration = ExpectationConfiguration(
        expectation_type="expect_table_row_count_to_equal",
        kwargs=expectation_arguments_without_meta,
        meta=expectation_meta,
    )
    suite.add_expectation(expectation_configuration=expectation_configuration)
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name="spark_datasource",
        data_connector_name="runtime_data_connector",
        data_asset_name="insert_your_data_asset_name_here",
        runtime_parameters={"batch_data": test_spark_df},
        batch_identifiers={
            "id_key_0": "id_value_0",
            "id_key_1": "id_value_1",
        },
    )
    validator: Validator = in_memory_runtime_context.get_validator(
        batch_request=runtime_batch_request,
        expectation_suite=suite,
    )
    expected_exception_message: str = (
        'Error: The column "unknown_column" in BatchData does not exist.'
    )
    # Test calling "validator.validate()" explicitly.
    validator_validation: ExpectationSuiteValidationResult = validator.validate(
        **runtime_environment_arguments
    )
    results: List[ExpectationValidationResult] = validator_validation.results
    assert len(results) == 2
    # Confirm that even though an exception occurred in one expectation, the other expectation is validated properly.
    # Sort by expectation_type to make the two results addressable by index.
    results = sorted(
        results, key=lambda element: element.expectation_config["expectation_type"]
    )
    result: ExpectationValidationResult
    result = results[0]
    assert (
        result.expectation_config["expectation_type"]
        == "expect_column_values_to_not_be_null"
    )
    assert not result.success
    assert "exception_traceback" in result.exception_info
    assert "exception_message" in result.exception_info
    assert result.exception_info["exception_message"] == expected_exception_message
    result = results[1]
    assert (
        result.expectation_config["expectation_type"]
        == "expect_table_row_count_to_equal"
    )
    assert result.success
    assert (
        "exception_traceback" not in result.exception_info
    ) or not result.exception_info["exception_traceback"]
    assert (
        "exception_message" not in result.exception_info
    ) or not result.exception_info["exception_message"]
    # Test calling "validator.expect_*" through "validator.validate_expectation()".
    expectation_parameters: dict
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_column
    )
    expectation_parameters = dict(
        **expectation_arguments_without_meta, **expectation_meta
    )
    result = validator.expect_column_values_to_not_be_null(**expectation_parameters)
    assert not result.success
    assert "exception_traceback" in result.exception_info
    assert "exception_message" in result.exception_info
    assert result.exception_info["exception_message"] == expected_exception_message
    # Confirm that even though exceptions may occur in some expectations, other expectations can be validated properly.
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_table
    )
    expectation_parameters = dict(
        **expectation_arguments_without_meta, **expectation_meta
    )
    result = validator.expect_table_row_count_to_equal(**expectation_parameters)
    assert result.success
    assert (
        "exception_traceback" not in result.exception_info
    ) or not result.exception_info["exception_traceback"]
    assert (
        "exception_message" not in result.exception_info
    ) or not result.exception_info["exception_message"]
    # In-Memory DataContext does not have UsageStatisticsHandler configured
    assert mock_emit.call_count == 0
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_result_format_configured_no_set_default_override(
    mock_emit, in_memory_runtime_context, test_spark_df
):
    """The "result_format" runtime argument (SUMMARY, BASIC, BOOLEAN_ONLY)
    controls exactly which keys appear in each validation result when no
    default-argument override is set on the validator.
    """
    catch_exceptions: bool = False  # expect exceptions to be raised
    result_format: dict
    result_format = {
        "result_format": "SUMMARY",
    }
    runtime_environment_arguments: dict = {
        "catch_exceptions": catch_exceptions,
        "result_format": result_format,
    }
    suite: ExpectationSuite
    suite = in_memory_runtime_context.create_expectation_suite(
        "test_suite", overwrite_existing=True
    )
    expectation_configuration: ExpectationConfiguration
    expectation_meta: dict = {"Notes": "Some notes"}
    expectation_arguments_without_meta: dict
    expectation_arguments_column: dict = {
        "include_config": True,
        "column": "Name",  # use correct column to avoid error
    }
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_column
    )
    expectation_configuration = ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs=expectation_arguments_without_meta,
        meta=expectation_meta,
    )
    suite.add_expectation(expectation_configuration=expectation_configuration)
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name="spark_datasource",
        data_connector_name="runtime_data_connector",
        data_asset_name="insert_your_data_asset_name_here",
        runtime_parameters={"batch_data": test_spark_df},
        batch_identifiers={
            "id_key_0": "id_value_0",
            "id_key_1": "id_value_1",
        },
    )
    validator: Validator
    validator = in_memory_runtime_context.get_validator(
        batch_request=runtime_batch_request,
        expectation_suite=suite,
    )
    # Test calling "validator.validate()" explicitly.
    validator_validation: ExpectationSuiteValidationResult
    validator_validation = validator.validate(**runtime_environment_arguments)
    results: List[ExpectationValidationResult]
    results = validator_validation.results
    assert len(results) == 1
    result: ExpectationValidationResult
    result = results[0]
    assert result.success
    # SUMMARY includes the partial_unexpected_* keys.
    assert len(result.result.keys()) > 0
    assert result.result == {
        "element_count": 4,
        "unexpected_count": 0,
        "unexpected_percent": 0.0,
        "partial_unexpected_list": [],
        "partial_unexpected_index_list": None,
        "partial_unexpected_counts": [],
    }
    result_format = {
        "result_format": "BASIC",
    }
    runtime_environment_arguments: dict = {
        "catch_exceptions": catch_exceptions,
        "result_format": result_format,
    }
    suite = in_memory_runtime_context.create_expectation_suite(
        "test_suite", overwrite_existing=True
    )
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_column
    )
    expectation_configuration = ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs=expectation_arguments_without_meta,
        meta=expectation_meta,
    )
    suite.add_expectation(expectation_configuration=expectation_configuration)
    validator = in_memory_runtime_context.get_validator(
        batch_request=runtime_batch_request,
        expectation_suite=suite,
    )
    validator_validation = validator.validate(**runtime_environment_arguments)
    results = validator_validation.results
    assert len(results) == 1
    result = results[0]
    assert result.success
    # BASIC drops the partial_unexpected_index_list/counts keys.
    assert len(result.result.keys()) > 0
    assert result.result == {
        "element_count": 4,
        "unexpected_count": 0,
        "unexpected_percent": 0.0,
        "partial_unexpected_list": [],
    }
    result_format = {
        "result_format": "BOOLEAN_ONLY",
    }
    runtime_environment_arguments: dict = {
        "catch_exceptions": catch_exceptions,
        "result_format": result_format,
    }
    suite = in_memory_runtime_context.create_expectation_suite(
        "test_suite", overwrite_existing=True
    )
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_column
    )
    expectation_configuration = ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs=expectation_arguments_without_meta,
        meta=expectation_meta,
    )
    suite.add_expectation(expectation_configuration=expectation_configuration)
    validator = in_memory_runtime_context.get_validator(
        batch_request=runtime_batch_request,
        expectation_suite=suite,
    )
    validator_validation = validator.validate(**runtime_environment_arguments)
    results = validator_validation.results
    assert len(results) == 1
    result = results[0]
    assert result.success
    # BOOLEAN_ONLY leaves "result" empty entirely.
    assert result.to_json_dict() == {
        "expectation_config": {
            "kwargs": {
                "catch_exceptions": False,
                "result_format": {"result_format": "BOOLEAN_ONLY"},
                "include_config": True,
                "column": "Name",
                "batch_id": "bd7b9290f981fde37aabd403e8a507ea",
            },
            "expectation_type": "expect_column_values_to_not_be_null",
            "meta": {"Notes": "Some notes"},
        },
        "meta": {},
        "exception_info": {
            "raised_exception": False,
            "exception_traceback": None,
            "exception_message": None,
        },
        "result": {},
        "success": True,
    }
    assert len(result.result.keys()) == 0
    assert result.result == {}
    # Test calling "validator.expect_*" through "validator.validate_expectation()".
    expectation_parameters: dict
    expectation_parameters = dict(
        **expectation_arguments_without_meta, **expectation_meta
    )
    result = validator.expect_column_values_to_not_be_null(**expectation_parameters)
    assert result.success
    assert result.to_json_dict() == {
        "success": True,
        "meta": {},
        "expectation_config": {
            "expectation_type": "expect_column_values_to_not_be_null",
            "meta": {},
            "kwargs": {
                "catch_exceptions": False,
                "result_format": {
                    "result_format": "BOOLEAN_ONLY",
                    "include_unexpected_rows": False,
                    "partial_unexpected_count": 20,
                },
                "include_config": True,
                "column": "Name",
                "Notes": "Some notes",
                "batch_id": "bd7b9290f981fde37aabd403e8a507ea",
            },
        },
        "result": {},
        "exception_info": {
            "raised_exception": False,
            "exception_traceback": None,
            "exception_message": None,
        },
    }
    assert len(result.result.keys()) == 0
    assert result.result == {}
    # In-Memory DataContext does not have UsageStatisticsHandler configured
    assert mock_emit.call_count == 0
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_result_format_configured_with_set_default_override(
    mock_emit, in_memory_runtime_context, test_spark_df
):
    """set_default_expectation_argument("result_format", "BOOLEAN_ONLY")
    makes validation return empty "result" payloads even when the stored
    expectation kwargs request SUMMARY/BASIC.
    """
    catch_exceptions: bool = False  # expect exceptions to be raised
    result_format: dict
    result_format = {
        "result_format": "SUMMARY",
    }
    runtime_environment_arguments: dict = {
        "catch_exceptions": catch_exceptions,
        "result_format": result_format,
    }
    suite: ExpectationSuite
    suite = in_memory_runtime_context.create_expectation_suite(
        "test_suite", overwrite_existing=True
    )
    expectation_configuration: ExpectationConfiguration
    expectation_meta: dict = {"Notes": "Some notes"}
    expectation_arguments_without_meta: dict
    expectation_arguments_column: dict = {
        "include_config": True,
        "column": "Name",  # use correct column to avoid error
    }
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_column
    )
    expectation_configuration = ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs=expectation_arguments_without_meta,
        meta=expectation_meta,
    )
    suite.add_expectation(expectation_configuration=expectation_configuration)
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name="spark_datasource",
        data_connector_name="runtime_data_connector",
        data_asset_name="insert_your_data_asset_name_here",
        runtime_parameters={"batch_data": test_spark_df},
        batch_identifiers={
            "id_key_0": "id_value_0",
            "id_key_1": "id_value_1",
        },
    )
    validator: Validator
    validator = in_memory_runtime_context.get_validator(
        batch_request=runtime_batch_request,
        expectation_suite=suite,
    )
    # Default override: forces empty "result" payloads below.
    validator.set_default_expectation_argument("result_format", "BOOLEAN_ONLY")
    # Test calling "validator.validate()" explicitly.
    validator_validation: ExpectationSuiteValidationResult
    validator_validation = validator.validate()
    results: List[ExpectationValidationResult]
    results = validator_validation.results
    assert len(results) == 1
    result: ExpectationValidationResult
    result = results[0]
    assert result.success
    # The stored kwargs still say SUMMARY, but the override empties "result".
    assert result.to_json_dict() == {
        "result": {},
        "expectation_config": {
            "kwargs": {
                "catch_exceptions": False,
                "result_format": {"result_format": "SUMMARY"},
                "include_config": True,
                "column": "Name",
                "batch_id": "bd7b9290f981fde37aabd403e8a507ea",
            },
            "meta": {"Notes": "Some notes"},
            "expectation_type": "expect_column_values_to_not_be_null",
        },
        "success": True,
        "meta": {},
        "exception_info": {
            "raised_exception": False,
            "exception_traceback": None,
            "exception_message": None,
        },
    }
    assert len(result.result.keys()) == 0
    assert result.result == {}
    result_format = {
        "result_format": "BASIC",
    }
    runtime_environment_arguments: dict = {
        "catch_exceptions": catch_exceptions,
        "result_format": result_format,
    }
    suite = in_memory_runtime_context.create_expectation_suite(
        "test_suite", overwrite_existing=True
    )
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_column
    )
    expectation_configuration = ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs=expectation_arguments_without_meta,
        meta=expectation_meta,
    )
    suite.add_expectation(expectation_configuration=expectation_configuration)
    validator = in_memory_runtime_context.get_validator(
        batch_request=runtime_batch_request,
        expectation_suite=suite,
    )
    validator.set_default_expectation_argument("result_format", "BOOLEAN_ONLY")
    validator_validation = validator.validate()
    results = validator_validation.results
    assert len(results) == 1
    result = results[0]
    assert len(result.result.keys()) == 0
    # Test calling "validator.expect_*" through "validator.validate_expectation()".
    expectation_parameters: dict
    expectation_parameters = dict(**expectation_arguments_column, **expectation_meta)
    result = validator.expect_column_values_to_not_be_null(**expectation_parameters)
    assert result.success
    assert result.to_json_dict() == {
        "result": {},
        "expectation_config": {
            "kwargs": {
                "include_config": True,
                "column": "Name",
                "Notes": "Some notes",
                "batch_id": "bd7b9290f981fde37aabd403e8a507ea",
            },
            "meta": {},
            "expectation_type": "expect_column_values_to_not_be_null",
        },
        "success": True,
        "meta": {},
        "exception_info": {
            "raised_exception": False,
            "exception_traceback": None,
            "exception_message": None,
        },
    }
    assert len(result.result.keys()) == 0
    assert result.result == {}
    # In-Memory DataContext does not have UsageStatisticsHandler configured
    assert mock_emit.call_count == 0
@mock.patch(
    "great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_in_memory_runtime_context_configured_with_usage_stats_handler(
    mock_emit, in_memory_runtime_context, test_pandas_df
):
    """When a UsageStatisticsHandler is attached manually to the in-memory
    context, adding an expectation emits exactly one usage-stats event.
    """
    context: DataContext = in_memory_runtime_context
    # manually set usage statistics handler
    context._usage_statistics_handler = UsageStatisticsHandler(
        data_context=context,
        data_context_id=context._data_context_id,
        usage_statistics_url="http://fakeendpoint.com",
    )
    catch_exceptions: bool = False  # expect exceptions to be raised
    result_format: dict = {
        "result_format": "SUMMARY",
    }
    runtime_environment_arguments = {
        "catch_exceptions": catch_exceptions,
        "result_format": result_format,
    }
    suite: ExpectationSuite = in_memory_runtime_context.create_expectation_suite(
        "test_suite", overwrite_existing=True
    )
    expectation_configuration: ExpectationConfiguration
    expectation_meta: dict = {"Notes": "Some notes"}
    expectation_arguments_without_meta: dict
    expectation_arguments_column: dict = {
        "include_config": True,
        "column": "Name",  # use correct column to avoid error
    }
    expectation_arguments_without_meta = dict(
        **runtime_environment_arguments, **expectation_arguments_column
    )
    expectation_configuration = ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs=expectation_arguments_without_meta,
        meta=expectation_meta,
    )
    suite.add_expectation(expectation_configuration=expectation_configuration)
    # emit 1 from add_expectation
    assert mock_emit.call_count == 1
    assert mock_emit.call_args_list == [
        mock.call(
            {
                "event": "expectation_suite.add_expectation",
                "event_payload": {},
                "success": True,
            }
        )
    ]
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: import <name> as <name> is required for names to be exported.
# See PEP 484 & https://github.com/google/jax/issues/7570
# flake8: noqa: F401
from jax.numpy import fft as fft
from jax.numpy import linalg as linalg
from jax._src.device_array import DeviceArray as DeviceArray
from jax._src.numpy.lax_numpy import (
ComplexWarning as ComplexWarning,
NINF as NINF,
NZERO as NZERO,
PZERO as PZERO,
abs as abs,
absolute as absolute,
add as add,
all as all,
allclose as allclose,
alltrue as alltrue,
amax as amax,
amin as amin,
angle as angle,
any as any,
append as append,
apply_along_axis as apply_along_axis,
apply_over_axes as apply_over_axes,
arange as arange,
arccos as arccos,
arccosh as arccosh,
arcsin as arcsin,
arcsinh as arcsinh,
arctan as arctan,
arctan2 as arctan2,
arctanh as arctanh,
argmax as argmax,
argmin as argmin,
argsort as argsort,
argwhere as argwhere,
around as around,
array as array,
array_equal as array_equal,
array_equiv as array_equiv,
array_repr as array_repr,
array_split as array_split,
array_str as array_str,
asarray as asarray,
atleast_1d as atleast_1d,
atleast_2d as atleast_2d,
atleast_3d as atleast_3d,
average as average,
bartlett as bartlett,
bfloat16 as bfloat16,
bincount as bincount,
bitwise_and as bitwise_and,
bitwise_not as bitwise_not,
bitwise_or as bitwise_or,
bitwise_xor as bitwise_xor,
blackman as blackman,
block as block,
bool_ as bool_,
broadcast_arrays as broadcast_arrays,
broadcast_shapes as broadcast_shapes,
broadcast_to as broadcast_to,
c_ as c_,
can_cast as can_cast,
cbrt as cbrt,
cdouble as cdouble,
ceil as ceil,
character as character,
choose as choose,
clip as clip,
column_stack as column_stack,
complex128 as complex128,
complex64 as complex64,
complex_ as complex_,
complexfloating as complexfloating,
compress as compress,
concatenate as concatenate,
conj as conj,
conjugate as conjugate,
convolve as convolve,
copy as copy,
copysign as copysign,
corrcoef as corrcoef,
correlate as correlate,
cos as cos,
cosh as cosh,
count_nonzero as count_nonzero,
cov as cov,
cross as cross,
csingle as csingle,
cumprod as cumprod,
cumproduct as cumproduct,
cumsum as cumsum,
deg2rad as deg2rad,
degrees as degrees,
delete as delete,
diag as diag,
diagflat as diagflat,
diag_indices as diag_indices,
diag_indices_from as diag_indices_from,
diagonal as diagonal,
diff as diff,
digitize as digitize,
divide as divide,
divmod as divmod,
dot as dot,
double as double,
dsplit as dsplit,
dstack as dstack,
dtype as dtype,
e as e,
ediff1d as ediff1d,
einsum as einsum,
einsum_path as einsum_path,
empty as empty,
empty_like as empty_like,
equal as equal,
euler_gamma as euler_gamma,
exp as exp,
exp2 as exp2,
expand_dims as expand_dims,
expm1 as expm1,
extract as extract,
eye as eye,
fabs as fabs,
finfo as finfo,
fix as fix,
flatnonzero as flatnonzero,
flexible as flexible,
flip as flip,
fliplr as fliplr,
flipud as flipud,
float16 as float16,
float32 as float32,
float64 as float64,
float_ as float_,
float_power as float_power,
floating as floating,
floor as floor,
floor_divide as floor_divide,
fmax as fmax,
fmin as fmin,
fmod as fmod,
frexp as frexp,
full as full,
full_like as full_like,
gcd as gcd,
generic as generic,
geomspace as geomspace,
get_printoptions as get_printoptions,
gradient as gradient,
greater as greater,
greater_equal as greater_equal,
hamming as hamming,
hanning as hanning,
heaviside as heaviside,
histogram as histogram,
histogram_bin_edges as histogram_bin_edges,
histogram2d as histogram2d,
histogramdd as histogramdd,
hsplit as hsplit,
hstack as hstack,
hypot as hypot,
i0 as i0,
identity as identity,
iinfo as iinfo,
imag as imag,
index_exp as index_exp,
indices as indices,
inexact as inexact,
in1d as in1d,
inf as inf,
inner as inner,
insert as insert,
int16 as int16,
int32 as int32,
int64 as int64,
int8 as int8,
int_ as int_,
integer as integer,
interp as interp,
intersect1d as intersect1d,
invert as invert,
isclose as isclose,
iscomplex as iscomplex,
iscomplexobj as iscomplexobj,
isfinite as isfinite,
isin as isin,
isinf as isinf,
isnan as isnan,
isneginf as isneginf,
isposinf as isposinf,
isreal as isreal,
isrealobj as isrealobj,
isscalar as isscalar,
issubdtype as issubdtype,
issubsctype as issubsctype,
iterable as iterable,
ix_ as ix_,
kaiser as kaiser,
kron as kron,
lcm as lcm,
ldexp as ldexp,
left_shift as left_shift,
less as less,
less_equal as less_equal,
lexsort as lexsort,
linspace as linspace,
load as load,
log as log,
log10 as log10,
log1p as log1p,
log2 as log2,
logaddexp as logaddexp,
logaddexp2 as logaddexp2,
logical_and as logical_and,
logical_not as logical_not,
logical_or as logical_or,
logical_xor as logical_xor,
logspace as logspace,
mask_indices as mask_indices,
matmul as matmul,
max as max,
maximum as maximum,
mean as mean,
median as median,
meshgrid as meshgrid,
mgrid as mgrid,
min as min,
minimum as minimum,
mod as mod,
modf as modf,
moveaxis as moveaxis,
msort as msort,
multiply as multiply,
nan as nan,
nan_to_num as nan_to_num,
nanargmax as nanargmax,
nanargmin as nanargmin,
nancumprod as nancumprod,
nancumsum as nancumsum,
nanmedian as nanmedian,
nanpercentile as nanpercentile,
nanquantile as nanquantile,
nanmax as nanmax,
nanmean as nanmean,
nanmin as nanmin,
nanprod as nanprod,
nanstd as nanstd,
nansum as nansum,
nanvar as nanvar,
ndarray as ndarray,
ndim as ndim,
negative as negative,
newaxis as newaxis,
nextafter as nextafter,
nonzero as nonzero,
not_equal as not_equal,
number as number,
object_ as object_,
ogrid as ogrid,
ones as ones,
ones_like as ones_like,
outer as outer,
packbits as packbits,
pad as pad,
percentile as percentile,
pi as pi,
piecewise as piecewise,
poly as poly,
polyadd as polyadd,
polyder as polyder,
polyfit as polyfit,
polyint as polyint,
polymul as polymul,
polysub as polysub,
polyval as polyval,
positive as positive,
power as power,
printoptions as printoptions,
prod as prod,
product as product,
promote_types as promote_types,
ptp as ptp,
quantile as quantile,
r_ as r_,
rad2deg as rad2deg,
radians as radians,
ravel as ravel,
ravel_multi_index as ravel_multi_index,
real as real,
reciprocal as reciprocal,
remainder as remainder,
repeat as repeat,
reshape as reshape,
resize as resize,
result_type as result_type,
right_shift as right_shift,
rint as rint,
roll as roll,
rollaxis as rollaxis,
rot90 as rot90,
round as round,
round_ as round_,
row_stack as row_stack,
save as save,
savez as savez,
searchsorted as searchsorted,
select as select,
set_printoptions as set_printoptions,
setdiff1d as setdiff1d,
setxor1d as setxor1d,
shape as shape,
sign as sign,
signbit as signbit,
s_ as s_,
signedinteger as signedinteger,
sin as sin,
sinc as sinc,
single as single,
sinh as sinh,
size as size,
sometrue as sometrue,
sort as sort,
sort_complex as sort_complex,
split as split,
sqrt as sqrt,
square as square,
squeeze as squeeze,
stack as stack,
std as std,
subtract as subtract,
sum as sum,
swapaxes as swapaxes,
take as take,
take_along_axis as take_along_axis,
tan as tan,
tanh as tanh,
tensordot as tensordot,
tile as tile,
trace as trace,
trapz as trapz,
transpose as transpose,
tri as tri,
tril as tril,
tril_indices as tril_indices,
tril_indices_from as tril_indices_from,
trim_zeros as trim_zeros,
triu as triu,
triu_indices as triu_indices,
triu_indices_from as triu_indices_from,
true_divide as true_divide,
trunc as trunc,
uint as uint,
uint16 as uint16,
uint32 as uint32,
uint64 as uint64,
uint8 as uint8,
unique as unique,
union1d as union1d,
unpackbits as unpackbits,
unravel_index as unravel_index,
unsignedinteger as unsignedinteger,
unwrap as unwrap,
vander as vander,
var as var,
vdot as vdot,
vsplit as vsplit,
vstack as vstack,
where as where,
zeros as zeros,
zeros_like as zeros_like,
_NOT_IMPLEMENTED,
)
from jax._src.numpy.polynomial import roots as roots
from jax._src.numpy.vectorize import vectorize as vectorize
# TODO(phawkins): remove this import after fixing users.
from jax._src.numpy import lax_numpy
# Module initialization is encapsulated in a function to avoid accidental
# namespace pollution.
def _init():
    """Register NotImplemented stubs for NumPy functions jax.numpy lacks."""
    import numpy as np
    from jax._src.numpy import lax_numpy
    from jax._src import util
    # For every public NumPy function with no jax.numpy counterpart, record
    # its name and install a placeholder that raises when called.
    module_globals = globals()
    for np_name, np_func in util.get_module_functions(np).items():
        if np_name in module_globals:
            continue
        _NOT_IMPLEMENTED.append(np_name)
        module_globals[np_name] = lax_numpy._not_implemented(np_func)
_init()
del _init
|
|
"""Checkpointable data structures."""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.ops import variables
from tensorflow.python.training.checkpointable import base as checkpointable_lib
# TODO(allenl): We could track regular Python data structures which get assigned
# to Checkpointable objects. Making this work with restore-on-create would be
# tricky; we'd need to re-create nested structures with our own wrapped objects
# on assignment to an attribute, and track the user's original structure to make
# sure they don't modify it except through the wrappers (since we could save the
# user's updated structure, but would have no way to support restore-on-create
# for those modifications).
# TODO(allenl): A dictionary data structure would be good too.
class CheckpointableDataStructure(checkpointable_lib.CheckpointableBase):
    """Base class for data structures which contain checkpointable objects."""

    def __init__(self):
        # Tracked Layer/data-structure children, in insertion order.
        self._layers = []
        self.trainable = True
        # Variables tracked directly (not owned by a child Layer).
        self._extra_variables = []

    def _track_value(self, value, name):
        """Add a dependency on `value`, rejecting non-checkpointable objects."""
        if not isinstance(value, checkpointable_lib.CheckpointableBase):
            raise ValueError(
                ("Only checkpointable objects (such as Layers or Optimizers) may be "
                 "stored in a List object. Got %s, which does not inherit from "
                 "CheckpointableBase.") % (value,))
        self._track_checkpointable(value, name=name)
        if isinstance(value, variables.Variable):
            self._extra_variables.append(value)
        if isinstance(value, (base_layer.Layer, CheckpointableDataStructure)):
            if value not in self._layers:
                self._layers.append(value)
            if hasattr(value, "_use_resource_variables"):
                # In subclassed models, legacy layers (tf.layers) must always
                # use resource variables.
                value._use_resource_variables = True  # pylint: disable=protected-access

    @property
    def layers(self):
        """Tracked `Layer` / data-structure children."""
        return self._layers

    @property
    def trainable_weights(self):
        return layer_utils.gather_trainable_weights(
            trainable=self.trainable,
            sub_layers=self._layers,
            extra_variables=self._extra_variables)

    @property
    def non_trainable_weights(self):
        return layer_utils.gather_non_trainable_weights(
            trainable=self.trainable,
            sub_layers=self._layers,
            extra_variables=self._extra_variables)

    @property
    def weights(self):
        return self.trainable_weights + self.non_trainable_weights

    # "variables" is an alias vocabulary for "weights", mirroring Layer.
    @property
    def trainable_variables(self):
        return self.trainable_weights

    @property
    def non_trainable_variables(self):
        return self.non_trainable_weights

    @property
    def variables(self):
        return self.weights

    @property
    def updates(self):
        """Aggregate updates from any `Layer` instances."""
        # Updates and conditional losses are forwarded as-is rather than being
        # filtered based on inputs, since this is just a container and won't
        # ever have any inputs.
        return [update
                for sub_layer in self.layers
                for update in sub_layer.updates]

    @property
    def losses(self):
        """Aggregate losses from any `Layer` instances."""
        return [loss
                for sub_layer in self.layers
                for loss in sub_layer.losses]

    def __hash__(self):
        # Support object-identity hashing, so these structures can be used as
        # keys in sets/dicts.
        return id(self)

    def __eq__(self, other):
        # Similar to Tensors, checkpointable data structures use
        # object-identity equality to support set/dict membership.
        return self is other
class List(CheckpointableDataStructure, collections.Sequence):
    """An append-only sequence type which is checkpointable.

    Maintains checkpoint dependencies on its contents (which must also be
    checkpointable), and forwards any `Layer` metadata such as updates and
    losses.

    Note that `List` is purely a container. It lets a `tf.keras.Model` or
    other checkpointable object know about its contents, but does not call any
    `Layer` instances which are added to it. To indicate a sequence of `Layer`
    instances which should be called sequentially, use `tf.keras.Sequential`.

    Example usage:
    ```python
    class HasList(tf.keras.Model):
      def __init__(self):
        super(HasList, self).__init__()
        self.layer_list = tf.contrib.checkpoint.List([layers.Dense(3)])
        self.layer_list.append(layers.Dense(4))

      def call(self, x):
        aggregation = 0.
        for l in self.layer_list:
          x = l(x)
          aggregation += tf.reduce_sum(x)
        return aggregation
    ```

    This kind of wrapping is necessary because `Checkpointable` objects do not
    (yet) deeply inspect regular Python data structures, so for example
    assigning a regular list (`self.layer_list = [layers.Dense(3)]`) does not
    create a checkpoint dependency and does not add the `Layer` instance's
    weights to its parent `Model`.
    """
    # NOTE(review): `collections.Sequence` moved to `collections.abc` in
    # Python 3.3 and was removed from `collections` in 3.10; kept here for
    # Python 2 compatibility (this file imports `six`).

    def __init__(self, *args, **kwargs):
        """Construct a new sequence. Arguments are passed to `list()`."""
        super(List, self).__init__()
        self._storage = list(*args, **kwargs)
        for index, element in enumerate(self._storage):
            self._track_value(element, name=self._name_element(index))

    def _name_element(self, index):
        # Dependency names are the decimal element index.
        return "%d" % (index,)

    def append(self, value):
        """Add a new checkpointable value."""
        self._track_value(value, self._name_element(len(self._storage)))
        self._storage.append(value)

    def extend(self, values):
        """Add a sequence of checkpointable values."""
        # Bug fix: materialize `values` once. The previous implementation
        # iterated `values` to track each element and then called
        # `self._storage.extend(values)` again; for a one-shot iterator the
        # second pass saw an exhausted iterator, so elements were tracked but
        # never actually stored.
        values = list(values)
        for index_offset, value in enumerate(values):
            self._track_value(
                value, name=self._name_element(len(self._storage) + index_offset))
        self._storage.extend(values)

    def __iadd__(self, values):
        self.extend(values)
        return self

    def __add__(self, other):
        """Return a new `List` with the concatenated contents."""
        if isinstance(other, List):
            return List(self._storage + other._storage)  # pylint: disable=protected-access
        else:
            return List(self._storage + other)

    def __getitem__(self, key):
        return self._storage[key]

    def __len__(self):
        return len(self._storage)

    def __repr__(self):
        return "List(%s)" % (repr(self._storage),)
class Mapping(CheckpointableDataStructure, collections.Mapping):
    """An append-only checkpointable mapping data structure with string keys.

    Maintains checkpoint dependencies on its contents (which must also be
    checkpointable), named based on its keys.

    Note that once a key has been added, it may not be deleted or replaced. If
    names may not be unique, see `tf.contrib.checkpoint.UniqueNameTracker`.
    """
    # NOTE(review): `collections.Mapping` moved to `collections.abc` in
    # Python 3.3 and was removed from `collections` in 3.10; kept here for
    # Python 2 compatibility (this file imports `six`).

    def __init__(self, *args, **kwargs):
        """Construct a new mapping. Arguments are passed to `dict()`."""
        super(Mapping, self).__init__()
        self._storage = dict(*args, **kwargs)
        for key, value in self._storage.items():
            self._track_value(value, name=self._name_element(key))

    def _name_element(self, key):
        # Dependency names must be strings; reject any other key type.
        if isinstance(key, six.string_types):
            return str(key)
        raise TypeError(
            "Mapping accepts only string keys, but got a key %s."
            % repr(key))

    def __setitem__(self, key, value):
        # setdefault inserts only when the key is absent; if a different value
        # is already stored, this mapping's append-only contract is violated.
        previous_value = self._storage.setdefault(key, value)
        if previous_value is not value:
            raise ValueError(
                ("Mappings are an append-only data structure. Tried to overwrite the "
                 "key '%s' with value %s, but it already contains %s")
                % (key, value, previous_value))
        self._track_value(value, name=self._name_element(key))

    def update(self, *args, **kwargs):
        """Add entries; raises if any key would be overwritten."""
        for key, value in dict(*args, **kwargs).items():
            self[key] = value

    def __getitem__(self, key):
        return self._storage[key]

    def __iter__(self):
        return iter(self._storage)

    def __len__(self):
        return len(self._storage)

    def __repr__(self):
        return "Mapping(%s)" % (repr(self._storage),)
|
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
import pytest
import os
from tests import mock
from tests import unittest
import botocore.session as session
import botocore.configprovider
from botocore.configprovider import ConfigValueStore
from botocore.configprovider import BaseProvider
from botocore.configprovider import InstanceVarProvider
from botocore.configprovider import EnvironmentProvider
from botocore.configprovider import ScopedConfigProvider
from botocore.configprovider import SectionConfigProvider
from botocore.configprovider import ConstantProvider
from botocore.configprovider import ChainProvider
from botocore.configprovider import ConfigChainFactory
from botocore.configprovider import SmartDefaultsConfigStoreFactory
from botocore.configprovider import DefaultConfigResolver
from botocore.utils import IMDSRegionProvider
from botocore.exceptions import ConnectTimeoutError
class TestConfigChainFactory(unittest.TestCase):
    """Tests for `ConfigChainFactory.create_config_chain`.

    Each test stubs out a session's instance variables, environment, and
    scoped config, then asserts which source the assembled chain resolves a
    value from. Priority order exercised below: instance variable > env var >
    scoped config > default.
    """

    def assert_chain_does_provide(self, instance_map, environ_map,
                                  scoped_config_map, create_config_chain_args,
                                  expected_value):
        """Build a chain against stubbed session state and check its value.

        Args:
            instance_map: fake session instance variables.
            environ_map: fake environment mapping passed to the factory.
            scoped_config_map: fake scoped (profile) configuration.
            create_config_chain_args: kwargs for `create_config_chain`.
            expected_value: value the chain's `provide()` must return.
        """
        fake_session = mock.Mock(spec=session.Session)
        fake_session.get_scoped_config.return_value = scoped_config_map
        fake_session.instance_variables.return_value = instance_map
        builder = ConfigChainFactory(fake_session, environ=environ_map)
        chain = builder.create_config_chain(
            **create_config_chain_args
        )
        value = chain.provide()
        self.assertEqual(value, expected_value)

    def test_chain_builder_can_provide_instance(self):
        self.assert_chain_does_provide(
            instance_map={'instance_var': 'from-instance'},
            environ_map={},
            scoped_config_map={},
            create_config_chain_args={
                'instance_name': 'instance_var',
            },
            expected_value='from-instance',
        )

    def test_chain_builder_can_skip_instance(self):
        # The chain looks up 'instance_var', which is absent, so it falls
        # through to the environment variable.
        self.assert_chain_does_provide(
            instance_map={'wrong_instance_var': 'instance'},
            environ_map={'ENV_VAR': 'env'},
            scoped_config_map={},
            create_config_chain_args={
                'instance_name': 'instance_var',
                'env_var_names': 'ENV_VAR',
            },
            expected_value='env',
        )

    def test_chain_builder_can_provide_env_var(self):
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={'ENV_VAR': 'from-env'},
            scoped_config_map={},
            create_config_chain_args={
                'env_var_names': 'ENV_VAR',
            },
            expected_value='from-env',
        )

    def test_does_provide_none_if_no_variable_exists_in_env_var_list(self):
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={},
            scoped_config_map={},
            create_config_chain_args={
                'env_var_names': ['FOO'],
            },
            expected_value=None,
        )

    def test_does_provide_value_if_variable_exists_in_env_var_list(self):
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={'FOO': 'bar'},
            scoped_config_map={},
            create_config_chain_args={
                'env_var_names': ['FOO'],
            },
            expected_value='bar',
        )

    def test_does_provide_first_non_none_value_first_in_env_var_list(self):
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={'FOO': 'baz'},
            scoped_config_map={},
            create_config_chain_args={
                'env_var_names': ['FOO', 'BAR'],
            },
            expected_value='baz',
        )

    def test_does_provide_first_non_none_value_second_in_env_var_list(self):
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={'BAR': 'baz'},
            scoped_config_map={},
            create_config_chain_args={
                'env_var_names': ['FOO', 'BAR'],
            },
            expected_value='baz',
        )

    def test_does_provide_none_if_all_list_env_vars_are_none(self):
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={},
            scoped_config_map={},
            create_config_chain_args={
                'env_var_names': ['FOO', 'BAR'],
            },
            expected_value=None,
        )

    def test_does_provide_first_value_when_both_env_vars_exist(self):
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={'FOO': 'baz', 'BAR': 'buz'},
            scoped_config_map={},
            create_config_chain_args={
                'env_var_names': ['FOO', 'BAR'],
            },
            expected_value='baz',
        )

    def test_chain_builder_can_provide_config_var(self):
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={},
            scoped_config_map={'config_var': 'from-config'},
            create_config_chain_args={
                'config_property_names': 'config_var',
            },
            expected_value='from-config',
        )

    def test_chain_builder_can_provide_nested_config_var(self):
        # A tuple config property name addresses a key inside a config section.
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={},
            scoped_config_map={'config_var': {'nested-key': 'nested-val'}},
            create_config_chain_args={
                'config_property_names': ('config_var', 'nested-key'),
            },
            expected_value='nested-val',
        )

    def test_provide_value_from_config_list(self):
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={},
            scoped_config_map={'var': 'val'},
            create_config_chain_args={
                'config_property_names': ['var'],
            },
            expected_value='val',
        )

    def test_provide_value_from_config_list_looks_for_non_none_vals(self):
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={},
            scoped_config_map={'non_none_var': 'non_none_val'},
            create_config_chain_args={
                'config_property_names': ['none_var', 'non_none_var'],
            },
            expected_value='non_none_val',
        )

    def test_provide_value_from_config_list_retrieves_first_non_none_val(self):
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={},
            scoped_config_map={
                'first': 'first_val',
                'second': 'second_val'
            },
            create_config_chain_args={
                'config_property_names': ['first', 'second'],
            },
            expected_value='first_val',
        )

    def test_provide_value_from_config_list_if_all_vars_are_none(self):
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={},
            scoped_config_map={},
            create_config_chain_args={
                'config_property_names': ['config1', 'config2'],
            },
            expected_value=None,
        )

    def test_provide_value_from_list_with_nested_var(self):
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={},
            scoped_config_map={'section': {'nested_var': 'nested_val'}},
            create_config_chain_args={
                'config_property_names': [('section', 'nested_var')],
            },
            expected_value='nested_val',
        )

    def test_chain_builder_can_provide_default(self):
        self.assert_chain_does_provide(
            instance_map={},
            environ_map={},
            scoped_config_map={},
            create_config_chain_args={
                'default': 'from-default'
            },
            expected_value='from-default',
        )

    def test_chain_provider_does_follow_priority_instance_var(self):
        # All four sources are populated; the instance variable wins.
        self.assert_chain_does_provide(
            instance_map={'instance_var': 'from-instance'},
            environ_map={'ENV_VAR': 'from-env'},
            scoped_config_map={'config_var': 'from-config'},
            create_config_chain_args={
                'instance_name': 'instance_var',
                'env_var_names': 'ENV_VAR',
                'config_property_names': 'config_var',
                'default': 'from-default',
            },
            expected_value='from-instance',
        )

    def test_chain_provider_does_follow_priority_env_var(self):
        # Instance variable misses, so the env var wins.  (The 'from-confi'
        # config value below is never consulted, so the typo is harmless.)
        self.assert_chain_does_provide(
            instance_map={'wrong_instance_var': 'from-instance'},
            environ_map={'ENV_VAR': 'from-env'},
            scoped_config_map={'config_var': 'from-confi'},
            create_config_chain_args={
                'instance_name': 'instance_var',
                'env_var_names': 'ENV_VAR',
                'config_property_names': 'config_var',
                'default': 'from-default',
            },
            expected_value='from-env',
        )

    def test_chain_provider_does_follow_priority_config(self):
        # Instance variable and env var both miss, so scoped config wins.
        self.assert_chain_does_provide(
            instance_map={'wrong_instance_var': 'from-instance'},
            environ_map={'WRONG_ENV_VAR': 'from-env'},
            scoped_config_map={'config_var': 'from-config'},
            create_config_chain_args={
                'instance_name': 'instance_var',
                'env_var_names': 'ENV_VAR',
                'config_property_names': 'config_var',
                'default': 'from-default',
            },
            expected_value='from-config',
        )

    def test_chain_provider_does_follow_priority_default(self):
        # Every configured source misses, so the default is used.
        self.assert_chain_does_provide(
            instance_map={'wrong_instance_var': 'from-instance'},
            environ_map={'WRONG_ENV_VAR': 'from-env'},
            scoped_config_map={'wrong_config_var': 'from-config'},
            create_config_chain_args={
                'instance_name': 'instance_var',
                'env_var_names': 'ENV_VAR',
                'config_property_names': 'config_var',
                'default': 'from-default',
            },
            expected_value='from-default',
        )
class TestConfigValueStore(unittest.TestCase):
    """Tests for `ConfigValueStore` variable/provider get and set behavior."""

    def test_does_provide_none_if_no_variable_exists(self):
        provider = ConfigValueStore()
        value = provider.get_config_variable('fake_variable')
        self.assertIsNone(value)

    def test_does_provide_value_if_variable_exists(self):
        mock_value_provider = mock.Mock(spec=BaseProvider)
        mock_value_provider.provide.return_value = 'foo'
        provider = ConfigValueStore(mapping={
            'fake_variable': mock_value_provider,
        })
        value = provider.get_config_variable('fake_variable')
        self.assertEqual(value, 'foo')

    def test_can_set_variable(self):
        provider = ConfigValueStore()
        provider.set_config_variable('fake_variable', 'foo')
        value = provider.get_config_variable('fake_variable')
        self.assertEqual(value, 'foo')

    def test_can_set_config_provider(self):
        # A newly-set provider replaces the one supplied at construction.
        foo_value_provider = mock.Mock(spec=BaseProvider)
        foo_value_provider.provide.return_value = 'foo'
        provider = ConfigValueStore(mapping={
            'fake_variable': foo_value_provider,
        })
        value = provider.get_config_variable('fake_variable')
        self.assertEqual(value, 'foo')
        bar_value_provider = mock.Mock(spec=BaseProvider)
        bar_value_provider.provide.return_value = 'bar'
        provider.set_config_provider('fake_variable', bar_value_provider)
        value = provider.get_config_variable('fake_variable')
        self.assertEqual(value, 'bar')

    def test_can_get_config_provider(self):
        chain_provider = ChainProvider(
            providers=[ConstantProvider(value='bar')]
        )
        config_value_store = ConfigValueStore(mapping={
            'fake_variable': chain_provider,
        })
        provider = config_value_store.get_config_provider('fake_variable')
        value = config_value_store.get_config_variable('fake_variable')
        self.assertIsInstance(provider, ChainProvider)
        self.assertEqual(value, 'bar')

    def test_can_get_config_provider_non_chain_provider(self):
        constant_provider = ConstantProvider(value='bar')
        config_value_store = ConfigValueStore(mapping={
            'fake_variable': constant_provider,
        })
        provider = config_value_store.get_config_provider('fake_variable')
        value = config_value_store.get_config_variable('fake_variable')
        self.assertIsInstance(provider, ConstantProvider)
        self.assertEqual(value, 'bar')
class TestInstanceVarProvider(unittest.TestCase):
    """Tests for `InstanceVarProvider` lookups in session instance variables."""

    def assert_provides_value(self, name, instance_map, expected_value):
        """Check that looking up `name` in `instance_map` yields the expected value."""
        session_stub = mock.Mock(spec=session.Session)
        session_stub.instance_variables.return_value = instance_map
        provider = InstanceVarProvider(
            instance_var=name,
            session=session_stub,
        )
        self.assertEqual(provider.provide(), expected_value)

    def test_can_provide_value(self):
        self.assert_provides_value(
            name='foo',
            instance_map={'foo': 'bar'},
            expected_value='bar',
        )

    def test_does_provide_none_if_value_not_in_dict(self):
        self.assert_provides_value(
            name='foo',
            instance_map={},
            expected_value=None,
        )
class TestEnvironmentProvider(unittest.TestCase):
    """Tests for `EnvironmentProvider` lookups in an environment mapping."""

    def assert_does_provide(self, env, name, expected_value):
        """Check that looking up `name` in `env` yields the expected value."""
        provider = EnvironmentProvider(name=name, env=env)
        self.assertEqual(provider.provide(), expected_value)

    def test_does_provide_none_if_no_variable_exists(self):
        self.assert_does_provide(
            name='FOO',
            env={},
            expected_value=None,
        )

    def test_does_provide_value_if_variable_exists(self):
        self.assert_does_provide(
            name='FOO',
            env={
                'FOO': 'bar',
            },
            expected_value='bar',
        )
class TestScopedConfigProvider(unittest.TestCase):
    """Tests for `ScopedConfigProvider` lookups in the scoped (profile) config."""

    def assert_provides_value(self, config_file_values, config_var_name,
                              expected_value):
        """Check that `config_var_name` resolves to `expected_value`.

        Args:
            config_file_values: stubbed scoped-config contents.
            config_var_name: a string key, or a tuple (section, key) for
                nested lookups.
            expected_value: value `provide()` must return.
        """
        fake_session = mock.Mock(spec=session.Session)
        fake_session.get_scoped_config.return_value = config_file_values
        property_provider = ScopedConfigProvider(
            config_var_name=config_var_name,
            session=fake_session,
        )
        value = property_provider.provide()
        self.assertEqual(value, expected_value)

    def test_can_provide_value(self):
        self.assert_provides_value(
            config_file_values={
                'foo': 'bar'
            },
            config_var_name='foo',
            expected_value='bar',
        )

    def test_does_provide_none_if_var_not_in_config(self):
        self.assert_provides_value(
            config_file_values={
                'foo': 'bar'
            },
            config_var_name='no_such_var',
            expected_value=None,
        )

    def test_provide_nested_value(self):
        self.assert_provides_value(
            config_file_values={
                'section': {
                    'nested_var': 'nested_val'
                }
            },
            config_var_name=('section', 'nested_var'),
            expected_value='nested_val',
        )

    def test_provide_nested_value_but_not_section(self):
        # A tuple name against a non-dict value resolves to None.
        self.assert_provides_value(
            config_file_values={
                'section': 'not-nested'
            },
            config_var_name=('section', 'nested_var'),
            expected_value=None,
        )
def _make_provider_that_returns(return_value):
    """Build a BaseProvider mock whose provide() returns `return_value`."""
    provider_stub = mock.Mock(spec=BaseProvider)
    provider_stub.provide.return_value = return_value
    return provider_stub
def _make_providers_that_return(return_values):
    """Build one provider mock per entry of `return_values`, in order."""
    return [_make_provider_that_returns(value) for value in return_values]
def assert_chain_does_provide(providers, expected_value):
    """Assert a ChainProvider over `providers` provides `expected_value`."""
    chain = ChainProvider(providers=providers)
    assert chain.provide() == expected_value
@pytest.mark.parametrize(
    'case',
    (
        (None, []),
        (None, [None]),
        ('foo', ['foo']),
        ('foo', ['foo', 'bar']),
        ('bar', [None, 'bar']),
        ('foo', ['foo', None]),
        ('baz', [None, None, 'baz']),
        ('bar', [None, 'bar', None]),
        ('foo', ['foo', 'bar', None]),
        ('foo', ['foo', 'bar', 'baz']),
    )
)
def test_chain_provider(case):
    """ChainProvider returns the first non-None value from its providers."""
    # Each case pairs the expected chain result with the per-provider return
    # values that make up the chain.
    expected_value, provider_return_values = case
    assert_chain_does_provide(
        _make_providers_that_return(provider_return_values),
        expected_value
    )
class TestChainProvider(unittest.TestCase):
    """Tests for ChainProvider value conversion."""

    def test_can_convert_provided_value(self):
        """conversion_func is applied to the resolved value."""
        provider = ChainProvider(
            providers=_make_providers_that_return(['1']),
            conversion_func=int,
        )
        converted = provider.provide()
        self.assertIsInstance(converted, int)
        self.assertEqual(converted, 1)
class TestConstantProvider(unittest.TestCase):
    """Tests for ConstantProvider."""

    def test_can_provide_value(self):
        """The provider always hands back its configured constant."""
        constant = ConstantProvider(value='foo')
        self.assertEqual(constant.provide(), 'foo')
class TestSectionConfigProvider(unittest.TestCase):
    """Tests for SectionConfigProvider section resolution and overrides."""

    def assert_provides_value(self, config_file_values, section_name,
                              expected_value, override_providers=None):
        """Resolve *section_name* against a stubbed session and compare."""
        session_stub = mock.Mock(spec=session.Session)
        session_stub.get_scoped_config.return_value = config_file_values
        section_provider = SectionConfigProvider(
            section_name=section_name,
            session=session_stub,
            override_providers=override_providers,
        )
        self.assertEqual(section_provider.provide(), expected_value)

    def test_provide_section_config(self):
        """A section present in the config file is returned as a dict."""
        self.assert_provides_value(
            config_file_values={'mysection': {'section_var': 'section_val'}},
            section_name='mysection',
            expected_value={'section_var': 'section_val'},
        )

    def test_provide_service_config_missing_service(self):
        """A section absent from the config file resolves to None."""
        self.assert_provides_value(
            config_file_values={},
            section_name='mysection',
            expected_value=None,
        )

    def test_provide_service_config_not_a_section(self):
        """A scalar where a section is expected resolves to None."""
        self.assert_provides_value(
            config_file_values={'myservice': 'not-a-section'},
            section_name='mysection',
            expected_value=None,
        )

    def test_provide_section_config_with_overrides(self):
        """Override providers win over values read from the config file."""
        file_section = {
            'override_var': 'from_config_file',
            'no_override_var': 'from_config_file',
        }
        self.assert_provides_value(
            config_file_values={'mysection': file_section},
            section_name='mysection',
            override_providers={'override_var': ConstantProvider('override')},
            expected_value={
                'override_var': 'override',
                'no_override_var': 'from_config_file',
            },
        )

    def test_provide_section_config_with_only_overrides(self):
        """Overrides alone yield a section even with no config file data."""
        self.assert_provides_value(
            config_file_values={},
            section_name='mysection',
            override_providers={'override_var': ConstantProvider('override')},
            expected_value={'override_var': 'override'},
        )
class TestSmartDefaults:
    """Tests for smart-defaults resolution: DefaultConfigResolver mode
    handling and SmartDefaultsConfigStoreFactory merging behavior."""

    def _template(self):
        """Return a defaults template: a `base` config plus per-mode
        modifications (multiply / add / override) applied on top of it."""
        return {
            "base": {
                "retryMode": "standard",
                "stsRegionalEndpoints": "regional",
                "s3UsEast1RegionalEndpoints": "regional",
                "connectTimeoutInMillis": 1000,
                "tlsNegotiationTimeoutInMillis": 1000
            },
            "modes": {
                "standard": {
                    "connectTimeoutInMillis": {
                        "multiply": 2
                    },
                    "tlsNegotiationTimeoutInMillis": {
                        "multiply": 2
                    }
                },
                "in-region": {
                    "connectTimeoutInMillis": {
                        "multiply": 1
                    },
                    "tlsNegotiationTimeoutInMillis": {
                        "multiply": 1
                    }
                },
                "cross-region": {
                    "connectTimeoutInMillis": {
                        "multiply": 2.8
                    },
                    "tlsNegotiationTimeoutInMillis": {
                        "multiply": 2.8
                    }
                },
                "mobile": {
                    "connectTimeoutInMillis": {
                        "override": 10000
                    },
                    "tlsNegotiationTimeoutInMillis": {
                        "add": 10000
                    },
                    "retryMode": {
                        "override": "adaptive"
                    }
                }
            }
        }

    def _create_default_config_resolver(self):
        """Build a resolver over the canned template."""
        return DefaultConfigResolver(self._template())

    @pytest.fixture
    def smart_defaults_factory(self):
        """Factory under test, with a mocked IMDS region provider."""
        fake_session = mock.Mock(spec=session.Session)
        fake_session.get_scoped_config.return_value = {}
        default_config_resolver = self._create_default_config_resolver()
        return SmartDefaultsConfigStoreFactory(
            default_config_resolver, imds_region_provider=mock.Mock()
        )

    @pytest.fixture
    def fake_session(self):
        """A session mock with an empty scoped config."""
        fake_session = mock.Mock(spec=session.Session)
        fake_session.get_scoped_config.return_value = {}
        return fake_session

    def _create_config_value_store(self, s3_mapping=None, **override_kwargs):
        """Build a ConfigValueStore with three provider flavors.

        BUG FIX: `s3_mapping` previously used a mutable default (`{}`),
        which is shared across all calls of the method; use None as the
        sentinel and create a fresh dict per call instead.
        """
        if s3_mapping is None:
            s3_mapping = {}
        provider_foo = ConstantProvider(value='foo')
        environment_provider_foo = EnvironmentProvider(
            name='AWS_RETRY_MODE',
            env={'AWS_RETRY_MODE': None}
        )
        fake_session = mock.Mock(spec=session.Session)
        fake_session.get_scoped_config.return_value = {}
        # Testing with three different providers to validate
        # SmartDefaultsConfigStoreFactory._get_new_chain_provider
        mapping = {
            'sts_regional_endpoints': ChainProvider(providers=[provider_foo]),
            'retry_mode': ChainProvider(providers=[environment_provider_foo]),
            's3': SectionConfigProvider('s3', fake_session, s3_mapping)
        }
        mapping.update(**override_kwargs)
        config_store = ConfigValueStore(mapping=mapping)
        return config_store

    def _create_os_environ_patcher(self):
        """Patch botocore's os.environ with a mock wrapping the real one."""
        return mock.patch.object(
            botocore.configprovider.os, 'environ',
            mock.Mock(wraps=os.environ)
        )

    def test_config_store_deepcopy(self):
        """A deep copy must not share provider mappings with its source."""
        config_store = ConfigValueStore()
        config_store.set_config_provider('foo', ConstantProvider('bar'))
        config_store_copy = copy.deepcopy(config_store)
        config_store_copy.set_config_provider('fizz', ConstantProvider('buzz'))
        assert config_store.get_config_variable('fizz') is None
        assert config_store_copy.get_config_variable('foo') == 'bar'

    @pytest.mark.parametrize(
        'defaults_mode, retry_mode, sts_regional_endpoints,'
        ' us_east_1_regional_endpoint, connect_timeout',
        [
            ('standard', 'standard', 'regional', 'regional', 2000),
            ('in-region', 'standard', 'regional', 'regional', 1000),
            ('cross-region', 'standard', 'regional', 'regional', 2800),
            ('mobile', 'adaptive', 'regional', 'regional', 10000),
        ]
    )
    def test_get_defualt_config_values(self, defaults_mode, retry_mode,
                                       sts_regional_endpoints,
                                       us_east_1_regional_endpoint,
                                       connect_timeout):
        """Each defaults mode applies its modifications to the base config."""
        # NOTE: the "defualt" typo in the method name is kept so existing
        # test-selection filters (e.g. pytest -k) keep matching.
        default_config_resolver = self._create_default_config_resolver()
        default_values = default_config_resolver.get_default_config_values(
            defaults_mode
        )
        assert default_values['retryMode'] == retry_mode
        assert default_values['stsRegionalEndpoints'] == sts_regional_endpoints
        assert default_values['s3UsEast1RegionalEndpoints'] == us_east_1_regional_endpoint
        assert default_values['connectTimeoutInMillis'] == connect_timeout

    def test_resolve_default_values_on_config(self, smart_defaults_factory,
                                              fake_session):
        """Merging standard-mode defaults fills in unset config variables."""
        config_store = self._create_config_value_store()
        smart_defaults_factory.merge_smart_defaults(
            config_store, 'standard', 'foo')
        s3_config = config_store.get_config_variable('s3')
        assert s3_config['us_east_1_regional_endpoint'] == 'regional'
        assert config_store.get_config_variable('retry_mode') == 'standard'
        assert config_store.get_config_variable('sts_regional_endpoints') == 'regional'
        assert config_store.get_config_variable('connect_timeout') == 2

    def test_no_resolve_default_s3_values_on_config(self,
                                                    smart_defaults_factory,
                                                    fake_session):
        """An explicitly-set s3 value is not overwritten by the defaults."""
        environment_provider = EnvironmentProvider(
            name='AWS_S3_US_EAST_1_REGIONAL_ENDPOINT',
            env={'AWS_S3_US_EAST_1_REGIONAL_ENDPOINT': 'legacy'}
        )
        s3_mapping = {
            'us_east_1_regional_endpoint': ChainProvider(
                providers=[environment_provider])
        }
        config_store = self._create_config_value_store(s3_mapping=s3_mapping)
        smart_defaults_factory.merge_smart_defaults(
            config_store, 'standard', 'foo')
        s3_config = config_store.get_config_variable('s3')
        assert s3_config['us_east_1_regional_endpoint'] == 'legacy'
        assert config_store.get_config_variable('retry_mode') == 'standard'
        assert config_store.get_config_variable('sts_regional_endpoints') == 'regional'
        assert config_store.get_config_variable('connect_timeout') == 2

    def test_resolve_default_s3_values_on_config(self, smart_defaults_factory,
                                                 fake_session):
        """Unrelated s3 sub-keys don't block the s3 default from merging."""
        s3_mapping = {
            'use_arn_region': ChainProvider(
                providers=[ConstantProvider(value=False)])
        }
        config_store = self._create_config_value_store(s3_mapping=s3_mapping)
        smart_defaults_factory.merge_smart_defaults(
            config_store, 'standard', 'foo')
        s3_config = config_store.get_config_variable('s3')
        assert s3_config['us_east_1_regional_endpoint'] == 'regional'
        assert config_store.get_config_variable('retry_mode') == 'standard'
        assert config_store.get_config_variable('sts_regional_endpoints') == 'regional'
        assert config_store.get_config_variable('connect_timeout') == 2

    @pytest.mark.parametrize(
        'execution_env_var, region_env_var, default_region_env_var, '
        'imds_region, client_region, resolved_mode',
        [
            ('AWS_Lambda_python3.6', 'us-east-1', None,
             None, 'us-east-1', 'in-region'),
            ('AWS_Lambda_python3.6', 'us-west-2', 'us-west-2',
             None, 'us-east-1', 'cross-region'),
            ('AWS_Lambda_python3.6', None, None,
             'us-west-2', 'us-east-1', 'cross-region'),
            (None, None, 'us-east-1',
             'us-east-1', 'us-east-1', 'in-region'),
            (None, None, None,
             'us-west-2', 'us-east-1', 'cross-region'),
            (None, None, None,
             None, 'us-west-2', 'standard'),
        ]
    )
    def test_resolve_auto_mode(self, execution_env_var, region_env_var,
                               default_region_env_var,
                               imds_region, client_region, resolved_mode):
        """Auto mode picks in-/cross-region/standard from env + IMDS data."""
        imds_region_provider = mock.Mock(spec=IMDSRegionProvider)
        imds_region_provider.provide.return_value = imds_region
        default_config_resolver = mock.Mock()
        with mock.patch.object(
            botocore.configprovider.os, 'environ',
            mock.Mock(wraps=os.environ)
        ) as os_environ_patcher:
            # side_effect order matters: it must match the sequence of
            # os.environ.get calls made inside resolve_auto_mode.
            os_environ_patcher.get.side_effect = [
                execution_env_var, default_region_env_var, region_env_var]
            smart_defaults_factory = SmartDefaultsConfigStoreFactory(
                default_config_resolver, imds_region_provider)
            mode = smart_defaults_factory.resolve_auto_mode(client_region)
            assert mode == resolved_mode

    def test_resolve_auto_mode_imds_region_provider_connect_timeout(self):
        """An IMDS connect timeout falls back to standard mode."""
        imds_region_provider = mock.Mock(spec=IMDSRegionProvider)
        imds_region_provider.provide.side_effect = ConnectTimeoutError(
            endpoint_url='foo')
        default_config_resolver = mock.Mock()
        with mock.patch.object(
            botocore.configprovider.os, 'environ',
            mock.Mock(wraps=os.environ)
        ) as os_environ_patcher:
            os_environ_patcher.get.side_effect = [None] * 3
            smart_defaults_factory = SmartDefaultsConfigStoreFactory(
                default_config_resolver, imds_region_provider)
            mode = smart_defaults_factory.resolve_auto_mode('us-west-2')
            assert mode == 'standard'
|
|
"""Tests for media.models."""
# pylint: disable=maybe-no-member, too-many-instance-attributes
from base64 import urlsafe_b64encode
from unittest import skipIf
import hashlib
import os
import re
from django.core.files import File
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from mock import patch, Mock
from model_mommy import mommy
from PIL import Image as PILImage
from open_connect.media import models
from open_connect.media.models import Image
from open_connect.media.tests.test_utils import gifsicle_not_installed
from open_connect.connect_core.utils.basetests import ConnectTestMixin
class ImageTest(ConnectTestMixin, TestCase):
    """Tests for Image model"""
    def setUp(self):
        """Record the fixture directory (this test module's directory)."""
        super(ImageTest, self).setUp()
        self.path = os.path.dirname(os.path.abspath(__file__))
    def get_image(self, filename):
        """Returns the specified image."""
        path = os.path.join(self.path, filename)
        image = Image()
        # NOTE(review): this file handle is never explicitly closed; it
        # stays open for the life of the File wrapper. Consider addCleanup.
        image.image = File(open(path))
        image.user = self.create_user()
        # process=False skips the processing pipeline during fixture setup
        # (see test_image_process_called for what process=True triggers).
        image.save(process=False)
        return image
    def get_large_image(self):
        """Returns the large image."""
        return self.get_image('1000x500.png')
    def get_small_image(self):
        """Returns the small image."""
        return self.get_image('200x200.png')
    def get_animated_image(self):
        """Returns the animated GIF"""
        return self.get_image('animation.gif')
    def get_exif_image(self):
        """Returns the exif image"""
        return self.get_image('exif.jpg')
    def test_create_display_size(self):
        """Test creating a display_image."""
        largeimage = self.get_large_image()
        largeimage.create_display_size()
        smallimage = self.get_small_image()
        smallimage.create_display_size()
        # Confirm that the large image was resized, the small was not
        self.assertEqual(smallimage.image, smallimage.display_image)
        self.assertNotEqual(largeimage.image, largeimage.display_image)
        # Confirm that the large image is at or below 600x600
        largeimage.display_image.open()
        large_image_display = PILImage.open(largeimage.display_image)
        self.assertLessEqual(large_image_display.size, (600, 600))
    def test_create_thumbnail(self):
        """Test creating a thumbnail."""
        largeimage = self.get_large_image()
        largeimage.create_thumbnail()
        largeimage.thumbnail.open()
        thumbnail = PILImage.open(largeimage.thumbnail)
        self.assertLessEqual(thumbnail.size, (200, 200))
    @skipIf(gifsicle_not_installed(), 'Gifsicle not installed')
    def test_create_thumbnail_animation(self):
        """Test creating a thumbnail of an animated GIF."""
        # pylint: disable=expression-not-assigned
        animatedimage = self.get_animated_image()
        animatedimage.image.open()
        image = PILImage.open(animatedimage.image)
        # Confirm there are 4 frames by ensuring that the 5th frame raises an
        # error
        [image.seek(frame) for frame in range(0, 4)]
        with self.assertRaises(ValueError):
            image.seek(5)
        animatedimage.create_thumbnail()
        animatedimage.thumbnail.open()
        thumbnail = PILImage.open(animatedimage.thumbnail)
        # Confirm that there are the same number of frames (4) as the original
        [thumbnail.seek(frame) for frame in range(0, 4)]
        with self.assertRaises(ValueError):
            thumbnail.seek(5)
        self.assertLessEqual(thumbnail.size, (200, 200))
    @patch('open_connect.media.models.resize_gif')
    def test_create_thumbnail_animation_no_gifsicle(self, mock_resize):
        """Test resizing an image when gifsicle is not installed"""
        # resize_gif returning ('', True) simulates a failed/unavailable
        # gifsicle run; the thumbnail should then fall back to the original.
        mock_resize.return_value = ('', True)
        animatedimage = self.get_animated_image()
        mock_resize.assert_called_once()
        animatedimage.create_thumbnail()
        # Open both, confirm that the thumbnail is identical to the image
        animatedimage.image.open()
        animatedimage.thumbnail.open()
        # Hash both files to confirm they are the same
        image_hash = hashlib.md5(animatedimage.image.read()).hexdigest()
        thumbnail_hash = hashlib.md5(animatedimage.thumbnail.read()).hexdigest()
        self.assertEqual(image_hash, thumbnail_hash)
    def test_process_exif_data(self):
        """Test grabbing exif data from an image"""
        image = self.get_exif_image()
        image.image = File(open(os.path.join(self.path, 'exif.jpg')))
        image.save()
        # Exif is empty until process_exif_data() is called explicitly.
        self.assertFalse(image.exif)
        image.process_exif_data()
        self.assertTrue(image.exif)
        exif_data = image.exif
        self.assertEqual(exif_data['ExifImageWidth'], 375)
        self.assertEqual(exif_data['ExifImageHeight'], 500)
        self.assertEqual(
            exif_data['LensModel'], u'iPhone 5s back camera 4.15mm f/2.2')
        self.assertEqual(exif_data['Model'], 'iPhone 5s')
    def test_process_exif_data_when_no_data(self):
        """Test process_exif_data when there is no exif data"""
        image = self.get_small_image()
        self.assertFalse(image.exif)
        image.process_exif_data()
        self.assertFalse(image.exif)
    @patch.object(models, 'PILImage')
    def test_process_exif_getexif_returns_none(self, mock_pilimage):
        """If _getexif returns None, don't fail."""
        # pylint: disable=protected-access
        mock_original = Mock()
        mock_original._getexif.return_value = None
        mock_pilimage.open.return_value = mock_original
        image = self.get_small_image()
        image.process_exif_data()
        self.assertEqual(mock_original._getexif.call_count, 1)
    def test_process_exif_raises_unicode_decode_error(self):
        """Handle UnicodeDecodeError gracefully when saving exif."""
        image = self.get_exif_image()
        # Patch save() to raise so the exception path returns None instead
        # of propagating.
        with patch.object(image, 'save') as mock_save:
            mock_save.side_effect = UnicodeDecodeError(b'utf-8', b'', 0, 1, 'a')
            self.assertIsNone(image.process_exif_data())
    def test_get_thumbnail(self):
        """Test getting the thumbnail."""
        # Before create_thumbnail() runs, get_thumbnail falls back to the
        # original image.
        image = self.get_large_image()
        self.assertEqual(image.image, image.get_thumbnail)
        image.create_thumbnail()
        self.assertNotEqual(image.image, image.thumbnail)
    def test_get_display_size(self):
        """Test getting the display size."""
        # Same fallback behavior as get_thumbnail: original until created.
        image = self.get_large_image()
        self.assertEqual(image.image, image.get_display_image)
        image.create_display_size()
        self.assertNotEqual(image.image, image.get_display_image)
    @patch('open_connect.media.models.process_image')
    def test_image_process_called(self, mock):
        """process_image is called when save is called with process=True."""
        image = self.get_small_image()
        image.save(process=True)
        # process_image is dispatched asynchronously via .delay (task queue).
        self.assertTrue(mock.delay.called)
    def test_serializable(self):
        """Test serializable method."""
        image = self.get_small_image()
        serialized = image.serializable()
        self.assertEqual(serialized['pk'], image.pk)
        self.assertEqual(
            serialized['image_url'], image.get_absolute_url())
        self.assertEqual(
            serialized['display_image_url'],
            image.get_display_image.url
        )
        self.assertEqual(
            serialized['thumbnail_url'], image.get_thumbnail.url)
    def test_file_name(self):
        """Should return just the name of the file without any path."""
        image = self.get_small_image()
        # Pattern: 6-digit prefix, 32-hex-char hash, original extension.
        self.assertTrue(
            re.search(
                r'^[1-4][0-9]{5}\.[0-9a-f]{32}\.png$',
                image.file_name()
            )
        )
class ImagePopularityManagerTest(ConnectTestMixin, TestCase):
    """Tests for the Image Popularity manager"""
    def setUp(self):
        """Setup for image popularity manager tests"""
        # Three viewers with different privileges, all in the same group.
        self.banned_user = self.create_user(is_banned=True, is_superuser=True)
        self.super_user = self.create_superuser()
        self.normal_user = self.create_user()
        self.group = mommy.make('groups.Group')
        self.banned_user.add_to_group(self.group.pk)
        self.super_user.add_to_group(self.group.pk)
        self.normal_user.add_to_group(self.group.pk)
        # The default test client is authenticated as the superuser.
        self.client.login(username=self.super_user.email, password='moo')
        path = os.path.dirname(os.path.abspath(__file__))
        self.largefile = path + '/1000x500.png'
        self.smallfile = path + '/200x200.png'
        self.largeimage = Image()
        self.largeimage.image = File(open(self.largefile))
        self.largeimage.user = self.super_user
        self.largeimage.save()
        self.smallimage = Image()
        self.smallimage.image = File(open(self.smallfile))
        self.smallimage.user = self.super_user
        self.smallimage.save()
    def test_with_user(self):
        """Should return images attached to approved messages."""
        # Create a new thread and message
        thread = mommy.make('connectmessages.Thread', group=self.group)
        message = mommy.make(
            'connectmessages.Message',
            thread=thread,
            sender=self.normal_user,
            status='approved'
        )
        # Create and attach a new image
        image1 = Image()
        image1.user = message.sender
        image1.image = File(open(self.smallfile))
        image1.save()
        message.images.add(image1)
        result = self.client.get(reverse('admin_gallery'))
        self.assertEqual(result.status_code, 200)
        self.assertIn(image1, result.context['images'])
    def test_with_user_non_group_message(self):
        """Images posted in direct messages shouldn't be returned."""
        # Create a new thread and message
        thread = mommy.make(
            'connectmessages.Thread', group=None, thread_type='direct')
        message = mommy.make(
            'connectmessages.Message',
            thread=thread,
            sender=self.normal_user,
            status='spam'
        )
        # Create and attach a new image
        image1 = Image()
        image1.user = message.sender
        image1.image = File(open(self.smallfile))
        image1.save()
        message.images.add(image1)
        result = self.client.get(reverse('admin_gallery'))
        self.assertEqual(result.status_code, 200)
        self.assertNotIn(image1, result.context['images'])
    def test_with_user_message_not_approved(self):
        """Images that are not approved should not be returned."""
        # NOTE(review): despite the docstring, this asserts the image IS
        # present — presumably because the logged-in viewer is a superuser
        # (see setUp). Confirm the intended visibility rule and docstring.
        # Create a new thread and message
        thread = mommy.make('connectmessages.Thread', group=self.group)
        message = mommy.make(
            'connectmessages.Message',
            thread=thread,
            sender=self.normal_user,
            status='spam'
        )
        # Create and attach a new image
        image1 = Image()
        image1.user = message.sender
        image1.image = File(open(self.smallfile))
        image1.save()
        message.images.add(image1)
        result = self.client.get(reverse('admin_gallery'))
        self.assertEqual(result.status_code, 200)
        self.assertIn(image1, result.context['images'])
    def test_with_user_message_not_approved_user_is_sender(self):
        """Images that are not approved should be returned to the sender."""
        # Create a new thread and message
        thread = mommy.make('connectmessages.Thread', group=self.group)
        message = mommy.make(
            'connectmessages.Message',
            thread=thread,
            sender=self.super_user,
            status='spam'
        )
        # Create and attach a new image
        image1 = Image()
        image1.user = message.sender
        image1.image = File(open(self.smallfile))
        image1.save()
        message.images.add(image1)
        result = self.client.get(reverse('admin_gallery'))
        self.assertEqual(result.status_code, 200)
        self.assertIn(image1, result.context['images'])
    def test_with_user_no_images_from_banned_users(self):
        """Images from banned users shouldn't be present."""
        # Create a new thread and message
        thread = mommy.make('connectmessages.Thread', group=self.group)
        message = mommy.make(
            'connectmessages.Message', thread=thread, sender=self.banned_user)
        # Create and attach a new image
        image1 = Image()
        image1.user = message.sender
        image1.image = File(open(self.smallfile))
        image1.save()
        message.images.add(image1)
        result = self.client.get(reverse('admin_gallery'))
        self.assertEqual(result.status_code, 200)
        self.assertNotIn(image1, result.context['images'])
    def test_with_user_current_user_is_banned(self):
        """Images from banned users should be visible to the banned user.."""
        # Create a new thread and message
        thread = mommy.make('connectmessages.Thread', group=self.group)
        message = mommy.make(
            'connectmessages.Message', thread=thread, sender=self.banned_user)
        # Create and attach a new image
        image1 = Image()
        image1.user = message.sender
        image1.image = File(open(self.smallfile))
        image1.save()
        message.images.add(image1)
        # Use a fresh client authenticated as the banned user (the default
        # self.client is logged in as the superuser).
        client = Client()
        client.post(
            reverse('account_login'),
            {'login': self.banned_user.email, 'password': 'moo'}
        )
        result = client.get(reverse('admin_gallery'))
        self.assertEqual(result.status_code, 200)
        self.assertIn(image1, result.context['images'])
class Base64URLShortenerTest(TestCase):
    """Tests for Base64URLShortener."""

    def setUp(self):
        """Create a fresh shortener for each test."""
        self.shortener = models.Base64URLShortener()

    def test_shorten(self):
        """Shortening 123 yields its urlsafe base64 representation."""
        self.assertEqual(self.shortener.shorten(123), 'MTIz')

    def test_expand(self):
        """Expanding the shortened form round-trips back to '123'."""
        self.assertEqual(self.shortener.expand('MTIz'), '123')
class ShortenedURLTest(TestCase):
    """Tests for ShortenedURL model."""

    def setUp(self):
        """Create a ShortenedURL fixture pointing at google.com."""
        self.url = models.ShortenedURL.objects.create(
            url='http://www.google.com')

    def test_save_without_short_code(self):
        """Saving without a short code derives one from the primary key."""
        expected_code = urlsafe_b64encode(str(self.url.pk)).strip('=')
        self.assertEqual(self.url.short_code, expected_code)

    def test_save_with_short_code(self):
        """Test that saving ShortenedURL doesn't override a preset short_code"""
        shortened = models.ShortenedURL.objects.create(
            url='http://www.thisisanewurl.com',
            short_code='something crazy',
        )
        self.assertEqual(shortened.short_code, 'something crazy')

    def test_get_absolute_url(self):
        """get_absolute_url points at the redirect view for the code."""
        expected_url = reverse(
            'shortened_url_redirect', kwargs={'code': self.url.short_code})
        self.assertEqual(self.url.get_absolute_url(), expected_url)

    def test_click_increases_click_count(self):
        """click() increments the persisted click_count by one."""
        count_before = self.url.click_count
        self.url.click()
        refreshed = models.ShortenedURL.objects.get(pk=self.url.pk)
        self.assertEqual(refreshed.click_count, count_before + 1)

    def test_click_creates_shortened_url_click(self):
        """click() records a new ShortenedURLClick row."""
        clicks_before = self.url.shortenedurlclick_set.count()
        self.url.click()
        refreshed = models.ShortenedURL.objects.get(pk=self.url.pk)
        self.assertEqual(
            refreshed.shortenedurlclick_set.count(), clicks_before + 1)

    def test_unicode(self):
        """The unicode form names the class, pk, and target url."""
        self.assertEqual(
            unicode(self.url),
            u'ShortenedURL %s: %s' % (self.url.pk, self.url.url)
        )
|
|
#!/usr/bin/env python
##===-----------------------------------------------------------------------------*- Python -*-===##
## _
## | |
## __| | __ ___ ___ ___
## / _` |/ _` \ \ /\ / / '_ |
## | (_| | (_| |\ V V /| | | |
## \__,_|\__,_| \_/\_/ |_| |_| - Compiler Toolchain
##
##
## This file is distributed under the MIT License (MIT).
## See LICENSE.txt for details.
##
##===------------------------------------------------------------------------------------------===##
"""Generate input for the ICON Laplacian stencil test. This is the classic Finite Volume vector Laplacian.
Unfortunately, it is not used in operational simulations because of bad convergence."""
import argparse
import os
import dawn4py
from dawn4py.serialization import SIR, AST
from dawn4py.serialization import utils as serial_utils
from google.protobuf.json_format import MessageToJson, Parse
def main(args: argparse.Namespace):
    """Build the SIR for the ICON FV vector Laplacian stencil, compile it
    with the naive unstructured C++ backend, and write the generated code.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI options; only ``args.verbose`` is read (dump SIR as JSON).
    """
    stencil_name = "ICON_laplacian_stencil"
    # FIX: removed the unused `sir_outputfile` local — the SIR is never
    # written to disk anywhere in this function.
    gen_outputfile = f"{stencil_name}.cpp"

    interval = serial_utils.make_interval(AST.Interval.Start, AST.Interval.End, 0, 0)

    # Stencil body: curl on vertices and divergence on cells, then the two
    # nabla2 contributions gathered back onto edges and combined.
    body_ast = serial_utils.make_ast(
        [
            # rot_vec = sum over edges of (vec * geofac_rot)  [Vertex <- Edge]
            serial_utils.make_assignment_stmt(
                serial_utils.make_field_access_expr("rot_vec"),
                serial_utils.make_reduction_over_neighbor_expr(
                    op="+",
                    init=serial_utils.make_literal_access_expr("0.0", AST.BuiltinType.Double),
                    rhs=serial_utils.make_binary_operator(
                        serial_utils.make_field_access_expr("vec", [True, 0]),
                        "*",
                        serial_utils.make_field_access_expr("geofac_rot"),
                    ),
                    chain=[AST.LocationType.Value("Vertex"), AST.LocationType.Value("Edge")],
                ),
                "=",
            ),
            # div_vec = sum over edges of (vec * geofac_div)  [Cell <- Edge]
            serial_utils.make_assignment_stmt(
                serial_utils.make_field_access_expr("div_vec"),
                serial_utils.make_reduction_over_neighbor_expr(
                    op="+",
                    init=serial_utils.make_literal_access_expr("0.0", AST.BuiltinType.Double),
                    rhs=serial_utils.make_binary_operator(
                        serial_utils.make_field_access_expr("vec", [True, 0]),
                        "*",
                        serial_utils.make_field_access_expr("geofac_div"),
                    ),
                    chain=[AST.LocationType.Value("Cell"), AST.LocationType.Value("Edge")],
                ),
                "=",
            ),
            # nabla2t1_vec = weighted rot_vec difference  [Edge <- Vertex]
            serial_utils.make_assignment_stmt(
                serial_utils.make_field_access_expr("nabla2t1_vec"),
                serial_utils.make_reduction_over_neighbor_expr(
                    op="+",
                    init=serial_utils.make_literal_access_expr(
                        "0.0", AST.BuiltinType.Double),
                    rhs=serial_utils.make_field_access_expr("rot_vec", [True, 0]),
                    chain=[AST.LocationType.Value(
                        "Edge"), AST.LocationType.Value("Vertex")],
                    weights=[serial_utils.make_literal_access_expr(
                        "-1.0", AST.BuiltinType.Double), serial_utils.make_literal_access_expr(
                        "1.0", AST.BuiltinType.Double)]
                ),
                "=",
            ),
            # nabla2t1_vec = tangent_orientation * nabla2t1_vec / primal_edge_length
            serial_utils.make_assignment_stmt(
                serial_utils.make_field_access_expr("nabla2t1_vec"),
                serial_utils.make_binary_operator(
                    serial_utils.make_binary_operator(
                        serial_utils.make_field_access_expr("tangent_orientation"),
                        "*",
                        serial_utils.make_field_access_expr("nabla2t1_vec"),
                    ),
                    "/",
                    serial_utils.make_field_access_expr("primal_edge_length"),
                ),
                "=",
            ),
            # nabla2t2_vec = weighted div_vec difference  [Edge <- Cell]
            serial_utils.make_assignment_stmt(
                serial_utils.make_field_access_expr("nabla2t2_vec"),
                serial_utils.make_reduction_over_neighbor_expr(
                    op="+",
                    init=serial_utils.make_literal_access_expr(
                        "0.0", AST.BuiltinType.Double),
                    rhs=serial_utils.make_field_access_expr("div_vec", [True, 0]),
                    chain=[AST.LocationType.Value(
                        "Edge"), AST.LocationType.Value("Cell")],
                    weights=[serial_utils.make_literal_access_expr(
                        "-1.0", AST.BuiltinType.Double), serial_utils.make_literal_access_expr(
                        "1.0", AST.BuiltinType.Double)]
                ),
                "=",
            ),
            # nabla2t2_vec = nabla2t2_vec / dual_edge_length
            serial_utils.make_assignment_stmt(
                serial_utils.make_field_access_expr("nabla2t2_vec"),
                serial_utils.make_binary_operator(
                    serial_utils.make_field_access_expr("nabla2t2_vec"),
                    "/",
                    serial_utils.make_field_access_expr("dual_edge_length"),
                ),
                "=",
            ),
            # nabla2_vec = nabla2t2_vec - nabla2t1_vec
            serial_utils.make_assignment_stmt(
                serial_utils.make_field_access_expr("nabla2_vec"),
                serial_utils.make_binary_operator(
                    serial_utils.make_field_access_expr("nabla2t2_vec"),
                    "-",
                    serial_utils.make_field_access_expr("nabla2t1_vec"),
                ),
                "=",
            ),
        ]
    )

    vertical_region_stmt = serial_utils.make_vertical_region_decl_stmt(
        body_ast, interval, AST.VerticalRegion.Forward
    )

    # Declare the stencil, its fields, and their unstructured dimensions.
    sir = serial_utils.make_sir(
        gen_outputfile,
        AST.GridType.Value("Unstructured"),
        [
            serial_utils.make_stencil(
                stencil_name,
                serial_utils.make_ast([vertical_region_stmt]),
                [
                    serial_utils.make_field(
                        "vec",
                        serial_utils.make_field_dimensions_unstructured(
                            [AST.LocationType.Value("Edge")], 1
                        ),
                    ),
                    serial_utils.make_field(
                        "div_vec",
                        serial_utils.make_field_dimensions_unstructured(
                            [AST.LocationType.Value("Cell")], 1
                        ),
                    ),
                    serial_utils.make_field(
                        "rot_vec",
                        serial_utils.make_field_dimensions_unstructured(
                            [AST.LocationType.Value("Vertex")], 1
                        ),
                    ),
                    serial_utils.make_field(
                        "nabla2t1_vec",
                        serial_utils.make_field_dimensions_unstructured(
                            [AST.LocationType.Value("Edge")], 1
                        ),
                        is_temporary=True
                    ),
                    serial_utils.make_field(
                        "nabla2t2_vec",
                        serial_utils.make_field_dimensions_unstructured(
                            [AST.LocationType.Value("Edge")], 1
                        ),
                        is_temporary=True
                    ),
                    serial_utils.make_field(
                        "nabla2_vec",
                        serial_utils.make_field_dimensions_unstructured(
                            [AST.LocationType.Value("Edge")], 1
                        ),
                    ),
                    serial_utils.make_field(
                        "primal_edge_length",
                        serial_utils.make_field_dimensions_unstructured(
                            [AST.LocationType.Value("Edge")], 1
                        ),
                    ),
                    serial_utils.make_field(
                        "dual_edge_length",
                        serial_utils.make_field_dimensions_unstructured(
                            [AST.LocationType.Value("Edge")], 1
                        ),
                    ),
                    serial_utils.make_field(
                        "tangent_orientation",
                        serial_utils.make_field_dimensions_unstructured(
                            [AST.LocationType.Value("Edge")], 1
                        ),
                    ),
                    serial_utils.make_field(
                        "geofac_rot",
                        serial_utils.make_field_dimensions_unstructured(
                            [AST.LocationType.Value("Vertex"), AST.LocationType.Value("Edge")], 1
                        ),
                    ),
                    serial_utils.make_field(
                        "geofac_div",
                        serial_utils.make_field_dimensions_unstructured(
                            [AST.LocationType.Value("Cell"), AST.LocationType.Value("Edge")], 1
                        ),
                    ),
                ],
            ),
        ],
    )

    # print the SIR
    if args.verbose:
        print(MessageToJson(sir))

    # compile
    code = dawn4py.compile(sir, groups=[], backend=dawn4py.CodeGenBackend.CXXNaiveIco)

    # write to file
    print(f"Writing generated code to '{gen_outputfile}'")
    with open(gen_outputfile, "w") as f:
        f.write(code)
if __name__ == "__main__":
    # CLI entry point: the only flag is -v/--verbose to dump the SIR.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="store_true",
        default=False,
        help="Print the generated SIR",
    )
    main(parser.parse_args())
|
|
import numbers
import numpy as np
from scipy.stats.distributions import randint
from scipy.stats.distributions import rv_discrete
from scipy.stats.distributions import uniform
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import check_random_state
from sklearn.utils.fixes import sp_version
class _Identity:
"""Identity transform."""
def fit(self, X):
return self
def transform(self, X):
return X
def inverse_transform(self, Xt):
return Xt
class _Log10:
"""Base 10 logarithm transform."""
def fit(self, X):
return self
def transform(self, X):
return np.log10(np.asarray(X, dtype=np.float))
def inverse_transform(self, Xt):
return 10.0 ** np.asarray(Xt, dtype=np.float)
class _CategoricalEncoder:
    """OneHotEncoder that can handle categorical variables."""

    def __init__(self):
        """Convert labeled categories into one-hot encoded features."""
        self._lb = LabelBinarizer()

    def fit(self, X):
        """Fit a list or array of categories.

        Parameters
        ----------
        * `X` [array-like, shape=(n_categories,)]:
            List of categories.
        """
        # Assign each category its position; duplicates keep the last index,
        # matching dict-comprehension semantics.
        index_of = {}
        for position, category in enumerate(X):
            index_of[category] = position
        self.mapping_ = index_of
        self.inverse_mapping_ = {
            position: category for category, position in index_of.items()
        }
        self._lb.fit([index_of[category] for category in X])
        self.n_classes = len(self._lb.classes_)
        return self

    def transform(self, X):
        """Transform an array of categories to a one-hot encoded
        representation.

        Parameters
        ----------
        * `X` [array-like, shape=(n_samples,)]:
            List of categories.

        Returns
        -------
        * `Xt` [array-like, shape=(n_samples, n_categories)]:
            The one-hot encoded categories.
        """
        encoded = [self.mapping_[category] for category in X]
        return self._lb.transform(encoded)

    def inverse_transform(self, Xt):
        """Inverse transform one-hot encoded categories back to their
        original representation.

        Parameters
        ----------
        * `Xt` [array-like, shape=(n_samples, n_categories)]:
            One-hot encoded categories.

        Returns
        -------
        * `X` [array-like, shape=(n_samples,)]:
            The original categories.
        """
        labels = self._lb.inverse_transform(np.asarray(Xt))
        return [self.inverse_mapping_[label] for label in labels]
class Dimension(object):
    """Base class for search space dimensions."""

    def rvs(self, n_samples=1, random_state=None):
        """Draw random samples.

        Parameters
        ----------
        * `n_samples` [int or None]:
            The number of samples to be drawn.

        * `random_state` [int, RandomState instance, or None (default)]:
            Set random state to something other than None for reproducible
            results.
        """
        # `self._rvs` is the frozen distribution set up by the subclass.
        generator = check_random_state(random_state)
        drawn = self._rvs.rvs(size=n_samples, random_state=generator)
        return self.inverse_transform(drawn)

    def transform(self, X):
        """Transform samples from the original space into a warped space."""
        return self.transformer.transform(X)

    def inverse_transform(self, Xt):
        """Inverse transform samples from the warped space back into the
        original space.
        """
        return self.transformer.inverse_transform(Xt)

    @property
    def size(self):
        # A scalar dimension contributes one entry per sample.
        return 1

    @property
    def transformed_size(self):
        return 1

    @property
    def bounds(self):
        raise NotImplementedError

    @property
    def transformed_bounds(self):
        raise NotImplementedError
class Real(Dimension):
    def __init__(self, low, high, prior="uniform"):
        """Search space dimension that can take on any real value.

        Parameters
        ----------
        * `low` [float]:
            Lower bound (inclusive).

        * `high` [float]:
            Upper bound (exclusive).

        * `prior` ["uniform" or "log-uniform", default="uniform"]:
            Distribution to use when sampling random points for this
            dimension.

            - If `"uniform"`, points are sampled uniformly between the lower
              and upper bounds.
            - If `"log-uniform"`, points are sampled uniformly between
              `log10(lower)` and `log10(upper)`.

        Raises
        ------
        ValueError
            If `prior` is neither "uniform" nor "log-uniform".
        """
        self._low = low
        self._high = high
        self.prior = prior

        if prior == "uniform":
            self._rvs = uniform(self._low, self._high - self._low)
            self.transformer = _Identity()
        elif prior == "log-uniform":
            # Sample in log10 space; _Log10 maps back and forth.
            self._rvs = uniform(
                np.log10(self._low),
                np.log10(self._high) - np.log10(self._low))
            self.transformer = _Log10()
        else:
            # Bug fix: the message previously interpolated `self._rvs`,
            # which is never assigned on this path, so an AttributeError was
            # raised instead of the intended ValueError naming the bad prior.
            raise ValueError(
                "Prior should be either 'uniform' or 'log-uniform', "
                "got '%s'." % prior)

    def inverse_transform(self, Xt):
        """Inverse transform samples from the warped space back into the
        original space.
        """
        # `float` replaces the deprecated `np.float` alias (removed in
        # NumPy 1.20).
        return super(Real, self).inverse_transform(Xt).astype(float)

    @property
    def bounds(self):
        return (self._low, self._high)

    @property
    def transformed_bounds(self):
        if self.prior == "uniform":
            return (self._low, self._high)
        else:  # self.prior == "log-uniform"
            return (np.log10(self._low), np.log10(self._high))
class Integer(Dimension):
    def __init__(self, low, high):
        """Search space dimension that can take on integer values.

        Parameters
        ----------
        * `low` [int]:
            Lower bound (inclusive).

        * `high` [int]:
            Upper bound (inclusive).
        """
        self._low = low
        self._high = high
        # randint's upper bound is exclusive, hence the + 1 to make
        # `high` inclusive.
        self._rvs = randint(self._low, self._high + 1)
        self.transformer = _Identity()

    def inverse_transform(self, Xt):
        """Inverse transform samples from the warped space back into the
        original space.
        """
        # The concatenation of all transformed dimensions makes Xt to be
        # of type float, hence the required cast back to int.
        # `int` replaces the deprecated `np.int` alias (removed in
        # NumPy 1.20).
        return super(Integer, self).inverse_transform(Xt).astype(int)

    @property
    def bounds(self):
        return (self._low, self._high)

    @property
    def transformed_bounds(self):
        return (self._low, self._high)
class Categorical(Dimension):
    def __init__(self, categories, prior=None):
        """Search space dimension that can take on categorical values.

        Parameters
        ----------
        * `categories` [list, shape=(n_categories,)]:
            Sequence of possible categories.

        * `prior` [list, shape=(categories,), default=None]:
            Prior probabilities for each category. By default all categories
            are equally likely.
        """
        self.categories = categories
        self.transformer = _CategoricalEncoder()
        self.transformer.fit(self.categories)

        n_categories = len(self.categories)
        if prior is None:
            # Uniform prior over all categories.
            prior = np.tile(1. / n_categories, n_categories)

        # XXX check that sum(prior) == 1
        self._rvs = rv_discrete(values=(range(n_categories), prior))

    def rvs(self, n_samples=None, random_state=None):
        drawn = self._rvs.rvs(size=n_samples, random_state=random_state)

        if isinstance(drawn, numbers.Integral):
            # A scalar draw maps to a single category.
            return self.categories[drawn]
        return [self.categories[index] for index in drawn]

    @property
    def transformed_size(self):
        # when len(categories) == 2, CategoricalEncoder outputs a single value
        n_categories = len(self.categories)
        if n_categories == 2:
            return 1
        return n_categories

    @property
    def bounds(self):
        return self.categories

    @property
    def transformed_bounds(self):
        if self.transformed_size == 1:
            return (0.0, 1.0)
        return [(0.0, 1.0) for _ in range(self.transformed_size)]
class Space:
    """Search space."""

    def __init__(self, dimensions):
        """Initialize a search space from given specifications.

        Parameters
        ----------
        * `dimensions` [list, shape=(n_dims,)]:
            List of search space dimensions.
            Each search dimension can be defined either as

            - a `(upper_bound, lower_bound)` tuple (for `Real` or `Integer`
              dimensions),
            - a `(upper_bound, lower_bound, "prior")` tuple (for `Real`
              dimensions),
            - as a list of categories (for `Categorical` dimensions), or
            - an instance of a `Dimension` object (`Real`, `Integer` or
              `Categorical`).
        """
        _dimensions = []

        for dim in dimensions:
            if isinstance(dim, Dimension):
                _dimensions.append(dim)
            # (low, high, "prior") with a numeric first entry -> Real.
            elif (len(dim) == 3 and
                  isinstance(dim[0], numbers.Real) and
                  isinstance(dim[2], str)):
                _dimensions.append(Real(*dim))
            # More than two entries, or string entries -> Categorical.
            elif len(dim) > 2 or isinstance(dim[0], str):
                _dimensions.append(Categorical(dim))
            elif isinstance(dim[0], numbers.Integral):
                _dimensions.append(Integer(*dim))
            elif isinstance(dim[0], numbers.Real):
                _dimensions.append(Real(*dim))
            else:
                raise ValueError("Invalid grid component (got %s)." % dim)

        self.dimensions = _dimensions

    @property
    def is_real(self):
        """
        Returns true if all dimensions are Real
        """
        return all([isinstance(dim, Real) for dim in self.dimensions])

    def rvs(self, n_samples=1, random_state=None):
        """Draw random samples.

        The samples are in the original space. They need to be transformed
        before being passed to a model or minimizer by `space.transform()`.

        Parameters
        ----------
        * `n_samples` [int, default=1]:
            Number of samples to be drawn from the space.

        * `random_state` [int, RandomState instance, or None (default)]:
            Set random state to something other than None for reproducible
            results.

        Returns
        -------
        * `points`: [list of lists, shape=(n_points, n_dims)]
            Points sampled from the space.
        """
        rng = check_random_state(random_state)

        # Draw
        columns = []

        for dim in self.dimensions:
            # scipy < 0.16 does not accept a random_state kwarg in rvs.
            if sp_version < (0, 16):
                columns.append(dim.rvs(n_samples=n_samples))
            else:
                columns.append(dim.rvs(n_samples=n_samples, random_state=rng))

        # Transpose from per-dimension columns to per-sample rows.
        rows = []

        for i in range(n_samples):
            r = []
            for j in range(self.n_dims):
                r.append(columns[j][i])

            rows.append(r)

        return rows

    def transform(self, X):
        """Transform samples from the original space into a warped space.

        Note: this transformation is expected to be used to project samples
              into a suitable space for numerical optimization.

        Parameters
        ----------
        * `X` [list of lists, shape=(n_samples, n_dims)]:
            The samples to transform.

        Returns
        -------
        * `Xt` [array of floats, shape=(n_samples, transformed_n_dims)]
            The transformed samples.
        """
        # Pack by dimension
        columns = []
        for dim in self.dimensions:
            columns.append([])

        for i in range(len(X)):
            for j in range(self.n_dims):
                columns[j].append(X[i][j])

        # Transform each dimension's column independently.
        for j in range(self.n_dims):
            columns[j] = self.dimensions[j].transform(columns[j])

        # Repack as an array; categorical dimensions may expand to several
        # one-hot columns, hence the reshape to (n_samples, -1).
        Xt = np.hstack([np.asarray(c).reshape((len(X), -1)) for c in columns])
        # `float` replaces the deprecated `np.float` alias (removed in
        # NumPy 1.20).
        Xt = Xt.astype(float)

        return Xt

    def inverse_transform(self, Xt):
        """Inverse transform samples from the warped space back to the
        original space.

        Parameters
        ----------
        * `Xt` [array of floats, shape=(n_samples, transformed_n_dims)]:
            The samples to inverse transform.

        Returns
        -------
        * `X` [list of lists, shape=(n_samples, n_dims)]
            The original samples.
        """
        # Inverse transform, slicing out each dimension's column block.
        columns = []
        start = 0

        for j in range(self.n_dims):
            dim = self.dimensions[j]
            offset = dim.transformed_size

            if offset == 1:
                columns.append(dim.inverse_transform(Xt[:, start]))
            else:
                columns.append(
                    dim.inverse_transform(Xt[:, start:start+offset]))

            start += offset

        # Transpose from per-dimension columns to per-sample rows.
        rows = []

        for i in range(len(Xt)):
            r = []
            for j in range(self.n_dims):
                r.append(columns[j][i])

            rows.append(r)

        return rows

    @property
    def n_dims(self):
        """The dimensionality of the original space."""
        return len(self.dimensions)

    @property
    def transformed_n_dims(self):
        """The dimensionality of the warped space."""
        return sum([dim.transformed_size for dim in self.dimensions])

    @property
    def bounds(self):
        """The dimension bounds, in the original space."""
        b = []

        for dim in self.dimensions:
            if dim.size == 1:
                b.append(dim.bounds)
            else:
                b.extend(dim.bounds)

        return b

    @property
    def transformed_bounds(self):
        """The dimension bounds, in the warped space."""
        b = []

        for dim in self.dimensions:
            if dim.transformed_size == 1:
                b.append(dim.transformed_bounds)
            else:
                b.extend(dim.transformed_bounds)

        return b
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import os
import warnings
import functools
import keyring
import numpy as np
import astropy.units as u
import astropy.io.votable as votable
from astropy import coordinates
import six
from astropy.table import Table
from astroquery import log
from bs4 import BeautifulSoup
from ..query import QueryWithLogin
from ..utils import commons, async_to_sync, prepend_docstr_nosections
from ..exceptions import TableParseError, LoginError
from . import conf
__all__ = ["Nrao", "NraoClass"]
def _validate_params(func):
    """Decorator that validates the `telescope`, `telescope_config`,
    `obs_band` and `sub_array` keyword arguments against the constants
    declared on `Nrao` before calling *func*.

    Raises
    ------
    ValueError
        If any of the validated keyword arguments holds a value outside
        the corresponding `Nrao` constant.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        telescope = kwargs.get('telescope', 'all')
        telescope_config = kwargs.get('telescope_config', 'all')
        obs_band = kwargs.get('obs_band', 'all')
        sub_array = kwargs.get('sub_array', 'all')

        # Scalars are normalized to one-element lists so a single loop
        # handles both forms.
        if not isinstance(telescope, (list, tuple)):
            telescope = [telescope]
        for tel in telescope:
            if tel not in Nrao.telescope_code:
                # Bug fix: the message previously read "'telescope must be"
                # with an unbalanced quote.
                raise ValueError("'telescope' must be one of {!s}"
                                 .format(Nrao.telescope_code.keys()))

        if not isinstance(telescope_config, (list, tuple)):
            telescope_config = [telescope_config]
        for tconf in telescope_config:
            if tconf.upper() not in Nrao.telescope_config:
                raise ValueError("'telescope_config' must be one of {!s}"
                                 .format(Nrao.telescope_config))

        if isinstance(obs_band, (list, tuple)):
            for ob in obs_band:
                if ob.upper() not in Nrao.obs_bands:
                    raise ValueError("'obs_band' must be one of {!s}"
                                     .format(Nrao.obs_bands))
        elif obs_band.upper() not in Nrao.obs_bands:
            raise ValueError("'obs_band' must be one of {!s}"
                             .format(Nrao.obs_bands))

        # 'all' is accepted as-is; subarrays otherwise holds ints 1-5.
        if sub_array not in Nrao.subarrays and sub_array != 'all':
            raise ValueError("'sub_array' must be one of {!s}"
                             .format(Nrao.subarrays))
        return func(*args, **kwargs)
    return wrapper
@async_to_sync
class NraoClass(QueryWithLogin):
    """Query interface for the NRAO science data archive."""

    DATA_URL = conf.server
    TIMEOUT = conf.timeout
    USERNAME = conf.username

    # dicts and lists for data archive queries
    telescope_code = {
        "all": "ALL",
        "jansky_vla": "EVLA",
        "historical_vla": "VLA",
        "vlba": "VLBA",
        "gbt": "GBT",
    }

    telescope_config = ['ALL', 'A', 'AB', 'BnA', 'B', 'BC', 'CnB', 'C',
                        'CD', 'DnC', 'D', 'DA']

    obs_bands = ['ALL', 'all', '4', 'P', 'L', 'S', 'C', 'X', 'U', 'K', 'Ka',
                 'Q', 'W']

    # we only ever use uppercase versions
    telescope_config = [x.upper() for x in telescope_config]
    obs_bands = [x.upper() for x in obs_bands]

    subarrays = ['ALL', 1, 2, 3, 4, 5]

    @_validate_params
    def _args_to_payload(self, **kwargs):
        """
        Queries the NRAO data archive and fetches table of observation
        summaries.

        Parameters
        ----------
        coordinates : str or `astropy.coordinates` object
            The target around which to search. It may be specified as a
            string in which case it is resolved using online services or as
            the appropriate `astropy.coordinates` object. ICRS coordinates
            may also be entered as a string.
        radius : str or `~astropy.units.Quantity` object, optional
            The string must be parsable by `astropy.coordinates.Angle`. The
            appropriate `~astropy.units.Quantity` object may also be
            used. Defaults to 1 arcminute.
        equinox : str, optional
            One of 'J2000' or 'B1950'. Defaults to 'J2000'.
        telescope : str, optional
            The telescope that produced the data. Defaults to 'all'. Valid
            values are:
            ['gbt', 'all', 'historical_vla', 'vlba', 'jansky_vla']
        start_date : str, optional
            The starting date and time of the observations , e.g. 2010-06-21
            14:20:30 Decimal seconds are not allowed. Defaults to `None` for
            no constraints.
        end_date : str, optional
            The ending date and time of the observations , e.g. 2010-06-21
            14:20:30 Decimal seconds are not allowed. Defaults to `None` for
            no constraints.
        freq_low : `~astropy.units.Quantity` object, optional
            The lower frequency of the observations in proper units of
            frequency via `astropy.units`. Defaults to `None` for no
            constraints.
        freq_up : `~astropy.units.Quantity` object, optional
            The upper frequency of the observations in proper units of
            frequency via `astropy.units`. Defaults to `None` for no
            constraints.
        telescope_config : str, optional
            Select the telescope configuration (only valid for VLA
            array). Defaults to 'all'. Valid values are ['all', 'A', 'AB',
            'BnA', 'B', 'BC', 'CnB', 'C', 'CD', 'DnC', 'D', 'DA']
        obs_band : str, optional
            The frequency bands for the observation. Defaults to
            'all'. Valid values are ['all', '4', 'P', 'L', 'S', 'C', 'X',
            'U', 'K', 'Ka', 'Q', 'W'].
        sub_array : str, number, optional
            VLA subarray designations, may be set to an integer from 1 to 5.
            Defaults to 'all'.
        project_code : str, optional
            A string indicating the project code.  Examples::

                * GBT: AGBT12A_055
                * JVLA: 12A-256

        querytype : str
            The type of query to perform.  "OBSSUMMARY" is the default, but
            it is only valid for VLA/VLBA observations.  ARCHIVE will give
            the list of files available for download.  OBSERVATION will
            provide full details of the sources observed and under what
            configurations.
        source_id : str, optional
            A source name (to be parsed by SIMBAD or NED)
        protocol : 'VOTable-XML' or 'HTML'
            The type of table to return.  In theory, this should not matter,
            but in practice the different table types actually have different
            content.  For ``querytype='ARCHIVE'``, the protocol will be
            forced to HTML because the archive doesn't support votable
            returns for archive queries.
        get_query_payload : bool, optional
            if set to `True` then returns the dictionary sent as the HTTP
            request.  Defaults to `False`
        cache : bool
            Cache the query results
        retry : bool or int
            The number of times to retry querying the server if it doesn't
            raise an exception but returns a null result (this sort of
            behavior seems unique to the NRAO archive)

        Returns
        -------
        request_payload : dict
            The dictionary of parameters to send via HTTP GET request.
        """
        lower_frequency = kwargs.get('freq_low', None)
        upper_frequency = kwargs.get('freq_up', None)
        if lower_frequency is not None and upper_frequency is not None:
            # Server expects a "low-high" MHz range string.
            freq_str = (str(lower_frequency.to(u.MHz).value) + '-' +
                        str(upper_frequency.to(u.MHz).value))
        else:
            freq_str = ""

        obs_bands = kwargs.get('obs_band', 'all')
        if isinstance(obs_bands, six.string_types):
            obs_bands = obs_bands.upper()
        elif isinstance(obs_bands, (list, tuple)):
            obs_bands = [x.upper() for x in obs_bands]

        telescope_config = kwargs.get('telescope_config', 'all')
        if isinstance(telescope_config, six.string_types):
            telescope_config = telescope_config.upper()
        elif isinstance(telescope_config, (list, tuple)):
            telescope_config = [x.upper() for x in telescope_config]

        telescope_ = kwargs.get('telescope', 'all')
        if isinstance(telescope_, six.string_types):
            telescope = Nrao.telescope_code[telescope_]
        # Bug fix: this branch previously tested `telescope` — a local that
        # is unbound at this point, so list/tuple input raised
        # UnboundLocalError — and the comprehension looked up
        # `telescope_code[telescope_]` instead of each element `x`.
        elif isinstance(telescope_, (list, tuple)):
            telescope = [Nrao.telescope_code[x] for x in telescope_]
        else:
            raise ValueError("'telescope' must be a string, list or tuple")

        request_payload = dict(
            QUERYTYPE=kwargs.get('querytype', "OBSSUMMARY"),
            PROTOCOL=kwargs.get('protocol', "VOTable-XML"),
            MAX_ROWS="NO LIMIT",
            SORT_PARM="Starttime",
            SORT_ORDER="Asc",
            SORT_PARM2="Starttime",
            SORT_ORDER2="Asc",
            QUERY_ID=9999,
            QUERY_MODE="AAT_TOOL",
            LOCKMODE="PROJECT",
            SITE_CODE="AOC",
            DBHOST="CHEWBACCA",
            WRITELOG=0,
            TELESCOPE=telescope,
            PROJECT_CODE=kwargs.get('project_code', ''),
            SEGMENT="",
            MIN_EXPOSURE='',
            TIMERANGE1=kwargs.get('start_date', ''),
            OBSERVER="",
            ARCHIVE_VOLUME="",
            TIMERANGE2=kwargs.get('end_date', ''),
            EQUINOX=kwargs.get('equinox', 'J2000'),
            CENTER_RA='',
            CENTER_DEC='',
            SRAD=str(
                coordinates.Angle(kwargs.get('radius', "1.0m")).deg) + 'd',
            TELESCOPE_CONFIG=telescope_config,
            OBS_BANDS=obs_bands,
            # Bug fix: this previously read the 'subarray' key, which no
            # caller sets (the validated kwarg is 'sub_array'), so the
            # constraint was silently dropped; str() also accepts the
            # integer values permitted by Nrao.subarrays.
            SUBARRAY=str(kwargs.get('sub_array', 'all')).upper(),
            SOURCE_ID=kwargs.get('source_id', ''),
            SRC_SEARCH_TYPE='SIMBAD or NED',
            OBSFREQ1=freq_str,
            OBS_POLAR="ALL",
            RECEIVER_ID="ALL",
            BACKEND_ID="ALL",
            DATATYPE="ALL",
            PASSWD="",  # TODO: implement login...
            SUBMIT="Submit Query")

        if ((request_payload['QUERYTYPE'] == "ARCHIVE" and
             request_payload['PROTOCOL'] != 'HTML')):
            warnings.warn("Changing protocol to HTML: ARCHIVE queries do not"
                          " support votable returns")
            request_payload['PROTOCOL'] = 'HTML'

        if request_payload['PROTOCOL'] not in ('HTML', 'VOTable-XML'):
            raise ValueError("Only HTML and VOTable-XML returns are supported")

        if request_payload['QUERYTYPE'] not in ('ARCHIVE', 'OBSSUMMARY',
                                                'OBSERVATION'):
            raise ValueError("Only ARCHIVE, OBSSUMMARY, and OBSERVATION "
                             "querytypes are supported")

        if 'coordinates' in kwargs:
            c = commons.parse_coordinates(
                kwargs['coordinates']).transform_to(coordinates.ICRS)
            request_payload['CENTER_RA'] = str(c.ra.degree) + 'd'
            request_payload['CENTER_DEC'] = str(c.dec.degree) + 'd'

        return request_payload

    def _login(self, username=None, store_password=False,
               reenter_password=False):
        """
        Login to the NRAO archive

        Parameters
        ----------
        username : str, optional
            Username to the NRAO archive. If not given, it should be
            specified in the config file.
        store_password : bool, optional
            Stores the password securely in your keyring. Default is False.
        reenter_password : bool, optional
            Asks for the password even if it is already stored in the
            keyring. This is the way to overwrite an already stored password
            on the keyring. Default is False.
        """
        # Developer notes:
        # Login via https://my.nrao.edu/cas/login
        # # this can be added to auto-redirect back to the query tool:
        # ?service=https://archive.nrao.edu/archive/advquery.jsp
        if username is None:
            if not self.USERNAME:
                raise LoginError("If you do not pass a username to login(), "
                                 "you should configure a default one!")
            else:
                username = self.USERNAME

        # Check if already logged in
        loginpage = self._request("GET", "https://my.nrao.edu/cas/login",
                                  cache=False)
        root = BeautifulSoup(loginpage.content, 'html5lib')
        if root.find('div', class_='success'):
            log.info("Already logged in.")
            return True

        # Get password from keyring or prompt
        password, password_from_keyring = self._get_password(
            "astroquery:my.nrao.edu", username, reenter=reenter_password)

        # Authenticate
        log.info("Authenticating {0} on my.nrao.edu ...".format(username))

        # Do not cache pieces of the login process; the hidden CAS form
        # fields below are per-session tokens scraped from the login page.
        data = {kw: root.find('input', {'name': kw})['value']
                for kw in ('lt', '_eventId', 'execution')}
        data['username'] = username
        data['password'] = password
        data['execution'] = 'e1s1'  # not sure if needed
        data['_eventId'] = 'submit'
        data['submit'] = 'LOGIN'

        login_response = self._request("POST", "https://my.nrao.edu/cas/login",
                                       data=data, cache=False)

        authenticated = ('You have successfully logged in' in
                         login_response.text)

        if authenticated:
            log.info("Authentication successful!")
            self.USERNAME = username
        else:
            log.exception("Authentication failed!")

        # When authenticated, save password in keyring if needed
        if authenticated and password_from_keyring is None and store_password:
            keyring.set_password("astroquery:my.nrao.edu", username, password)

        return authenticated

    @prepend_docstr_nosections(_args_to_payload.__doc__)
    def query_async(self,
                    get_query_payload=False,
                    cache=True,
                    retry=False,
                    **kwargs):
        """
        Returns
        -------
        response : `~requests.Response`
            The HTTP response returned from the service.
        """
        request_payload = self._args_to_payload(**kwargs)

        if get_query_payload:
            return request_payload

        response = self._request('POST', self.DATA_URL, params=request_payload,
                                 timeout=self.TIMEOUT, cache=cache)
        self._last_response = response

        response.raise_for_status()

        # fail if response is entirely whitespace or if it is empty
        if not response.content.strip():
            if cache:
                # Drop the cached empty result so the retry hits the server.
                last_pickle = self._last_query.hash() + ".pickle"
                cache_fn = os.path.join(self.cache_location, last_pickle)
                os.remove(cache_fn)
            if retry > 0:
                log.warning("Query resulted in an empty result. Retrying {0}"
                            " more times.".format(retry))
                # Bug fix: the retried response was previously discarded and
                # the empty response returned instead.
                return self.query_async(cache=cache, retry=retry - 1,
                                        **kwargs)
            else:
                raise ValueError("Query resulted in an empty result but "
                                 "the server did not raise an error.")

        return response

    @prepend_docstr_nosections(_args_to_payload.__doc__)
    def query_region_async(self, coordinates, radius=1 * u.arcmin,
                           equinox='J2000', telescope='all', start_date="",
                           end_date="", freq_low=None, freq_up=None,
                           telescope_config='all', obs_band='all',
                           querytype='OBSSUMMARY', sub_array='all',
                           project_code=None,
                           protocol='VOTable-XML',
                           retry=False,
                           get_query_payload=False, cache=True):
        """
        Returns
        -------
        response : `~requests.Response`
            The HTTP response returned from the service.
        """
        return self.query_async(coordinates=coordinates,
                                radius=radius,
                                equinox=equinox,
                                telescope=telescope,
                                start_date=start_date,
                                end_date=end_date,
                                freq_low=freq_low,
                                freq_up=freq_up,
                                telescope_config=telescope_config,
                                obs_band=obs_band,
                                querytype=querytype,
                                sub_array=sub_array,
                                project_code=project_code,
                                protocol=protocol,
                                retry=retry,
                                get_query_payload=get_query_payload,
                                cache=cache)

    def _parse_result(self, response, verbose=False):
        # Dispatch on the leading bytes of the body: VOTable responses start
        # with an XML declaration, HTML ones with an <html> tag.
        if '<?xml' in response.text[:5]:
            return self._parse_votable_result(response, verbose=verbose)
        elif '<html>' in response.text[:6]:
            return self._parse_html_result(response, verbose=verbose)
        else:
            raise ValueError("Unrecognized response type; it does not appear "
                             "to be VO-XML or HTML")

    def _parse_votable_result(self, response, verbose=False):
        if not verbose:
            commons.suppress_vo_warnings()

        new_content = response.text

        # these are pretty bad hacks, but also needed...
        # The archive declares some char columns as doubles; rewrite the
        # declarations so the VOTable parser accepts them.
        days_re = re.compile(r'unit="days"  datatype="double"')
        new_content = days_re.sub(r'unit="days" datatype="char" '
                                  'arraysize="*"', new_content)
        degrees_re = re.compile(r'unit="degrees"  datatype="double"')
        new_content = degrees_re.sub(r'unit="degrees" datatype="char" '
                                     'arraysize="*"', new_content)
        telconfig_re = re.compile(r'datatype="char" name="Telescope:config"')
        new_content = telconfig_re.sub(r'datatype="unicodeChar" '
                                       'name="Telescope:config" '
                                       ' arraysize="*" ', new_content)

        datatype_mapping = {'integer': 'long'}

        try:
            tf = six.BytesIO(new_content.encode())
            first_table = votable.parse(
                tf, pedantic=False,
                datatype_mapping=datatype_mapping).get_first_table()

            try:
                table = first_table.to_table(use_names_over_ids=True)
            except TypeError:
                warnings.warn("NRAO table parsing: astropy versions prior "
                              "to 6558975c use the table column IDs instead "
                              "of names.")
                table = first_table.to_table()

            return table
        except Exception as ex:
            self.response = response
            self.table_parse_error = ex
            raise TableParseError("Failed to parse NRAO votable result! The "
                                  "raw response can be found in self.response,"
                                  " and the error in self.table_parse_error.")

    def _parse_html_result(self, response, verbose=False):
        # parse the HTML return...
        root = BeautifulSoup(response.content, 'html5lib')

        # if len(htmltable) != 1:
        #     raise ValueError("Found the wrong number of tables: {0}"
        #                      .format(len(htmltable)))
        htmltable = root.findAll('table')

        # The results table is the last one on the page.
        string_to_parse = htmltable[-1].encode('ascii')

        if six.PY2:
            from astropy.io.ascii import html
            from astropy.io.ascii.core import convert_numpy
            htmlreader = html.HTML({'parser': 'html5lib'})
            # NOTE(review): np.unicode was removed in NumPy >= 1.20; this
            # branch only runs under Python 2, where older NumPy applies.
            htmlreader.outputter.default_converters.append(
                convert_numpy(np.unicode))
            table = htmlreader.read(string_to_parse)
        else:
            table = Table.read(string_to_parse.decode('utf-8'),
                               format='ascii.html')

        return table
# Module-level singleton instance; _validate_params and _args_to_payload
# reference it by name.
Nrao = NraoClass()
|
|
"""
sentry.models.project
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import warnings
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import F, Q
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.constants import PLATFORM_TITLES, PLATFORM_LIST
from sentry.db.models import (
BaseManager, BoundedPositiveIntegerField, FlexibleForeignKey, Model,
sane_repr
)
from sentry.db.models.utils import slugify_instance
from sentry.utils.http import absolute_uri
class ProjectManager(BaseManager):
    # TODO(dcramer): we might want to cache this per user
    def get_for_user(self, team, user, access=None, _skip_team_check=False):
        """Return the visible projects on *team* that *user* can access,
        sorted case-insensitively by name.
        """
        from sentry.models import Team

        # Anonymous or missing users see nothing.
        if not (user and user.is_authenticated()):
            return []

        if not _skip_team_check:
            accessible_teams = Team.objects.get_for_user(
                organization=team.organization,
                user=user,
                access=access,
            )

            try:
                # Re-bind `team` to the instance from the accessible list.
                team = accessible_teams[accessible_teams.index(team)]
            except ValueError:
                logging.info('User does not have access to team: %s', team.id)
                return []

        visible = self.filter(
            team=team,
            status=ProjectStatus.VISIBLE,
        )

        results = []
        for project in visible:
            # Attach the already-loaded team to avoid refetching it.
            project.team = team
            results.append(project)

        return sorted(results, key=lambda project: project.name.lower())
# TODO(dcramer): pull in enum library
class ProjectStatus(object):
    """Lifecycle states stored in `Project.status`."""
    VISIBLE = 0  # active project, shown in the UI
    PENDING_DELETION = 1  # flagged for deletion, not yet processed
    DELETION_IN_PROGRESS = 2  # deletion worker is currently running
class Project(Model):
    """
    Projects are permission based namespaces which generally
    are the top level entry point for all data.
    """
    # Every known platform plus a catch-all 'other' entry.
    PLATFORM_CHOICES = tuple(
        (p, PLATFORM_TITLES.get(p, p.title()))
        for p in PLATFORM_LIST
    ) + (('other', 'Other'),)

    slug = models.SlugField(null=True)
    name = models.CharField(max_length=200)
    organization = FlexibleForeignKey('sentry.Organization')
    team = FlexibleForeignKey('sentry.Team')
    public = models.BooleanField(default=False)
    date_added = models.DateTimeField(default=timezone.now)
    status = BoundedPositiveIntegerField(default=0, choices=(
        (ProjectStatus.VISIBLE, _('Active')),
        (ProjectStatus.PENDING_DELETION, _('Pending Deletion')),
        (ProjectStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
    ), db_index=True)
    platform = models.CharField(max_length=32, choices=PLATFORM_CHOICES, null=True)

    objects = ProjectManager(cache_fields=[
        'pk',
        'slug',
    ])

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_project'
        unique_together = (('team', 'slug'), ('organization', 'slug'))

    __repr__ = sane_repr('team_id', 'slug')

    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.slug)

    def save(self, *args, **kwargs):
        # Auto-generate a slug (unique within the organization) on first save.
        if not self.slug:
            slugify_instance(self, self.name, organization=self.organization)
        super(Project, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return absolute_uri(reverse('sentry-stream', args=[
            self.organization.slug, self.slug]))

    def merge_to(self, project):
        """Move this project's groups, events and tag values into *project*,
        then delete this project.
        """
        from sentry.models import (
            Group, GroupTagValue, Event, TagValue
        )

        if not isinstance(project, Project):
            project = Project.objects.get_from_cache(pk=project)

        for group in Group.objects.filter(project=self):
            try:
                # NOTE(review): this lookup filters on project only, so it
                # matches an arbitrary group in the target project (or raises
                # MultipleObjectsReturned when there are several) —
                # presumably it was meant to also match the group's identity
                # (e.g. checksum/culprit). Confirm against upstream history.
                other = Group.objects.get(
                    project=project,
                )
            except Group.DoesNotExist:
                # No counterpart in the target project: move the group and
                # its events/tag values wholesale.
                group.update(project=project)
                for model in (Event, GroupTagValue):
                    model.objects.filter(project=self, group=group).update(project=project)
            else:
                # A counterpart exists: fold events and tag counts into it.
                Event.objects.filter(group=group).update(group=other)

                for obj in GroupTagValue.objects.filter(group=group):
                    obj2, created = GroupTagValue.objects.get_or_create(
                        project=project,
                        group=group,
                        key=obj.key,
                        value=obj.value,
                        defaults={'times_seen': obj.times_seen}
                    )
                    if not created:
                        obj2.update(times_seen=F('times_seen') + obj.times_seen)

        for fv in TagValue.objects.filter(project=self):
            TagValue.objects.get_or_create(project=project, key=fv.key, value=fv.value)
            fv.delete()

        self.delete()

    def is_internal_project(self):
        # True when this project is one of the instance's own error-reporting
        # projects (frontend or backend), matched by id or slug.
        for value in (settings.SENTRY_FRONTEND_PROJECT, settings.SENTRY_PROJECT):
            if str(self.id) == str(value) or str(self.slug) == str(value):
                return True
        return False

    def get_tags(self, with_internal=True):
        from sentry.models import TagKey

        # Cached per instance; an explicit 'tags' option overrides discovery
        # from TagKey.
        if not hasattr(self, '_tag_cache'):
            tags = self.get_option('tags', None)
            if tags is None:
                tags = [
                    t for t in TagKey.objects.all_keys(self)
                    if with_internal or not t.startswith('sentry:')
                ]
            self._tag_cache = tags
        return self._tag_cache

    # TODO: Make these a mixin
    def update_option(self, *args, **kwargs):
        from sentry.models import ProjectOption

        return ProjectOption.objects.set_value(self, *args, **kwargs)

    def get_option(self, *args, **kwargs):
        from sentry.models import ProjectOption

        return ProjectOption.objects.get_value(self, *args, **kwargs)

    def delete_option(self, *args, **kwargs):
        from sentry.models import ProjectOption

        return ProjectOption.objects.unset_value(self, *args, **kwargs)

    @property
    def member_set(self):
        from sentry.models import OrganizationMember

        # Active organization members with access to this project's team —
        # via team membership or global access — excluding members whose
        # team membership is explicitly inactive.
        return self.organization.member_set.filter(
            Q(organizationmemberteam__team=self.team) |
            Q(has_global_access=True),
            user__is_active=True,
        ).exclude(
            id__in=OrganizationMember.objects.filter(
                organizationmemberteam__is_active=False,
                organizationmemberteam__team=self.team,
            ).values('id')
        ).distinct()

    def has_access(self, user, access=None):
        from sentry.models import AuthIdentity, OrganizationMember

        warnings.warn('Project.has_access is deprecated.', DeprecationWarning)

        queryset = self.member_set.filter(user=user)
        if access is not None:
            queryset = queryset.filter(type__lte=access)

        try:
            member = queryset.get()
        except OrganizationMember.DoesNotExist:
            return False

        try:
            auth_identity = AuthIdentity.objects.get(
                auth_provider__organization=self.organization_id,
                user=member.user_id,
            )
        except AuthIdentity.DoesNotExist:
            # Organization has no SSO identity for this member; membership
            # alone grants access.
            return True

        return auth_identity.is_valid(member)

    def get_audit_log_data(self):
        return {
            'slug': self.slug,
            'name': self.name,
            'status': self.status,
            'public': self.public,
            'platform': self.platform,
        }

    def get_full_name(self):
        # Prefix with the team name unless it already appears in the
        # project name.
        if self.team.name not in self.name:
            return '%s %s' % (self.team.name, self.name)
        return self.name
|
|
"""
Model test set
"""
import unittest
from datetime import date
from math import sqrt
from tr55.tablelookup import lookup_ki, is_built_type
from tr55.model import runoff_nrcs, simulate_tile, simulate_all_tiles
from tr55.model import simulate_year
# These data are taken directly from Table 2-1 of the revised (1986)
# TR-55 report. The data in the PS array are various precipitation
# levels, and each respective CNx array is the calculated runoff for
# that particular curve number with the given level of precipitation
# corresponding to that in PS.
# Precipitation levels (inches) from Table 2-1 of the revised (1986) TR-55.
PS = [1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0]
# Tabulated runoff (inches) for curve number 55, one entry per value of PS.
CN55 = [0.000, 0.000, 0.000, 0.000, 0.000, 0.020, 0.080, 0.190, 0.350, 0.530, 0.740, 0.980, 1.520, 2.120, 2.780, 3.490, 4.230, 5.000, 5.790, 6.610, 7.440, 8.290]
# Tabulated runoff for curve number 70.
CN70 = [0.000, 0.030, 0.060, 0.110, 0.170, 0.240, 0.460, 0.710, 1.010, 1.330, 1.670, 2.040, 2.810, 3.620, 4.460, 5.330, 6.220, 7.130, 8.050, 8.980, 9.910, 10.85]
# Tabulated runoff for curve number 80.
CN80 = [0.080, 0.150, 0.240, 0.340, 0.440, 0.560, 0.890, 1.250, 1.640, 2.040, 2.460, 2.890, 3.780, 4.690, 5.630, 6.570, 7.520, 8.480, 9.450, 10.42, 11.39, 12.37]
# Tabulated runoff for curve number 90.
CN90 = [0.320, 0.460, 0.610, 0.760, 0.930, 1.090, 1.530, 1.980, 2.450, 2.920, 3.400, 3.880, 4.850, 5.820, 6.810, 7.790, 8.780, 9.770, 10.76, 11.76, 12.75, 13.74]
# INPUT and OUTPUT are data that were emailed to Azavea in a
# spreadsheet for testing the TR-55 model implementation.
INPUT = [
(0.5, 'a:water'),
(1, 'a:water'),
(2, 'a:water'),
(3.2, 'a:water'),
(8, 'a:water'),
(0.5, 'a:rock'),
(1, 'a:rock'),
(2, 'a:rock'),
(3.2, 'a:rock'),
(8, 'a:rock'),
(0.5, 'a:urban_grass'),
(1, 'a:urban_grass'),
(2, 'a:urban_grass'),
(3.2, 'a:urban_grass'),
(8, 'a:urban_grass'),
(0.5, 'a:li_residential'),
(1, 'a:li_residential'),
(2, 'a:li_residential'),
(3.2, 'a:li_residential'),
(8, 'a:li_residential'),
(0.5, 'a:hi_residential'),
(1, 'a:hi_residential'),
(2, 'a:hi_residential'),
(3.2, 'a:hi_residential'),
(8, 'a:hi_residential'),
(0.5, 'a:commercial'),
(1, 'a:commercial'),
(2, 'a:commercial'),
(3.2, 'a:commercial'),
(8, 'a:commercial'),
(0.5, 'a:deciduous_forest'),
(0.5, 'a:evergreen_forest'),
(0.5, 'a:mixed_forest'),
(1, 'a:deciduous_forest'),
(1, 'a:evergreen_forest'),
(1, 'a:mixed_forest'),
(2, 'a:deciduous_forest'),
(2, 'a:evergreen_forest'),
(2, 'a:mixed_forest'),
(3.2, 'a:deciduous_forest'),
(3.2, 'a:evergreen_forest'),
(3.2, 'a:mixed_forest'),
(8, 'a:deciduous_forest'),
(8, 'a:evergreen_forest'),
(8, 'a:mixed_forest'),
(0.5, 'a:grassland'),
(1, 'a:grassland'),
(2, 'a:grassland'),
(3.2, 'a:grassland'),
(8, 'a:grassland'),
(0.5, 'a:pasture'),
(1, 'a:pasture'),
(2, 'a:pasture'),
(3.2, 'a:pasture'),
(8, 'a:pasture'),
(0.5, 'a:row_crop'),
(1, 'a:row_crop'),
(2, 'a:row_crop'),
(3.2, 'a:row_crop'),
(8, 'a:row_crop'),
(0.5, 'a:woody_wetland'),
(0.5, 'a:herbaceous_wetland'),
(1, 'a:woody_wetland'),
(1, 'a:herbaceous_wetland'),
(2, 'a:woody_wetland'),
(2, 'a:herbaceous_wetland'),
(3.2, 'a:woody_wetland'),
(3.2, 'a:herbaceous_wetland'),
(8, 'a:woody_wetland'),
(8, 'a:herbaceous_wetland'),
(0.5, 'b:water'),
(1, 'b:water'),
(2, 'b:water'),
(3.2, 'b:water'),
(8, 'b:water'),
(0.5, 'b:rock'),
(1, 'b:rock'),
(2, 'b:rock'),
(3.2, 'b:rock'),
(8, 'b:rock'),
(0.5, 'b:urban_grass'),
(1, 'b:urban_grass'),
(2, 'b:urban_grass'),
(3.2, 'b:urban_grass'),
(8, 'b:urban_grass'),
(0.5, 'b:li_residential'),
(1, 'b:li_residential'),
(2, 'b:li_residential'),
(3.2, 'b:li_residential'),
(8, 'b:li_residential'),
(0.5, 'b:hi_residential'),
(1, 'b:hi_residential'),
(2, 'b:hi_residential'),
(3.2, 'b:hi_residential'),
(8, 'b:hi_residential'),
(0.5, 'b:commercial'),
(1, 'b:commercial'),
(2, 'b:commercial'),
(3.2, 'b:commercial'),
(8, 'b:commercial'),
(0.5, 'b:deciduous_forest'),
(0.5, 'b:evergreen_forest'),
(0.5, 'b:mixed_forest'),
(1, 'b:deciduous_forest'),
(1, 'b:evergreen_forest'),
(1, 'b:mixed_forest'),
(2, 'b:deciduous_forest'),
(2, 'b:evergreen_forest'),
(2, 'b:mixed_forest'),
(3.2, 'b:deciduous_forest'),
(3.2, 'b:evergreen_forest'),
(3.2, 'b:mixed_forest'),
(8, 'b:deciduous_forest'),
(8, 'b:evergreen_forest'),
(8, 'b:mixed_forest'),
(0.5, 'b:grassland'),
(1, 'b:grassland'),
(2, 'b:grassland'),
(3.2, 'b:grassland'),
(8, 'b:grassland'),
(0.5, 'b:pasture'),
(1, 'b:pasture'),
(2, 'b:pasture'),
(3.2, 'b:pasture'),
(8, 'b:pasture'),
(0.5, 'b:row_crop'),
(1, 'b:row_crop'),
(2, 'b:row_crop'),
(3.2, 'b:row_crop'),
(8, 'b:row_crop'),
(0.5, 'b:woody_wetland'),
(0.5, 'b:herbaceous_wetland'),
(1, 'b:woody_wetland'),
(1, 'b:herbaceous_wetland'),
(2, 'b:woody_wetland'),
(2, 'b:herbaceous_wetland'),
(3.2, 'b:woody_wetland'),
(3.2, 'b:herbaceous_wetland'),
(8, 'b:woody_wetland'),
(8, 'b:herbaceous_wetland'),
(0.5, 'c:water'),
(1, 'c:water'),
(2, 'c:water'),
(3.2, 'c:water'),
(8, 'c:water'),
(0.5, 'c:rock'),
(1, 'c:rock'),
(2, 'c:rock'),
(3.2, 'c:rock'),
(8, 'c:rock'),
(0.5, 'c:urban_grass'),
(1, 'c:urban_grass'),
(2, 'c:urban_grass'),
(3.2, 'c:urban_grass'),
(8, 'c:urban_grass'),
(0.5, 'c:li_residential'),
(1, 'c:li_residential'),
(2, 'c:li_residential'),
(3.2, 'c:li_residential'),
(8, 'c:li_residential'),
(0.5, 'c:hi_residential'),
(1, 'c:hi_residential'),
(2, 'c:hi_residential'),
(3.2, 'c:hi_residential'),
(8, 'c:hi_residential'),
(0.5, 'c:commercial'),
(1, 'c:commercial'),
(2, 'c:commercial'),
(3.2, 'c:commercial'),
(8, 'c:commercial'),
(0.5, 'c:deciduous_forest'),
(0.5, 'c:evergreen_forest'),
(0.5, 'c:mixed_forest'),
(1, 'c:deciduous_forest'),
(1, 'c:evergreen_forest'),
(1, 'c:mixed_forest'),
(2, 'c:deciduous_forest'),
(2, 'c:evergreen_forest'),
(2, 'c:mixed_forest'),
(3.2, 'c:deciduous_forest'),
(3.2, 'c:evergreen_forest'),
(3.2, 'c:mixed_forest'),
(8, 'c:deciduous_forest'),
(8, 'c:evergreen_forest'),
(8, 'c:mixed_forest'),
(0.5, 'c:grassland'),
(1, 'c:grassland'),
(2, 'c:grassland'),
(3.2, 'c:grassland'),
(8, 'c:grassland'),
(0.5, 'c:pasture'),
(1, 'c:pasture'),
(2, 'c:pasture'),
(3.2, 'c:pasture'),
(8, 'c:pasture'),
(0.5, 'c:row_crop'),
(1, 'c:row_crop'),
(2, 'c:row_crop'),
(3.2, 'c:row_crop'),
(8, 'c:row_crop'),
(0.5, 'c:woody_wetland'),
(0.5, 'c:herbaceous_wetland'),
(1, 'c:woody_wetland'),
(1, 'c:herbaceous_wetland'),
(2, 'c:woody_wetland'),
(2, 'c:herbaceous_wetland'),
(3.2, 'c:woody_wetland'),
(3.2, 'c:herbaceous_wetland'),
(8, 'c:woody_wetland'),
(8, 'c:herbaceous_wetland'),
(0.5, 'd:water'),
(1, 'd:water'),
(2, 'd:water'),
(3.2, 'd:water'),
(8, 'd:water'),
(0.5, 'd:rock'),
(1, 'd:rock'),
(2, 'd:rock'),
(3.2, 'd:rock'),
(8, 'd:rock'),
(0.5, 'd:urban_grass'),
(1, 'd:urban_grass'),
(2, 'd:urban_grass'),
(3.2, 'd:urban_grass'),
(8, 'd:urban_grass'),
(0.5, 'd:li_residential'),
(1, 'd:li_residential'),
(2, 'd:li_residential'),
(3.2, 'd:li_residential'),
(8, 'd:li_residential'),
(0.5, 'd:hi_residential'),
(1, 'd:hi_residential'),
(2, 'd:hi_residential'),
(3.2, 'd:hi_residential'),
(8, 'd:hi_residential'),
(0.5, 'd:commercial'),
(1, 'd:commercial'),
(2, 'd:commercial'),
(3.2, 'd:commercial'),
(8, 'd:commercial'),
(0.5, 'd:deciduous_forest'),
(0.5, 'd:evergreen_forest'),
(0.5, 'd:mixed_forest'),
(1, 'd:deciduous_forest'),
(1, 'd:evergreen_forest'),
(1, 'd:mixed_forest'),
(2, 'd:deciduous_forest'),
(2, 'd:evergreen_forest'),
(2, 'd:mixed_forest'),
(3.2, 'd:deciduous_forest'),
(3.2, 'd:evergreen_forest'),
(3.2, 'd:mixed_forest'),
(8, 'd:deciduous_forest'),
(8, 'd:evergreen_forest'),
(8, 'd:mixed_forest'),
(0.5, 'd:grassland'),
(1, 'd:grassland'),
(2, 'd:grassland'),
(3.2, 'd:grassland'),
(8, 'd:grassland'),
(0.5, 'd:pasture'),
(1, 'd:pasture'),
(2, 'd:pasture'),
(3.2, 'd:pasture'),
(8, 'd:pasture'),
(0.5, 'd:row_crop'),
(1, 'd:row_crop'),
(2, 'd:row_crop'),
(3.2, 'd:row_crop'),
(8, 'd:row_crop'),
(0.5, 'd:woody_wetland'),
(0.5, 'd:herbaceous_wetland'),
(1, 'd:woody_wetland'),
(1, 'd:herbaceous_wetland'),
(2, 'd:woody_wetland'),
(2, 'd:herbaceous_wetland'),
(3.2, 'd:woody_wetland'),
(3.2, 'd:herbaceous_wetland'),
(8, 'd:woody_wetland'),
(8, 'd:herbaceous_wetland')
]
OUTPUT = [
(0.5, 0, 0),
(1, 0, 0),
(2, 0, 0),
(3.2, 0, 0),
(8, 0, 0),
(0, 0, 0.5),
(0, 0, 1),
(0.4, 0, 1.6),
(1.2, 0, 2),
(5.3, 0, 2.7),
(0, 0.1, 0.3),
(0.1, 0.1, 0.7),
(0.2, 0.1, 1.7),
(0.7, 0.1, 2.3),
(4.2, 0.1, 3.6),
(0.1, 0.1, 0.3),
(0.3, 0.1, 0.6),
(0, 0.1, 1.9),
(0.2, 0.1, 3),
(2.4, 0.1, 5.6),
(0.3, 0, 0.1),
(0.7, 0, 0.3),
(0.4, 0, 1.5),
(1.2, 0, 2),
(5.3, 0, 2.7),
(0.5, 0, 0),
(1, 0, 0),
(1, 0, 1),
(2.1, 0, 1.1),
(6.7, 0, 1.3),
(0, 0.1, 0.4),
(0, 0.1, 0.4),
(0, 0.1, 0.4),
(0, 0.1, 0.9),
(0, 0.1, 0.9),
(0, 0.1, 0.9),
(0, 0.1, 1.9),
(0, 0.1, 1.9),
(0, 0.1, 1.9),
(0, 0.1, 3.1),
(0, 0.1, 3.1),
(0, 0.1, 3.1),
(0.4, 0.1, 7.4),
(0.4, 0.1, 7.4),
(0.4, 0.1, 7.4),
(0, 0.1, 0.4),
(0, 0.1, 0.9),
(0, 0.1, 1.9),
(0, 0.1, 3.1),
(0.4, 0.1, 7.5),
(0, 0.1, 0.4),
(0, 0.1, 0.9),
(0, 0.1, 1.9),
(0, 0.1, 3.1),
(1.2, 0.1, 6.7),
(0, 0.2, 0.3),
(0, 0.2, 0.8),
(0.2, 0.2, 1.6),
(0.7, 0.2, 2.3),
(4.1, 0.2, 3.7),
(0.3, 0.2, 0),
(0.3, 0.2, 0),
(0.8, 0.2, 0),
(0.8, 0.2, 0),
(1.8, 0.2, 0),
(1.8, 0.2, 0),
(3, 0.2, 0),
(3, 0.2, 0),
(7.8, 0.2, 0),
(7.8, 0.2, 0),
(0.5, 0, 0),
(1, 0, 0),
(2, 0, 0),
(3.2, 0, 0),
(8, 0, 0),
(0, 0, 0.5),
(0.2, 0, 0.8),
(0.8, 0, 1.2),
(1.8, 0, 1.4),
(6.3, 0, 1.7),
(0, 0.1, 0.3),
(0.1, 0.1, 0.7),
(0.5, 0.1, 1.3),
(1.3, 0.1, 1.7),
(5.5, 0.1, 2.3),
(0.1, 0.1, 0.3),
(0.3, 0.1, 0.6),
(0.2, 0.1, 1.7),
(0.7, 0.1, 2.4),
(4.2, 0.1, 3.7),
(0.3, 0, 0.1),
(0.7, 0, 0.3),
(0.8, 0, 1.2),
(1.8, 0, 1.4),
(6.2, 0, 1.7),
(0.5, 0, 0),
(1, 0, 0),
(1.2, 0, 0.8),
(2.4, 0, 0.8),
(7, 0, 0.9),
(0, 0.1, 0.4),
(0, 0.1, 0.4),
(0, 0.1, 0.4),
(0, 0.1, 0.9),
(0, 0.1, 0.9),
(0, 0.1, 0.9),
(0, 0.1, 1.8),
(0, 0.1, 1.8),
(0, 0.1, 1.8),
(0.3, 0.1, 2.8),
(0.3, 0.1, 2.8),
(0.3, 0.1, 2.8),
(2.8, 0.1, 5.1),
(2.8, 0.1, 5.1),
(2.8, 0.1, 5.1),
(0, 0.1, 0.4),
(0, 0.1, 0.9),
(0, 0.1, 1.9),
(0.3, 0.1, 2.8),
(2.8, 0.1, 5.1),
(0, 0.1, 0.4),
(0, 0.1, 0.9),
(0.1, 0.1, 1.8),
(0.4, 0.1, 2.6),
(3.4, 0.1, 4.4),
(0, 0.2, 0.3),
(0.1, 0.2, 0.8),
(0.5, 0.2, 1.3),
(1.3, 0.2, 1.7),
(5.4, 0.2, 2.4),
(0.3, 0.2, 0),
(0.3, 0.2, 0),
(0.8, 0.2, 0),
(0.8, 0.2, 0),
(1.8, 0.2, 0),
(1.8, 0.2, 0),
(3, 0.2, 0),
(3, 0.2, 0),
(7.8, 0.2, 0),
(7.8, 0.2, 0),
(0.5, 0, 0),
(1, 0, 0),
(2, 0, 0),
(3.2, 0, 0),
(8, 0, 0),
(0.1, 0, 0.4),
(0.4, 0, 0.6),
(1.2, 0, 0.8),
(2.3, 0, 0.9),
(6.9, 0, 1.1),
(0, 0.1, 0.3),
(0.1, 0.1, 0.7),
(0.8, 0.1, 1),
(1.8, 0.1, 1.2),
(6.3, 0.1, 1.5),
(0.1, 0.1, 0.3),
(0.3, 0.1, 0.6),
(0.5, 0.1, 1.4),
(1.3, 0.1, 1.8),
(5.5, 0.1, 2.4),
(0.3, 0, 0.1),
(0.7, 0, 0.3),
(1.1, 0, 0.9),
(2.2, 0, 1),
(6.8, 0, 1.2),
(0.5, 0, 0),
(1, 0, 0),
(1.4, 0, 0.6),
(2.5, 0, 0.6),
(7.3, 0, 0.7),
(0, 0.1, 0.4),
(0, 0.1, 0.4),
(0, 0.1, 0.4),
(0, 0.1, 0.8),
(0, 0.1, 0.8),
(0, 0.1, 0.8),
(0.2, 0.1, 1.6),
(0.2, 0.1, 1.6),
(0.2, 0.1, 1.6),
(0.8, 0.1, 2.2),
(0.8, 0.1, 2.2),
(0.8, 0.1, 2.2),
(4.5, 0.1, 3.4),
(4.5, 0.1, 3.4),
(4.5, 0.1, 3.4),
(0, 0.1, 0.4),
(0, 0.1, 0.9),
(0.2, 0.1, 1.6),
(0.8, 0.1, 2.2),
(4.5, 0.1, 3.4),
(0, 0.1, 0.4),
(0, 0.1, 0.9),
(0.3, 0.1, 1.5),
(1, 0.1, 2),
(4.9, 0.1, 2.9),
(0, 0.2, 0.3),
(0.2, 0.2, 0.6),
(0.8, 0.2, 1),
(1.8, 0.2, 1.3),
(6.2, 0.2, 1.6),
(0.3, 0.2, 0),
(0.3, 0.2, 0),
(0.8, 0.2, 0),
(0.8, 0.2, 0),
(1.8, 0.2, 0),
(1.8, 0.2, 0),
(3, 0.2, 0),
(3, 0.2, 0),
(7.8, 0.2, 0),
(7.8, 0.2, 0),
(0.5, 0, 0),
(1, 0, 0),
(2, 0, 0),
(3.2, 0, 0),
(8, 0, 0),
(0.1, 0, 0.4),
(0.5, 0, 0.5),
(1.4, 0, 0.6),
(2.5, 0, 0.7),
(7.3, 0, 0.7),
(0, 0.1, 0.3),
(0.1, 0.1, 0.7),
(1, 0.1, 0.8),
(2.1, 0.1, 1),
(6.7, 0.1, 1.2),
(0.1, 0.1, 0.3),
(0.3, 0.1, 0.6),
(0.7, 0.1, 1.2),
(1.7, 0.1, 1.4),
(6.1, 0.1, 1.8),
(0.3, 0, 0.1),
(0.7, 0, 0.3),
(1.2, 0, 0.7),
(2.4, 0, 0.8),
(7, 0, 0.9),
(0.5, 0, 0),
(1, 0, 0),
(1.5, 0, 0.5),
(2.6, 0, 0.5),
(7.4, 0, 0.6),
(0, 0.1, 0.4),
(0, 0.1, 0.4),
(0, 0.1, 0.4),
(0, 0.1, 0.8),
(0, 0.1, 0.8),
(0, 0.1, 0.8),
(0.4, 0.1, 1.4),
(0.4, 0.1, 1.4),
(0.4, 0.1, 1.4),
(1.2, 0.1, 1.8),
(1.2, 0.1, 1.8),
(1.2, 0.1, 1.8),
(5.3, 0.1, 2.6),
(5.3, 0.1, 2.6),
(5.3, 0.1, 2.6),
(0, 0.1, 0.4),
(0, 0.1, 0.8),
(0.4, 0.1, 1.4),
(1.2, 0.1, 1.9),
(5.3, 0.1, 2.6),
(0, 0.1, 0.4),
(0.1, 0.1, 0.8),
(0.6, 0.1, 1.3),
(1.4, 0.1, 1.7),
(5.6, 0.1, 2.2),
(0, 0.2, 0.3),
(0.3, 0.2, 0.5),
(1, 0.2, 0.8),
(2.1, 0.2, 0.9),
(6.7, 0.2, 1.1),
(0.3, 0.2, 0),
(0.3, 0.2, 0),
(0.8, 0.2, 0),
(0.8, 0.2, 0),
(1.8, 0.2, 0),
(1.8, 0.2, 0),
(3, 0.2, 0),
(3, 0.2, 0),
(7.8, 0.2, 0),
(7.8, 0.2, 0)
]
def simulate(precip, tile_string):
    """Run the one-day simulation for a single tile.

    :param precip: precipitation in inches.
    :param tile_string: 'soil:land_use' pair, e.g. 'a:pasture'.
    :returns: whatever simulate_tile returns for (precip, et) on the tile.

    The soil-type half of ``tile_string`` is not used here (only the land
    use feeds the ki lookup), so it is unpacked to a throwaway name.
    """
    _soil_type, land_use = tile_string.split(':')
    ki = lookup_ki(land_use)
    # 0.209 scales ki into the second simulation input -- presumably an
    # evapotranspiration factor; TODO confirm the constant's provenance.
    return simulate_tile((precip, 0.209 * ki), tile_string)
def average(l):
    """Return the arithmetic mean of the non-empty sequence ``l``.

    Uses the built-in ``sum`` instead of ``reduce``: ``reduce`` is not a
    builtin in Python 3, so the previous implementation raised NameError
    there. Raises ZeroDivisionError on an empty sequence, as before.
    """
    return sum(l) / len(l)
class TestModel(unittest.TestCase):
    """
    Model test set
    """
    def test_nrcs(self):
        """
        Test the implementation of the runoff equation.
        """
        # This pair has CN=55 in Table C of the 2010/12/27 memo
        runoffs = [round(runoff_nrcs(precip, 0.0, 'b', 'deciduous_forest'), 2)
                   for precip in PS]
        # Low curve number and low P cause too-high runoff
        self.assertEqual(runoffs[4:], CN55[4:])
        # This pair has CN=70
        runoffs = [round(runoff_nrcs(precip, 0.0, 'c', 'deciduous_forest'), 2)
                   for precip in PS]
        self.assertEqual(runoffs[1:], CN70[1:])
        # This pair has CN=80
        runoffs = [round(runoff_nrcs(precip, 0.0, 'd', 'pasture'), 2)
                   for precip in PS]
        self.assertEqual(runoffs, CN80)
        # This pair has CN=90
        runoffs = [round(runoff_nrcs(precip, 0.0, 'c', 'hi_residential'), 2)
                   for precip in PS]
        self.assertEqual(runoffs, CN90)

    def test_simulate_tile_horizontal(self):
        """
        Test the one-day simulation using sample input/output. The number
        0.04 is not very meaningful, this test just attempts to give
        you some idea about the mean error of the three quantities
        relative to precipitation.

        Fixed: the original drove ``similar`` through ``map`` purely for
        its side effects; under Python 3 ``map`` is lazy, so none of the
        assertions ever executed. An explicit loop always runs them.
        """
        def similar(incoming, expected):
            precip, tile_string = incoming
            results = simulate(precip, tile_string)
            # Mean error of the three quantities, relative to precipitation.
            me = average([abs(x - y) / precip
                          for x, y in zip(results, expected)])
            # Precipitation levels <= 2 inches are known to be
            # problematic. It is unclear why the 'rock' type is
            # giving trouble on soil types C and D.
            if precip > 2 and tile_string != 'c:rock' and tile_string != 'd:rock':
                self.assertTrue(me < 0.04, tile_string + ' ' + str(me))

        for incoming, expected in zip(INPUT, OUTPUT):
            similar(incoming, expected)

    def test_simulate_tiles_vertical(self):
        """
        Test the RMSE of the runoff levels produced by the one-day
        simulation against values sample input/output. The number
        0.13 is not very meaningful, this test just attempts to show
        to deviation.
        """
        results = [simulate(precip, tile_string)[0] / precip
                   for precip, tile_string in INPUT
                   if precip > 2 and tile_string != 'c:rock' and tile_string != 'd:rock']
        expected = [OUTPUT[i][0] / INPUT[i][0]
                    for i in range(len(INPUT))
                    if INPUT[i][0] > 2 and INPUT[i][1] != 'c:rock' and INPUT[i][1] != 'd:rock']
        # A list comprehension (not a lazy map) so len() works inside average.
        rmse = sqrt(average([(x - y) ** 2
                             for x, y in zip(results, expected)]))
        self.assertTrue(rmse < 0.13)

    def test_simulate_all_tiles(self):
        """
        Test the tile-by-tile simulation.
        """
        # Test invalid responses. Fixed: assertRaises was previously given
        # a single tuple argument instead of separate positional args, so
        # it exercised the wrong call.
        non_response2 = {
            "result": {  # No "distribution" key
                "cell_count": 1
            }
        }
        non_response3 = {
            "result": {  # No "cell_count" key
                "distribution": {}
            }
        }
        self.assertRaises(Exception, simulate_all_tiles,
                          date.today(), non_response2)
        self.assertRaises(Exception, simulate_all_tiles,
                          date.today(), non_response3)

        # Test valid responses: scaling every cell count by 10x must not
        # change the per-cell results.
        response1 = {
            "result": {
                "cell_count": 2,
                "distribution": {
                    "a:pasture": 1,
                    "c:rock": 1
                }
            }
        }
        response2 = {
            "result": {
                "cell_count": 20,
                "distribution": {
                    "a:pasture": 10,
                    "c:rock": 10
                }
            }
        }
        for left, right in zip(simulate_all_tiles(date.today(), response1),
                               simulate_all_tiles(date.today(), response2)):
            self.assertAlmostEqual(left, right)

        # Test Pre-Columbian calculation
        response3 = {
            "result": {
                "cell_count": 1,
                "distribution": {
                    "d:hi_residential": 1
                }
            }
        }
        response4 = {
            "result": {
                "cell_count": 10,
                "distribution": {
                    "d:pasture": 10
                }
            }
        }
        for left, right in zip(simulate_all_tiles(date(1, 4, 15), response3),
                               simulate_all_tiles(date(1, 4, 15), response4)):
            self.assertNotEqual(left, right)
        for left, right in zip(simulate_all_tiles(date(1, 4, 15), response3, True),
                               simulate_all_tiles(date(1, 4, 15), response4, True)):
            self.assertEqual(left, right)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Support for mounting virtual image files."""
import os
import time
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)

# Upper bound (seconds) on how long _get_dev_retry_helper keeps retrying
# block-device allocation before giving up.
MAX_DEVICE_WAIT = 30
class Mount(object):
    """Standard mounting operations, that can be overridden by subclasses.

    The basic device operations provided are get, map and mount,
    to be called in that order.
    """

    mode = None  # to be overridden in subclasses

    @staticmethod
    def instance_for_format(imgfile, mountdir, partition, imgfmt):
        """Return the Mount implementation appropriate for ``imgfmt``."""
        LOG.debug(_("Instance for format imgfile=%(imgfile)s "
                    "mountdir=%(mountdir)s partition=%(partition)s "
                    "imgfmt=%(imgfmt)s") % locals())
        if imgfmt == "raw":
            # Raw images can be attached straight to a loop device.
            LOG.debug(_("Using LoopMount"))
            return importutils.import_object(
                "nova.virt.disk.mount.loop.LoopMount",
                imgfile, mountdir, partition)
        else:
            # Anything non-raw goes through an NBD-backed mount.
            LOG.debug(_("Using NbdMount"))
            return importutils.import_object(
                "nova.virt.disk.mount.nbd.NbdMount",
                imgfile, mountdir, partition)

    @staticmethod
    def instance_for_device(imgfile, mountdir, partition, device):
        """Return the Mount implementation matching an existing device path."""
        LOG.debug(_("Instance for device imgfile=%(imgfile)s "
                    "mountdir=%(mountdir)s partition=%(partition)s "
                    "device=%(device)s") % locals())
        if "loop" in device:
            LOG.debug(_("Using LoopMount"))
            return importutils.import_object(
                "nova.virt.disk.mount.loop.LoopMount",
                imgfile, mountdir, partition, device)
        else:
            LOG.debug(_("Using NbdMount"))
            return importutils.import_object(
                "nova.virt.disk.mount.nbd.NbdMount",
                imgfile, mountdir, partition, device)

    def __init__(self, image, mount_dir, partition=None, device=None):
        # Input
        self.image = image
        self.partition = partition
        self.mount_dir = mount_dir

        # Output
        self.error = ""

        # Internal state flags tracking how far setup has progressed;
        # teardown consults these to undo only what was done.
        self.linked = self.mapped = self.mounted = self.automapped = False
        self.device = self.mapped_device = device

        # Reset to mounted dir if possible
        self.reset_dev()

    def reset_dev(self):
        """Reset device paths to allow unmounting."""
        if not self.device:
            return

        # A pre-existing device implies the image is already linked,
        # mapped and mounted, so record that for a full teardown later.
        self.linked = self.mapped = self.mounted = True

        device = self.device
        if os.path.isabs(device) and os.path.exists(device):
            if device.startswith('/dev/mapper/'):
                # Split '/dev/mapper/<base>p<n>' back into the base device
                # and the partition number.
                # NOTE(review): assumes the mapper name ends in 'p<digits>';
                # a name without 'p' would raise ValueError -- confirm.
                device = os.path.basename(device)
                device, self.partition = device.rsplit('p', 1)
                self.device = os.path.join('/dev', device)

    def get_dev(self):
        """Make the image available as a block device in the file system."""
        # Base class attaches nothing; subclasses provide real devices.
        self.device = None
        self.linked = True
        return True

    def _get_dev_retry_helper(self):
        """Some implementations need to retry their get_dev."""
        # NOTE(mikal): This method helps implement retries. The implementation
        # simply calls _get_dev_retry_helper from their get_dev, and implements
        # _inner_get_dev with their device acquisition logic. The NBD
        # implementation has an example.
        start_time = time.time()
        device = self._inner_get_dev()
        while not device:
            LOG.info(_('Device allocation failed. Will retry in 2 seconds.'))
            time.sleep(2)
            if time.time() - start_time > MAX_DEVICE_WAIT:
                LOG.warn(_('Device allocation failed after repeated retries.'))
                return False
            device = self._inner_get_dev()
        return True

    def _inner_get_dev(self):
        # Subclasses using _get_dev_retry_helper must implement this.
        raise NotImplementedError()

    def unget_dev(self):
        """Release the block device from the file system namespace."""
        self.linked = False

    def map_dev(self):
        """Map partitions of the device to the file system namespace."""
        assert(os.path.exists(self.device))
        LOG.debug(_("Map dev %s"), self.device)
        # Path the kernel would create if partition auto-mapping is on.
        automapped_path = '/dev/%sp%s' % (os.path.basename(self.device),
                                          self.partition)

        if self.partition == -1:
            self.error = _('partition search unsupported with %s') % self.mode
        elif self.partition and not os.path.exists(automapped_path):
            map_path = '/dev/mapper/%sp%s' % (os.path.basename(self.device),
                                              self.partition)
            assert(not os.path.exists(map_path))

            # Note kpartx can output warnings to stderr and succeed
            # Also it can output failures to stderr and "succeed"
            # So we just go on the existence of the mapped device
            _out, err = utils.trycmd('kpartx', '-a', self.device,
                                     run_as_root=True, discard_warnings=True)

            # Note kpartx does nothing when presented with a raw image,
            # so given we only use it when we expect a partitioned image, fail
            if not os.path.exists(map_path):
                if not err:
                    err = _('partition %s not found') % self.partition
                self.error = _('Failed to map partitions: %s') % err
            else:
                self.mapped_device = map_path
                self.mapped = True
        elif self.partition and os.path.exists(automapped_path):
            # Note auto mapping can be enabled with the 'max_part' option
            # to the nbd or loop kernel modules. Beware of possible races
            # in the partition scanning for _loop_ devices though
            # (details in bug 1024586), which are currently uncatered for.
            self.mapped_device = automapped_path
            self.mapped = True
            self.automapped = True
        else:
            # No partition requested: mount the whole device.
            self.mapped_device = self.device
            self.mapped = True

        return self.mapped

    def unmap_dev(self):
        """Remove partitions of the device from the file system namespace."""
        if not self.mapped:
            return
        LOG.debug(_("Unmap dev %s"), self.device)
        if self.partition and not self.automapped:
            # Only kpartx-created mappings need explicit removal.
            utils.execute('kpartx', '-d', self.device, run_as_root=True)
        self.mapped = False
        self.automapped = False

    def mnt_dev(self):
        """Mount the device into the file system."""
        LOG.debug(_("Mount %(dev)s on %(dir)s") %
                  {'dev': self.mapped_device, 'dir': self.mount_dir})
        _out, err = utils.trycmd('mount', self.mapped_device, self.mount_dir,
                                 run_as_root=True)
        if err:
            self.error = _('Failed to mount filesystem: %s') % err
            return False

        self.mounted = True
        return True

    def unmnt_dev(self):
        """Unmount the device from the file system."""
        if not self.mounted:
            return
        LOG.debug(_("Umount %s") % self.mapped_device)
        utils.execute('umount', self.mapped_device, run_as_root=True)
        self.mounted = False

    def do_mount(self):
        """Call the get, map and mnt operations."""
        status = False
        try:
            status = self.get_dev() and self.map_dev() and self.mnt_dev()
        finally:
            # Undo any partial setup if one of the steps failed.
            if not status:
                LOG.debug(_("Fail to mount, tearing back down"))
                self.do_teardown()
        return status

    def do_umount(self):
        """Call the unmnt operation."""
        if self.mounted:
            self.unmnt_dev()

    def do_teardown(self):
        """Call the umnt, unmap, and unget operations."""
        if self.mounted:
            self.unmnt_dev()
        if self.mapped:
            self.unmap_dev()
        if self.linked:
            self.unget_dev()
|
|
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy import asarray, empty, ravel, nonzero
from scipy.sparse import (isspmatrix_csc, isspmatrix_csr, isspmatrix,
SparseEfficiencyWarning, csc_matrix)
from . import _superlu
# Prefer the optional scikits.umfpack solver when it is importable;
# otherwise fall back to the always-present SuperLU routines.
noScikit = False
try:
    import scikits.umfpack as umfpack
except ImportError:
    noScikit = True

# Module-level default; may be toggled via use_solver().
useUmfpack = not noScikit

__all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized',
           'MatrixRankWarning']
class MatrixRankWarning(UserWarning):
    """Warning issued by spsolve when the matrix is reported exactly singular."""
    pass
def use_solver(**kwargs):
    """
    Select the default sparse direct solver.

    Valid keyword arguments with defaults (other ignored)::

      useUmfpack = True
      assumeSortedIndices = False

    The default sparse solver is umfpack when available. This can be changed by
    passing useUmfpack = False, which then causes the always present SuperLU
    based solver to be used.

    Umfpack requires a CSR/CSC matrix to have sorted column/row indices. If
    sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
    to gain some speed.
    """
    # Rebind the module-level flag only when the caller actually supplied it.
    try:
        globals()['useUmfpack'] = kwargs['useUmfpack']
    except KeyError:
        pass
    # TODO: pass other options (e.g. assumeSortedIndices) on to the scikit
def spsolve(A, b, permc_spec=None, use_umfpack=True):
    """Solve the sparse linear system Ax=b, where b may be a vector or a matrix.

    Parameters
    ----------
    A : ndarray or sparse matrix
        The square matrix A will be converted into CSC or CSR form
    b : ndarray or sparse matrix
        The matrix or vector representing the right hand side of the equation.
        If a vector, b.size must be (n,) or (n, 1)
    permc_spec : str, optional
        How to permute the columns of the matrix for sparsity preservation.
        (default: 'COLAMD')

        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering
    use_umfpack : bool (optional)
        if True (default) then use umfpack for the solution. This is
        only referenced if b is a vector and ``scikit-umfpack`` is installed.

    Returns
    -------
    x : ndarray or sparse matrix
        the solution of the sparse linear equation.
        If b is a vector, then x is a vector of size A.shape[1]
        If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])

    Notes
    -----
    For solving the matrix expression AX = B, this solver assumes the resulting
    matrix X is sparse, as is often the case for very sparse inputs. If the
    resulting X is dense, the construction of this sparse result will be
    relatively expensive. In that case, consider converting A to a dense
    matrix and using scipy.linalg.solve or its variants.
    """
    if not (isspmatrix_csc(A) or isspmatrix_csr(A)):
        A = csc_matrix(A)
        warn('spsolve requires A be CSC or CSR matrix format',
             SparseEfficiencyWarning)

    # b is treated as a vector only if it has shape (n,) or (n, 1)
    b_is_sparse = isspmatrix(b)
    if not b_is_sparse:
        b = asarray(b)
    b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))

    A.sort_indices()
    A = A.asfptype()  # upcast to a floating point format

    # validate input shapes
    M, N = A.shape
    if (M != N):
        raise ValueError("matrix must be square (has shape %s)" % ((M, N),))

    if M != b.shape[0]:
        raise ValueError("matrix - rhs dimension mismatch (%s - %s)"
                         % (A.shape, b.shape[0]))

    use_umfpack = use_umfpack and useUmfpack

    if b_is_vector and use_umfpack:
        # Dense-vector RHS solved via the UMFPACK scikit.
        if b_is_sparse:
            b_vec = b.toarray()
        else:
            b_vec = b
        b_vec = asarray(b_vec, dtype=A.dtype).ravel()

        if noScikit:
            raise RuntimeError('Scikits.umfpack not installed.')

        if A.dtype.char not in 'dD':
            raise ValueError("convert matrix data to double, please, using"
                             " .astype(), or set linsolve.useUmfpack = False")

        # 'di' = double/int indices, 'zi' = complex double/int indices.
        family = {'d': 'di', 'D': 'zi'}
        umf = umfpack.UmfpackContext(family[A.dtype.char])
        x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec,
                         autoTranspose=True)
    else:
        if b_is_vector and b_is_sparse:
            b = b.toarray()
            b_is_sparse = False

        if not b_is_sparse:
            # Dense RHS: single SuperLU solve.
            if isspmatrix_csc(A):
                flag = 1  # CSC format
            else:
                flag = 0  # CSR format

            options = dict(ColPerm=permc_spec)
            x, info = _superlu.gssv(N, A.nnz, A.data, A.indices, A.indptr,
                                    b, flag, options=options)
            if info != 0:
                warn("Matrix is exactly singular", MatrixRankWarning)
                x.fill(np.nan)
            if b_is_vector:
                x = x.ravel()
        else:
            # b is sparse
            Afactsolve = factorized(A)

            if not isspmatrix_csc(b):
                warn('spsolve is more efficient when sparse b '
                     'is in the CSC matrix format', SparseEfficiencyWarning)
                b = csc_matrix(b)

            # Create a sparse output matrix by repeatedly applying
            # the sparse factorization to solve columns of b.
            data_segs = []
            row_segs = []
            col_segs = []
            for j in range(b.shape[1]):
                bj = b[:, j].A.ravel()
                xj = Afactsolve(bj)
                # Keep only the nonzero entries of each solution column.
                w = np.flatnonzero(xj)
                segment_length = w.shape[0]
                row_segs.append(w)
                col_segs.append(np.ones(segment_length, dtype=int)*j)
                data_segs.append(np.asarray(xj[w], dtype=A.dtype))
            sparse_data = np.concatenate(data_segs)
            sparse_row = np.concatenate(row_segs)
            sparse_col = np.concatenate(col_segs)
            # Assemble the result in the same sparse class as A.
            x = A.__class__((sparse_data, (sparse_row, sparse_col)),
                            shape=b.shape, dtype=A.dtype)

    return x
def splu(A, permc_spec=None, diag_pivot_thresh=None,
         drop_tol=None, relax=None, panel_size=None, options=None):
    """
    Compute the LU decomposition of a sparse, square matrix.

    Parameters
    ----------
    A : sparse matrix
        Sparse matrix to factorize. Should be in CSR or CSC format.
    permc_spec : str, optional
        How to permute the columns of the matrix for sparsity preservation.
        (default: 'COLAMD')

        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering
    diag_pivot_thresh : float, optional
        Threshold used for a diagonal entry to be an acceptable pivot.
        See SuperLU user's guide for details [1]_
    drop_tol : float, optional
        (deprecated) No effect.
    relax : int, optional
        Expert option for customizing the degree of relaxing supernodes.
        See SuperLU user's guide for details [1]_
    panel_size : int, optional
        Expert option for customizing the panel size.
        See SuperLU user's guide for details [1]_
    options : dict, optional
        Dictionary containing additional expert options to SuperLU.
        See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument)
        for more details. For example, you can specify
        ``options=dict(Equil=False, IterRefine='SINGLE'))``
        to turn equilibration off and perform a single iterative refinement.

    Returns
    -------
    invA : scipy.sparse.linalg.SuperLU
        Object, which has a ``solve`` method.

    See also
    --------
    spilu : incomplete LU decomposition

    Notes
    -----
    This function uses the SuperLU library.

    References
    ----------
    .. [1] SuperLU http://crd.lbl.gov/~xiaoye/SuperLU/
    """
    # Fix: the default was previously the mutable ``options=dict()``, a
    # shared-default-argument hazard. ``None`` is equivalent because the
    # ``options is not None`` check below already handles it.
    if not isspmatrix_csc(A):
        A = csc_matrix(A)
        warn('splu requires CSC matrix format', SparseEfficiencyWarning)

    A.sort_indices()
    A = A.asfptype()  # upcast to a floating point format

    M, N = A.shape
    if (M != N):
        raise ValueError("can only factor square matrices")  # is this true?

    _options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
                    PanelSize=panel_size, Relax=relax)
    if options is not None:
        _options.update(options)
    return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
                          ilu=False, options=_options)
def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
          diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
    """
    Compute an incomplete LU decomposition for a sparse, square matrix.

    The resulting object is an approximation to the inverse of `A`.

    Parameters
    ----------
    A : (N, N) array_like
        Sparse matrix to factorize
    drop_tol : float, optional
        Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
        (default: 1e-4)
    fill_factor : float, optional
        Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
    drop_rule : str, optional
        Comma-separated string of drop rules to use.
        Available rules: ``basic``, ``prows``, ``column``, ``area``,
        ``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)

        See SuperLU documentation for details.
    milu : str, optional
        Which version of modified ILU to use. (Choices: ``silu``,
        ``smilu_1``, ``smilu_2`` (default), ``smilu_3``.)
    Remaining other options
        Same as for `splu`

    Returns
    -------
    invA_approx : scipy.sparse.linalg.SuperLU
        Object, which has a ``solve`` method.

    See also
    --------
    splu : complete LU decomposition

    Notes
    -----
    To improve the better approximation to the inverse, you may need to
    increase `fill_factor` AND decrease `drop_tol`.

    This function uses the SuperLU library.
    """
    if not isspmatrix_csc(A):
        A = csc_matrix(A)
        # Fix: the warning previously named 'splu'; this is spilu.
        warn('spilu requires CSC matrix format', SparseEfficiencyWarning)

    A.sort_indices()
    A = A.asfptype()  # upcast to a floating point format

    M, N = A.shape
    if (M != N):
        raise ValueError("can only factor square matrices")  # is this true?

    _options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
                    ILU_FillFactor=fill_factor,
                    DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
                    PanelSize=panel_size, Relax=relax)
    if options is not None:
        _options.update(options)
    return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
                          ilu=True, options=_options)
def factorized(A):
    """
    Return a function for solving a sparse linear system, with A pre-factorized.

    Parameters
    ----------
    A : (N, N) array_like
        Input.

    Returns
    -------
    solve : callable
        To solve the linear system of equations given in `A`, the `solve`
        callable should be passed an ndarray of shape (N,).

    Examples
    --------
    >>> A = np.array([[ 3. ,  2. , -1. ],
    ...               [ 2. , -2. ,  4. ],
    ...               [-1. ,  0.5, -1. ]])
    >>> solve = factorized(A)  # Makes LU decomposition.
    >>> rhs1 = np.array([1, -2, 0])
    >>> x1 = solve(rhs1)  # Uses the LU factors.
    >>> x1
    array([ 1., -2., -2.])
    """
    if useUmfpack:
        if noScikit:
            raise RuntimeError('Scikits.umfpack not installed.')

        if not isspmatrix_csc(A):
            A = csc_matrix(A)
            # NOTE(review): message says 'splu' but this is the umfpack
            # path of factorized -- wording predates this function.
            warn('splu requires CSC matrix format', SparseEfficiencyWarning)

        A.sort_indices()
        A = A.asfptype()  # upcast to a floating point format

        if A.dtype.char not in 'dD':
            raise ValueError("convert matrix data to double, please, using"
                             " .astype(), or set linsolve.useUmfpack = False")

        family = {'d': 'di', 'D': 'zi'}
        umf = umfpack.UmfpackContext(family[A.dtype.char])

        # Make LU decomposition.
        umf.numeric(A)

        def solve(b):
            # The factorization held by ``umf`` is reused on every call.
            return umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)

        return solve
    else:
        # SuperLU fallback: its factor object already exposes .solve.
        return splu(A).solve
|
|
"""This module contains a collection of unit tests which
validate ..main
"""
from ConfigParser import ConfigParser
import logging
import os
import sys
import tempfile
import unittest
import mock
import tornado.httpserver
from ..main import Main
from .. import async_docker_remote_api
class Patcher(object):
    """Abstract context-manager base: starts the wrapped mock patcher on
    entry and stops it on exit."""

    def __init__(self, patcher):
        super(Patcher, self).__init__()
        self._the_patcher = patcher

    def __enter__(self):
        self._the_patcher.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._the_patcher.stop()
class IsLibcurlCompiledWithAsyncDNSResolverPatcher(Patcher):
    """This context manager provides an easy way to install a
    patch allowing the caller to determine the behavior of
    tor_async_util.is_libcurl_compiled_with_async_dns_resolver().
    """

    def __init__(self, response):
        # the patched function ignores its arguments and always
        # answers ``response``
        patcher = mock.patch(
            'tor_async_util.is_libcurl_compiled_with_async_dns_resolver',
            lambda *args, **kwargs: response)
        Patcher.__init__(self, patcher)
class TornadoIOLoopInstancePatcher(Patcher):
    """Patch ``EPollIOLoop.start()`` to be a no-op so a test can drive
    ``Main`` through its IO-loop startup without blocking."""

    def __init__(self):
        # Sanity check first: this patch target only makes sense when tornado
        # actually selected the epoll IO loop (i.e. we are on Linux).
        # isinstance() instead of the original ``type(...) == ...`` comparison.
        assert isinstance(
            tornado.ioloop.IOLoop.instance(),
            tornado.platform.epoll.EPollIOLoop)

        def the_patch(*args, **kwargs):
            # deliberately do nothing instead of entering the IO loop
            pass

        patcher = mock.patch(
            'tornado.platform.epoll.EPollIOLoop.start',
            the_patch)
        Patcher.__init__(self, patcher)
class TornadoHttpServerListenPatcher(Patcher):
    """Patch ``HTTPServer.listen()`` to do nothing so tests never bind a
    real network socket."""

    def __init__(self):
        patcher = mock.patch(
            'tornado.httpserver.HTTPServer.listen',
            lambda *args, **kwargs: None)
        Patcher.__init__(self, patcher)
class ServiceConfigFile(object):
    """Context manager that materializes a temporary service configuration
    file with known values, exposing those values as attributes so tests can
    compare them against what ``Main`` parsed. The file is deleted on exit.
    """

    def __init__(self, section):
        object.__init__(self)
        self.section = section
        self.address = '1.1.1.1'
        self.port = 5555
        self.logging_level = logging.DEBUG
        self.max_concurrent_executing_http_requests = 250
        self.docker_remote_api = 'http://2.2.2.2:6666'
        self.docker_remote_api_connect_timeout = 50
        self.docker_remote_api_request_timeout = 500
        # set while inside the context manager; None otherwise
        self.filename = None

    def __str__(self):
        return str(self.filename)

    def __enter__(self):
        assert self.filename is None
        cp = ConfigParser()
        cp.add_section(self.section)
        # values are str()'d: ConfigParser.write() stringified them anyway in
        # Python 2, and Python 3's configparser rejects non-string values
        cp.set(self.section, 'address', str(self.address))
        cp.set(self.section, 'port', str(self.port))
        cp.set(self.section, 'log_level', logging.getLevelName(self.logging_level))
        cp.set(self.section, 'max_concurrent_executing_http_requests', str(self.max_concurrent_executing_http_requests))
        cp.set(self.section, 'docker_remote_api', str(self.docker_remote_api))
        cp.set(self.section, 'docker_remote_api_connect_timeout', str(self.docker_remote_api_connect_timeout))
        cp.set(self.section, 'docker_remote_api_request_timeout', str(self.docker_remote_api_request_timeout))
        # mkstemp() instead of the race-prone, deprecated mktemp()
        (fd, self.filename) = tempfile.mkstemp()
        os.close(fd)
        with open(self.filename, 'w+') as fp:
            cp.write(fp)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.filename:
            if os.path.exists(self.filename):
                os.unlink(self.filename)
            self.filename = None
class SysDotArgcPatcher(object):
    """Context manager that temporarily replaces ``sys.argv`` and restores
    the original value on exit."""

    def __init__(self, sys_dot_argv):
        object.__init__(self)
        self.sys_dot_argv = sys_dot_argv
        self._old_sys_dot_argv = None

    def __enter__(self):
        assert self._old_sys_dot_argv is None
        # Bug fix: save the *current* sys.argv. The original assigned None
        # here, so __exit__ clobbered sys.argv with None instead of
        # restoring it.
        self._old_sys_dot_argv = sys.argv
        sys.argv = self.sys_dot_argv
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        sys.argv = self._old_sys_dot_argv
        self._old_sys_dot_argv = None
class MainTestCase(unittest.TestCase):
    """Unit tests driving ``Main`` through configure()/listen() against a
    temporary config file, with tornado and libcurl probes patched out."""

    @staticmethod
    def _expected_vs_actual(service_config_file, main):
        """Return (config-file value, live value) pairs for every setting
        that Main.configure() is expected to apply."""
        return [
            (service_config_file.address, main.address),
            (service_config_file.port, main.port),
            (service_config_file.logging_level, main.logging_level),
            (service_config_file.max_concurrent_executing_http_requests,
             main.max_concurrent_executing_http_requests),
            (service_config_file.docker_remote_api,
             async_docker_remote_api.docker_remote_api_endpoint),
            (service_config_file.docker_remote_api_connect_timeout,
             async_docker_remote_api.connect_timeout),
            (service_config_file.docker_remote_api_request_timeout,
             async_docker_remote_api.request_timeout),
        ]

    def test_libcurl_async_dns_resolver(self):
        main = Main()
        with ServiceConfigFile(main.config_section) as service_config_file:
            argv = [
                'service',
                '--config=%s' % service_config_file.filename,
            ]
            with SysDotArgcPatcher(argv):
                with TornadoHttpServerListenPatcher():
                    with TornadoIOLoopInstancePatcher():
                        # configure() must work whether or not libcurl has
                        # the async DNS resolver compiled in
                        for response in (True, False):
                            with IsLibcurlCompiledWithAsyncDNSResolverPatcher(response):
                                main.configure()

    def test_configuration(self):
        main = Main()
        with ServiceConfigFile(main.config_section) as service_config_file:
            # before configure() none of the config-file values are live
            for (expected, actual) in self._expected_vs_actual(service_config_file, main):
                self.assertNotEqual(expected, actual)
            argv = [
                'service',
                '--config=%s' % service_config_file.filename,
            ]
            with SysDotArgcPatcher(argv):
                with TornadoHttpServerListenPatcher():
                    with TornadoIOLoopInstancePatcher():
                        main.configure()
            # after configure() every config-file value is live
            for (expected, actual) in self._expected_vs_actual(service_config_file, main):
                self.assertEqual(expected, actual)

    def test_happy_path(self):
        main = Main()
        with ServiceConfigFile(main.config_section) as service_config_file:
            argv = [
                'service',
                '--config=%s' % service_config_file.filename,
            ]
            with SysDotArgcPatcher(argv):
                with TornadoHttpServerListenPatcher():
                    with TornadoIOLoopInstancePatcher():
                        main.configure()
                        main.listen()
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Tests tod world + script, notably for batching, by comparing saved script logs to the
data that should have been generated.
Metrics are handled in separate files.
"""
import copy
import unittest
import parlai.core.tod.tod_test_utils.test_agents as test_agents
import parlai.core.tod.tod_core as tod_core
import parlai.scripts.tod_world_script as tod_world_script
from parlai.core.tod.tod_agents import StandaloneApiAgent
class TestTodWorldScript(tod_world_script.TodWorldScript):
    """
    Wrap around it to check its logic; also makes it easier to do things w/ underlying
    World.
    """

    def _get_tod_agents(self, opt):
        """
        Hack so we can separate out logic of making sure agent parsing is correct.
        """
        try:
            # a test may have pre-seeded ``self.agents``; prefer those
            return self.agents
        except AttributeError:
            return super()._get_tod_agents(opt)

    def _save_outputs(self, opt, world, logger, episode_metrics):
        # stash the world and logger instead of writing to disk so that
        # tests can inspect them after run()
        self.world = world
        self.logger = logger
class TodWorldInScriptTestBase(unittest.TestCase):
    """
    Shared helpers for running the TOD world script in-process and validating
    the episode logs it saves.
    """

    def add_tod_world_opts(self, base_opts):
        """
        Convenience since we're initing the opt directly without parlai parser.
        """
        opts = copy.deepcopy(base_opts)
        opts["datatype"] = "DUMMY"
        opts["datafile"] = "DUMMY"
        opts["episodes_randomization_seed"] = -1
        opts["standalone_api_file"] = test_agents.API_DATABASE_FILE
        opts["exact_api_call"] = True
        opts["log_keep_fields"] = "all"
        opts["display_examples"] = False
        # include schemas so we can make sure they're recorded correctly
        opts["include_api_schemas"] = True
        return opts

    def setup_agents(self, added_opts):
        """
        Build the standard 6-agent lineup: user, system (shared for utterance
        and API-call turns), standalone API, schema, and goal agents.
        """
        full_opts = self.add_tod_world_opts(added_opts)
        sys = test_agents.ApiCallAndSysUttAgent(full_opts)
        agents = [
            test_agents.UserUttAgent(full_opts),
            sys,
            StandaloneApiAgent(full_opts),
            sys,
            test_agents.ApiSchemaAgent(full_opts),
            test_agents.GoalAgent(full_opts),
        ]
        return agents, full_opts

    def _test_roundDataCorrect(self):
        # exercise every episode configuration the test agents provide
        self._test_roundDataCorrect_helper(test_agents.EPISODE_SETUP__SINGLE_API_CALL)
        self._test_roundDataCorrect_helper(test_agents.EPISODE_SETUP__MULTI_ROUND)
        self._test_roundDataCorrect_helper(test_agents.EPISODE_SETUP__MULTI_EPISODE)
        self._test_roundDataCorrect_helper(test_agents.EPISODE_SETUP__MULTI_EPISODE_BS)

    def _check_correctness_from_script_logs(
        self, script, opt, process_round_utts=lambda x: x
    ):
        """
        Compare the logs saved by ``script`` against the data the test agents
        should have generated. Last argument is only relevant for the
        max_turn test.
        """
        max_rounds = opt[test_agents.TEST_NUM_ROUNDS_OPT_KEY]
        max_episodes = opt[test_agents.TEST_NUM_EPISODES_OPT_KEY]
        # there's something funky with logger.get_logs() that inserts a space, but not gonna worry about it for now
        logs = [x for x in script.logger.get_logs() if len(x) > 0]
        for episode_idx in range(max_episodes):
            episode_from_world = logs[episode_idx]
            # first round is context
            context = episode_from_world[0]
            # assertEqual (assertEquals is a deprecated alias)
            self.assertEqual(
                context[0]["text"],
                "APIS: "
                + tod_core.SerializationHelpers.list_of_maps_to_str(
                    test_agents.make_api_schemas_machine(max_rounds)
                ),
            )
            self.assertEqual(
                context[3]["text"],
                "GOAL: "
                + tod_core.SerializationHelpers.list_of_maps_to_str(
                    test_agents.make_goal_calls_machine(max_rounds)
                ),
            )
            # Check the rest
            world_utts = [[x["text"] for x in turn] for turn in episode_from_world[1:]]
            # ... ignore the last DONE turn here cause it's not that important
            self.assertEqual(
                world_utts[:-1],
                process_round_utts(
                    test_agents.get_round_utts(episode_idx, max_rounds)[:-1]
                ),
            )
class TodWorldSingleBatchTest(TodWorldInScriptTestBase):
    """
    Checks that saved data is correct with a single batch.
    """

    def _run_script(self, config):
        """Build agents for ``config``, run the script, return (script, opt)."""
        agents, opt = self.setup_agents(config)
        script = TestTodWorldScript(opt)
        script.agents = agents
        script.run()
        return script, opt

    def _test_roundDataCorrect_helper(self, config):
        config["batchsize"] = 1
        config["max_turns"] = 10
        script, opt = self._run_script(config)
        self._check_correctness_from_script_logs(script, opt)

    def test_roundDataCorrect(self):
        self._test_roundDataCorrect()

    def test_max_turn(self):
        for max_turns in (4, 7):
            self._test_max_turn_helper(max_turns)

    def _test_max_turn_helper(self, max_turns):
        config = {
            "batchsize": 1,
            "max_turns": max_turns,
            test_agents.TEST_NUM_ROUNDS_OPT_KEY: 10,
            test_agents.TEST_NUM_EPISODES_OPT_KEY: 5,  # cause why not
        }
        script, opt = self._run_script(config)

        def truncate_round_utts(utts):
            # tad imprecise, but more important that it does stop.
            # subtract 1 for the context turn, then 1 cause there's an off by one somewhere
            return utts[: max_turns - 2]

        self._check_correctness_from_script_logs(script, opt, truncate_round_utts)
class TodWorldNonSingleBatchTest(TodWorldInScriptTestBase):
    """
    Checks saved data is correct with larger batchsizes.
    """

    def _test_roundDataCorrect_helper(self, config):
        config.update({"batchsize": 4, "max_turns": 10})
        agents, opt = self.setup_agents(config)
        script = TestTodWorldScript(opt)
        script.agents = agents
        script.run()
        self._check_correctness_from_script_logs(script, opt)

    def test_roundDataCorrect(self):
        self._test_roundDataCorrect()
class TodWorldTestSingleDumpAgents(TodWorldInScriptTestBase):
    """
    Just to be safe, make sure that the agents with "single" versions (ex goal + api
    schema) are correctly aligned.

    (Already tested in the agents test file as well, but to be safe.)
    """

    def setup_agents(self, added_opts, api_agent, goal_agent):
        """
        Build the 6-agent lineup using the supplied schema/goal agent classes.
        """
        full_opts = self.add_tod_world_opts(added_opts)
        full_opts["fixed_response"] = "USER: [DONE]"
        sys = test_agents.ApiCallAndSysUttAgent(full_opts)
        agents = [
            test_agents.UserUttAgent(full_opts),
            sys,
            StandaloneApiAgent(full_opts),
            sys,
            api_agent(full_opts),
            goal_agent(full_opts),
        ]
        return agents, full_opts

    def _test_SingleGoalApiResp_helper(self, batchsize, num_episodes):
        """
        Run the world once with the Single* agents and once with the multi
        agents and check the single-goal episodes line up one-to-one with the
        goals inside the multi episodes.
        """
        config = {}
        config["batchsize"] = batchsize
        config[test_agents.TEST_NUM_ROUNDS_OPT_KEY] = 10
        config[test_agents.TEST_NUM_EPISODES_OPT_KEY] = num_episodes
        single_agents, opt = self.setup_agents(
            config, test_agents.SingleApiSchemaAgent, test_agents.SingleGoalAgent
        )
        single_script = TestTodWorldScript(opt)
        single_script.agents = single_agents
        single_script.run()
        single_logs = [x for x in single_script.logger.get_logs() if len(x) > 0]
        multi_agents, opt = self.setup_agents(
            config, test_agents.ApiSchemaAgent, test_agents.GoalAgent
        )
        multi_script = TestTodWorldScript(opt)
        multi_script.agents = multi_agents
        multi_script.run()
        # Bug fix: read the logs of the *multi* run. The original read
        # single_script's logs again, so the comparison below was
        # single-vs-single and never exercised the multi agents.
        multi_logs = [x for x in multi_script.logger.get_logs() if len(x) > 0]
        single_idx = 0
        for multi_log in multi_logs:
            context = multi_log[0]
            goals = tod_core.SerializationHelpers.str_to_goals(
                context[3]["text"][len("GOAL:") :].strip()
            )
            for goal in goals:
                single_context = single_logs[single_idx][0]
                single_goal = tod_core.SerializationHelpers.str_to_goals(
                    single_context[3]["text"][len("GOAL:") :].strip()
                )
                # each Single* episode carries exactly one goal/schema...
                self.assertEqual(len(single_goal), 1)
                # ...matching the corresponding goal of the multi episode
                self.assertEqual(goal, single_goal[0])
                single_des = tod_core.SerializationHelpers.str_to_api_schemas(
                    single_context[0]["text"][len("APIS:") :].strip()
                )
                self.assertEqual(len(single_des), 1)
                self.assertEqual(single_goal[0]["api_name"], single_des[0]["api_name"])
                single_idx += 1

    def test_SingleGoalApiResp_helper_singleBatch(self):
        self._test_SingleGoalApiResp_helper(1, 2)
        self._test_SingleGoalApiResp_helper(1, 5)

    def test_SingleGoalApiResp_helper_multiBatch(self):
        self._test_SingleGoalApiResp_helper(4, 8)
        self._test_SingleGoalApiResp_helper(4, 11)
# Allow running this test file directly (as well as via a test runner).
if __name__ == "__main__":
    unittest.main()
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the nullable, indexed DateTimeField
    ``Group.resolved_at`` to the ``sentry_groupedmessage`` table."""

    def forwards(self, orm):
        # Adding field 'Group.resolved_at'
        db.add_column('sentry_groupedmessage', 'resolved_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Group.resolved_at'
        db.delete_column('sentry_groupedmessage', 'resolved_at')

    # Frozen ORM snapshot auto-generated by South. Do not edit by hand; it
    # records the model state at the time this migration was created so that
    # forwards()/backwards() run against a consistent schema description.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'sentry.user': {
            'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'sentry.event': {
            'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
        },
        'sentry.filtervalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.group': {
            'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            # the field added by this migration
            'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'sentry.groupbookmark': {
            'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
        },
        'sentry.groupmeta': {
            'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'value': ('django.db.models.fields.TextField', [], {})
        },
        'sentry.messagecountbyminute': {
            'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'sentry.messagefiltervalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.messageindex': {
            'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
            'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'sentry.option': {
            'Meta': {'object_name': 'Option'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'value': ('picklefield.fields.PickledObjectField', [], {})
        },
        'sentry.pendingteammember': {
            'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'sentry.project': {
            'Meta': {'object_name': 'Project'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
        },
        'sentry.projectcountbyminute': {
            'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'sentry.projectkey': {
            'Meta': {'object_name': 'ProjectKey'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
            'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
        },
        'sentry.projectoption': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
            'value': ('picklefield.fields.PickledObjectField', [], {})
        },
        'sentry.searchdocument': {
            'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
        },
        'sentry.searchtoken': {
            'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
            'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'sentry.team': {
            'Meta': {'object_name': 'Team'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
        },
        'sentry.teammember': {
            'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
        },
        'sentry.view': {
            'Meta': {'object_name': 'View'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
        }
    }

    complete_apps = ['sentry']
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Alias for JSON-serializable request/response payloads.
JSONType = Any
# Optional per-call callback invoked with (pipeline response, deserialized
# result, response headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Shared serializer for the request builders below; client-side validation is
# disabled (generated-code default — presumably the service validates, TODO confirm).
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_server_request(
    resource_group_name: str,
    server_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists the encryption protectors of a server."""
    api_version = "2020-11-01-preview"
    accept = "application/json"

    # URL: substitute the serialized path arguments into the template
    url = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/encryptionProtector')
    url = _format_url_section(
        url,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        serverName=_SERIALIZER.url("server_name", server_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query parameters
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_get_request(
    resource_group_name: str,
    server_name: str,
    encryption_protector_name: Union[str, "_models.EncryptionProtectorName"],
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that fetches a single server encryption protector."""
    api_version = "2020-11-01-preview"
    accept = "application/json"

    # Resolve the URL template, then substitute the serialized path arguments.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/encryptionProtector/{encryptionProtectorName}')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serverName": _SERIALIZER.url("server_name", server_name, 'str'),
        "encryptionProtectorName": _SERIALIZER.url("encryption_protector_name", encryption_protector_name, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)

    # Query string carries only the service API version.
    _query = kwargs.pop("params", {})  # type: Dict[str, Any]
    _query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # The service replies with JSON.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        params=_query,
        headers=_headers,
        **kwargs
    )
def build_create_or_update_request_initial(
    resource_group_name: str,
    server_name: str,
    encryption_protector_name: Union[str, "_models.EncryptionProtectorName"],
    subscription_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PUT request of the create-or-update long-running operation."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2020-11-01-preview"
    accept = "application/json"

    # Resolve the URL template, then substitute the serialized path arguments.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/encryptionProtector/{encryptionProtectorName}')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serverName": _SERIALIZER.url("server_name", server_name, 'str'),
        "encryptionProtectorName": _SERIALIZER.url("encryption_protector_name", encryption_protector_name, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)

    # Query string carries only the service API version.
    _query = kwargs.pop("params", {})  # type: Dict[str, Any]
    _query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Content-Type is only sent when a request body content type was supplied.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=_url,
        params=_query,
        headers=_headers,
        json=json,
        content=content,
        **kwargs
    )
def build_revalidate_request_initial(
    resource_group_name: str,
    server_name: str,
    encryption_protector_name: Union[str, "_models.EncryptionProtectorName"],
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial POST request of the revalidate long-running operation.

    This action takes no request body and expects no response body, so no
    Content-Type or Accept headers are set.
    """
    api_version = "2020-11-01-preview"

    # Resolve the URL template, then substitute the serialized path arguments.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/encryptionProtector/{encryptionProtectorName}/revalidate')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serverName": _SERIALIZER.url("server_name", server_name, 'str'),
        "encryptionProtectorName": _SERIALIZER.url("encryption_protector_name", encryption_protector_name, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)

    # Query string carries only the service API version.
    _query = kwargs.pop("params", {})  # type: Dict[str, Any]
    _query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    return HttpRequest(
        method="POST",
        url=_url,
        params=_query,
        **kwargs
    )
class EncryptionProtectorsOperations(object):
    """EncryptionProtectorsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.sql.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client and (de)serializers are injected by the generated
        # service client; `config` carries the subscription id and LRO settings.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list_by_server(
        self,
        resource_group_name: str,
        server_name: str,
        **kwargs: Any
    ) -> Iterable["_models.EncryptionProtectorListResult"]:
        """Gets a list of server encryption protectors.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either EncryptionProtectorListResult or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.sql.models.EncryptionProtectorListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.EncryptionProtectorListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page: hit the operation's templated URL. Subsequent pages:
            # reuse the server-provided next_link verbatim, forcing GET.
            if not next_link:
                request = build_list_by_server_request(
                    resource_group_name=resource_group_name,
                    server_name=server_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_server.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_server_request(
                    resource_group_name=resource_group_name,
                    server_name=server_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Turn one response page into (continuation token, element iterator).
            deserialized = self._deserialize("EncryptionProtectorListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; only HTTP 200 is a valid page response.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/encryptionProtector'}  # type: ignore

    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        server_name: str,
        encryption_protector_name: Union[str, "_models.EncryptionProtectorName"],
        **kwargs: Any
    ) -> "_models.EncryptionProtector":
        """Gets a server encryption protector.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param encryption_protector_name: The name of the encryption protector to be retrieved.
        :type encryption_protector_name: str or ~azure.mgmt.sql.models.EncryptionProtectorName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: EncryptionProtector, or the result of cls(response)
        :rtype: ~azure.mgmt.sql.models.EncryptionProtector
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.EncryptionProtector"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            encryption_protector_name=encryption_protector_name,
            subscription_id=self._config.subscription_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('EncryptionProtector', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/encryptionProtector/{encryptionProtectorName}'}  # type: ignore

    def _create_or_update_initial(
        self,
        resource_group_name: str,
        server_name: str,
        encryption_protector_name: Union[str, "_models.EncryptionProtectorName"],
        parameters: "_models.EncryptionProtector",
        **kwargs: Any
    ) -> Optional["_models.EncryptionProtector"]:
        # Initial call of the create-or-update LRO. 202 (Accepted) carries no
        # body, so the return value is Optional.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.EncryptionProtector"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'EncryptionProtector')

        request = build_create_or_update_request_initial(
            resource_group_name=resource_group_name,
            server_name=server_name,
            encryption_protector_name=encryption_protector_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        # Only a 200 response carries the updated resource in its body.
        if response.status_code == 200:
            deserialized = self._deserialize('EncryptionProtector', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/encryptionProtector/{encryptionProtectorName}'}  # type: ignore

    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        server_name: str,
        encryption_protector_name: Union[str, "_models.EncryptionProtectorName"],
        parameters: "_models.EncryptionProtector",
        **kwargs: Any
    ) -> LROPoller["_models.EncryptionProtector"]:
        """Updates an existing encryption protector.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param encryption_protector_name: The name of the encryption protector to be updated.
        :type encryption_protector_name: str or ~azure.mgmt.sql.models.EncryptionProtectorName
        :param parameters: The requested encryption protector resource state.
        :type parameters: ~azure.mgmt.sql.models.EncryptionProtector
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either EncryptionProtector or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.sql.models.EncryptionProtector]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.EncryptionProtector"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # `raw_result` is only needed (and only defined) when starting a fresh
        # LRO; resuming from a continuation token skips the initial call.
        if cont_token is None:
            # cls=lambda keeps the raw PipelineResponse so the poller can
            # drive the operation; deserialization happens in the callback below.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                encryption_protector_name=encryption_protector_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('EncryptionProtector', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling=True -> default ARM polling; False -> fire-and-return;
        # anything else is treated as a user-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/encryptionProtector/{encryptionProtectorName}'}  # type: ignore

    def _revalidate_initial(
        self,
        resource_group_name: str,
        server_name: str,
        encryption_protector_name: Union[str, "_models.EncryptionProtectorName"],
        **kwargs: Any
    ) -> None:
        # Initial call of the revalidate LRO; the action returns no body.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_revalidate_request_initial(
            resource_group_name=resource_group_name,
            server_name=server_name,
            encryption_protector_name=encryption_protector_name,
            subscription_id=self._config.subscription_id,
            template_url=self._revalidate_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _revalidate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/encryptionProtector/{encryptionProtectorName}/revalidate'}  # type: ignore

    @distributed_trace
    def begin_revalidate(
        self,
        resource_group_name: str,
        server_name: str,
        encryption_protector_name: Union[str, "_models.EncryptionProtectorName"],
        **kwargs: Any
    ) -> LROPoller[None]:
        """Revalidates an existing encryption protector.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param encryption_protector_name: The name of the encryption protector to be updated.
        :type encryption_protector_name: str or ~azure.mgmt.sql.models.EncryptionProtectorName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Same LRO bootstrap pattern as begin_create_or_update above.
        if cont_token is None:
            raw_result = self._revalidate_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                encryption_protector_name=encryption_protector_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # No body to deserialize; returns None unless a custom cls was given.
            if cls:
                return cls(pipeline_response, None, {})

        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_revalidate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/encryptionProtector/{encryptionProtectorName}/revalidate'}  # type: ignore
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""todo: add classes docstring."""
from __future__ import annotations
from dataclasses import dataclass
from pprint import pformat
from string import Template
import logging
from clouddq.classes.dq_entity import DqEntity
from clouddq.classes.dq_entity_column import DqEntityColumn
from clouddq.classes.dq_entity_uri import EntityUri
from clouddq.classes.dq_row_filter import DqRowFilter
from clouddq.classes.dq_rule import DqRule
from clouddq.utils import assert_not_none_or_empty
from clouddq.utils import get_from_dict_and_assert
from clouddq.utils import get_keys_from_dict_and_assert_oneof
import clouddq.classes.dq_configs_cache as dq_configs_cache
logger = logging.getLogger(__name__)
@dataclass
class DqRuleBinding:
    """A single CloudDQ rule binding.

    Binds a list of rules (`rule_ids`) to one column (`column_id`) of one
    entity (`entity_id` or `entity_uri`), restricted by a row filter
    (`row_filter_id`), with optional incremental-processing column and
    free-form metadata.
    """
    rule_binding_id: str
    entity_id: str | None
    entity_uri: EntityUri | None
    column_id: str
    row_filter_id: str
    incremental_time_filter_column_id: str | None
    rule_ids: list
    metadata: dict | None

    @classmethod
    def from_dict(
        cls: DqRuleBinding,
        rule_binding_id: str,
        kwargs: dict,
        default_configs: dict | None = None,
    ) -> DqRuleBinding:
        """Parse a raw rule-binding config dict into a DqRuleBinding.

        Args:
            rule_binding_id: ID of the rule binding being parsed.
            kwargs: raw configuration mapping for this rule binding.
            default_configs: defaults forwarded to EntityUri parsing.

        Returns:
            DqRuleBinding with validated fields; the binding ID is uppercased.

        Raises:
            ValueError: on missing/invalid required keys or non-dict metadata.
        """
        # Exactly one of 'entity_uri' / 'entity_id' must be present
        # (enforced by get_keys_from_dict_and_assert_oneof); otherwise
        # `entity_id` below would be unbound.
        entity_config: dict = get_keys_from_dict_and_assert_oneof(
            config_id=rule_binding_id,
            kwargs=kwargs,
            keys=["entity_uri", "entity_id"],
        )
        if "entity_id" in entity_config:
            entity_id = entity_config["entity_id"]
            entity_uri = None
        if "entity_uri" in entity_config:
            parsed_entity_uri = EntityUri.from_uri(
                entity_config["entity_uri"], default_configs=default_configs
            )
            entity_id = parsed_entity_uri.get_entity_id()
            entity_uri = parsed_entity_uri
        if entity_id:
            # NOTE(review): the result of .upper() is discarded here (and for
            # the IDs below), so these calls are no-ops apart from raising
            # AttributeError on non-string values. Uppercasing is applied at
            # the point of use (e.g. resolve_table_entity_config) instead.
            entity_id.upper()
        column_id: str = get_from_dict_and_assert(
            config_id=rule_binding_id,
            kwargs=kwargs,
            key="column_id",
        )
        if column_id:
            column_id.upper()
        row_filter_id: str = get_from_dict_and_assert(
            config_id=rule_binding_id,
            kwargs=kwargs,
            key="row_filter_id",
        )
        if row_filter_id:
            row_filter_id.upper()
        rule_ids: list[str] = get_from_dict_and_assert(
            config_id=rule_binding_id,
            kwargs=kwargs,
            key="rule_ids",
            assertion=lambda x: type(x) == list,
            error_msg=f"Rule Binding ID: '{rule_binding_id}' must have defined value "
            f"'rule_ids' of type 'list'.",
        )
        # Optional: column used to restrict processing to new rows.
        incremental_time_filter_column_id: str | None = kwargs.get(
            "incremental_time_filter_column_id", None
        )
        if incremental_time_filter_column_id:
            incremental_time_filter_column_id.upper()
        # Metadata defaults to an empty dict and must be a mapping.
        metadata: dict | None = kwargs.get("metadata", dict())
        if type(metadata) != dict:
            raise ValueError(
                f"Rule Binding ID: '{rule_binding_id}' has invalid "
                f"metadata field with type {type(metadata)} and values: {metadata}\n"
                "'metadata' must be of type dictionary."
            )
        return DqRuleBinding(
            rule_binding_id=str(rule_binding_id).upper(),
            entity_id=entity_id,
            entity_uri=entity_uri,
            column_id=column_id,
            row_filter_id=row_filter_id,
            incremental_time_filter_column_id=incremental_time_filter_column_id,
            rule_ids=rule_ids,
            metadata=metadata,
        )

    def to_dict(self: DqRuleBinding) -> dict:
        """Serialize this binding as {rule_binding_id: {field: value, ...}}."""
        if self.entity_uri:
            entity_uri = self.entity_uri.to_dict()
        else:
            entity_uri = None
        return dict(
            {
                f"{self.rule_binding_id}": {
                    "entity_id": self.entity_id,
                    "entity_uri": entity_uri,
                    "column_id": self.column_id,
                    "row_filter_id": self.row_filter_id,
                    "incremental_time_filter_column_id": self.incremental_time_filter_column_id,  # noqa: E501
                    "rule_ids": self.rule_ids,
                    "metadata": self.metadata,
                }
            }
        )

    def dict_values(self: DqRuleBinding) -> dict:
        """Return only the field mapping of to_dict(), without the outer ID key."""
        return dict(self.to_dict().get(self.rule_binding_id))

    def resolve_table_entity_config(
        self: DqRuleBinding, configs_cache: dq_configs_cache.DqConfigsCache
    ) -> DqEntity:
        """Look up this binding's table entity in the configs cache.

        Prefers `entity_uri` (resolved via its DB primary key) over a plain
        `entity_id`; raises ValueError when neither is defined.
        """
        if self.entity_uri:
            logger.debug(
                f"Resolving entity uri from configs cache:\n{pformat(self.entity_uri.to_dict())}"
            )
            table_entity: DqEntity = configs_cache.get_table_entity_id(
                self.entity_uri.get_db_primary_key().upper()
            )
        elif self.entity_id:
            table_entity: DqEntity = configs_cache.get_table_entity_id(
                self.entity_id.upper()
            )
        else:
            raise ValueError(
                f"Rule Binding ID: {self.rule_binding_id} must define "
                "either 'entity_id' or 'entity_uri'."
            )
        return table_entity

    def resolve_rule_sql_expr(self: DqRuleBinding, rule: DqRule) -> None:
        """Resolve `rule`'s SQL expression in place, re-raising with context.

        NOTE(review): the re-raise drops the original traceback; `raise ... from e`
        would preserve it.
        """
        try:
            rule.resolve_sql_expr()
        except Exception as e:
            raise ValueError(
                f"Failed to resolve rule_id '{rule.rule_id}' in "
                f"rule_binding_id '{self.rule_binding_id}' "
                f"with error:\n{e}"
            )

    def resolve_rule_config_list(
        self: DqRuleBinding,
        configs_cache: dq_configs_cache.DqConfigsCache,
    ) -> list[DqRule]:
        """Resolve each entry of `rule_ids` into a DqRule from the cache.

        Entries may be plain rule-ID strings or single-key dicts of the form
        {rule_id: arguments}; the arguments are applied to the cached rule
        before its SQL expression is resolved.

        Returns:
            Non-empty list of resolved DqRule objects.
        """
        resolved_rule_config_list = []
        for rule in self.rule_ids:
            if type(rule) == dict:
                # Nested form: exactly one {rule_id: arguments} pair allowed.
                if len(rule) > 1:
                    raise ValueError(
                        f"Rule Binding {self.rule_binding_id} has "
                        f"invalid configs in rule_ids. "
                        f"Each nested rule_id objects cannot "
                        f"have more than one rule_id. "
                        f"Current value: \n {rule}"
                    )
                else:
                    rule_id = next(iter(rule))
                    arguments = rule[rule_id]
            else:
                rule_id = rule
                arguments = None
            rule_config = configs_cache.get_rule_id(rule_id.upper())
            rule_config.update_rule_binding_arguments(arguments)
            self.resolve_rule_sql_expr(rule_config)
            resolved_rule_config_list.append(rule_config)
        assert_not_none_or_empty(
            resolved_rule_config_list,
            "Rule Binding must have non-empty rule_ids list.",
        )
        return resolved_rule_config_list

    def resolve_row_filter_config(
        self: DqRuleBinding,
        configs_cache: dq_configs_cache.DqConfigsCache,
    ) -> DqRowFilter:
        """Look up this binding's row filter in the configs cache."""
        row_filter = configs_cache.get_row_filter_id(self.row_filter_id.upper())
        return row_filter

    def resolve_all_configs_to_dict(
        self: DqRuleBinding,
        configs_cache: dq_configs_cache.DqConfigsCache,
    ) -> dict:
        """Resolve entity, column, rule, and filter configs into one flat dict.

        Args:
            configs_cache: cache holding entities, rules, and row filters.

        Returns:
            Dict with all resolved configs for template rendering.

        Raises:
            ValueError: if any referenced config cannot be resolved.
        """
        try:
            # Resolve table configs
            table_entity: DqEntity = self.resolve_table_entity_config(configs_cache)
            # Resolve column configs
            column_configs: DqEntityColumn = table_entity.resolve_column_config(
                self.column_id.upper()
            )
            incremental_time_filter_column = None
            if self.incremental_time_filter_column_id:
                # The incremental column must be a time-typed column.
                incremental_time_filter_column_config: DqEntityColumn = (
                    table_entity.resolve_column_config(
                        self.incremental_time_filter_column_id.upper()
                    )
                )
                incremental_time_filter_column_type: str = (
                    incremental_time_filter_column_config.get_column_type_value()
                )
                if incremental_time_filter_column_type not in ("TIMESTAMP", "DATETIME"):
                    raise ValueError(
                        f"incremental_time_filter_column_id: "
                        f"{self.incremental_time_filter_column_id} "
                        f"must have type TIMESTAMP or DATETIME.\n"
                        f"Current type: {incremental_time_filter_column_type}."
                    )
                incremental_time_filter_column = dict(
                    incremental_time_filter_column_config.dict_values()
                ).get("name")
            # Resolve rules configs
            rule_configs_dict = dict()
            for rule in self.resolve_rule_config_list(configs_cache):
                for rule_id, rule_config in rule.to_dict().items():
                    rule_id = rule_id.upper()
                    # Substitute the bound column name into the rule's SQL.
                    # NOTE(review): as written, only a column literally named
                    # "data" gets the "data." prefix (yielding "data.data");
                    # every other column name is used unchanged. Confirm the
                    # `!= "data"` condition is not inverted.
                    rule_sql_expr = Template(
                        rule_config["rule_sql_expr"]
                    ).safe_substitute(
                        column=column_configs.column_name
                        if column_configs.column_name != "data"
                        else f"data.{column_configs.column_name}"
                    )
                    rule_config["rule_sql_expr"] = rule_sql_expr
                    rule_configs_dict[rule_id] = rule_config
            # Resolve filter configs
            row_filter_config = self.resolve_row_filter_config(configs_cache)
            return dict(
                {
                    "rule_binding_id": self.rule_binding_id,
                    "entity_id": self.entity_id,
                    "entity_configs": dict(table_entity.dict_values()),
                    "column_id": self.column_id,
                    "column_configs": dict(column_configs.dict_values()),
                    "rule_ids": list(self.rule_ids),
                    "rule_configs_dict": rule_configs_dict,
                    "row_filter_id": self.row_filter_id,
                    "row_filter_configs": dict(row_filter_config.dict_values()),
                    "incremental_time_filter_column": incremental_time_filter_column,
                    "metadata": self.metadata,
                }
            )
        except Exception as error:
            raise ValueError(
                f"Failed to resolve Rule Binding ID '{self.rule_binding_id}' with error:\n{error}"
            )
|
|
#-*- coding: utf-8 -*-
import copy
import datetime
import functools
import json
import logging
import random
import re
from contextlib import nested
import celery
import httpretty
import mock # noqa
from django.utils import timezone
from mock import call
from modularodm import Q
from modularodm.exceptions import KeyExistsException
import pytest
from nose.tools import * # flake8: noqa
from scripts.stuck_registration_audit import find_failed_registrations
from framework.auth import Auth
from framework.celery_tasks import handlers
from website.archiver import (
ARCHIVER_INITIATED,
ARCHIVER_SUCCESS,
ARCHIVER_FAILURE,
ARCHIVER_NETWORK_ERROR,
ARCHIVER_SIZE_EXCEEDED,
NO_ARCHIVE_LIMIT,
)
from website.archiver import utils as archiver_utils
from website.archiver.tasks import ArchivedFileNotFound
from website.app import * # noqa
from website.archiver import listeners
from website.archiver.tasks import * # noqa
from osf.models.archive import ArchiveTarget, ArchiveJob
from website.archiver.decorators import fail_archive_on_error
from website import mails
from website import settings
from website.util import waterbutler_url_for
from website.util.sanitize import strip_html
from osf.models import MetaSchema
from addons.base.models import BaseStorageAddon
from osf_tests import factories
from tests.factories import MockOAuthAddonNodeSettings
from tests.base import OsfTestCase, fake
from tests import utils as test_utils
from tests.utils import unique as _unique
# Quiet down chatty framework loggers so test output stays readable.
SILENT_LOGGERS = (
    'framework.celery_tasks.utils',
    'framework.celery_tasks.signals',
    'website.app',
    'website.archiver.tasks',
)
for each in SILENT_LOGGERS:
    logging.getLogger(each).setLevel(logging.CRITICAL)

# Factories that yield unique fake sha256 digests / file names per test run.
sha256_factory = _unique(fake.sha256)
name_factory = _unique(fake.ean13)
def file_factory(name=None, sha256=None):
    """Return a fake waterbutler file-metadata dict.

    Both *name* and *sha256* default to freshly generated unique values;
    the size is a random integer in [4, 4000].
    """
    file_name = name if name else name_factory()
    checksum = sha256 if sha256 else sha256_factory()
    return {
        'path': '/{0}'.format(file_name),
        'name': file_name,
        'kind': 'file',
        'size': random.randint(4, 4000),
        'extra': {'hashes': {'sha256': checksum}},
    }
def folder_factory(depth, num_files, num_folders, path_above):
    """Return a fake folder dict rooted under *path_above*.

    Every folder contains *num_files* files; while *depth* > 0 it also
    contains *num_folders* subfolders, so every branch of the tree has equal
    depth (the invariant select_files_from_tree relies on).

    Bug fixes vs. the original:
    - ``files + [folder] if depth > 0 else []`` bound the conditional to the
      whole concatenation, so leaf folders (depth <= 0) had NO children at
      all, including files. Files are now always present.
    - *num_folders* was accepted but ignored (exactly one subfolder was
      created per level); it is now honored.
    """
    new_path = os.path.join(path_above.rstrip('/'), fake.word())
    children = [file_factory() for _ in range(num_files)]
    if depth > 0:
        children += [
            folder_factory(depth - 1, num_files, num_folders, new_path)
            for _ in range(num_folders)
        ]
    return {
        'path': new_path,
        'kind': 'folder',
        'children': children,
    }
def file_tree_factory(depth, num_files, num_folders):
    """Return a fake file tree rooted at '/'.

    The root holds *num_files* files and, while *depth* > 0, *num_folders*
    subfolders built by folder_factory.

    Bug fix vs. the original: the trailing ``if depth > 0 else []`` applied
    to the entire ``files + folders`` concatenation, so a depth-0 tree had no
    children at all even though *num_files* files were requested. Files are
    now always included; only the subfolders are gated on *depth*.
    """
    children = [file_factory() for _ in range(num_files)]
    if depth > 0:
        children += [
            folder_factory(depth - 1, num_files, num_folders, '/')
            for _ in range(num_folders)
        ]
    return {
        'path': '/',
        'kind': 'folder',
        'children': children,
    }
def select_files_from_tree(file_tree):
    """
    Select a file from every depth of a file_tree. This implementation relies on:
    - every folder has a subtree of equal depth (i.e. any folder selection is
    adequate to select a file from the maximum depth)
    The file_tree_factory fulfills this condition.
    """
    selected = {}
    node = file_tree
    # Walk down the first-folder chain, picking the first file at each level
    # and keying it by its sha256.
    while node is not None:
        children = node['children']
        files = [child for child in children if child['kind'] == 'file']
        if files:
            chosen = files[0]
            selected[chosen['extra']['hashes']['sha256']] = chosen
        folders = [child for child in children if child['kind'] == 'folder']
        node = folders[0] if folders else None
    return selected
# Static two-level file tree used by tests needing a deterministic
# (non-random) waterbutler metadata payload: one root-level file plus one
# folder containing a single file.
FILE_TREE = {
    'path': '/',
    'name': '',
    'kind': 'folder',
    'children': [
        {
            'path': '/1234567',
            'name': 'Afile.file',
            'kind': 'file',
            'size': '128',
        },
        {
            'path': '/qwerty',
            'name': 'A Folder',
            'kind': 'folder',
            'children': [
                {
                    'path': '/qwerty/asdfgh',
                    'name': 'coolphoto.png',
                    'kind': 'file',
                    'size': '256',
                }
            ],
        }
    ],
}
class MockAddon(MockOAuthAddonNodeSettings):
    """Minimal addon node-settings test double used by the archiver tests."""

    complete = True
    config = mock.MagicMock()

    def _get_file_tree(self, user, version):
        # Always serve the static fixture tree, regardless of user/version.
        return FILE_TREE

    def after_register(self, *args):
        # No registration side effects: (message, status) are both empty.
        return (None, None)

    @property
    def archive_folder_name(self):
        return 'Some Archive'

    def archive_errors(self):
        # Mock addons never report archive errors.
        return False
# Shared MockAddon instances standing in for the osfstorage and dropbox
# addons; `active_addons` tracks which short names the mocked
# get/add/delete helpers below treat as currently attached.
mock_osfstorage = MockAddon()
mock_osfstorage.config.short_name = 'osfstorage'
mock_dropbox = MockAddon()
mock_dropbox.config.short_name = 'dropbox'
active_addons = {'osfstorage', 'dropbox'}
def _mock_get_addon(name, *args, **kwargs):
    """Stand-in for AddonModelMixin.get_addon.

    Returns the shared mock addon for *name*, or None when the addon is not
    currently active (or has no mock instance).
    """
    if name in active_addons:
        if name == 'dropbox':
            return mock_dropbox
        if name == 'osfstorage':
            return mock_osfstorage
    return None
def _mock_delete_addon(name, *args, **kwargs):
    """Stand-in for AddonModelMixin.delete_addon: deactivate *name* if active.

    Bug fix: ``active_addons`` is a set, so a missing member raises KeyError,
    not ValueError — the original ``try/except ValueError`` never caught the
    actual exception. ``set.discard`` removes the element when present and is
    a silent no-op otherwise, which is the behavior the original intended.
    """
    active_addons.discard(name)
def _mock_get_or_add(name, *args, **kwargs):
    """Stand-in for AddonModelMixin.get_or_add_addon: activate *name*, then return its mock."""
    active_addons.add(name)
    return _mock_get_addon(name)
def use_fake_addons(func):
    """Decorator: patch AddonModelMixin's addon accessors with the in-memory
    mock addon registry above for the duration of *func*."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # NOTE(review): contextlib.nested is Python 2 only; on Python 3 this
        # would be a single `with` statement with multiple context managers.
        with nested(
            mock.patch('osf.models.mixins.AddonModelMixin.add_addon', mock.Mock(side_effect=_mock_get_or_add)),
            mock.patch('osf.models.mixins.AddonModelMixin.get_addon', mock.Mock(side_effect=_mock_get_addon)),
            mock.patch('osf.models.mixins.AddonModelMixin.delete_addon', mock.Mock(side_effect=_mock_delete_addon)),
            mock.patch('osf.models.mixins.AddonModelMixin.get_or_add_addon', mock.Mock(side_effect=_mock_get_or_add))
        ):
            ret = func(*args, **kwargs)
        return ret
    return wrapper
def generate_file_tree(nodes):
    """Build a fake file tree for each node and pick target files from each.

    Returns a triple ``(file_trees, selected_files, selected_file_node_index)``
    where file_trees maps node id -> tree, selected_files maps sha256 ->
    file node, and selected_file_node_index maps sha256 -> owning node id.
    """
    file_trees = {}
    selected_files = {}
    selected_file_node_index = {}
    for node in nodes:
        tree = file_tree_factory(3, 3, 3)
        file_trees[node._id] = tree
        # Select files from each Node's tree and remember their owner.
        picked = select_files_from_tree(tree)
        for sha256 in picked:
            selected_file_node_index[sha256] = node._id
        selected_files.update(picked)
    return file_trees, selected_files, selected_file_node_index
def generate_schema_from_data(data):
    """Build and persist a one-page MetaSchema matching ``data``.

    Each question becomes either an 'osf-upload' (it has 'extra'), an
    'object' whose properties are derived recursively (dict 'value'),
    or a plain 'string' question.
    """
    def from_property(id, prop):
        # Nested dict values become object properties, recursively.
        if isinstance(prop.get('value'), dict):
            return {
                'id': id,
                'type': 'object',
                'properties': [
                    from_property(pid, sp)
                    for pid, sp in prop['value'].items()
                ]
            }
        else:
            return {
                'id': id,
                'type': 'osf-upload' if prop.get('extra') else 'string'
            }

    def from_question(qid, question):
        # BUG FIX: this function previously read ``q`` — the caller's
        # comprehension variable — instead of its own ``question``
        # parameter. That only worked through Python 2 comprehension
        # scope leakage; use the parameter consistently.
        if question.get('extra'):
            return {
                'qid': qid,
                'type': 'osf-upload'
            }
        elif isinstance(question.get('value'), dict):
            return {
                'qid': qid,
                'type': 'object',
                'properties': [
                    from_property(id, value)
                    for id, value in question.get('value').items()
                ]
            }
        else:
            return {
                'qid': qid,
                'type': 'string'
            }

    _schema = {
        'name': 'Test',
        'version': 2,
        'config': {
            'hasFiles': True
        },
        'pages': [{
            'id': 'page1',
            'questions': [
                from_question(qid, q)
                for qid, q in data.items()
            ]
        }]
    }
    schema = MetaSchema(
        name=_schema['name'],
        schema_version=_schema['version'],
        schema=_schema
    )
    try:
        schema.save()
    except KeyExistsException:
        # Unfortunately, we don't have db isolation between test cases for
        # some reason. Update the doc currently in the db rather than
        # saving a new one.
        schema = MetaSchema.find_one(
            Q('name', 'eq', _schema['name']) &
            Q('schema_version', 'eq', _schema['version'])
        )
        schema.schema = _schema
        schema.save()
    return schema
def generate_metadata(file_trees, selected_files, node_index):
    """Build registration metadata exercising uploader, object and plain
    string question types for every selected file."""
    def _upload_extra(sha256, selected_file):
        # The 'extra' payload the archiver rewrites: view URL + file info.
        return [{
            'sha256': sha256,
            'viewUrl': '/project/{0}/files/osfstorage{1}'.format(
                node_index[sha256],
                selected_file['path']
            ),
            'selectedFileName': selected_file['name'],
            'nodeId': node_index[sha256]
        }]

    data = {}
    # Flat osf-upload question per selected file.
    for sha256, selected_file in selected_files.items():
        data['q_' + selected_file['name']] = {
            'value': fake.word(),
            'extra': _upload_extra(sha256, selected_file),
        }
    # Object question wrapping an upload plus a plain string property.
    for sha256, selected_file in selected_files.items():
        data['q_' + selected_file['name'] + '_obj'] = {
            'value': {
                name_factory(): {
                    'value': fake.word(),
                    'extra': _upload_extra(sha256, selected_file),
                },
                name_factory(): {
                    'value': fake.word()
                }
            }
        }
    # Plain string questions that archival must leave untouched.
    for i in range(5):
        data['q{}'.format(i)] = {
            'value': fake.word()
        }
    return data
class ArchiverTestCase(OsfTestCase):
    """Base case: builds a user, a source node and its registration, and
    runs the pre-archive bookkeeping so tests have an archive_job."""

    def setUp(self):
        super(ArchiverTestCase, self).setUp()
        # Simulate the celery before-request hook normally run per request.
        handlers.celery_before_request()
        self.user = factories.UserFactory()
        self.auth = Auth(user=self.user)
        self.src = factories.NodeFactory(creator=self.user)
        self.dst = factories.RegistrationFactory(user=self.user, project=self.src, send_signals=False)
        archiver_utils.before_archive(self.dst, self.user)
        self.archive_job = self.dst.archive_job
class TestStorageAddonBase(ArchiverTestCase):
    """Exercises BaseStorageAddon._get_file_tree against mocked waterbutler
    metadata endpoints for each archivable storage addon."""

    # Expected waterbutler metadata responses keyed by requested path.
    RESP_MAP = {
        '/': dict(data=FILE_TREE['children']),
        '/1234567': dict(data=FILE_TREE['children'][0]),
        '/qwerty': dict(data=FILE_TREE['children'][1]['children']),
        '/qwerty/asdfgh': dict(data=FILE_TREE['children'][1]['children'][0]),
    }

    @httpretty.activate
    def _test__get_file_tree(self, addon_short_name):
        """Fetching the tree should hit only folder paths, never files."""
        requests_made = []

        def callback(request, uri, headers):
            path = request.querystring['path'][0]
            requests_made.append(path)
            return (200, headers, json.dumps(self.RESP_MAP[path]))

        for path in self.RESP_MAP.keys():
            url = waterbutler_url_for(
                'metadata',
                provider=addon_short_name,
                path=path,
                node=self.src,
                user=self.user,
                view_only=True,
                _internal=True,
            )
            httpretty.register_uri(httpretty.GET,
                                   url,
                                   body=callback,
                                   # BUG FIX: was misspelled 'applcation/json'
                                   content_type='application/json')
        addon = self.src.get_or_add_addon(addon_short_name, auth=self.auth)
        root = {
            'path': '/',
            'name': '',
            'kind': 'folder',
        }
        file_tree = addon._get_file_tree(root, self.user)
        assert_equal(FILE_TREE, file_tree)
        assert_equal(requests_made, ['/', '/qwerty'])  # no requests made for files

    def _test_addon(self, addon_short_name):
        self._test__get_file_tree(addon_short_name)

    @pytest.mark.skip('Unskip when figshare addon is implemented')
    def test_addons(self):
        # Test that each addon in settings.ADDONS_ARCHIVABLE other than
        # wiki/forward implements the StorageAddonBase interface.
        for addon in [a for a in settings.ADDONS_ARCHIVABLE if a not in ['wiki', 'forward']]:
            self._test_addon(addon)
class TestArchiverTasks(ArchiverTestCase):
    """Tests for the celery tasks in website.archiver.tasks."""

    @mock.patch('framework.celery_tasks.handlers.enqueue_task')
    @mock.patch('celery.chain')
    def test_archive(self, mock_chain, mock_enqueue):
        """archive() chains a group of stat_addon tasks into archive_node."""
        archive(job_pk=self.archive_job._id)
        targets = [self.src.get_addon(name) for name in settings.ADDONS_ARCHIVABLE]
        target_addons = [addon for addon in targets if (addon and addon.complete and isinstance(addon, BaseStorageAddon))]
        assert_true(self.dst.archiving)
        mock_chain.assert_called_with(
            [
                celery.group(
                    stat_addon.si(
                        addon_short_name=addon.config.short_name,
                        job_pk=self.archive_job._id,
                    ) for addon in target_addons
                ),
                archive_node.s(job_pk=self.archive_job._id)
            ]
        )

    def test_stat_addon(self):
        """stat_addon sums disk usage across the addon's file tree."""
        with mock.patch.object(BaseStorageAddon, '_get_file_tree') as mock_file_tree:
            mock_file_tree.return_value = FILE_TREE
            res = stat_addon('osfstorage', self.archive_job._id)
        assert_equal(res.target_name, 'osfstorage')
        assert_equal(res.disk_usage, 128 + 256)

    @mock.patch('website.archiver.tasks.archive_addon.delay')
    def test_archive_node_pass(self, mock_archive_addon):
        """Under the size limit, archive_node fans out archive_addon tasks."""
        settings.MAX_ARCHIVE_SIZE = 1024 ** 3
        with mock.patch.object(BaseStorageAddon, '_get_file_tree') as mock_file_tree:
            mock_file_tree.return_value = FILE_TREE
            results = [stat_addon(addon, self.archive_job._id) for addon in ['osfstorage']]
        with mock.patch.object(celery, 'group') as mock_group:
            archive_node(results, self.archive_job._id)
        archive_osfstorage_signature = archive_addon.si(
            'osfstorage',
            self.archive_job._id,
            results
        )
        # NOTE(review): Mock.called_with is not an assertion API — it returns
        # a truthy auto-created child mock, so this assert can never fail.
        # Probably meant mock_group.assert_called_with(...); confirm before
        # changing, since a real assertion may expose a genuine mismatch.
        assert(mock_group.called_with(archive_osfstorage_signature))

    @use_fake_addons
    def test_archive_node_fail(self):
        """Exceeding MAX_ARCHIVE_SIZE routes through ArchiverTask.on_failure."""
        settings.MAX_ARCHIVE_SIZE = 100
        results = [stat_addon(addon, self.archive_job._id) for addon in ['osfstorage', 'dropbox']]
        with mock.patch('website.archiver.tasks.ArchiverTask.on_failure') as mock_fail:
            try:
                archive_node.apply(args=(results, self.archive_job._id))
            except:
                # The task itself is expected to raise; only the failure
                # hook's argument is inspected below.
                pass
        assert_true(isinstance(mock_fail.call_args[0][0], ArchiverSizeExceeded))

    @mock.patch('website.project.signals.archive_callback.send')
    @mock.patch('website.archiver.tasks.archive_addon.delay')
    def test_archive_node_does_not_archive_empty_addons(self, mock_archive_addon, mock_send):
        """Addons with an empty file tree are skipped (callback still fires)."""
        with mock.patch('osf.models.mixins.AddonModelMixin.get_addon') as mock_get_addon:
            mock_addon = MockAddon()

            def empty_file_tree(user, version):
                return {
                    'path': '/',
                    'kind': 'folder',
                    'name': 'Fake',
                    'children': []
                }
            setattr(mock_addon, '_get_file_tree', empty_file_tree)
            mock_get_addon.return_value = mock_addon
            results = [stat_addon(addon, self.archive_job._id) for addon in ['osfstorage']]
            archive_node(results, job_pk=self.archive_job._id)
        assert_false(mock_archive_addon.called)
        assert_true(mock_send.called)

    @use_fake_addons
    @mock.patch('website.archiver.tasks.archive_addon.delay')
    def test_archive_node_no_archive_size_limit(self, mock_archive_addon):
        """Initiators tagged NO_ARCHIVE_LIMIT bypass MAX_ARCHIVE_SIZE."""
        settings.MAX_ARCHIVE_SIZE = 100
        self.archive_job.initiator.add_system_tag(NO_ARCHIVE_LIMIT)
        self.archive_job.initiator.save()
        with mock.patch.object(BaseStorageAddon, '_get_file_tree') as mock_file_tree:
            mock_file_tree.return_value = FILE_TREE
            results = [stat_addon(addon, self.archive_job._id) for addon in ['osfstorage', 'dropbox']]
        with mock.patch.object(celery, 'group') as mock_group:
            archive_node(results, self.archive_job._id)
        archive_dropbox_signature = archive_addon.si(
            'dropbox',
            self.archive_job._id,
            results
        )
        # NOTE(review): called_with is a no-op check here too — see
        # test_archive_node_pass.
        assert(mock_group.called_with(archive_dropbox_signature))

    @mock.patch('website.archiver.tasks.make_copy_request.delay')
    def test_archive_addon(self, mock_make_copy_request):
        """archive_addon marks the target INITIATED and issues a WB copy."""
        result = archiver_utils.aggregate_file_tree_metadata('osfstorage', FILE_TREE, self.user)
        archive_addon('osfstorage', self.archive_job._id, result)
        assert_equal(self.archive_job.get_target('osfstorage').status, ARCHIVER_INITIATED)
        cookie = self.user.get_or_create_cookie()
        # NOTE(review): called_with is a no-op check here too — see
        # test_archive_node_pass.
        assert(mock_make_copy_request.called_with(
            self.archive_job._id,
            settings.WATERBUTLER_URL + '/ops/copy',
            data=dict(
                source=dict(
                    cookie=cookie,
                    nid=self.src._id,
                    provider='osfstorage',
                    path='/',
                ),
                destination=dict(
                    cookie=cookie,
                    nid=self.dst._id,
                    provider=settings.ARCHIVE_PROVIDER,
                    path='/',
                ),
                rename='Archive of OSF Storage',
            )
        ))

    def test_archive_success(self):
        """archive_success rewrites file-question viewUrls to the registration."""
        node = factories.NodeFactory(creator=self.user)
        file_trees, selected_files, node_index = generate_file_tree([node])
        data = generate_metadata(
            file_trees,
            selected_files,
            node_index
        )
        schema = generate_schema_from_data(data)
        with test_utils.mock_archive(node, schema=schema, data=data, autocomplete=True, autoapprove=True) as registration:
            with mock.patch.object(BaseStorageAddon, '_get_file_tree', mock.Mock(return_value=file_trees[node._id])):
                job = factories.ArchiveJobFactory(initiator=registration.creator)
                archive_success(registration._id, job._id)
                registration.reload()
                for key, question in registration.registered_meta[schema._id].items():
                    target = None
                    if isinstance(question.get('value'), dict):
                        target = [v for v in question['value'].values() if 'extra' in v and 'sha256' in v['extra'][0]][0]
                    elif 'extra' in question and 'hashes' in question['extra'][0]:
                        # NOTE(review): the sibling deeply-nested test checks
                        # 'sha256' here rather than 'hashes' — confirm which
                        # key the 'extra' payload actually carries; as written
                        # flat upload questions fall through to the else branch.
                        target = question
                    if target:
                        assert_in(registration._id, target['extra'][0]['viewUrl'])
                        assert_not_in(node._id, target['extra'][0]['viewUrl'])
                        del selected_files[target['extra'][0]['sha256']]
                    else:
                        # check non-file questions are unmodified
                        assert_equal(data[key]['value'], question['value'])
                # every selected file should have been matched exactly once
                assert_false(selected_files)

    def test_archive_success_escaped_file_names(self):
        """File names containing HTML are matched by their stripped form."""
        file_tree = file_tree_factory(0, 0, 0)
        fake_file = file_factory(name='>and&and<')
        fake_file_name = strip_html(fake_file['name'])
        file_tree['children'] = [fake_file]
        node = factories.NodeFactory(creator=self.user)
        data = {
            ('q_' + fake_file_name): {
                'value': fake.word(),
                'extra': [{
                    'sha256': fake_file['extra']['hashes']['sha256'],
                    'viewUrl': '/project/{0}/files/osfstorage{1}'.format(
                        node._id,
                        fake_file['path']
                    ),
                    'selectedFileName': fake_file_name,
                    'nodeId': node._id
                }]
            }
        }
        schema = generate_schema_from_data(data)
        draft = factories.DraftRegistrationFactory(branched_from=node, registration_schema=schema, registered_metadata=data)
        with test_utils.mock_archive(node, schema=schema, data=data, autocomplete=True, autoapprove=True) as registration:
            with mock.patch.object(BaseStorageAddon, '_get_file_tree', mock.Mock(return_value=file_tree)):
                job = factories.ArchiveJobFactory(initiator=registration.creator)
                archive_success(registration._id, job._id)
                registration.reload()
                for key, question in registration.registered_meta[schema._id].items():
                    assert_equal(question['extra'][0]['selectedFileName'], fake_file_name)

    def test_archive_success_with_deeply_nested_schema(self):
        """viewUrls are rewritten for flat osf-upload questions too."""
        node = factories.NodeFactory(creator=self.user)
        file_trees, selected_files, node_index = generate_file_tree([node])
        data = {
            ('q_' + selected_file['name']): {
                'value': fake.word(),
                'extra': [{
                    'selectedFileName': selected_file['name'],
                    'nodeId': node._id,
                    'sha256': sha256,
                    'viewUrl': '/project/{0}/files/osfstorage{1}'.format(node._id, selected_file['path'])
                }]
            }
            for sha256, selected_file in selected_files.items()
        }
        schema = generate_schema_from_data(data)
        with test_utils.mock_archive(node, schema=schema, data=data, autocomplete=True, autoapprove=True) as registration:
            with mock.patch.object(BaseStorageAddon, '_get_file_tree', mock.Mock(return_value=file_trees[node._id])):
                job = factories.ArchiveJobFactory(initiator=registration.creator)
                archive_success(registration._id, job._id)
                registration.reload()
                for key, question in registration.registered_meta[schema._id].items():
                    target = None
                    if isinstance(question['value'], dict):
                        target = [v for v in question['value'].values() if 'extra' in v and 'sha256' in v['extra'][0]][0]
                    elif 'extra' in question and 'sha256' in question['extra'][0]:
                        target = question
                    if target:
                        assert_in(registration._id, target['extra'][0]['viewUrl'])
                        assert_not_in(node._id, target['extra'][0]['viewUrl'])
                        del selected_files[target['extra'][0]['sha256']]
                    else:
                        # check non-file questions are unmodified
                        assert_equal(data[key]['value'], question['value'])
                assert_false(selected_files)

    def test_archive_success_with_components(self):
        """Rewritten viewUrls point at the matching child registration."""
        node = factories.NodeFactory(creator=self.user)
        comp1 = factories.NodeFactory(parent=node, creator=self.user)
        factories.NodeFactory(parent=comp1, creator=self.user)
        factories.NodeFactory(parent=node, creator=self.user)
        nodes = [n for n in node.node_and_primary_descendants()]
        file_trees, selected_files, node_index = generate_file_tree(nodes)
        data = generate_metadata(
            file_trees,
            selected_files,
            node_index
        )
        schema = generate_schema_from_data(data)
        with test_utils.mock_archive(node, schema=schema, data=copy.deepcopy(data), autocomplete=True, autoapprove=True) as registration:
            def mock_get_file_tree(self, *args, **kwargs):
                # Serve the tree belonging to this addon's source node.
                return file_trees[self.owner.registered_from._id]
            with mock.patch.object(BaseStorageAddon, '_get_file_tree', mock_get_file_tree):
                job = factories.ArchiveJobFactory(initiator=registration.creator)
                archive_success(registration._id, job._id)
                registration.reload()
                for key, question in registration.registered_meta[schema._id].items():
                    target = None
                    if isinstance(question['value'], dict):
                        target = [v for v in question['value'].values() if 'extra' in v and 'sha256' in v['extra'][0]]
                    elif 'extra' in question and 'sha256' in question['extra']:
                        target = question
                    if target:
                        node_id = re.search(
                            r'^/project/(?P<node_id>\w{5}).+$',
                            target[0]['extra'][0]['viewUrl']
                        ).groupdict()['node_id']
                        assert_in(
                            node_id,
                            [r._id for r in registration.node_and_primary_descendants()]
                        )
                        if target[0]['extra'][0]['sha256'] in selected_files:
                            del selected_files[target[0]['extra'][0]['sha256']]
                    else:
                        # check non-file questions are unmodified
                        assert_equal(data[key]['value'], question['value'])
                # ensure each selected file was checked
                assert_false(selected_files)

    def test_archive_success_different_name_same_sha(self):
        """With duplicate sha256s, the name-matching file wins."""
        file_tree = file_tree_factory(0, 0, 0)
        fake_file = file_factory()
        fake_file2 = file_factory(sha256=fake_file['extra']['hashes']['sha256'])
        file_tree['children'] = [fake_file, fake_file2]
        node = factories.NodeFactory(creator=self.user)
        data = {
            ('q_' + fake_file['name']): {
                'value': fake.word(),
                'extra': [{
                    'sha256': fake_file['extra']['hashes']['sha256'],
                    'viewUrl': '/project/{0}/files/osfstorage{1}'.format(
                        node._id,
                        fake_file['path']
                    ),
                    'selectedFileName': fake_file['name'],
                    'nodeId': node._id
                }]
            }
        }
        schema = generate_schema_from_data(data)
        with test_utils.mock_archive(node, schema=schema, data=data, autocomplete=True, autoapprove=True) as registration:
            with mock.patch.object(BaseStorageAddon, '_get_file_tree', mock.Mock(return_value=file_tree)):
                job = factories.ArchiveJobFactory(initiator=registration.creator)
                archive_success(registration._id, job._id)
                for key, question in registration.registered_meta[schema._id].items():
                    assert_equal(question['extra'][0]['selectedFileName'], fake_file['name'])

    def test_archive_failure_different_name_same_sha(self):
        """If the selected file name is absent, ArchivedFileNotFound raises."""
        file_tree = file_tree_factory(0, 0, 0)
        fake_file = file_factory()
        fake_file2 = file_factory(sha256=fake_file['extra']['hashes']['sha256'])
        file_tree['children'] = [fake_file2]
        node = factories.NodeFactory(creator=self.user)
        data = {
            ('q_' + fake_file['name']): {
                'value': fake.word(),
                'extra': [{
                    'sha256': fake_file['extra']['hashes']['sha256'],
                    'viewUrl': '/project/{0}/files/osfstorage{1}'.format(
                        node._id,
                        fake_file['path']
                    ),
                    'selectedFileName': fake_file['name'],
                    'nodeId': node._id
                }]
            }
        }
        schema = generate_schema_from_data(data)
        draft = factories.DraftRegistrationFactory(branched_from=node, registration_schema=schema, registered_metadata=data)
        with test_utils.mock_archive(node, schema=schema, data=data, autocomplete=True, autoapprove=True) as registration:
            with mock.patch.object(BaseStorageAddon, '_get_file_tree', mock.Mock(return_value=file_tree)):
                job = factories.ArchiveJobFactory(initiator=registration.creator)
                draft.registered_node = registration
                draft.save()
                with assert_raises(ArchivedFileNotFound):
                    archive_success(registration._id, job._id)

    def test_archive_success_same_file_in_component(self):
        """A file owned by a component maps to the child registration's URL."""
        file_tree = file_tree_factory(3, 3, 3)
        # NOTE: dict.values()[0] assumes Python 2 (list, arbitrary order).
        selected = select_files_from_tree(file_tree).values()[0]
        child_file_tree = file_tree_factory(0, 0, 0)
        child_file_tree['children'] = [selected]
        node = factories.NodeFactory(creator=self.user)
        child = factories.NodeFactory(creator=self.user, parent=node)
        data = {
            ('q_' + selected['name']): {
                'value': fake.word(),
                'extra': [{
                    'sha256': selected['extra']['hashes']['sha256'],
                    'viewUrl': '/project/{0}/files/osfstorage{1}'.format(
                        child._id,
                        selected['path']
                    ),
                    'selectedFileName': selected['name'],
                    'nodeId': child._id
                }]
            }
        }
        schema = generate_schema_from_data(data)
        with test_utils.mock_archive(node, schema=schema, data=data, autocomplete=True, autoapprove=True) as registration:
            with mock.patch.object(BaseStorageAddon, '_get_file_tree', mock.Mock(return_value=file_tree)):
                job = factories.ArchiveJobFactory(initiator=registration.creator)
                archive_success(registration._id, job._id)
                registration.reload()
                child_reg = registration.nodes[0]
                for key, question in registration.registered_meta[schema._id].items():
                    assert_in(child_reg._id, question['extra'][0]['viewUrl'])
class TestArchiverUtils(ArchiverTestCase):
    """Tests for helper functions in website.archiver.utils."""

    @mock.patch('website.mails.send_mail')
    def test_handle_archive_fail(self, mock_send_mail):
        """A failed archive mails user + support and deletes the registration."""
        archiver_utils.handle_archive_fail(
            ARCHIVER_NETWORK_ERROR,
            self.src,
            self.dst,
            self.user,
            {}
        )
        assert_equal(mock_send_mail.call_count, 2)
        assert_true(self.dst.is_deleted)

    @mock.patch('website.mails.send_mail')
    def test_handle_archive_fail_copy(self, mock_send_mail):
        """Copy errors send the ARCHIVE_COPY_ERROR mails to user and desk."""
        archiver_utils.handle_archive_fail(
            ARCHIVER_NETWORK_ERROR,
            self.src,
            self.dst,
            self.user,
            {}
        )
        args_user = dict(
            to_addr=self.user.username,
            user=self.user,
            src=self.src,
            mail=mails.ARCHIVE_COPY_ERROR_USER,
            results={},
            can_change_preferences=False,
            mimetype='html',
        )
        args_desk = dict(
            to_addr=settings.SUPPORT_EMAIL,
            user=self.user,
            src=self.src,
            mail=mails.ARCHIVE_COPY_ERROR_DESK,
            results={},
        )
        mock_send_mail.assert_has_calls([
            call(**args_user),
            call(**args_desk),
        ], any_order=True)

    @mock.patch('website.mails.send_mail')
    def test_handle_archive_fail_size(self, mock_send_mail):
        """Size-exceeded failures send the ARCHIVE_SIZE_EXCEEDED mails."""
        archiver_utils.handle_archive_fail(
            ARCHIVER_SIZE_EXCEEDED,
            self.src,
            self.dst,
            self.user,
            {}
        )
        args_user = dict(
            to_addr=self.user.username,
            user=self.user,
            src=self.src,
            mail=mails.ARCHIVE_SIZE_EXCEEDED_USER,
            can_change_preferences=False,
            mimetype='html',
        )
        args_desk = dict(
            to_addr=settings.SUPPORT_EMAIL,
            user=self.user,
            src=self.src,
            mail=mails.ARCHIVE_SIZE_EXCEEDED_DESK,
            stat_result={},
        )
        mock_send_mail.assert_has_calls([
            call(**args_user),
            call(**args_desk),
        ], any_order=True)

    def test_aggregate_file_tree_metadata(self):
        """Aggregation sums sizes and counts files/targets over FILE_TREE."""
        a_stat_result = archiver_utils.aggregate_file_tree_metadata('dropbox', FILE_TREE, self.user)
        assert_equal(a_stat_result.disk_usage, 128 + 256)
        assert_equal(a_stat_result.num_files, 2)
        assert_equal(len(a_stat_result.targets), 2)

    @use_fake_addons
    def test_archive_provider_for(self):
        """archive_provider_for returns the node's ARCHIVE_PROVIDER addon."""
        provider = self.src.get_addon(settings.ARCHIVE_PROVIDER)
        assert_equal(archiver_utils.archive_provider_for(self.src, self.user)._id, provider._id)

    @use_fake_addons
    def test_has_archive_provider(self):
        """has_archive_provider reflects presence of the archive addon."""
        assert_true(archiver_utils.has_archive_provider(self.src, self.user))
        wo = factories.NodeFactory(creator=self.user)
        wo.delete_addon(settings.ARCHIVE_PROVIDER, auth=self.auth, _force=True)
        assert_false(archiver_utils.has_archive_provider(wo, self.user))

    @use_fake_addons
    def test_link_archive_provider(self):
        """link_archive_provider restores the archive addon on a node."""
        wo = factories.NodeFactory(creator=self.user)
        wo.delete_addon(settings.ARCHIVE_PROVIDER, auth=self.auth, _force=True)
        archiver_utils.link_archive_provider(wo, self.user)
        assert_true(archiver_utils.has_archive_provider(wo, self.user))

    def test_get_file_map(self):
        """get_file_map covers every file in the tree, keyed by sha256."""
        node = factories.NodeFactory(creator=self.user)
        file_tree = file_tree_factory(3, 3, 3)
        with mock.patch.object(BaseStorageAddon, '_get_file_tree', mock.Mock(return_value=file_tree)):
            file_map = archiver_utils.get_file_map(node)
            stack = [file_tree]
            file_map = {
                sha256: value
                for sha256, value, _ in file_map
            }
            # Walk the fixture tree and check each file appears in the map.
            while len(stack):
                item = stack.pop(0)
                if item['kind'] == 'file':
                    sha256 = item['extra']['hashes']['sha256']
                    assert_in(sha256, file_map)
                    map_file = file_map[sha256]
                    assert_equal(item, map_file)
                else:
                    stack = stack + item['children']

    def test_get_file_map_with_components(self):
        """File maps include files from child components' trees."""
        node = factories.NodeFactory()
        comp1 = factories.NodeFactory(parent=node)
        factories.NodeFactory(parent=comp1)
        factories.NodeFactory(parent=node)
        file_tree = file_tree_factory(3, 3, 3)
        with mock.patch.object(BaseStorageAddon, '_get_file_tree', mock.Mock(return_value=file_tree)):
            file_map = archiver_utils.get_file_map(node)
            stack = [file_tree]
            file_map = {
                sha256: value
                for sha256, value, _ in file_map
            }
            while len(stack):
                item = stack.pop(0)
                if item['kind'] == 'file':
                    sha256 = item['extra']['hashes']['sha256']
                    assert_in(sha256, file_map)
                    map_file = file_map[sha256]
                    assert_equal(item, map_file)
                else:
                    stack = stack + item['children']

    def test_get_file_map_memoization(self):
        """A second get_file_map call must not re-fetch any file trees."""
        node = factories.NodeFactory()
        comp1 = factories.NodeFactory(parent=node)
        factories.NodeFactory(parent=comp1)
        factories.NodeFactory(parent=node)
        with mock.patch.object(BaseStorageAddon, '_get_file_tree') as mock_get_file_tree:
            mock_get_file_tree.return_value = file_tree_factory(3, 3, 3)
            # first call
            archiver_utils.get_file_map(node)
            call_count = mock_get_file_tree.call_count
            # second call
            archiver_utils.get_file_map(node)
            assert_equal(mock_get_file_tree.call_count, call_count)
class TestArchiverListeners(ArchiverTestCase):
    """Tests for signal listeners in website.archiver.listeners."""

    @mock.patch('website.archiver.tasks.archive')
    @mock.patch('website.archiver.utils.before_archive')
    def test_after_register(self, mock_before_archive, mock_archive):
        """after_register runs before_archive and enqueues the archive task."""
        listeners.after_register(self.src, self.dst, self.user)
        mock_before_archive.assert_called_with(self.dst, self.user)
        mock_archive.assert_called_with(job_pk=self.archive_job._id)

    @mock.patch('website.archiver.tasks.archive')
    @mock.patch('celery.chain')
    def test_after_register_archive_runs_only_for_root(self, mock_chain, mock_archive):
        """Child registrations defer archival until the root registers."""
        proj = factories.ProjectFactory()
        c1 = factories.ProjectFactory(parent=proj)
        c2 = factories.ProjectFactory(parent=c1)
        reg = factories.RegistrationFactory(project=proj)
        rc1 = reg.nodes[0]
        rc2 = rc1.nodes[0]
        mock_chain.reset_mock()
        listeners.after_register(c1, rc1, self.user)
        assert_false(mock_chain.called)
        listeners.after_register(c2, rc2, self.user)
        assert_false(mock_chain.called)
        listeners.after_register(proj, reg, self.user)
        for kwargs in [dict(job_pk=n.archive_job._id,) for n in [reg, rc1, rc2]]:
            mock_archive.assert_any_call(**kwargs)

    @mock.patch('website.archiver.tasks.archive')
    @mock.patch('celery.chain')
    def test_after_register_does_not_archive_pointers(self, mock_chain, mock_archive):
        """Linked (pointer) nodes are not archived with the registration."""
        proj = factories.ProjectFactory(creator=self.user)
        c1 = factories.ProjectFactory(creator=self.user, parent=proj)
        other = factories.ProjectFactory(creator=self.user)
        reg = factories.RegistrationFactory(project=proj)
        r1 = reg._nodes.first()
        proj.add_pointer(other, auth=Auth(self.user))
        listeners.after_register(c1, r1, self.user)
        listeners.after_register(proj, reg, self.user)
        for kwargs in [dict(job_pk=n.archive_job._id,) for n in [reg, r1]]:
            mock_archive.assert_any_call(**kwargs)

    @mock.patch('website.archiver.tasks.archive_success.delay')
    def test_archive_callback_pending(self, mock_delay):
        """No mail or failure handling while targets are still pending."""
        self.archive_job.update_target(
            'osfstorage',
            ARCHIVER_INITIATED
        )
        self.dst.archive_job.update_target(
            'osfstorage',
            ARCHIVER_SUCCESS
        )
        self.dst.archive_job.save()
        with mock.patch('website.mails.send_mail') as mock_send:
            with mock.patch('website.archiver.utils.handle_archive_fail') as mock_fail:
                listeners.archive_callback(self.dst)
        assert_false(mock_send.called)
        assert_false(mock_fail.called)
        assert_true(mock_delay.called)

    @mock.patch('website.mails.send_mail')
    @mock.patch('website.archiver.tasks.archive_success.delay')
    def test_archive_callback_done_success(self, mock_send, mock_archive_success):
        """A fully successful job triggers exactly one notification call."""
        # NOTE(review): @mock.patch injects mocks bottom-up, so mock_send here
        # actually receives the archive_success.delay mock — the parameter
        # names are swapped relative to the patch targets. Verify intent
        # before renaming.
        self.dst.archive_job.update_target('osfstorage', ARCHIVER_SUCCESS)
        self.dst.archive_job.save()
        listeners.archive_callback(self.dst)
        assert_equal(mock_send.call_count, 1)

    @mock.patch('website.mails.send_mail')
    @mock.patch('website.archiver.tasks.archive_success.delay')
    def test_archive_callback_done_embargoed(self, mock_send, mock_archive_success):
        """Embargoed registrations still notify once on completion."""
        # NOTE(review): same bottom-up mock-name swap as
        # test_archive_callback_done_success.
        end_date = timezone.now() + datetime.timedelta(days=30)
        self.dst.archive_job.meta = {
            'embargo_urls': {
                contrib._id: None
                for contrib in self.dst.contributors
            }
        }
        self.dst.embargo_registration(self.user, end_date)
        self.dst.archive_job.update_target('osfstorage', ARCHIVER_SUCCESS)
        self.dst.save()
        listeners.archive_callback(self.dst)
        assert_equal(mock_send.call_count, 1)

    def test_archive_callback_done_errors(self):
        """A failed target triggers handle_archive_fail with UNCAUGHT_ERROR."""
        self.dst.archive_job.update_target('osfstorage', ARCHIVER_FAILURE)
        self.dst.archive_job.save()
        with mock.patch('website.archiver.utils.handle_archive_fail') as mock_fail:
            listeners.archive_callback(self.dst)
        mock_fail.assert_called_with(ARCHIVER_UNCAUGHT_ERROR, self.src, self.dst, self.user, self.dst.archive_job.target_addons)

    def test_archive_callback_updates_archiving_state_when_done(self):
        """Completing a child's targets clears its archiving flag."""
        proj = factories.NodeFactory()
        factories.NodeFactory(parent=proj)
        reg = factories.RegistrationFactory(project=proj)
        reg.archive_job.update_target('osfstorage', ARCHIVER_INITIATED)
        child = reg.nodes[0]
        child.archive_job.update_target('osfstorage', ARCHIVER_SUCCESS)
        child.save()
        listeners.archive_callback(child)
        assert_false(child.archiving)

    def test_archive_tree_finished_d1(self):
        """Depth-1 tree: a single successful target finishes the tree."""
        self.dst.archive_job.update_target('osfstorage', ARCHIVER_SUCCESS)
        self.dst.save()
        assert_true(self.dst.archive_job.archive_tree_finished())

    def test_archive_tree_finished_d3(self):
        """Depth-3 tree: every node reports finished once all succeed."""
        proj = factories.NodeFactory()
        child = factories.NodeFactory(parent=proj)
        factories.NodeFactory(parent=child)
        reg = factories.RegistrationFactory(project=proj)
        rchild = reg._nodes.first()
        rchild2 = rchild._nodes.first()
        for node in [reg, rchild, rchild2]:
            node.archive_job._set_target('osfstorage')
        for node in [reg, rchild, rchild2]:
            node.archive_job.update_target('osfstorage', ARCHIVER_SUCCESS)
        for node in [reg, rchild, rchild2]:
            assert_true(node.archive_job.archive_tree_finished())

    def test_archive_tree_finished_false(self):
        """One pending node keeps the whole tree unfinished."""
        proj = factories.NodeFactory()
        child = factories.NodeFactory(parent=proj)
        factories.NodeFactory(parent=child)
        reg = factories.RegistrationFactory(project=proj)
        rchild = reg._nodes.first()
        rchild2 = rchild._nodes.first()
        for node in [reg, rchild, rchild2]:
            node.archive_job._set_target('osfstorage')
        for node in [reg, rchild]:
            node.archive_job.update_target('osfstorage', ARCHIVER_SUCCESS)
        rchild2.archive_job.update_target('osfstorage', ARCHIVER_INITIATED)
        rchild2.save()
        for node in [reg, rchild, rchild2]:
            assert_false(node.archive_job.archive_tree_finished())

    @mock.patch('website.mails.send_mail')
    @mock.patch('website.archiver.tasks.archive_success.delay')
    def test_archive_callback_on_tree_sends_only_one_email(self, mock_send_success, mock_arhive_success):
        """Only the callback completing the whole tree triggers the send."""
        # NOTE(review): bottom-up @mock.patch injection means
        # mock_send_success actually receives the archive_success.delay mock
        # (and vice versa); the second parameter is also misspelled
        # ('arhive'). Verify intent before renaming.
        proj = factories.NodeFactory()
        child = factories.NodeFactory(parent=proj)
        factories.NodeFactory(parent=child)
        reg = factories.RegistrationFactory(project=proj)
        rchild = reg._nodes.first()
        rchild2 = rchild._nodes.first()
        for node in [reg, rchild, rchild2]:
            node.archive_job._set_target('osfstorage')
        for node in [reg, rchild, rchild2]:
            node.archive_job.update_target('osfstorage', ARCHIVER_INITIATED)
        rchild.archive_job.update_target('osfstorage', ARCHIVER_SUCCESS)
        rchild.save()
        listeners.archive_callback(rchild)
        assert_false(mock_send_success.called)
        reg.archive_job.update_target('osfstorage', ARCHIVER_SUCCESS)
        reg.save()
        listeners.archive_callback(reg)
        assert_false(mock_send_success.called)
        rchild2.archive_job.update_target('osfstorage', ARCHIVER_SUCCESS)
        rchild2.save()
        listeners.archive_callback(rchild2)
        assert_equal(mock_send_success.call_count, 1)
        assert_true(mock_send_success.called)
class TestArchiverScripts(ArchiverTestCase):
    """Tests for the find_failed_registrations maintenance script."""

    def test_find_failed_registrations(self):
        """Only timed-out, unsent, INITIATED jobs count as failures."""
        failures = []
        legacy = []
        delta = settings.ARCHIVE_TIMEOUT_TIMEDELTA + datetime.timedelta(hours=1)
        for i in range(5):
            # Legacy registrations: archive job removed entirely — must be
            # ignored by the script.
            reg = factories.RegistrationFactory()
            archive_job = reg.archive_job
            archive_job.datetime_initiated = timezone.now() - delta
            archive_job.save()
            reg.save()
            ArchiveJob.remove_one(archive_job)
            legacy.append(reg._id)
        for i in range(5):
            # Stale INITIATED jobs past the timeout: should be flagged.
            reg = factories.RegistrationFactory()
            datetime_initiated = timezone.now() - delta
            archive_job = reg.archive_job
            archive_job.datetime_initiated = datetime_initiated
            archive_job.status = ARCHIVER_INITIATED
            archive_job.save()
            reg.save()
            archive_job._set_target('osfstorage')
            archive_job.update_target('osfstorage', ARCHIVER_INITIATED)
            archive_job.sent = False
            archive_job.save()
            failures.append(reg._id)
        pending = []
        for i in range(5):
            # Fresh INITIATED jobs (not timed out): should not be flagged.
            reg = factories.RegistrationFactory()
            archive_job = reg.archive_job
            archive_job._set_target('osfstorage')
            archive_job.update_target('osfstorage', ARCHIVER_INITIATED)
            archive_job.save()
            pending.append(reg)
        failed = find_failed_registrations()
        assert_equal(len(failed), 5)
        assert_items_equal([f._id for f in failed], failures)
        for pk in legacy:
            assert_false(pk in failed)
class TestArchiverDecorators(ArchiverTestCase):
    """Tests for archiver decorators (fail_archive_on_error)."""

    @mock.patch('website.archiver.signals.archive_fail.send')
    def test_fail_archive_on_error(self, mock_fail):
        """Exceptions in the wrapped function fire the archive_fail signal."""
        e = HTTPError(418)

        def error(*args, **kwargs):
            raise e
        func = fail_archive_on_error(error)
        func(node=self.dst)
        # NOTE: e.message is Python 2 only; Python 3 would need str(e).
        mock_fail.assert_called_with(
            self.dst,
            errors=[e.message]
        )
class TestArchiverBehavior(OsfTestCase):
    """Checks search-index updates around archival success and failure."""

    @mock.patch('osf.models.AbstractNode.update_search')
    def test_archiving_registrations_not_added_to_search_before_archival(self, mock_update_search):
        """Creating a registration must not index it while archiving."""
        proj = factories.ProjectFactory()
        reg = factories.RegistrationFactory(project=proj)
        reg.save()
        assert_false(mock_update_search.called)

    @mock.patch('osf.models.AbstractNode.update_search')
    @mock.patch('website.mails.send_mail')
    @mock.patch('website.archiver.tasks.archive_success.delay')
    def test_archiving_nodes_added_to_search_on_archive_success_if_public(self, mock_update_search, mock_send, mock_archive_success):
        """Successful archival triggers exactly one (mocked) follow-up call."""
        # NOTE(review): @mock.patch injects mocks bottom-up, so
        # mock_update_search here actually receives the archive_success.delay
        # mock — the parameter names are reversed relative to the patch
        # targets. Verify which mock this assertion is meant to target.
        proj = factories.ProjectFactory()
        reg = factories.RegistrationFactory(project=proj)
        reg.save()
        with nested(
            mock.patch('osf.models.ArchiveJob.archive_tree_finished', mock.Mock(return_value=True)),
            mock.patch('osf.models.ArchiveJob.success', mock.PropertyMock(return_value=True))
        ) as (mock_finished, mock_success):
            listeners.archive_callback(reg)
        assert_equal(mock_update_search.call_count, 1)

    @mock.patch('website.search.elastic_search.delete_doc')
    @mock.patch('website.mails.send_mail')
    def test_archiving_nodes_not_added_to_search_on_archive_failure(self, mock_send, mock_delete_index_node):
        """Failed archival removes the registration from the search index."""
        proj = factories.ProjectFactory()
        reg = factories.RegistrationFactory(project=proj)
        reg.save()
        with nested(
            mock.patch('osf.models.archive.ArchiveJob.archive_tree_finished', mock.Mock(return_value=True)),
            mock.patch('osf.models.archive.ArchiveJob.success', mock.PropertyMock(return_value=False))
        ) as (mock_finished, mock_success):
            listeners.archive_callback(reg)
        assert_true(mock_delete_index_node.called)

    @mock.patch('osf.models.AbstractNode.update_search')
    @mock.patch('website.mails.send_mail')
    def test_archiving_nodes_not_added_to_search_on_archive_incomplete(self, mock_send, mock_update_search):
        """Incomplete archival neither mails nor indexes the registration."""
        proj = factories.ProjectFactory()
        reg = factories.RegistrationFactory(project=proj)
        reg.save()
        with mock.patch('osf.models.ArchiveJob.archive_tree_finished', mock.Mock(return_value=False)):
            listeners.archive_callback(reg)
        assert_false(mock_update_search.called)
class TestArchiveTarget(OsfTestCase):
    """Smoke tests for the ArchiveTarget model."""
    def test_repr(self):
        target = ArchiveTarget()
        result = repr(target)
        # repr should identify the class and the primary key
        assert_in('ArchiveTarget', result)
        assert_in(str(target._id), result)
class TestArchiveJobModel(OsfTestCase):
    """Unit tests for the ArchiveJob model and its target tracking."""
    def tearDown(self, *args, **kwargs):
        super(TestArchiveJobModel, self).tearDown(*args, **kwargs)
        # restore settings.ADDONS_ARCHIVABLE from addons.json in case a
        # test modified it
        with open(os.path.join(settings.ROOT, 'addons.json')) as fp:
            addon_settings = json.load(fp)
        settings.ADDONS_ARCHIVABLE = addon_settings['addons_archivable']
    def test_repr(self):
        job = ArchiveJob()
        result = repr(job)
        # repr should expose class name, done flag and primary key
        assert_in('ArchiveJob', result)
        assert_in(str(job.done), result)
        assert_in(str(job._id), result)
    def test_target_info(self):
        target = ArchiveTarget(name='neon-archive')
        target.save()
        job = factories.ArchiveJobFactory()
        job.target_addons.add(target)
        result = job.target_info()
        assert_equal(len(result), 1)
        # target_info() mirrors each target's fields into a dict
        item = result[0]
        assert_equal(item['name'], target.name)
        assert_equal(item['status'], target.status)
        assert_equal(item['stat_result'], target.stat_result)
        assert_equal(item['errors'], target.errors)
    def test_get_target(self):
        proj = factories.ProjectFactory()
        reg = factories.RegistrationFactory(project=proj)
        job = ArchiveJob.objects.create(src_node=proj, dst_node=reg, initiator=proj.creator)
        job.set_targets()
        # osfstorage is always a target; unknown names resolve falsy
        osfstorage = job.get_target('osfstorage')
        assert_false(not osfstorage)
        none = job.get_target('fake')
        assert_false(none)
    def test_set_targets(self):
        proj = factories.ProjectFactory()
        reg = factories.RegistrationFactory(project=proj)
        job = ArchiveJob(src_node=proj, dst_node=reg, initiator=proj.creator)
        job.save()
        job.set_targets()
        assert_equal(list(job.target_addons.values_list('name', flat=True)), ['osfstorage'])
    def test_archive_tree_finished_with_nodes(self):
        # project with one child, plus a grandchild under a second child
        proj = factories.NodeFactory()
        factories.NodeFactory(parent=proj)
        comp2 = factories.NodeFactory(parent=proj)
        factories.NodeFactory(parent=comp2)
        reg = factories.RegistrationFactory(project=proj)
        rchild1 = reg._nodes.first()
        # nothing archived yet -> nothing reports finished
        for node in reg.node_and_primary_descendants():
            assert_false(node.archive_job.archive_tree_finished())
        # finishing a single child is not enough for the whole tree
        for target in rchild1.archive_job.target_addons.all():
            rchild1.archive_job.update_target(target.name, ARCHIVER_SUCCESS)
        rchild1.archive_job.save()
        assert_false(reg.archive_job.archive_tree_finished())
        # finish every node's targets -> whole tree reports finished
        for node in reg.node_and_primary_descendants():
            for target in node.archive_job.target_addons.all():
                node.archive_job.update_target(target.name, ARCHIVER_SUCCESS)
        for node in reg.node_and_primary_descendants():
            assert_true(node.archive_job.archive_tree_finished())
|
|
"""
This is a subfile for IsyClass.py
These funtions are accessable via the Isy class opj
"""
__author__ = 'Peter Shipley <peter.shipley@gmail.com>'
__copyright__ = "Copyright (C) 2015 Peter Shipley"
__license__ = "BSD"
from ISY.IsyNodeClass import IsyNode, IsyScene, IsyNodeFolder#, _IsyNodeBase
from ISY.IsyUtilClass import IsySubClass
from ISY.IsyExceptionClass import IsyPropertyError, IsyResponseError, IsyRuntimeWarning, IsyWarning, IsyCommunicationError, IsyInvalidCmdError, IsySoapError
import warnings
# import string
##
## Node functions
##
def load_nodes(self, reload=0) :
    """ Load node list scene list and folder info

        args : none
        internal function call

        Lazily creates the cache dicts (_nodedict, _nodegroups,
        _nodefolder, _folder2addr, _name2id), fetches /rest/nodes and
        dispatches to the _gen_* helpers to populate them.

        NOTE(review): on reload=1 only _nodefolder / _folder2addr /
        _name2id are reset here; _nodedict and _nodegroups entries are
        updated in place by the helpers -- confirm stale entries are
        acceptable on reload.

        raise:
            IsyCommunicationError : if /rest/nodes cannot be fetched
    """
    if not hasattr(self, '_nodedict') or not isinstance(self._nodedict, dict):
        self._nodedict = dict ()
        # current_node_set = None
    # else :
    #     current_node_set = set( self._nodedict.viewkeys() )
    if not hasattr(self, '_nodegroups') or not isinstance(self._nodegroups, dict):
        self._nodegroups = dict ()
    # else :
    #     current_node_set = self._nodegroups.viewkeys()
    if reload or not hasattr(self, '_nodefolder') or not isinstance(self._nodefolder, dict):
        self._nodefolder = dict ()
    if reload or not hasattr(self, '_folder2addr') or not isinstance(self._folder2addr, dict):
        self._folder2addr = dict ()
    if reload or not hasattr(self, '_name2id') or not isinstance(self._name2id, dict):
        self._name2id = dict ()
    # self.nodeCdict = dict ()
    # self._node2addr = dict ()
    if self.debug & 0x01 :
        print("load_nodes")
    nodeinfo = self._getXMLetree("/rest/nodes")
    if nodeinfo is None :
        raise IsyCommunicationError("Load Node Info Fail : " \
            + self.error_str)
    self._gen_folder_list(nodeinfo, reload=reload)
    self._gen_nodedict(nodeinfo, reload=reload)
    self._gen_nodegroups(nodeinfo, reload=reload)
    # self._printdict(self._nodedict)
    # print("load_nodes self._node2addr : ", len(self._node2addr))
    self._gen_member_list()
def _gen_member_list(self, reload=0) :
    """Generate node connection (membership) lists.

    Links folders, scenes and nodes to their parents by appending each
    child's address to the parent entry's 'members' list.

    internal function call
    """
    if not self._nodedict :
        return
    else :
        # Folders can only belong to Folders
        for faddr in self._nodefolder :
            # make code easier to read
            foldr = self._nodefolder[faddr]
            # add members list if needed
            if 'members' not in foldr :
                foldr['members'] = list()
            # check if folder obj has a parent
            if 'parent' in foldr :
                # this should always be true
                if foldr['parent-type'] == '3' and \
                        foldr['parent'] in self._nodefolder :
                    if 'members' not in self._nodefolder[foldr['parent']] :
                        self._nodefolder[foldr['parent']]['members'] = list()
                    self._nodefolder[foldr['parent']]['members'].append( foldr['address'])
                else:
                    # print("warn bad parenting foldr =", foldr)
                    warnings.warn("Bad Parent : Folder {0} {1} : {2}".format( \
                        foldr["name"], faddr, foldr['parent']), IsyRuntimeWarning)
        # Scenes can only belong to Folders
        for sa in self._nodegroups :
            s = self._nodegroups[sa]
            if "parent" in s :
                if s['parent-type'] == '3' and s['parent'] in self._nodefolder :
                    self._nodefolder[s['parent']]['members'].append( s['address'])
                else:
                    # print("warn bad parenting s = ", s)
                    warnings.warn("Bad Parent : Scene {0} {1} : {2}".format( \
                        s["name"], sa, s['parent']), IsyRuntimeWarning)
        # A Node can belong only to ONE and only ONE Folder or another Node
        for naddr in self._nodedict :
            n = self._nodedict[naddr]
            # print("n = ", n)
            if 'pnode' in n and n['pnode'] != n['address'] :
                if 'members' not in self._nodedict[n['pnode']] :
                    self._nodedict[n['pnode']]['members'] = list ()
                self._nodedict[n['pnode']]['members'].append( n['address'] )
            if 'parent' in n :
                if 'pnode' not in n or n['parent'] != n['pnode'] :
                    # NOTE(review): 'parent-type' values are compared to
                    # the *strings* '3'/'1' everywhere else in this file;
                    # the int comparisons below can never match, so both
                    # branches look dead.  Do not "fix" blindly: the
                    # scene branch calls .add() on what _gen_nodegroups()
                    # builds as a dict -- confirm intent first.
                    if n['parent-type'] == 3 :
                        if n['parent'] in self._nodefolder :
                            self._nodefolder[n['parent']]['members'].append( n['address'])
                    elif n['parent-type'] == 1 :
                        if n['parent'] in self._nodegroups :
                            self._nodegroups[n['parent']]['members'].add( n['address'])
    # 'parent': '16 6C D2 1', 'parent-type': '1',
    # 'parent': '12743', 'parent-type': '3',
    # if n.pnode == n.parent and n.pnode == n.address
    #     next
def node_releoad(self) :
    """Force a reload of node/scene/folder data from the ISY.

    NOTE(review): the method name is misspelled ("releoad") but is kept
    as-is for backward compatibility with existing callers.
    """
    return self.load_nodes(reload=1)
def _gen_folder_list(self, nodeinfo, reload=0) :
    """ generate folder dictionary for load_node()

        Walks the <folder> elements in nodeinfo and populates
        self._nodefolder (keyed by folder address), self._folder2addr
        (upper-cased name -> address) and self._name2id.
    """
    for fold in nodeinfo.iter('folder'):
        xelm = fold.find('address')
        if hasattr(xelm, 'text') :
            # Bugfix: the membership test previously looked in
            # self._nodegroups (the scene dict) while indexing
            # self._nodefolder, which could raise KeyError on reload;
            # check the folder dict itself (mirrors _gen_nodegroups /
            # _gen_nodedict).  Reusing the existing entry keeps wrapper
            # object references valid.
            if xelm.text in self._nodefolder :
                fprop = self._nodefolder[xelm.text]
            else :
                fprop = self._nodefolder[xelm.text] = dict()
        else :
            warnings.warn("Error : no address in folder", IsyRuntimeWarning)
            continue
        # element attributes, prefixed with the tag name
        for k, v in fold.items() :
            fprop[fold.tag + "-" + k] = v
        # child elements (address, name, parent, ...) and their attributes
        for child in list(fold):
            fprop[child.tag] = child.text
            if child.attrib :
                for k, v in child.items() :
                    fprop[child.tag + "-" + k] = v
        n = fprop["name"].upper()
        self._folder2addr[n] = fprop["address"]
        # name2id to replace folder2addr as a global lookup table
        if n in self._name2id :
            print("Dup name2id (Folder) : \"" + n + "\" ", fprop["address"])
            print("\t_name2id ", self._name2id[n])
        else :
            self._name2id[n] = ("folder", fprop["address"])
    #self._printdict(self._nodefolder)
    #self._printdict(self._folder2addr)
def _gen_nodegroups(self, nodeinfo, reload=0) :
    """ generate scene / group dictionary for load_node()

        Walks the <group> elements in nodeinfo and fills
        self._nodegroups (keyed by scene address), self._groups2addr
        (name -> address) and self._name2id.
    """
    # self._nodegroups = dict ()
    self._groups2addr = dict ()
    for grp in nodeinfo.iter('group'):
        xelm = grp.find('address')
        if hasattr(xelm, 'text') :
            # reuse an existing entry on reload so IsyScene wrappers keep
            # their references
            if xelm.text in self._nodegroups :
                gprop = self._nodegroups[xelm.text]
            else :
                gprop = self._nodegroups[xelm.text] = dict()
        else :
            warnings.warn("Error : no address in scene", IsyRuntimeWarning)
            continue
        # element attributes, prefixed with the tag name
        for k, v in grp.items() :
            gprop[grp.tag + "-" + k] = v
        for child in list(grp) :
            if child.tag == "parent" :
                gprop[child.tag] = child.text
                for k, v in child.items() :
                    gprop[child.tag + "-" + k] = v
            elif child.tag == "members" :
                # members is stored as a dict : link address -> link type
                glist = dict ()
                for lnk in child.iter('link'):
                    glist[lnk.text] = lnk.attrib['type']
                gprop[child.tag] = glist
            else :
                gprop[child.tag] = child.text
                if child.attrib :
                    for k, v in child.items() :
                        gprop[child.tag + "-" + k] = v
        if "address" in gprop :
            # self._nodegroups[gprop["address"]] = gprop
            if "name" in gprop :
                n = gprop["name"]
                if n in self._groups2addr :
                    warnings.warn("Duplicate group name {0} : {1} {2}".format(n, \
                        str(gprop["address"]), self._groups2addr[n]), IsyRuntimeWarning)
                else :
                    self._groups2addr[n] = str(gprop["address"])
                # duplicate names across kinds are silently ignored here
                if n in self._name2id :
                    pass
                    # warnings.warn("Dup name2id (Group) : \"" + n + "\" ", gprop["address"] + "\n\t_name2id " + self._name2id[n], IsyRuntimeWarning)
                else :
                    self._name2id[n] = ("group", gprop["address"])
        else :
            # should raise an exception ?
            self._printinfo(grp, "Error : no address in group :")
def _gen_nodedict(self, nodeinfo, reload=0) :
    """ generate node dictionary for load_node()

        Walks the <node> elements in nodeinfo and fills self._nodedict
        (keyed by node address), self._node2addr (name -> address) and
        self._name2id.  Duplicate names raise IsyRuntimeWarning and are
        collected in warn_dup_name_list.
    """
    warn_dup_name_list = list()
    self._node2addr = dict()
    for inode in nodeinfo.iter('node'):
        # self._printinfo(inode, "\n\n inode")
        xelm = inode.find('address')
        if hasattr(xelm, 'text') :
            # reuse an existing entry on reload so IsyNode wrappers keep
            # their references
            if xelm.text in self._nodedict :
                idict = self._nodedict[xelm.text]
            else :
                idict = self._nodedict[xelm.text] = dict()
        else :
            warnings.warn("Error : no address in node", IsyRuntimeWarning)
            continue
        # element attributes, prefixed with the tag name
        for k, v in inode.items() :
            idict[inode.tag + "-" + k] = v
        for child in list(inode) :
            # self._printinfo(child, "\tchild")
            if child.tag == "parent" :
                idict[child.tag] = child.text
                for k, v in child.items() :
                    idict[child.tag + "-" + k] = v
            # special case where ST, OL, and RR
            elif child.tag == "property" :
                # "property" is a nested dict : property id -> attrib dict
                if child.tag not in idict :
                    idict[child.tag] = dict ()
                nprop = dict ()
                for k, v in child.items() :
                    # print("child.items", k, v)
                    nprop[k] = v
                if "id" in nprop :
                    idict[child.tag][nprop["id"]] = nprop
            else :
                idict[child.tag] = child.text
        if "address" in idict :
            # self._nodedict[idict["address"]] = idict
            if "name" in idict :
                n = idict["name"]
                if n in self._node2addr :
                    # NOTE(review): this append reads self._name2id[n]
                    # while the warning text uses self._node2addr[n] --
                    # possibly a copy/paste slip; confirm which table was
                    # intended.
                    warn_dup_name_list.append( (n ,idict["address"], self._name2id[n]) )
                    warn_mess = "Duplicate Node name \"{0}\" :".format(n) \
                        + " \"{1}\"\n\t\"{2}\"".format(\
                        n, idict["address"], self._node2addr[n])
                    warnings.warn(warn_mess, IsyRuntimeWarning)
                else :
                    self._node2addr[n] = idict["address"]
                # thinking of replacing _node2addr with _name2id
                # do to ease managment of the three node types
                if not reload and n in self._name2id :
                    warn_dup_name_list.append( (n ,idict["address"], self._name2id[n]) )
                    warn_mess = "Dup name2id (Node) \"{0}\" :".format(n) \
                        + " \"{1}\"\n\t\"{2}\"".format(\
                        n ,idict["address"], self._name2id[n])
                    warnings.warn(warn_mess, IsyRuntimeWarning)
                else :
                    self._name2id[n] = ("node", idict["address"])
        else :
            # should raise an exception
            # self._printinfo(inode, "Error : no address in node :")
            warnings.warn("Error : no address in node", IsyRuntimeWarning)
    #print("\n>>>>\t", self._nodedict, "\n<<<<<\n")
#
# access methods to Node data
#
def node_names(self) :
    """ access method for node names

        returns a dict mapping Node name -> Node address

        Bugfix: the old code returned ``self._node2addr[:]`` -- slicing
        a dict raises TypeError.  Return a shallow copy instead so
        callers cannot mutate the internal table.
    """
    if not self._node2addr :
        self.load_nodes()
    return dict(self._node2addr)
def scene_names(self) :
    """ access method for scene names

        returns a dict mapping Scene name -> Scene address

        Bugfix: the old code returned ``self._groups2addr[:]`` --
        slicing a dict raises TypeError.  Return a shallow copy instead
        so callers cannot mutate the internal table.
    """
    if not self._groups2addr :
        self.load_nodes()
    return dict(self._groups2addr)
def node_addrs(self) :
    """ access method for node addresses

        returns a view of node addresses
        (Python 2 ``dict.viewkeys()`` -- reflects later changes)
    """
    if not self._nodedict :
        self.load_nodes()
    return self._nodedict.viewkeys()
def scene_addrs(self) :
    """ access method for scene addresses

        returns a view of scene/group addresses
        (Python 2 ``dict.viewkeys()`` -- reflects later changes)
    """
    if not self._nodegroups :
        self.load_nodes()
    return self._nodegroups.viewkeys()
def node_get_path(self, nodeid) :
    """ get "/parent/.../name" path for a node/scene/folder

        args:
            nodeid = name, address or node obj
        raise:
            IsyInvalidCmdError : if nodeid is unknown

        Bugfix: the lazy-load call used the nonexistent
        ``self.load_node()``; the loader is named ``load_nodes()``.
    """
    if not self._nodedict :
        self.load_nodes()
    node_type, node_id = self._node_get_id(nodeid)
    if not node_id :
        raise IsyInvalidCmdError("node_path: unknown node : " + str(nodeid) )
    return self._node_get_path(node_id, node_type)
def _node_get_path(self, node_id, node_type) :
    """Build the "/parent/.../name" path for an already-resolved id.

    node_type is "node", "scene"/"group" or "folder".

    Bugfix: _node_get_id() reports scenes as kind "group" while this
    function only accepted "scene", so every scene fell through to the
    unknown-type branch; accept both spellings.
    """
    if node_type == "node" :
        noded = self._nodedict[node_id]
    elif node_type in ("scene", "group") :
        noded = self._nodegroups[node_id]
    elif node_type == "folder" :
        noded = self._nodefolder[node_id]
    else :
        warnings.warn("Internal Error : unknown node type", IsyRuntimeWarning)
        return "/" + node_id
    fpath = "/" + noded['name']
    # climb the parent chain; parent-type '3' = folder, '1' = node
    while "parent" in noded :
        if noded["parent-type"] == '3' :
            noded = self._nodefolder[ noded["parent"] ]
        elif noded["parent-type"] == '1' :
            noded = self._nodedict[ noded["parent"] ]
        fpath = "/" + noded["name"] + fpath
    return fpath
def get_node(self, node_id) :
    """ Get a Node object for given node or scene name or ID

        args:
            node : node name or id
        return:
            An IsyNode / IsyScene / IsyNodeFolder object representing
            the requested item
        raise:
            LookupError : if the name/id does not resolve

        Wrapper objects are cached in self.nodeCdict so repeated lookups
        return the same instance.
    """
    if self.debug & 0x01 :
        print("get_node")
    (nodetype, nodeid) = self._node_get_id(node_id)
    # cached wrapper already built for this address?
    if nodeid in self.nodeCdict :
        return self.nodeCdict[nodeid]
    # build the right wrapper type for the address
    if nodeid in self._nodedict :
        self.nodeCdict[nodeid] = IsyNode(self, self._nodedict[nodeid])
        return self.nodeCdict[nodeid]
    elif nodeid in self._nodegroups:
        self.nodeCdict[nodeid] = IsyScene(self, self._nodegroups[nodeid])
        return self.nodeCdict[nodeid]
    elif nodeid in self._nodefolder:
        self.nodeCdict[nodeid] = IsyNodeFolder(self, self._nodefolder[nodeid])
        return self.nodeCdict[nodeid]
    else :
        # print("Isy get_node no node : \"{!s:}\"".format(nodeid))
        raise LookupError("no node such Node : " + str(nodeid) )
    # should never get here
    #print "And you may ask yourself-Well...How did I get here?"
    return None
def _node_get_name(self, nid):
    """Resolve *nid* to a (kind, name) tuple.

    kind is "node", "scene" or "folder"; (None, nid) when unknown.

    NOTE(review): when nid is an IsySubClass instance this returns the
    bare name string, not a (kind, name) tuple -- callers that unpack
    two values would fail on that path; confirm intent.
    """
    if not self._nodedict :
        self.load_nodes()
    if isinstance(nid, IsySubClass) :
        return nid["name"]
    else :
        n = str(nid).strip()
        if n in self._nodedict :
            return ("node", self._nodedict[n]["name"])
        if n in self._nodegroups :
            return ("scene", self._nodegroups[n]["name"])
        if n in self._nodefolder :
            return ("folder", self._nodefolder[n]["name"])
        return (None, n)
def _node_get_id(self, nid):
    """ node/scene/Folder name to node/scene/folder ID

    Returns a (kind, address) tuple where kind is "node", "group" or
    "folder", or (None, None) when nothing matches.

    NOTE(review): for an IsySubClass instance this returns the bare
    address, not a (kind, address) tuple, although every caller in this
    file unpacks two values -- confirm.
    NOTE(review): scenes are reported as kind "group" here while
    _node_get_name() uses "scene"; keep that in mind when comparing
    kinds.
    """
    if not self._nodedict :
        self.load_nodes()
    if isinstance(nid, IsySubClass) :
        return nid["addr"]
    else :
        n = str(nid).strip()
        ## node: accept an address directly or resolve a name
        if n in self._nodedict :
            # print("_node_get_id : " + n + " nodedict " + n
            return ("node", n)
        if n in self._node2addr :
            # print("_node_get_id : " + n + " _node2addr " + self._node2addr[n])
            return ("node", self._node2addr[n])
        ## scene/group
        if n in self._nodegroups :
            # print("_node_get_id : " + n + " nodegroups " + n)
            return ("group", n)
        if n in self._groups2addr :
            # print("_node_get_id : " + n + " _groups2addr " + self._groups2addr[n])
            return ("group", self._groups2addr[n])
        ## folder
        if n in self._folder2addr :
            # print("_node_get_id : " + n + " _folder2addr " + self._folder2addr[n])
            return ("folder", self._folder2addr[n])
        if n in self._nodefolder :
            # print("_node_get_id : " + n + " _nodefolder " + n)
            return ("folder", n)
    # Fail #
    #print("_node_get_id : " + n + " None")
    return(None, None)
# [Needs fixing]
#
# Get property for a node
#
def node_get_prop(self, naddr, prop_id) :
    """ Get a node property value

        args:
            naddr = name, address or node obj
            prop_id = name of property (e.g. 'ST', 'OL', 'RR')
        raise:
            LookupError : if node name or Id is invalid
            IsyPropertyError : if property invalid / not queryable

        Bugfixes:
          * the queryable check indexed self.controls with the literal
            string "prop_id" instead of the variable;
          * "property" is a dict keyed by property id (see
            _gen_nodedict), so the value lives one level deeper than the
            old ["property"]["value"] lookup;
          * ``prop`` was only assigned inside ``if prop_id`` yet used
            unconditionally afterwards.
    """
    #<isQueryAble>true</isQueryAble>
    if self.debug & 0x01 :
        print("node_get_prop")
    (nodetype, node_id) = self._node_get_id(naddr)
    if not node_id :
        raise LookupError("node_get_prop: unknown node : " + str(naddr) )
    prop = prop_id
    if prop_id :
        if "isQueryAble" in self.controls[prop_id] and \
                self.controls[prop_id]["isQueryAble"] == "false" :
            raise IsyPropertyError("non Queryable property " + prop_id)
    if prop_id in ['ST', 'OL', 'RR'] :
        if prop in self._nodedict[node_id]["property"] :
            return self._nodedict[node_id]["property"][prop]["value"]
        else :
            raise IsyPropertyError("unknown property " + prop_id)
    if prop in self._nodedict[node_id] :
        return self._nodedict[node_id][prop]
    else :
        raise IsyPropertyError("unknown property " + prop_id)
# Set property for a node
#
def node_set_prop(self, naddr, prop, val) :
    """ Set node property
        args:
            naddr = name, address or node obj
            prop = name of property
            val = new value to assign
        raise:
            LookupError : if node name or Id is invalid
            IsyPropertyError : if property invalid
            TypeError : if property valid

        calls /rest/nodes/<node-id>/set/<property>/<value>

        Bugfixes:
          * the readOnly and isNumeric checks indexed self.controls with
            the literal string "prop_id" instead of the resolved id;
          * ``str(val).isdigit`` was never *called* (missing parens), so
            the numeric validation could never fire.
    """
    if self.debug & 0x01 :
        print("node_set_prop")
    (nodetype, node_id) = self._node_get_id(naddr)
    if not node_id :
        raise LookupError("node_set_prop: unknown node : " + str(naddr) )
    prop_id = self._get_control_id(prop)
    if prop_id :
        if "readOnly" in self.controls[prop_id] and \
                self.controls[prop_id]["readOnly"] == "true" :
            raise IsyPropertyError("readOnly property " + prop_id)
        prop = str(prop)
        if "isNumeric" in self.controls[prop_id] and \
                self.controls[prop_id]["isNumeric"] == "true" and \
                not str(val).isdigit() :
            raise IsyPropertyError("Numeric property " + prop_id)
    self._node_send(naddr, "set", prop, val)
    # keep the local cache fresh unless event updates do it for us
    if not self.eventupdates :
        self._updatenode(naddr)
# to replace _node_set_prop and _node_comm
def _node_send(self, naddr, action, prop, *args) :
    """Send an action/property REST call for a node.

    called by node_comm() or node_set_prop() after argument validation

    calls /rest/nodes/<naddr>/<action>/<prop>/<arg1>/...
    raise:
        IsyResponseError : if the ISY reports the call failed

    Bugfix: the error message was formatted as ``"..." % naddr, prop``
    -- the % operator bound only to naddr and the stray second argument
    made the format itself raise TypeError; the tuple is now
    parenthesized.
    """
    xurl = "/rest/nodes/{!s:}/{!s:}/{!s:}/{!s:}".format(naddr, action, prop, "/".join(str(x) for x in args) )
    if self.debug & 0x02 : print("xurl = " + xurl)
    resp = self._getXMLetree(xurl)
    # self._printXML(resp)
    if resp is None or resp.attrib["succeeded"] != 'true' :
        raise IsyResponseError(
            "Node Cmd/Property Set error : node=%s prop=%s " %
            (naddr, prop) )
#def _node_set_prop(self, naddr, prop, val) :
# """ node_set_prop without argument validation """
# #print("_node_set_prop : node=%s prop=%s val=%s" % str(naddr), prop, val)
# print ("_node_set_prop : node=" + str(naddr) + " prop=" + prop +
# " val=" + val )
# xurl = "/rest/nodes/" + naddr + "/set/" + prop + "/" + val
# resp = self._getXMLetree(xurl)
# self._printXML(resp)
# if resp.attrib["succeeded"] != 'true' :
# raise IsyResponseError("Node Property Set error : node=%s prop=%s val=%s" %
# naddr, prop, val )
#
#
# Send command to Node/Scene
#
def node_comm(self, naddr, cmd, *args) :
    """ Send node command
        args:
            naddr = name, address or node obj
            cmd = name of command
            args = optional value argument(s) for cmd
        raise:
            LookupError : if node name or Id is invalid
            TypeError : if command is invalid

        calls /rest/nodes/<node-id>/cmd/<cmd>/<cmd value>

        Bugfix: the extra command values were forwarded to _node_send()
        as a single tuple argument, so the URL ended up containing the
        tuple's repr; unpack them instead.  A leftover unconditional
        debug print is now gated on the debug flag.
    """
    if self.debug & 0x04 :
        print("node_comm", naddr, cmd)
    (nodetype, node_id) = self._node_get_id(naddr)
    cmd_id = self._get_control_id(cmd)
    if not node_id :
        raise LookupError("node_comm: unknown node : {!s}".format(naddr) )
    if self.debug & 0x04 :
        print("naddr : ", naddr, " : ", node_id)
    if not cmd_id :
        raise TypeError("node_comm: unknown command : {!s}".format(cmd) )
    self._node_send(node_id, "cmd", cmd_id, *args)
    # keep the local cache fresh unless event updates do it for us
    if not self.eventupdates :
        self._updatenode(naddr)
#
# Send command to Node without all the arg checking
#
#def _node_comm(self, node_id, cmd_id, *args) :
# """ send command to a node or scene without name to ID overhead """
# if self.debug & 0x04 :
# print("_node_comm", node_id, cmd_id)
# # rest/nodes/<nodeid>/cmd/<command_name>/<param1>/<param2>/.../<param5>
# xurl = ("/rest/nodes/" + node_id + "/cmd/" + cmd_id +
# "/" + "/".join(str(x) for x in args) )
#
# if self.debug & 0x02 :
# print("xurl = " + xurl)
# resp = self._getXMLetree(xurl)
# self._printXML(resp)
# if resp.attrib["succeeded"] != 'true' :
# raise IsyResponseError("ISY command error : node_id=" +
# str(node_id) + " cmd=" + str(cmd_id))
#
##
## Node Type
##
def load_node_types(self) :
    """ Load node type info into a multi-dimensional dictionary

        args : none
        internal function call

        Populates self._nodeCategory from /WEB/cat.xml (category names)
        and /WEB/1_fam.xml (sub-category names):
            _nodeCategory[cat_id]["name"]  = category name
            _nodeCategory[cat_id][sub_id]  = sub-category name

        raise:
            IsyCommunicationError : if either resource fails to load
    """
    if self.debug & 0x01 :
        print("load_node_types")
    typeinfo = self._getXMLetree("/WEB/cat.xml")
    if typeinfo is None :
        raise IsyCommunicationError("Load Node Type Info Fail : " \
            + self.error_str)
    if not hasattr(self, '_nodeCategory') or not isinstance(self._nodeCategory, dict):
        self._nodeCategory = dict ()
    for ncat in typeinfo.iter('nodeCategory'):
        if not ncat.attrib["id"] in self._nodeCategory :
            self._nodeCategory[ncat.attrib["id"]] = dict ()
        self._nodeCategory[ncat.attrib["id"]]["name"] = ncat.attrib["name"]
    typeinfo = self._getXMLetree("/WEB/1_fam.xml")
    if typeinfo is None :
        raise IsyCommunicationError("Load Node Type Info Fail : " \
            + self.error_str)
    for ncat in typeinfo.iter('nodeCategory'):
        for subcat in ncat.iter('nodeSubCategory'):
            ## FIX
            if not ncat.attrib["id"] in self._nodeCategory :
                self._nodeCategory[ncat.attrib["id"]] = dict ()
            # print("ID : ", ncat.attrib["id"], " : ", subcat.attrib["id"])
            # print("ID name: ", subcat.attrib["name"])
            # map sub-category id -> sub-category name
            self._nodeCategory[ncat.attrib["id"]][subcat.attrib["id"]] = subcat.attrib["name"]
            #self._printinfo(subcat, "subcat :")
    if self.debug & 0x100 :
        print("nodeCategory : ", self._nodeCategory)
        self._printdict(self._nodeCategory)
def node_get_type(self, typid) :
    """Map a node's raw type value to a (category, sub-category) pair.

    *typid* is a dotted string such as "1.2.56.0"; the first two fields
    select entries in self._nodeCategory.  Falls back to the raw field
    values when no human-readable entry exists, and to (typid, "") when
    the value has fewer than two fields.
    """
    if not self._nodeCategory :
        self.load_node_types()
    parts = typid.split('.')
    if len(parts) < 2 :
        # not a "<cat>.<subcat>..." value; echo it back unchanged
        return (typid, "")
    cat_key, sub_key = parts[0], parts[1]
    devcat, subcat = cat_key, sub_key
    cat_entry = self._nodeCategory[cat_key]
    if cat_entry :
        devcat = cat_entry["name"]
    # sub-category names carry a "DEV_CAT_" prefix in the source data
    if self._nodeCategory[cat_key][sub_key] :
        subcat = self._nodeCategory[cat_key][sub_key].replace('DEV_CAT_', '')
    return (devcat, subcat)
def node_iter(self, **kargs):
    """ Iterate though nodes

        kwargs:
            nodetype : iterable of kinds to yield; any of "node",
                       "scene", "folder" (default: ("node", "scene"))
            parent   : only yield items whose parent attribute matches;
                       accepts an address or an Isy* wrapper object
        returns :
            Return an iterator over the Node Obj
    """
    nodetype = kargs.get("nodetype", ("node", "scene"))
    # this should be generalized to be any attr
    parent = kargs.get("parent", None)
    if parent :
        if isinstance(parent, IsySubClass):
            parent = parent.address
    if not self._nodedict :
        self.load_nodes()
    # collect the addresses to visit; each kind is sorted separately
    k = list()
    if "node" in nodetype :
        # print "adding node"
        k.extend( sorted(self._nodedict.keys()) )
    if "scene" in nodetype :
        # print "adding scene"
        k.extend( sorted(self._nodegroups.keys()) )
    if "folder" in nodetype :
        # print "adding folder"
        k.extend( sorted(self._nodefolder.keys()) )
    # else :
    #     k = sorted(self._nodedict.keys())
    #     k.extend( sorted(self._nodegroups.keys()))
    for n in k :
        if parent :
            nod = self.get_node(n)
            # yield only children of the requested parent
            if parent == getattr(nod, "parent", None) :
                yield nod
        else :
            yield self.get_node(n)
## redundant
#def _updatenode(self, naddr) :
# """ update a node's property from ISY device """
# xurl = "/rest/nodes/" + self._nodedict[naddr]["address"]
# if self.debug & (0x01 & 0x10) :
# print("_updatenode pre _getXML")
# _nodestat = self._getXMLetree(xurl)
# # del self._nodedict[naddr]["property"]["ST"]
# for prop in _nodestat.iter('property'):
# tprop = dict ( )
# for k, v in prop.items() :
# tprop[k] = v
# if "id" in tprop :
# self._nodedict[naddr]["property"][tprop["id"]] = tprop
# self._nodedict[naddr]["property"]["time"] = time.gmtime()
# redundant
def _updatenode(self, naddr) :
    """ update a node's cached data from the ISY device

        Fetches /rest/nodes/<naddr> and merges the top-level child
        elements and every <property> element into
        self._nodedict[naddr].
    """
    xurl = "/rest/nodes/" + naddr
    if self.debug & 0x01 :
        print("_updatenode pre _getXML")
    _nodestat = self._getXMLetree(xurl)
    # del self._nodedict[naddr]["property"]["ST"]
    # first pass: plain child elements (name, type, ...);
    # <property> elements are merged separately below
    for child in list(_nodestat) :
        if child.tag == "property" :
            continue
        if child.text :
            self._nodedict[naddr][child.tag] = child.text
        if child.attrib :
            for k, v in list(child.items()) :
                self._nodedict[naddr][child.tag + "-" + k] = v
    for prop in _nodestat.iter('property'):
        tprop = dict ( )
        for k, v in prop.items() :
            tprop[k] = v
        if "id" in tprop :
            # NOTE(review): assumes the property id already exists in the
            # cache (populated by _gen_nodedict); a brand-new id would
            # raise KeyError here -- confirm.
            self._nodedict[naddr]["property"][tprop["id"]].update(tprop)
    #self._nodedict[naddr]["property"]["time"] = time.gmtime()
def node_get_notes(self, naddr) :
    """ Get a node's notes

        args:
            naddr = name, address or node obj
        return:
            dict mapping note element tag -> text
            (empty dict when the node has no notes)
        raise:
            LookupError : if node name or Id is invalid
    """
    if self.debug & 0x04 :
        print("node_get_notes", naddr)
    (nodetype, node_id) = self._node_get_id(naddr)
    if not node_id :
        raise LookupError("node_get_notes: unknown node : {!s}".format(naddr) )
    ret_prop = dict ( )
    xurl = "/rest/nodes/{!s:}/notes".format(node_id)
    if self.debug & 0x02 : print("xurl = " + xurl)
    resp = self._getXMLetree(xurl)
    # a missing/empty response simply yields an empty dict
    if resp is not None :
        for notes in resp.iter('NodeProperties') :
            for note_val in list(notes) :
                ret_prop[note_val.tag] = note_val.text
    return ret_prop
#
# Send command to Node/Scene
#
def node_enable(self, naddr, enable=True) :
    """ enable/disable node
        args:
            naddr = name, address or node obj
            enable = bool ( True=enable / False=disable)
        raise:
            LookupError : if node name or Id is invalid
            IsyResponseError : if Error in ISY responce

        calls /rest/nodes/<node-id>/enable  or  .../disable

        Bugfixes:
          * the REST URL was built from the raw naddr argument even
            though node_id was already resolved -- a human-readable name
            would produce a bad path;
          * the error-message %-format tuple was not parenthesized, so
            raising the error itself raised TypeError.
    """
    if self.debug & 0x04 :
        print("node_enable", naddr, enable)
    (nodetype, node_id) = self._node_get_id(naddr)
    if not node_id :
        raise LookupError("node_comm: unknown node : " + str(naddr) )
    if enable :
        op = "enable"
    else :
        op = "disable"
    xurl = "/rest/nodes/{!s:}/{!s:}".format(node_id, op)
    if self.debug & 0x02 : print("xurl = " + xurl)
    resp = self._getXMLetree(xurl)
    # self._printXML(resp)
    if resp is None or resp.attrib["succeeded"] != 'true' :
        raise IsyResponseError(
            "Node Cmd/Property Set error : node=%s resp=%s " %
            (naddr, resp) )
def node_set_powerinfo(self, naddr, deviceClass=None, wattage=None, dcPeriod=None ) :
    """Set a node's power info via the SetNodePowerInfo soap call.

    args :
        naddr        node id, name or node obj
        deviceClass  device class (unsigned int)
        wattage      watts (unsigned int)
        dcPeriod     duty cycle

    Any argument left as None keeps the node's current cached value.

    raise:
        LookupError : if node name or Id is invalid
        IsyPropertyError : if naddr is not a node

    Bugfix: the current-value defaults were looked up with the raw
    naddr argument, which raises KeyError when a node *name* is passed;
    use the resolved node_id instead.
    """
    if self.debug & 0x04 :
        print("node_power_info", naddr, deviceClass, wattage, dcPeriod)
    (nodetype, node_id) = self._node_get_id(naddr)
    if not node_id :
        raise LookupError("node_comm: unknown node : " + str(naddr) )
    if nodetype != "node" :
        raise IsyPropertyError("Can't set powerinfo on non-node devices")
    if wattage is None :
        wattage = self._nodedict[node_id]['wattage']
    if deviceClass is None :
        deviceClass = self._nodedict[node_id]['deviceClass']
    if dcPeriod is None :
        dcPeriod = self._nodedict[node_id]['dcPeriod']
    return self.soapcomm("SetNodePowerInfo", node=node_id,
        deviceClass=deviceClass, wattage=wattage, dcPeriod=dcPeriod)
def node_del(self, naddr) :
    """
    Permanently remove a Node/Scene/Folder from device configuration

    args :
        naddr       node id or name

    raise:
        LookupError : if node name or Id is invalid
        IsyResponseError : if Error in ISY responce
        IsyInternalError : should never happen

    Bugfix: use the ``except ... as ...`` form -- the old comma form is
    a syntax error on Python 3, while the as-form also works on
    Python 2.6+.
    """
    (nodetype, node_id) = self._node_get_id(naddr)
    if self.debug & 0x04 :
        print("node_del", naddr)
    if not node_id :
        raise LookupError(
            "node_del: {0} not a node ( {1}={2} )".format(
                naddr, node_id, nodetype))
    try :
        r = self._node_remove(node_id)
    except IsySoapError as se :
        # if error code is 501 then Node did not exist or was already deleted
        # this is messy and needs to change or be removed
        code = se.code()
        if code == 501 :
            return se.httperrbody
        raise
    else :
        return r
def _node_remove(self, node_id) :
    """Permanently delete *node_id* from the ISY configuration.

    Thin wrapper around the soap ``RemoveNode`` call; argument
    validation and 501 handling are done by the caller (node_del).
    """
    if self.debug & 0x04 :
        print("_node_remove", node_id)
    return self.soapcomm("RemoveNode", node=node_id)
def node_restore_all(self, flag=0) :
    """Restore every device from the configuration stored in the ISY.

    args :
        flag    0 = restore everything from the ISY config files
                1 = skip regenerating groups/scenes (useful for testing)

    raise:
        IsyResponseError : if Error in ISY responce
    """
    return self.soapcomm("RestoreDevices", flag=flag)
# move from ISYClass
#def node_rename(self, naddr, name) :
# return self.rename(naddr, name)
def node_restore(self, naddr, flag=0) :
    """Restore one device from the configuration stored in the ISY.

    args :
        naddr   node id or name
        flag    0 = restore everything from the ISY config files
                1 = skip regenerating groups/scenes (useful for testing)

    raise:
        LookupError : if node name or Id is invalid
        IsyResponseError : if Error in ISY responce
    """
    (nodetype, node_id) = self._node_get_id(naddr)
    if not node_id :
        raise LookupError("node_restore: unknown node : " + str(naddr) )
    return self.soapcomm("RestoreDevice", node=node_id, flag=flag)
# Do nothing
# (syntax check)
#
if __name__ == "__main__":
    # running this subfile directly just proves it parses
    import __main__
    print(__main__.__file__)
    print("syntax ok")
    exit(0)
|
|
from __future__ import unicode_literals
import logging
from django import forms
from django.contrib import messages
from django.forms import widgets
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
from djblets.forms.fields import TimeZoneField
from djblets.siteconfig.models import SiteConfiguration
from djblets.configforms.forms import ConfigPageForm
from reviewboard.accounts.backends import get_enabled_auth_backends
from reviewboard.reviews.models import Group
from reviewboard.site.urlresolvers import local_site_reverse
class AccountPageForm(ConfigPageForm):
    """Base class for a form on the My Account page.

    AccountPageForms belong to AccountPages, and will be displayed on
    the My Account page for a user.

    A simple form presents fields that can be filled out and posted.
    More advanced forms can supply their own template or even their own
    JavaScript models and views.
    """
class AccountSettingsForm(AccountPageForm):
    """Form for the Settings page for an account."""
    form_id = 'settings'
    form_title = _('Settings')
    save_label = _('Save Settings')
    timezone = TimeZoneField(
        label=_('Time zone'),
        required=True,
        help_text=_("The time zone you're in."))
    syntax_highlighting = forms.BooleanField(
        label=_('Enable syntax highlighting in the diff viewer'),
        required=False)
    open_an_issue = forms.BooleanField(
        label=_('Always open an issue when comment box opens'),
        required=False)
    should_send_email = forms.BooleanField(
        label=_('Get e-mail notification for review requests and reviews'),
        required=False)
    should_send_own_updates = forms.BooleanField(
        label=_('Get e-mail notifications for my own activity'),
        required=False)
    default_use_rich_text = forms.BooleanField(
        label=_('Always use Markdown for text fields'),
        required=False)
    def load(self):
        """Load the form's initial values from the user's profile."""
        self.set_initial({
            'open_an_issue': self.profile.open_an_issue,
            'should_send_email': self.profile.should_send_email,
            'should_send_own_updates': self.profile.should_send_own_updates,
            'syntax_highlighting': self.profile.syntax_highlighting,
            'timezone': self.profile.timezone,
            # NOTE(review): reads profile.should_use_rich_text while
            # save() writes profile.default_use_rich_text -- presumably
            # should_use_rich_text is a derived accessor; confirm.
            'default_use_rich_text': self.profile.should_use_rich_text,
        })
        # hide the syntax-highlighting option when the feature is
        # disabled site-wide
        siteconfig = SiteConfiguration.objects.get_current()
        if not siteconfig.get('diffviewer_syntax_highlighting'):
            del self.fields['syntax_highlighting']
    def save(self):
        """Write the cleaned form values back to the user's profile."""
        # the field may have been removed in load() when highlighting is
        # disabled site-wide
        if 'syntax_highlighting' in self.cleaned_data:
            self.profile.syntax_highlighting = \
                self.cleaned_data['syntax_highlighting']
        self.profile.open_an_issue = self.cleaned_data['open_an_issue']
        self.profile.should_send_email = self.cleaned_data['should_send_email']
        self.profile.should_send_own_updates = \
            self.cleaned_data['should_send_own_updates']
        self.profile.default_use_rich_text = \
            self.cleaned_data['default_use_rich_text']
        self.profile.timezone = self.cleaned_data['timezone']
        self.profile.save()
        messages.add_message(self.request, messages.INFO,
                             _('Your settings have been saved.'))
class APITokensForm(AccountPageForm):
    """Form for showing a user's API tokens.

    There is nothing to save here; the form only supplies serialized token
    data to the JavaScript view, grouped by the site they belong to.
    """
    form_id = 'api_tokens'
    form_title = _('API Tokens')
    save_label = None
    js_view_class = 'RB.APITokensView'

    def get_js_view_data(self):
        """Return the serialized token data passed to the JavaScript view."""
        # All of the user's API tokens, across the global site and every
        # LocalSite.
        tokens = self.user.webapi_tokens.all()

        # Bucket the tokens per site; the empty string keys the global site.
        by_site = SortedDict()
        by_site[''] = self._serialize_api_tokens(None, tokens)

        for site in self.page.config_view.ordered_user_local_sites:
            by_site[site.name] = self._serialize_api_tokens(site, tokens)

        return {
            'apiTokens': by_site,
        }

    def _serialize_api_tokens(self, local_site, api_tokens):
        """Serialize the subset of tokens belonging to one site."""
        if local_site is None:
            prefix = None
        else:
            # Drop the leading '/' from the site's root URL.
            prefix = local_site_reverse(
                'root',
                local_site_name=local_site.name)[1:]

        serialized = []

        for token in api_tokens:
            if token.local_site != local_site:
                continue

            serialized.append({
                'id': token.pk,
                'tokenValue': token.token,
                'timeAdded': token.time_added,
                'lastUpdated': token.last_updated,
                'note': token.note,
                'policy': token.policy,
            })

        return {
            'localSitePrefix': prefix,
            'tokens': serialized,
        }
class ChangePasswordForm(AccountPageForm):
    """Form for changing a user's password.

    Only visible when the primary authentication backend supports password
    changes.
    """
    form_id = 'change_password'
    form_title = _('Change Password')
    save_label = _('Change Password')
    old_password = forms.CharField(
        label=_('Current password'),
        required=True,
        widget=widgets.PasswordInput())
    password1 = forms.CharField(
        label=_('New password'),
        required=True,
        widget=widgets.PasswordInput())
    password2 = forms.CharField(
        label=_('New password (confirm)'),
        required=True,
        widget=widgets.PasswordInput())

    def is_visible(self):
        """Get whether or not the "change password" form should be shown."""
        backend = get_enabled_auth_backends()[0]
        return backend.supports_change_password

    def clean_old_password(self):
        """Validate the 'old_password' field.

        This checks to make sure the old password is correct when changing the
        password.

        Returns:
            The validated password.

        Raises:
            forms.ValidationError:
                The password was incorrect, or the backend failed
                unexpectedly while authenticating.
        """
        backend = get_enabled_auth_backends()[0]
        password = self.cleaned_data['old_password']

        try:
            is_authenticated = backend.authenticate(self.user.username,
                                                    password)
        except Exception as e:
            logging.error('Error when calling authenticate for auth backend '
                          '%r: %s',
                          backend, e, exc_info=1)
            raise forms.ValidationError(_('Unexpected error when validating '
                                          'the password. Please contact the '
                                          'administrator.'))

        if not is_authenticated:
            raise forms.ValidationError(_('This password is incorrect'))

        # Bug fix: Django clean_<field> methods must return the cleaned
        # value. Previously nothing was returned, which silently replaced
        # cleaned_data['old_password'] with None.
        return password

    def clean_password2(self):
        """Validate the 'password2' field.

        This makes sure that the two password fields match.

        Returns:
            The confirmed password.
        """
        p1 = self.cleaned_data['password1']
        p2 = self.cleaned_data['password2']

        if p1 != p2:
            raise forms.ValidationError(_('Passwords do not match'))

        return p2

    def save(self):
        """Change the password via the auth backend and notify the user."""
        backend = get_enabled_auth_backends()[0]

        try:
            backend.update_password(self.user, self.cleaned_data['password1'])
            self.user.save()
            messages.add_message(self.request, messages.INFO,
                                 _('Your password has been changed.'))
        except Exception as e:
            logging.error('Error when calling update_password for auth '
                          'backend %r: %s',
                          backend, e, exc_info=1)
            # NOTE(review): this failure is reported with messages.INFO;
            # messages.ERROR looks intended, but templates may depend on the
            # level -- confirm before changing.
            messages.add_message(self.request, messages.INFO,
                                 _('Unexpected error when changing your '
                                   'password. Please contact the '
                                   'administrator.'))
class ProfileForm(AccountPageForm):
    """Form for the Profile page for an account.

    Edits the user's name, e-mail address, and profile visibility. Fields
    the active auth backend cannot update are removed in load().
    """
    form_id = 'profile'
    form_title = _('Profile')
    save_label = _('Save Profile')
    first_name = forms.CharField(
        label=_('First name'),
        required=False)
    last_name = forms.CharField(
        label=_('Last name'),
        required=False)
    email = forms.EmailField(
        label=_('E-mail address'),
        required=True)
    profile_private = forms.BooleanField(
        required=False,
        label=_("Keep profile information private"))

    def load(self):
        """Load the current user/profile values into the form."""
        self.set_initial({
            'first_name': self.user.first_name,
            'last_name': self.user.last_name,
            'email': self.user.email,
            'profile_private': self.profile.is_private,
        })

        # Hide fields the active auth backend can't update.
        backend = get_enabled_auth_backends()[0]

        if not backend.supports_change_name:
            del self.fields['first_name']
            del self.fields['last_name']

        if not backend.supports_change_email:
            del self.fields['email']

    def save(self):
        """Save the form.

        Name/e-mail changes are pushed to the auth backend where supported;
        backend failures are logged but do not abort the local save.
        """
        backend = get_enabled_auth_backends()[0]

        if backend.supports_change_name:
            self.user.first_name = self.cleaned_data['first_name']
            self.user.last_name = self.cleaned_data['last_name']

            try:
                backend.update_name(self.user)
            except Exception as e:
                logging.error('Error when calling update_name for auth '
                              'backend %r: %s',
                              backend, e, exc_info=1)

        if backend.supports_change_email:
            new_email = self.cleaned_data['email']

            # Only round-trip to the backend when the address changed.
            if new_email != self.user.email:
                self.user.email = new_email

                try:
                    backend.update_email(self.user)
                except Exception as e:
                    logging.error('Error when calling update_email for auth '
                                  'backend %r: %s',
                                  backend, e, exc_info=1)

        self.user.save()

        self.profile.is_private = self.cleaned_data['profile_private']
        self.profile.save()

        messages.add_message(self.request, messages.INFO,
                             _('Your profile has been saved.'))
class GroupsForm(AccountPageForm):
    """Form for the group membership page.

    This form has no fields and never saves to the database. It exists to
    configure the JavaScript view and hand it serialized data describing
    the review groups; joining and leaving groups happens through the API.
    """
    form_id = 'groups'
    form_title = _('Groups')
    save_label = None
    js_view_class = 'RB.JoinedGroupsView'

    def get_js_view_data(self):
        """Return serialized group data passed to the JavaScript view."""
        # IDs of the groups the user has already joined.
        joined_ids = self.user.review_groups.values_list('pk', flat=True)

        # Serialize the accessible groups per site; the empty string keys
        # the global site.
        serialized = SortedDict()
        serialized[''] = self._serialize_groups(None, joined_ids)

        for site in self.page.config_view.ordered_user_local_sites:
            serialized[site.name] = self._serialize_groups(site, joined_ids)

        return {
            'groups': serialized,
        }

    def _serialize_groups(self, local_site, joined_group_ids):
        """Serialize the groups the user can access on one site."""
        local_site_name = local_site.name if local_site else None

        accessible = Group.objects.accessible(user=self.user,
                                              local_site=local_site)

        serialized = []

        for group in accessible.order_by('name'):
            serialized.append({
                'name': group.name,
                'reviewGroupID': group.pk,
                'displayName': group.display_name,
                'localSiteName': local_site_name,
                'joined': group.pk in joined_group_ids,
                'url': local_site_reverse('group',
                                          local_site_name=local_site_name,
                                          kwargs={'name': group.name}),
            })

        return serialized
|
|
#
# io_fits.py -- Module wrapper for loading FITS files.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
There are two possible choices for a python FITS file reading package
compatible with Ginga: astropy/pyfits and fitsio. Both are based on
the CFITSIO library, although it seems that astropy's version has
changed quite a bit from the original, while fitsio is still tracking
the current version.
To force the use of one, do:
from ginga.util import io_fits
io_fits.use('package')
(replace 'package' with one of {'astropy', 'fitsio'}) before you load
any images. Otherwise Ginga will try to pick one for you.
"""
import numpy
from ginga.util import iohelper
# Module state, managed by use() below.
# NOTE(review): fits_configured is declared global in use() but never set
# True there, so the auto-selection guard at module bottom always runs --
# confirm whether use() was meant to flip it.
fits_configured = False
fitsLoaderClass = None      # handler class selected by use()
have_pyfits = False         # astropy.io.fits (or standalone pyfits) imported
have_fitsio = False         # fitsio imported
class FITSError(Exception):
    """Exception raised for errors loading or writing FITS files."""
    pass
def use(fitspkg, raise_err=True):
    """Select the FITS I/O package used by this module.

    Parameters
    ----------
    fitspkg : str
        One of 'astropy' or 'fitsio'. Any other value is ignored.
    raise_err : bool
        If True, re-raise the ImportError when the requested package cannot
        be imported; if False, return False instead. (An unrecognized
        `fitspkg` never raises; it simply returns False.)

    Returns
    -------
    bool
        True if the package was imported and selected.
    """
    global fits_configured, fitsLoaderClass, \
           have_pyfits, pyfits, \
           have_fitsio, fitsio

    if fitspkg == 'astropy':
        try:
            from astropy.io import fits as pyfits
            have_pyfits = True
            fitsLoaderClass = PyFitsFileHandler
            # Bug fix: record that a package has been configured; this flag
            # was previously declared global here but never assigned.
            fits_configured = True
            return True

        except ImportError:
            try:
                # maybe they have a standalone version of pyfits?
                import pyfits
                have_pyfits = True
                fitsLoaderClass = PyFitsFileHandler
                fits_configured = True
                return True

            except ImportError:
                if raise_err:
                    raise
        return False

    elif fitspkg == 'fitsio':
        try:
            import fitsio
            have_fitsio = True
            fitsLoaderClass = FitsioFileHandler
            fits_configured = True
            return True

        except ImportError:
            if raise_err:
                raise
        return False

    return False
class BaseFitsFileHandler(object):
    """Common base class for the FITS file handler implementations."""
    # Reserved for future use
    pass
class PyFitsFileHandler(BaseFitsFileHandler):
    """FITS file handler backed by astropy.io.fits (or standalone pyfits)."""

    def __init__(self, logger):
        super(PyFitsFileHandler, self).__init__()
        if not have_pyfits:
            raise FITSError("Need astropy or pyfits module installed to use this file handler")
        self.logger = logger
        self.kind = 'pyfits'

    def fromHDU(self, hdu, ahdr):
        """Copy all named header cards from `hdu` into the Ginga header
        `ahdr` (which must provide set_card())."""
        header = hdu.header
        if hasattr(header, 'cards'):
            # newer astropy.io.fits don't have ascardlist()
            for card in header.cards:
                if len(card.keyword) == 0:
                    continue
                ahdr.set_card(card.keyword, card.value, comment=card.comment)
        else:
            # older pyfits API
            for card in header.ascardlist():
                if len(card.key) == 0:
                    continue
                ahdr.set_card(card.key, card.value, comment=card.comment)

    def load_hdu(self, hdu, ahdr, fobj=None, naxispath=None):
        """Extract the data array and header from `hdu`.

        Empty/non-array data becomes a 0x0 array; 1D data is reshaped to
        1xN; for >=2D data, `naxispath` indices are applied to drill down
        to a 2D plane. Returns (data, naxispath).

        NOTE(review): when data is empty or 1D, naxispath is returned
        unchanged -- possibly still None; confirm callers accept that.
        """
        data = hdu.data
        if data is None:
            data = numpy.zeros((0, 0))
        elif not isinstance(data, numpy.ndarray):
            data = numpy.zeros((0, 0))
        elif 0 in data.shape:
            data = numpy.zeros((0, 0))
        elif len(data.shape) < 2:
            # Expand 1D arrays into 1xN array
            data = data.reshape((1, data.shape[0]))
        else:
            if naxispath is None:
                naxispath = []
            else:
                # Drill down naxispath
                for idx in naxispath:
                    data = data[idx]
        self.fromHDU(hdu, ahdr)
        return (data, naxispath)

    def load_file(self, filespec, ahdr, numhdu=None, naxispath=None,
                  phdr=None, memmap=None):
        """Load a FITS file from disk.

        If `numhdu` is None, the first HDU with usable image data is
        chosen; otherwise the given HDU index/name is used (normalized to a
        (name, EXTVER) tuple when the HDU is named). If `phdr` is given, the
        PRIMARY header is copied into it. Returns (data, numhdu, naxispath).
        """
        info = iohelper.get_fileinfo(filespec)
        if not info.ondisk:
            raise FITSError("File does not appear to be on disk: %s" % (
                info.url))
        filepath = info.filepath
        self.logger.debug("Loading file '%s' ..." % (filepath))
        fits_f = pyfits.open(filepath, 'readonly', memmap=memmap)
        # this seems to be necessary now for some fits files...
        try:
            fits_f.verify('fix')
        except Exception as e:
            raise FITSError("Error loading fits file '%s': %s" % (
                filepath, str(e)))
        if numhdu is None:
            # Scan for the first HDU we can display.
            info = fits_f.info(output=False)
            extver_db = {}
            found_valid_hdu = False
            for i in range(len(fits_f)):
                hdu = fits_f[i]
                tup = info[i]
                name = tup[1]
                # figure out the EXTVER for this HDU
                extver = extver_db.setdefault(name, 0)
                extver += 1
                extver_db[name] = extver
                # rule out HDUs we can't deal with
                if not (isinstance(hdu, pyfits.ImageHDU) or
                        isinstance(hdu, pyfits.PrimaryHDU)):
                    # Don't open tables, etc.
                    continue
                if not isinstance(hdu.data, numpy.ndarray):
                    # We need to open a numpy array
                    continue
                if 0 in hdu.data.shape:
                    # non-pixel or zero-length data hdu?
                    continue
                #print "data type is %s" % hdu.data.dtype.kind
                # Looks good, let's try it
                found_valid_hdu = True
                if len(name) == 0:
                    numhdu = i
                else:
                    numhdu = (name, extver)
                break
            if not found_valid_hdu:
                ## raise FITSError("No data HDU found that Ginga can open in '%s'" % (
                ##     filepath))
                # Load just the header
                numhdu = 0
        elif isinstance(numhdu, (int, str)):
            # Normalize a named HDU reference to (name, EXTVER).
            hdu = fits_f[numhdu]
            name = hdu.name
            extver = hdu.ver
            if len(name) > 0:
                numhdu = (name, extver)
        hdu = fits_f[numhdu]
        data, naxispath = self.load_hdu(hdu, ahdr, fobj=fits_f,
                                        naxispath=naxispath)
        # Read PRIMARY header
        if phdr is not None:
            self.fromHDU(fits_f[0], phdr)
        fits_f.close()
        return (data, numhdu, naxispath)

    def create_fits(self, data, header):
        """Build a single-HDU pyfits HDUList from `data` and the Ginga
        `header` (which must provide keys()/get_card())."""
        fits_f = pyfits.HDUList()
        hdu = pyfits.PrimaryHDU()
        hdu.data = data
        for kwd in header.keys():
            card = header.get_card(kwd)
            hdu.header[card.key] = (card.value, card.comment)
        fits_f.append(hdu)
        return fits_f

    def write_fits(self, path, data, header, **kwdargs):
        """Write `data` and `header` to a new FITS file at `path`.

        Extra keyword args are passed through to HDUList.writeto().
        """
        fits_f = self.create_fits(data, header)
        fits_f.writeto(path, **kwdargs)
        fits_f.close()

    def save_as_file(self, filepath, data, header, **kwdargs):
        """Convenience alias for write_fits()."""
        self.write_fits(filepath, data, header, **kwdargs)
class FitsioFileHandler(BaseFitsFileHandler):
    """FITS file handler backed by the `fitsio` package (reading);
    writing currently goes through pyfits (see create_fits)."""

    def __init__(self, logger):
        super(FitsioFileHandler, self).__init__()
        if not have_fitsio:
            raise FITSError("Need fitsio module installed to use this file handler")
        self.logger = logger
        self.kind = 'fitsio'

    def fromHDU(self, hdu, ahdr):
        """Copy all named header records from `hdu` into the Ginga header
        `ahdr`."""
        header = hdu.read_header()
        for d in header.records():
            if len(d['name']) == 0:
                continue
            # NOTE(review): relies on ahdr.__setitem__ returning the stored
            # bunch so the comment can be attached -- project-specific API;
            # confirm against the Ginga header class.
            bnch = ahdr.__setitem__(d['name'], d['value'])
            bnch.comment = d['comment']

    def load_hdu(self, hdu, ahdr, fobj=None, naxispath=None):
        """Extract the data array and header from `hdu`.

        Empty/non-array data becomes a 0x0 array; 1D data is reshaped to
        1xN; for >=2D data, `naxispath` indices are applied to drill down.
        Returns (data, naxispath).

        NOTE(review): when data is empty or 1D, naxispath is returned
        unchanged (possibly None) -- mirrors the pyfits handler.
        """
        data = hdu.read()
        if data is None:
            data = numpy.zeros((0, 0))
        elif not isinstance(data, numpy.ndarray):
            data = numpy.zeros((0, 0))
        elif 0 in data.shape:
            data = numpy.zeros((0, 0))
        elif len(data.shape) < 2:
            # Expand 1D arrays into 1xN array
            data = data.reshape((1, data.shape[0]))
        else:
            if naxispath is None:
                naxispath = []
            else:
                # Drill down naxispath
                for idx in naxispath:
                    data = data[idx]
        self.fromHDU(hdu, ahdr)
        return (data, naxispath)

    def load_file(self, filespec, ahdr, numhdu=None, naxispath=None,
                  phdr=None, memmap=None):
        """Load a FITS file from disk via fitsio.

        If `numhdu` is None, the first HDU with pixel data is chosen;
        otherwise the given HDU index/name is used (normalized to a
        (name, EXTVER) tuple for named HDUs). Returns
        (data, numhdu, naxispath).
        """
        info = iohelper.get_fileinfo(filespec)
        if not info.ondisk:
            raise FITSError("File does not appear to be on disk: %s" % (
                info.url))
        filepath = info.filepath
        self.logger.debug("Loading file '%s' ..." % (filepath))
        fits_f = fitsio.FITS(filepath, memmap=memmap)
        if numhdu is None:
            # Scan for the first HDU with usable pixel data.
            found_valid_hdu = False
            for i in range(len(fits_f)):
                hdu = fits_f[i]
                info = hdu.get_info()
                name = info['extname']
                extver = info['extver']
                if not ('ndims' in info) or (info['ndims'] == 0):
                    # compressed FITS file or non-pixel data hdu?
                    continue
                # Looks good, let's try it
                found_valid_hdu = True
                if len(name) == 0:
                    numhdu = i
                else:
                    numhdu = (name, extver)
                break
            if not found_valid_hdu:
                # No usable data HDU; just load the header of the primary
                numhdu = 0
        elif isinstance(numhdu, (int, str)):
            # Normalize a named HDU reference to (name, EXTVER).
            hdu = fits_f[numhdu]
            info = hdu.get_info()
            name = info['extname']
            extver = info['extver']
            if len(name) > 0:
                numhdu = (name, extver)
        hdu = fits_f[numhdu]
        data, naxispath = self.load_hdu(hdu, ahdr, fobj=fits_f,
                                        naxispath=naxispath)
        # Read PRIMARY header
        if phdr is not None:
            self.fromHDU(fits_f[0], phdr)
        fits_f.close()
        return (data, numhdu, naxispath)

    def create_fits(self, data, header):
        """Build a single-HDU pyfits HDUList from `data` and the Ginga
        `header`.

        NOTE(review): serialization intentionally(?) uses pyfits here, so
        writing with this handler requires astropy/pyfits to be importable.
        """
        fits_f = pyfits.HDUList()
        hdu = pyfits.PrimaryHDU()
        hdu.data = data
        for kwd in header.keys():
            card = header.get_card(kwd)
            # Consistency fix: use header item assignment like
            # PyFitsFileHandler; Header.update(key, value, ...) is a
            # deprecated/removed calling convention in modern astropy.
            hdu.header[card.key] = (card.value, card.comment)
        fits_f.append(hdu)
        return fits_f

    def write_fits(self, path, data, header, **kwdargs):
        """Write `data` and `header` to a new FITS file at `path`.

        Bug fixes: previously this opened `path` with fitsio.FITS(path, 'rw')
        and immediately rebound the variable, leaking the open handle (and
        risking a conflict with the pyfits write to the same path); it also
        accepted no **kwdargs, so save_as_file() raised TypeError whenever
        keyword arguments were forwarded.
        """
        fits_f = self.create_fits(data, header)
        fits_f.writeto(path, output_verify='fix', **kwdargs)
        fits_f.close()

    def save_as_file(self, filepath, data, header, **kwdargs):
        """Convenience alias for write_fits()."""
        self.write_fits(filepath, data, header, **kwdargs)
# Module initialization: auto-select a FITS package at import time unless
# one has already been configured via use().
if not fits_configured:
    # default
    fitsLoaderClass = PyFitsFileHandler
    # try to use them in this order
    # astropy is faster
    for name in ('astropy', 'fitsio'):
        if use(name, raise_err=False):
            break
def get_fitsloader(kind=None, logger=None):
    """Return a new FITS loader built from the currently selected handler.

    The `kind` argument is accepted for API compatibility but not
    consulted; the module-level fitsLoaderClass decides the implementation.
    """
    loader_cls = fitsLoaderClass
    return loader_cls(logger)
#END
|
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import six
from nailgun.test.base import BaseTestCase
from nailgun import consts
from nailgun.objects import OpenStackWorkloadStats
from nailgun.objects import OpenStackWorkloadStatsCollection
from nailgun.statistics.oswl.saver import oswl_data_checksum
from nailgun.statistics.oswl.saver import oswl_statistics_save
class TestOSWLServerInfoSaving(BaseTestCase):
    """Tests for oswl_statistics_save() bookkeeping of VM workload records.

    Each test feeds successive snapshots of the "current" VM list to
    oswl_statistics_save() and checks the added/removed/modified/current
    deltas recorded on OpenStackWorkloadStats rows.
    """

    # Canonical serialized VM used as the "current" resource snapshot.
    vms_info = {
        "id": 1,
        "status": "running",
        "power_state": 1,
        "created_at": "dt",
        "host_id": "111",
        "tenant_id": "222",
        "image_id": "333",
        "flavor_id": "444"
    }

    @property
    def empty_data(self):
        """Return a fresh dict describing an empty stats record for today."""
        return {
            'cluster_id': 1,
            'resource_type': consts.OSWL_RESOURCE_TYPES.vm,
            'created_date': datetime.datetime.utcnow().date(),
            'resource_data': {'added': [],
                              'removed': [],
                              'modified': [],
                              'current': []},
            'resource_checksum': oswl_data_checksum([]),
            'is_sent': False
        }

    def data_w_default_vm_info(self, time):
        """Return the expected record after vms_info was added at `time`."""
        data = self.empty_data
        data['resource_data'].update({
            'added': [{'time': time.isoformat(), 'id': 1}],
            'current': [self.vms_info]
        })
        return data

    def check_overall_rec_count(self, count):
        """Assert the total number of saved stats rows; return the query."""
        saved = OpenStackWorkloadStatsCollection.all()
        self.assertEqual(saved.count(), count)
        return saved

    def check_data_vs_rec(self, data, rec):
        """Compare expected `data` against DB record `rec` field by field.

        The checksum is recomputed from the expected 'current' list before
        comparing (mutates `data`).
        """
        data['resource_checksum'] = \
            oswl_data_checksum(data['resource_data']['current'])
        for k, v in six.iteritems(data):
            if isinstance(v, (list, tuple)):
                # Order-insensitive comparison for list-valued fields.
                # NOTE(review): assertItemsEqual is Python 2 unittest API
                # (assertCountEqual on Python 3).
                self.assertItemsEqual(v, getattr(rec, k))
            else:
                self.assertEqual(v, getattr(rec, k))

    def save_data_and_check_record(self, data):
        """Save `data` as the current VM list; return the last record.

        Also asserts that exactly one record exists overall, so this helper
        is only valid while all activity stays within a single day's row.
        """
        oswl_statistics_save(1, consts.OSWL_RESOURCE_TYPES.vm, data)
        last = OpenStackWorkloadStats.get_last_by(
            1, consts.OSWL_RESOURCE_TYPES.vm)
        self.assertEqual(last, self.check_overall_rec_count(1).first())
        return last

    def add_default_vm_info_and_check(self):
        """Save vms_info as the first snapshot; return (update_time, data)."""
        last = self.save_data_and_check_record([self.vms_info])
        time_update = last.updated_time
        data = self.data_w_default_vm_info(time_update)
        self.check_data_vs_rec(data, last)
        return time_update, data

    def test_empty_data(self):
        """An empty snapshot produces an empty record."""
        last = self.save_data_and_check_record([])
        self.check_data_vs_rec(self.empty_data, last)

    def test_added_same_info(self):
        """Re-saving identical data must not touch the existing row."""
        # VM is added
        time_update, data = self.add_default_vm_info_and_check()
        # save same info
        last = self.save_data_and_check_record([self.vms_info])
        # DB row was not updated
        self.assertEqual(time_update, last.updated_time)
        self.check_data_vs_rec(data, last)

    def test_added_one_by_one(self):
        """Two successive additions both appear in 'added' and 'current'."""
        # VM with id=1 is added
        time_update1, data = self.add_default_vm_info_and_check()
        # VM with id=2 is added
        two_vms = [dict(self.vms_info), dict(self.vms_info)]
        two_vms[1]['id'] = 2
        last = self.save_data_and_check_record(two_vms)
        time_update2 = last.updated_time
        data['resource_data'].update({
            'added': [{'time': time_update1.isoformat(), 'id': 1},
                      {'time': time_update2.isoformat(), 'id': 2}],
            'current': two_vms
        })
        self.check_data_vs_rec(data, last)

    def test_added_on_cluster_reset(self):
        """A removal and re-addition of the same VM id is tracked twice."""
        # VM with id=1 is added
        time_update1, data = self.add_default_vm_info_and_check()
        # VM with id=2 is added
        two_vms = [self.vms_info]
        self.save_data_and_check_record(two_vms)
        # reset cluster
        self.save_data_and_check_record([])
        last = self.save_data_and_check_record(two_vms)
        time_update2 = last.updated_time
        time_removed2 = last.resource_data['removed'][0]['time']
        data['resource_data'].update({
            'added': [{'time': time_update1.isoformat(), 'id': 1},
                      {'time': time_update2.isoformat(), 'id': 1}],
            'current': two_vms,
            'removed': [dict(two_vms[0], **{'time': time_removed2})]
        })
        self.check_data_vs_rec(data, last)

    def test_added_then_removed(self):
        """A removed VM moves to 'removed' with its removal time attached."""
        # VM is added
        time_update, data = self.add_default_vm_info_and_check()
        # VM is removed
        last = self.save_data_and_check_record([])
        time_update = last.updated_time
        removed = dict(self.vms_info)
        removed['time'] = time_update.isoformat()
        data['resource_data'].update({
            'removed': [removed],
            'current': []
        })
        self.check_data_vs_rec(data, last)

    def test_modified(self):
        """Each attribute change appends the previous values to 'modified'."""
        # VM is added
        time_update, data = self.add_default_vm_info_and_check()
        # VM power state and status are changed
        vms_new = [dict(self.vms_info)]
        vms_new[0]['power_state'] = 0
        vms_new[0]['status'] = 'stopped'
        last = self.save_data_and_check_record(vms_new)
        time_update = last.updated_time
        modified1 = {'power_state': self.vms_info['power_state'],
                     'status': self.vms_info['status'],
                     'time': time_update.isoformat(),
                     'id': self.vms_info['id']}
        data['resource_data'].update({
            'modified': [modified1],
            'current': vms_new
        })
        self.check_data_vs_rec(data, last)
        # VM power state is changed back
        vms_new1 = [dict(vms_new[0])]
        vms_new1[0]['power_state'] = 1
        last = self.save_data_and_check_record(vms_new1)
        time_update = last.updated_time
        modified2 = {'power_state': vms_new[0]['power_state'],
                     'time': time_update.isoformat(),
                     'id': vms_new[0]['id']}
        data['resource_data'].update({
            'modified': [modified1, modified2],
            'current': vms_new1
        })
        self.check_data_vs_rec(data, last)
        # VM status is changed back
        last = self.save_data_and_check_record([self.vms_info])
        time_update = last.updated_time
        modified3 = {'status': vms_new1[0]['status'],
                     'time': time_update.isoformat(),
                     'id': vms_new1[0]['id']}
        data['resource_data'].update({
            'modified': [modified1, modified2, modified3],
            'current': [self.vms_info]
        })
        self.check_data_vs_rec(data, last)

    def test_add_row_per_day(self):
        """Changes on a new calendar day create a new row instead of
        updating the previous day's row."""
        # VM is added
        last = self.save_data_and_check_record([self.vms_info])
        date_cur = last.created_date
        time_update = last.updated_time
        date_1st_rec = date_cur - datetime.timedelta(days=1)
        # make existing record one day older
        OpenStackWorkloadStats.update(last,
                                      {'created_date': date_1st_rec})
        # pass the same data
        # no new record was created and existing one remains unchanged
        self.assertEqual(last,
                         self.save_data_and_check_record([self.vms_info]))
        # VM is removed
        oswl_statistics_save(1, consts.OSWL_RESOURCE_TYPES.vm, [])
        saved = self.check_overall_rec_count(2)
        last = OpenStackWorkloadStats.get_last_by(
            1, consts.OSWL_RESOURCE_TYPES.vm)
        self.assertEqual(last.created_date, date_cur)
        for rec in saved:
            if rec.created_date == date_cur:
                self.assertEqual(rec, last)
                # last record contains 'removed' and empty 'added'
                data = self.empty_data
                removed = dict(self.vms_info)
                removed['time'] = last.updated_time.isoformat()
                data['resource_data']['removed'] = [removed]
                self.check_data_vs_rec(data, rec)
            elif rec.created_date == date_1st_rec:
                # first record contains 'added' and empty 'removed'
                data = self.data_w_default_vm_info(time_update)
                data['created_date'] = date_1st_rec
                self.check_data_vs_rec(data, rec)

    def test_oswl_is_sent_restored_on_changes(self):
        """is_sent stays True on identical data, resets on changed data."""
        cluster_id = 1
        vm_info = {
            "id": 1,
            "power_state": 1,
        }
        oswl_statistics_save(cluster_id, consts.OSWL_RESOURCE_TYPES.vm,
                             [vm_info])
        last = OpenStackWorkloadStats.get_last_by(
            cluster_id, consts.OSWL_RESOURCE_TYPES.vm)
        # Setting is_sent to True
        OpenStackWorkloadStats.update(last, {'is_sent': True})
        self.assertEqual(True, last.is_sent)
        # Checking is_sent is not changed if data is not changed
        oswl_statistics_save(cluster_id, consts.OSWL_RESOURCE_TYPES.vm,
                             [vm_info])
        last_no_change = OpenStackWorkloadStats.get_last_by(
            cluster_id, consts.OSWL_RESOURCE_TYPES.vm)
        self.assertEqual(True, last_no_change.is_sent)
        # Checking is_sent is changed if data is changed
        vm_info["power_state"] += 1
        oswl_statistics_save(cluster_id, consts.OSWL_RESOURCE_TYPES.vm,
                             [vm_info])
        last_changed = OpenStackWorkloadStats.get_last_by(
            cluster_id, consts.OSWL_RESOURCE_TYPES.vm)
        self.assertEqual(False, last_changed.is_sent)
|
|
#!/usr/bin/env python
"""
esptool.py "unit" tests (really integration tests). Uses a device connected to the serial port.
WILL MESS UP THE DEVICE'S SPI FLASH CONTENTS
Chip name & serial port are passed in as arguments to test. Same test suite
runs on esp8266 & esp32 (some addresses will change, see below.)
"""
import os
import os.path
import re
import subprocess
import sys
import tempfile
import time
import unittest
import serial
# Bug fix: esptool.py / espefuse.py live one directory up, so the parent
# directory must be on sys.path *before* they are imported (previously the
# import statement came first and would fail from a clean checkout).
sys.path.append('..')

import esptool
import espefuse

# point is this file is not 4 byte aligned in length
NODEMCU_FILE = "nodemcu-master-7-modules-2017-01-19-11-10-03-integer.bin"

TEST_DIR = os.path.abspath(os.path.dirname(__file__))
os.chdir(os.path.dirname(__file__))

# Allow overriding the esptool.py under test via the environment.
try:
    ESPTOOL_PY = os.environ["ESPTOOL_PY"]
except KeyError:
    ESPTOOL_PY = os.path.join(TEST_DIR, "..", "esptool.py")
ESPSECURE_PY = os.path.join(TEST_DIR, "..", "espsecure.py")

# Command line options for test environment. These module-level globals are
# presumably overwritten by the test entry point -- confirm against
# __main__. (The `global` statement that used to precede them was a no-op
# at module scope and has been removed.)
default_baudrate = 115200
serialport = None
trace_enabled = False

# Chip name is the second command-line argument, when supplied.
try:
    chip = sys.argv[2]
except IndexError:
    chip = None

# esptool.py exits with this code on a fatal (but cleanly-handled) error
RETURN_CODE_FATAL_ERROR = 2
class EsptoolTestCase(unittest.TestCase):
    """Base class for esptool integration tests.

    Provides helpers to invoke esptool.py / espsecure.py as subprocesses
    and to read back and verify flash contents on the attached device.
    """

    def _run_subprocess(self, cmd):
        """Run `cmd`, print its combined output, and return it as a str.

        Raises subprocess.CalledProcessError on a non-zero exit status; the
        captured output is printed first to aid debugging.
        """
        print("Running %s..." % (" ".join(cmd)))
        try:
            output = subprocess.check_output([str(s) for s in cmd],
                                             cwd=TEST_DIR,
                                             stderr=subprocess.STDOUT)
            print(output)  # for more complete stdout logs on failure
            return output.decode("utf-8")
        except subprocess.CalledProcessError as e:
            print(e.output)
            raise e

    def run_espsecure(self, args):
        """Run espsecure.py with the given argument string; return output."""
        cmd = [sys.executable, ESPSECURE_PY] + args.split(" ")
        return self._run_subprocess(cmd)

    def run_esptool(self, args, baud=None):
        """Run esptool with the specified arguments. --chip, --port and
        --baud are filled in automatically from the command line (the
        default baud rate can be overridden with the `baud` param).

        Additional args are passed in the `args` parameter as a string.
        Returns output from esptool.py as a string if there is any.
        Raises an exception if esptool.py fails.
        """
        if baud is None:
            baud = default_baudrate
        trace_args = ["--trace"] if trace_enabled else []
        cmd = ([sys.executable, ESPTOOL_PY] + trace_args +
               ["--chip", chip, "--port", serialport, "--baud", str(baud)] +
               args.split(" "))
        return self._run_subprocess(cmd)

    def run_esptool_error(self, args, baud=None):
        """Run esptool.py similar to run_esptool, but expect an error.

        Verifies the error is an expected error not an unhandled exception,
        and returns the output from esptool.py as a string.
        """
        with self.assertRaises(subprocess.CalledProcessError) as fail:
            self.run_esptool(args, baud)
        failure = fail.exception
        self.assertEqual(RETURN_CODE_FATAL_ERROR, failure.returncode)
        return failure.output.decode("utf-8")

    def setUp(self):
        self.tempfiles = []
        print(50 * "*")

    def tearDown(self):
        # Best-effort cleanup of temp files created by readback().
        for t in self.tempfiles:
            try:
                os.remove(t)
            except OSError:
                pass

    def readback(self, offset, length):
        """ Read contents of flash back, return to caller. """
        tf = tempfile.NamedTemporaryFile(delete=False)  # need a file we can read into
        self.tempfiles.append(tf.name)
        tf.close()
        self.run_esptool("--before default_reset read_flash %d %d %s"
                         % (offset, length, tf.name))
        with open(tf.name, "rb") as f:
            rb = f.read()
        self.assertEqual(length, len(rb),
                         "read_flash length %d offset 0x%x yielded %d bytes!"
                         % (length, offset, len(rb)))
        return rb

    def verify_readback(self, offset, length, compare_to, is_bootloader=False):
        """Read back `length` bytes at `offset` and compare byte-for-byte
        to the contents of the file `compare_to`."""
        rb = self.readback(offset, length)
        with open(compare_to, "rb") as f:
            ct = f.read()
        if len(rb) != len(ct):
            # Bug fix: this print was missing its % arguments, so the
            # placeholders were printed literally. Argument order matches
            # the "Expected ... got ..." convention below (expected=file).
            print("WARNING: Expected length %d doesn't match comparison %d"
                  % (len(ct), len(rb)))
            print("Readback %d bytes" % len(rb))
        if is_bootloader:
            # writing a bootloader image to bootloader offset can set flash size/etc,
            # so don't compare the 8 byte header
            self.assertEqual(ct[0], rb[0], "First bytes should be identical")
            rb = rb[8:]
            ct = ct[8:]
        for rb_b, ct_b, offs in zip(rb, ct, range(len(rb))):
            if rb_b != ct_b:
                self.fail("First difference at offset 0x%x Expected %r got %r"
                          % (offs, ct_b, rb_b))
class TestFlashEncryption(EsptoolTestCase):
    """Tests for `write_flash --encrypt` against the ESP32 flash-encryption
    efuse configuration.

    The tests only run meaningfully when no valid encryption key has been
    burned yet; otherwise they skip. (The per-test descriptions below were
    previously stray module-level string literals with no effect; they are
    now proper docstrings.)
    """

    def valid_key_present(self):
        """Return True if an encryption key appears to be burned already.

        A programmed key makes efuse block BLK1 read-protected, so
        non-readability of BLK1 is used as the indicator.
        """
        esp = esptool.ESP32ROM(serialport)
        esp.connect()
        efuses = espefuse.EspEfuses(esp)
        blk1_rd_en = efuses["BLK1"].is_readable()
        return not blk1_rd_en

    def test_blank_efuse_encrypt_write_abort(self):
        """Since flash crypt config is not set correctly, this test should
        abort the write."""
        print('test_blank_efuse_encrypt_write_abort')

        if self.valid_key_present():
            raise unittest.SkipTest("Valid encryption key already programmed, aborting the test")

        self.run_esptool("write_flash 0x1000 images/bootloader.bin 0x8000 images/partitions_singleapp.bin 0x10000 images/helloworld-esp32.bin")
        output = self.run_esptool_error("write_flash --encrypt 0x10000 images/helloworld-esp32.bin")
        self.assertIn("Incorrect efuse setting: aborting flash write", output)

    def test_blank_efuse_encrypt_write_continue1(self):
        """Since the ignore option is specified, the write should happen even
        though flash crypt config is 0; the encrypted flash contents are then
        read back and compared with a precomputed ciphertext (pass case)."""
        print('test_blank_efuse_encrypt_write_continue1')

        if self.valid_key_present():
            raise unittest.SkipTest("Valid encryption key already programmed, aborting the test")

        self.run_esptool("write_flash --encrypt --ignore-flash-encryption-efuse-setting 0x10000 images/helloworld-esp32.bin")
        self.run_esptool("read_flash 0x10000 192 images/read_encrypted_flash.bin")
        self.run_espsecure("encrypt_flash_data --address 0x10000 --keyfile images/aes_key.bin --flash_crypt_conf 0 --output images/local_enc.bin images/helloworld-esp32.bin")

        try:
            with open("images/read_encrypted_flash.bin", "rb") as file1:
                read_file1 = file1.read()

            with open("images/local_enc.bin", "rb") as file2:
                read_file2 = file2.read()

            for rf1, rf2, i in zip(read_file1, read_file2, range(len(read_file2))):
                self.assertEqual(rf1, rf2, "encrypted write failed: file mismatch at byte position %d" % i)

            print('encrypted write success')
        finally:
            os.remove("images/read_encrypted_flash.bin")
            os.remove("images/local_enc.bin")

    @unittest.expectedFailure
    def test_blank_efuse_encrypt_write_continue2(self):
        """Same as continue1 but comparing against ciphertext computed from a
        different plaintext image, so the comparison is expected to fail."""
        print('test_blank_efuse_encrypt_write_continue2')

        if self.valid_key_present():
            raise unittest.SkipTest("Valid encryption key already programmed, aborting the test")

        self.run_esptool("write_flash --encrypt --ignore-flash-encryption-efuse-setting 0x10000 images/helloworld-esp32_edit.bin")
        self.run_esptool("read_flash 0x10000 192 images/read_encrypted_flash.bin")
        self.run_espsecure("encrypt_flash_data --address 0x10000 --keyfile images/aes_key.bin --flash_crypt_conf 0 --output images/local_enc.bin images/helloworld-esp32.bin")

        try:
            with open("images/read_encrypted_flash.bin", "rb") as file1:
                read_file1 = file1.read()

            with open("images/local_enc.bin", "rb") as file2:
                read_file2 = file2.read()

            for rf1, rf2, i in zip(read_file1, read_file2, range(len(read_file2))):
                self.assertEqual(rf1, rf2, "files mismatch at byte position %d" % i)
        finally:
            os.remove("images/read_encrypted_flash.bin")
            os.remove("images/local_enc.bin")
class TestFlashing(EsptoolTestCase):
    def test_short_flash(self):
        """Write and verify a 1 KB image at offset 0."""
        self.run_esptool("write_flash 0x0 images/one_kb.bin")
        self.verify_readback(0, 1024, "images/one_kb.bin")
    def test_highspeed_flash(self):
        """Write and verify a 50 KB image at a high (921600) baud rate."""
        self.run_esptool("write_flash 0x0 images/fifty_kb.bin", baud=921600)
        self.verify_readback(0, 50*1024, "images/fifty_kb.bin")
    def test_adjacent_flash(self):
        """Write two adjacent images in one invocation and verify both."""
        self.run_esptool("write_flash 0x0 images/sector.bin 0x1000 images/fifty_kb.bin")
        self.verify_readback(0, 4096, "images/sector.bin")
        self.verify_readback(4096, 50*1024, "images/fifty_kb.bin")
    def test_adjacent_independent_flash(self):
        """Write two adjacent images in separate invocations; the second
        write must not corrupt the first."""
        self.run_esptool("write_flash 0x0 images/sector.bin")
        self.verify_readback(0, 4096, "images/sector.bin")
        self.run_esptool("write_flash 0x1000 images/fifty_kb.bin")
        self.verify_readback(4096, 50*1024, "images/fifty_kb.bin")
        # writing flash the second time shouldn't have corrupted the first time
        self.verify_readback(0, 4096, "images/sector.bin")
def test_correct_offset(self):
""" Verify writing at an offset actually writes to that offset. """
self.run_esptool("write_flash 0x2000 images/sector.bin")
time.sleep(0.1)
three_sectors = self.readback(0, 0x3000)
last_sector = three_sectors[0x2000:]
with open("images/sector.bin", "rb") as f:
ct = f.read()
self.assertEqual(last_sector, ct)
    def test_no_compression_flash(self):
        """Write with compression disabled (-u) and verify both images."""
        self.run_esptool("write_flash -u 0x0 images/sector.bin 0x1000 images/fifty_kb.bin")
        self.verify_readback(0, 4096, "images/sector.bin")
        self.verify_readback(4096, 50*1024, "images/fifty_kb.bin")
    @unittest.skipUnless(chip == 'esp32', 'ESP32 only')
    def test_compressed_nostub_flash(self):
        """Compressed (-z) write without the stub loader (ESP32 only)."""
        self.run_esptool("--no-stub write_flash -z 0x0 images/sector.bin 0x1000 images/fifty_kb.bin")
        self.verify_readback(0, 4096, "images/sector.bin")
        self.verify_readback(4096, 50*1024, "images/fifty_kb.bin")
    def _test_partition_table_then_bootloader(self, args):
        """Helper: flash the partition table then the bootloader with the
        given write command prefix, verifying both after each step."""
        self.run_esptool(args + " 0x4000 images/partitions_singleapp.bin")
        self.verify_readback(0x4000, 96, "images/partitions_singleapp.bin")
        self.run_esptool(args + " 0x1000 images/bootloader.bin")
        self.verify_readback(0x1000, 7888, "images/bootloader.bin", True)
        self.verify_readback(0x4000, 96, "images/partitions_singleapp.bin")
def test_partition_table_then_bootloader(self):
self._test_partition_table_then_bootloader("write_flash")
def test_partition_table_then_bootloader_no_compression(self):
self._test_partition_table_then_bootloader("write_flash -u")
def test_partition_table_then_bootloader_nostub(self):
self._test_partition_table_then_bootloader("--no-stub write_flash")
# note: there is no "partition table then bootloader" test that
# uses --no-stub and -z, as the ESP32 ROM over-erases and can't
# flash this set of files in this order. we do
# test_compressed_nostub_flash() instead.
def test_length_not_aligned_4bytes(self):
    """Compressed write of an image whose length is not 4-byte aligned.

    Uses the shared NODEMCU_FILE module constant, matching the
    no-compression variant below (previously a duplicate hard-coded
    filename plus an unused ``size`` local).
    """
    self.run_esptool("write_flash 0x0 images/%s" % NODEMCU_FILE)

def test_length_not_aligned_4bytes_no_compression(self):
    """Uncompressed (-u) write of an image with a non-4-byte-aligned length."""
    self.run_esptool("write_flash -u 0x0 images/%s" % NODEMCU_FILE)
def test_write_overlap(self):
    """Two images whose address ranges overlap must be rejected."""
    output = self.run_esptool_error("write_flash 0x0 images/bootloader.bin 0x1000 images/one_kb.bin")
    self.assertIn("Detected overlap at address: 0x1000 ", output)

def test_write_sector_overlap(self):
    # These two 1KB files don't overlap, but they do both touch sector at 0x1000 so should fail
    output = self.run_esptool_error("write_flash 0xd00 images/one_kb.bin 0x1d00 images/one_kb.bin")
    self.assertIn("Detected overlap at address: 0x1d00", output)

def test_write_no_overlap(self):
    """Non-overlapping images in one invocation succeed without a warning."""
    output = self.run_esptool("write_flash 0x0 images/bootloader.bin 0x2000 images/one_kb.bin")
    self.assertNotIn("Detected overlap at address", output)

def test_compressible_file(self):
    """A highly compressible (all-zeroes) 1 MB image flashes successfully."""
    self.run_esptool("write_flash 0x10000 images/one_mb_zeroes.bin")

def test_zero_length(self):
    # Zero length files are skipped with a warning
    output = self.run_esptool("write_flash 0x10000 images/one_kb.bin 0x11000 images/zerolength.bin")
    self.verify_readback(0x10000, 1024, "images/one_kb.bin")
    self.assertIn("zerolength.bin is empty", output)

def test_single_byte(self):
    """A single-byte image can be written and read back."""
    # command output is not inspected here, so don't bind it
    # (previously assigned to an unused ``output`` local)
    self.run_esptool("write_flash 0x0 images/onebyte.bin")
    self.verify_readback(0x0, 1, "images/onebyte.bin")
class TestFlashSizes(EsptoolTestCase):
    """Tests for the write_flash -fs (flash size) option."""

    def test_high_offset(self):
        """Write near the end of a 4MB flash map."""
        self.run_esptool("write_flash -fs 4MB 0x300000 images/one_kb.bin")
        self.verify_readback(0x300000, 1024, "images/one_kb.bin")

    def test_high_offset_no_compression(self):
        """Same as test_high_offset but with compression disabled."""
        self.run_esptool("write_flash -u -fs 4MB 0x300000 images/one_kb.bin")
        self.verify_readback(0x300000, 1024, "images/one_kb.bin")

    def test_large_image(self):
        """A 1MB image fits when the -fs argument allows it."""
        self.run_esptool("write_flash -fs 4MB 0x280000 images/one_mb.bin")
        self.verify_readback(0x280000, 0x100000, "images/one_mb.bin")

    def test_large_no_compression(self):
        """Same as test_large_image but with compression disabled."""
        self.run_esptool("write_flash -u -fs 4MB 0x280000 images/one_mb.bin")
        self.verify_readback(0x280000, 0x100000, "images/one_mb.bin")

    def test_invalid_size_arg(self):
        """An unsupported -fs value is rejected with an error."""
        self.run_esptool_error("write_flash -fs 10MB 0x6000 images/one_kb.bin")

    def test_write_past_end_fails(self):
        """Writing beyond the declared flash size errors with a clear message."""
        output = self.run_esptool_error("write_flash -fs 1MB 0x280000 images/one_kb.bin")
        self.assertIn("File images/one_kb.bin", output)
        self.assertIn("will not fit", output)

    def test_write_no_compression_past_end_fails(self):
        """Same as test_write_past_end_fails but with compression disabled."""
        output = self.run_esptool_error("write_flash -u -fs 1MB 0x280000 images/one_kb.bin")
        self.assertIn("File images/one_kb.bin", output)
        self.assertIn("will not fit", output)
class TestFlashDetection(EsptoolTestCase):
    """Tests for SPI flash chip detection."""

    def test_correct_offset(self):
        """ Verify flash_id reports the chip's manufacturer and device IDs. """
        # NOTE(review): the method name and previous docstring looked
        # copy-pasted from an offset test; this actually exercises flash_id.
        res = self.run_esptool("flash_id")
        self.assertTrue("Manufacturer:" in res)
        self.assertTrue("Device:" in res)
class TestErase(EsptoolTestCase):
    """Tests for the erase_flash and erase_region commands."""

    def test_chip_erase(self):
        """erase_flash wipes previously-written data (reads back all 0xFF)."""
        self.run_esptool("write_flash 0x10000 images/one_kb.bin")
        self.verify_readback(0x10000, 0x400, "images/one_kb.bin")
        self.run_esptool("erase_flash")
        empty = self.readback(0x10000, 0x400)
        self.assertTrue(empty == b'\xFF'*0x400)  # erased NOR flash reads all-ones

    def test_region_erase(self):
        """erase_region wipes only the requested sector, not its neighbour."""
        self.run_esptool("write_flash 0x10000 images/one_kb.bin")
        self.run_esptool("write_flash 0x11000 images/sector.bin")
        self.verify_readback(0x10000, 0x400, "images/one_kb.bin")
        self.verify_readback(0x11000, 0x1000, "images/sector.bin")
        # erase only the flash sector containing one_kb.bin
        self.run_esptool("erase_region 0x10000 0x1000")
        self.verify_readback(0x11000, 0x1000, "images/sector.bin")
        empty = self.readback(0x10000, 0x1000)
        self.assertTrue(empty == b'\xFF'*0x1000)

    def test_large_region_erase(self):
        # verifies that erasing a large region doesn't time out
        self.run_esptool("erase_region 0x0 0x100000")
class TestSectorBoundaries(EsptoolTestCase):
    """Tests for writes that end or start inside a flash sector."""

    def test_end_sector(self):
        """A write ending mid-sector must not corrupt the following sector."""
        self.run_esptool("write_flash 0x10000 images/sector.bin")
        self.run_esptool("write_flash 0x0FC00 images/one_kb.bin")
        self.verify_readback(0x0FC00, 0x400, "images/one_kb.bin")
        self.verify_readback(0x10000, 0x1000, "images/sector.bin")

    def test_end_sector_uncompressed(self):
        """Same as test_end_sector with compression disabled (-u)."""
        self.run_esptool("write_flash -u 0x10000 images/sector.bin")
        self.run_esptool("write_flash -u 0x0FC00 images/one_kb.bin")
        self.verify_readback(0x0FC00, 0x400, "images/one_kb.bin")
        self.verify_readback(0x10000, 0x1000, "images/sector.bin")

    def test_overlap(self):
        """A write starting mid-sector (0x20800) still reads back intact."""
        self.run_esptool("write_flash 0x20800 images/sector.bin")
        self.verify_readback(0x20800, 0x1000, "images/sector.bin")
class TestVerifyCommand(EsptoolTestCase):
    """Tests for the verify_flash command."""

    def test_verify_success(self):
        """verify_flash passes when flash contents match the file."""
        self.run_esptool("write_flash 0x5000 images/one_kb.bin")
        self.run_esptool("verify_flash 0x5000 images/one_kb.bin")

    def test_verify_failure(self):
        """verify_flash fails and reports the first differing address."""
        self.run_esptool("write_flash 0x6000 images/sector.bin")
        output = self.run_esptool_error("verify_flash --diff=yes 0x6000 images/one_kb.bin")
        self.assertIn("verify FAILED", output)
        self.assertIn("first @ 0x00006000", output)

    def test_verify_unaligned_length(self):
        """verify_flash handles a file whose length is not 4-byte aligned."""
        self.run_esptool("write_flash 0x0 images/%s" % NODEMCU_FILE)
        self.run_esptool("verify_flash 0x0 images/%s" % NODEMCU_FILE)
class TestReadIdentityValues(EsptoolTestCase):
    """Tests for commands that read chip identity values (MAC, chip ID)."""

    def test_read_mac(self):
        """read_mac prints a plausible, non-degenerate MAC address."""
        output = self.run_esptool("read_mac")
        mac = re.search(r"[0-9a-f:]{17}", output)
        self.assertIsNotNone(mac)
        mac = mac.group(0)
        # all-zero / all-ones would indicate a failed or unprogrammed read
        self.assertNotEqual("00:00:00:00:00:00", mac)
        self.assertNotEqual("ff:ff:ff:ff:ff:ff", mac)

    @unittest.skipUnless(chip == 'esp8266', 'ESP8266 only')
    def test_read_chip_id(self):
        """chip_id prints a plausible, non-degenerate hex chip ID."""
        output = self.run_esptool("chip_id")
        idstr = re.search("Chip ID: 0x([0-9a-f]+)", output)
        self.assertIsNotNone(idstr)
        idstr = idstr.group(1)
        self.assertNotEqual("0"*8, idstr)
        self.assertNotEqual("f"*8, idstr)
class TestKeepImageSettings(EsptoolTestCase):
    """ Tests for the -fm keep, -ff keep options for write_flash """
    # 8 byte file, contains image header only
    HEADER_ONLY = "images/image_header_only.bin"

    def setUp(self):
        super(TestKeepImageSettings, self).setUp()
        self.flash_offset = 0x1000 if chip == "esp32" else 0  # bootloader offset
        with open(self.HEADER_ONLY, "rb") as f:
            self.header = f.read(8)  # reference copy of the image header bytes

    def test_keep_does_not_change_settings(self):
        """'keep' (the default) leaves the header bytes exactly as written."""
        # defaults should be keep, except for flash size which has to match header
        flash_size = "1MB" if chip == "esp32" else "512KB"  # hex 0
        self.run_esptool("write_flash -fs %s 0x%x %s" % (flash_size, self.flash_offset, self.HEADER_ONLY))
        self.verify_readback(self.flash_offset, 8, self.HEADER_ONLY, False)
        # can also explicitly set these options
        self.run_esptool("write_flash -fm keep -ff keep -fs %s 0x%x %s" % (flash_size, self.flash_offset, self.HEADER_ONLY))
        self.verify_readback(self.flash_offset, 8, self.HEADER_ONLY, False)
        # verify_flash should also use 'keep'
        self.run_esptool("verify_flash -fs %s 0x%x %s" % (flash_size, self.flash_offset, self.HEADER_ONLY))

    def test_detect_size_changes_size(self):
        """Without explicit -fs, only the size/freq header byte is rewritten."""
        self.run_esptool("write_flash 0x%x %s" % (self.flash_offset, self.HEADER_ONLY))
        readback = self.readback(self.flash_offset, 8)
        self.assertEqual(self.header[:3], readback[:3])  # first 3 bytes unchanged
        self.assertNotEqual(self.header[3], readback[3])  # size_freq byte changed
        self.assertEqual(self.header[4:], readback[4:])  # rest unchanged

    def test_explicit_set_size_freq_mode(self):
        """Explicit -fs/-fm/-ff rewrite the mode and size/freq header bytes."""
        self.run_esptool("write_flash -fs 2MB -fm qio -ff 80m 0x%x %s" % (self.flash_offset, self.HEADER_ONLY))

        def val(x):
            # Normalize a header byte: indexing bytes yields str on Python 2
            # and int on Python 3.
            try:
                return ord(x)  # converts character to integer on Python 2
            except TypeError:
                return x  # throws TypeError on Python 3 where x is already an integer

        # (an unused ``header = list(self.header)`` local was removed here)
        readback = self.readback(self.flash_offset, 8)
        self.assertEqual(self.header[0], readback[0])
        self.assertEqual(self.header[1], readback[1])
        self.assertEqual(0, val(readback[2]))  # qio mode
        self.assertNotEqual(0, val(self.header[2]))
        self.assertEqual(0x1f if chip == "esp32" else 0x3f, val(readback[3]))  # size_freq
        self.assertNotEqual(self.header[3], readback[3])
        self.assertEqual(self.header[4:], readback[4:])
        # verify_flash should pass if we match params, fail otherwise
        self.run_esptool("verify_flash -fs 2MB -fm qio -ff 80m 0x%x %s" % (self.flash_offset, self.HEADER_ONLY))
        self.run_esptool_error("verify_flash 0x%x %s" % (self.flash_offset, self.HEADER_ONLY))
class TestLoadRAM(EsptoolTestCase):
    """Tests for the load_ram command (execute a binary from RAM)."""

    def test_load_ram(self):
        """ Verify load_ram command
        The "hello world" binary programs for each chip print
        "Hello world!\n" to the serial port.
        """
        self.run_esptool("load_ram images/helloworld-%s.bin" % chip)
        # open the port ourselves and watch for the program's output
        p = serial.serial_for_url(serialport, default_baudrate)
        p.timeout = 0.2
        self.assertIn(b"Hello world!", p.read(32))
        p.close()
class TestDeepSleepFlash(EsptoolTestCase):
    """Regression tests around deep-sleep and flash re-enabling."""

    @unittest.skipUnless(chip == 'esp8266', 'ESP8266 only')
    def test_deep_sleep_flash(self):
        """ Regression test for https://github.com/espressif/esptool/issues/351

        ESP8266 deep sleep can disable SPI flash chip, stub loader (or ROM loader) needs to re-enable it.

        NOTE: If this test fails, the ESP8266 may need a hard power cycle (probably with GPIO0 held LOW)
        to recover.
        """
        # not even necessary to wake successfully from sleep, going into deep sleep is enough
        # (so GPIO16, etc, config is not important for this test)
        self.run_esptool("write_flash 0x0 images/esp8266_deepsleep.bin", baud=230400)

        time.sleep(0.25)  # give ESP8266 time to enter deep sleep

        self.run_esptool("write_flash 0x0 images/fifty_kb.bin", baud=230400)
        self.verify_readback(0, 50*1024, "images/fifty_kb.bin")
if __name__ == '__main__':
    # Usage: esptool tests require a serial port and a chip name; an
    # optional baud rate and/or unittest test names may follow.
    if len(sys.argv) < 3:
        print("Usage: %s [--trace] <serial port> <chip name> [optional default baud rate] [optional tests]" % sys.argv[0])
        sys.exit(1)
    if sys.argv[1] == "--trace":
        trace_enabled = True
        sys.argv.pop(1)
    serialport = sys.argv[1]
    # chip is already set to sys.argv[2], so @skipUnless can evaluate against it
    args_used = 2
    try:
        default_baudrate = int(sys.argv[3])
        args_used = 3
    except IndexError:
        pass  # no additional args
    except ValueError:
        pass  # arg3 not a number, must be a test name
    # unittest also uses argv, so trim the args we used
    sys.argv = [ sys.argv[0] ] + sys.argv[args_used + 1:]
    print("Running esptool.py tests...")
    unittest.main(buffer=True)
|
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
# Console test fixture setup: each test below spawns a fresh ipy.exe via
# IronPythonInstance and drives it through its interactive prompt.
from iptest.assert_util import *
skiptest("win32")  # platform gating — see iptest.assert_util for semantics
from iptest.console_util import IronPythonInstance
remove_ironpython_dlls(testpath.public_testdir)
from sys import executable
from System import Environment
from sys import exec_prefix

# Forward -X:LightweightScopes to child interpreters when the hosting
# runtime was launched with it, so child behavior matches the parent.
extraArgs = ""
if "-X:LightweightScopes" in Environment.GetCommandLineArgs():
    extraArgs += " -X:LightweightScopes"
def test_strings():
    """Interactive handling of string 'exceptions' and multi-line literals."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)

    # String exception: raising a str is a TypeError
    response = ipi.ExecuteLine("raise 'foo'", True)
    AreEqual(response.replace("\r\r\n", "\n").replace("\r", ""),
        """Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: exceptions must be classes, or instances, not str""")

    # Multi-line string literal: blank continuation lines become \n in the value
    ipi.ExecutePartialLine("\"\"\"Hello")
    ipi.ExecutePartialLine("")
    ipi.ExecutePartialLine("")
    AreEqual("'Hello\\n\\n\\nWorld'", ipi.ExecuteLine("World\"\"\""))

    # if/else entered interactively executes on the terminating blank line
    ipi.ExecutePartialLine("if False: print 3")
    ipi.ExecutePartialLine("else: print 'hello'")
    AreEqual(r'hello', ipi.ExecuteLine(""))

    # Empty line
    AreEqual("", ipi.ExecuteLine(""))

    ipi.End()

def test_exceptions():
    """A parameterless `raise Exception` prints a standard traceback."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    # parameterless exception
    response = ipi.ExecuteLine("raise Exception", True)
    AreEqual(response,
        '''Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
Exception'''.replace("\n", "\r\r\n") + "\r")
    ipi.End()

def test_exceptions_nested():
    """Tracebacks from nested interactively-defined functions list each frame."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("def a(): return b()")
    ipi.ExecuteLine("")
    ipi.ExecutePartialLine("def b(): return 1/0")
    ipi.ExecuteLine("")
    response = ipi.ExecuteLine("a()", True)
    response = response.replace("\r\r\n", "\n").strip()
    Assert(response.startswith('''Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "<stdin>", line 1, in a
  File "<stdin>", line 1, in b
ZeroDivisionError:'''), response)
    ipi.End()
###############################################################################
# Test "ipy.exe -i script.py"
def test_interactive_mode():
    """-i drops into an interactive session after the script runs."""
    # normal script: its globals (x) remain visible interactively
    inputScript = testpath.test_inputs_dir + "\\simpleCommand.py"
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
    AreEqual(ipi.Start(), True)
    ipi.EnsureInteractive()
    AreEqual("1", ipi.ExecuteLine("x"))
    ipi.End()

    # script that raises: still interactive afterwards, globals preserved
    inputScript = testpath.test_inputs_dir + "\\raise.py"
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
    AreEqual(ipi.Start(), True)
    ipi.ReadError()
    ipi.EnsureInteractive()
    AreEqual("1", ipi.ExecuteLine("x"))
    ipi.End()

    # script with a syntax error: nothing from it is defined
    inputScript = testpath.test_inputs_dir + "\\syntaxError.py"
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
    AreEqual(ipi.Start(), True)
    # ipi.EnsureInteractive()
    AssertContains(ipi.ExecuteLine("x", True), "NameError")
    ipi.End()

    # script that exits: -i does not keep the process alive
    inputScript = testpath.test_inputs_dir + "\\exit.py"
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
    (result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
    AreEqual(exitCode, 0)
    ipi.End()

    # interactive + -c
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i -c x=2")
    AreEqual(ipi.Start(), True)
    ipi.EnsureInteractive()
    Assert(ipi.ExecuteLine("x", True).find("2") != -1)
    ipi.End()
###############################################################################
# Test sys.exitfunc
def test_sys_exitfunc():
    """sys.exitfunc runs at exit; sys.exit's argument maps to the exit status."""
    import clr

    # exit function runs and its output appears on stdout
    inputScript = testpath.test_inputs_dir + "\\exitFuncRuns.py"
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " \"" + inputScript + "\"")
    (result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
    AreEqual(exitCode, 0)
    AreEqual(output.find('hello world') > -1, True)
    ipi.End()

    args = extraArgs
    if clr.GetCurrentRuntime().Configuration.DebugMode:
        args = "-D " + args

    # an exit function that raises reports the error on stderr with location
    inputScript = testpath.test_inputs_dir + "\\exitFuncRaises.py"
    ipi = IronPythonInstance(executable, exec_prefix, args + " \"" + inputScript + "\"")
    (result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
    AreEqual(exitCode, 0)
    AreEqual(output2.find('Error in sys.exitfunc:') > -1, True)
    AreEqual(output2.find('exitFuncRaises.py", line 19, in foo') > -1, True)
    ipi.End()

    # verify sys.exit(True) and sys.exit(False) return 1 and 0
    ipi = IronPythonInstance(executable, exec_prefix, '-c "import sys; sys.exit(False)"')
    res = ipi.StartAndRunToCompletion()
    AreEqual(res[0], True)  # should have started
    AreEqual(res[1], '')    # no std out
    AreEqual(res[2], '')    # no std err
    AreEqual(res[3], 0)     # should return 0

    ipi = IronPythonInstance(executable, exec_prefix, '-c "import sys; sys.exit(True)"')
    res = ipi.StartAndRunToCompletion()
    AreEqual(res[0], True)  # should have started
    AreEqual(res[1], '')    # no std out
    AreEqual(res[2], '')    # no std err
    AreEqual(res[3], 1)     # should return 1 (True maps to exit status 1)

    # and verify it works at the interactive console as well
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("import sys")
    AreEqual(ipi.ExecuteAndExit("sys.exit(False)"), 0)

    # and verify it works at the interactive console as well
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("import sys")
    AreEqual(ipi.ExecuteAndExit("sys.exit(True)"), 1)
#############################################################################
# verify we need to dedent to a previous valid indentation level
def test_indentation():
    """Dedenting to an invalid level inside a block is an IndentationError."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("if False:")
    ipi.ExecutePartialLine("    print 'hello'")
    response = ipi.ExecuteLine("  print 'goodbye'", True)
    AreEqual(response.find('IndentationError') > 1, True)
    ipi.End()

#############################################################################
# verify we dump exception details
def test_dump_exception():
    """-X:ExceptionDetail includes CLR frames in the error output."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -X:ExceptionDetail")
    AreEqual(ipi.Start(), True)
    response = ipi.ExecuteLine("raise 'goodbye'", True)
    AreEqual(response.count("IronPython.Hosting") >= 1, True)
    ipi.End()

#############################################################################
# make sure we can enter try/except blocks
def test_try_except():
    """A multi-line try/except entered interactively executes correctly."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("try:")
    ipi.ExecutePartialLine("    raise Exception('foo')")
    ipi.ExecutePartialLine("except Exception, e:")
    ipi.ExecutePartialLine("    if e.message=='foo':")
    ipi.ExecutePartialLine("        print 'okay'")
    response = ipi.ExecuteLine("")
    Assert(response.find('okay') > -1)
    ipi.End()
###########################################################
# Throw on "complete" incomplete syntax bug #864
def test_incomplate_syntax():
    """An empty class body terminated by a blank line is an IndentationError."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("class K:")
    response = ipi.ExecuteLine("", True)
    Assert("IndentationError:" in response)
    ipi.End()

def test_incomplate_syntax_backslash():
    """Line continuations (backslashes) before/after an operator still parse."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    # try 0..3 extra continuation lines on each side of "1 + \" ... "2"
    for i in range(4):
        for j in range(i):
            ipi.ExecutePartialLine("\\")
        ipi.ExecutePartialLine("1 + \\")
        for j in range(i):
            ipi.ExecutePartialLine("\\")
        response = ipi.ExecuteLine("2", True)
        Assert("3" in response)
    ipi.End()

###########################################################
# if , while, try, for and then EOF.
def test_missing_test():
    """A compound-statement keyword alone on a line is a SyntaxError."""
    for x in ['if', 'while', 'for', 'try']:
        ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
        AreEqual(ipi.Start(), True)
        response = ipi.ExecuteLine(x, True)
        Assert("SyntaxError:" in response)
        ipi.End()

##########################################################
# Support multiple-levels of indentation
def test_indentation_levels():
    """Deeply nested interactive input (class > def > if) is accepted."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("class K:")
    ipi.ExecutePartialLine("    def M(self):")
    ipi.ExecutePartialLine("        if 1:")
    ipi.ExecutePartialLine("            pass")
    response = ipi.ExecuteLine("")
    ipi.End()
##########################################################
# Support partial lists
def test_partial_lists():
    """A list literal may be split across multiple interactive lines."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("[1")
    ipi.ExecutePartialLine("  ,")
    ipi.ExecutePartialLine("  2")
    response = ipi.ExecuteLine("]")
    Assert("[1, 2]" in response)

    # empty list across blank continuation lines
    ipi.ExecutePartialLine("[")
    ipi.ExecutePartialLine("")
    ipi.ExecutePartialLine("")
    response = ipi.ExecuteLine("]")
    Assert("[]" in response)
    ipi.End()

def test_partial_lists_cp3530():
    """Regression CP3530: dict literals inside multi-line lists."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    try:
        ipi.ExecutePartialLine("[{'a':None},")
        response = ipi.ExecuteLine("]")
        Assert("[{'a': None}]" in response, response)

        # dict split mid-entry
        ipi.ExecutePartialLine("[{'a'")
        response = ipi.ExecutePartialLine(":None},")
        response = ipi.ExecuteLine("]")
        Assert("[{'a': None}]" in response, response)

        # dict followed by further list elements
        ipi.ExecutePartialLine("[{'a':None},")
        ipi.ExecutePartialLine("1,")
        response = ipi.ExecuteLine("2]")
        Assert("[{'a': None}, 1, 2]" in response, response)
    finally:
        ipi.End()
##########################################################
# Support partial tuples
def test_partial_tuples():
    """A tuple literal may be split across multiple interactive lines."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("(2")
    ipi.ExecutePartialLine("  ,")
    ipi.ExecutePartialLine("  3")
    response = ipi.ExecuteLine(")")
    Assert("(2, 3)" in response)

    # empty tuple
    ipi.ExecutePartialLine("(")
    response = ipi.ExecuteLine(")")
    Assert("()" in response)

    # tuple used as % formatting args, with blank lines and stray commas
    ipi.ExecutePartialLine("'abc %s %s %s %s %s' % (")
    ipi.ExecutePartialLine("    'def'")
    ipi.ExecutePartialLine("    ,'qrt',")
    ipi.ExecutePartialLine("    'jkl'")
    ipi.ExecutePartialLine(",'jkl'")
    ipi.ExecutePartialLine("")
    ipi.ExecutePartialLine(",")
    ipi.ExecutePartialLine("")
    ipi.ExecutePartialLine("")
    ipi.ExecutePartialLine("'123'")
    response = ipi.ExecuteLine(")")
    Assert("'abc def qrt jkl jkl 123'" in response)

    # single-element tuple with trailing comma
    ipi.ExecutePartialLine("a = (")
    ipi.ExecutePartialLine("    1")
    ipi.ExecutePartialLine("    , ")
    ipi.ExecuteLine(")")
    response = ipi.ExecuteLine("a")
    Assert("(1,)" in response)

    # comments inside a partial tuple
    ipi.ExecutePartialLine("(")
    ipi.ExecutePartialLine("'joe'")
    ipi.ExecutePartialLine("    ")
    ipi.ExecutePartialLine("      #")
    ipi.ExecutePartialLine(",")
    ipi.ExecutePartialLine("2")
    response = ipi.ExecuteLine(")")
    Assert("('joe', 2)" in response)

    # empty tuple across blank continuation lines
    ipi.ExecutePartialLine("(")
    ipi.ExecutePartialLine("")
    ipi.ExecutePartialLine("")
    response = ipi.ExecuteLine(")")
    Assert("()" in response)
    ipi.End()

##########################################################
# Support partial dicts
def test_partial_dicts():
    """A dict literal may be split across multiple interactive lines."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("{2:2")
    ipi.ExecutePartialLine("  ,")
    ipi.ExecutePartialLine("  2:2")
    response = ipi.ExecuteLine("}")
    Assert("{2: 2}" in response)

    # empty dict
    ipi.ExecutePartialLine("{")
    response = ipi.ExecuteLine("}")
    Assert("{}" in response)

    # trailing comma before the closing brace
    ipi.ExecutePartialLine("a = {")
    ipi.ExecutePartialLine("    None:2")
    ipi.ExecutePartialLine("    , ")
    ipi.ExecuteLine("}")
    response = ipi.ExecuteLine("a")
    Assert("{None: 2}" in response)

    # key and value split across lines
    ipi.ExecutePartialLine("{")
    ipi.ExecutePartialLine("'joe'")
    ipi.ExecutePartialLine(": ")
    ipi.ExecutePartialLine("       42")
    ipi.ExecutePartialLine(",")
    ipi.ExecutePartialLine("3:45")
    response = ipi.ExecuteLine("}")
    Assert(repr({'joe':42, 3:45}) in response)

    # empty dict across blank continuation lines
    ipi.ExecutePartialLine("{")
    ipi.ExecutePartialLine("")
    ipi.ExecutePartialLine("")
    response = ipi.ExecuteLine("}")
    Assert("{}" in response)
    ipi.End()
###########################################################
# Some whitespace wackiness
def test_whitespace():
    """Whitespace-only lines don't break subsequent input; leading
    whitespace on a statement at top level is a SyntaxError."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("  ")
    response = ipi.ExecuteLine("")
    ipi.End()

    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("  ")
    response = ipi.ExecuteLine("2")
    Assert("2" in response)
    ipi.End()

    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("  ")
    response = ipi.ExecuteLine("  2", True)
    Assert("SyntaxError:" in response)
    ipi.End()

###########################################################
# test the indentation error in the interactive mode
def test_indentation_interactive():
    """An empty derived-class body raises IndentationError interactively."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("class C:pass")
    response = ipi.ExecuteLine("")
    AreEqual(response, "")
    ipi.ExecutePartialLine("class D(C):")
    response = ipi.ExecuteLine("", True)
    Assert("IndentationError:" in response)
    ipi.End()

###########################################################
# test /mta w/ no other args
def test_mta():
    """Console under -X:MTA behaves the same for basic class input.

    NOTE(review): a second ``def test_mta`` later in this module shadows
    this definition, so only the later one is actually executed.
    """
    ipi = IronPythonInstance(executable, exec_prefix, '-X:MTA')
    AreEqual(ipi.Start(), True)
    ipi.ExecutePartialLine("class C:pass")
    response = ipi.ExecuteLine("")
    AreEqual(response, "")
    ipi.ExecutePartialLine("class D(C):")
    response = ipi.ExecuteLine("", True)
    Assert("IndentationError:" in response)
    ipi.End()
###########################################################
# test for comments in interactive input
def test_comments():
    """Comment-only lines produce no output and don't terminate blocks."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)

    response = ipi.ExecuteLine("# this is some comment line")
    AreEqual(response, "")
    response = ipi.ExecuteLine("    # this is some comment line")
    AreEqual(response, "")
    response = ipi.ExecuteLine("# this is some more comment line")
    AreEqual(response, "")

    # comment lines inside an if-block don't end the block
    ipi.ExecutePartialLine("if 100:")
    ipi.ExecutePartialLine("    print 100")
    ipi.ExecutePartialLine("# this is some more comment line inside if")
    ipi.ExecutePartialLine("    # this is some indented comment line inside if")
    ipi.ExecutePartialLine("    print 200")
    response = ipi.ExecuteLine("")
    AreEqual(response, "100" + newline + "200")

    ipi.End()
def test_global_values():
    """globals() is a real dict usable via values()/fromkeys()."""
    # NOTE(review): this test never calls ipi.End() — presumably relies on
    # process teardown; confirm against IronPythonInstance semantics.
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("import clr")
    response = ipi.ExecuteLine("[x for x in globals().values()]")
    Assert(response.startswith('['))

    d = eval(ipi.ExecuteLine("globals().fromkeys(['a', 'b'], 'c')"))
    AreEqual(d, {'a':'c', 'b':'c'})

def test_globals8961():
    """Regression 8961: globals() keys/values track new assignments."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    response = ipi.ExecuteLine("print globals().keys()")
    res = set(eval(response))
    AreEqual(res, set(['__builtins__', '__name__', '__doc__']))

    ipi.ExecuteLine("a = None")
    response = ipi.ExecuteLine("print globals().keys()")
    res = set(eval(response))
    AreEqual(res, set(['__builtins__', '__name__', '__doc__', 'a']))

    # values: the builtin module repr can't be eval'd, so substitute it
    response = ipi.ExecuteLine("print globals().values()")
    l = eval(response.replace("<module '__builtin__' (built-in)>", '"builtin"'))
    res = set(l)
    AreEqual(len(l), 4)
    AreEqual(res, set(['builtin', '__main__', None]))

    ipi.ExecuteLine("b = None")
    response = ipi.ExecuteLine("print globals().values()")
    l = eval(response.replace("<module '__builtin__' (built-in)>", '"builtin"'))
    res = set(l)
    AreEqual(len(l), 5)
    AreEqual(res, set(['builtin', '__main__', None]))
def test_console_input_output():
    """Table-driven: run each (input, expected output) pair on a restarted console."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    # (console input, expected printed result) — trailing newlines and
    # comments after the expression must not change the result
    input_output = [
        ("x=100",""),
        ("x=200\n",""),
        ("\nx=300",""),
        ("\nx=400\n",""),
        ("500","500"),
        ("600\n\n\n\n\n\n\n\n\n\n\n","600"),
        ("valid=3;more_valid=4;valid","3"),
        ("valid=5;more_valid=6;more_valid\n\n\n\n\n","6"),
        ("valid=7;more_valid=8;#valid",""),
        ("valid=9;valid;# more_valid\n","9"),
        ("valid=11;more_valid=12;more_valid# should be valid input\n\n\n\n","12"),
    ]

    for x in input_output:
        AreEqual(ipi.Start(), True)
        AreEqual(ipi.ExecuteLine(x[0]),x[1])
        ipi.End()

# expect a clean exception message/stack from thread
def test_thrown_from_thread():
    """An exception on a background thread prints a clean Python traceback."""
    inputScript = path_combine(testpath.temporary_dir, "throwingfromthread.py")
    write_to_file(inputScript, '''
def f(): raise AssertionError, 'hello'

import thread, time
thread.start_new_thread(f, tuple())
time.sleep(2)
''')
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " " + inputScript)
    (result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
    AreEqual(exitCode, 0)
    Assert("AssertionError: hello" in output2)
    Assert("IronPython." not in output2)    # '.' is necessary here
    ipi.End()
def test_aform_feeds():
    """Form-feed characters (\\f) in interactive input are tolerated."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)

    response = ipi.ExecuteLine("\fprint 'hello'")
    AreEqual(response, "hello")
    response = ipi.ExecuteLine("  \fprint 'hello'")
    AreEqual(response, "hello")

    ipi.ExecutePartialLine("def f():")
    ipi.ExecutePartialLine("\f    print 'hello'")
    ipi.ExecuteLine('')
    response = ipi.ExecuteLine('f()')
    AreEqual(response, "hello")

    # \f resets indent to 0
    ipi.ExecutePartialLine("def f():")
    ipi.ExecutePartialLine("    \f    x = 'hello'")
    ipi.ExecutePartialLine("\f    print x")
    ipi.ExecuteLine('')
    response = ipi.ExecuteLine('f()')
    AreEqual(response, "hello")

    # \f resets indent to 0
    ipi.ExecutePartialLine("def f():")
    ipi.ExecutePartialLine("    \f    x = 'hello'")
    ipi.ExecutePartialLine("        print x")
    ipi.ExecuteLine('')
    response = ipi.ExecuteLine('f()')
    AreEqual(response, "hello")

def test_ipy_dash_S():
    """ipy -S should still install Lib into sys.path"""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -S")
    AreEqual(ipi.Start(), True)
    response = ipi.ExecuteLine("import sys")
    response = ipi.ExecuteLine("print sys.path")
    Assert(response.find('Lib') != -1)
def test_startup_dir():
    """A fresh console defines only the standard module globals."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    response = ipi.ExecuteLine("print dir()")
    AreEqual(sorted(eval(response)), sorted(['__builtins__', '__doc__', '__name__']))

def test_ipy_dash_m():
    """ipy -m <module> runs the module with correct sys.argv and exit code."""
    import sys
    # write a disposable module into the Lib directory on sys.path
    # NOTE(review): if no sys.path entry contains 'Lib', ``filename`` is
    # never bound and the finally clause raises NameError — confirm the
    # test environment guarantees a Lib entry.
    for path in sys.path:
        if path.find('Lib') != -1:
            filename = System.IO.Path.Combine(path, 'somemodule.py')
            break

    try:
        f = file(filename, 'w')
        f.write('print "hello"\n')
        f.write('import sys\n')
        f.write('print sys.argv')
        f.close()

        # need to run these tests where we have access to runpy.py
        path = System.IO.FileInfo(__file__).DirectoryName

        # simple case works
        ipi = IronPythonInstance(executable, path, extraArgs + " -m somemodule")
        res, output, err, exit = ipi.StartAndRunToCompletion()
        AreEqual(res, True)     # run should have worked
        AreEqual(exit, 0)       # should have returned 0
        output = output.replace('\r\n', '\n')
        lines = output.split('\n')
        AreEqual(lines[0], 'hello')
        Assert(samefile(eval(lines[1])[0],
               filename))

        # we receive any arguments in sys.argv
        ipi = IronPythonInstance(executable, path, extraArgs + " -m somemodule foo bar")
        res, output, err, exit = ipi.StartAndRunToCompletion()
        AreEqual(res, True)     # run should have worked
        AreEqual(exit, 0)       # should have returned 0
        output = output.replace('\r\n', '\n')
        lines = output.split('\n')
        AreEqual(lines[0], 'hello')
        AreEqual(eval(lines[1]), [filename, 'foo', 'bar'])

        # rewrite the module so it exits with status 1
        f = file(filename, 'w')
        f.write('print "hello"\n')
        f.write('import sys\n')
        f.write('sys.exit(1)')
        f.close()

        # sys.exit works
        ipi = IronPythonInstance(executable, path, extraArgs + " -m somemodule")
        res, output, err, exit = ipi.StartAndRunToCompletion()
        AreEqual(res, True)     # run should have worked
        AreEqual(exit, 1)       # module called sys.exit(1)
        output = output.replace('\r\n', '\n')
        lines = output.split('\n')
        AreEqual(lines[0], 'hello')
    finally:
        nt.unlink(filename)
@disabled("CodePlex Work Item 10925")
def test_ipy_dash_m_negative():
    """ipy -m rejects builtin modules and modules inside packages."""
    # builtin modules should not work
    for modname in [ "sys", "datetime" ]:
        ipi = IronPythonInstance(executable, exec_prefix,
                                 extraArgs + " -m " + modname)
        res, output, err, exit = ipi.StartAndRunToCompletion()
        AreEqual(exit, -1)

    # Modules within packages should not work
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -m testpkg1.mod1")
    res, output, err, exit = ipi.StartAndRunToCompletion()
    AreEqual(res, True)     # run should have worked
    AreEqual(exit, 1)       # failed run exits with 1
    Assert("SyntaxError: invalid syntax" in err,
           "stderr is:" + str(err))

def test_ipy_dash_m_pkgs():
    """ipy -m runs packages found via IRONPYTHONPATH; bad names fail."""
    # Python packages work
    import nt
    Assert("testpkg1" in [x.lower() for x in nt.listdir(nt.getcwd())], nt.getcwd())
    old_ipy_path = get_environ_variable("IRONPYTHONPATH")
    try:
        nt.environ["IRONPYTHONPATH"] = nt.getcwd()
        ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -m testpkg1")
        res, output, err, exit = ipi.StartAndRunToCompletion()
        AreEqual(res, True)     # run should have worked
        AreEqual(exit, 0)       # should have returned 0
        AreEqual(output, "")

        # Bad module names should not work
        ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -m libxyz")
        res, output, err, exit = ipi.StartAndRunToCompletion()
        AreEqual(res, True)     # run should have worked
        AreEqual(exit, 1)       # failed import exits with 1
        Assert("ImportError: No module named libxyz" in err,
               "stderr is:" + str(err))
    finally:
        nt.environ["IRONPYTHONPATH"] = old_ipy_path
def test_ipy_dash_c():
"""verify ipy -c cmd doesn't print expression statements"""
ipi = IronPythonInstance(executable, exec_prefix, "-c True;False")
res = ipi.StartAndRunToCompletion()
AreEqual(res[0], True) # should have started
AreEqual(res[1], '') # no std out
AreEqual(res[2], '') # no std err
AreEqual(res[3], 0) # should return 0
#############################################################################
# CP11924 - verify 'from __future__ import division' works

def test_future_division():
    """After the __future__ import, 11/4 must evaluate with true division."""
    session = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(session.Start(), True)
    session.ExecuteLine("from __future__ import division")
    AreEqual(session.ExecuteLine("11/4"), "2.75")
    session.End()
#############################################################################
# CP2206

def test_future_with():
    """The 'with' statement must call __enter__ and bind its result."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    # Define a minimal context manager interactively.
    ipi.ExecutePartialLine("class K(object):")
    ipi.ExecutePartialLine(" def __enter__(self): return 3.14")
    ipi.ExecutePartialLine(" def __exit__(self, type, value, tb): return False")
    ipi.ExecuteLine("")
    ipi.ExecutePartialLine("with K() as d:")
    ipi.ExecutePartialLine(" print d")
    response = ipi.ExecuteLine("")
    AreEqual(response, "3.14")
    ipi.End()
#############################################################################
# Merlin 148481

def test_ipy_dash():
    #Verify that typing a - in the arguments starts an interactive session
    session = IronPythonInstance(executable, exec_prefix, "-")
    AreEqual(session.Start(), True)
    AreEqual(session.ExecuteLine("42"), "42")
    session.End()
#############################################################################
def test_mta():
    """-X:MTA must run the console in the MTA apartment state, and it must
    stay MTA after interactive class definitions."""
    ipi = IronPythonInstance(executable, exec_prefix, '-X:MTA')
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("import System")
    response = ipi.ExecuteLine("str(System.Threading.Thread.CurrentThread.ApartmentState)")
    AreEqual(response, "'MTA'")
    # Defining a class interactively must not change the apartment state.
    ipi.ExecutePartialLine("class C:pass")
    response = ipi.ExecuteLine("")
    AreEqual(response, "")
    response = ipi.ExecuteLine("str(System.Threading.Thread.CurrentThread.ApartmentState)")
    AreEqual(response, "'MTA'")
    ipi.End()
def test_displayhook():
    """A custom sys.displayhook must be invoked for interactive
    expression results."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("import sys")
    # Install a hook that prefixes every displayed value with 'foo'.
    ipi.ExecutePartialLine("def f(x): print 'foo', x")
    ipi.ExecuteLine("")
    response = ipi.ExecuteLine("sys.displayhook = f")
    response = ipi.ExecuteLine("42")
    AreEqual(response, "foo 42")
    # Shut the console down so the child process is not leaked
    # (matches the other tests in this file).
    ipi.End()
def test_excepthook():
    """A custom sys.excepthook must receive (type, value, traceback) for
    uncaught interactive exceptions."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("import sys")
    # Install a hook that echoes whatever arguments it receives.
    ipi.ExecutePartialLine("def f(*args): print 'foo', args")
    ipi.ExecuteLine("")
    response = ipi.ExecuteLine("sys.excepthook = f")
    response = ipi.ExecuteLine("raise Exception", True)
    AssertContains(response, "foo (<type 'exceptions.Exception'>, Exception(), <traceback object at")
    # Shut the console down so the child process is not leaked
    # (matches the other tests in this file).
    ipi.End()
def test_last_exception():
    """sys.last_value/last_type/last_traceback must be absent initially and
    populated after an uncaught interactive exception."""
    ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(ipi.Start(), True)
    ipi.ExecuteLine("import sys")
    # No exception has occurred yet, so the attribute must not exist.
    response = ipi.ExecuteLine("hasattr(sys, 'last_value')")
    AreEqual(response, 'False')
    # Trigger a NameError and verify all three attributes are recorded.
    AssertContains(ipi.ExecuteLine("x", True), "NameError")
    response = ipi.ExecuteLine("sys.last_value")
    AreEqual(response, "NameError(\"name 'x' is not defined\",)")
    response = ipi.ExecuteLine("sys.last_type")
    AreEqual(response, "<type 'exceptions.NameError'>")
    response = ipi.ExecuteLine("sys.last_traceback")
    AssertContains(response, "<traceback object at ")
    # Shut the console down so the child process is not leaked
    # (matches the other tests in this file).
    ipi.End()
def test_sta_sleep_Warning():
    """Thread.Sleep on an STA thread must emit a RuntimeWarning on stderr."""
    session = IronPythonInstance(
        executable, exec_prefix,
        '-c "from System.Threading import Thread;Thread.Sleep(100)"')
    result = session.StartAndRunToCompletion()
    stderr_text = result[2]
    Assert(stderr_text.endswith("RuntimeWarning: Calling Thread.Sleep on an STA thread doesn't pump messages. Use Thread.CurrentThread.Join instead.\r\n"))
def test_newline():
    """Console output must use \\r\\n line endings, never doubled \\r\\r\\n."""
    session = IronPythonInstance(executable, exec_prefix, "")
    session.proc.Start()
    session.reader = session.proc.StandardOutput
    banner = session.EatToPrompt()
    Assert('\r\r\n' not in banner)
    Assert('\r\n' in banner)
#############################################################################
# Remote console tests
from System.Diagnostics import Process
def get_process_ids(ipi):
    """Return (console pid, remote-runtime pid) for console instance *ipi*."""
    ipi.EnsureInteractiveRemote()
    ipi.proc.Refresh()
    console_pid = ipi.proc.Id
    ipi.ExecuteLine("import System")
    runtime_pid_text = ipi.ExecuteLineRemote("System.Diagnostics.Process.GetCurrentProcess().Id")
    Assert(runtime_pid_text.isdigit(), "remoteRuntimeProcessId is '%s'" % runtime_pid_text)
    return console_pid, int(runtime_pid_text)
def start_remote_console(args = ""):
    """Launch RemoteConsole.py in a new console instance and return it."""
    script = testpath.test_inputs_dir + "\\RemoteConsole.py"
    cmdline = extraArgs + " \"" + script + "\" -X:ExceptionDetail " + args
    session = IronPythonInstance(executable, exec_prefix, cmdline)
    AreEqual(session.Start(), True)
    return session
# Basic check that the remote console actually uses two processes
def test_remote_console_processes():
    # A plain local console runs everything in a single process.
    local = IronPythonInstance(executable, exec_prefix, extraArgs)
    AreEqual(local.Start(), True)
    console_pid, runtime_pid = get_process_ids(local)
    AreEqual(console_pid, runtime_pid)
    local.End()

    # The remote console hosts user code in a second process.
    remote = start_remote_console()
    console_pid, runtime_pid = get_process_ids(remote)
    AreNotEqual(console_pid, runtime_pid)
    remote.End()
# The remote runtime should terminate when the console terminates
def test_remote_runtime_normal_exit():
    session = start_remote_console()
    _, runtime_pid = get_process_ids(session)
    runtime = Process.GetProcessById(runtime_pid)
    Assert(not runtime.HasExited)
    session.End()
    # The test is that this wait succeeds (the runtime exits as well).
    runtime.WaitForExit()
# Stress the input-output streams
def test_remote_io():
    session = start_remote_console()
    for _ in range(100):
        AreEqual(session.ExecuteLineRemote("2+2"), "4")
    session.End()
# Kill the remote runtime and ensure that another process starts up again
def test_remote_server_restart():
    """After killing the remote runtime, the console must spawn a fresh
    runtime process while keeping the same console process."""
    ipi = start_remote_console()
    consoleProcessId, remoteRuntimeProcessId = get_process_ids(ipi)
    runtimeProcess = Process.GetProcessById(remoteRuntimeProcessId)
    # NOTE(review): this compares a Process object against a pid integer,
    # which is always unequal — possibly AreNotEqual(remoteRuntimeProcessId,
    # consoleProcessId) was intended; confirm.
    AreNotEqual(runtimeProcess, consoleProcessId)
    runtimeProcess.Kill()
    runtimeProcess.WaitForExit()
    # The Process.Exited event is fired asynchronously, and might take some time to fire.
    # Hence, we need to block for a known marker
    ipi.EatToMarker("Remote runtime terminated")
    # We need to press Enter to nudge the old console out of the ReadLine...
    restartMessage = ipi.ExecuteLine("", True)
    ipi.ReadError()
    consoleProcessId2, remoteRuntimeProcessId2 = get_process_ids(ipi)
    AreEqual(consoleProcessId, consoleProcessId2)
    # This is technically not 100% correct as there is a small chance the process id might get reused
    AreNotEqual(remoteRuntimeProcessId, remoteRuntimeProcessId2)
    ipi.End()
# Check that an exception can be remoted back over the reverse channel
# Note that exceptions are not written to stdout by the remote process
def test_remote_console_exception():
    session = start_remote_console()
    error_text = session.ExecuteLine("1/0", True)
    AssertContains(error_text, "ZeroDivisionError")
    session.End()
def test_remote_startup_script():
    """-i <script> must execute the script before entering interactive mode."""
    session = start_remote_console("-i " + testpath.test_inputs_dir + "\\simpleCommand.py")
    AreEqual(session.ExecuteLine("x"), "1")
    session.End()
def get_abort_command_output():
    """Run a hanging command in a remote console and return its output.

    Per the inline marker comment below, the 'ABORT ME!!!' string token is
    what triggers the harness to abort the remote command.
    """
    ipi = start_remote_console()
    ipi.ExecuteLine("import System")
    ipi.ExecutePartialLine ("def Hang():")
    ipi.ExecutePartialLine (" print 'ABORT ME!!!' # This string token should trigger an abort...")
    ipi.ExecutePartialLine (" infinite = System.Threading.Timeout.Infinite")
    ipi.ExecutePartialLine (" System.Threading.Thread.CurrentThread.Join(infinite)")
    ipi.ExecuteLine ("")
    result = ipi.ExecuteLine("Hang()", True)
    ipi.End()
    return result
def test_remote_abort_command():
    """Aborting a hung remote command must surface as KeyboardInterrupt.

    Under stress, the underlying ThreadAbortException occasionally leaks
    through instead; retry up to 10 times before declaring failure.
    """
    for i in range(10):
        output = get_abort_command_output()
        if "KeyboardInterrupt" in output:
            # Success: the abort surfaced as KeyboardInterrupt and the raw
            # ThreadAbortException did not leak through.
            AssertDoesNotContain(output, "Thread was being aborted.") # ThreadAbortException
            return
        # Rarely, under stress conditions, ThreadAbortException leaks through.
        # Keep retrying until we actually get KeyboardInterrupt
        AssertContains(output, "Thread was being aborted.") # ThreadAbortException
    # BUG FIX: the failure message previously said "Only KeyboardInterrupt
    # was thrown", contradicting the condition it reports.
    Assert(False, "KeyboardInterrupt not thrown. Only ThreadAbortException was thrown")
def test_exception_slicing_warning():
    """Exception indexing works, and under -3 it emits a DeprecationWarning."""
    ipi = IronPythonInstance(executable, exec_prefix, '-c "print Exception(*range(2))[1]"')
    res = ipi.StartAndRunToCompletion()
    AreEqual(res[0], True)    # should have started
    AreEqual(res[1], '1\r\n') # some std out
    AreEqual(res[2], '')      # no std err
    AreEqual(res[3], 0)       # should return 0

    # With -3 (py3k warnings) and the filter hiding this warning removed,
    # indexing an exception must emit the DeprecationWarning on stderr.
    ipi = IronPythonInstance(executable, exec_prefix,
                             '-3 -c "import warnings;'
                             'warnings.filters.reverse();'
                             'warnings.filters.pop();'
                             'print Exception(*range(2))[1]"')
    res = ipi.StartAndRunToCompletion()
    AreEqual(res[0], True)    # should have started
    AreEqual(res[1], '1\r\n') # std out
    Assert(res[2].endswith('DeprecationWarning: __getitem__ not supported for exception classes in 3.x; use args attribute\r\n')) #std err
    AreEqual(res[3], 0)       # should return 0
#------------------------------------------------------------------------------
# Module entry point: discover and run all the test_* functions defined above.
run_test(__name__)
|
|
"""numpy.distutils.fcompiler
Contains FCompiler, an abstract base class that defines the interface
for the numpy.distutils Fortran compiler abstraction model.
Terminology:
To be consistent, where the term 'executable' is used, it means the single
file, like 'gcc', that is executed, and should be a string. In contrast,
'command' means the entire command line, like ['gcc', '-c', 'file.c'], and
should be a list.
But note that FCompiler.executables is actually a dictionary of commands.
"""
__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers',
'dummy_fortran_file']
import os
import sys
import re
from distutils.sysconfig import get_python_lib
from distutils.fancy_getopt import FancyGetopt
from distutils.errors import DistutilsModuleError, \
DistutilsExecError, CompileError, LinkError, DistutilsPlatformError
from distutils.util import split_quoted, strtobool
from numpy.distutils.ccompiler import CCompiler, gen_lib_options
from numpy.distutils import log
from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \
make_temp_file, get_shared_lib_extension
from numpy.distutils.exec_command import find_executable
from numpy.distutils import _shell_utils
from .environment import EnvironmentConfig
__metaclass__ = type
class CompilerNotFound(Exception):
    """Raised when no usable Fortran compiler executable can be located."""
    pass
def flaglist(s):
    """Return *s* split into a list of flags if it is a string,
    otherwise return it unchanged."""
    return split_quoted(s) if is_string(s) else s
def str2bool(s):
    """Coerce *s* to a boolean, parsing strings with distutils' strtobool."""
    if not is_string(s):
        return bool(s)
    return strtobool(s)
def is_sequence_of_strings(seq):
    """True when *seq* is a sequence whose elements are all strings."""
    if not is_sequence(seq):
        return False
    return all_strings(seq)
class FCompiler(CCompiler):
    """Abstract base class to define the interface that must be implemented
    by real Fortran compiler classes.

    Methods that subclasses may redefine:

        update_executables(), find_executables(), get_version()
        get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug()
        get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(),
        get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(),
        get_flags_arch_f90(), get_flags_debug_f90(),
        get_flags_fix(), get_flags_linker_so()

    DON'T call these methods (except get_version) after
    constructing a compiler instance or inside any other method.
    All methods, except update_executables() and find_executables(),
    may call the get_version() method.

    After constructing a compiler instance, always call customize(dist=None)
    method that finalizes compiler construction and makes the following
    attributes available:

      compiler_f77
      compiler_f90
      compiler_fix
      linker_so
      archiver
      ranlib
      libraries
      library_dirs
    """

    # These are the environment variables and distutils keys used.
    # Each configuration description is
    # (<hook name>, <environment variable>, <key in distutils.cfg>, <convert>, <append>)
    # The hook names are handled by the self._environment_hook method.
    #   - names starting with 'self.' call methods in this class
    #   - names starting with 'exe.' return the key in the executables dict
    #   - names like 'flags.YYY' return self.get_flag_YYY()
    # convert is either None or a function to convert a string to the
    # appropriate type used.

    distutils_vars = EnvironmentConfig(
        distutils_section='config_fc',
        noopt = (None, None, 'noopt', str2bool, False),
        noarch = (None, None, 'noarch', str2bool, False),
        debug = (None, None, 'debug', str2bool, False),
        verbose = (None, None, 'verbose', str2bool, False),
    )

    command_vars = EnvironmentConfig(
        distutils_section='config_fc',
        compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False),
        compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False),
        compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False),
        version_cmd = ('exe.version_cmd', None, None, None, False),
        linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False),
        linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False),
        archiver = (None, 'AR', 'ar', None, False),
        ranlib = (None, 'RANLIB', 'ranlib', None, False),
    )

    flag_vars = EnvironmentConfig(
        distutils_section='config_fc',
        f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True),
        f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True),
        free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True),
        fix = ('flags.fix', None, None, flaglist, False),
        opt = ('flags.opt', 'FOPT', 'opt', flaglist, True),
        opt_f77 = ('flags.opt_f77', None, None, flaglist, False),
        opt_f90 = ('flags.opt_f90', None, None, flaglist, False),
        arch = ('flags.arch', 'FARCH', 'arch', flaglist, False),
        arch_f77 = ('flags.arch_f77', None, None, flaglist, False),
        arch_f90 = ('flags.arch_f90', None, None, flaglist, False),
        debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True),
        debug_f77 = ('flags.debug_f77', None, None, flaglist, False),
        debug_f90 = ('flags.debug_f90', None, None, flaglist, False),
        flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True),
        linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True),
        linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True),
        ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True),
    )

    # Map source-file extension -> Fortran dialect.
    language_map = {'.f': 'f77',
                    '.for': 'f77',
                    '.F': 'f77',    # XXX: needs preprocessor
                    '.ftn': 'f77',
                    '.f77': 'f77',
                    '.f90': 'f90',
                    '.F90': 'f90',  # XXX: needs preprocessor
                    '.f95': 'f90',
                    }
    language_order = ['f90', 'f77']

    # These will be set by the subclass
    compiler_type = None
    compiler_aliases = ()
    version_pattern = None

    possible_executables = []
    executables = {
        'version_cmd': ["f77", "-v"],
        'compiler_f77': ["f77"],
        'compiler_f90': ["f90"],
        'compiler_fix': ["f90", "-fixed"],
        'linker_so': ["f90", "-shared"],
        'linker_exe': ["f90"],
        'archiver': ["ar", "-cr"],
        'ranlib': None,
        }

    # If compiler does not support compiling Fortran 90 then it can
    # suggest using another compiler. For example, gnu would suggest
    # gnu95 compiler type when there are F90 sources.
    suggested_f90_compiler = None

    compile_switch = "-c"
    object_switch = "-o "   # Ending space matters! It will be stripped
                            # but if it is missing then object_switch
                            # will be prefixed to object file name by
                            # string concatenation.
    library_switch = "-o "  # Ditto!

    # Switch to specify where module files are created and searched
    # for USE statement.  Normally it is a string and also here ending
    # space matters. See above.
    module_dir_switch = None

    # Switch to specify where module files are searched for USE statement.
    module_include_switch = '-I'

    pic_flags = []           # Flags to create position-independent code

    src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR']
    obj_extension = ".o"

    shared_lib_extension = get_shared_lib_extension()
    static_lib_extension = ".a"   # or .lib
    static_lib_format = "lib%s%s" # or %s%s
    shared_lib_format = "%s%s"
    exe_extension = ""

    # Shared cache of find_executable() lookups (class-level, so it is
    # shared across all compiler instances).
    _exe_cache = {}

    _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90',
                        'compiler_fix', 'linker_so', 'linker_exe', 'archiver',
                        'ranlib']

    # This will be set by new_fcompiler when called in
    # command/{build_ext.py, build_clib.py, config.py} files.
    c_compiler = None

    # extra_{f77,f90}_compile_args are set by build_ext.build_extension method
    extra_f77_compile_args = []
    extra_f90_compile_args = []
    def __init__(self, *args, **kw):
        """Forward to CCompiler.__init__ and bind per-instance copies of the
        environment-config tables and the executables dictionary."""
        CCompiler.__init__(self, *args, **kw)
        # Clone the class-level config tables so lookups are routed through
        # this instance's _environment_hook.
        self.distutils_vars = self.distutils_vars.clone(self._environment_hook)
        self.command_vars = self.command_vars.clone(self._environment_hook)
        self.flag_vars = self.flag_vars.clone(self._environment_hook)
        # Copy so instance-level edits don't mutate the class default.
        self.executables = self.executables.copy()
        for e in self._executable_keys:
            if e not in self.executables:
                self.executables[e] = None

        # Some methods depend on .customize() being called first, so
        # this keeps track of whether that's happened yet.
        self._is_customised = False
def __copy__(self):
obj = self.__new__(self.__class__)
obj.__dict__.update(self.__dict__)
obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook)
obj.command_vars = obj.command_vars.clone(obj._environment_hook)
obj.flag_vars = obj.flag_vars.clone(obj._environment_hook)
obj.executables = obj.executables.copy()
return obj
def copy(self):
return self.__copy__()
    # Use properties for the attributes used by CCompiler. Setting them
    # as attributes from the self.executables dictionary is error-prone,
    # so we get them from there each time.
    def _command_property(key):
        # Build a read-only property that looks up *key* in self.executables;
        # valid only after customize() has run.
        def fget(self):
            assert self._is_customised
            return self.executables[key]
        return property(fget=fget)
    version_cmd = _command_property('version_cmd')
    compiler_f77 = _command_property('compiler_f77')
    compiler_f90 = _command_property('compiler_f90')
    compiler_fix = _command_property('compiler_fix')
    linker_so = _command_property('linker_so')
    linker_exe = _command_property('linker_exe')
    archiver = _command_property('archiver')
    ranlib = _command_property('ranlib')
# Make our terminology consistent.
def set_executable(self, key, value):
self.set_command(key, value)
def set_commands(self, **kw):
for k, v in kw.items():
self.set_command(k, v)
def set_command(self, key, value):
if not key in self._executable_keys:
raise ValueError(
"unknown executable '%s' for class %s" %
(key, self.__class__.__name__))
if is_string(value):
value = split_quoted(value)
assert value is None or is_sequence_of_strings(value[1:]), (key, value)
self.executables[key] = value
    ######################################################################
    ## Methods that subclasses may redefine. But don't call these methods!
    ## They are private to FCompiler class and may return unexpected
    ## results if used elsewhere. So, you have been warned..

    def find_executables(self):
        """Go through the self.executables dictionary, and attempt to
        find and assign appropriate executables.

        Executable names are looked for in the environment (environment
        variables, the distutils.cfg, and command line), the 0th-element of
        the command list, and the self.possible_executables list.

        Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77
        or the Fortran 90 compiler executable is used, unless overridden
        by an environment setting.

        Subclasses should call this if overridden.
        """
        assert self._is_customised
        exe_cache = self._exe_cache

        def cached_find_executable(exe):
            # Memoise find_executable() lookups; both the query name and the
            # resolved path are cached so later lookups by either hit.
            if exe in exe_cache:
                return exe_cache[exe]
            fc_exe = find_executable(exe)
            exe_cache[exe] = exe_cache[fc_exe] = fc_exe
            return fc_exe

        def verify_command_form(name, value):
            # Sanity-check that a command is None or a sequence of strings.
            # NOTE(review): defined but never called in this method —
            # confirm whether it is intentionally unused.
            if value is not None and not is_sequence_of_strings(value):
                raise ValueError(
                    "%s value %r is invalid in class %s" %
                    (name, value, self.__class__.__name__))

        def set_exe(exe_key, f77=None, f90=None):
            # Resolve the executable for *exe_key*, trying (in order) any
            # environment/config override, then the defaults and
            # self.possible_executables.  Returns the resolved path or None.
            cmd = self.executables.get(exe_key, None)
            if not cmd:
                return None
            # Note that we get cmd[0] here if the environment doesn't
            # have anything set
            exe_from_environ = getattr(self.command_vars, exe_key)
            if not exe_from_environ:
                possibles = [f90, f77] + self.possible_executables
            else:
                possibles = [exe_from_environ] + self.possible_executables

            # Deduplicate candidates, expanding the <F77>/<F90> placeholders.
            seen = set()
            unique_possibles = []
            for e in possibles:
                if e == '<F77>':
                    e = f77
                elif e == '<F90>':
                    e = f90
                if not e or e in seen:
                    continue
                seen.add(e)
                unique_possibles.append(e)

            for exe in unique_possibles:
                fc_exe = cached_find_executable(exe)
                if fc_exe:
                    cmd[0] = fc_exe
                    return fc_exe
            # Nothing found: clear the command so callers see None.
            self.set_command(exe_key, None)
            return None

        ctype = self.compiler_type
        f90 = set_exe('compiler_f90')
        if not f90:
            f77 = set_exe('compiler_f77')
            if f77:
                log.warn('%s: no Fortran 90 compiler found' % ctype)
            else:
                raise CompilerNotFound('%s: f90 nor f77' % ctype)
        else:
            f77 = set_exe('compiler_f77', f90=f90)
            if not f77:
                log.warn('%s: no Fortran 77 compiler found' % ctype)
        set_exe('compiler_fix', f90=f90)

        set_exe('linker_so', f77=f77, f90=f90)
        set_exe('linker_exe', f77=f77, f90=f90)
        set_exe('version_cmd', f77=f77, f90=f90)
        set_exe('archiver')
        set_exe('ranlib')
    def update_executables(self):
        """Called at the beginning of customisation. Subclasses should
        override this if they need to set up the executables dictionary.

        Note that self.find_executables() is run afterwards, so the
        self.executables dictionary values can contain <F77> or <F90> as
        the command, which will be replaced by the found F77 or F90
        compiler.
        """
        # Default implementation does nothing.
        pass
def get_flags(self):
"""List of flags common to all compiler types."""
return [] + self.pic_flags
def _get_command_flags(self, key):
cmd = self.executables.get(key, None)
if cmd is None:
return []
return cmd[1:]
    def get_flags_f77(self):
        """List of Fortran 77 specific flags."""
        return self._get_command_flags('compiler_f77')
    def get_flags_f90(self):
        """List of Fortran 90 specific flags."""
        return self._get_command_flags('compiler_f90')
    def get_flags_free(self):
        """List of Fortran 90 free format specific flags."""
        return []
    def get_flags_fix(self):
        """List of Fortran 90 fixed format specific flags."""
        return self._get_command_flags('compiler_fix')
    def get_flags_linker_so(self):
        """List of linker flags to build a shared library."""
        return self._get_command_flags('linker_so')
    def get_flags_linker_exe(self):
        """List of linker flags to build an executable."""
        return self._get_command_flags('linker_exe')
    def get_flags_ar(self):
        """List of archiver flags. """
        return self._get_command_flags('archiver')
    def get_flags_opt(self):
        """List of architecture independent compiler flags."""
        return []
    def get_flags_arch(self):
        """List of architecture dependent compiler flags."""
        return []
    def get_flags_debug(self):
        """List of compiler flags to compile with debugging information."""
        return []

    # The per-dialect variants default to the generic implementations above;
    # subclasses may override any of them individually.
    get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt
    get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch
    get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug
def get_libraries(self):
"""List of compiler libraries."""
return self.libraries[:]
def get_library_dirs(self):
"""List of compiler library directories."""
return self.library_dirs[:]
    def get_version(self, force=False, ok_status=[0]):
        """Return the compiler version string.

        Requires customize() to have been called first.

        Raises
        ------
        CompilerNotFound
            If no version could be determined.
        """
        # NOTE(review): ok_status has a mutable default list; it is only
        # read here, but confirm CCompiler.get_version never mutates it.
        assert self._is_customised
        version = CCompiler.get_version(self, force=force, ok_status=ok_status)
        if version is None:
            raise CompilerNotFound()
        return version
    ############################################################
    ## Public methods:

    def customize(self, dist = None):
        """Customize Fortran compiler.

        This method gets Fortran compiler specific information from
        (i) class definition, (ii) environment, (iii) distutils config
        files, and (iv) command line (later overrides earlier).

        This method should be always called after constructing a
        compiler instance. But not in __init__ because Distribution
        instance is needed for (iii) and (iv).
        """
        log.info('customize %s' % (self.__class__.__name__))

        self._is_customised = True

        self.distutils_vars.use_distribution(dist)
        self.command_vars.use_distribution(dist)
        self.flag_vars.use_distribution(dist)

        self.update_executables()

        # find_executables takes care of setting the compiler commands,
        # version_cmd, linker_so, linker_exe, ar, and ranlib
        self.find_executables()

        noopt = self.distutils_vars.get('noopt', False)
        noarch = self.distutils_vars.get('noarch', noopt)
        debug = self.distutils_vars.get('debug', False)

        f77 = self.command_vars.compiler_f77
        f90 = self.command_vars.compiler_f90

        f77flags = []
        f90flags = []
        freeflags = []
        fixflags = []

        if f77:
            f77 = _shell_utils.NativeParser.split(f77)
            f77flags = self.flag_vars.f77
        if f90:
            f90 = _shell_utils.NativeParser.split(f90)
            f90flags = self.flag_vars.f90
            freeflags = self.flag_vars.free
        # XXX Assuming that free format is default for f90 compiler.
        fix = self.command_vars.compiler_fix
        # NOTE: this and similar examples are probably just
        # excluding --coverage flag when F90 = gfortran --coverage
        # instead of putting that flag somewhere more appropriate
        # this and similar examples where a Fortran compiler
        # environment variable has been customized by CI or a user
        # should perhaps eventually be more thoroughly tested and more
        # robustly handled
        if fix:
            fix = _shell_utils.NativeParser.split(fix)
            fixflags = self.flag_vars.fix + f90flags

        oflags, aflags, dflags = [], [], []
        # examine get_flags_<tag>_<compiler> for extra flags
        # only add them if the method is different from get_flags_<tag>
        def get_flags(tag, flags):
            # note that self.flag_vars.<tag> calls self.get_flags_<tag>()
            flags.extend(getattr(self.flag_vars, tag))
            this_get = getattr(self, 'get_flags_' + tag)
            for name, c, flagvar in [('f77', f77, f77flags),
                                     ('f90', f90, f90flags),
                                     ('f90', fix, fixflags)]:
                t = '%s_%s' % (tag, name)
                if c and this_get is not getattr(self, 'get_flags_' + t):
                    flagvar.extend(getattr(self.flag_vars, t))
        if not noopt:
            get_flags('opt', oflags)
            if not noarch:
                get_flags('arch', aflags)
        if debug:
            get_flags('debug', dflags)

        fflags = self.flag_vars.flags + dflags + oflags + aflags

        if f77:
            self.set_commands(compiler_f77=f77+f77flags+fflags)
        if f90:
            self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags)
        if fix:
            self.set_commands(compiler_fix=fix+fixflags+fflags)

        #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
        linker_so = self.linker_so
        if linker_so:
            linker_so_flags = self.flag_vars.linker_so
            if sys.platform.startswith('aix'):
                # AIX needs the ld_so_aix wrapper script and python.exp
                # export file to build shared libraries against Python.
                python_lib = get_python_lib(standard_lib=1)
                ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
                python_exp = os.path.join(python_lib, 'config', 'python.exp')
                linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
            self.set_commands(linker_so=linker_so+linker_so_flags)

        linker_exe = self.linker_exe
        if linker_exe:
            linker_exe_flags = self.flag_vars.linker_exe
            self.set_commands(linker_exe=linker_exe+linker_exe_flags)

        ar = self.command_vars.archiver
        if ar:
            arflags = self.flag_vars.ar
            self.set_commands(archiver=[ar]+arflags)

        self.set_library_dirs(self.get_library_dirs())
        self.set_libraries(self.get_libraries())
    def dump_properties(self):
        """Print out the attributes of a compiler instance."""
        props = []
        for key in list(self.executables.keys()) + \
                ['version', 'libraries', 'library_dirs',
                 'object_switch', 'compile_switch']:
            if hasattr(self, key):
                v = getattr(self, key)
                props.append((key, None, '= '+repr(v)))
        props.sort()

        # FancyGetopt gives us aligned, wrapped help-style output for free.
        pretty_printer = FancyGetopt(props)
        for l in pretty_printer.generate_help("%s instance properties:" \
                                              % (self.__class__.__name__)):
            # Strip the option-style '--' prefix FancyGetopt adds.
            if l[:4]==' --':
                l = ' ' + l[4:]
            print(l)
    ###################

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compile 'src' to product 'obj'."""
        src_flags = {}
        # Pick the compiler by source form: fixed-form F77, free-form F90,
        # or fixed-form F90.
        if is_f_file(src) and not has_f90_header(src):
            flavor = ':f77'
            compiler = self.compiler_f77
            src_flags = get_f77flags(src)
            extra_compile_args = self.extra_f77_compile_args or []
        elif is_free_format(src):
            flavor = ':f90'
            compiler = self.compiler_f90
            if compiler is None:
                raise DistutilsExecError('f90 not supported by %s needed for %s'\
                      % (self.__class__.__name__, src))
            extra_compile_args = self.extra_f90_compile_args or []
        else:
            flavor = ':fix'
            compiler = self.compiler_fix
            if compiler is None:
                raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\
                      % (self.__class__.__name__, src))
            extra_compile_args = self.extra_f90_compile_args or []
        # A trailing space in object_switch means the object file name is a
        # separate argument; otherwise it is concatenated onto the switch.
        if self.object_switch[-1]==' ':
            o_args = [self.object_switch.strip(), obj]
        else:
            o_args = [self.object_switch.strip()+obj]

        assert self.compile_switch.strip()
        s_args = [self.compile_switch, src]

        if extra_compile_args:
            log.info('extra %s options: %r' \
                     % (flavor[1:], ' '.join(extra_compile_args)))

        # Per-source flags extracted from the file for this compiler type.
        extra_flags = src_flags.get(self.compiler_type, [])
        if extra_flags:
            log.info('using compile options from source: %r' \
                     % ' '.join(extra_flags))

        command = compiler + cc_args + extra_flags + s_args + o_args \
                  + extra_postargs + extra_compile_args

        display = '%s: %s' % (os.path.basename(compiler[0]) + flavor,
                              src)
        try:
            self.spawn(command, display=display)
        except DistutilsExecError as e:
            msg = str(e)
            raise CompileError(msg) from None
    def module_options(self, module_dirs, module_build_dir):
        """Return options controlling Fortran module file output and search.

        Uses module_dir_switch for where modules are written and
        module_include_switch for where they are searched.
        """
        options = []
        if self.module_dir_switch is not None:
            # Trailing space => the directory is passed as a separate argument.
            if self.module_dir_switch[-1]==' ':
                options.extend([self.module_dir_switch.strip(), module_build_dir])
            else:
                options.append(self.module_dir_switch.strip()+module_build_dir)
        else:
            print('XXX: module_build_dir=%r option ignored' % (module_build_dir))
            print('XXX: Fix module_dir_switch for ', self.__class__.__name__)
        if self.module_include_switch is not None:
            for d in [module_build_dir]+module_dirs:
                options.append('%s%s' % (self.module_include_switch, d))
        else:
            print('XXX: module_dirs=%r option ignored' % (module_dirs))
            print('XXX: Fix module_include_switch for ', self.__class__.__name__)
        return options
def library_option(self, lib):
return "-l" + lib
def library_dir_option(self, dir):
return "-L" + dir
    def link(self, target_desc, objects,
             output_filename, output_dir=None, libraries=None,
             library_dirs=None, runtime_library_dirs=None,
             export_symbols=None, debug=0, extra_preargs=None,
             extra_postargs=None, build_temp=None, target_lang=None):
        """Link *objects* into *output_filename* (CCompiler.link interface).

        Raises LinkError if the spawned linker command fails, TypeError for
        a non-string *output_dir*.
        """
        objects, output_dir = self._fix_object_args(objects, output_dir)
        libraries, library_dirs, runtime_library_dirs = \
            self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
        lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
                                   libraries)
        if is_string(output_dir):
            output_filename = os.path.join(output_dir, output_filename)
        elif output_dir is not None:
            raise TypeError("'output_dir' must be a string or None")

        if self._need_link(objects, output_filename):
            # Trailing space => the output name is a separate argument.
            if self.library_switch[-1]==' ':
                o_args = [self.library_switch.strip(), output_filename]
            else:
                o_args = [self.library_switch.strip()+output_filename]

            if is_string(self.objects):
                ld_args = objects + [self.objects]
            else:
                ld_args = objects + self.objects
            ld_args = ld_args + lib_opts + o_args
            if debug:
                ld_args[:0] = ['-g']
            if extra_preargs:
                ld_args[:0] = extra_preargs
            if extra_postargs:
                ld_args.extend(extra_postargs)
            self.mkpath(os.path.dirname(output_filename))
            if target_desc == CCompiler.EXECUTABLE:
                linker = self.linker_exe[:]
            else:
                linker = self.linker_so[:]
            command = linker + ld_args
            try:
                self.spawn(command)
            except DistutilsExecError as e:
                msg = str(e)
                raise LinkError(msg) from None
        else:
            log.debug("skipping %s (up-to-date)", output_filename)
    def _environment_hook(self, name, hook_name):
        """Resolve *hook_name* (from the EnvironmentConfig tables) to a value.

        hook_name may be None, a callable, or a string of the form
        'self.<method>', 'exe.<key>' or 'flags.<tag>' — see the comment
        above distutils_vars in this class.
        """
        if hook_name is None:
            return None
        if is_string(hook_name):
            if hook_name.startswith('self.'):
                # Call a method of this instance.
                hook_name = hook_name[5:]
                hook = getattr(self, hook_name)
                return hook()
            elif hook_name.startswith('exe.'):
                # Return the executable (first element) of a command list.
                hook_name = hook_name[4:]
                var = self.executables[hook_name]
                if var:
                    return var[0]
                else:
                    return None
            elif hook_name.startswith('flags.'):
                # Delegate to the matching get_flags_<tag>() method.
                hook_name = hook_name[6:]
                hook = getattr(self, 'get_flags_' + hook_name)
                return hook()
        else:
            return hook_name()
    def can_ccompiler_link(self, ccompiler):
        """
        Check if the given C compiler can link objects produced by
        this compiler.
        """
        # Default: assume compatibility; subclasses override when needed.
        return True

    def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
        """
        Convert a set of object files that are not compatible with the default
        linker, to a file that is compatible.

        Parameters
        ----------
        objects : list
            List of object files to include.
        output_dir : str
            Output directory to place generated object files.
        extra_dll_dir : str
            Output directory to place extra DLL files that need to be
            included on Windows.

        Returns
        -------
        converted_objects : list of str
             List of converted object files.
             Note that the number of output files is not necessarily
             the same as inputs.
        """
        # Must be implemented by subclasses whose can_ccompiler_link()
        # returns False.
        raise NotImplementedError()
## class FCompiler
# Preference-ordered compiler types to try for each platform; matched first
# against sys.platform (regex), then against os.name.
_default_compilers = (
    # sys.platform mappings
    ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95',
               'intelvem', 'intelem', 'flang')),
    ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')),
    ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag', 'vast', 'compaq',
                 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor', 'fujitsu')),
    ('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu',
                  'g95', 'pg')),
    ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')),
    ('irix.*', ('mips', 'gnu', 'gnu95',)),
    ('aix.*', ('ibm', 'gnu', 'gnu95',)),
    # os.name mappings
    ('posix', ('gnu', 'gnu95',)),
    ('nt', ('gnu', 'gnu95',)),
    ('mac', ('gnu95', 'gnu', 'pg')),
    )
fcompiler_class = None
fcompiler_aliases = None
def load_all_fcompiler_classes():
    """Cache all the FCompiler classes found in modules in the
    numpy.distutils.fcompiler package.

    Fills the module-level ``fcompiler_class`` and ``fcompiler_aliases``
    dicts; repeated calls are no-ops once the cache exists.
    """
    from glob import glob
    global fcompiler_class, fcompiler_aliases
    if fcompiler_class is not None:
        # Already scanned.
        return
    fcompiler_class = {}
    fcompiler_aliases = {}
    pattern = os.path.join(os.path.dirname(__file__), '*.py')
    for fname in glob(pattern):
        base = os.path.splitext(os.path.basename(fname))[0]
        module_name = 'numpy.distutils.fcompiler.' + base
        __import__(module_name)
        module = sys.modules[module_name]
        if not hasattr(module, 'compilers'):
            continue
        for cname in module.compilers:
            klass = getattr(module, cname)
            desc = (klass.compiler_type, klass, klass.description)
            fcompiler_class[klass.compiler_type] = desc
            for alias in klass.compiler_aliases:
                if alias in fcompiler_aliases:
                    raise ValueError("alias %r defined for both %s and %s"
                                     % (alias, klass.__name__,
                                        fcompiler_aliases[alias][1].__name__))
                fcompiler_aliases[alias] = desc
def _find_existing_fcompiler(compiler_types,
                             osname=None, platform=None,
                             requiref90=False,
                             c_compiler=None):
    """Return the first usable compiler type from *compiler_types*, or None.

    Each candidate is instantiated, customized against the current
    distribution, and probed for a version.  When *requiref90* is set and a
    candidate lacks an f90 compiler, its suggested f90-capable replacement
    is tried instead.
    """
    from numpy.distutils.core import get_distribution
    dist = get_distribution(always=True)
    for compiler_type in compiler_types:
        v = None
        try:
            c = new_fcompiler(plat=platform, compiler=compiler_type,
                              c_compiler=c_compiler)
            c.customize(dist)
            v = c.get_version()
            if requiref90 and c.compiler_f90 is None:
                v = None
                new_compiler = c.suggested_f90_compiler
                if new_compiler:
                    # Fix: the two names were previously swapped in this
                    # message — it is *new_compiler* that is being tried,
                    # as suggested by *compiler_type*.
                    log.warn('Trying %r compiler as suggested by %r '
                             'compiler for f90 support.' % (new_compiler,
                                                            compiler_type))
                    c = new_fcompiler(plat=platform, compiler=new_compiler,
                                      c_compiler=c_compiler)
                    c.customize(dist)
                    v = c.get_version()
                    if v is not None:
                        compiler_type = new_compiler
            if requiref90 and c.compiler_f90 is None:
                # Propagates out of the loop: caller asked for f90 and this
                # candidate (even after substitution) cannot provide it.
                raise ValueError('%s does not support compiling f90 codes, '
                                 'skipping.' % (c.__class__.__name__))
        except DistutilsModuleError:
            log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type)
        except CompilerNotFound:
            log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type)
        if v is not None:
            return compiler_type
    return None
def available_fcompilers_for_platform(osname=None, platform=None):
    """Return the ordered list of compiler types matching this platform.

    *osname* defaults to ``os.name`` and *platform* to ``sys.platform``;
    both are matched against the patterns in ``_default_compilers``.
    Falls back to ``['gnu']`` when nothing matches.
    """
    if osname is None:
        osname = os.name
    if platform is None:
        platform = sys.platform
    matches = []
    for pattern, compiler_types in _default_compilers:
        if not (re.match(pattern, platform) or re.match(pattern, osname)):
            continue
        for ct in compiler_types:
            # Preserve order while dropping duplicates across patterns.
            if ct not in matches:
                matches.append(ct)
    if not matches:
        matches.append('gnu')
    return matches
def get_default_fcompiler(osname=None, platform=None, requiref90=False,
                          c_compiler=None):
    """Determine the default Fortran compiler to use for the given
    platform.

    Returns the compiler type string of the first candidate that actually
    exists on this system, or None when none is found.
    """
    candidates = available_fcompilers_for_platform(osname, platform)
    log.info("get_default_fcompiler: matching types: '%s'",
             candidates)
    return _find_existing_fcompiler(candidates,
                                    osname=osname,
                                    platform=platform,
                                    requiref90=requiref90,
                                    c_compiler=c_compiler)
# Flag to avoid rechecking for Fortran compiler every time
# Set of (plat, compiler) argument pairs for which new_fcompiler() already
# failed; consulted there to short-circuit repeated lookups.
failed_fcompilers = set()
def new_fcompiler(plat=None,
                  compiler=None,
                  verbose=0,
                  dry_run=0,
                  force=0,
                  requiref90=False,
                  c_compiler = None):
    """Generate an instance of some FCompiler subclass for the supplied
    platform/compiler combination.

    Returns None (and remembers the failure) when no suitable compiler
    class is known.
    """
    global failed_fcompilers
    # The cache key is the *original* arguments, before defaults are
    # resolved below.
    key = (plat, compiler)
    if key in failed_fcompilers:
        return None
    load_all_fcompiler_classes()
    if plat is None:
        plat = os.name
    if compiler is None:
        compiler = get_default_fcompiler(plat, requiref90=requiref90,
                                         c_compiler=c_compiler)
    try:
        module_name, klass, long_description = fcompiler_class[compiler]
    except KeyError:
        try:
            module_name, klass, long_description = fcompiler_aliases[compiler]
        except KeyError:
            msg = "don't know how to compile Fortran code on platform '%s'" % plat
            if compiler is not None:
                msg = msg + " with '%s' compiler." % compiler
            msg = msg + " Supported compilers are: %s)" \
                  % (','.join(fcompiler_class.keys()))
            log.warn(msg)
            failed_fcompilers.add(key)
            return None
    instance = klass(verbose=verbose, dry_run=dry_run, force=force)
    instance.c_compiler = c_compiler
    return instance
def show_fcompilers(dist=None):
    """Print list of available compilers (used by the "--help-fcompiler"
    option to "config_fc").
    """
    if dist is None:
        # Build a throwaway Distribution so config_fc options from the
        # command line / config files are honored.
        from distutils.dist import Distribution
        from numpy.distutils.command.config_compiler import config_fc
        dist = Distribution()
        dist.script_name = os.path.basename(sys.argv[0])
        dist.script_args = ['config_fc'] + sys.argv[1:]
        try:
            dist.script_args.remove('--help-fcompiler')
        except ValueError:
            pass
        dist.cmdclass['config_fc'] = config_fc
        dist.parse_config_files()
        dist.parse_command_line()
    compilers = []      # found and working
    compilers_na = []   # supported on this platform but not installed
    compilers_ni = []   # not supported on this platform
    if not fcompiler_class:
        load_all_fcompiler_classes()
    platform_compilers = available_fcompilers_for_platform()
    for compiler in platform_compilers:
        v = None
        log.set_verbosity(-2)
        try:
            c = new_fcompiler(compiler=compiler, verbose=dist.verbose)
            c.customize(dist)
            v = c.get_version()
        except (DistutilsModuleError, CompilerNotFound) as e:
            log.debug("show_fcompilers: %s not found" % (compiler,))
            log.debug(repr(e))
        # A missing version means the compiler could not be run.
        if v is None:
            compilers_na.append(("fcompiler="+compiler, None,
                              fcompiler_class[compiler][2]))
        else:
            c.dump_properties()
            compilers.append(("fcompiler="+compiler, None,
                           fcompiler_class[compiler][2] + ' (%s)' % v))
    compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers))
    compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2])
                    for fc in compilers_ni]
    compilers.sort()
    compilers_na.sort()
    compilers_ni.sort()
    # FancyGetopt gives the same column layout distutils uses for --help.
    pretty_printer = FancyGetopt(compilers)
    pretty_printer.print_help("Fortran compilers found:")
    pretty_printer = FancyGetopt(compilers_na)
    pretty_printer.print_help("Compilers available for this "
                              "platform, but not found:")
    if compilers_ni:
        pretty_printer = FancyGetopt(compilers_ni)
        pretty_printer.print_help("Compilers not available on this platform:")
    print("For compiler details, run 'config_fc --verbose' setup command.")
def dummy_fortran_file():
    """Write a minimal Fortran source file to a temp location and return
    its path without the '.f' extension."""
    handle, path = make_temp_file(suffix='.f')
    handle.write(" subroutine dummy()\n end\n")
    handle.close()
    # Callers append their own extension, so strip the '.f'.
    return path[:-2]
# Heuristics for classifying Fortran sources by filename and header line.
# Matches classic fixed-form extensions (.for/.ftn/.f77/.f).
is_f_file = re.compile(r'.*\.(for|ftn|f77|f)\Z', re.I).match
# Emacs-style "-*- fortran -*-" / "-*- f90 -*-" / "-*- fix -*-" mode lines.
_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search
_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search
_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search
# A line whose first columns look like free-form code (not a comment and
# not starting with a digit/space pattern of fixed form).
_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match
def is_free_format(file):
    """Check if file is in free format Fortran.

    Returns 1 for free form, 0 for fixed form.  f90 allows both; fixed is
    assumed unless free-format hints are detected in the first lines.
    """
    free = 0
    with open(file, encoding='latin1') as stream:
        line = stream.readline()
        remaining = 10000  # the number of non-comment lines to scan for hints
        if _has_f_header(line) or _has_fix_header(line):
            # Explicit fixed-format marker on the first line.
            remaining = 0
        elif _has_f90_header(line):
            remaining = 0
            free = 1
        while remaining > 0 and line:
            stripped = line.rstrip()
            if stripped and stripped[0] != '!':
                remaining -= 1
                # Free-form hints: code starting in the label columns, or a
                # continuation ampersand at end of line.
                if ((stripped[0] != '\t' and _free_f90_start(stripped[:5]))
                        or stripped[-1:] == '&'):
                    free = 1
                    break
            line = stream.readline()
    return free
def has_f90_header(src):
    """Return a truthy match when the first line of *src* carries an
    emacs-style f90 or fix mode marker."""
    with open(src, encoding='latin1') as stream:
        first = stream.readline()
    return _has_f90_header(first) or _has_fix_header(first)
# Matches "CF77FLAGS(<compiler type>) = <flags>" directive lines.
_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I)

def get_f77flags(src):
    """
    Search the first 20 lines of fortran 77 code for line pattern
    `CF77FLAGS(<fcompiler type>)=<f77 flags>`
    Return a dictionary {<fcompiler type>:<f77 flags>}.
    """
    flags = {}
    with open(src, encoding='latin1') as stream:
        for lineno, line in enumerate(stream, start=1):
            if lineno > 20:
                break
            match = _f77flags_re.match(line)
            if match is None:
                continue
            fcname = match.group('fcname').strip()
            # split_quoted handles shell-style quoting inside the flags.
            flags[fcname] = split_quoted(match.group('fflags').strip())
    return flags
# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags
# Running this module directly lists the Fortran compilers on this system.
if __name__ == '__main__':
    show_fcompilers()
|
|
#!/usr/bin/env python
# Copyright (c) 2011 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
This file contains the definitions for all of the RawMetrics supported by
RAMCloud. When executed, it generates two files, RawMetrics.in.h and
RawMetrics.in.cc, which are included by other files when building RAMCloud.
"""
from __future__ import division, print_function
from glob import glob
from optparse import OptionParser
from pprint import pprint
from functools import partial
import math
import os
import random
import re
import sys
from common import *
# NOTE(review): none of these names are defined in this file — this __all__
# looks copied from a sibling analysis module; verify before relying on
# star-imports of this script.
__all__ = ['average', 'avgAndStdDev', 'parseRecovery']
### Utilities:
class Counter:
    """A shared, monotonically increasing integer.

    Used to number the generated "case" clauses while walking the metric
    tree.
    """

    def __init__(self):
        # Current count; starts at zero.
        self._count = 0

    def next(self):
        """Advance the counter by one."""
        self._count += 1

    def value(self):
        """Return the current count without advancing it."""
        return self._count
class Out:
    """Write lines to a stream at a fixed indentation level.

    Useful for generated code.  The ``_indent`` attribute is read by
    Group.dump_header, so it must keep its name.
    """

    def __init__(self, stream=sys.stdout, indent=0):
        self._stream = stream  # destination file-like object
        self._indent = indent  # depth; each level is four spaces

    def __call__(self, s):
        prefix = ' ' * 4 * self._indent
        self._stream.write('%s%s\n' % (prefix, s))

    def indent(self):
        """Return a new Out writing one level deeper to the same stream."""
        return Out(self._stream, self._indent + 1)
class Metric:
    """A single raw performance counter.
    """

    def __init__(self, name, documentation):
        """ name is the variable name to use for this metric """
        self.name = name
        self.documentation = documentation

    def dump_header(self, out):
        """Emit the C++ declaration lines for this metric."""
        out('/// %s' % self.documentation)
        out('RawMetric %s;' % self.name)

    def initializer(self):
        """Return the C++ member-initializer expression."""
        return '%s(0)' % (self.name)

    def instance_name(self):
        """ Compute the name to use for an instance of this metric. """
        return self.name

    def dump_metric_info_code(self, out, path, counter):
        """ Generate a case statement as part of a giant switch statement
        that allows for iteration over all metrics.
        path is a hierarchical name identifying this element, such
        as 'backup.local' (it includes this object's name, if that
        is desired).
        counter is a Counter used to generate "case" clauses with
        incrementing values.
        """
        out(' case %s:' % (counter.value()))
        out(' return {"%s",' % path)
        out(' &%s};' % path)
        counter.next()
class Group:
    """A group of related performance metrics and subgroups.  Translates
    into a nested struct inside the C++ RawMetrics object.
    """

    def __init__(self, name, documentation):
        """ name is the name of a class to use for this group (i.e.
        initial capital letter).
        """
        self.name = name
        self.documentation = documentation
        self.metrics = []
        self.groups = []

    def metric(self, name, documentation):
        """Add a leaf Metric to this group."""
        self.metrics.append(Metric(name, documentation))

    def group(self, group):
        """Add a nested subgroup."""
        self.groups.append(group)

    def dump_header(self, out):
        """Emit the C++ struct declaration for this group, its constructor
        initializer list, and all children."""
        deep_indent = ' ' * 4 * (out._indent + 2)
        out('/// %s' % self.documentation)
        constructorBody = ''
        if self.name == 'RawMetrics':
            # Top-level object: no nested struct, and its constructor
            # must call init().
            constructorBody = 'init();'
        else:
            out('struct %s {' % self.name)
        children = self.groups + self.metrics
        joiner = '\n%s, ' % (deep_indent)
        init_list = joiner.join(child.initializer() for child in children)
        out(' %s()' % self.name)
        out(' : %s {%s}' % (init_list, constructorBody))
        for child in children:
            child.dump_header(out.indent())
        if self.name != 'RawMetrics':
            out('} %s;' % self.instance_name())

    def initializer(self):
        """Return the C++ member-initializer expression."""
        return '%s()' % self.instance_name()

    def instance_name(self):
        """ Compute the name to use for an instance of this group. """
        return self.name[0].lower() + self.name[1:]

    def dump_metric_info_code(self, out, path, counter):
        """ Generate a case statement as part of a giant switch statement
        that allows for iteration over all metrics.
        path is a hierarchical name identifying this element, such
        as 'backup.local' (it includes this object's name, if that
        is desired).
        counter is a Counter used to generate "case" clauses with
        incrementing values.
        """
        prefix = path + '.' if len(path) != 0 else path
        for child in self.groups + self.metrics:
            child.dump_metric_info_code(out,
                                        prefix + child.instance_name(),
                                        counter)
### Metrics definitions:
# The tree of Groups/Metrics below is what writeBuildFiles() turns into
# the generated C++ RawMetrics struct.
coordinator = Group('Coordinator', 'metrics for coordinator')
coordinator.metric('recoveryCount',
    'number of recoveries in which this coordinator participated')
coordinator.metric('recoveryTicks', 'elapsed time during recoveries')
coordinator.metric('recoveryBuildReplicaMapTicks',
    'time contacting backups and finding replicas for crashed '
    'master')
coordinator.metric('recoveryStartTicks', 'time in Recovery::start')
coordinator.metric('recoveryCompleteTicks',
    'time sending recovery complete RPCs to backups')
# Metrics recorded by master servers, mostly covering crash recovery and
# log replication.
master = Group('Master', 'metrics for masters')
master.metric('recoveryCount',
    'number of recoveries in which this master participated')
master.metric('recoveryTicks', 'the elapsed time during recoveries')
master.metric('replicaManagerTicks', 'time spent in ReplicaManager')
master.metric('segmentAppendTicks', 'time spent in Segment::append')
master.metric('segmentAppendCopyTicks',
    'time spent copying in Segment::append')
master.metric('segmentReadCount',
    'number of BackupClient::getRecoveryData calls issued')
master.metric('segmentReadTicks',
    'elapsed time for getRecoveryData calls to backups')
master.metric('segmentReadStallTicks',
    'time stalled waiting for segments from backups')
master.metric('segmentReadByteCount',
    'bytes of recovery segments received from backups')
master.metric('verifyChecksumTicks',
    'time verifying checksums on objects from backups')
master.metric('recoverSegmentTicks',
    'spent in MasterService::recoverSegment')
master.metric('backupInRecoverTicks',
    'time spent in ReplicaManager::proceed '
    'called from MasterService::recoverSegment')
master.metric('segmentCloseCount',
    'number of complete segments written to backups')
master.metric('recoverySegmentEntryCount',
    'number of recovery segment entries (e.g. objects, tombstones)')
master.metric('recoverySegmentEntryBytes',
    'number of bytes in recovery segment entries (without overhead)')
master.metric('liveObjectCount',
    'number of live objects written during recovery')
master.metric('liveObjectBytes',
    'number of bytes of live object data written during recovery')
master.metric('objectAppendCount',
    'number of objects appended to the log during recovery')
master.metric('objectDiscardCount',
    'number of objects not appended to the log during recovery')
master.metric('safeVersionRecoveryCount',
    'number of safeVersion updates during recovery')
master.metric('safeVersionNonRecoveryCount',
    'number of safeVersion discarded during recovery')
master.metric('tombstoneAppendCount',
    'number of tombstones kept during recovery')
master.metric('tombstoneDiscardCount',
    'number of tombstones discarded during recovery')
master.metric('logSyncTicks',
    'time syncing the log at the end of recovery')
master.metric('logSyncBytes',
    'bytes sent during log sync')
master.metric('recoveryWillTicks',
    'time rebuilding will at the end of recovery')
master.metric('removeTombstoneTicks',
    'time deleting tombstones at the end of recovery')
master.metric('replicationTicks',
    'time with outstanding RPCs to backups')
master.metric('replicationBytes',
    'bytes sent during recovery from first gRD response '
    'through log sync')
master.metric('replicas',
    'number of backups on which to replicate each segment')
master.metric('backupCloseTicks',
    'time closing segments in ReplicaManager')
master.metric('backupCloseCount',
    'number of segments closed in ReplicaManager')
master.metric('logSyncCloseTicks',
    'time close segments during log sync')
master.metric('logSyncCloseCount',
    'number of segments closed during log sync')
master.metric('replicaRecoveries',
    'number of replicas which have started replica recreation')
master.metric('openReplicaRecoveries',
    'of replicaRecoveries how many were for replicas which were open')
master.metric('replicationTasks',
    'max number of outstanding tasks in ReplicaManager')
master.metric('replicationTransmitCopyTicks',
    'time spent copying outgoing rpcs in transport')
master.metric('logSyncTransmitCopyTicks',
    'time spent copying outgoing rpcs in transport just during log sync')
master.metric('replayMemoryReadBytes',
    'rough estimate of memory read during log replay')
master.metric('replayMemoryWrittenBytes',
    'rough estimate of memory written during log replay')
master.metric('replicationTransmitActiveTicks',
    'time transport tx was active during replication')
master.metric('logSyncTransmitActiveTicks',
    'time transport tx was active during log sync')
master.metric('replicationPostingWriteRpcTicks',
    'time spent during recovery starting write rpcs in transport')
master.metric('recoverSegmentPostingWriteRpcTicks',
    'time spent during recoverSegment starting write rpcs in transport')
master.metric('logSyncPostingWriteRpcTicks',
    'time spent during recovery final log sync starting write rpcs in transport')
# Metrics recorded by backup servers (segment storage and recovery reads).
backup = Group('Backup', 'metrics for backups')
backup.metric('recoveryCount',
    'number of recoveries in which this backup participated')
backup.metric('recoveryTicks', 'elapsed time during recovery')
backup.metric('serviceTicks', 'time spent servicing RPC requests')
backup.metric('readCompletionCount',
    'number of getRecoveryData requests successfully completed')
backup.metric('readingDataTicks',
    'time from startReadingData to done reading')
backup.metric('storageReadCount', 'number of segment reads from disk')
backup.metric('storageReadBytes', 'amount of bytes read from disk')
backup.metric('storageReadTicks', 'time reading from disk')
backup.metric('writeClearTicks',
    'time clearing segment memory during segment open')
backup.metric('writeCopyBytes', 'bytes written to backup segments')
backup.metric('writeCopyTicks', 'time copying data to backup segments')
backup.metric('storageWriteCount', 'number of segment writes to disk')
backup.metric('storageWriteBytes', 'bytes written to disk')
backup.metric('storageWriteTicks', 'time writing to disk')
backup.metric('filterTicks', 'time filtering segments')
backup.metric('primaryLoadCount', 'number of primary segments requested')
backup.metric('secondaryLoadCount', 'number of secondary segments requested')
backup.metric('storageType', '1 = in-memory, 2 = on-disk')
backup.metric('uncommittedFramesFreed', 'number of segment frames freed before being fully flushed to disk')
# This class records basic statistics for RPCs (count & execution time):
rpc = Group('Rpc', 'metrics for remote procedure calls')
# The order of entries here, and for the "*Ticks" definitions below,
# must be the same as the order in the RpcOpcode definition in Rpc.h.
rpc.metric('rpc0Count', 'number of invocations of RPC 0 (undefined)')
rpc.metric('rpc1Count', 'number of invocations of RPC 1 (undefined)')
rpc.metric('rpc2Count', 'number of invocations of RPC 2 (undefined)')
rpc.metric('rpc3Count', 'number of invocations of RPC 3 (undefined)')
rpc.metric('rpc4Count', 'number of invocations of RPC 4 (undefined)')
rpc.metric('rpc5Count', 'number of invocations of RPC 5 (undefined)')
rpc.metric('rpc6Count', 'number of invocations of RPC 6 (undefined)')
rpc.metric('pingCount', 'number of invocations of PING RPC')
rpc.metric('proxyPingCount', 'number of invocations of PROXY_PING RPC')
rpc.metric('killCount', 'number of invocations of KILL RPC')
rpc.metric('createTableCount', 'number of invocations of CREATE_TABLE RPC')
rpc.metric('getTableIdCount', 'number of invocations of GET_TABLE_ID RPC')
rpc.metric('dropTableCount', 'number of invocations of DROP_TABLE RPC')
rpc.metric('readCount', 'number of invocations of READ RPC')
rpc.metric('writeCount', 'number of invocations of WRITE RPC')
rpc.metric('removeCount', 'number of invocations of REMOVE RPC')
rpc.metric('enlistServerCount', 'number of invocations of ENLIST_SERVER RPC')
rpc.metric('getServerListCount', 'number of invocations of GET_SERVER_LIST RPC')
rpc.metric('getTabletMapCount', 'number of invocations of GET_TABLET_MAP RPC')
rpc.metric('recoverCount', 'number of invocations of RECOVER RPC')
rpc.metric('hintServerDownCount', 'number of invocations of HINT_SERVER_DOWN RPC')
rpc.metric('recoveryMasterFinishedCount', 'number of invocations of RECOVERY_MASTER_FINISHED RPC')
rpc.metric('enumerateCount', 'number of invocations of ENUMERATE RPC')
rpc.metric('setMasterRecoveryInfoCount', 'number of invocations of SET_MASTER_RECOVERY_INFO RPC')
rpc.metric('fillWithTestDataCount', 'number of invocations of FILL_WITH_TEST_DATA RPC')
rpc.metric('multiReadCount', 'number of invocations of MULTI_READ RPC')
rpc.metric('getMetricsCount', 'number of invocations of GET_METRICS RPC')
rpc.metric('rpc27Count', 'number of invocations of RPC 27 (undefined)')
rpc.metric('backupFreeCount', 'number of invocations of BACKUP_FREE RPC')
rpc.metric('backupGetRecoveryDataCount', 'number of invocations of BACKUP_GETRECOVERYDATA RPC')
rpc.metric('rpc30Count', 'number of invocations of RPC 30 (undefined)')
rpc.metric('backupStartReadingDataCount', 'number of invocations of BACKUP_STARTREADINGDATA RPC')
rpc.metric('backupWriteCount', 'number of invocations of BACKUP_WRITE RPC')
rpc.metric('backupRecoveryCompleteCount', 'number of invocations of BACKUP_RECOVERYCOMPLETE RPC')
rpc.metric('backupQuiesceCount', 'number of invocations of BACKUP_QUIESCE RPC')
rpc.metric('updateServerListCount', 'number of invocations of UPDATE_SERVER_LIST RPC')
# Fix: this is a Count metric but was described as "time spent executing".
rpc.metric('backupStartPartitionCount', 'number of invocations of BACKUP_STARTPARTITION RPC')
rpc.metric('rpc37Count', 'number of invocations of RPC 37 (undefined)')
rpc.metric('rpc38Count', 'number of invocations of RPC 38 (undefined)')
rpc.metric('dropTabletOwnershipCount', 'number of invocations of DROP_TABLET_OWNERSHIP RPC')
rpc.metric('takeTabletOwnershipCount', 'number of invocations of TAKE_TABLET_OWNERSHIP RPC')
rpc.metric('backupAssignGroupCount', 'number of invocations of BACKUP_ASSIGN_GROUP RPC')
rpc.metric('getHeadOfLogCount', 'number of invocations of GET_HEAD_OF_LOG RPC')
rpc.metric('incrementRpcCount', 'number of invocations of INCREMENT RPC')
rpc.metric('prepForMigrationCount', 'number of invocations of PREP_FOR_MIGRATION RPC')
rpc.metric('receiveMigrationDataCount', 'number of invocations of RECEIVE_MIGRATION_DATA RPC')
rpc.metric('reassignTabletOwnershipCount', 'number of invocations of REASSIGN_TABLET_OWNERSHIP RPC')
rpc.metric('migrateTabletCount', 'number of invocations of MIGRATE_TABLET RPC')
rpc.metric('isReplicaNeededCount', 'number of invocations of IS_REPLICA_NEEDED RPC')
rpc.metric('splitTabletCount', 'number of invocations of SPLIT_TABLET')
rpc.metric('getServerStatisticsCount', 'number of invocations of GET_SERVER_STATISTICS RPC')
rpc.metric('setRuntimeOptionCount', 'number of invocations of SET_RUNTIME_OPTION RPC')
rpc.metric('getServerConfigCount', 'number of invocations of GET_SERVER_CONFIG RPC')
rpc.metric('getLogMetricsCount', 'number of invocations of GET_LOG_METRICS RPC')
rpc.metric('multiWriteCount', 'number of invocations of MULTI_WRITE RPC')
rpc.metric('verifyMembershipCount', 'number of invocations of VERIFY_MEMBERSHIP RPC')
rpc.metric('illegalRpcCount', 'number of invocations of RPCs with illegal opcodes')
rpc.metric('rpc0Ticks', 'time spent executing RPC 0 (undefined)')
rpc.metric('rpc1Ticks', 'time spent executing RPC 1 (undefined)')
rpc.metric('rpc2Ticks', 'time spent executing RPC 2 (undefined)')
rpc.metric('rpc3Ticks', 'time spent executing RPC 3 (undefined)')
rpc.metric('rpc4Ticks', 'time spent executing RPC 4 (undefined)')
rpc.metric('rpc5Ticks', 'time spent executing RPC 5 (undefined)')
rpc.metric('rpc6Ticks', 'time spent executing RPC 6 (undefined)')
rpc.metric('pingTicks', 'time spent executing PING RPC')
rpc.metric('proxyPingTicks', 'time spent executing PROXY_PING RPC')
rpc.metric('killTicks', 'time spent executing KILL RPC')
rpc.metric('createTableTicks', 'time spent executing CREATE_TABLE RPC')
rpc.metric('getTableIdTicks', 'time spent executing GET_TABLE_ID RPC')
rpc.metric('dropTableTicks', 'time spent executing DROP_TABLE RPC')
rpc.metric('readTicks', 'time spent executing READ RPC')
rpc.metric('writeTicks', 'time spent executing WRITE RPC')
rpc.metric('removeTicks', 'time spent executing REMOVE RPC')
rpc.metric('enlistServerTicks', 'time spent executing ENLIST_SERVER RPC')
rpc.metric('getServerListTicks', 'time spent executing GET_SERVER_LIST RPC')
rpc.metric('getTabletMapTicks', 'time spent executing GET_TABLET_MAP RPC')
rpc.metric('recoverTicks', 'time spent executing RECOVER RPC')
rpc.metric('hintServerDownTicks', 'time spent executing HINT_SERVER_DOWN RPC')
rpc.metric('recoveryMasterFinishedTicks', 'time spent executing RECOVERY_MASTER_FINISHED RPC')
rpc.metric('enumerateTicks', 'time spent executing ENUMERATE RPC')
rpc.metric('setMasterRecoveryInfoTicks', 'time spent executing SET_MASTER_RECOVERY_INFO RPC')
rpc.metric('fillWithTestDataTicks', 'time spent executing FILL_WITH_TEST_DATA RPC')
rpc.metric('multiReadTicks', 'time spent executing MULTI_READ RPC')
rpc.metric('getMetricsTicks', 'time spent executing GET_METRICS RPC')
rpc.metric('rpc27Ticks', 'time spent executing RPC 27 (undefined)')
rpc.metric('backupFreeTicks', 'time spent executing BACKUP_FREE RPC')
rpc.metric('backupGetRecoveryDataTicks', 'time spent executing BACKUP_GETRECOVERYDATA RPC')
rpc.metric('rpc30Ticks', 'time spent executing RPC 30 (undefined)')
rpc.metric('backupStartReadingDataTicks', 'time spent executing BACKUP_STARTREADINGDATA RPC')
rpc.metric('backupWriteTicks', 'time spent executing BACKUP_WRITE RPC')
rpc.metric('backupRecoveryCompleteTicks', 'time spent executing BACKUP_RECOVERYCOMPLETE RPC')
rpc.metric('backupQuiesceTicks', 'time spent executing BACKUP_QUIESCE RPC')
# NOTE(review): the *Count list above has no setServerListCount entry, so
# the Ticks list is one entry longer than the Count list here — confirm
# against the RpcOpcode definition in Rpc.h before relying on positional
# pairing of Count/Ticks metrics.
rpc.metric('setServerListTicks', 'time spent executing SET_SERVER_LIST RPC')
# Fix: the next two descriptions were copy-paste errors ("BACKUP_STARTPARTITION"
# on updateServerListTicks, and "RPC 4 (undefined)" on backupStartPartition).
rpc.metric('updateServerListTicks', 'time spent executing UPDATE_SERVER_LIST RPC')
rpc.metric('backupStartPartition', 'time spent executing BACKUP_STARTPARTITION RPC')
rpc.metric('rpc37Ticks', 'time spent executing RPC 37 (undefined)')
rpc.metric('rpc38Ticks', 'time spent executing RPC 38 (undefined)')
# Fix: Ticks metrics were described as "number of invocations".
rpc.metric('dropTabletOwnershipTicks', 'time spent executing DROP_TABLET_OWNERSHIP RPC')
rpc.metric('takeTabletOwnershipTicks', 'time spent executing TAKE_TABLET_OWNERSHIP RPC')
rpc.metric('backupAssignGroupTicks', 'time spent executing BACKUP_ASSIGN_GROUP RPC')
rpc.metric('getHeadOfLogTicks', 'time spent executing GET_HEAD_OF_LOG RPC')
rpc.metric('incrementTicks', 'time spent executing INCREMENT RPC')
rpc.metric('prepForMigrationTicks', 'time spent executing PREP_FOR_MIGRATION RPC')
rpc.metric('receiveMigrationDataTicks', 'time spent executing RECEIVE_MIGRATION_DATA RPC')
rpc.metric('reassignTabletOwnershipTicks', 'time spent executing REASSIGN_TABLET_OWNERSHIP RPC')
rpc.metric('migrateTabletTicks', 'time spent executing MIGRATE_TABLET RPC')
rpc.metric('isReplicaNeededTicks', 'time spent executing IS_REPLICA_NEEDED RPC')
rpc.metric('splitTabletTicks', 'time spent executing SPLIT_TABLET RPC')
rpc.metric('getServerStatisticsTicks', 'time spent executing GET_SERVER_STATISTICS RPC')
rpc.metric('setRuntimeOptionTicks', 'time spent executing SET_RUNTIME_OPTION RPC')
rpc.metric('getServerConfigTicks', 'time spent executing GET_SERVER_CONFIG RPC')
rpc.metric('getLogMetricsTicks', 'time spent executing GET_LOG_METRICS RPC')
rpc.metric('multiWriteTicks', 'time spent executing MULTI_WRITE RPC')
rpc.metric('verifyMembershipTicks', 'time spent executing VERIFY_MEMBERSHIP RPC')
rpc.metric('illegalRpcTicks', 'time spent executing RPCs with illegal opcodes')
# Networking metrics, grouped into the Transport struct below.
transmit = Group('Transmit', 'metrics related to transmitting messages')
transmit.metric('ticks', 'elapsed time transmitting messages')
transmit.metric('messageCount', 'number of messages transmitted')
transmit.metric('packetCount', 'number of packets transmitted')
transmit.metric('iovecCount', 'number of Buffer chunks transmitted')
transmit.metric('byteCount', 'number of bytes transmitted')
transmit.metric('copyTicks', 'elapsed time copying messages')
transmit.metric('dmaTicks', 'elapsed time waiting for DMA to HCA')
receive = Group('Receive', 'metrics related to receiving messages')
receive.metric('ticks', 'elapsed time receiving messages')
receive.metric('messageCount', 'number of messages received')
receive.metric('packetCount', 'number of packets received')
receive.metric('iovecCount', 'number of Buffer chunks received')
receive.metric('byteCount', 'number of bytes received')
infiniband = Group('Infiniband', 'metrics for Infiniband networking')
infiniband.metric('transmitActiveTicks', 'time with packets on the transmit queue')
transport = Group('Transport', 'transport metrics')
transport.group(transmit)
transport.group(receive)
transport.group(infiniband)
transport.metric('sessionOpenTicks',
    'time opening sessions for RPCs')
transport.metric('sessionOpenCount',
    'number of sessions opened for RPCs')
transport.metric('sessionOpenSquaredTicks',
    'used for calculating the standard deviation of sessionOpenTicks')
# Fix: description previously read "member of timeouts".
transport.metric('retrySessionOpenCount',
    'number of timeouts during session open')
transport.metric('clientRpcsActiveTicks',
    'time with a client RPC active on the network')
# Ten pairs of scratch metrics for ad-hoc instrumentation.
temp = Group('Temp', 'metrics for temporary use')
for i in range(10):
    temp.metric('ticks{0:}'.format(i),'amount of time for some undefined activity')
    temp.metric('count{0:}'.format(i),'number of occurrences of some undefined event')
# Root of the metric tree; becomes the C++ RawMetrics object itself.
definitions = Group('RawMetrics', 'server metrics')
definitions.group(coordinator);
definitions.group(master);
definitions.group(backup);
definitions.group(rpc);
definitions.group(transport);
definitions.group(temp);
definitions.metric('serverId', 'server id assigned by coordinator')
definitions.metric('pid', 'process ID on machine')
definitions.metric('clockFrequency', 'cycles per second for the cpu')
definitions.metric('segmentSize','size in bytes of segments')
def writeBuildFiles(definitions):
    """Generate RawMetrics.in.cc and RawMetrics.in.h (under obj_dir, which
    comes from the star-imported `common` module) from the metric tree
    rooted at *definitions*."""
    counter = Counter()
    cc = Out(open('%s/RawMetrics.in.cc' % obj_dir, 'w'))
    cc('// This file was automatically generated by scripts/rawmetrics.py.')
    cc('// Do not edit it.')
    cc('namespace RAMCloud {')
    cc('RawMetrics::MetricInfo RawMetrics::metricInfo(int i)\n{')
    cc(' switch (i) {')
    # Emits one "case" per metric, numbering them via counter.
    definitions.dump_metric_info_code(cc, '', counter)
    cc(' }')
    cc(' return {NULL, NULL};')
    cc('}')
    cc('} // namespace RAMCloud')
    h = Out(open('%s/RawMetrics.in.h' % obj_dir, 'w'))
    h('// This file was automatically generated by scripts/rawmetrics.py.')
    h('// Do not edit it.')
    definitions.dump_header(h)
    # counter now holds the total number of metrics emitted above.
    h(' static const int numMetrics = %d;' % (counter.value()))
# Generate the RawMetrics.in.* files when run as a script.
if __name__ == '__main__':
    writeBuildFiles(definitions)
    sys.exit()
|
|
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import numpy as np
import scipy.sparse as sp
# Version of numpy as a comparable tuple, e.g. (1, 8, 1).  Numeric
# components become ints; anything else (e.g. 'dev-1ea1592') stays a string.
_np_version_parts = []
for _piece in np.__version__.split('.'):
    try:
        _np_version_parts.append(int(_piece))
    except ValueError:
        # _piece may be of the form dev-1ea1592
        _np_version_parts.append(_piece)
np_version = tuple(_np_version_parts)
# Prefer scipy's expit; SciPy < 0.14's version overflows for large inputs
# (expit(1000) -> nan), in which case we fall back to a stable tanh-based
# implementation.
try:
    from scipy.special import expit # SciPy >= 0.10
    with np.errstate(invalid='ignore', over='ignore'):
        if np.isnan(expit(1000)): # SciPy < 0.14
            raise ImportError("no stable expit in scipy.special")
except ImportError:
    def expit(x, out=None):
        """Logistic sigmoid function, ``1 / (1 + exp(-x))``.
        See sklearn.utils.extmath.log_logistic for the log of this function.
        """
        if out is None:
            out = np.copy(x)
        # 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
        # This way of computing the logistic is both fast and stable.
        out *= .5
        np.tanh(out, out)
        out += 1
        out *= .5
        return out
# Little dance to see if np.copy has an 'order' keyword argument.
# Fix: inspect.getargspec was removed in Python 3.11, which made this
# module fail to import; fall back to inspect.signature there.
try:
    _np_copy_args = inspect.getargspec(np.copy)[0]
except AttributeError:  # Python >= 3.11
    _np_copy_args = list(inspect.signature(np.copy).parameters)
if 'order' in _np_copy_args:
    def safe_copy(X):
        """Copy X, preserving its memory layout ('K' order)."""
        return np.copy(X, order='K')
else:
    # Before an 'order' argument was introduced, numpy wouldn't muck with
    # the ordering
    safe_copy = np.copy
# Probe whether np.divide honors the `dtype` argument correctly
# (broken in some old numpy releases); if not, provide a shim.
try:
    if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
                        np.divide(.4, 1, casting="unsafe", dtype=np.float))
            or not np.allclose(np.divide(.4, 1), .4)):
        raise TypeError('Divide not working with dtype: '
                        'https://github.com/numpy/numpy/issues/3484')
    divide = np.divide

except TypeError:
    # Compat for old versions of np.divide that do not provide support for
    # the dtype args
    def divide(x1, x2, out=None, dtype=None):
        """Division shim honoring `out` and `dtype` like modern np.divide.

        Mirrors np.divide's aliasing rules: when `out` is x1 the division is
        done in place; otherwise x1 is copied/cast first.
        """
        out_orig = out
        if out is None:
            out = np.asarray(x1, dtype=dtype)
            # asarray may return x1 itself; copy so we don't clobber the input.
            if out is x1:
                out = x1.copy()
        else:
            if out is not x1:
                out[:] = x1
        if dtype is not None and out.dtype != dtype:
            out = out.astype(dtype)
        out /= x2
        # Preserve scalar-in/scalar-out semantics when no `out` was given.
        if out_orig is None and np.isscalar(x1):
            out = np.asscalar(out)
        return out
# Probe whether ndarray.astype accepts a `copy` argument (numpy >= 1.7).
try:
    np.array(5).astype(float, copy=False)
except TypeError:
    # Compat where astype accepted no copy argument
    def astype(array, dtype, copy=True):
        """astype shim: avoid a copy when the dtype already matches."""
        if array.dtype == dtype:
            return array
        return array.astype(dtype)
else:
    astype = np.ndarray.astype
# Probe whether sparse matrices support axis-wise min/max (scipy >= 0.14).
try:
    sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
    # the following code is taken from the scipy 0.14 codebase

    def _minor_reduce(X, ufunc):
        """Apply `ufunc.reduceat` along the minor axis of a CSR/CSC matrix.

        Returns the indices of non-empty major-axis rows/cols and the
        reduced value for each.
        """
        major_index = np.flatnonzero(np.diff(X.indptr))
        if X.data.size == 0 and major_index.size == 0:
            # Numpy < 1.8.0 don't handle empty arrays in reduceat
            value = np.zeros_like(X.data)
        else:
            value = ufunc.reduceat(X.data, X.indptr[major_index])
        return major_index, value

    def _min_or_max_axis(X, axis, min_or_max):
        """Compute min or max of sparse X along `axis`, returning a dense 1-d array."""
        N = X.shape[axis]
        if N == 0:
            raise ValueError("zero-size array to reduction operation")
        M = X.shape[1 - axis]
        # Orient the matrix so the reduction axis is the minor axis.
        mat = X.tocsc() if axis == 0 else X.tocsr()
        mat.sum_duplicates()
        major_index, value = _minor_reduce(mat, min_or_max)
        # Rows/cols with stored entries but also implicit zeros must include 0.
        not_full = np.diff(mat.indptr)[major_index] < N
        value[not_full] = min_or_max(value[not_full], 0)
        mask = value != 0
        major_index = np.compress(mask, major_index)
        value = np.compress(mask, value)

        from scipy.sparse import coo_matrix
        if axis == 0:
            res = coo_matrix((value, (np.zeros(len(value)), major_index)),
                             dtype=X.dtype, shape=(1, M))
        else:
            res = coo_matrix((value, (major_index, np.zeros(len(value)))),
                             dtype=X.dtype, shape=(M, 1))
        return res.A.ravel()

    def _sparse_min_or_max(X, axis, min_or_max):
        """Reduce sparse X with `min_or_max` over `axis` (or globally if None)."""
        if axis is None:
            if 0 in X.shape:
                raise ValueError("zero-size array to reduction operation")
            zero = X.dtype.type(0)
            if X.nnz == 0:
                return zero
            m = min_or_max.reduce(X.data.ravel())
            # Implicit zeros participate in the reduction too.
            if X.nnz != np.product(X.shape):
                m = min_or_max(zero, m)
            return m
        if axis < 0:
            axis += 2
        if (axis == 0) or (axis == 1):
            return _min_or_max_axis(X, axis, min_or_max)
        else:
            raise ValueError("invalid axis, use 0 for rows, or 1 for columns")

    def sparse_min_max(X, axis):
        """Return (min, max) of sparse matrix X along `axis` as dense arrays."""
        return (_sparse_min_or_max(X, axis, np.minimum),
                _sparse_min_or_max(X, axis, np.maximum))

else:
    def sparse_min_max(X, axis):
        """Return (min, max) of sparse matrix X along `axis` as dense arrays."""
        return (X.min(axis=axis).toarray().ravel(),
                X.max(axis=axis).toarray().ravel())
try:
    from numpy import argpartition
except ImportError:
    # numpy.argpartition was introduced in v 1.8.0
    def argpartition(a, kth, axis=-1, kind='introselect', order=None):
        # Fallback: a full argsort satisfies argpartition's contract
        # (slower — O(n log n) instead of O(n) — but fully sorted indices
        # are a valid partition; `kth` and `kind` are ignored).
        return np.argsort(a, axis=axis, order=order)
try:
    from itertools import combinations_with_replacement
except ImportError:
    # Backport of itertools.combinations_with_replacement for Python 2.6,
    # from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
    # Python Software Foundation (https://docs.python.org/3/license.html)
    def combinations_with_replacement(iterable, r):
        """Yield r-length tuples from `iterable` allowing repeated elements,
        in lexicographic order by index."""
        # combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
        pool = tuple(iterable)
        n = len(pool)
        if not n and r:
            return
        # `indices` holds the current combination as positions into `pool`.
        indices = [0] * r
        yield tuple(pool[i] for i in indices)
        while True:
            # Find the rightmost index that can still be advanced.
            for i in reversed(range(r)):
                if indices[i] != n - 1:
                    break
            else:
                return
            indices[i:] = [indices[i] + 1] * (r - i)
            yield tuple(pool[i] for i in indices)
try:
    from numpy import isclose
except ImportError:
    def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
        """
        Returns a boolean array where two arrays are element-wise equal within
        a tolerance.

        This function was added to numpy v1.7.0, and the version you are
        running has been backported from numpy v1.8.1. See its documentation
        for more details.
        """
        def within_tol(x, y, atol, rtol):
            # |x - y| <= atol + rtol * |y|; invalid-value warnings suppressed
            # because NaN comparisons are handled by the caller.
            with np.errstate(invalid='ignore'):
                result = np.less_equal(abs(x-y), atol + rtol * abs(y))
            if np.isscalar(a) and np.isscalar(b):
                result = bool(result)
            return result

        x = np.array(a, copy=False, subok=True, ndmin=1)
        y = np.array(b, copy=False, subok=True, ndmin=1)
        xfin = np.isfinite(x)
        yfin = np.isfinite(y)
        if all(xfin) and all(yfin):
            return within_tol(x, y, atol, rtol)
        else:
            finite = xfin & yfin
            cond = np.zeros_like(finite, subok=True)
            # Since we're using boolean indexing, x & y must be the same shape.
            # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
            # lib.stride_tricks, though, so we can't import it here.
            x = x * np.ones_like(cond)
            y = y * np.ones_like(cond)
            # Avoid subtraction with infinite/nan values...
            cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
            # Check for equality of infinite values...
            cond[~finite] = (x[~finite] == y[~finite])
            if equal_nan:
                # Make NaN == NaN
                cond[np.isnan(x) & np.isnan(y)] = True
            return cond
|
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
import shutil
from collections import defaultdict
from textwrap import dedent
from pex.interpreter import PythonInterpreter
from pants.backend.python.targets.python_target import PythonTarget
from pants.base.exceptions import TaskError
from pants.process.lock import OwnerPrintingInterProcessFileLock
from pants.python.python_setup import PythonSetup
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir
from pants.util.memo import memoized_property
logger = logging.getLogger(__name__)
# TODO: Move under subsystems/ .
class PythonInterpreterCache(Subsystem):
    """Finds Python interpreters on the local system.

    Discovered interpreters are cached as directories (named by interpreter
    identity) under the PythonSetup interpreter cache dir, each containing a
    `python` symlink to the real binary.
    """

    options_scope = "python-interpreter-cache"

    @classmethod
    def subsystem_dependencies(cls):
        return super().subsystem_dependencies() + (PythonSetup,)

    class UnsatisfiableInterpreterConstraintsError(TaskError):
        """Indicates a Python interpreter matching given constraints could not be located."""

    @staticmethod
    def _matches(interpreter, filters=()):
        # Empty `filters` means "match everything".
        return not filters or any(interpreter.identity.matches(filt) for filt in filters)

    @classmethod
    def _matching(cls, interpreters, filters=()):
        # Lazily yield only the interpreters that satisfy the filters.
        for interpreter in interpreters:
            if cls._matches(interpreter, filters=filters):
                yield interpreter

    @property
    def python_setup(self):
        return PythonSetup.global_instance()

    @memoized_property
    def _cache_dir(self):
        # Created on first access; memoized thereafter.
        cache_dir = self.python_setup.interpreter_cache_dir
        safe_mkdir(cache_dir)
        return cache_dir

    def partition_targets_by_compatibility(self, targets):
        """Partition targets by their compatibility constraints.

        :param targets: a list of `PythonTarget` objects
        :returns: (tgts_by_compatibilities, filters): a dict that maps compatibility constraints
            to a list of matching targets, the aggregate set of compatibility constraints imposed
            by the target set
        :rtype: (dict(str, list), set)
        """
        tgts_by_compatibilities = defaultdict(list)
        filters = set()

        for target in targets:
            # Non-Python targets impose no interpreter constraints.
            if isinstance(target, PythonTarget):
                c = self.python_setup.compatibility_or_constraints(target.compatibility)
                tgts_by_compatibilities[c].append(target)
                filters.update(c)
        return tgts_by_compatibilities, filters

    def select_interpreter_for_targets(self, targets):
        """Pick an interpreter compatible with all the specified targets.

        :raises UnsatisfiableInterpreterConstraintsError: if no single
            discovered interpreter satisfies every target's constraints.
        """
        tgts_by_compatibilities, total_filter_set = self.partition_targets_by_compatibility(targets)
        allowed_interpreters = set(self.setup(filters=total_filter_set))

        # Constrain allowed_interpreters based on each target's compatibility requirements.
        for compatibility in tgts_by_compatibilities:
            compatible_with_target = set(self._matching(allowed_interpreters, compatibility))
            allowed_interpreters &= compatible_with_target

        if not allowed_interpreters:
            # Create a helpful error message.
            all_interpreter_version_strings = sorted(
                {
                    interpreter.version_string
                    # NB: self.setup() requires filters to be passed, or else it will use the global interpreter
                    # constraints. We allow any interpreter other than CPython 3.0-3.3, which is known to choke
                    # with Pants.
                    for interpreter in self.setup(filters=("CPython<3", "CPython>=3.3", "PyPy"))
                }
            )
            unique_compatibilities = {tuple(c) for c in tgts_by_compatibilities.keys()}
            unique_compatibilities_strs = [",".join(x) for x in unique_compatibilities if x]
            tgts_by_compatibilities_strs = [
                t[0].address.spec for t in tgts_by_compatibilities.values()
            ]
            raise self.UnsatisfiableInterpreterConstraintsError(
                dedent(
                    """\
                    Unable to detect a suitable interpreter for compatibilities: {} (Conflicting targets: {})

                    Pants detected these interpreter versions on your system: {}

                    Possible ways to fix this:
                    * Modify your Python interpreter constraints by following https://www.pantsbuild.org/python_readme.html#configure-the-python-version.
                    * Ensure the targeted Python version is installed and discoverable.
                    * Modify Pants' interpreter search paths via --python-setup-interpreter-search-paths.""".format(
                        " && ".join(sorted(unique_compatibilities_strs)),
                        ", ".join(tgts_by_compatibilities_strs),
                        ", ".join(all_interpreter_version_strings),
                    )
                )
            )
        # Return the lowest compatible interpreter.
        return min(allowed_interpreters)

    def _interpreter_from_relpath(self, path, filters=()):
        # Resolve a cache entry (directory containing a `python` symlink)
        # back to a PythonInterpreter; returns None if the entry is missing,
        # stale (target binary gone — entry is purged), or filtered out.
        path = os.path.join(self._cache_dir, path)
        try:
            executable = os.readlink(os.path.join(path, "python"))
            if not os.path.exists(executable):
                self._purge_interpreter(path)
                return None
        except OSError:
            return None
        interpreter = PythonInterpreter.from_binary(executable)
        if self._matches(interpreter, filters=filters):
            return interpreter
        return None

    def _setup_interpreter(self, interpreter, identity_str):
        # Record `interpreter` in the cache under `identity_str` by symlinking
        # its binary; creation is concurrency-safe via a temp dir + rename.
        cache_target_path = os.path.join(self._cache_dir, identity_str)
        with safe_concurrent_creation(cache_target_path) as safe_path:
            os.mkdir(
                safe_path
            )  # Parent will already have been created by safe_concurrent_creation.
            os.symlink(interpreter.binary, os.path.join(safe_path, "python"))
            return interpreter

    def _setup_cached(self, filters=()):
        """Find all currently-cached interpreters."""
        for interpreter_dir in os.listdir(self._cache_dir):
            pi = self._interpreter_from_relpath(interpreter_dir, filters=filters)
            if pi:
                logger.debug(f"Detected interpreter {pi.binary}: {pi.identity}")
                yield pi

    def _setup_paths(self, paths, filters=()):
        """Find interpreters under paths, and cache them."""
        for interpreter in self._matching(PythonInterpreter.all(paths), filters=filters):
            identity_str = str(interpreter.identity)
            pi = self._interpreter_from_relpath(identity_str, filters=filters)
            if pi is None:
                # Not cached yet: cache it, then re-read through the cache to
                # validate the entry.
                self._setup_interpreter(interpreter, identity_str)
                pi = self._interpreter_from_relpath(identity_str, filters=filters)
            if pi:
                yield pi

    def setup(self, filters=()):
        """Sets up a cache of python interpreters.

        :param filters: A sequence of strings that constrain the interpreter compatibility for this
            cache, using the Requirement-style format, e.g. ``'CPython>=3', or just ['>=2.7','<3']``
            for requirements agnostic to interpreter class.
        :returns: A list of cached interpreters
        :rtype: list of :class:`pex.interpreter.PythonInterpreter`
        """
        # We filter the interpreter cache itself (and not just the interpreters we pull from it)
        # because setting up some python versions (e.g., 3<=python<3.3) crashes, and this gives us
        # an escape hatch.
        filters = filters if any(filters) else self.python_setup.interpreter_constraints
        setup_paths = self.python_setup.interpreter_search_paths
        logger.debug(
            f"Initializing Python interpreter cache matching filters `{':'.join(filters)}` "
            f"from paths `{':'.join(setup_paths)}`"
        )

        interpreters = []

        def unsatisfied_filters():
            # Filters that no currently-known interpreter matches.
            return [f for f in filters if len(list(self._matching(interpreters, [f]))) == 0]

        with OwnerPrintingInterProcessFileLock(path=os.path.join(self._cache_dir, ".file_lock")):
            interpreters.extend(self._setup_cached(filters=filters))
            # Only scan the search paths when the cache alone can't satisfy
            # every filter.
            if not interpreters or unsatisfied_filters():
                interpreters.extend(self._setup_paths(setup_paths, filters=filters))

        for filt in unsatisfied_filters():
            logger.debug(f"No valid interpreters found for {filt}!")

        matches = list(self._matching(interpreters, filters=filters))
        if len(matches) == 0:
            logger.debug("Found no valid interpreters!")

        logger.debug(
            "Initialized Python interpreter cache with {}".format(
                ", ".join([x.binary for x in matches])
            )
        )
        return matches

    def _purge_interpreter(self, interpreter_dir):
        # Best-effort removal of a stale cache entry; failures are logged,
        # never raised.
        try:
            logger.info(
                f"Detected stale interpreter `{interpreter_dir}` in the interpreter cache, "
                f"purging."
            )
            shutil.rmtree(interpreter_dir, ignore_errors=True)
        except Exception as e:
            logger.warning(
                f"Caught exception {e!r} during interpreter purge. "
                f"Please run `{self.get_options().pants_bin_name} clean-all`!"
            )
|
|
import sys
from alembic.testing import TestBase
from sqlalchemy import MetaData, Column, Table, Integer, String, \
ForeignKeyConstraint
from alembic.testing import eq_
# True when running under Python 3.x or newer.
py3k = sys.version_info >= (3, )
from ._autogen_fixtures import AutogenFixtureTest
class AutogenerateForeignKeysTest(AutogenFixtureTest, TestBase):
    """Autogenerate comparisons of foreign key constraints between two
    MetaData states (m1 = existing schema, m2 = desired schema)."""

    __backend__ = True

    def test_remove_fk(self):
        """FK present in m1 but absent in m2 -> one remove_fk diff."""
        m1 = MetaData()
        m2 = MetaData()

        Table('table', m1,
              Column('test', String(10), primary_key=True),
              mysql_engine='InnoDB')

        Table('user', m1,
              Column('id', Integer, primary_key=True),
              Column('name', String(50), nullable=False),
              Column('a1', String(10), server_default="x"),
              Column('test2', String(10)),
              ForeignKeyConstraint(['test2'], ['table.test']),
              mysql_engine='InnoDB')

        Table('table', m2,
              Column('test', String(10), primary_key=True),
              mysql_engine='InnoDB')

        Table('user', m2,
              Column('id', Integer, primary_key=True),
              Column('name', String(50), nullable=False),
              Column('a1', String(10), server_default="x"),
              Column('test2', String(10)),
              mysql_engine='InnoDB'
              )

        diffs = self._fixture(m1, m2)

        self._assert_fk_diff(
            diffs[0], "remove_fk",
            "user", ['test2'],
            'table', ['test'],
            # Name may be generated by the server, so match conditionally.
            conditional_name="servergenerated"
        )

    def test_add_fk(self):
        """FK present only in m2 -> one add_fk diff."""
        m1 = MetaData()
        m2 = MetaData()

        Table('table', m1,
              Column('id', Integer, primary_key=True),
              Column('test', String(10)),
              mysql_engine='InnoDB')

        Table('user', m1,
              Column('id', Integer, primary_key=True),
              Column('name', String(50), nullable=False),
              Column('a1', String(10), server_default="x"),
              Column('test2', String(10)),
              mysql_engine='InnoDB')

        Table('table', m2,
              Column('id', Integer, primary_key=True),
              Column('test', String(10)),
              mysql_engine='InnoDB')

        Table('user', m2,
              Column('id', Integer, primary_key=True),
              Column('name', String(50), nullable=False),
              Column('a1', String(10), server_default="x"),
              Column('test2', String(10)),
              ForeignKeyConstraint(['test2'], ['table.test']),
              mysql_engine='InnoDB')

        diffs = self._fixture(m1, m2)

        self._assert_fk_diff(
            diffs[0], "add_fk",
            "user", ["test2"],
            "table", ["test"]
        )

    def test_no_change(self):
        """Identical single-column FK in m1 and m2 -> no diffs."""
        m1 = MetaData()
        m2 = MetaData()

        Table('table', m1,
              Column('id', Integer, primary_key=True),
              Column('test', String(10)),
              mysql_engine='InnoDB')

        Table('user', m1,
              Column('id', Integer, primary_key=True),
              Column('name', String(50), nullable=False),
              Column('a1', String(10), server_default="x"),
              Column('test2', Integer),
              ForeignKeyConstraint(['test2'], ['table.id']),
              mysql_engine='InnoDB')

        Table('table', m2,
              Column('id', Integer, primary_key=True),
              Column('test', String(10)),
              mysql_engine='InnoDB')

        Table('user', m2,
              Column('id', Integer, primary_key=True),
              Column('name', String(50), nullable=False),
              Column('a1', String(10), server_default="x"),
              Column('test2', Integer),
              ForeignKeyConstraint(['test2'], ['table.id']),
              mysql_engine='InnoDB')

        diffs = self._fixture(m1, m2)

        eq_(diffs, [])

    def test_no_change_composite_fk(self):
        """Identical composite FK in m1 and m2 -> no diffs."""
        m1 = MetaData()
        m2 = MetaData()

        Table('table', m1,
              Column('id_1', String(10), primary_key=True),
              Column('id_2', String(10), primary_key=True),
              mysql_engine='InnoDB')

        Table('user', m1,
              Column('id', Integer, primary_key=True),
              Column('name', String(50), nullable=False),
              Column('a1', String(10), server_default="x"),
              Column('other_id_1', String(10)),
              Column('other_id_2', String(10)),
              ForeignKeyConstraint(['other_id_1', 'other_id_2'],
                                   ['table.id_1', 'table.id_2']),
              mysql_engine='InnoDB')

        Table('table', m2,
              Column('id_1', String(10), primary_key=True),
              Column('id_2', String(10), primary_key=True),
              mysql_engine='InnoDB'
              )

        Table('user', m2,
              Column('id', Integer, primary_key=True),
              Column('name', String(50), nullable=False),
              Column('a1', String(10), server_default="x"),
              Column('other_id_1', String(10)),
              Column('other_id_2', String(10)),
              ForeignKeyConstraint(['other_id_1', 'other_id_2'],
                                   ['table.id_1', 'table.id_2']),
              mysql_engine='InnoDB')

        diffs = self._fixture(m1, m2)

        eq_(diffs, [])

    def test_add_composite_fk_with_name(self):
        """Named composite FK only in m2 -> add_fk diff carrying the name."""
        m1 = MetaData()
        m2 = MetaData()

        Table('table', m1,
              Column('id_1', String(10), primary_key=True),
              Column('id_2', String(10), primary_key=True),
              mysql_engine='InnoDB')

        Table('user', m1,
              Column('id', Integer, primary_key=True),
              Column('name', String(50), nullable=False),
              Column('a1', String(10), server_default="x"),
              Column('other_id_1', String(10)),
              Column('other_id_2', String(10)),
              mysql_engine='InnoDB')

        Table('table', m2,
              Column('id_1', String(10), primary_key=True),
              Column('id_2', String(10), primary_key=True),
              mysql_engine='InnoDB')

        Table('user', m2,
              Column('id', Integer, primary_key=True),
              Column('name', String(50), nullable=False),
              Column('a1', String(10), server_default="x"),
              Column('other_id_1', String(10)),
              Column('other_id_2', String(10)),
              ForeignKeyConstraint(['other_id_1', 'other_id_2'],
                                   ['table.id_1', 'table.id_2'],
                                   name='fk_test_name'),
              mysql_engine='InnoDB')

        diffs = self._fixture(m1, m2)
        self._assert_fk_diff(
            diffs[0], "add_fk",
            "user", ['other_id_1', 'other_id_2'],
            'table', ['id_1', 'id_2'],
            name="fk_test_name"
        )

    def test_remove_composite_fk(self):
        """Named composite FK only in m1 -> remove_fk diff."""
        m1 = MetaData()
        m2 = MetaData()

        Table('table', m1,
              Column('id_1', String(10), primary_key=True),
              Column('id_2', String(10), primary_key=True),
              mysql_engine='InnoDB')

        Table('user', m1,
              Column('id', Integer, primary_key=True),
              Column('name', String(50), nullable=False),
              Column('a1', String(10), server_default="x"),
              Column('other_id_1', String(10)),
              Column('other_id_2', String(10)),
              ForeignKeyConstraint(['other_id_1', 'other_id_2'],
                                   ['table.id_1', 'table.id_2'],
                                   name='fk_test_name'),
              mysql_engine='InnoDB')

        Table('table', m2,
              Column('id_1', String(10), primary_key=True),
              Column('id_2', String(10), primary_key=True),
              mysql_engine='InnoDB')

        Table('user', m2,
              Column('id', Integer, primary_key=True),
              Column('name', String(50), nullable=False),
              Column('a1', String(10), server_default="x"),
              Column('other_id_1', String(10)),
              Column('other_id_2', String(10)),
              mysql_engine='InnoDB')

        diffs = self._fixture(m1, m2)

        self._assert_fk_diff(
            diffs[0], "remove_fk",
            "user", ['other_id_1', 'other_id_2'],
            "table", ['id_1', 'id_2'],
            conditional_name="fk_test_name"
        )

    def test_add_fk_colkeys(self):
        """FK referencing columns via .key attributes still diffs by column name."""
        m1 = MetaData()
        m2 = MetaData()

        Table('table', m1,
              Column('id_1', String(10), primary_key=True),
              Column('id_2', String(10), primary_key=True),
              mysql_engine='InnoDB')

        Table('user', m1,
              Column('id', Integer, primary_key=True),
              Column('other_id_1', String(10)),
              Column('other_id_2', String(10)),
              mysql_engine='InnoDB')

        Table('table', m2,
              Column('id_1', String(10), key='tid1', primary_key=True),
              Column('id_2', String(10), key='tid2', primary_key=True),
              mysql_engine='InnoDB')

        Table('user', m2,
              Column('id', Integer, primary_key=True),
              Column('other_id_1', String(10), key='oid1'),
              Column('other_id_2', String(10), key='oid2'),
              ForeignKeyConstraint(['oid1', 'oid2'],
                                   ['table.tid1', 'table.tid2'],
                                   name='fk_test_name'),
              mysql_engine='InnoDB')

        diffs = self._fixture(m1, m2)

        self._assert_fk_diff(
            diffs[0], "add_fk",
            "user", ['other_id_1', 'other_id_2'],
            'table', ['id_1', 'id_2'],
            name="fk_test_name"
        )

    def test_no_change_colkeys(self):
        """Equivalent FKs expressed via .key attributes -> no diffs."""
        m1 = MetaData()
        m2 = MetaData()

        Table('table', m1,
              Column('id_1', String(10), primary_key=True),
              Column('id_2', String(10), primary_key=True),
              mysql_engine='InnoDB')

        Table('user', m1,
              Column('id', Integer, primary_key=True),
              Column('other_id_1', String(10)),
              Column('other_id_2', String(10)),
              ForeignKeyConstraint(['other_id_1', 'other_id_2'],
                                   ['table.id_1', 'table.id_2']),
              mysql_engine='InnoDB')

        Table('table', m2,
              Column('id_1', String(10), key='tid1', primary_key=True),
              Column('id_2', String(10), key='tid2', primary_key=True),
              mysql_engine='InnoDB')

        Table('user', m2,
              Column('id', Integer, primary_key=True),
              Column('other_id_1', String(10), key='oid1'),
              Column('other_id_2', String(10), key='oid2'),
              ForeignKeyConstraint(['oid1', 'oid2'],
                                   ['table.tid1', 'table.tid2']),
              mysql_engine='InnoDB')

        diffs = self._fixture(m1, m2)

        eq_(diffs, [])
class IncludeHooksTest(AutogenFixtureTest, TestBase):
    """Tests for the `include_object` hook filtering foreign key constraints
    out of the autogenerate comparison."""

    __backend__ = True
    __requires__ = 'fk_names',

    def test_remove_connection_fk(self):
        """A reflected FK excluded by include_object is not diffed;
        the remaining one yields remove_fk."""
        m1 = MetaData()
        m2 = MetaData()

        ref = Table(
            'ref', m1, Column('id', Integer, primary_key=True),
            mysql_engine='InnoDB')
        t1 = Table(
            't', m1, Column('x', Integer), Column('y', Integer),
            mysql_engine='InnoDB')
        t1.append_constraint(
            ForeignKeyConstraint([t1.c.x], [ref.c.id], name="fk1")
        )
        t1.append_constraint(
            ForeignKeyConstraint([t1.c.y], [ref.c.id], name="fk2")
        )

        ref = Table(
            'ref', m2, Column('id', Integer, primary_key=True),
            mysql_engine='InnoDB')
        Table(
            't', m2, Column('x', Integer), Column('y', Integer),
            mysql_engine='InnoDB')

        def include_object(object_, name, type_, reflected, compare_to):
            # Filter out the reflected constraint named 'fk1' only.
            return not (
                isinstance(object_, ForeignKeyConstraint) and
                type_ == 'foreign_key_constraint'
                and reflected and name == 'fk1')

        diffs = self._fixture(m1, m2, object_filters=[include_object])

        self._assert_fk_diff(
            diffs[0], "remove_fk",
            't', ['y'], 'ref', ['id'],
            conditional_name='fk2'
        )
        eq_(len(diffs), 1)

    def test_add_metadata_fk(self):
        """A metadata (non-reflected) FK excluded by include_object is not
        added; the remaining one yields add_fk."""
        m1 = MetaData()
        m2 = MetaData()

        Table(
            'ref', m1,
            Column('id', Integer, primary_key=True), mysql_engine='InnoDB')
        Table(
            't', m1,
            Column('x', Integer), Column('y', Integer), mysql_engine='InnoDB')

        ref = Table(
            'ref', m2, Column('id', Integer, primary_key=True),
            mysql_engine='InnoDB')
        t2 = Table(
            't', m2, Column('x', Integer), Column('y', Integer),
            mysql_engine='InnoDB')
        t2.append_constraint(
            ForeignKeyConstraint([t2.c.x], [ref.c.id], name="fk1")
        )
        t2.append_constraint(
            ForeignKeyConstraint([t2.c.y], [ref.c.id], name="fk2")
        )

        def include_object(object_, name, type_, reflected, compare_to):
            # Filter out the non-reflected constraint named 'fk1' only.
            return not (
                isinstance(object_, ForeignKeyConstraint) and
                type_ == 'foreign_key_constraint'
                and not reflected and name == 'fk1')

        diffs = self._fixture(m1, m2, object_filters=[include_object])

        self._assert_fk_diff(
            diffs[0], "add_fk",
            't', ['y'], 'ref', ['id'],
            name='fk2'
        )
        eq_(len(diffs), 1)

    def test_change_fk(self):
        """A changed FK excluded by name produces no diff; the other changed
        FK produces a remove_fk + add_fk pair."""
        m1 = MetaData()
        m2 = MetaData()

        r1a = Table(
            'ref_a', m1,
            Column('a', Integer, primary_key=True),
            mysql_engine='InnoDB'
        )
        Table(
            'ref_b', m1,
            Column('a', Integer, primary_key=True),
            Column('b', Integer, primary_key=True),
            mysql_engine='InnoDB'
        )
        t1 = Table(
            't', m1, Column('x', Integer),
            Column('y', Integer), Column('z', Integer),
            mysql_engine='InnoDB')
        t1.append_constraint(
            ForeignKeyConstraint([t1.c.x], [r1a.c.a], name="fk1")
        )
        t1.append_constraint(
            ForeignKeyConstraint([t1.c.y], [r1a.c.a], name="fk2")
        )

        Table(
            'ref_a', m2,
            Column('a', Integer, primary_key=True),
            mysql_engine='InnoDB'
        )
        r2b = Table(
            'ref_b', m2,
            Column('a', Integer, primary_key=True),
            Column('b', Integer, primary_key=True),
            mysql_engine='InnoDB'
        )
        t2 = Table(
            't', m2, Column('x', Integer),
            Column('y', Integer), Column('z', Integer),
            mysql_engine='InnoDB')
        t2.append_constraint(
            ForeignKeyConstraint(
                [t2.c.x, t2.c.z], [r2b.c.a, r2b.c.b], name="fk1")
        )
        t2.append_constraint(
            ForeignKeyConstraint(
                [t2.c.y, t2.c.z], [r2b.c.a, r2b.c.b], name="fk2")
        )

        def include_object(object_, name, type_, reflected, compare_to):
            # Filter out any FK constraint named 'fk1', reflected or not.
            return not (
                isinstance(object_, ForeignKeyConstraint) and
                type_ == 'foreign_key_constraint'
                and name == 'fk1'
            )

        diffs = self._fixture(m1, m2, object_filters=[include_object])

        self._assert_fk_diff(
            diffs[0], "remove_fk",
            't', ['y'], 'ref_a', ['a'],
            name='fk2'
        )
        self._assert_fk_diff(
            diffs[1], "add_fk",
            't', ['y', 'z'], 'ref_b', ['a', 'b'],
            name='fk2'
        )
        eq_(len(diffs), 2)
|
|
from django.test import TestCase
from django.conf import settings
from restclients.nws import NWS
from restclients.exceptions import DataFailureException, InvalidUUID, InvalidNetID
from vm.v1.viewmodels import Channel, Endpoint, Subscription
from unittest2 import skipIf
class NWSTestSubscription(TestCase):
def test_subscriptions_channel_id(self):
with self.settings(
RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.File'):
nws = NWS()
subscriptions = nws.get_subscriptions_by_channel_id("b779df7b-d6f6-4afb-8165-8dbe6232119f")
self.assertEquals(len(subscriptions), 5)
def test_subscriptions_subscriber_id(self):
with self.settings(
RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.File'):
nws = NWS()
subscriptions = nws.get_subscriptions_by_subscriber_id("javerage", "10")
self.assertEquals(len(subscriptions), 5)
def test_subscriptions_channel_id_subscriber_id(self):
with self.settings(
RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.File'):
nws = NWS()
subscriptions = nws.get_subscriptions_by_channel_id_and_subscriber_id("b779df7b-d6f6-4afb-8165-8dbe6232119f", "javerage")
self.assertEquals(len(subscriptions), 5)
def test_create_subscription(self):
with self.settings(
RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.File'):
subscription = Subscription()
subscription.subscription_id = "c4597f93-0f62-4feb-ac88-af5f0329001f"
subscription.endpoint = Endpoint()
subscription.endpoint.endpoint_address = "javerage0@uw.edu"
subscription.endpoint.protocol = "Email"
subscription.endpoint.subscriber_id = "javerage@washington.edu"
subscription.endpoint.owner_id = "javerage@washington.edu"
subscription.endpoint.user = "javerage@washington.edu"
subscription.channel = Channel()
subscription.channel.channel_id = "b779df7b-d6f6-4afb-8165-8dbe6232119f"
nws = NWS()
response_status = nws.create_new_subscription(subscription)
self.assertEquals(response_status, 201)
def test_create_invalid_subscriberid_subscription(self):
with self.settings(
RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.File'):
subscription = Subscription()
subscription.subscription_id = "c4597f93-0f62-4feb-ac88-af5f0329001f"
subscription.endpoint = Endpoint()
subscription.endpoint.endpoint_address = "javerage0@uw.edu"
subscription.endpoint.protocol = "Email"
subscription.endpoint.subscriber_id = "-@#$ksjdsfkli13290243290490"
subscription.endpoint.owner_id = "javerage"
subscription.channel = Channel()
subscription.channel.channel_id = "b779df7b-d6f6-4afb-8165-8dbe6232119f"
nws = NWS()
self.assertRaises(InvalidNetID, nws.create_new_subscription, subscription)
def test_create_empty_channelid_subscription(self):
with self.settings(
RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.File'):
subscription = Subscription()
subscription.endpoint = Endpoint()
subscription.endpoint.endpoint_address = "javerage0@uw.edu"
subscription.endpoint.protocol = "Email"
subscription.endpoint.subscriber_id = "javerage"
subscription.endpoint.owner_id = "javerage"
subscription.endpoint.user = "javerage@washington.edu"
subscription.channel = Channel()
nws = NWS()
self.assertRaises(InvalidUUID, nws.create_new_subscription, subscription)
# def test_create_empty_subscription(self):
# with self.settings(
# RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.File'):
# subscription = Subscription()
#
# nws = NWS()
# self.assertRaises(InvalidUUID, nws.create_new_subscription, subscription)
def test_update_subscription(self):
with self.settings(
RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.File'):
subscription = Subscription()
subscription.subscription_id = "c4597f93-0f62-4feb-ac88-af5f0329001f"
subscription.endpoint = Endpoint()
subscription.endpoint.endpoint_address = "javerage0@uw.edu"
subscription.endpoint.protocol = "Email"
subscription.endpoint.user = "javerage@washington.edu"
subscription.endpoint.owner = "javerage@washington.edu"
subscription.channel = Channel()
subscription.channel.channel_id = "b779df7b-d6f6-4afb-8165-8dbe6232119f"
nws = NWS()
response_status = nws.update_subscription(subscription)
self.assertEquals(response_status, 204)
def test_update_invalid_subscriberid_subscription(self):
with self.settings(
RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.File'):
subscription = Subscription()
subscription.endpoint = Endpoint()
subscription.endpoint.endpoint_address = "javerage0@uw.edu"
subscription.endpoint.protocol = "Email"
subscription.endpoint.user = "-@#$ksjdsfkli13290243290490"
subscription.endpoint.owner = "javerage"
subscription.channel = Channel()
subscription.channel.channel_id = "b779df7b-d6f6-4afb-8165-8dbe6232119f"
#subscription.subscriber_type = "Individual"
nws = NWS()
self.assertRaises(InvalidNetID, nws.update_subscription, subscription)
def test_update_empty_subscriberid_subscription(self):
with self.settings(
RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.File'):
subscription = Subscription()
subscription.endpoint = Endpoint()
subscription.endpoint.endpoint_address = "javerage0@uw.edu"
subscription.endpoint.protocol = "Email"
subscription.endpoint.user = ''
subscription.endpoint.owner = "javerage"
subscription.channel = Channel()
subscription.channel.channel_id = "b779df7b-d6f6-4afb-8165-8dbe6232119f"
nws = NWS()
self.assertRaises(InvalidNetID, nws.update_subscription, subscription)
def test_delete_subscription(self):
with self.settings(
RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.File'):
nws = NWS()
response_status = nws.delete_subscription("652236c6-a85a-4845-8dc5-3e518bec044c")
self.assertEquals(response_status, 204)
def test_delete_invalid_subscription(self):
with self.settings(
RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.File'):
nws = NWS()
#Invalid UUID - missing the last character
self.assertRaises(InvalidUUID, nws.delete_subscription, "652236c6-a85a-4845-8dc5-3e518bec044")
def test_subscriber_id_validation(self):
nws = NWS()
nws._validate_subscriber_id('javerage')
nws._validate_subscriber_id('javerage@washington.edu')
self.assertRaises(InvalidNetID, nws._validate_subscriber_id, '00ok')
self.assertRaises(InvalidNetID, nws._validate_subscriber_id, 'ok123456789')
self.assertRaises(InvalidNetID, nws._validate_subscriber_id, 'javerage@gmail.com')
self.assertRaises(InvalidNetID, nws._validate_subscriber_id, 'javerage@')
@skipIf(True, "Used only for live testing")
def test_subscription_live(self):
with self.settings(
RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.Live'):
self._subscription_channel_id_live()
self._create_subscription_live()
self._update_subscription_live()
self._delete_subscription_live()
def _subscription_channel_id_live(self):
with self.settings(
RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.Live'):
nws = NWS()
subscriptions = nws.get_subscriptions_by_channel_id("ce1d46fe-1cdf-4c5a-a316-20f6c99789b8")
self.assertTrue(len(subscriptions) > 0)
def _create_subscription_live(self):
with self.settings(
RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.Live'):
subscription = Subscription()
subscription.subscription_id = "6445864b-6d1c-47b7-a409-279ba4a4ccf4"
subscription.end_point = "javerage09@uw.edu"
subscription.protocol = "Email"
subscription.user = "javerage"
subscription.channel_id = "ce1d46fe-1cdf-4c5a-a316-20f6c99789b8"
subscription.owner = "javerage"
#subscription.subscriber_type = "Individual"
nws = NWS()
response_status = nws.create_new_subscription(subscription)
self.assertEquals(response_status, 201)
def _update_subscription_live(self):
    """Live check: updating an existing subscription returns HTTP 204."""
    with self.settings(
            RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.Live'):
        subscription = Subscription()
        subscription.subscription_id = "6445864b-6d1c-47b7-a409-279ba4a4ccf4"
        subscription.end_point = "javerage10@uw.edu"
        subscription.protocol = "Email"
        subscription.subscriber_id = "javerage"
        subscription.channel_id = "ce1d46fe-1cdf-4c5a-a316-20f6c99789b8"
        subscription.owner_id = "javerage"
        #subscription.subscriber_type = "Individual"
        nws = NWS()
        response_status = nws.update_subscription(subscription)
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(response_status, 204)
def _delete_subscription_live(self):
    """Live check: deleting the subscription returns HTTP 204 (No Content)."""
    with self.settings(
            RESTCLIENTS_NWS_DAO_CLASS='restclients.dao_implementation.nws.Live'):
        nws = NWS()
        response_status = nws.delete_subscription(
            "6445864b-6d1c-47b7-a409-279ba4a4ccf4")
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(response_status, 204)
|
|
# Copyright (c) 2015 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from lxml import etree
import mock
import six
from manila import exception
# NetApp ONTAPI / ZAPI error codes (string-valued, mirroring the codes
# the real netapp_lib reports) used with the fake NaApiError below.
# Meanings are inferred from the constant names — TODO confirm against
# the NetApp ZAPI documentation.
EONTAPI_EINVAL = '22'
EAPIERROR = '13001'
EAPINOTFOUND = '13005'
ESNAPSHOTNOTALLOWED = '13023'
EVOLUMEOFFLINE = '13042'
EINTERNALERROR = '13114'
EDUPLICATEENTRY = '13130'
EVOLNOTCLONE = '13170'
EVOL_NOT_MOUNTED = '14716'
ESIS_CLONE_NOT_LICENSED = '14956'
EOBJECTNOTFOUND = '15661'
E_VIFMGR_PORT_ALREADY_ASSIGNED_TO_BROADCAST_DOMAIN = '18605'
def mock_netapp_lib(modules):
    """Inject fake netapp_lib module classes into each module in *modules*."""
    fake_lib = mock.Mock()
    zapi = fake_lib.api.zapi.zapi
    # Wire the fakes defined in this module into the mocked package tree.
    zapi.NaElement = NaElement
    zapi.NaApiError = NaApiError
    zapi.NaServer = mock.Mock()
    errors = sys.modules[__name__]
    fake_lib.api.zapi.errors = errors
    for target in modules:
        setattr(target, 'netapp_api', zapi)
        setattr(target, 'netapp_error', errors)
class NaApiError(exception.ManilaException):
    """Fake NetApp API invocation error carrying a code and a message."""

    def __init__(self, code=None, message=None):
        # Both fields fall back to 'unknown' when falsy, matching how
        # missing details are reported by the real error class.
        self.code = code if code else 'unknown'
        self.message = message if message else 'unknown'
        super(NaApiError, self).__init__(message=self.message)
class NaElement(object):
    """Fake XML wrapper class for NetApp API.

    Thin wrapper around an lxml ``etree.Element`` mimicking the real
    netapp_lib NaElement interface for use in tests.
    """

    def __init__(self, name):
        """Wrap an existing ``etree._Element`` or create one tagged *name*."""
        if isinstance(name, etree._Element):
            self._element = name
        else:
            self._element = etree.Element(name)

    def get_name(self):
        """Return the tag name of the element."""
        return self._element.tag

    def set_content(self, text):
        """Set the text string for the element."""
        self._element.text = text

    def get_content(self):
        """Get the text for the element."""
        return self._element.text

    def add_attr(self, name, value):
        """Add the attribute to the element."""
        self._element.set(name, value)

    def add_attrs(self, **attrs):
        """Add multiple attributes to the element."""
        for attr in attrs.keys():
            self._element.set(attr, attrs.get(attr))

    def add_child_elem(self, na_element):
        """Append *na_element* as a child; rejects non-NaElement values."""
        if isinstance(na_element, NaElement):
            self._element.append(na_element._element)
            return
        # NOTE(review): a bare ``raise`` outside an except block is itself
        # broken (RuntimeError on py3); kept as-is so callers see the same
        # exception type, but this should become an explicit TypeError.
        raise

    def get_child_by_name(self, name):
        """Return the first child whose tag (or local name) is *name*."""
        for child in self._element.iterchildren():
            if child.tag == name or etree.QName(child.tag).localname == name:
                return NaElement(child)
        return None

    def get_child_content(self, name):
        """Return the text of the first child matching *name*, else None."""
        for child in self._element.iterchildren():
            if child.tag == name or etree.QName(child.tag).localname == name:
                return child.text
        return None

    def get_children(self):
        """Return the children of the element as NaElement wrappers."""
        return [NaElement(el) for el in self._element.iterchildren()]

    def has_attr(self, name):
        """Check whether the element has the named attribute."""
        attributes = self._element.attrib or {}
        return name in attributes.keys()

    def get_attr(self, name):
        """Return the named attribute's value, or None when absent."""
        attributes = self._element.attrib or {}
        return attributes.get(name)

    def get_attr_names(self):
        """Return the list of attribute names."""
        attributes = self._element.attrib or {}
        return attributes.keys()

    def add_new_child(self, name, content, convert=False):
        """Add a child with tag *name* and text *content*.

        When *convert* is true, entity references in *content* are
        replaced with the characters they denote.
        """
        child = NaElement(name)
        if convert:
            content = NaElement._convert_entity_refs(content)
        child.set_content(content)
        self.add_child_elem(child)

    @staticmethod
    def _convert_entity_refs(text):
        """Convert entity refs to chars to handle etree auto conversions."""
        # BUG FIX: this previously replaced "<" with "<" and ">" with ">"
        # (a no-op); the entity references must be the search strings.
        text = text.replace("&lt;", "<")
        text = text.replace("&gt;", ">")
        return text

    @staticmethod
    def create_node_with_children(node, **children):
        """Create and return a named node with the given children."""
        parent = NaElement(node)
        for child in children.keys():
            parent.add_new_child(child, children.get(child, None))
        return parent

    def add_node_with_children(self, node, **children):
        """Create a named node with children and append it to this element."""
        parent = NaElement.create_node_with_children(node, **children)
        self.add_child_elem(parent)

    def to_string(self, pretty=False, method='xml', encoding='UTF-8'):
        """Serialize the element to a string."""
        return etree.tostring(self._element, method=method, encoding=encoding,
                              pretty_print=pretty)

    def __getitem__(self, key):
        """Dict getter method for NaElement.

        Returns an NaElement when the matching child has children of its
        own, the child's text when it does not, or the attribute value
        when *key* names an attribute instead of a child.
        """
        child = self.get_child_by_name(key)
        if child:
            if child.get_children():
                return child
            else:
                return child.get_content()
        elif self.has_attr(key):
            return self.get_attr(key)
        raise KeyError('No element by given name %s.' % key)

    def __setitem__(self, key, value):
        """Dict setter method for NaElement.

        Accepts dict, list, tuple, str, int, float and (py2) long as
        valid values.
        """
        if key:
            if value:
                if isinstance(value, NaElement):
                    child = NaElement(key)
                    child.add_child_elem(value)
                    self.add_child_elem(child)
                # BUG FIX: the bare name ``long`` does not exist on
                # Python 3; six.integer_types covers int/long portably.
                elif isinstance(value, (str, float) + six.integer_types):
                    self.add_new_child(key, six.text_type(value))
                elif isinstance(value, (list, tuple, dict)):
                    child = NaElement(key)
                    child.translate_struct(value)
                    self.add_child_elem(child)
                else:
                    raise TypeError('Not a valid value for NaElement.')
            else:
                self.add_child_elem(NaElement(key))
        else:
            raise KeyError('NaElement name cannot be null.')

    def translate_struct(self, data_struct):
        """Convert a list, tuple, or dict into children of this element."""
        if isinstance(data_struct, (list, tuple)):
            for el in data_struct:
                if isinstance(el, (list, tuple, dict)):
                    self.translate_struct(el)
                else:
                    self.add_child_elem(NaElement(el))
        elif isinstance(data_struct, dict):
            for k in data_struct.keys():
                child = NaElement(k)
                if isinstance(data_struct[k], (dict, list, tuple)):
                    child.translate_struct(data_struct[k])
                else:
                    if data_struct[k]:
                        child.set_content(six.text_type(data_struct[k]))
                self.add_child_elem(child)
        else:
            raise ValueError('Type cannot be converted into NaElement.')
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 26 16:36:42 2014
@author: Ninja
"""
#-----------------------------------------------------------------------------#
# Import Librairies #
#-----------------------------------------------------------------------------#
from BeautifulSoup import BeautifulSoup
import pycurl
from StringIO import StringIO
import time
from stem import Signal
from stem.control import Controller
import os
import random
import pymongo
#-----------------------------------------------------------------------------#
# creation des dossiers #
#-----------------------------------------------------------------------------#
def create_path() :
print "Verifying internal paths", "\n"
path_list = ["log/", "loginit/"]
for i in path_list :
if os.path.exists(i) :
pass
else :
print i, "... now Created"
os.mkdir(i)
print "\n"
#-----------------------------------------------------------------------------#
# dispatch #
#-----------------------------------------------------------------------------#
def dispatch(textfilename, listename, ratio=1) :
print "#-----------------------------------------------------------------#"
print "dispatch des liens en sous listes"
compteur0 = 0
fichier = open(textfilename, "w")
list_init =[]
nbr_lot = int(float(len(listename)) / float(ratio))
if nbr_lot < float(len(listename)) / float(ratio) :
nbr_total_lot = nbr_lot + 1
else :
nbr_total_lot = nbr_lot
for x in range(len(listename)) :
list0 = []
for i in listename :
compteur0 += 1
if compteur0 > ratio :
compteur0 = 0
break
else :
list0.append(i)
fichier.write(i + "\n")
listename.remove(i)
list_init.append(list0)
compteur1 = 0
list_list = []
for z in list_init :
compteur1 += 1
if len(z) != 0 :
list_list.append(z)
else :
pass
print len(list_list), "ensembles(s) de listes traitees pour" , len(list_init), "liens "
print "#-----------------------------------------------------------------#"
print "\n"
return list_list
#-----------------------------------------------------------------------------#
# Curl #
#-----------------------------------------------------------------------------#
def curl(url, tor="no") :
m = pycurl.CurlMulti()
m.handles = []
for i in url :
c = pycurl.Curl()
c.body = StringIO()
c.http_code = -1
m.handles.append(c)
c.setopt(pycurl.URL, str(i))
c.setopt(pycurl.WRITEFUNCTION, c.body.write)
c.setopt(pycurl.FOLLOWLOCATION, 1)
c.setopt(pycurl.MAXREDIRS, 5)
c.setopt(pycurl.NOSIGNAL, 1)
c.setopt(pycurl.USERAGENT, 'Mozilla/5.0 (X11; Linux x86_64) Ubuntu/12.04 Chromium/14.0.835.202')
c.setopt(pycurl.HTTPHEADER, ['User-agent: %s' % 'Mozilla/5.0 (X11; Linux x86_64) Ubuntu/12.04 Chromium/14.0.835.202 Data Mining and Research'])
#---------------------------------------------------------------------#
# GET DATA WITH TOR ET DNS : #
#---------------------------------------------------------------------#
if tor == "no" :
pass
elif tor == "yes" :
c.setopt(pycurl.PROXY, '127.0.0.1')
c.setopt(pycurl.PROXYPORT, 9050)
c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
else :
print "You must set a value to tor option
break
#---------------------------------------------------------------------#
# Next ... #
#---------------------------------------------------------------------#
c.setopt(pycurl.REFERER, 'http://www.google.co.uk/') #http://www.google.co.in/
m.add_handle(c)
num_handles = len(m.handles)
while 1 :
ret, num_handles = m.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM :
break
while num_handles :
m.select(1.0)
while 1 :
ret, num_handles = m.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM :
break
for c in m.handles :
c.close()
m.close()
return m.handles
#-----------------------------------------------------------------------------#
# Recuperation des donnees qui contient l ip adress #
#-----------------------------------------------------------------------------#
def read_ipadress(path_log="loginit/"):
    """Return the public IP address scraped from a random checker site.

    One URL is drawn at random from ``ip_url``; each branch below parses
    the specific HTML layout of that site with BeautifulSoup.

    NOTE(review): the ``path_log`` parameter is never used.
    """
    ip_url = ["http://www.my-ip-address.net", "http://www.mon-ip.com",
              "http://www.adresseip.com", "http://my-ip.heroku.com",
              "http://www.whatsmyip.net", "http://www.geobytes.com/phpdemo.php",
              "http://checkip.dyndns.com", "http://www.myglobalip.com"]
    url = random.choice(ip_url)
    if url == "http://www.my-ip-address.net":
        print "url : ", url,
        pool = curl([url])
        for c in pool:
            data = c.body.getvalue()
            soup1 = BeautifulSoup(data)
            # IP is in the first <h2>, after one of two label prefixes.
            s1 = soup1.findAll('h2')[0].text
            s1 = s1.replace("IP Address :", "")
            s1 = s1.replace("Your IP Address is", "")
    elif url == "http://www.mon-ip.com":
        print "url : ", url,
        pool = curl([url])
        for c in pool:
            data = c.body.getvalue()
            soup1 = BeautifulSoup(data)
            # IP is the text of the first <span class="clip">.
            s1 = soup1.findAll('span', {'class' : 'clip'})[0].text
    elif url == "http://www.adresseip.com":
        print "url : ", url,
        pool = curl([url])
        for c in pool:
            data = c.body.getvalue()
            soup1 = BeautifulSoup(data)
            s1 = soup1.findAll('h2', {'class' : 'title'})[0].text
            s1 = s1.replace("Votre Adresse IP est :", "")
    elif url == "http://www.whatsmyip.net":
        print "url : ", url,
        pool = curl([url])
        for c in pool:
            data = c.body.getvalue()
            soup1 = BeautifulSoup(data)
            # IP is stored in the value attribute of an <input> inside
            # the <h1 class="ip"> element.
            s1 = soup1.findAll('h1', {'class' : 'ip'})[0]
            s1 = s1.findAll('input')[0]['value']
    elif url == "http://my-ip.heroku.com":
        print "url : ", url,
        pool = curl([url])
        for c in pool:
            data = c.body.getvalue()
            soup1 = BeautifulSoup(data)
            # Page body is the bare IP address.
            s1 = soup1.text
    elif url == "http://www.geobytes.com/phpdemo.php":
        print "url : ", url,
        pool = curl([url])
        for c in pool:
            data = c.body.getvalue()
            soup1 = BeautifulSoup(data)
            s1 = soup1.findAll('b')[0].text
    elif url == "http://checkip.dyndns.com":
        print "url : ", url,
        pool = curl([url])
        for c in pool:
            data = c.body.getvalue()
            soup1 = BeautifulSoup(data)
            s1 = soup1.text
            s1 = s1.replace("Current IP CheckCurrent IP Address: ", "")
    elif url == "http://www.myglobalip.com":
        print "url : ", url,
        pool = curl([url])
        for c in pool:
            data = c.body.getvalue()
            soup1 = BeautifulSoup(data)
            s1 = soup1.findAll('h3')
            s1 = s1[0].findAll('span', {'class' :'ip'})
            s1 = s1[0].text
    else:
        # NOTE(review): every URL in ip_url has a branch above, so this is
        # normally unreachable; if it ever runs, ``s1`` is unbound and the
        # return below raises NameError.
        print "Problem"
    ip_adress = s1
    return ip_adress
#-----------------------------------------------------------------------------#
# New IP Adress #
#-----------------------------------------------------------------------------#
def change_ipadress(passphrase="Femmes125", sleep=5):
    """Ask the local Tor control port (9051) for a new identity, then wait.

    SECURITY NOTE(review): the control-port passphrase is hard-coded as a
    default argument; it should be loaded from configuration instead.
    """
    with Controller.from_port(port = 9051) as controller:
        controller.authenticate(passphrase)
        # NEWNYM asks Tor to switch to clean circuits (new exit IP).
        controller.signal(Signal.NEWNYM)
        # Give Tor time to build the new circuit before the next request.
        time.sleep(sleep)
#-----------------------------------------------------------------------------#
# Try to read the ip adress #
#-----------------------------------------------------------------------------#
def try_read_ipadress() :
try :
print read_ipadress()
except :
#---------------------------------------------------------------------#
# 1er re lancement de read_ipadress #
#---------------------------------------------------------------------#
print "1st time read_ipadress failed to launch"
print "re start 1 read_ipadress"
print "\n"
try :
print read_ipadress()
except :
#-----------------------------------------------------------------#
# 2eme re lancement de read_ipadress #
#-----------------------------------------------------------------#
print "2nd time read_ipadress failed to launch"
print "re start 2 read_ipadress"
print "\n"
try :
print read_ipadress()
except :
#-------------------------------------------------------------#
# 3eme re lancement de read_ipadress #
#-------------------------------------------------------------#
print "3rd time read_ipadress failed to launch"
print "re start 3 read_ipadress"
print "\n"
print read_ipadress()
#-----------------------------------------------------------------------------#
# Re-new the ip adress #
#-----------------------------------------------------------------------------#
def oldnew_ipadress(ip_adress=read_ipadress()) :
print "Old : ",
try_read_ipadress()
change_ipadress()
print "New : ",
try_read_ipadress()
print "\n"
#-----------------------------------------------------------------------------#
# MongoDB connection #
#-----------------------------------------------------------------------------#
def mongo(db_name,collection_name,doc) :
try :
cn = pymongo.MongoClient()
except pymongo.errors.ConnectionFailure, e :
print "Could not connect to MongoDB %s" % (e)
db = cn[db_name]
collection = db[collection_name]
collection.insert(doc)
|
|
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import argparse
import glob
import hashlib
import itertools
import os.path
import subprocess
import sys
try:
from urlparse import urlparse, urlunparse
except ImportError:
from urllib.parse import urlparse, urlunparse
import pkginfo
import pkg_resources
import requests
from requests_toolbelt.multipart import MultipartEncoder
import twine.exceptions as exc
from twine.utils import get_config, get_username, get_password
from twine.wheel import Wheel
from twine.wininst import WinInst
# Map distutils distribution type -> metadata extractor class.
DIST_TYPES = {
    "bdist_wheel": Wheel,
    "bdist_wininst": WinInst,
    "bdist_egg": pkginfo.BDist,
    "sdist": pkginfo.SDist,
}

# Map filename extension -> distutils distribution type (keys above).
DIST_EXTENSIONS = {
    ".whl": "bdist_wheel",
    ".exe": "bdist_wininst",
    ".egg": "bdist_egg",
    ".tar.bz2": "sdist",
    ".tar.gz": "sdist",
    ".zip": "sdist",
}
def group_wheel_files_first(dist_files):
    """Reorder *dist_files* so .whl files come before all other files.

    Relative order within each group is preserved. When there are no
    wheel files, the input is returned unchanged.
    """
    if not any(fname for fname in dist_files if fname.endswith(".whl")):
        # Nothing to reorder without wheel files.
        return dist_files
    wheels = [fname for fname in dist_files if fname.endswith(".whl")]
    others = [fname for fname in dist_files if not fname.endswith(".whl")]
    return wheels + others
def find_dists(dists):
    """Expand *dists* (paths and/or glob patterns) into existing filenames.

    Raises ValueError when an entry neither exists nor matches any glob
    pattern. Wheel files are moved to the front of the returned list.
    """
    uploads = []
    for spec in dists:
        if os.path.exists(spec):
            uploads.append(spec)
        else:
            # Not a literal path, so treat it as a glob pattern.
            matches = glob.glob(spec)
            if not matches:
                raise ValueError(
                    "Cannot find file (or expand pattern): '%s'" % spec
                )
            uploads.extend(matches)
    return group_wheel_files_first(uploads)
def sign_file(sign_with, filename, identity):
    """Create a detached, ASCII-armored GPG signature for *filename*."""
    print("Signing {0}".format(os.path.basename(filename)))
    # Build the command incrementally; --local-user precedes -a exactly
    # as in the original argument order.
    command = [sign_with, "--detach-sign"]
    if identity:
        command += ["--local-user", identity]
    command += ["-a", filename]
    subprocess.check_call(command)
def upload(dists, repository, sign, identity, username, password, comment,
           sign_with, config_file):
    """Upload the given distribution files to a PyPI-style *repository*.

    Raises ValueError for bad option combinations or unknown file
    formats, KeyError when the repository section is missing from the
    config file, exc.RedirectDetected when the server issues a redirect,
    and requests.HTTPError for any error status.
    """
    # Check that a nonsensical option wasn't given
    if not sign and identity:
        raise ValueError("sign must be given along with identity")
    # Split off pre-signed .asc files; each is attached to its matching
    # distribution instead of being uploaded on its own.
    signatures = dict(
        (os.path.basename(d), d) for d in dists if d.endswith(".asc")
    )
    dists = [i for i in dists if not i.endswith(".asc")]
    # Get our config from the .pypirc file
    try:
        config = get_config(config_file)[repository]
    except KeyError:
        msg = (
            "Missing '{repo}' section from the configuration file.\n"
            "Maybe you have a out-dated '{cfg}' format?\n"
            "more info: "
            "https://docs.python.org/distutils/packageindex.html#pypirc\n"
        ).format(
            repo=repository,
            cfg=config_file
        )
        raise KeyError(msg)
    parsed = urlparse(config["repository"])
    # Always use https for the official PyPI hosts.
    if parsed.netloc in ["pypi.python.org", "testpypi.python.org"]:
        config["repository"] = urlunparse(
            ("https",) + parsed[1:]
        )
    print("Uploading distributions to {0}".format(config["repository"]))
    username = get_username(username, config)
    password = get_password(password, config)
    session = requests.session()
    uploads = find_dists(dists)
    for filename in uploads:
        # Sign the dist if requested
        if sign:
            sign_file(sign_with, filename, identity)
        # Extract the metadata from the package
        for ext, dtype in DIST_EXTENSIONS.items():
            if filename.endswith(ext):
                meta = DIST_TYPES[dtype](filename)
                break
        else:
            raise ValueError(
                "Unknown distribution format: '%s'" %
                os.path.basename(filename)
            )
        # The python version tag depends on the distribution type.
        if dtype == "bdist_egg":
            pkgd = pkg_resources.Distribution.from_filename(filename)
            py_version = pkgd.py_version
        elif dtype == "bdist_wheel":
            py_version = meta.py_version
        elif dtype == "bdist_wininst":
            py_version = meta.py_version
        else:
            py_version = None
        # Fill in the data - send all the meta-data in case we need to
        # register a new release
        data = {
            # action
            ":action": "file_upload",
            # NOTE: the misspelled "protcol_version" field name is what
            # the legacy PyPI upload API expects; do not "fix" it.
            "protcol_version": "1",
            # identify release
            "name": pkg_resources.safe_name(meta.name),
            "version": meta.version,
            # file content
            "filetype": dtype,
            "pyversion": py_version,
            # additional meta-data
            "metadata_version": meta.metadata_version,
            "summary": meta.summary,
            "home_page": meta.home_page,
            "author": meta.author,
            "author_email": meta.author_email,
            "maintainer": meta.maintainer,
            "maintainer_email": meta.maintainer_email,
            "license": meta.license,
            "description": meta.description,
            "keywords": meta.keywords,
            "platform": meta.platforms,
            "classifiers": meta.classifiers,
            "download_url": meta.download_url,
            "supported_platform": meta.supported_platforms,
            "comment": comment,
            # PEP 314
            "provides": meta.provides,
            "requires": meta.requires,
            "obsoletes": meta.obsoletes,
            # Metadata 1.2
            "project_urls": meta.project_urls,
            "provides_dist": meta.provides_dist,
            "obsoletes_dist": meta.obsoletes_dist,
            "requires_dist": meta.requires_dist,
            "requires_external": meta.requires_external,
            "requires_python": meta.requires_python,
        }
        # Hash the file in 4 KiB chunks so large dists aren't read at once.
        md5_hash = hashlib.md5()
        with open(filename, "rb") as fp:
            content = fp.read(4096)
            while content:
                md5_hash.update(content)
                content = fp.read(4096)
        data["md5_digest"] = md5_hash.hexdigest()
        signed_name = os.path.basename(filename) + ".asc"
        if signed_name in signatures:
            with open(signatures[signed_name], "rb") as gpg:
                data["gpg_signature"] = (signed_name, gpg.read())
        elif sign:
            with open(filename + ".asc", "rb") as gpg:
                data["gpg_signature"] = (signed_name, gpg.read())
        print("Uploading {0}".format(os.path.basename(filename)))
        # Flatten list-valued metadata into repeated form fields.
        data_to_send = []
        for key, value in data.items():
            if isinstance(value, (list, tuple)):
                for item in value:
                    data_to_send.append((key, item))
            else:
                data_to_send.append((key, value))
        # The file handle must stay open while the encoder streams it.
        with open(filename, "rb") as fp:
            data_to_send.append((
                "content",
                (os.path.basename(filename), fp, "application/octet-stream"),
            ))
            encoder = MultipartEncoder(data_to_send)
            resp = session.post(
                config["repository"],
                data=encoder,
                auth=(username, password),
                allow_redirects=False,
                headers={'Content-Type': encoder.content_type},
            )
        # Bug 28. Try to silence a ResourceWarning by releasing the socket and
        # clearing the connection pool.
        resp.close()
        session.close()
        # Bug 92. If we get a redirect we should abort because something seems
        # funky. The behaviour is not well defined and redirects being issued
        # by PyPI should never happen in reality. This should catch malicious
        # redirects as well.
        if resp.is_redirect:
            # BUG FIX: this previously read config["respository"] (typo),
            # raising KeyError instead of RedirectDetected.
            raise exc.RedirectDetected(
                ('"{0}" attempted to redirect to "{1}" during upload.'
                 ' Aborting...').format(config["repository"],
                                        resp.headers["location"]))
        # Otherwise, raise an HTTPError based on the status code.
        resp.raise_for_status()
def main(args):
    """Parse command-line options for ``twine upload`` and run the upload."""
    arg_parser = argparse.ArgumentParser(prog="twine upload")
    arg_parser.add_argument(
        "-r", "--repository",
        default="pypi",
        help="The repository to upload the files to (default: %(default)s)",
    )
    arg_parser.add_argument(
        "-s", "--sign",
        action="store_true",
        default=False,
        help="Sign files to upload using gpg",
    )
    arg_parser.add_argument(
        "--sign-with",
        default="gpg",
        help="GPG program used to sign uploads (default: %(default)s)",
    )
    arg_parser.add_argument(
        "-i", "--identity",
        help="GPG identity used to sign files",
    )
    arg_parser.add_argument(
        "-u", "--username",
        help="The username to authenticate to the repository as",
    )
    arg_parser.add_argument(
        "-p", "--password",
        help="The password to authenticate to the repository with",
    )
    arg_parser.add_argument(
        "-c", "--comment",
        help="The comment to include with the distribution file",
    )
    arg_parser.add_argument(
        "--config-file",
        default="~/.pypirc",
        help="The .pypirc config file to use",
    )
    arg_parser.add_argument(
        "dists",
        nargs="+",
        metavar="dist",
        help="The distribution files to upload to the repository, may "
             "additionally contain a .asc file to include an existing "
             "signature with the file upload",
    )
    parsed_args = arg_parser.parse_args(args)
    # Call the upload function with the arguments from the command line.
    try:
        upload(**vars(parsed_args))
    except Exception as error:
        # Named ``error`` so the ``exc`` module import is not shadowed;
        # the printed format is unchanged.
        sys.exit("{err.__class__.__name__}: {err}".format(err=error))
if __name__ == "__main__":
    # BUG FIX: main() requires the argument list; calling it with no
    # arguments raised TypeError before any parsing happened.
    sys.exit(main(sys.argv[1:]))
|
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""The data layer used during training to train a Fast R-CNN network.
RoIDataLayer implements a Caffe Python layer.
"""
import caffe
from fast_rcnn.config import cfg
from roi_data_layer.minibatch import get_minibatch
import numpy as np
import yaml
from multiprocessing import Process, Queue
class RoIDataLayer(caffe.Layer):
    """Fast R-CNN data layer used for training."""

    def _shuffle_roidb_inds(self, gpu_id=0):
        """Randomly permute the training roidb."""
        self.gpu_id = gpu_id
        if cfg.TRAIN.ASPECT_GROUPING:
            # Pair images of similar aspect (horizontal vs vertical) so
            # the two images in a minibatch can share a blob shape.
            widths = np.array([r['width'] for r in self._roidb])
            heights = np.array([r['height'] for r in self._roidb])
            horz = (widths >= heights)
            vert = np.logical_not(horz)
            horz_inds = np.where(horz)[0]
            vert_inds = np.where(vert)[0]
            inds = np.hstack((
                np.random.permutation(horz_inds),
                np.random.permutation(vert_inds)))
            inds = np.reshape(inds, (-1, 2))
            # Seed with the GPU id so each GPU gets a distinct but
            # reproducible ordering of the image pairs.
            np.random.seed(gpu_id)
            row_perm = np.random.permutation(np.arange(inds.shape[0]))
            inds = np.reshape(inds[row_perm, :], (-1,))
            self._perm = inds
        else:
            self._perm = np.random.permutation(np.arange(len(self._roidb)))
        # Cursor into self._perm for the next minibatch.
        self._cur = 0

    def _get_next_minibatch_inds(self):
        """Return the roidb indices for the next minibatch."""
        # Reshuffle once the remaining entries cannot fill another batch.
        if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
            self._shuffle_roidb_inds(self.gpu_id)
        db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
        self._cur += cfg.TRAIN.IMS_PER_BATCH
        return db_inds

    def _get_next_minibatch(self):
        """Return the blobs to be used for the next minibatch.

        If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a
        separate process and made available through self._blob_queue.
        """
        if cfg.TRAIN.USE_PREFETCH:
            return self._blob_queue.get()
        else:
            db_inds = self._get_next_minibatch_inds()
            minibatch_db = [self._roidb[i] for i in db_inds]
            return get_minibatch(minibatch_db, self._num_classes)

    def set_roidb(self, roidb, gpu_id=0):
        """Set the roidb to be used by this layer during training."""
        self._roidb = roidb
        self._shuffle_roidb_inds(gpu_id)
        if cfg.TRAIN.USE_PREFETCH:
            # Bounded queue: the fetcher blocks once 10 minibatches are
            # waiting, keeping memory use in check.
            self._blob_queue = Queue(10)
            self._prefetch_process = BlobFetcher(self._blob_queue,
                                                 self._roidb,
                                                 self._num_classes, gpu_id)
            self._prefetch_process.start()
            # Terminate the child process when the parent exits.
            def cleanup():
                print 'Terminating BlobFetcher'
                self._prefetch_process.terminate()
                self._prefetch_process.join()
            import atexit
            atexit.register(cleanup)

    def setup(self, bottom, top):
        """Setup the RoIDataLayer."""
        # Parse the layer parameter string, which must be valid YAML.
        # NOTE(review): yaml.load without an explicit Loader executes
        # arbitrary tags; param_str comes from the prototxt, so this is
        # trusted input, but yaml.safe_load would be safer.
        layer_params = yaml.load(self.param_str)
        self._num_classes = layer_params['num_classes']
        # Maps blob name -> index into the `top` blob vector.
        self._name_to_top_map = {}
        # data blob: holds a batch of N images, each with 3 channels
        idx = 0
        top[idx].reshape(cfg.TRAIN.IMS_PER_BATCH, 3,
                         max(cfg.TRAIN.SCALES), cfg.TRAIN.MAX_SIZE)
        self._name_to_top_map['data'] = idx
        idx += 1
        if cfg.TRAIN.HAS_RPN:
            # im_info: (height, width, scale) per image.
            top[idx].reshape(1, 3)
            self._name_to_top_map['im_info'] = idx
            idx += 1
            top[idx].reshape(1, 4)
            self._name_to_top_map['gt_boxes'] = idx
            idx += 1
        else:  # not using RPN
            # rois blob: holds R regions of interest, each is a 5-tuple
            # (n, x1, y1, x2, y2) specifying an image batch index n and a
            # rectangle (x1, y1, x2, y2)
            top[idx].reshape(1, 5, 1, 1)
            self._name_to_top_map['rois'] = idx
            idx += 1
            # labels blob: R categorical labels in [0, ..., K] for K foreground
            # classes plus background
            top[idx].reshape(1, 1, 1, 1)
            self._name_to_top_map['labels'] = idx
            idx += 1
            if cfg.TRAIN.BBOX_REG:
                # bbox_targets blob: R bounding-box regression targets with 4
                # targets per class
                num_reg_class = 2 if cfg.TRAIN.AGNOSTIC else self._num_classes
                top[idx].reshape(1, num_reg_class * 4, 1, 1)
                self._name_to_top_map['bbox_targets'] = idx
                idx += 1
                # bbox_inside_weights blob: At most 4 targets per roi are
                # active; this binary vector specifies the subset of active
                # targets
                top[idx].reshape(1, num_reg_class * 4, 1, 1)
                self._name_to_top_map['bbox_inside_weights'] = idx
                idx += 1
                top[idx].reshape(1, num_reg_class * 4, 1, 1)
                self._name_to_top_map['bbox_outside_weights'] = idx
                idx += 1
        print 'RoiDataLayer: name_to_top:', self._name_to_top_map
        assert len(top) == len(self._name_to_top_map)

    def forward(self, bottom, top):
        """Get blobs and copy them into this layer's top blob vector."""
        blobs = self._get_next_minibatch()
        for blob_name, blob in blobs.iteritems():
            top_ind = self._name_to_top_map[blob_name]
            shape = blob.shape
            # Pad 1-D and 2-D blobs to 4-D (N, C, 1, 1) as caffe expects;
            # im_info is deliberately left 2-D.
            if len(shape) == 1:
                blob = blob.reshape(blob.shape[0], 1, 1, 1)
            if len(shape) == 2 and blob_name != 'im_info':
                blob = blob.reshape(blob.shape[0], blob.shape[1], 1, 1)
            top[top_ind].reshape(*(blob.shape))
            # Copy data into net's input blobs
            top[top_ind].data[...] = blob.astype(np.float32, copy=False)

    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass

    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass
class BlobFetcher(Process):
    """Experimental class for prefetching blobs in a separate process."""

    def __init__(self, queue, roidb, num_classes, gpu_id=0):
        """Store the shared queue and dataset, then seed and shuffle.

        queue: multiprocessing.Queue the training process reads blobs from.
        roidb: list of roidb entry dicts to sample minibatches from.
        num_classes: number of object classes, forwarded to get_minibatch.
        gpu_id: seeds numpy's RNG so each fetcher shuffles reproducibly.
        """
        super(BlobFetcher, self).__init__()
        self._queue = queue
        self._roidb = roidb
        self._num_classes = num_classes
        self._perm = None
        self._cur = 0
        self.gpu_id = gpu_id
        np.random.seed(gpu_id)
        self._shuffle_roidb_inds()

    def _shuffle_roidb_inds(self):
        """Randomly permute the training roidb."""
        # TODO(rbg): remove duplicated code
        self._perm = np.random.permutation(np.arange(len(self._roidb)))
        self._cur = 0

    def _get_next_minibatch_inds(self):
        """Return the roidb indices for the next minibatch."""
        # TODO(rbg): remove duplicated code
        # Reshuffle once the remaining entries cannot fill another batch.
        if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
            self._shuffle_roidb_inds()
        db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
        self._cur += cfg.TRAIN.IMS_PER_BATCH
        return db_inds

    def run(self):
        """Produce minibatch blobs forever; the consumer terminates the
        process (see RoIDataLayer.set_roidb's atexit cleanup)."""
        print 'BlobFetcher started'
        while True:
            db_inds = self._get_next_minibatch_inds()
            minibatch_db = [self._roidb[i] for i in db_inds]
            blobs = get_minibatch(minibatch_db, self._num_classes)
            self._queue.put(blobs)
|
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 Nebula, Inc.
# Copyright 2013 Alessio Ababilov
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception definitions.
"""
########################################################################
#
# THIS MODULE IS DEPRECATED
#
# Please refer to
# https://etherpad.openstack.org/p/kilo-oslo-library-proposals for
# the discussion leading to this deprecation.
#
# We recommend checking out the python-openstacksdk project
# (https://launchpad.net/python-openstacksdk) instead.
#
########################################################################
import inspect
import sys
import six
from manilaclient.openstack.common._i18n import _
class ClientException(Exception):
    """Base class for every exception this library raises."""
class ValidationError(ClientException):
    """Raised for client-side validation failures of API input."""
class UnsupportedVersion(ClientException):
    """Raised when an unsupported version of the API is requested."""
class CommandError(ClientException):
    """Raised for errors in the CLI tool."""
class AuthorizationFailure(ClientException):
    """Raised when the API client cannot be authorized."""
class ConnectionError(ClientException):
    """Raised when the API service cannot be reached.

    NOTE: intentionally keeps its historical name even though it shadows
    the ``ConnectionError`` builtin.
    """
class ConnectionRefused(ConnectionError):
    """Raised when the API service refuses the connection."""
class AuthPluginOptionsMissing(AuthorizationFailure):
    """Raised when an auth plugin is missing required options."""

    def __init__(self, opt_names):
        # Keep the missing option names available to callers for reporting.
        self.opt_names = opt_names
        super(AuthPluginOptionsMissing, self).__init__(
            _("Authentication failed. Missing options: %s") %
            ", ".join(opt_names))
class AuthSystemNotFound(AuthorizationFailure):
    """Raised when the user requests an auth system that is not installed."""

    def __init__(self, auth_system):
        self.auth_system = auth_system
        super(AuthSystemNotFound, self).__init__(
            _("AuthSystemNotFound: %r") % auth_system)
class NoUniqueMatch(ClientException):
    """Raised when multiple entities are found where one was expected."""
class EndpointException(ClientException):
    """Base class for Service Catalog endpoint problems."""
class EndpointNotFound(EndpointException):
    """Raised when the requested endpoint is absent from the Service Catalog."""
class AmbiguousEndpoints(EndpointException):
    """Raised when more than one endpoint matches in the Service Catalog."""

    def __init__(self, endpoints=None):
        self.endpoints = endpoints
        super(AmbiguousEndpoints, self).__init__(
            _("AmbiguousEndpoints: %r") % endpoints)
class HttpError(ClientException):
    """Base class for all HTTP-status exceptions.

    Stores the response metadata on the instance and formats the exception
    text as "<message> (HTTP <status>)[ (Request-ID: <id>)]".
    """

    http_status = 0
    message = _("HTTP Error")

    def __init__(self, message=None, details=None,
                 response=None, request_id=None,
                 url=None, method=None, http_status=None):
        # Falsy arguments fall back to the class-level defaults.
        self.http_status = http_status or self.http_status
        self.message = message or self.message
        self.details = details
        self.request_id = request_id
        self.response = response
        self.url = url
        self.method = method
        described = "%s (HTTP %s)" % (self.message, self.http_status)
        if request_id:
            described = "%s (Request-ID: %s)" % (described, request_id)
        super(HttpError, self).__init__(described)
class HTTPRedirection(HttpError):
    """Base class for 3xx HTTP redirection responses."""
    message = _("HTTP Redirection")
class HTTPClientError(HttpError):
    """Base class for 4xx responses: the client appears to have erred."""
    message = _("HTTP Client Error")
class HttpServerError(HttpError):
    """Base class for 5xx responses: the server erred or cannot perform
    the request."""
    message = _("HTTP Server Error")
class MultipleChoices(HTTPRedirection):
    """HTTP 300 - the client may follow any of several resource options."""
    message = _("Multiple Choices")
    http_status = 300
class BadRequest(HTTPClientError):
    """HTTP 400 - the request cannot be fulfilled due to bad syntax."""
    message = _("Bad Request")
    http_status = 400
class Unauthorized(HTTPClientError):
    """HTTP 401 - authentication is required and has failed or is missing."""
    message = _("Unauthorized")
    http_status = 401
class PaymentRequired(HTTPClientError):
    """HTTP 402 - Payment Required (reserved for future use)."""
    message = _("Payment Required")
    http_status = 402
class Forbidden(HTTPClientError):
    """HTTP 403 - the request was valid but the server refuses to respond."""
    message = _("Forbidden")
    http_status = 403
class NotFound(HTTPClientError):
    """HTTP 404 - the resource could not be found (may exist later)."""
    message = _("Not Found")
    http_status = 404
class MethodNotAllowed(HTTPClientError):
    """HTTP 405 - the resource does not support this request method."""
    message = _("Method Not Allowed")
    http_status = 405
class NotAcceptable(HTTPClientError):
    """HTTP 406 - the resource cannot produce content matching the
    request's Accept headers."""
    message = _("Not Acceptable")
    http_status = 406
class ProxyAuthenticationRequired(HTTPClientError):
    """HTTP 407 - the client must first authenticate with the proxy."""
    message = _("Proxy Authentication Required")
    http_status = 407
class RequestTimeout(HTTPClientError):
    """HTTP 408 - the server timed out waiting for the request."""
    message = _("Request Timeout")
    http_status = 408
class Conflict(HTTPClientError):
    """HTTP 409 - the request conflicts with the resource's current state
    (e.g. an edit conflict)."""
    message = _("Conflict")
    http_status = 409
class Gone(HTTPClientError):
    """HTTP 410 - the resource is permanently unavailable."""
    message = _("Gone")
    http_status = 410
class LengthRequired(HTTPClientError):
    """HTTP 411 - the request omitted a Content-Length the resource
    requires."""
    message = _("Length Required")
    http_status = 411
class PreconditionFailed(HTTPClientError):
    """HTTP 412 - a precondition set by the requester was not met."""
    message = _("Precondition Failed")
    http_status = 412
class RequestEntityTooLarge(HTTPClientError):
    """HTTP 413 - Request Entity Too Large.

    The request is larger than the server is willing or able to process.

    Accepts an optional ``retry_after`` keyword argument; it is coerced to
    int and falls back to 0 when missing or unparsable.
    """
    http_status = 413
    message = _("Request Entity Too Large")

    def __init__(self, *args, **kwargs):
        try:
            self.retry_after = int(kwargs.pop('retry_after'))
        except (KeyError, TypeError, ValueError):
            # KeyError: header absent; TypeError/ValueError: value (e.g.
            # None or a non-numeric string) cannot be coerced to int.
            # The original code let TypeError escape for retry_after=None.
            self.retry_after = 0
        super(RequestEntityTooLarge, self).__init__(*args, **kwargs)
class RequestUriTooLong(HTTPClientError):
    """HTTP 414 - the URI was too long for the server to process."""
    message = _("Request-URI Too Long")
    http_status = 414
class UnsupportedMediaType(HTTPClientError):
    """HTTP 415 - the entity's media type is not supported by the server
    or resource."""
    message = _("Unsupported Media Type")
    http_status = 415
class RequestedRangeNotSatisfiable(HTTPClientError):
    """HTTP 416 - the server cannot supply the requested portion of the
    file."""
    message = _("Requested Range Not Satisfiable")
    http_status = 416
class ExpectationFailed(HTTPClientError):
    """HTTP 417 - the Expect request-header requirements cannot be met."""
    message = _("Expectation Failed")
    http_status = 417
class UnprocessableEntity(HTTPClientError):
    """HTTP 422 - the request was well-formed but semantically invalid."""
    message = _("Unprocessable Entity")
    http_status = 422
class InternalServerError(HttpServerError):
    """HTTP 500 - generic server error when no more specific message fits."""
    message = _("Internal Server Error")
    http_status = 500
# Named HttpNotImplemented because NotImplemented is a Python builtin.
class HttpNotImplemented(HttpServerError):
    """HTTP 501 - the server does not recognize the method or cannot
    fulfill the request."""
    message = _("Not Implemented")
    http_status = 501
class BadGateway(HttpServerError):
    """HTTP 502 - invalid response received from the upstream server while
    acting as a gateway or proxy."""
    message = _("Bad Gateway")
    http_status = 502
class ServiceUnavailable(HttpServerError):
    """HTTP 503 - the server is currently unavailable."""
    message = _("Service Unavailable")
    http_status = 503
class GatewayTimeout(HttpServerError):
    """HTTP 504 - no timely response from the upstream server while acting
    as a gateway or proxy."""
    message = _("Gateway Timeout")
    http_status = 504
class HttpVersionNotSupported(HttpServerError):
    """HTTP 505 - the HTTP protocol version of the request is unsupported."""
    message = _("HTTP Version Not Supported")
    http_status = 505
# Map of HTTP status code -> exception class, collected from every class
# defined in this module that carries a truthy http_status attribute.
_code_map = {
    getattr(obj, 'http_status', None): obj
    for obj in vars(sys.modules[__name__]).values()
    if inspect.isclass(obj) and getattr(obj, 'http_status', False)
}
def from_response(response, method, url):
    """Return an instance of :class:`HttpError` or subclass based on response.

    :param response: instance of `requests.Response` class
    :param method: HTTP method used for request
    :param url: URL used for request
    :returns: the `_code_map` class for the response status code, or a
        generic HttpError/HTTPClientError/HttpServerError fallback.
    """
    req_id = response.headers.get("x-openstack-request-id")
    # NOTE(hdd) true for older versions of nova and cinder
    if not req_id:
        req_id = response.headers.get("x-compute-request-id")
    kwargs = {
        "http_status": response.status_code,
        "response": response,
        "method": method,
        "url": url,
        "request_id": req_id,
    }
    if "retry-after" in response.headers:
        kwargs["retry_after"] = response.headers["retry-after"]

    content_type = response.headers.get("Content-Type", "")
    if content_type.startswith("application/json"):
        try:
            body = response.json()
        except ValueError:
            pass
        else:
            # Guard against an empty JSON object: the original
            # list(body)[0] raised IndexError on a `{}` body.
            if isinstance(body, dict) and body:
                error = body.get(list(body)[0])
                if isinstance(error, dict):
                    kwargs["message"] = (error.get("message") or
                                         error.get("faultstring"))
                    kwargs["details"] = (error.get("details") or
                                         six.text_type(body))
    elif content_type.startswith("text/"):
        kwargs["details"] = response.text

    try:
        cls = _code_map[response.status_code]
    except KeyError:
        # Unmapped status code: fall back to the broadest matching class.
        if 500 <= response.status_code < 600:
            cls = HttpServerError
        elif 400 <= response.status_code < 500:
            cls = HTTPClientError
        else:
            cls = HttpError
    return cls(**kwargs)
|
|
# -*- coding: utf-8 -*-
"""
Django settings for crakins project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join
# Optional legacy S3 support: importing the S3 package must not be fatal,
# so local development without it still works.
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
    from S3 import CallingFormat
    AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
    # TODO: Fix this where even if in Dev this class is called.
    pass

from configurations import Configuration, values

# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
class Common(Configuration):
    """Base settings shared by all environments (see Local and Production)."""

    ########## APP CONFIGURATION
    DJANGO_APPS = (
        # Default Django apps:
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.sites',
        'django.contrib.messages',
        'django.contrib.staticfiles',

        # Useful template tags:
        # 'django.contrib.humanize',

        # Admin
        'django.contrib.admin',
    )
    THIRD_PARTY_APPS = (
        'south',  # Database migration helpers:
        'crispy_forms',  # Form layouts
        'avatar',  # for user avatars
        #'gunicorn',  # web server
    )

    # Apps specific for this project go here.
    LOCAL_APPS = (
        'users',  # custom users app
        'weather',
        # Your stuff: custom apps go here
    )

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
    INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
    INSTALLED_APPS += (
        # Needs to come last for now because of a weird edge case between
        # South and allauth
        'allauth',  # registration
        'allauth.account',  # registration
        'allauth.socialaccount',  # registration
    )
    ########## END APP CONFIGURATION

    ########## MIDDLEWARE CONFIGURATION
    MIDDLEWARE_CLASSES = (
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.common.CommonMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.middleware.clickjacking.XFrameOptionsMiddleware',
        # NOTE(review): the debug-toolbar middleware is listed here in the
        # base class, but the 'debug_toolbar' app itself is only installed
        # by subclasses -- verify every environment that inherits this
        # tuple actually has the app available, and that no subclass
        # appends it a second time.
        'debug_toolbar.middleware.DebugToolbarMiddleware',
    )
    ########## END MIDDLEWARE CONFIGURATION

    ########## DEBUG
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
    DEBUG = values.BooleanValue(True)

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
    TEMPLATE_DEBUG = DEBUG
    ########## END DEBUG

    ########## SECRET CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
    # Note: This key only used for development and testing.
    # In production, this is changed to a values.SecretValue() setting
    SECRET_KEY = "CHANGEME!!!"
    ########## END SECRET CONFIGURATION

    ########## FIXTURE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
    FIXTURE_DIRS = (
        join(BASE_DIR, 'fixtures'),
    )
    ########## END FIXTURE CONFIGURATION

    ########## EMAIL CONFIGURATION
    # Console backend by default; Production overrides with SendGrid SMTP.
    EMAIL_BACKEND = values.Value('django.core.mail.backends.console.EmailBackend')
    ########## END EMAIL CONFIGURATION

    ########## MANAGER CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
    ADMINS = (
        ('Christopher Akins', 'rakins@gmail.com'),
    )

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
    MANAGERS = ADMINS
    ########## END MANAGER CONFIGURATION

    ########## DATABASE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
    # NOTE(review): database credentials are hardcoded in this default URL;
    # consider supplying them via the DATABASE_URL environment variable,
    # which DatabaseURLValue reads when set.
    DATABASES = values.DatabaseURLValue('postgres://crakins:1.kins.1@localhost/crakins')
    ########## END DATABASE CONFIGURATION

    ########## CACHING
    # Do this here because thanks to django-pylibmc-sasl and pylibmc memcacheify is painful to install on windows.
    # memcacheify is what's used in Production
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': ''
        }
    }
    ########## END CACHING

    ########## GENERAL CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
    TIME_ZONE = 'America/Los_Angeles'

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
    LANGUAGE_CODE = 'en-us'

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
    SITE_ID = 1

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
    USE_I18N = True

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
    USE_L10N = True

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
    USE_TZ = True
    ########## END GENERAL CONFIGURATION

    ########## TEMPLATE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
    TEMPLATE_CONTEXT_PROCESSORS = (
        'django.contrib.auth.context_processors.auth',
        "allauth.account.context_processors.account",
        "allauth.socialaccount.context_processors.socialaccount",
        'django.core.context_processors.debug',
        'django.core.context_processors.i18n',
        'django.core.context_processors.media',
        'django.core.context_processors.static',
        'django.core.context_processors.tz',
        'django.contrib.messages.context_processors.messages',
        'django.core.context_processors.request',
        # Your stuff: custom template context processers go here
    )

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
    TEMPLATE_DIRS = (
        join(BASE_DIR, 'templates'),
    )

    TEMPLATE_LOADERS = (
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )

    # See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
    CRISPY_TEMPLATE_PACK = 'bootstrap3'
    ########## END TEMPLATE CONFIGURATION

    ########## STATIC FILE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
    STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
    STATIC_URL = '/static/'

    # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
    STATICFILES_DIRS = (
        join(BASE_DIR, 'static'),
    )

    # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
    STATICFILES_FINDERS = (
        'django.contrib.staticfiles.finders.FileSystemFinder',
        'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    )
    ########## END STATIC FILE CONFIGURATION

    ########## MEDIA CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
    MEDIA_ROOT = join(BASE_DIR, 'media')

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
    MEDIA_URL = '/media/'
    ########## END MEDIA CONFIGURATION

    ########## URL Configuration
    ROOT_URLCONF = 'config.urls'

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
    WSGI_APPLICATION = 'config.wsgi.application'
    ########## End URL Configuration

    ########## AUTHENTICATION CONFIGURATION
    AUTHENTICATION_BACKENDS = (
        "django.contrib.auth.backends.ModelBackend",
        "allauth.account.auth_backends.AuthenticationBackend",
    )

    # Some really nice defaults
    ACCOUNT_AUTHENTICATION_METHOD = "username"
    ACCOUNT_EMAIL_REQUIRED = True
    ACCOUNT_EMAIL_VERIFICATION = "mandatory"
    ########## END AUTHENTICATION CONFIGURATION

    ########## Custom user app defaults
    # Select the correct user model
    AUTH_USER_MODEL = "users.User"
    LOGIN_REDIRECT_URL = "users:redirect"
    ########## END Custom user app defaults

    ########## SLUGLIFIER
    AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
    ########## END SLUGLIFIER

    ########## LOGGING CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
    # A sample logging configuration. The only tangible logging
    # performed by this configuration is to send an email to
    # the site admins on every HTTP 500 error when DEBUG=False.
    # See http://docs.djangoproject.com/en/dev/topics/logging for
    # more details on how to customize your logging configuration.
    LOGGING = {
        'version': 1,
        'disable_existing_loggers': False,
        'filters': {
            'require_debug_false': {
                '()': 'django.utils.log.RequireDebugFalse'
            }
        },
        'handlers': {
            'mail_admins': {
                'level': 'ERROR',
                'filters': ['require_debug_false'],
                'class': 'django.utils.log.AdminEmailHandler'
            }
        },
        'loggers': {
            'django.request': {
                'handlers': ['mail_admins'],
                'level': 'ERROR',
                'propagate': True,
            },
        }
    }
    ########## END LOGGING CONFIGURATION

    ########## Your common stuff: Below this line define 3rd party libary settings
    DEBUG_TOOLBAR_PATCH_SETTINGS = False
class Local(Common):
    """Local development settings: console/local e-mail and django-debug-toolbar."""

    ########## INSTALLED_APPS
    INSTALLED_APPS = Common.INSTALLED_APPS
    ########## END INSTALLED_APPS

    ########## Mail settings
    EMAIL_HOST = "localhost"
    EMAIL_PORT = 1025
    EMAIL_BACKEND = values.Value('django.core.mail.backends.console.EmailBackend')
    ########## End mail settings

    ########## django-debug-toolbar
    # Common.MIDDLEWARE_CLASSES already ends with
    # 'debug_toolbar.middleware.DebugToolbarMiddleware'; the original code
    # appended it again here, so the toolbar middleware appeared twice in
    # this environment. Inherit the tuple unchanged instead.
    MIDDLEWARE_CLASSES = Common.MIDDLEWARE_CLASSES
    INSTALLED_APPS += ('debug_toolbar',)

    INTERNAL_IPS = ('127.0.0.1',)

    DEBUG_TOOLBAR_CONFIG = {
        'INTERCEPT_REDIRECTS': False,
        'SHOW_TEMPLATE_CONTEXT': True,
    }
    ########## end django-debug-toolbar

    ########## Your local stuff: Below this line define 3rd party libary settings
class Production(Common):
    """Production settings: secrets from the environment, S3 static storage,
    SendGrid e-mail, memcached caching, django-secure hardening.

    NOTE(review): this class inherits Common.MIDDLEWARE_CLASSES, which
    includes 'debug_toolbar.middleware.DebugToolbarMiddleware', but the
    'debug_toolbar' app is never added to INSTALLED_APPS here -- confirm
    the toolbar middleware is intended in production.
    """

    ########## INSTALLED_APPS
    INSTALLED_APPS = Common.INSTALLED_APPS
    ########## END INSTALLED_APPS

    ########## SECRET KEY
    # Read from the environment (DJANGO_SECRET_KEY) instead of the dev default.
    SECRET_KEY = values.SecretValue()
    ########## END SECRET KEY

    ########## django-secure
    INSTALLED_APPS += ("djangosecure", )

    # set this to 60 seconds and then to 518400 when you can prove it works
    SECURE_HSTS_SECONDS = 60
    SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
    SECURE_FRAME_DENY = values.BooleanValue(True)
    SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
    SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
    SESSION_COOKIE_SECURE = values.BooleanValue(False)
    SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
    SECURE_SSL_REDIRECT = values.BooleanValue(True)
    ########## end django-secure

    ########## SITE CONFIGURATION
    # Hosts/domain names that are valid for this site
    # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
    # NOTE(review): "*" accepts any Host header; restrict to the real
    # domain(s) before relying on host validation.
    ALLOWED_HOSTS = ["*"]
    ########## END SITE CONFIGURATION

    INSTALLED_APPS += ("gunicorn", )

    ########## STORAGE CONFIGURATION
    # See: http://django-storages.readthedocs.org/en/latest/index.html
    INSTALLED_APPS += (
        'storages',
    )

    # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
    STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'

    # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
    AWS_ACCESS_KEY_ID = values.SecretValue()
    AWS_SECRET_ACCESS_KEY = values.SecretValue()
    AWS_STORAGE_BUCKET_NAME = values.SecretValue()
    AWS_AUTO_CREATE_BUCKET = True
    AWS_QUERYSTRING_AUTH = False

    # see: https://github.com/antonagestam/collectfast
    AWS_PRELOAD_METADATA = True
    INSTALLED_APPS += ("collectfast", )

    # AWS cache settings, don't change unless you know what you're doing:
    AWS_EXPIREY = 60 * 60 * 24 * 7  # one week, in seconds
    AWS_HEADERS = {
        'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIREY,
            AWS_EXPIREY)
    }

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
    STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
    ########## END STORAGE CONFIGURATION

    ########## EMAIL
    DEFAULT_FROM_EMAIL = values.Value(
        'crakins <crakins-noreply@crakins.com>')
    EMAIL_HOST = values.Value('smtp.sendgrid.com')
    EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
    EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
    EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
    EMAIL_SUBJECT_PREFIX = values.Value('[crakins] ', environ_name="EMAIL_SUBJECT_PREFIX")
    EMAIL_USE_TLS = True
    SERVER_EMAIL = EMAIL_HOST_USER
    ########## END EMAIL

    ########## TEMPLATE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
    # Cached loader wraps the default loaders to avoid re-reading templates.
    TEMPLATE_LOADERS = (
        ('django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        )),
    )
    ########## END TEMPLATE CONFIGURATION

    ########## CACHING
    # Only do this here because thanks to django-pylibmc-sasl and pylibmc memcacheify is painful to install on windows.
    CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
    ########## END CACHING

    ########## Your production stuff: Below this line define 3rd party libary settings
    DEBUG_TOOLBAR_PATCH_SETTINGS = False
|
|
#!/usr/bin/env python
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
from string import Template
from java_class_component import Enum, Field
from java_method import Method
class JavaClassLoader(object):
    """Manages the set of loaded internal Java classes.

    Loads and parses each class listed in ``class_list`` from ``src_path``
    and keeps the parsed data in a map keyed by class name.
    """

    def __init__(self, src_path, class_list):
        self._src_path = src_path
        self._class_list = class_list
        self._java_data_map = {}
        for clazz in self._class_list:
            self.LoadJavaFile(clazz)
        # All classes must be loaded before methods can resolve
        # cross-class references.
        for java_data in self._java_data_map.values():
            for method in java_data._methods:
                method.PrepareStrings()

    def IsInternalClass(self, clazz):
        """Return True if |clazz| is one of the managed internal classes."""
        return clazz in self._class_list

    def GetJavaData(self, clazz):
        """Return the parsed data for |clazz|, or None if it is not loaded."""
        return self._java_data_map.get(clazz)

    def LoadJavaFile(self, clazz):
        """Parse <src_path>/<clazz>.java and cache its data (idempotent)."""
        # Fixed: dict.has_key() was removed in Python 3; `in` works in both.
        if clazz in self._java_data_map:
            return
        file_name = os.path.join(self._src_path, '%s.java' % clazz)
        try:
            # `with` guarantees the handle is closed even if read() fails
            # (the original leaked the handle on a read error).
            with open(file_name, 'r') as file_handle:
                file_content = file_handle.read()
        except IOError:
            print('Error reading input Java file, please check.')
            return
        java_data = InternalJavaFileData(self)
        java_data.SetClassContent(file_content)
        self._java_data_map[clazz] = java_data

    def GenerateDoc(self, doc):
        """Rewrite XWalk*Internal names in a doc comment to wrapper names."""
        if not doc:
            return ''

        def ReplaceInternal(matchobj):
            match = matchobj.group(0)
            if self.IsInternalClass(match):
                return self.GetJavaData(match).wrapper_name
            return match
        return re.sub('XWalk[a-zA-Z_0-9]*Internal',
                      ReplaceInternal, doc).lstrip('\n')
class InternalJavaFileData(object):
"""Data class stores the generator information of internal class."""
ANNOTATION_CREATE_INTERNALLY = 'createInternally'
ANNOTATION_CREATE_EXTERNALLY = 'createExternally'
ANNOTATION_EXTEND_CLASS = 'extendClass'
ANNOTATION_NO_INSTANCE = 'noInstance'
ANNOTATION_INSTANCE = 'instance'
ANNOTATION_IMPL = 'impl'
def __init__(self, class_loader):
self._class_loader = class_loader
self._class_name = ''
self._bridge_name = ''
self._wrapper_name = ''
self._class_type = '' # class or interface
self._class_doc = ''
self._class_annotations = {}
self._methods = []
self._fields = []
self._imports = []
self._enums = {}
self._package_name = ''
self._need_default_constructor = True
@property
def class_name(self):
return self._class_name
@property
def bridge_name(self):
return self._bridge_name
@property
def wrapper_name(self):
return self._wrapper_name
@property
def class_type(self):
return self._class_type
@property
def class_doc(self):
return self._class_doc
@property
def class_annotations(self):
return self._class_annotations
@property
def methods(self):
return self._methods
@property
def fields(self):
return self._fields
@property
def imports(self):
return self._imports
@property
def enums(self):
return self._enums
@property
def package_name(self):
return self._package_name
@property
def need_default_constructor(self):
return self._need_default_constructor
def GetJavaData(self, clazz):
return self._class_loader.GetJavaData(clazz)
def IsInternalClass(self, clazz):
return self._class_loader.IsInternalClass(clazz)
def MangleInternalNameToBridgeName(self, internal_name):
if not self.IsInternalClass(internal_name):
return internal_name
else:
return internal_name.replace('Internal', 'Bridge')
def MangleInternalNameToWrapperName(self, internal_name):
if not self.IsInternalClass(internal_name):
return internal_name
else:
return internal_name.replace('Internal', '')
def SetClassContent(self, content):
self.ExtractPackageName(content)
self.ExtractImports(content)
self.ExtractClassProperties(content)
self.ExtractMethods(content)
self.ExtractFields(content)
self.ExtractEnums(content)
def ExtractPackageName(self, java_content):
package_re = re.compile('\s*package\s+(?P<package>[a-zA-Z0-9._]+)\s*;')
for match in re.finditer(package_re, java_content):
self._package_name = match.group('package')
def ExtractImports(self, java_content):
imports_re = re.compile('\s*import\s+(?P<imported>[a-zA-Z0-9._*]+)\s*;')
for match in re.finditer(imports_re, java_content):
imported = match.group('imported')
# Determine whether the import rule should be ignored for generated code.
# TODO: Currently we only use a blacklist to filter the import rule.
if imported.startswith('org.xwalk.core.internal') or \
imported.startswith('org.chromium'):
continue
self._imports.append(imported)
def ExtractClassProperties(self, java_content):
class_re = re.compile(
'(?P<class_doc>(\n\s*/\*\*.*\n(\s+\*(.)*\n)+\s+\*/\s*)?)\n'
'\s*@XWalkAPI\(?'
'(?P<annotation_content>[a-zA-Z0-9.,=\s]*)\)?'
'\s*public\s+([a-z]+\s+)*'
'(?P<type>(class|interface))\s+'
'(?P<class_name>[a-zA-Z0-9]*)')
for match in re.finditer(class_re, java_content):
annotation_content = match.group('annotation_content')
self._class_name = match.group('class_name')
self._bridge_name = \
self.MangleInternalNameToBridgeName(self._class_name)
self._wrapper_name = \
self.MangleInternalNameToWrapperName(self._class_name)
self._class_type = match.group('type')
self._class_doc = match.group('class_doc')
self.ParseClassAnnotations(annotation_content)
def ParseClassAnnotations(self, annotation):
"""Class annotation contains the following optional attributes:
'extendClass' - The class have to extend
'createExternally' - boolean
'craeteInternally' - boolean
'noInstance' - boolean
'isConst' - boolean
'impl' - Class to impl
'instance - instance'"""
extend_class_re = re.compile('extendClass\s*=\s*'
'(?P<extend_class>[a-zA-Z0-9.]+)')
for match in re.finditer(extend_class_re, annotation):
extend_class = match.group('extend_class')
self._class_annotations['extendClass'] = extend_class
create_internally_re = re.compile('createInternally\s*=\s*'
'(?P<create_internally>(true|false))')
for match in re.finditer(create_internally_re, annotation):
create_internally = match.group('create_internally')
if create_internally == 'true':
self._class_annotations['createInternally'] = True
self._need_default_constructor = False
elif create_internally == 'false':
self._class_annotations['createInternally'] = False
create_externally_re = re.compile('createExternally\s*=\s*'
'(?P<create_externally>(true|false))')
for match in re.finditer(create_externally_re, annotation):
create_externally = match.group('create_externally')
if create_externally == 'true':
self._class_annotations['createExternally'] = True
elif create_externally == 'false':
self._class_annotations['createExternally'] = False
no_instance_re = re.compile('noInstance\s*=\s*'
'(?P<no_instance>(true|false))')
for match in re.finditer(no_instance_re, annotation):
no_instance = match.group('no_instance')
if no_instance == 'true':
self._class_annotations['noInstance'] = True
self._need_default_constructor = False
elif no_instance == 'false':
self._class_annotations['noInstance'] = False
is_const_re = re.compile('isConst\s*=\s*'
'(?P<is_const>(true|false))')
for match in re.finditer(is_const_re, annotation):
is_const = match.group('is_const')
if is_const == 'true':
self._class_annotations['isConst'] = True
elif is_const == 'false':
self._class_annotations['isConst'] = False
impl_re = re.compile('impl\s*=\s*'
'(?P<impl>[a-zA-Z0-9.]+)')
for match in re.finditer(impl_re, annotation):
impl = match.group('impl')
self._class_annotations['impl'] = impl
instance_re = re.compile('instance\s*=\s*'
'(?P<instance>[a-zA-Z0-9.]+)')
for match in re.finditer(instance_re, annotation):
instance = match.group('instance')
self._class_annotations['instance'] = instance
def ExtractMethods(self, java_content):
constructor_re = re.compile(
'(?P<method_doc>(\n\s*/\*\*.*\n(\s+\*(.)*\n)+\s+\*/\s*)?)\n'
'\s*@XWalkAPI\(?'
'(?P<method_annotation>[a-zA-Z0-9\$%,\s\(\)\{\};._"=]*)\)?'
'\s*public\s(?P<method_name>[a-zA-Z0-9]+)\('
'(?P<method_params>[a-zA-Z0-9\s,\[\]\>\<]*)\)')
for match in re.finditer(constructor_re, java_content):
method_annotation = match.group('method_annotation')
method_name = match.group('method_name')
method_params = match.group('method_params')
method_doc = match.group('method_doc')
method = Method(
self._class_name,
self._class_loader,
True, # is_constructor
False, # is_static
False, # is_abstract
method_name, None,
method_params, method_annotation, method_doc)
self._methods.append(method)
self._need_default_constructor = False
method_re = re.compile(
'(?P<method_doc>(\n\s*/\*\*.*\n(\s+\*(.)*\n)+\s+\*/\s*)?)\n'
'\s*@XWalkAPI\(?'
'(?P<method_annotation>[a-zA-Z0-9%,\s\(\)\{\};._"=]*)\)?'
'\s*public\s+(?P<method_return>[a-zA-Z0-9]+(\<[a-zA-Z0-9]+,\s[a-zA-Z0-9]+\>)*)\s+'
'(?P<method_name>[a-zA-Z0-9]+)\('
'(?P<method_params>[a-zA-Z0-9\s,\]\[\<\>]*)\)')
for match in re.finditer(method_re, java_content):
method_annotation = match.group('method_annotation')
method_name = match.group('method_name')
method_params = match.group('method_params')
method_return = match.group('method_return')
method_doc = match.group('method_doc')
method = Method(
self._class_name,
self._class_loader,
False, # is_constructor
False, # is_static
False, # is_abstract
method_name, method_return, method_params,
method_annotation, method_doc)
self._methods.append(method)
method_re = re.compile(
'(?P<method_doc>(\n\s*/\*\*.*\n(\s+\*(.)*\n)+\s+\*/\s*)?)\n'
'\s*@XWalkAPI\(?'
'(?P<method_annotation>[a-zA-Z0-9%,\s\(\)\{\};._"=]*)\)?'
'\s*public\s+static\s+(synchronized\s+)*'
'(?P<method_return>[a-zA-Z0-9]+)\s+'
'(?P<method_name>[a-zA-Z0-9]+)\('
'(?P<method_params>[a-zA-Z0-9\s,\[\]\<\>]*)\)')
for match in re.finditer(method_re, java_content):
method_annotation = match.group('method_annotation')
method_name = match.group('method_name')
method_params = match.group('method_params')
method_return = match.group('method_return')
method_doc = match.group('method_doc')
method = Method(
self._class_name,
self._class_loader,
False, # is_constructor
True, # is_static
False, # is_abstract
method_name, method_return, method_params,
method_annotation, method_doc)
self._methods.append(method)
method_re = re.compile(
'(?P<method_doc>(\n\s*/\*\*.*\n(\s+\*(.)*\n)+\s+\*/\s*)?)\n'
'\s*@XWalkAPI\(?'
'(?P<method_annotation>[a-zA-Z0-9%,\s\(\)\{\};._"=]*)\)?'
'\s*public\s+abstract\s+(synchronized\s+)*'
'(?P<method_return>[a-zA-Z0-9]+)\s+'
'(?P<method_name>[a-zA-Z0-9]+)\('
'(?P<method_params>[a-zA-Z0-9\s,\[\]\<\>]*)\)')
for match in re.finditer(method_re, java_content):
method_annotation = match.group('method_annotation')
method_name = match.group('method_name')
method_params = match.group('method_params')
method_return = match.group('method_return')
method_doc = match.group('method_doc')
method = Method(
self._class_name,
self._class_loader,
False, # is_constructor
False, # is_static
True, # is_abstract
method_name, method_return, method_params,
method_annotation, method_doc)
self._methods.append(method)
def ExtractFields(self, java_content):
field_re = re.compile(
'(?P<field_doc>(\n\s*/\*\*.*\n(\s+\*(.)*\n)+\s+\*/\s*)?)\n'
'\s*@XWalkAPI\s*public\s+static\s+final\s+'
'(?P<field_type>[a-zA-Z0-9_]+)\s+'
'(?P<field_name>[a-zA-Z0-9_]+)\s*=\s*'
'(?P<field_value>[a-zA-Z0-9-_"]+)\s*;')
for match in re.finditer(field_re, java_content):
field_type = match.group('field_type')
field_name = match.group('field_name')
field_value = match.group('field_value')
field_doc = match.group('field_doc')
field_object = Field(field_type, field_name, field_value, field_doc)
self._fields.append(field_object)
def ExtractEnums(self, java_content):
enum_re = re.compile(
'(?P<enum_doc>(\n\s*/\*\*.*\n(\s+\*(.)*\n)+\s+\*/\s*)?)\n'
'\s*@XWalkAPI\s*public\s+enum\s+'
'(?P<enum_name>[a-zA-Z0-9_]+)\s+{'
'(?P<enum_content>(.|\n)*?)\s*}')
for match in re.finditer(enum_re, java_content):
enum_name = match.group('enum_name')
enum_content = match.group('enum_content')
enum_doc = match.group('enum_doc')
enum_object = Enum(enum_name, enum_content, enum_doc)
self._enums[enum_name] = enum_object
def HasNoInstanceAnnotation(self):
return self._class_annotations.get(
InternalJavaFileData.ANNOTATION_NO_INSTANCE, False)
def HasCreateInternallyAnnotation(self):
return self._class_annotations.get(
InternalJavaFileData.ANNOTATION_CREATE_INTERNALLY, False)
def HasInstanceCreateInternallyAnnotation(self):
instance = None
clazz = self._class_annotations.get(
InternalJavaFileData.ANNOTATION_INSTANCE, None)
if clazz:
instance = self.GetJavaData(clazz.replace('.class', ''))
if instance:
return instance.HasCreateInternallyAnnotation()
else:
return self.HasCreateInternallyAnnotation()
def UseAsInstanceInBridgeCall(self, var):
return '%s.getWrapper()' % self.UseAsReturnInBridgeSuperCall(var)
def UseAsInstanceInBridgeOverrideCall(self, var):
clazz = self._class_annotations.get(
InternalJavaFileData.ANNOTATION_INSTANCE, self._class_name)
clazz = clazz.replace('.class', '')
if self.GetJavaData(clazz).class_annotations.get(
InternalJavaFileData.ANNOTATION_CREATE_INTERNALLY, False):
return self.UseAsReturnInBridgeSuperCall(var)
return '(%s) %s' % (self.GetJavaData(clazz).bridge_name, var)
def UseAsReturnInBridgeSuperCall(self, var):
clazz = self._class_annotations.get(
InternalJavaFileData.ANNOTATION_INSTANCE, self._class_name)
clazz = clazz.replace('.class', '')
if self.GetJavaData(clazz).class_annotations.get(
InternalJavaFileData.ANNOTATION_CREATE_INTERNALLY, False):
typed_var_template = Template('(${VAR} instanceof ${BRIDGE_TYPE} ?'\
' ((${BRIDGE_TYPE}) ${VAR} ) : new ${BRIDGE_TYPE}(${INTERNAL_VAR}))')
value = {'VAR': var,
'BRIDGE_TYPE': self.GetJavaData(clazz).bridge_name,
'INTERNAL_VAR': var if clazz == self._class_name else\
'(%s) %s' % (clazz, var)}
var = typed_var_template.substitute(value)
return var
def UseAsInstanceInBridgeSuperCall(self, var):
# pylint: disable=R0201
return var
def UseAsInstanceInWrapperCall(self, var):
clazz = self._class_annotations.get('instance', self._class_name)
clazz = clazz.replace('.class', '')
if clazz != self._class_name:
var = '((%s) %s)' % (self.GetJavaData(clazz).wrapper_name, var)
return '%s.getBridge()' % var
def UseAsTypeInWrapperCall(self):
return self._wrapper_name
def GetBridgeName(self, subclass=None):
if not self.IsInternalClass(self._class_name):
return self._class_name
else:
clazz = self._class_annotations.get(
InternalJavaFileData.ANNOTATION_INSTANCE, self._class_name)
clazz = clazz.replace('.class', '')
if not subclass:
return self.GetJavaData(clazz).bridge_name
else:
return clazz + '$' + subclass
def GetWrapperName(self, subclass=None):
if not self.IsInternalClass(self._class_name):
return self._class_name
else:
if not subclass:
return self._wrapper_name
else:
return "%s$%s" % (self._wrapper_name, subclass.replace('Internal', ''))
|
|
__author__ = "Radical.Utils Development Team (Andre Merzky, Ole Weidner)"
__copyright__ = "Copyright 2013, RADICAL@Rutgers"
__license__ = "MIT"
import os
import contrib.urlparse25 as urlparse
import signatures as rus
# ------------------------------------------------------------------------------
#
class Url (object):
    """ The RADICAL Url class.

        URLs are used in several places in the RADICAL software projects: to
        specify service endpoints for job submission or resource management, for
        file or directory locations, etc.

        The URL class is designed to simplify URL management for these
        purposes -- it allows to manipulate individual URL elements, while
        ensuring that the resulting URL is well formatted. Example::

          # create a URL from a string
          location = radical.utils.Url ("file://localhost/tmp/file.dat")
          d = radical.utils.filesystem.Directory(location)

        A URL consists of the following components (where one or more can
        be 'None')::

          <scheme>://<user>:<pass>@<host>:<port>/<path>?<query>#<fragment>

        Each of these components can be accessed via its property or
        alternatively, via getter / setter methods. Example::

          url = radical.utils.Url ("scheme://pass:user@host:123/path?query#fragment")

          # modify the scheme
          url.scheme = "anotherscheme"

          # above is equivalent with
          url.set_scheme("anotherscheme")
    """

    # --------------------------------------------------------------------------
    #
    @rus.takes ('Url',
                rus.optional (basestring, 'Url'))
    @rus.returns (rus.nothing)
    def __init__(self, url_in=''):
        """
        __init__(url_in='')

        Create a new Url object from a string or another Url object.
        """
        if not url_in :
            url_in = ""

        self._urlobj = urlparse.urlparse (str (url_in))
        self._renew_url ()

    # --------------------------------------------------------------------------
    #
    @rus.takes ('Url')
    @rus.returns ((rus.nothing, basestring))
    def __str__ (self):
        """
        __str__()

        String representation.
        """
        return self._urlobj.geturl()

    # --------------------------------------------------------------------------
    #
    @rus.takes ('Url')
    @rus.returns (basestring)
    def __unicode__(self):
        """
        __unicode__()

        Unicode representation.
        """
        return u'%s' % unicode(self._urlobj.geturl())

    # --------------------------------------------------------------------------
    #
    @rus.takes ('Url',
                ('Url', dict))
    @rus.returns ('Url')
    def __deepcopy__(self, memo):
        """
        __deepcopy__(self, memo)

        Deep copy of a Url.
        """
        return Url(self)

    # --------------------------------------------------------------------------
    #
    @rus.takes ('Url',
                rus.optional(basestring),
                rus.optional(basestring),
                rus.optional(basestring),
                rus.optional((basestring, int)))
    @rus.returns (basestring)
    def _make_netloc (self, username, password, hostname, port):
        """
        _make_netloc(self, username, password, hostname, port)

        Private helper that assembles the '<user>:<pass>@<host>:<port>'
        netloc string.  Each component is optional; a password is only
        rendered when a username is present.
        """
        netloc = str()

        if username :
            if password : netloc += "%s:%s@" % (username, password)
            else        : netloc += "%s@"    % (username)
        if hostname     : netloc += "%s"     % (hostname)
        if port         : netloc += ":%s"    % (port)

        return netloc

    # --------------------------------------------------------------------------
    #
    def _renew_netloc (self, username='', password='', hostname='', port='') :
        """Rebuild the URL with the given netloc components replaced; empty
        arguments keep the current value of that component."""
        newloc = self._make_netloc (username or self._urlobj.username,
                                    password or self._urlobj.password,
                                    hostname or self._urlobj.hostname,
                                    port     or self._urlobj.port)
        self._renew_url (netloc=newloc)

    # --------------------------------------------------------------------------
    #
    def _renew_url (self, scheme='', netloc='', path='',
                    params='', query='', fragment='') :
        """Re-assemble and re-parse the URL with the given components
        replaced; empty arguments keep the current value.  The path is
        always normalized."""
        # always normalize the path
        if path :
            path = os.path.normpath (path)

        newurl = urlparse.urlunparse ((scheme   or self._urlobj.scheme,
                                       netloc   or self._urlobj.netloc,
                                       path     or self._urlobj.path,
                                       params   or self._urlobj.params,
                                       query    or self._urlobj.query,
                                       fragment or self._urlobj.fragment))
        self._urlobj = urlparse.urlparse (newurl)

    # --------------------------------------------------------------------------
    #
    # Scheme property
    #
    @rus.takes ('Url',
                (rus.nothing, basestring))
    @rus.returns (rus.nothing)
    def set_scheme(self, scheme):
        """
        set_scheme(scheme)

        Set the URL 'scheme' component.

        :param scheme: The new scheme
        :type scheme:  str
        """
        self._renew_url (scheme=scheme)

    @rus.takes ('Url')
    @rus.returns ((rus.nothing, basestring))
    def get_scheme(self):
        """
        get_scheme()

        Return the URL 'scheme' component.
        """
        return self._urlobj.scheme

    scheme = property(get_scheme, set_scheme)
    schema = scheme  # alias, as both terms are used...
    """ The scheme component. """

    # --------------------------------------------------------------------------
    #
    # Host property
    #
    @rus.takes ('Url',
                (rus.nothing, basestring))
    @rus.returns (rus.nothing)
    def set_host(self, hostname):
        """
        set_host(hostname)

        Set the URL 'hostname' component.

        :param hostname: The new hostname
        :type hostname:  str
        """
        # _renew_netloc works by side effect and returns None -- the old
        # code assigned its result to an unused local.
        self._renew_netloc (hostname=hostname)

    @rus.takes ('Url')
    @rus.returns ((rus.nothing, basestring))
    def get_host(self):
        """
        get_host()

        Return the URL 'hostname' component.
        """
        return self._urlobj.hostname

    host = property(get_host, set_host)
    """ The hostname component. """

    # --------------------------------------------------------------------------
    #
    # Port property
    #
    @rus.takes ('Url',
                (rus.nothing, basestring, int))
    @rus.returns (rus.nothing)
    def set_port(self, port):
        """
        set_port(port)

        Set the URL 'port' component.

        :param port: The new port
        :type port:  int
        """
        self._renew_netloc (port=port)

    @rus.takes ('Url')
    @rus.returns ((rus.nothing, int))
    def get_port(self):
        """
        get_port()

        Return the URL 'port' component (as int), or None if unset.
        """
        if self._urlobj.port is not None:
            return int(self._urlobj.port)
        return None

    port = property(get_port, set_port)
    """ The port component. """

    # --------------------------------------------------------------------------
    #
    # Username property
    #
    @rus.takes ('Url',
                (rus.nothing, basestring))
    @rus.returns (rus.nothing)
    def set_username(self, username):
        """
        set_username(username)

        Set the URL 'username' component.

        :param username: The new username
        :type username:  str
        """
        self._renew_netloc (username=username)

    @rus.takes ('Url')
    @rus.returns ((rus.nothing, basestring))
    def get_username(self):
        """
        get_username()

        Return the URL 'username' component.
        """
        return self._urlobj.username

    username = property(get_username, set_username)
    """ The username component. """

    # --------------------------------------------------------------------------
    #
    # Password property
    #
    @rus.takes ('Url',
                (rus.nothing, basestring))
    @rus.returns (rus.nothing)
    def set_password(self, password):
        """
        set_password(password)

        Set the URL 'password' component.

        :param password: The new password
        :type password:  str
        """
        self._renew_netloc (password=password)

    @rus.takes ('Url')
    @rus.returns ((rus.nothing, basestring))
    def get_password(self):
        """
        get_password()

        Return the URL 'password' component.
        """
        return self._urlobj.password

    password = property(get_password, set_password)
    """ The password component. """

    # --------------------------------------------------------------------------
    #
    # Fragment property
    #
    @rus.takes ('Url',
                (rus.nothing, basestring))
    @rus.returns (rus.nothing)
    def set_fragment(self, fragment):
        """
        set_fragment(fragment)

        Set the URL 'fragment' component.

        :param fragment: The new fragment
        :type fragment:  str
        """
        self._renew_url (fragment=fragment)

    @rus.takes ('Url')
    @rus.returns ((rus.nothing, basestring))
    def get_fragment(self):
        """
        get_fragment()

        Return the URL 'fragment' component.
        """
        return self._urlobj.fragment

    fragment = property(get_fragment, set_fragment)
    """ The fragment component. """

    # --------------------------------------------------------------------------
    #
    # Path property
    #
    @rus.takes ('Url',
                (rus.nothing, basestring))
    @rus.returns (rus.nothing)
    def set_path(self, path):
        """
        set_path(path)

        Set the URL 'path' component.

        :param path: The new path
        :type path:  str
        """
        self._renew_url (path=path)

    @rus.takes ('Url')
    @rus.returns ((rus.nothing, basestring))
    def get_path(self):
        """
        get_path()

        Return the URL 'path' component (normalized, without a query part).
        """
        # Some urlparse implementations leave the query attached to the
        # path -- split it off before normalizing.  split('?', 1) tolerates
        # additional '?' characters inside the query itself.
        if '?' in self._urlobj.path:
            (path, query) = self._urlobj.path.split('?', 1)
            return os.path.normpath(path)
        else:
            return os.path.normpath(self._urlobj.path)

    path = property(get_path, set_path)
    """ The path component. """

    # --------------------------------------------------------------------------
    #
    # Query property
    #
    @rus.takes ('Url',
                (rus.nothing, basestring))
    @rus.returns (rus.nothing)
    def set_query(self, query):
        """
        set_query(query)

        Set the URL 'query' component.

        :param query: The new query
        :type query:  str
        """
        self._renew_url (query=query)

    @rus.takes ('Url')
    @rus.returns ((rus.nothing, basestring))
    def get_query(self):
        """
        get_query()

        Return the URL 'query' component.
        """
        if not self._urlobj.query:
            # fall back to a query that may still be attached to the path
            if '?' in self._urlobj.path:
                (path, query) = self._urlobj.path.split('?', 1)
                return query
        else:
            return self._urlobj.query

    query = property(get_query, set_query)
    """ The query component. """
# --------------------------------------------------------------------
#
|
|
#
# CORE
# Copyright (c)2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# authors: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
conf.py: common support for configurable objects
'''
import string
from core.api import coreapi
class ConfigurableManager(object):
    ''' A generic class for managing Configurables. This class can register
        with a session to receive Config Messages for setting some parameters
        for itself or for the Configurables that it manages.
    '''
    # name corresponds to configuration object field
    _name = ""
    # type corresponds with register message types
    _type = None

    def __init__(self, session=None):
        self.session = session
        # register with the session so Config Messages addressed to
        # self._name are routed to self.configure(); the default session of
        # None previously caused an unconditional AttributeError here
        if self.session is not None:
            self.session.addconfobj(self._name, self._type, self.configure)
        # Configurable key=values, indexed by node number
        self.configs = {}

    def configure(self, session, msg):
        ''' Handle configure messages. The configuration message sent to a
            ConfigurableManager usually is used to:
            1. Request a list of Configurables (request flag)
            2. Reset manager and clear configs (reset flag)
            3. Send values that configure the manager or one of its
               Configurables

            Returns any reply messages.
        '''
        objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
        conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
        if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST:
            return self.configure_request(msg)
        elif conftype == coreapi.CONF_TYPE_FLAGS_RESET:
            # only reset when the message targets us (or everything)
            if objname == "all" or objname == self._name:
                return self.configure_reset(msg)
        else:
            return self.configure_values(msg,
                                msg.gettlv(coreapi.CORE_TLV_CONF_VALUES))

    def configure_request(self, msg):
        ''' Request configuration data. Subclasses override to build a reply.
        '''
        return None

    def configure_reset(self, msg):
        ''' By default, resets this manager to clear configs.
        '''
        return self.reset()

    def configure_values(self, msg, values):
        ''' Values have been sent to this manager. Subclasses override.
        '''
        return None

    def configure_values_keyvalues(self, msg, values, target, keys):
        ''' Helper that can be used for configure_values for parsing in
            'key=value' strings from a values field. The key name must be
            in the keys list, and target.key=value is set.

            Raises ValueError for a key not listed in keys.
        '''
        if values is None:
            return None
        kvs = values.split('|')
        # enumerate() (instead of kvs.index(kv)) keeps positional mapping
        # correct when the same bare value occurs more than once
        for index, kv in enumerate(kvs):
            try:
                # key=value form; an empty value is normalized to None
                (key, value) = kv.split('=', 1)
                if value is not None and not value.strip():
                    value = None
            except ValueError:
                # bare value form: map onto the expected key by position
                key = keys[index]
                value = kv
            if key not in keys:
                # Python-3-compatible raise (was: raise ValueError, "...")
                raise ValueError("invalid key: %s" % key)
            if value is not None:
                setattr(target, key, value)
        return None

    def reset(self):
        ''' Hook for subclasses; clears manager state. '''
        return None

    def setconfig(self, nodenum, conftype, values):
        ''' add configuration values for a node to a dictionary; values are
            usually received from a Configuration Message, and may refer to a
            node for which no object exists yet
        '''
        conflist = []
        if nodenum in self.configs:
            oldlist = self.configs[nodenum]
            found = False
            for (t, v) in oldlist:
                if (t == conftype):
                    # replace existing config for this type
                    found = True
                    conflist.append((conftype, values))
                else:
                    conflist.append((t, v))
            if not found:
                conflist.append((conftype, values))
        else:
            conflist.append((conftype, values))
        self.configs[nodenum] = conflist

    def getconfig(self, nodenum, conftype, defaultvalues):
        ''' get configuration values for a node; if the values don't exist in
            our dictionary then return the default values supplied
        '''
        if nodenum in self.configs:
            # return configured values; conftype of None matches any entry
            conflist = self.configs[nodenum]
            for (t, v) in conflist:
                if (conftype is None) or (t == conftype):
                    return (t, v)
        # return default values provided (may be None)
        return (conftype, defaultvalues)

    def getallconfigs(self, use_clsmap=True):
        ''' Return (nodenum, conftype, values) tuples for all stored configs.
            Used when reconnecting to a session.

            NOTE(review): with use_clsmap=True the type is mapped through
            self._modelclsmap, which is presumably supplied by subclasses --
            confirm before calling on a bare manager.
        '''
        r = []
        for nodenum in self.configs:
            for (t, v) in self.configs[nodenum]:
                if use_clsmap:
                    t = self._modelclsmap[t]
                r.append((nodenum, t, v))
        return r

    def clearconfig(self, nodenum):
        ''' remove configuration values for the specified node;
            when nodenum is None, remove all configuration values
        '''
        if nodenum is None:
            self.configs = {}
            return
        if nodenum in self.configs:
            self.configs.pop(nodenum)

    def setconfig_keyvalues(self, nodenum, conftype, keyvalues):
        ''' Store config from a list of (key, value) tuples, merged over the
            model's default values. Unknown model types or keys are warned
            about and skipped.
        '''
        if conftype not in self._modelclsmap:
            self.warn("Unknown model type '%s'" % (conftype))
            return
        model = self._modelclsmap[conftype]
        keys = model.getnames()
        # defaults are merged with supplied values here
        values = list(model.getdefaultvalues())
        for key, value in keyvalues:
            if key not in keys:
                self.warn("Skipping unknown configuration key for %s: '%s'" % \
                          (conftype, key))
                continue
            i = keys.index(key)
            values[i] = value
        self.setconfig(nodenum, conftype, values)

    def getmodels(self, n):
        ''' Return a list of model classes and values for a net if one has been
            configured. This is invoked when exporting a session to XML.
            This assumes self.configs contains an iterable of (model-names,
            values) and a self._modelclsmap dict exists.
        '''
        r = []
        if n.objid in self.configs:
            v = self.configs[n.objid]
            for model in v:
                cls = self._modelclsmap[model[0]]
                vals = model[1]
                r.append((cls, vals))
        return r

    def info(self, msg):
        ''' Forward an info message to the session. '''
        self.session.info(msg)

    def warn(self, msg):
        ''' Forward a warning message to the session. '''
        self.session.warn(msg)
class Configurable(object):
    ''' A generic class for managing configuration parameters.
        Parameters are sent via Configuration Messages, which allow the GUI
        to build dynamic dialogs depending on what is being configured.
    '''
    _name = ""
    # Configuration items:
    #   ('name', 'type', 'default', 'possible-value-list', 'caption')
    _confmatrix = []
    _confgroups = None
    _bitmap = None

    def __init__(self, session=None, objid=None):
        self.session = session
        self.objid = objid

    def reset(self):
        ''' Hook for subclasses. '''
        pass

    def register(self):
        ''' Hook for subclasses. '''
        pass

    @classmethod
    def getdefaultvalues(cls):
        ''' Return a tuple with the default value of every config item. '''
        return tuple(x[2] for x in cls._confmatrix)

    @classmethod
    def getnames(cls):
        ''' Return a tuple with the name of every config item. '''
        return tuple(x[0] for x in cls._confmatrix)

    @classmethod
    def configure(cls, mgr, msg):
        ''' Handle configuration messages for this object.
        '''
        reply = None
        nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
        objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
        conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
        if mgr.verbose:
            mgr.info("received configure message for %s" % cls._name)
        if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST:
            if mgr.verbose:
                mgr.info("replying to configure request for %s model" %
                         cls._name)
            # when object name is "all", the reply to this request may be None
            # if this node has not been configured for this model; otherwise we
            # reply with the defaults for this model
            if objname == "all":
                defaults = None
                typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE
            else:
                defaults = cls.getdefaultvalues()
                typeflags = coreapi.CONF_TYPE_FLAGS_NONE
            values = mgr.getconfig(nodenum, cls._name, defaults)[1]
            if values is None:
                # node has no active config for this model (don't send defaults)
                return None
            # reply with config options
            reply = cls.toconfmsg(0, nodenum, typeflags, values)
        elif conftype == coreapi.CONF_TYPE_FLAGS_RESET:
            if objname == "all":
                mgr.clearconfig(nodenum)
        else:
            # store the configuration values for later use, when the node
            # object has been created
            if objname is None:
                mgr.info("no configuration object for node %s" % nodenum)
                return None
            values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES)
            defaults = cls.getdefaultvalues()
            if values_str is None:
                # use default or preconfigured values
                values = mgr.getconfig(nodenum, cls._name, defaults)[1]
            else:
                # use new values supplied from the conf message
                values = values_str.split('|')
                # determine new (key=value) or old (positional) style config
                new = cls.haskeyvalues(values)
                if new:
                    new_values = list(defaults)
                    keys = cls.getnames()
                    for v in values:
                        key, value = v.split('=', 1)
                        try:
                            new_values[keys.index(key)] = value
                        except ValueError:
                            mgr.info("warning: ignoring invalid key '%s'" % key)
                    values = new_values
            mgr.setconfig(nodenum, objname, values)
        return reply

    @classmethod
    def toconfmsg(cls, flags, nodenum, typeflags, values):
        ''' Convert this class to a Config API message. Some TLVs are defined
            by the class, but node number, conf type flags, and values must
            be passed in.
        '''
        keys = cls.getnames()
        keyvalues = map(lambda a, b: "%s=%s" % (a, b), keys, values)
        # str.join replaces the Python-2-only string.join()
        values_str = '|'.join(keyvalues)
        tlvdata = ""
        if nodenum is not None:
            tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE,
                                                nodenum)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ,
                                            cls._name)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE,
                                            typeflags)
        datatypes = tuple(x[1] for x in cls._confmatrix)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
                                            datatypes)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
                                            values_str)
        # '|'.join replaces the Python-2-only bare reduce(); unlike reduce
        # it also tolerates an empty _confmatrix
        captions = '|'.join(x[4] for x in cls._confmatrix)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_CAPTIONS,
                                            captions)
        possiblevals = '|'.join(x[3] for x in cls._confmatrix)
        tlvdata += coreapi.CoreConfTlv.pack(
            coreapi.CORE_TLV_CONF_POSSIBLE_VALUES, possiblevals)
        if cls._bitmap is not None:
            tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_BITMAP,
                                                cls._bitmap)
        if cls._confgroups is not None:
            tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_GROUPS,
                                                cls._confgroups)
        msg = coreapi.CoreConfMessage.pack(flags, tlvdata)
        return msg

    @staticmethod
    def booltooffon(value):
        ''' Convenience helper turns bool into on (True) or off (False) string.
        '''
        if value == "1" or value == "true" or value == "on":
            return "on"
        else:
            return "off"

    @staticmethod
    def offontobool(value):
        ''' Convert "on"/"off" strings (case-insensitive) to 1/0; any other
            value is returned unchanged.
        '''
        if isinstance(value, str):
            if value.lower() == "on":
                return 1
            elif value.lower() == "off":
                return 0
        return value

    @classmethod
    def valueof(cls, name, values):
        ''' Helper to return a value by the name defined in confmatrix.
            Checks if it is boolean.
        '''
        i = cls.getnames().index(name)
        if cls._confmatrix[i][1] == coreapi.CONF_DATA_TYPE_BOOL and \
           values[i] != "":
            return cls.booltooffon(values[i])
        else:
            return values[i]

    @staticmethod
    def haskeyvalues(values):
        ''' Helper to check for list of key=value pairs versus a plain old
            list of values. Returns True if all elements are "key=value".
        '''
        if len(values) == 0:
            return False
        for v in values:
            if "=" not in v:
                return False
        return True

    def getkeyvaluelist(self):
        ''' Helper to return a list of (key, value) tuples. Keys come from
            self._confmatrix and values are instance attributes.
        '''
        return [(k, getattr(self, k))
                for k in self.getnames() if hasattr(self, k)]
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from nipype.interfaces.base import (BaseInterface, BaseInterfaceInputSpec, traits,
File, TraitedSpec, InputMultiPath,
OutputMultiPath, isdefined)
from nipype.utils.filemanip import split_filename
import os, os.path as op
import numpy as np
import networkx as nx
import scipy.io as sio
import pickle
from nipype.utils.misc import package_check
import warnings
from ... import logging
iflogger = logging.getLogger('interface')
# The 'cmp' (Connectome Mapper) package is optional: degrade gracefully
# when it is missing instead of failing at import time.
have_cmp = True
try:
    package_check('cmp')
except Exception:
    # Python-3-compatible except clause (was: "except Exception, e");
    # the bound exception object was never used.
    have_cmp = False
    warnings.warn('cmp not installed')
else:
    import cmp
def read_unknown_ntwk(ntwk):
    """Return *ntwk* as a NetworkX graph.

    Graph objects pass through unchanged; a path ending in '.pck' is read as
    a gpickle, and one ending in '.graphml' as GraphML.  Any other input is
    returned as-is.
    """
    if isinstance(ntwk, nx.classes.graph.Graph):
        return ntwk
    _, _, ext = split_filename(ntwk)
    if ext == '.pck':
        return nx.read_gpickle(ntwk)
    if ext == '.graphml':
        return nx.read_graphml(ntwk)
    return ntwk
def remove_all_edges(ntwk):
    """Delete every edge of *ntwk* in place (nodes are kept); returns *ntwk*.

    Iterates over a copy so removal does not disturb the iteration.
    """
    snapshot = ntwk.copy()
    for source, target in snapshot.edges_iter():
        ntwk.remove_edge(source, target)
    return ntwk
def fix_keys_for_gexf(orig):
    """
    GEXF Networks can be read in Gephi, however, the keys for the node and edge IDs must be converted to strings
    """
    import networkx as nx
    ntwk = nx.Graph()
    nodes = orig.nodes_iter()
    edges = orig.edges_iter()
    # Copy every node under a stringified ID, carrying its attribute dict over.
    for node in nodes:
        newnodedata = {}
        newnodedata.update(orig.node[node])
        # Promote the freesurfer name to the GEXF 'label' attribute when present.
        if orig.node[node].has_key('dn_fsname'):
            newnodedata['label'] = orig.node[node]['dn_fsname']
        ntwk.add_node(str(node), newnodedata)
        # Positions must also be strings for GEXF serialization.
        if ntwk.node[str(node)].has_key('dn_position') and newnodedata.has_key('dn_position'):
            ntwk.node[str(node)]['dn_position'] = str(newnodedata['dn_position'])
    # Copy every edge under stringified endpoint IDs, stringifying the
    # numeric attributes GEXF cannot store natively.
    for edge in edges:
        data = {}
        data = orig.edge[edge[0]][edge[1]]
        ntwk.add_edge(str(edge[0]), str(edge[1]), data)
        if ntwk.edge[str(edge[0])][str(edge[1])].has_key('fiber_length_mean'):
            ntwk.edge[str(edge[0])][str(edge[1])]['fiber_length_mean'] = str(data['fiber_length_mean'])
        if ntwk.edge[str(edge[0])][str(edge[1])].has_key('fiber_length_std'):
            ntwk.edge[str(edge[0])][str(edge[1])]['fiber_length_std'] = str(data['fiber_length_std'])
        if ntwk.edge[str(edge[0])][str(edge[1])].has_key('number_of_fibers'):
            ntwk.edge[str(edge[0])][str(edge[1])]['number_of_fibers'] = str(data['number_of_fibers'])
        if ntwk.edge[str(edge[0])][str(edge[1])].has_key('value'):
            ntwk.edge[str(edge[0])][str(edge[1])]['value'] = str(data['value'])
    return ntwk
def add_dicts_by_key(in_dict1, in_dict2):
    """
    Combines two dictionaries and adds the values for those keys that are shared

    Keys present in only one of the inputs are dropped.  Uses a direct
    membership test instead of the original O(n*m) nested loop.
    """
    both = {}
    for key in in_dict1:
        if key in in_dict2:
            both[key] = in_dict1[key] + in_dict2[key]
    return both
def average_networks(in_files, ntwk_res_file, group_id):
    """
    Sums the edges of input networks and divides by the number of networks
    Writes the average network as .pck and .gexf and returns the name of the written networks

    An edge is kept only if it appears in at least half of the inputs.
    NOTE(review): the per-key .mat export indexes matrices with edge[0]-1 /
    edge[1]-1, which assumes node IDs are the integers 1..N -- confirm
    against the network resolution file.
    """
    import networkx as nx
    import os.path as op
    iflogger.info("Creating average network for group: {grp}".format(grp=group_id))
    matlab_network_list = []
    if len(in_files) == 1:
        # A single input needs no averaging.
        avg_ntwk = read_unknown_ntwk(in_files[0])
    else:
        # Threshold: an edge must occur in at least half of the inputs.
        count_to_keep_edge = np.round(float(len(in_files)) / 2)
        iflogger.info("Number of networks: {L}, an edge must occur in at least {c} to remain in the average network".format(L=len(in_files), c=count_to_keep_edge))
        ntwk_res_file = read_unknown_ntwk(ntwk_res_file)
        iflogger.info("{n} Nodes found in network resolution file".format(n=ntwk_res_file.number_of_nodes()))
        # Start from the resolution file's nodes with all edges stripped.
        ntwk = remove_all_edges(ntwk_res_file)
        counting_ntwk = ntwk.copy()
        # Sums all the relevant variables
        for index, subject in enumerate(in_files):
            tmp = nx.read_gpickle(subject)
            iflogger.info('File {s} has {n} edges'.format(s=subject, n=tmp.number_of_edges()))
            edges = tmp.edges_iter()
            for edge in edges:
                data = {}
                data = tmp.edge[edge[0]][edge[1]]
                # 'count' accumulates how many inputs contain this edge.
                data['count'] = 1
                if ntwk.has_edge(edge[0], edge[1]):
                    current = {}
                    current = ntwk.edge[edge[0]][edge[1]]
                    data = add_dicts_by_key(current, data)
                ntwk.add_edge(edge[0], edge[1], data)
            nodes = tmp.nodes_iter()
            for node in nodes:
                data = {}
                data = ntwk.node[node]
                if tmp.node[node].has_key('value'):
                    data['value'] = data['value'] + tmp.node[node]['value']
                ntwk.add_node(node, data)
        # Divides each value by the number of files
        nodes = ntwk.nodes_iter()
        edges = ntwk.edges_iter()
        iflogger.info('Total network has {n} edges'.format(n=ntwk.number_of_edges()))
        avg_ntwk = nx.Graph()
        newdata = {}
        for node in nodes:
            data = ntwk.node[node]
            newdata = data
            if data.has_key('value'):
                newdata['value'] = data['value'] / len(in_files)
                ntwk.node[node]['value'] = newdata
            avg_ntwk.add_node(node, newdata)
        edge_dict = {}
        edge_dict['count'] = np.zeros((avg_ntwk.number_of_nodes(), avg_ntwk.number_of_nodes()))
        for edge in edges:
            data = ntwk.edge[edge[0]][edge[1]]
            # Keep only edges present in at least count_to_keep_edge inputs.
            if ntwk.edge[edge[0]][edge[1]]['count'] >= count_to_keep_edge:
                for key in data.keys():
                    if not key == 'count':
                        data[key] = data[key] / len(in_files)
                ntwk.edge[edge[0]][edge[1]] = data
                avg_ntwk.add_edge(edge[0],edge[1],data)
            edge_dict['count'][edge[0]-1][edge[1]-1] = ntwk.edge[edge[0]][edge[1]]['count']
        iflogger.info('After thresholding, the average network has has {n} edges'.format(n=avg_ntwk.number_of_edges()))
        # Build one dense matrix per edge attribute for the MATLAB export.
        avg_edges = avg_ntwk.edges_iter()
        for edge in avg_edges:
            data = avg_ntwk.edge[edge[0]][edge[1]]
            for key in data.keys():
                if not key == 'count':
                    edge_dict[key] = np.zeros((avg_ntwk.number_of_nodes(), avg_ntwk.number_of_nodes()))
                    edge_dict[key][edge[0]-1][edge[1]-1] = data[key]
        for key in edge_dict.keys():
            tmp = {}
            network_name = group_id + '_' + key + '_average.mat'
            matlab_network_list.append(op.abspath(network_name))
            tmp[key] = edge_dict[key]
            sio.savemat(op.abspath(network_name), tmp)
            iflogger.info('Saving average network for key: {k} as {out}'.format(k=key, out=op.abspath(network_name)))
    # Writes the networks and returns the name
    network_name = group_id + '_average.pck'
    nx.write_gpickle(avg_ntwk, op.abspath(network_name))
    iflogger.info('Saving average network as {out}'.format(out=op.abspath(network_name)))
    avg_ntwk = fix_keys_for_gexf(avg_ntwk)
    network_name = group_id + '_average.gexf'
    nx.write_gexf(avg_ntwk, op.abspath(network_name))
    iflogger.info('Saving average network as {out}'.format(out=op.abspath(network_name)))
    return network_name, matlab_network_list
def compute_node_measures(ntwk, calculate_cliques=False):
    """
    These return node-based measures

    Each entry of the returned dict is a numpy array with one value per
    node (NetworkX 1.x API: the per-node functions return dicts, of which
    only the values are kept).
    """
    iflogger.info('Computing node measures:')
    measures = {}
    iflogger.info('...Computing degree...')
    measures['degree'] = np.array(ntwk.degree().values())
    iflogger.info('...Computing load centrality...')
    measures['load_centrality'] = np.array(nx.load_centrality(ntwk).values())
    iflogger.info('...Computing betweenness centrality...')
    measures['betweenness_centrality'] = np.array(nx.betweenness_centrality(ntwk).values())
    iflogger.info('...Computing degree centrality...')
    measures['degree_centrality'] = np.array(nx.degree_centrality(ntwk).values())
    iflogger.info('...Computing closeness centrality...')
    measures['closeness_centrality'] = np.array(nx.closeness_centrality(ntwk).values())
    # iflogger.info('...Computing eigenvector centrality...')
    # measures['eigenvector_centrality'] = np.array(nx.eigenvector_centrality(ntwk, max_iter=100000).values())
    iflogger.info('...Computing triangles...')
    measures['triangles'] = np.array(nx.triangles(ntwk).values())
    iflogger.info('...Computing clustering...')
    measures['clustering'] = np.array(nx.clustering(ntwk).values())
    iflogger.info('...Computing k-core number')
    measures['core_number'] = np.array(nx.core_number(ntwk).values())
    iflogger.info('...Identifying network isolates...')
    # Binary column vector flagging isolated nodes; the '- 1' assumes node
    # IDs are the integers 1..N -- TODO confirm against the input networks.
    isolate_list = nx.isolates(ntwk)
    binarized = np.zeros((ntwk.number_of_nodes(), 1))
    for value in isolate_list:
        value = value - 1 # Zero indexing
        binarized[value] = 1
    measures['isolates'] = binarized
    if calculate_cliques:
        # Clique measures are optional: they can be very expensive.
        iflogger.info('...Calculating node clique number')
        measures['node_clique_number'] = np.array(nx.node_clique_number(ntwk).values())
        iflogger.info('...Computing number of cliques for each node...')
        measures['number_of_cliques'] = np.array(nx.number_of_cliques(ntwk).values())
    return measures
def compute_edge_measures(ntwk):
    """
    These return edge-based measures
    """
    iflogger.info('Computing edge measures:')
    measures = {}
    # All candidate edge measures (google_matrix, hub_matrix,
    # authority_matrix) are currently disabled because they produce very
    # large outputs for networks with many (500k+) edges, so the returned
    # dict is empty for now.
    return measures
def compute_dict_measures(ntwk):
    """
    Returns a dictionary
    """
    iflogger.info('Computing measures which return a dictionary:')
    iflogger.info('...Computing rich club coefficient...')
    return {'rich_club_coef': nx.rich_club_coefficient(ntwk)}
def compute_singlevalued_measures(ntwk, weighted=True, calculate_cliques=False):
    """
    Returns a single value per network
    """
    iflogger.info('Computing single valued measures:')
    measures = {}
    iflogger.info('...Computing degree assortativity (pearson number) ...')
    try:
        measures['degree_pearsonr'] = nx.degree_pearsonr(ntwk)
    except AttributeError:
        # NetworkX 1.6 renamed this function.
        measures['degree_pearsonr'] = nx.degree_pearson_correlation_coefficient(ntwk)
    iflogger.info('...Computing degree assortativity...')
    try:
        measures['degree_assortativity'] = nx.degree_assortativity(ntwk)
    except AttributeError:
        measures['degree_assortativity'] = nx.degree_assortativity_coefficient(ntwk)
    iflogger.info('...Computing transitivity...')
    measures['transitivity'] = nx.transitivity(ntwk)
    iflogger.info('...Computing number of connected_components...')
    measures['number_connected_components'] = nx.number_connected_components(ntwk)
    iflogger.info('...Computing graph density...')
    measures['graph_density'] = nx.density(ntwk)
    iflogger.info('...Recording number of edges...')
    measures['number_of_edges'] = nx.number_of_edges(ntwk)
    iflogger.info('...Recording number of nodes...')
    measures['number_of_nodes'] = nx.number_of_nodes(ntwk)
    iflogger.info('...Computing average clustering...')
    measures['average_clustering'] = nx.average_clustering(ntwk)
    iflogger.info('...Calculating average shortest path length...')
    if nx.is_connected(ntwk):
        asp_graph = ntwk
    else:
        # Disconnected graph: use the first connected-component subgraph.
        asp_graph = nx.connected_component_subgraphs(ntwk)[0]
    measures['average_shortest_path_length'] = nx.average_shortest_path_length(asp_graph, weighted)
    if calculate_cliques:
        iflogger.info('...Computing graph clique number...')
        measures['graph_clique_number'] = nx.graph_clique_number(ntwk)  # can hit out-of-memory errors
    return measures
def compute_network_measures(ntwk):
    """Return network-decomposition measures; all are currently disabled."""
    measures = {}
    # k-core / k-shell / k-crust identification disabled; kept for reference:
    # measures['k_core'] = nx.k_core(ntwk)
    # measures['k_shell'] = nx.k_shell(ntwk)
    # measures['k_crust'] = nx.k_crust(ntwk)
    return measures
def add_node_data(node_array, ntwk):
    """Build a new graph whose nodes carry per-node values from ``node_array``.

    Node ids are 1-indexed in the input network; node 0 is skipped and
    ``node_array[idx - 1]`` is stored under the ``'value'`` key of each node's
    attribute dictionary (NetworkX 1.x ``add_node(n, attr_dict)`` API).
    """
    node_ntwk = nx.Graph()
    for idx, data in ntwk.nodes_iter(data=True):
        node_id = int(idx)
        if node_id == 0:
            continue
        data.update({'value': node_array[node_id - 1]})
        node_ntwk.add_node(node_id, data)
    return node_ntwk
def add_edge_data(edge_array, ntwk, above=0, below=0):
    """Copy ``ntwk`` and attach per-edge values from ``edge_array``.

    For each nonzero entry ``edge_array[x, y]`` whose value passes the
    threshold test (``value <= below or value >= above``; the defaults of 0
    accept every nonzero value), the edge (x+1, y+1) gets an attribute dict
    with the value stored under ``'value'``. If the edge already exists its
    old attribute dict is merged in (old keys win, matching the original
    ``dict.update`` order). Uses the NetworkX 1.x ``add_edge(u, v, attr_dict)``
    API.
    """
    edge_ntwk = ntwk.copy()
    for x, row in enumerate(edge_array):
        for y in range(0, np.max(np.shape(edge_array[x]))):
            if not edge_array[x, y] == 0:
                # FIX: create a fresh dict per edge. The original reused one
                # mutable dict for every edge, so all stored edges aliased the
                # same object and ended up with the last edge's value.
                data = {'value': edge_array[x, y]}
                if data['value'] <= below or data['value'] >= above:
                    if edge_ntwk.has_edge(x + 1, y + 1):
                        old_edge_dict = edge_ntwk.edge[x + 1][y + 1]
                        edge_ntwk.remove_edge(x + 1, y + 1)
                        data.update(old_edge_dict)
                    edge_ntwk.add_edge(x + 1, y + 1, data)
    return edge_ntwk
class NetworkXMetricsInputSpec(BaseInterfaceInputSpec):
    """Inputs for :class:`NetworkXMetrics`."""
    in_file = File(exists=True, mandatory=True, desc='Input network')
    out_k_core = File('k_core', usedefault=True,
                      desc='Computed k-core network stored as a NetworkX pickle.')
    out_k_shell = File('k_shell', usedefault=True,
                       desc='Computed k-shell network stored as a NetworkX pickle.')
    out_k_crust = File('k_crust', usedefault=True,
                       desc='Computed k-crust network stored as a NetworkX pickle.')
    treat_as_weighted_graph = traits.Bool(True, usedefault=True,
                                          desc='Some network metrics can be calculated while considering only a binarized version of the graph')
    compute_clique_related_measures = traits.Bool(False, usedefault=True,
                                                  desc='Computing clique-related measures (e.g. node clique number) can be very time consuming')
    # FIX: desc said 'node metrics' (copy-paste of the line below); this trait
    # names the *global* metrics output file.
    out_global_metrics_matlab = File(genfile=True, desc='Output global metrics in MATLAB .mat format')
    out_node_metrics_matlab = File(genfile=True, desc='Output node metrics in MATLAB .mat format')
    out_edge_metrics_matlab = File(genfile=True, desc='Output edge metrics in MATLAB .mat format')
    out_pickled_extra_measures = File('extra_measures', usedefault=True,
                                      desc='Network measures for group 1 that return dictionaries stored as a Pickle.')
class NetworkXMetricsOutputSpec(TraitedSpec):
    """Outputs of :class:`NetworkXMetrics`."""
    # Aggregated file lists
    gpickled_network_files = OutputMultiPath(File(desc='Output gpickled network files'))
    matlab_matrix_files = OutputMultiPath(File(desc='Output network metrics in MATLAB .mat format'))
    # Per-category MATLAB outputs
    global_measures_matlab = File(desc='Output global metrics in MATLAB .mat format')
    node_measures_matlab = File(desc='Output node metrics in MATLAB .mat format')
    edge_measures_matlab = File(desc='Output edge metrics in MATLAB .mat format')
    # Per-category pickled-network outputs
    node_measure_networks = OutputMultiPath(File(desc='Output gpickled network files for all node-based measures'))
    edge_measure_networks = OutputMultiPath(File(desc='Output gpickled network files for all edge-based measures'))
    k_networks = OutputMultiPath(File(desc='Output gpickled network files for the k-core, k-shell, and k-crust networks'))
    k_core = File(desc='Computed k-core network stored as a NetworkX pickle.')
    k_shell = File(desc='Computed k-shell network stored as a NetworkX pickle.')
    k_crust = File(desc='Computed k-crust network stored as a NetworkX pickle.')
    # Dictionary-valued measures
    pickled_extra_measures = File(desc='Network measures for the group that return dictionaries, stored as a Pickle.')
    matlab_dict_measures = OutputMultiPath(File(desc='Network measures for the group that return dictionaries, stored as matlab matrices.'))
class NetworkXMetrics(BaseInterface):
    """
    Calculates and outputs NetworkX-based measures for an input network

    Example
    -------
    >>> import nipype.interfaces.cmtk as cmtk
    >>> nxmetrics = cmtk.NetworkXMetrics()
    >>> nxmetrics.inputs.in_file = 'subj1.pck'
    >>> nxmetrics.run() # doctest: +SKIP
    """
    input_spec = NetworkXMetricsInputSpec
    output_spec = NetworkXMetricsOutputSpec

    def _run_interface(self, runtime):
        # Generated file lists are stashed in module-level globals because
        # _list_outputs() runs after this method and needs them.
        global gpickled, nodentwks, edgentwks, kntwks, matlab
        gpickled = list()
        nodentwks = list()
        edgentwks = list()
        kntwks = list()
        matlab = list()
        ntwk = nx.read_gpickle(self.inputs.in_file)

        # Each block computes, writes, and saves a measure
        # The names are then added to the output .pck file list
        # In the case of the degeneracy networks, they are given specified output names
        calculate_cliques = self.inputs.compute_clique_related_measures
        weighted = self.inputs.treat_as_weighted_graph

        # Global (single-valued) measures -> one MATLAB file
        global_measures = compute_singlevalued_measures(ntwk, weighted, calculate_cliques)
        if isdefined(self.inputs.out_global_metrics_matlab):
            global_out_file = op.abspath(self.inputs.out_global_metrics_matlab)
        else:
            global_out_file = op.abspath(self._gen_outfilename('globalmetrics', 'mat'))
        sio.savemat(global_out_file, global_measures, oned_as='column')
        matlab.append(global_out_file)

        # Node-based measures -> one gpickled network per measure + MATLAB file
        node_measures = compute_node_measures(ntwk, calculate_cliques)
        for key in node_measures.keys():
            newntwk = add_node_data(node_measures[key], ntwk)
            out_file = op.abspath(self._gen_outfilename(key, 'pck'))
            nx.write_gpickle(newntwk, out_file)
            nodentwks.append(out_file)
        if isdefined(self.inputs.out_node_metrics_matlab):
            node_out_file = op.abspath(self.inputs.out_node_metrics_matlab)
        else:
            node_out_file = op.abspath(self._gen_outfilename('nodemetrics', 'mat'))
        sio.savemat(node_out_file, node_measures, oned_as='column')
        matlab.append(node_out_file)
        gpickled.extend(nodentwks)

        # Edge-based measures -> one gpickled network per measure + MATLAB file
        edge_measures = compute_edge_measures(ntwk)
        for key in edge_measures.keys():
            newntwk = add_edge_data(edge_measures[key], ntwk)
            out_file = op.abspath(self._gen_outfilename(key, 'pck'))
            nx.write_gpickle(newntwk, out_file)
            edgentwks.append(out_file)
        if isdefined(self.inputs.out_edge_metrics_matlab):
            edge_out_file = op.abspath(self.inputs.out_edge_metrics_matlab)
        else:
            edge_out_file = op.abspath(self._gen_outfilename('edgemetrics', 'mat'))
        sio.savemat(edge_out_file, edge_measures, oned_as='column')
        matlab.append(edge_out_file)
        gpickled.extend(edgentwks)

        # Degeneracy (k-core/k-shell/k-crust) networks get user-specified names
        ntwk_measures = compute_network_measures(ntwk)
        for key in ntwk_measures.keys():
            if key == 'k_core':
                out_file = op.abspath(self._gen_outfilename(self.inputs.out_k_core, 'pck'))
            if key == 'k_shell':
                out_file = op.abspath(self._gen_outfilename(self.inputs.out_k_shell, 'pck'))
            if key == 'k_crust':
                out_file = op.abspath(self._gen_outfilename(self.inputs.out_k_crust, 'pck'))
            nx.write_gpickle(ntwk_measures[key], out_file)
            kntwks.append(out_file)
        gpickled.extend(kntwks)

        out_pickled_extra_measures = op.abspath(self._gen_outfilename(self.inputs.out_pickled_extra_measures, 'pck'))
        dict_measures = compute_dict_measures(ntwk)
        iflogger.info('Saving extra measure file to {path} in Pickle format'.format(path=op.abspath(out_pickled_extra_measures)))
        # FIX: open the pickle output in binary mode (required by pickle under
        # Python 3, harmless under Python 2), use a context manager so the
        # handle is always closed, and stop shadowing the `file` builtin.
        with open(out_pickled_extra_measures, 'wb') as pkl_file:
            pickle.dump(dict_measures, pkl_file)

        iflogger.info('Saving MATLAB measures as {m}'.format(m=matlab))

        # Loops through the measures which return a dictionary,
        # converts the keys and values to a Numpy array,
        # stacks them together, and saves them in a MATLAB .mat file via Scipy
        global dicts
        dicts = list()
        for idx, key in enumerate(dict_measures.keys()):
            for idxd, keyd in enumerate(dict_measures[key].keys()):
                if idxd == 0:
                    nparraykeys = np.array(keyd)
                    nparrayvalues = np.array(dict_measures[key][keyd])
                else:
                    nparraykeys = np.append(nparraykeys, np.array(keyd))
                    values = np.array(dict_measures[key][keyd])
                    nparrayvalues = np.append(nparrayvalues, values)
            nparray = np.vstack((nparraykeys, nparrayvalues))
            out_file = op.abspath(self._gen_outfilename(key, 'mat'))
            npdict = {}
            npdict[key] = nparray
            sio.savemat(out_file, npdict, oned_as='column')
            dicts.append(out_file)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs["k_core"] = op.abspath(self._gen_outfilename(self.inputs.out_k_core, 'pck'))
        outputs["k_shell"] = op.abspath(self._gen_outfilename(self.inputs.out_k_shell, 'pck'))
        outputs["k_crust"] = op.abspath(self._gen_outfilename(self.inputs.out_k_crust, 'pck'))
        outputs["gpickled_network_files"] = gpickled
        outputs["k_networks"] = kntwks
        outputs["node_measure_networks"] = nodentwks
        outputs["edge_measure_networks"] = edgentwks
        outputs["matlab_dict_measures"] = dicts
        outputs["global_measures_matlab"] = op.abspath(self._gen_outfilename('globalmetrics', 'mat'))
        outputs["node_measures_matlab"] = op.abspath(self._gen_outfilename('nodemetrics', 'mat'))
        outputs["edge_measures_matlab"] = op.abspath(self._gen_outfilename('edgemetrics', 'mat'))
        outputs["matlab_matrix_files"] = [outputs["global_measures_matlab"],
                                          outputs["node_measures_matlab"],
                                          outputs["edge_measures_matlab"]]
        outputs["pickled_extra_measures"] = op.abspath(self._gen_outfilename(self.inputs.out_pickled_extra_measures, 'pck'))
        return outputs

    def _gen_outfilename(self, name, ext):
        # e.g. ('nodemetrics', 'mat') -> 'nodemetrics.mat'
        return name + '.' + ext
class AverageNetworksInputSpec(BaseInterfaceInputSpec):
    """Inputs for :class:`AverageNetworks`."""
    in_files = InputMultiPath(File(exists=True), mandatory=True,
                              desc='Networks for a group of subjects')
    resolution_network_file = File(exists=True,
                                   desc='Parcellation files from Connectome Mapping Toolkit. This is not necessary'
                                        ', but if included, the interface will output the statistical maps as networkx graphs.')
    group_id = traits.Str('group1', usedefault=True, desc='ID for group')
    out_gpickled_groupavg = File(desc='Average network saved as a NetworkX .pck')
    out_gexf_groupavg = File(desc='Average network saved as a .gexf file')
class AverageNetworksOutputSpec(TraitedSpec):
    """Outputs of :class:`AverageNetworks`."""
    gpickled_groupavg = File(desc='Average network saved as a NetworkX .pck')
    gexf_groupavg = File(desc='Average network saved as a .gexf file')
    # FIX: desc was a copy-paste of the .gexf line above; these are the
    # MATLAB .mat outputs produced by average_networks().
    matlab_groupavgs = OutputMultiPath(File(desc='Average networks saved as MATLAB .mat files'))
class AverageNetworks(BaseInterface):
    """
    Calculates and outputs the average network given a set of input NetworkX gpickle files

    This interface will only keep an edge in the averaged network if that edge is present in
    at least half of the input networks.

    Example
    -------
    >>> import nipype.interfaces.cmtk as cmtk
    >>> avg = cmtk.AverageNetworks()
    >>> avg.inputs.in_files = ['subj1.pck', 'subj2.pck']
    >>> avg.run() # doctest: +SKIP
    """
    input_spec = AverageNetworksInputSpec
    output_spec = AverageNetworksOutputSpec

    def _run_interface(self, runtime):
        # Fall back to the first input network when no parcellation
        # resolution file was supplied.
        if isdefined(self.inputs.resolution_network_file):
            ntwk_res_file = self.inputs.resolution_network_file
        else:
            ntwk_res_file = self.inputs.in_files[0]
        # Stored globally so _list_outputs() can report the .mat files.
        global matlab_network_list
        network_name, matlab_network_list = average_networks(self.inputs.in_files, ntwk_res_file, self.inputs.group_id)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        basename = self.inputs.group_id + '_average'
        if isdefined(self.inputs.out_gpickled_groupavg):
            outputs["gpickled_groupavg"] = op.abspath(self.inputs.out_gpickled_groupavg)
        else:
            outputs["gpickled_groupavg"] = op.abspath(self._gen_outfilename(basename, 'pck'))
        if isdefined(self.inputs.out_gexf_groupavg):
            outputs["gexf_groupavg"] = op.abspath(self.inputs.out_gexf_groupavg)
        else:
            outputs["gexf_groupavg"] = op.abspath(self._gen_outfilename(basename, 'gexf'))
        outputs["matlab_groupavgs"] = matlab_network_list
        return outputs

    def _gen_outfilename(self, name, ext):
        # e.g. ('group1_average', 'pck') -> 'group1_average.pck'
        return name + '.' + ext
|
|
"""
Get the normalized best template to do flux calibration.
"""
#- TODO: refactor algorithmic code into a separate module/function
import argparse
import sys
import numpy as np
from astropy.io import fits
from astropy import units
from astropy.table import Table
import astropy.coordinates as acoo
import desispec.fluxcalibration
from desispec import io
from desispec.fluxcalibration import match_templates,normalize_templates,isStdStar
from desispec.interpolation import resample_flux
from desiutil.log import get_logger
from desispec.parallel import default_nproc
from desispec.io.filters import load_legacy_survey_filter, load_gaia_filter
from desiutil.dust import dust_transmission,extinction_total_to_selective_ratio, SFDMap, gaia_extinction
from desispec.fiberbitmasking import get_fiberbitmasked_frame
def parse(options=None):
    """Parse command-line options for the standard-star fitting script.

    When ``options`` is None the process argv is used; otherwise ``options``
    is parsed directly (useful for tests and pipeline drivers).
    """
    parser = argparse.ArgumentParser(description="Fit of standard star spectra in frames.")
    parser.add_argument('--frames', type=str, default=None, required=True, nargs='*',
                        help='list of path to DESI frame fits files (needs to be same exposure, spectro)')
    parser.add_argument('--skymodels', type=str, default=None, required=True, nargs='*',
                        help='list of path to DESI sky model fits files (needs to be same exposure, spectro)')
    parser.add_argument('--fiberflats', type=str, default=None, required=True, nargs='*',
                        help='list of path to DESI fiberflats fits files (needs to be same exposure, spectro)')
    parser.add_argument('--starmodels', type=str,
                        help='path of spectro-photometric stellar spectra fits')
    parser.add_argument('-o', '--outfile', type=str,
                        help='output file for normalized stdstar model flux')
    parser.add_argument('--ncpu', type=int, default=default_nproc, required=False,
                        help='use ncpu for multiprocessing')
    parser.add_argument('--delta-color', type=float, default=0.2, required=False,
                        help='max delta-color for the selection of standard stars (on top of meas. errors)')
    parser.add_argument('--color', type=str, default=None,
                        choices=['G-R', 'R-Z', 'GAIA-BP-RP', 'GAIA-G-RP'], required=False,
                        help='color for selection of standard stars')
    parser.add_argument('--z-max', type=float, default=0.008, required=False,
                        help='max peculiar velocity (blue/red)shift range')
    parser.add_argument('--z-res', type=float, default=0.00002, required=False,
                        help='dz grid resolution')
    parser.add_argument('--template-error', type=float, default=0.1, required=False,
                        help='fractional template error used in chi2 computation (about 0.1 for BOSS b1)')
    parser.add_argument('--maxstdstars', type=int, default=30,
                        help='Maximum number of stdstars to include')
    parser.add_argument('--std-targetids', type=int, default=None, nargs='*',
                        help='List of TARGETIDs of standards overriding the targeting info')
    parser.add_argument('--mpi', action='store_true', help='Use MPI')
    parser.add_argument('--ignore-gpu', action='store_true', help='Ignore GPU, if available')

    log = get_logger()
    if options is None:
        args = parser.parse_args()
        cmd = ' '.join(sys.argv)
    else:
        args = parser.parse_args(options)
        cmd = 'desi_fit_stdstars ' + ' '.join(options)
    log.info('RUNNING {}'.format(cmd))
    return args
def safe_read_key(header, key):
    """Read ``key`` from ``header``, retrying with the FITS-style keyword.

    The first lookup uses ``key`` verbatim; on a KeyError (or a stored None)
    it retries with ``key`` left-justified to 8 characters and upper-cased.
    A miss on the second lookup propagates KeyError to the caller.
    """
    try:
        value = header[key]
    except KeyError:
        value = None
    if value is None:
        # second try with the 8-character padded upper-case FITS keyword
        value = header[key.ljust(8).upper()]
    return value
def get_gaia_ab_correction():
    """
    Get the dictionary with corrections from AB magnitudes to
    Vega magnitudes (as the official gaia catalog is in vega)
    """
    # Revised DR2 zero points from https://www.cosmos.esa.int/web/gaia/iow_20180316
    vega_zpt = dict(G=25.6914396869,
                    BP=25.3488107670,
                    RP=24.7626744847)
    ab_zpt = dict(G=25.7915509947,
                  BP=25.3861560855,
                  RP=25.1161664528)
    # These offsets must be *added* to simulated AB magnitudes to obtain Vega.
    return {'GAIA-' + band: vega_zpt[band] - ab_zpt[band] for band in vega_zpt}
def get_magnitude(stdwave, model, model_filters, cur_filt):
    """ Obtain magnitude for a filter taking into
    account the ab/vega correction if needed.
    We assume the flux is in units of 1e-17 erg/s/cm^2/A
    """
    fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom
    # Gaia filters require the AB -> Vega correction; others need none.
    corr = get_gaia_ab_correction()[cur_filt] if cur_filt[:5] == 'GAIA-' else 0
    if cur_filt not in model_filters:
        raise Exception(('Filter {} is not present in models').format(cur_filt))
    # stdwave.copy() works around https://github.com/desihub/speclite/issues/34
    return model_filters[cur_filt].get_ab_magnitude(model * fluxunits, stdwave.copy()) + corr
def main(args, comm=None) :
""" finds the best models of all standard stars in the frame
and normlize the model flux. Output is written to a file and will be called for calibration.
"""
log = get_logger()
log.info("mag delta %s = %f (for the pre-selection of stellar models)"%(args.color,args.delta_color))
if args.mpi or comm is not None:
from mpi4py import MPI
if comm is None:
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
if rank == 0:
log.info('mpi parallelizing with {} ranks'.format(size))
else:
comm = None
rank = 0
size = 1
# disable multiprocess by forcing ncpu = 1 when using MPI
if comm is not None:
ncpu = 1
if rank == 0:
log.info('disabling multiprocess (forcing ncpu = 1)')
else:
ncpu = args.ncpu
if ncpu > 1:
if rank == 0:
log.info('multiprocess parallelizing with {} processes'.format(ncpu))
if args.ignore_gpu and desispec.fluxcalibration.use_gpu:
# Opt-out of GPU usage
desispec.fluxcalibration.use_gpu = False
if rank == 0:
log.info('ignoring GPU')
elif desispec.fluxcalibration.use_gpu:
# Nothing to do here, GPU is used by default if available
if rank == 0:
log.info('using GPU')
else:
if rank == 0:
log.info('GPU not available')
std_targetids = None
if args.std_targetids is not None:
std_targetids = args.std_targetids
# READ DATA
############################################
# First loop through and group by exposure and spectrograph
frames_by_expid = {}
rows = list()
for filename in args.frames :
log.info("reading %s"%filename)
frame=io.read_frame(filename)
night = safe_read_key(frame.meta,"NIGHT")
expid = safe_read_key(frame.meta,"EXPID")
camera = safe_read_key(frame.meta,"CAMERA").strip().lower()
rows.append( (night, expid, camera) )
spec = camera[1]
uniq_key = (expid,spec)
if uniq_key in frames_by_expid.keys():
frames_by_expid[uniq_key][camera] = frame
else:
frames_by_expid[uniq_key] = {camera: frame}
input_frames_table = Table(rows=rows, names=('NIGHT', 'EXPID', 'CAMERA'))
frames={}
flats={}
skies={}
spectrograph=None
starfibers=None
starindices=None
fibermap=None
# For each unique expid,spec pair, get the logical OR of the FIBERSTATUS for all
# cameras and then proceed with extracting the frame information
# once we modify the fibermap FIBERSTATUS
for (expid,spec),camdict in frames_by_expid.items():
fiberstatus = None
for frame in camdict.values():
if fiberstatus is None:
fiberstatus = frame.fibermap['FIBERSTATUS'].data.copy()
else:
fiberstatus |= frame.fibermap['FIBERSTATUS']
for camera,frame in camdict.items():
frame.fibermap['FIBERSTATUS'] |= fiberstatus
# Set fibermask flagged spectra to have 0 flux and variance
frame = get_fiberbitmasked_frame(frame,bitmask='stdstars',ivar_framemask=True)
frame_fibermap = frame.fibermap
if std_targetids is None:
frame_starindices = np.where(isStdStar(frame_fibermap))[0]
else:
frame_starindices = np.nonzero(np.isin(frame_fibermap['TARGETID'], std_targetids))[0]
#- Confirm that all fluxes have entries but trust targeting bits
#- to get basic magnitude range correct
keep_legacy = np.ones(len(frame_starindices), dtype=bool)
for colname in ['FLUX_G', 'FLUX_R', 'FLUX_Z']: #- and W1 and W2?
keep_legacy &= frame_fibermap[colname][frame_starindices] > 10**((22.5-30)/2.5)
keep_legacy &= frame_fibermap[colname][frame_starindices] < 10**((22.5-0)/2.5)
keep_gaia = np.ones(len(frame_starindices), dtype=bool)
for colname in ['G', 'BP', 'RP']: #- and W1 and W2?
keep_gaia &= frame_fibermap['GAIA_PHOT_'+colname+'_MEAN_MAG'][frame_starindices] > 10
keep_gaia &= frame_fibermap['GAIA_PHOT_'+colname+'_MEAN_MAG'][frame_starindices] < 20
n_legacy_std = keep_legacy.sum()
n_gaia_std = keep_gaia.sum()
keep = keep_legacy | keep_gaia
# accept both types of standards for the time being
# keep the indices for gaia/legacy subsets
gaia_indices = keep_gaia[keep]
legacy_indices = keep_legacy[keep]
frame_starindices = frame_starindices[keep]
if spectrograph is None :
spectrograph = frame.spectrograph
fibermap = frame_fibermap
starindices=frame_starindices
starfibers=fibermap["FIBER"][starindices]
elif spectrograph != frame.spectrograph :
log.error("incompatible spectrographs {} != {}".format(spectrograph,frame.spectrograph))
raise ValueError("incompatible spectrographs {} != {}".format(spectrograph,frame.spectrograph))
elif starindices.size != frame_starindices.size or np.sum(starindices!=frame_starindices)>0 :
log.error("incompatible fibermap")
raise ValueError("incompatible fibermap")
if not camera in frames :
frames[camera]=[]
frames[camera].append(frame)
# possibly cleanup memory
del frames_by_expid
for filename in args.skymodels :
log.info("reading %s"%filename)
sky=io.read_sky(filename)
camera=safe_read_key(sky.header,"CAMERA").strip().lower()
if not camera in skies :
skies[camera]=[]
skies[camera].append(sky)
for filename in args.fiberflats :
log.info("reading %s"%filename)
flat=io.read_fiberflat(filename)
camera=safe_read_key(flat.header,"CAMERA").strip().lower()
# NEED TO ADD MORE CHECKS
if camera in flats:
log.warning("cannot handle several flats of same camera (%s), will use only the first one"%camera)
#raise ValueError("cannot handle several flats of same camera (%s)"%camera)
else :
flats[camera]=flat
# if color is not specified we decide on the fly
color = args.color
if color is not None:
if color[:4] == 'GAIA':
legacy_color = False
gaia_color = True
else:
legacy_color = True
gaia_color = False
if n_legacy_std == 0 and legacy_color:
raise Exception('Specified Legacy survey color, but no legacy standards')
if n_gaia_std == 0 and gaia_color:
raise Exception('Specified gaia color, but no gaia stds')
if starindices.size == 0:
log.error("no STD star found in fibermap")
raise ValueError("no STD star found in fibermap")
log.info("found %d STD stars" % starindices.size)
if n_legacy_std == 0:
gaia_std = True
if color is None:
color = 'GAIA-BP-RP'
else:
gaia_std = False
if color is None:
color='G-R'
if n_gaia_std > 0:
log.info('Gaia standards found but not used')
if gaia_std:
# The name of the reference filter to which we normalize the flux
ref_mag_name = 'GAIA-G'
color_band1, color_band2 = ['GAIA-'+ _ for _ in color[5:].split('-')]
log.info("Using Gaia standards with color {} and normalizing to {}".format(color, ref_mag_name))
# select appropriate subset of standards
starindices = starindices[gaia_indices]
starfibers = starfibers[gaia_indices]
else:
ref_mag_name = 'R'
color_band1, color_band2 = color.split('-')
log.info("Using Legacy standards with color {} and normalizing to {}".format(color, ref_mag_name))
# select appropriate subset of standards
starindices = starindices[legacy_indices]
starfibers = starfibers[legacy_indices]
# excessive check but just in case
if not color in ['G-R', 'R-Z', 'GAIA-BP-RP', 'GAIA-G-RP']:
raise ValueError('Unknown color {}'.format(color))
# log.warning("Not using flux errors for Standard Star fits!")
# DIVIDE FLAT AND SUBTRACT SKY , TRIM DATA
############################################
# since poping dict, we need to copy keys to iterate over to avoid
# RuntimeError due to changing dict
frame_cams = list(frames.keys())
for cam in frame_cams:
if not cam in skies:
log.warning("Missing sky for %s"%cam)
frames.pop(cam)
continue
if not cam in flats:
log.warning("Missing flat for %s"%cam)
frames.pop(cam)
continue
flat=flats[cam]
for frame,sky in zip(frames[cam],skies[cam]) :
frame.flux = frame.flux[starindices]
frame.ivar = frame.ivar[starindices]
frame.ivar *= (frame.mask[starindices] == 0)
frame.ivar *= (sky.ivar[starindices] != 0)
frame.ivar *= (sky.mask[starindices] == 0)
frame.ivar *= (flat.ivar[starindices] != 0)
frame.ivar *= (flat.mask[starindices] == 0)
frame.flux *= ( frame.ivar > 0) # just for clean plots
for star in range(frame.flux.shape[0]) :
ok=np.where((frame.ivar[star]>0)&(flat.fiberflat[star]!=0))[0]
if ok.size > 0 :
frame.flux[star] = frame.flux[star]/flat.fiberflat[star] - sky.flux[star]
frame.resolution_data = frame.resolution_data[starindices]
nframes=len(frames[cam])
if nframes>1 :
# optimal weights for the coaddition = ivar*throughput, not directly ivar,
# we estimate the relative throughput with median fluxes at this stage
medflux=np.zeros(nframes)
for i,frame in enumerate(frames[cam]) :
if np.sum(frame.ivar>0) == 0 :
log.error("ivar=0 for all std star spectra in frame {}-{:08d}".format(cam,frame.meta["EXPID"]))
else :
medflux[i] = np.median(frame.flux[frame.ivar>0])
log.debug("medflux = {}".format(medflux))
medflux *= (medflux>0)
if np.sum(medflux>0)==0 :
log.error("mean median flux = 0, for all stars in fibers {}".format(list(frames[cam][0].fibermap["FIBER"][starindices])))
sys.exit(12)
mmedflux = np.mean(medflux[medflux>0])
weights=medflux/mmedflux
log.info("coadding {} exposures in cam {}, w={}".format(nframes,cam,weights))
sw=np.zeros(frames[cam][0].flux.shape)
swf=np.zeros(frames[cam][0].flux.shape)
swr=np.zeros(frames[cam][0].resolution_data.shape)
for i,frame in enumerate(frames[cam]) :
sw += weights[i]*frame.ivar
swf += weights[i]*frame.ivar*frame.flux
swr += weights[i]*frame.ivar[:,None,:]*frame.resolution_data
coadded_frame = frames[cam][0]
coadded_frame.ivar = sw
coadded_frame.flux = swf/(sw+(sw==0))
coadded_frame.resolution_data = swr/((sw+(sw==0))[:,None,:])
frames[cam] = [ coadded_frame ]
# CHECK S/N
############################################
# for each band in 'brz', record quadratic sum of median S/N across wavelength
snr=dict()
for band in ['b','r','z'] :
snr[band]=np.zeros(starindices.size)
for cam in frames :
band=cam[0].lower()
for frame in frames[cam] :
msnr = np.median( frame.flux * np.sqrt( frame.ivar ) / np.sqrt(np.gradient(frame.wave)) , axis=1 ) # median SNR per sqrt(A.)
msnr *= (msnr>0)
snr[band] = np.sqrt( snr[band]**2 + msnr**2 )
log.info("SNR(B) = {}".format(snr['b']))
###############################
max_number_of_stars = 50
min_blue_snr = 4.
###############################
indices=np.argsort(snr['b'])[::-1][:max_number_of_stars]
validstars = np.where(snr['b'][indices]>min_blue_snr)[0]
#- TODO: later we filter on models based upon color, thus throwing
#- away very blue stars for which we don't have good models.
log.info("Number of stars with median stacked blue S/N > {} /sqrt(A) = {}".format(min_blue_snr,validstars.size))
if validstars.size == 0 :
log.error("No valid star")
sys.exit(12)
validstars = indices[validstars]
for band in ['b','r','z'] :
snr[band]=snr[band][validstars]
log.info("BLUE SNR of selected stars={}".format(snr['b']))
for cam in frames :
for frame in frames[cam] :
frame.flux = frame.flux[validstars]
frame.ivar = frame.ivar[validstars]
frame.resolution_data = frame.resolution_data[validstars]
starindices = starindices[validstars]
starfibers = starfibers[validstars]
nstars = starindices.size
fibermap = Table(fibermap[starindices])
# MASK OUT THROUGHPUT DIP REGION
############################################
mask_throughput_dip_region = True
if mask_throughput_dip_region :
wmin=4300.
wmax=4500.
log.warning("Masking out the wavelength region [{},{}]A in the standard star fit".format(wmin,wmax))
for cam in frames :
for frame in frames[cam] :
ii=np.where( (frame.wave>=wmin)&(frame.wave<=wmax) )[0]
if ii.size>0 :
frame.ivar[:,ii] = 0
# READ MODELS
############################################
log.info("reading star models in %s"%args.starmodels)
stdwave,stdflux,templateid,teff,logg,feh=io.read_stdstar_templates(args.starmodels)
# COMPUTE MAGS OF MODELS FOR EACH STD STAR MAG
############################################
#- Support older fibermaps
if 'PHOTSYS' not in fibermap.colnames:
log.warning('Old fibermap format; using defaults for missing columns')
log.warning(" PHOTSYS = 'S'")
log.warning(" EBV = 0.0")
fibermap['PHOTSYS'] = 'S'
fibermap['EBV'] = 0.0
if not np.in1d(np.unique(fibermap['PHOTSYS']),['','N','S','G']).all():
log.error('Unknown PHOTSYS found')
raise Exception('Unknown PHOTSYS found')
# Fetching Filter curves
model_filters = dict()
for band in ["G","R","Z"] :
for photsys in np.unique(fibermap['PHOTSYS']) :
if photsys in ['N','S']:
model_filters[band+photsys] = load_legacy_survey_filter(band=band,photsys=photsys)
if len(model_filters) == 0:
log.info('No Legacy survey photometry identified in fibermap')
# I will always load gaia data even if we are fitting LS standards only
for band in ["G", "BP", "RP"] :
model_filters["GAIA-" + band] = load_gaia_filter(band=band, dr=2)
# Compute model mags on rank 0 and bcast result to other ranks
# This sidesteps an OOM event on Cori Haswell with "-c 2"
model_mags = None
if rank == 0:
log.info("computing model mags for %s"%sorted(model_filters.keys()))
model_mags = dict()
for filter_name in model_filters.keys():
model_mags[filter_name] = get_magnitude(stdwave, stdflux, model_filters, filter_name)
log.info("done computing model mags")
if comm is not None:
model_mags = comm.bcast(model_mags, root=0)
# LOOP ON STARS TO FIND BEST MODEL
############################################
star_mags = dict()
star_unextincted_mags = dict()
if gaia_std and (fibermap['EBV']==0).all():
log.info("Using E(B-V) from SFD rather than FIBERMAP")
# when doing gaia standards, on old tiles the
# EBV is not set so we fetch from SFD (in original SFD scaling)
ebv = SFDMap(scaling=1).ebv(acoo.SkyCoord(
ra = fibermap['TARGET_RA'] * units.deg,
dec = fibermap['TARGET_DEC'] * units.deg))
else:
ebv = fibermap['EBV']
photometric_systems = np.unique(fibermap['PHOTSYS'])
if not gaia_std:
for band in ['G', 'R', 'Z']:
star_mags[band] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band])
star_unextincted_mags[band] = np.zeros(star_mags[band].shape)
for photsys in photometric_systems :
r_band = extinction_total_to_selective_ratio(band , photsys) # dimensionless
# r_band = a_band / E(B-V)
# E(B-V) is a difference of magnitudes (dimensionless)
# a_band = -2.5*log10(effective dust transmission) , dimensionless
# effective dust transmission =
# integral( SED(lambda) * filter_transmission(lambda,band) * dust_transmission(lambda,E(B-V)) dlamdba)
# / integral( SED(lambda) * filter_transmission(lambda,band) dlamdba)
selection = (fibermap['PHOTSYS'] == photsys)
a_band = r_band * ebv[selection] # dimensionless
star_unextincted_mags[band][selection] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band][selection]) - a_band
for band in ['G','BP','RP']:
star_mags['GAIA-'+band] = fibermap['GAIA_PHOT_'+band+'_MEAN_MAG']
for band, extval in gaia_extinction(star_mags['GAIA-G'],
star_mags['GAIA-BP'],
star_mags['GAIA-RP'], ebv).items():
star_unextincted_mags['GAIA-'+band] = star_mags['GAIA-'+band] - extval
star_colors = dict()
star_unextincted_colors = dict()
# compute the colors and define the unextincted colors
# the unextincted colors are filled later
if not gaia_std:
for c1,c2 in ['GR', 'RZ']:
star_colors[c1 + '-' + c2] = star_mags[c1] - star_mags[c2]
star_unextincted_colors[c1 + '-' + c2] = (
star_unextincted_mags[c1] - star_unextincted_mags[c2])
for c1,c2 in [('BP','RP'), ('G','RP')]:
star_colors['GAIA-' + c1 + '-' + c2] = (
star_mags['GAIA-' + c1] - star_mags['GAIA-' + c2])
star_unextincted_colors['GAIA-' + c1 + '-' + c2] = (
star_unextincted_mags['GAIA-' + c1] -
star_unextincted_mags['GAIA-' + c2])
linear_coefficients=np.zeros((nstars,stdflux.shape[0]))
chi2dof=np.zeros((nstars))
redshift=np.zeros((nstars))
normflux=np.zeros((nstars, stdwave.size))
fitted_model_colors = np.zeros(nstars)
local_comm, head_comm = None, None
if comm is not None:
# All ranks in local_comm work on the same stars
local_comm = comm.Split(rank % nstars, rank)
# The color 1 in head_comm contains all ranks that are have rank 0 in local_comm
head_comm = comm.Split(rank < nstars, rank)
for star in range(rank % nstars, nstars, size):
log.info("rank %d: finding best model for observed star #%d"%(rank, star))
# np.array of wave,flux,ivar,resol
wave = {}
flux = {}
ivar = {}
resolution_data = {}
for camera in frames :
for i,frame in enumerate(frames[camera]) :
identifier="%s-%d"%(camera,i)
wave[identifier]=frame.wave
flux[identifier]=frame.flux[star]
ivar[identifier]=frame.ivar[star]
resolution_data[identifier]=frame.resolution_data[star]
# preselect models based on magnitudes
photsys=fibermap['PHOTSYS'][star]
if gaia_std:
model_colors = model_mags[color_band1] - model_mags[color_band2]
else:
model_colors = model_mags[color_band1 + photsys] - model_mags[color_band2 + photsys]
color_diff = model_colors - star_unextincted_colors[color][star]
selection = np.abs(color_diff) < args.delta_color
if np.sum(selection) == 0 :
log.warning("no model in the selected color range for this star")
continue
# smallest cube in parameter space including this selection (needed for interpolation)
new_selection = (teff>=np.min(teff[selection]))&(teff<=np.max(teff[selection]))
new_selection &= (logg>=np.min(logg[selection]))&(logg<=np.max(logg[selection]))
new_selection &= (feh>=np.min(feh[selection]))&(feh<=np.max(feh[selection]))
selection = np.where(new_selection)[0]
log.info("star#%d fiber #%d, %s = %f, number of pre-selected models = %d/%d"%(
star, starfibers[star], color, star_unextincted_colors[color][star],
selection.size, stdflux.shape[0]))
# Match unextincted standard stars to data
match_templates_result = match_templates(
wave, flux, ivar, resolution_data,
stdwave, stdflux[selection],
teff[selection], logg[selection], feh[selection],
ncpu=ncpu, z_max=args.z_max, z_res=args.z_res,
template_error=args.template_error, comm=local_comm
)
# Only local rank 0 can perform the remaining work
if local_comm is not None and local_comm.Get_rank() != 0:
continue
coefficients, redshift[star], chi2dof[star] = match_templates_result
linear_coefficients[star,selection] = coefficients
log.info('Star Fiber: {}; TEFF: {:.3f}; LOGG: {:.3f}; FEH: {:.3f}; Redshift: {:g}; Chisq/dof: {:.3f}'.format(
starfibers[star],
np.inner(teff,linear_coefficients[star]),
np.inner(logg,linear_coefficients[star]),
np.inner(feh,linear_coefficients[star]),
redshift[star],
chi2dof[star])
)
# Apply redshift to original spectrum at full resolution
model=np.zeros(stdwave.size)
redshifted_stdwave = stdwave*(1+redshift[star])
for i,c in enumerate(linear_coefficients[star]) :
if c != 0 :
model += c*np.interp(stdwave,redshifted_stdwave,stdflux[i])
# Apply dust extinction to the model
log.info("Applying MW dust extinction to star {} with EBV = {}".format(star,ebv[star]))
model *= dust_transmission(stdwave, ebv[star])
# Compute final color of dust-extincted model
photsys=fibermap['PHOTSYS'][star]
if not gaia_std:
model_mag1, model_mag2 = [get_magnitude(stdwave, model, model_filters, _ + photsys) for _ in [color_band1, color_band2]]
else:
model_mag1, model_mag2 = [get_magnitude(stdwave, model, model_filters, _ ) for _ in [color_band1, color_band2]]
if color_band1 == ref_mag_name:
model_magr = model_mag1
elif color_band2 == ref_mag_name:
model_magr = model_mag2
else:
# if the reference magnitude is not among colours
# I'm fetching it separately. This will happen when
# colour is BP-RP and ref magnitude is G
if gaia_std:
model_magr = get_magnitude(stdwave, model, model_filters, ref_mag_name)
else:
model_magr = get_magnitude(stdwave, model, model_filters, ref_mag_name + photsys)
fitted_model_colors[star] = model_mag1 - model_mag2
#- TODO: move this back into normalize_templates, at the cost of
#- recalculating a model magnitude?
cur_refmag = star_mags[ref_mag_name][star]
# Normalize the best model using reported magnitude
scalefac=10**((model_magr - cur_refmag)/2.5)
log.info('scaling {} mag {:.3f} to {:.3f} using scale {}'.format(ref_mag_name, model_magr, cur_refmag, scalefac))
normflux[star] = model*scalefac
if head_comm is not None and rank < nstars: # head_comm color is 1
linear_coefficients = head_comm.reduce(linear_coefficients, op=MPI.SUM, root=0)
redshift = head_comm.reduce(redshift, op=MPI.SUM, root=0)
chi2dof = head_comm.reduce(chi2dof, op=MPI.SUM, root=0)
fitted_model_colors = head_comm.reduce(fitted_model_colors, op=MPI.SUM, root=0)
normflux = head_comm.reduce(normflux, op=MPI.SUM, root=0)
# Check at least one star was fit. The check is peformed on rank 0 and
# the result is bcast to other ranks so that all ranks exit together if
# the check fails.
atleastonestarfit = False
if rank == 0:
fitted_stars = np.where(chi2dof != 0)[0]
atleastonestarfit = fitted_stars.size > 0
if comm is not None:
atleastonestarfit = comm.bcast(atleastonestarfit, root=0)
if not atleastonestarfit:
log.error("No star has been fit.")
sys.exit(12)
# Now write the normalized flux for all best models to a file
if rank == 0:
# get the fibermap from any input frame for the standard stars
fibermap = Table(frame.fibermap)
keep = np.isin(fibermap['FIBER'], starfibers[fitted_stars])
fibermap = fibermap[keep]
# drop fibermap columns specific to exposures instead of targets
for col in ['DELTA_X', 'DELTA_Y', 'EXPTIME', 'NUM_ITER',
'FIBER_RA', 'FIBER_DEC', 'FIBER_X', 'FIBER_Y']:
if col in fibermap.colnames:
fibermap.remove_column(col)
data={}
data['LOGG']=linear_coefficients[fitted_stars,:].dot(logg)
data['TEFF']= linear_coefficients[fitted_stars,:].dot(teff)
data['FEH']= linear_coefficients[fitted_stars,:].dot(feh)
data['CHI2DOF']=chi2dof[fitted_stars]
data['REDSHIFT']=redshift[fitted_stars]
data['COEFF']=linear_coefficients[fitted_stars,:]
data['DATA_%s'%color]=star_colors[color][fitted_stars]
data['MODEL_%s'%color]=fitted_model_colors[fitted_stars]
data['BLUE_SNR'] = snr['b'][fitted_stars]
data['RED_SNR'] = snr['r'][fitted_stars]
data['NIR_SNR'] = snr['z'][fitted_stars]
io.write_stdstar_models(args.outfile,normflux,stdwave,
starfibers[fitted_stars],data,
fibermap, input_frames_table)
|
|
#! /usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fractions import gcd
"""Code generation for bulk operations"""
# Packed formats get a dedicated generated class only up to this many bits
# per value; wider widths fall back to the generic BulkOperationPacked.
# (Removed stray trailing semicolon — not idiomatic Python.)
MAX_SPECIALIZED_BITS_PER_VALUE = 24
# Bit widths supported by the Packed64SingleBlock format.
PACKED_64_SINGLE_BLOCK_BPV = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 21, 32]
# File name of the generated abstract base class.
OUTPUT_FILE = "BulkOperation.java"
HEADER = """// This file has been automatically generated, DO NOT EDIT
package org.apache.lucene.util.packed;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
FOOTER="""
protected int writeLong(long block, byte[] blocks, int blocksOffset) {
for (int j = 1; j <= 8; ++j) {
blocks[blocksOffset++] = (byte) (block >>> (64 - (j << 3)));
}
return blocksOffset;
}
/**
* For every number of bits per value, there is a minimum number of
* blocks (b) / values (v) you need to write in order to reach the next block
* boundary:
* - 16 bits per value -> b=2, v=1
* - 24 bits per value -> b=3, v=1
* - 50 bits per value -> b=25, v=4
* - 63 bits per value -> b=63, v=8
* - ...
*
* A bulk read consists in copying <code>iterations*v</code> values that are
* contained in <code>iterations*b</code> blocks into a <code>long[]</code>
* (higher values of <code>iterations</code> are likely to yield a better
* throughput) => this requires n * (b + 8v) bytes of memory.
*
* This method computes <code>iterations</code> as
* <code>ramBudget / (b + 8v)</code> (since a long is 8 bytes).
*/
public final int computeIterations(int valueCount, int ramBudget) {
final int iterations = ramBudget / (byteBlockCount() + 8 * byteValueCount());
if (iterations == 0) {
// at least 1
return 1;
} else if ((iterations - 1) * byteValueCount() >= valueCount) {
// don't allocate for more than the size of the reader
return (int) Math.ceil((double) valueCount / byteValueCount());
} else {
return iterations;
}
}
}
"""
def is_power_of_two(n):
    """Return True iff n is a positive power of two.

    The classic bit trick `n & (n - 1) == 0` alone wrongly classifies 0 as a
    power of two; guard with `n > 0`. (Callers in this script only pass
    bpv >= 1, so the fix is backward compatible.)
    """
    return n > 0 and n & (n - 1) == 0
def casts(typ):
    """Return the (prefix, suffix) pair used to wrap a Java expression in a
    cast to `typ`; `long` needs no cast, so both parts are empty."""
    if typ == "long":
        return "", ""
    return "(%s) (" % typ, ")"
def hexNoLSuffix(n):
    """Return hex(n) with any Python 2 long-literal 'L' suffix removed.

    On 32-bit Python 2, values > (1 << 31) - 1 are longs and hex() appends
    an 'L' that must not appear in the generated Java source.
    """
    literal = hex(n)
    return literal[:-1] if literal.endswith('L') else literal
def masks(bits):
    """Return the (prefix, suffix) pair that masks a Java expression down to
    `bits` bits; a full 64-bit value needs no masking at all."""
    if bits == 64:
        return "", ""
    mask_literal = hexNoLSuffix((1 << bits) - 1)
    return "(", " & %sL)" % mask_literal
def get_type(bits):
    """Map a bit width to the corresponding Java primitive type name."""
    java_types = {8: "byte", 16: "short", 32: "int", 64: "long"}
    # Any other width is a programming error in the generator itself.
    assert bits in java_types
    return java_types[bits]
def block_value_count(bpv, bits=64):
    """Return (blocks, values): the smallest number of `bits`-wide blocks and
    of `bpv`-bit values that exactly fill each other (i.e. reach a block
    boundary together).

    Uses floor division (`//`) so the result stays an int pair: plain `/`
    was Python-2-only here and yields floats under Python 3. The divisions
    are exact, so behavior on Python 2 is unchanged.
    """
    blocks = bpv
    values = blocks * bits // bpv
    # Reduce the pair while both counts are even (divide out common 2s).
    while blocks % 2 == 0 and values % 2 == 0:
        blocks //= 2
        values //= 2
    assert values * bpv == bits * blocks, "%d values, %d blocks, %d bits per value" %(values, blocks, bpv)
    return (blocks, values)
def packed64(bpv, f):
    """Write the constructor and decode methods of BulkOperationPacked<bpv>
    to file object `f`.

    bpv == 64 is special-cased: values map one-to-one onto 64-bit blocks, so
    the long-based decode is a plain System.arraycopy and the int-based
    decodes are unsupported. Every other width delegates to p64_decode.

    (Removed the unused local `mask` — p64_decode computes its own.)
    """
    f.write("\n")
    f.write("  public BulkOperationPacked%d() {\n" %bpv)
    f.write("    super(%d);\n" %bpv)
    f.write("  }\n\n")

    if bpv == 64:
        f.write("""  @Override
  public void decode(long[] blocks, int blocksOffset, long[] values, int valuesOffset, int iterations) {
    System.arraycopy(blocks, blocksOffset, values, valuesOffset, valueCount() * iterations);
  }

  @Override
  public void decode(long[] blocks, int blocksOffset, int[] values, int valuesOffset, int iterations) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void decode(byte[] blocks, int blocksOffset, int[] values, int valuesOffset, int iterations) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void decode(byte[] blocks, int blocksOffset, long[] values, int valuesOffset, int iterations) {
    LongBuffer.wrap(values, valuesOffset, iterations * valueCount()).put(ByteBuffer.wrap(blocks, blocksOffset, 8 * iterations * blockCount()).asLongBuffer());
  }

""")
    else:
        p64_decode(bpv, f, 32)
        p64_decode(bpv, f, 64)
def p64_decode(bpv, f, bits):
    """Write two decode(...) implementations for BulkOperationPacked<bpv> to
    `f`: one reading from a long[] block stream and one from a byte[] block
    stream, both storing into a `bits`-wide target array (32 -> int[],
    64 -> long[]).

    When the target type cannot hold a value (bits < bpv) an
    UnsupportedOperationException stub is emitted instead.

    Python 2/3 compatibility fixes: `xrange` -> `range` and `/` -> `//`
    where integer division is intended (all these divisions are over ints,
    so Python 2 behavior is unchanged).
    """
    blocks, values = block_value_count(bpv)
    typ = get_type(bits)
    cast_start, cast_end = casts(typ)

    # --- decode from a long[] block stream -------------------------------
    f.write("  @Override\n")
    f.write("  public void decode(long[] blocks, int blocksOffset, %s[] values, int valuesOffset, int iterations) {\n" %typ)
    if bits < bpv:
        f.write("    throw new UnsupportedOperationException();\n")
    else:
        f.write("    for (int i = 0; i < iterations; ++i) {\n")
        mask = (1 << bpv) - 1

        if is_power_of_two(bpv):
            # bpv divides 64: every value sits at a fixed shift in one block.
            f.write("      final long block = blocks[blocksOffset++];\n")
            f.write("      for (int shift = %d; shift >= 0; shift -= %d) {\n" %(64 - bpv, bpv))
            f.write("        values[valuesOffset++] = %s(block >>> shift) & %d%s;\n" %(cast_start, mask, cast_end))
            f.write("      }\n")
        else:
            # Unroll one full blocks/values cycle; each value is either fully
            # inside a block or spans a block boundary.
            for i in range(0, values):
                block_offset = i * bpv // 64
                bit_offset = (i * bpv) % 64
                if bit_offset == 0:
                    # start of block
                    f.write("      final long block%d = blocks[blocksOffset++];\n" %block_offset)
                    f.write("      values[valuesOffset++] = %sblock%d >>> %d%s;\n" %(cast_start, block_offset, 64 - bpv, cast_end))
                elif bit_offset + bpv == 64:
                    # end of block
                    f.write("      values[valuesOffset++] = %sblock%d & %dL%s;\n" %(cast_start, block_offset, mask, cast_end))
                elif bit_offset + bpv < 64:
                    # middle of block
                    f.write("      values[valuesOffset++] = %s(block%d >>> %d) & %dL%s;\n" %(cast_start, block_offset, 64 - bit_offset - bpv, mask, cast_end))
                else:
                    # value spans across 2 blocks
                    mask1 = (1 << (64 - bit_offset)) - 1
                    shift1 = bit_offset + bpv - 64
                    shift2 = 64 - shift1
                    f.write("      final long block%d = blocks[blocksOffset++];\n" %(block_offset + 1))
                    f.write("      values[valuesOffset++] = %s((block%d & %dL) << %d) | (block%d >>> %d)%s;\n" %(cast_start, block_offset, mask1, shift1, block_offset + 1, shift2, cast_end))
        f.write("    }\n")
    f.write("  }\n\n")

    # --- decode from a byte[] block stream -------------------------------
    byte_blocks, byte_values = block_value_count(bpv, 8)
    f.write("  @Override\n")
    f.write("  public void decode(byte[] blocks, int blocksOffset, %s[] values, int valuesOffset, int iterations) {\n" %typ)
    if bits < bpv:
        f.write("    throw new UnsupportedOperationException();\n")
    else:
        if is_power_of_two(bpv) and bpv < 8:
            # Several values per byte at fixed shifts.
            f.write("    for (int j = 0; j < iterations; ++j) {\n")
            f.write("      final byte block = blocks[blocksOffset++];\n")
            for shift in range(8 - bpv, 0, -bpv):
                f.write("      values[valuesOffset++] = (block >>> %d) & %d;\n" %(shift, mask))
            f.write("      values[valuesOffset++] = block & %d;\n" %mask)
            f.write("    }\n")
        elif bpv == 8:
            # One value per byte.
            f.write("    for (int j = 0; j < iterations; ++j) {\n")
            f.write("      values[valuesOffset++] = blocks[blocksOffset++] & 0xFF;\n")
            f.write("    }\n")
        elif is_power_of_two(bpv) and bpv > 8:
            # A whole number of bytes per value.
            f.write("    for (int j = 0; j < iterations; ++j) {\n")
            m = "0xFF" if bits <= 32 else "0xFFL"
            f.write("      values[valuesOffset++] =")
            for i in range(bpv // 8 - 1):
                f.write(" ((blocks[blocksOffset++] & %s) << %d) |" %(m, bpv - 8))
            f.write(" (blocks[blocksOffset++] & %s);\n" %m)
            f.write("    }\n")
        else:
            # General case: unroll one byte_blocks/byte_values cycle,
            # assembling each value from the bytes it overlaps.
            f.write("    for (int i = 0; i < iterations; ++i) {\n")
            for i in range(0, byte_values):
                byte_start = i * bpv // 8
                bit_start = (i * bpv) % 8
                byte_end = ((i + 1) * bpv - 1) // 8
                bit_end = ((i + 1) * bpv - 1) % 8
                # Shift that aligns byte b into its slot of the current value.
                shift = lambda b: 8 * (byte_end - b - 1) + 1 + bit_end
                if bit_start == 0:
                    f.write("      final %s byte%d = blocks[blocksOffset++] & 0xFF;\n" %(typ, byte_start))
                for b in range(byte_start + 1, byte_end + 1):
                    f.write("      final %s byte%d = blocks[blocksOffset++] & 0xFF;\n" %(typ, b))
                f.write("      values[valuesOffset++] =")
                if byte_start == byte_end:
                    if bit_start == 0:
                        if bit_end == 7:
                            f.write(" byte%d" %byte_start)
                        else:
                            f.write(" byte%d >>> %d" %(byte_start, 7 - bit_end))
                    else:
                        if bit_end == 7:
                            f.write(" byte%d & %d" %(byte_start, 2 ** (8 - bit_start) - 1))
                        else:
                            f.write(" (byte%d >>> %d) & %d" %(byte_start, 7 - bit_end, 2 ** (bit_end - bit_start + 1) - 1))
                else:
                    if bit_start == 0:
                        f.write(" (byte%d << %d)" %(byte_start, shift(byte_start)))
                    else:
                        f.write(" ((byte%d & %d) << %d)" %(byte_start, 2 ** (8 - bit_start) - 1, shift(byte_start)))
                    for b in range(byte_start + 1, byte_end):
                        f.write(" | (byte%d << %d)" %(b, shift(b)))
                    if bit_end == 7:
                        f.write(" | byte%d" %byte_end)
                    else:
                        f.write(" | (byte%d >>> %d)" %(byte_end, 7 - bit_end))
                f.write(";\n")
            f.write("    }\n")
    f.write("  }\n\n")
if __name__ == '__main__':
    # Emit the abstract BulkOperation base class plus one specialized
    # BulkOperationPacked<N> subclass per bits-per-value width up to
    # MAX_SPECIALIZED_BITS_PER_VALUE; wider widths use the generic impl.
    # `with open(...)` guarantees the files are closed even on error
    # (the original leaked handles if generation raised); xrange -> range
    # for Python 2/3 compatibility.
    with open(OUTPUT_FILE, 'w') as f:
        f.write(HEADER)
        f.write('\n')
        f.write('''/**
 * Efficient sequential read/write of packed integers.
 */\n''')
        f.write('abstract class BulkOperation implements PackedInts.Decoder, PackedInts.Encoder {\n')
        f.write('  private static final BulkOperation[] packedBulkOps = new BulkOperation[] {\n')
        for bpv in range(1, 65):
            if bpv > MAX_SPECIALIZED_BITS_PER_VALUE:
                f.write('    new BulkOperationPacked(%d),\n' % bpv)
                continue
            # One generated source file per specialized width.
            with open('BulkOperationPacked%d.java' % bpv, 'w') as f2:
                f2.write(HEADER)
                if bpv == 64:
                    # The 64-bit specialization decodes via NIO buffers.
                    f2.write('import java.nio.LongBuffer;\n')
                    f2.write('import java.nio.ByteBuffer;\n')
                    f2.write('\n')
                f2.write('''/**
 * Efficient sequential read/write of packed integers.
 */\n''')
                f2.write('final class BulkOperationPacked%d extends BulkOperationPacked {\n' % bpv)
                packed64(bpv, f2)
                f2.write('}\n')
            f.write('    new BulkOperationPacked%d(),\n' % bpv)
        f.write('  };\n')
        f.write('\n')
        f.write('  // NOTE: this is sparse (some entries are null):\n')
        f.write('  private static final BulkOperation[] packedSingleBlockBulkOps = new BulkOperation[] {\n')
        for bpv in range(1, max(PACKED_64_SINGLE_BLOCK_BPV) + 1):
            if bpv in PACKED_64_SINGLE_BLOCK_BPV:
                f.write('    new BulkOperationPackedSingleBlock(%d),\n' % bpv)
            else:
                f.write('    null,\n')
        f.write('  };\n')
        f.write('\n')
        f.write("\n")
        f.write("  public static BulkOperation of(PackedInts.Format format, int bitsPerValue) {\n")
        f.write("    switch (format) {\n")
        f.write("    case PACKED:\n")
        f.write("      assert packedBulkOps[bitsPerValue - 1] != null;\n")
        f.write("      return packedBulkOps[bitsPerValue - 1];\n")
        f.write("    case PACKED_SINGLE_BLOCK:\n")
        f.write("      assert packedSingleBlockBulkOps[bitsPerValue - 1] != null;\n")
        f.write("      return packedSingleBlockBulkOps[bitsPerValue - 1];\n")
        f.write("    default:\n")
        f.write("      throw new AssertionError();\n")
        f.write("    }\n")
        f.write("  }\n")
        f.write(FOOTER)
|
|
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from eventlet import greenthread
import mock
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron import context
from neutron.extensions import l3
from neutron import manager as n_manager
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.plugins import service as nsp
from neutron.tests import base
from neutron.tests.unit import test_l3_plugin
from neutron.tests.unit import vmware
from neutron.tests.unit.vmware import test_nsx_plugin
from neutron.tests.unit.vmware.vshield import fake_vcns
_uuid = uuidutils.generate_uuid
class ServiceRouterTestExtensionManager(object):
    """Extension manager that serves l3 resources for the test API router."""

    def get_resources(self):
        # The main API router may already have loaded and extended the l3
        # attribute map; temporarily mirror those attributes into the l3
        # extension so its controllers are built with the same definitions,
        # then restore the pristine map afterwards.
        pristine_attr_map = copy.deepcopy(l3.RESOURCE_ATTRIBUTE_MAP)
        for resource in l3.RESOURCE_ATTRIBUTE_MAP.keys():
            loaded_info = attributes.RESOURCE_ATTRIBUTE_MAP.get(resource)
            if loaded_info:
                l3.RESOURCE_ATTRIBUTE_MAP[resource] = loaded_info
        resources = l3.L3.get_resources()
        # restore the original resources once the controllers are created
        l3.RESOURCE_ATTRIBUTE_MAP = pristine_attr_map
        return resources

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []
class ServiceRouterTest(test_nsx_plugin.L3NatTest,
                        test_l3_plugin.L3NatTestCaseMixin):
    """Base test case for NSX advanced-service ("service") routers.

    Routes every vCNS API call made by the plugin to an in-process
    FakeVcns, so edge deployment and related operations complete without
    a real vShield manager.
    """

    def vcns_patch(self):
        # Start the vCNS client mock and wire each API method used by the
        # plugin to the corresponding FakeVcns implementation.
        instance = self.mock_vcns.start()
        self.vcns_instance = instance
        instance.return_value.deploy_edge.side_effect = self.fc2.deploy_edge
        instance.return_value.get_edge_id.side_effect = self.fc2.get_edge_id
        instance.return_value.get_edge_deploy_status.side_effect = (
            self.fc2.get_edge_deploy_status)
        instance.return_value.delete_edge.side_effect = self.fc2.delete_edge
        instance.return_value.update_interface.side_effect = (
            self.fc2.update_interface)
        instance.return_value.get_nat_config.side_effect = (
            self.fc2.get_nat_config)
        instance.return_value.update_nat_config.side_effect = (
            self.fc2.update_nat_config)
        instance.return_value.delete_nat_rule.side_effect = (
            self.fc2.delete_nat_rule)
        instance.return_value.get_edge_status.side_effect = (
            self.fc2.get_edge_status)
        instance.return_value.get_edges.side_effect = self.fc2.get_edges
        instance.return_value.update_routes.side_effect = (
            self.fc2.update_routes)
        instance.return_value.create_lswitch.side_effect = (
            self.fc2.create_lswitch)
        instance.return_value.delete_lswitch.side_effect = (
            self.fc2.delete_lswitch)
        instance.return_value.get_loadbalancer_config.side_effect = (
            self.fc2.get_loadbalancer_config)
        instance.return_value.enable_service_loadbalancer.side_effect = (
            self.fc2.enable_service_loadbalancer)

    def setUp(self, ext_mgr=None, service_plugins=None):
        # Load the NSX API extensions and shorten task-status polling so
        # asynchronous edge-deployment tasks settle quickly in tests.
        # NOTE: the config overrides and vcns_patch() must run before the
        # plugin is instantiated by the parent setUp.
        cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
        cfg.CONF.set_override('task_status_check_interval', 200, group="vcns")

        # vcns does not support duplicated router name, ignore router name
        # validation for unit-test cases
        self.fc2 = fake_vcns.FakeVcns(unique_router_name=False)
        self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True)
        self.vcns_patch()
        # Stub out the lswitch-creation proxy installed by the service plugin.
        mock_proxy = mock.patch(
            "%s.%s" % (vmware.SERVICE_PLUGIN_NAME,
                       '_set_create_lswitch_proxy'))
        mock_proxy.start()

        ext_mgr = ext_mgr or ServiceRouterTestExtensionManager()
        super(ServiceRouterTest, self).setUp(
            plugin=vmware.SERVICE_PLUGIN_NAME,
            service_plugins=service_plugins,
            ext_mgr=ext_mgr)
        self.fc2.set_fake_nsx_api(self.fc)
        self.addCleanup(self.fc2.reset_all)

    def tearDown(self):
        # Drain the vcns driver's task manager before the base tearDown so
        # no background task outlives the test fixture.
        plugin = n_manager.NeutronManager.get_plugin()
        manager = plugin.vcns_driver.task_manager
        # wait max ~10 seconds for all tasks to be finished
        for i in range(100):
            if not manager.has_pending_task():
                break
            greenthread.sleep(0.1)
        if manager.has_pending_task():
            manager.show_pending_tasks()
            raise Exception(_("Tasks not completed"))
        manager.stop()
        # Ensure the manager thread has been stopped
        self.assertIsNone(manager._thread)
        super(ServiceRouterTest, self).tearDown()

    def _create_router(self, fmt, tenant_id, name=None,
                       admin_state_up=None, set_context=False,
                       arg_list=None, **kwargs):
        # Same as the mixin helper, but always requests a service router
        # (service_router=True) so the advanced plugin code path is used.
        data = {'router': {'tenant_id': tenant_id}}
        if name:
            data['router']['name'] = name
        if admin_state_up:
            data['router']['admin_state_up'] = admin_state_up
        for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())):
            # Arg must be present and not empty
            if arg in kwargs and kwargs[arg]:
                data['router'][arg] = kwargs[arg]
        data['router']['service_router'] = True
        router_req = self.new_create_request('routers', data, fmt)
        if set_context and tenant_id:
            # create a specific auth context for this request
            router_req.environ['neutron.context'] = context.Context(
                '', tenant_id)
        return router_req.get_response(self.ext_api)
class ServiceRouterTestCase(ServiceRouterTest,
                            test_nsx_plugin.TestL3NatTestCase):
    """Runs the standard L3 NAT test suite against NSX service routers."""

    def test_router_create(self):
        name = 'router1'
        tenant_id = _uuid()
        expected_value = [('name', name), ('tenant_id', tenant_id),
                          ('admin_state_up', True),
                          ('external_gateway_info', None),
                          ('service_router', True)]
        with self.router(name=name, admin_state_up=True,
                         tenant_id=tenant_id) as router:
            # Edge deployment is asynchronous: the router starts in
            # PENDING_CREATE and becomes ACTIVE when the fake backend's
            # deploy task finishes.
            expected_value_1 = expected_value + [('status', 'PENDING_CREATE')]
            for k, v in expected_value_1:
                self.assertEqual(router['router'][k], v)

            # wait max ~10 seconds for router status update
            for i in range(20):
                greenthread.sleep(0.5)
                res = self._show('routers', router['router']['id'])
                if res['router']['status'] == 'ACTIVE':
                    break
            expected_value_2 = expected_value + [('status', 'ACTIVE')]
            for k, v in expected_value_2:
                self.assertEqual(res['router'][k], v)

            # check an integration lswitch is created
            lswitch_name = "%s-ls" % name
            for lswitch_id, lswitch in self.fc2._lswitches.iteritems():
                if lswitch['display_name'] == lswitch_name:
                    break
            else:
                # for/else: no lswitch matched the expected name
                self.fail("Integration lswitch not found")

        # Leaving the context manager deletes the router.
        # check an integration lswitch is deleted
        lswitch_name = "%s-ls" % name
        for lswitch_id, lswitch in self.fc2._lswitches.iteritems():
            if lswitch['display_name'] == lswitch_name:
                self.fail("Integration switch is not deleted")

    def test_router_delete_after_plugin_restart(self):
        name = 'router1'
        tenant_id = _uuid()
        with self.router(name=name, admin_state_up=True,
                         tenant_id=tenant_id):
            # clear router type cache to mimic plugin restart
            plugin = n_manager.NeutronManager.get_plugin()
            plugin._router_type = {}

        # The delete on context exit must still clean up backend state
        # even though the router-type cache was emptied.
        # check an integration lswitch is deleted
        lswitch_name = "%s-ls" % name
        for lswitch_id, lswitch in self.fc2._lswitches.iteritems():
            if lswitch['display_name'] == lswitch_name:
                self.fail("Integration switch is not deleted")

    def test_router_show(self):
        name = 'router1'
        tenant_id = _uuid()
        # Shown immediately after create, before the async deploy task
        # completes, so the status is still PENDING_CREATE.
        expected_value = [('name', name), ('tenant_id', tenant_id),
                          ('admin_state_up', True),
                          ('status', 'PENDING_CREATE'),
                          ('external_gateway_info', None),
                          ('service_router', True)]
        with self.router(name='router1', admin_state_up=True,
                         tenant_id=tenant_id) as router:
            res = self._show('routers', router['router']['id'])
            for k, v in expected_value:
                self.assertEqual(res['router'][k], v)

    def _test_router_create_with_gwinfo_and_l3_ext_net(self, vlan_id=None):
        # Service routers skip external-gateway validation in these tests.
        super(ServiceRouterTestCase,
              self)._test_router_create_with_gwinfo_and_l3_ext_net(
                  vlan_id, validate_ext_gw=False)

    def _test_router_update_gateway_on_l3_ext_net(self, vlan_id=None):
        # Service routers skip external-gateway validation in these tests.
        super(ServiceRouterTestCase,
              self)._test_router_update_gateway_on_l3_ext_net(
                  vlan_id, validate_ext_gw=False)

    def test_floatingip_update(self):
        self._test_floatingip_update(constants.FLOATINGIP_STATUS_ACTIVE)
class TestProxyCreateLswitch(base.BaseTestCase):
    """Tests for the service plugin's base create_lswitch argument builder."""

    def setUp(self):
        super(TestProxyCreateLswitch, self).setUp()
        self.tenant_id = "foo_tenant"
        self.display_name = "foo_network"
        self.tz_config = [
            {'zone_uuid': 'foo_zone',
             'transport_type': 'stt'}
        ]
        self.tags = utils.get_tags(quantum_net_id='foo_id',
                                   os_tid=self.tenant_id)
        self.cluster = None

    def _build_args(self, *args, **kwargs):
        # Invoke the helper under test with the fixture's standard inputs,
        # forwarding any extra positional/keyword arguments.
        return nsp._process_base_create_lswitch_args(self.cluster,
                                                     'foo_id',
                                                     self.tenant_id,
                                                     self.display_name,
                                                     self.tz_config,
                                                     *args, **kwargs)

    def test_create_lswitch_with_basic_args(self):
        result = self._build_args()
        self.assertEqual(self.display_name, result[0])
        self.assertEqual(self.tz_config, result[1])
        self.assertEqual(sorted(self.tags), sorted(result[2]))

    def test_create_lswitch_with_shared_as_kwarg(self):
        result = self._build_args(shared=True)
        expected = self.tags + [{'scope': 'shared', 'tag': 'true'}]
        self.assertEqual(sorted(expected), sorted(result[2]))

    def test_create_lswitch_with_shared_as_arg(self):
        result = self._build_args(True)
        expected = self.tags + [{'scope': 'shared', 'tag': 'true'}]
        self.assertEqual(sorted(expected), sorted(result[2]))

    def test_create_lswitch_with_additional_tags(self):
        more_tags = [{'scope': 'foo_scope', 'tag': 'foo_tag'}]
        result = self._build_args(tags=more_tags)
        expected = self.tags + more_tags
        self.assertEqual(sorted(expected), sorted(result[2]))
|
|
"""A module for representing universal morphosyntactic feature bundles."""
from typing import Dict, List, Optional, Tuple, Type, Union
from cltk.core.exceptions import CLTKException
from cltk.morphology.universal_dependencies_features import *
__author__ = ["John Stewart <free-variation>"]
class MorphosyntacticFeatureBundle:
    """A representation of a set of features, usually associated with a word form."""

    def __init__(self, *features: MorphosyntacticFeature) -> None:
        """
        >>> f1 = MorphosyntacticFeatureBundle(F.neg, N.pos, V.neg, Case.accusative)
        >>> f1.features
        {F: [neg], N: [pos], V: [neg], Case: [accusative]}
        """
        # Maps each feature *type* to a list of its values, or to the
        # Underspecified sentinel when a bare feature class is supplied.
        # (Annotation fixed: each vararg element is a MorphosyntacticFeature,
        # not a List of them.)
        self.features = {}
        for feature in features:
            if isinstance(feature, type) and issubclass(
                feature, MorphosyntacticFeature
            ):
                # A bare feature class marks the feature as underspecified.
                self.features[feature] = Underspecified
            else:
                if type(feature) in self.features:
                    self.features[type(feature)].append(feature)
                else:
                    self.features[type(feature)] = [feature]

    def __getitem__(
        self, feature_name: Union[str, Type[MorphosyntacticFeature]]
    ) -> List[MorphosyntacticFeature]:
        """
        Use dict-type syntax for accessing the values of features.

        >>> f1 = f(F.pos, N.pos)
        >>> f1[F]
        [pos]
        >>> f1[V]
        Traceback (most recent call last):
        cltk.core.exceptions.CLTKException: {F: [pos], N: [pos]} unspecified for V
        >>> f1['F']
        [pos]

        :raises TypeError: if ``feature_name`` does not name a feature type.
        :raises CLTKException: if the feature is not specified in the bundle.
        """
        # A string is resolved to the feature class of the same name
        # (feature classes live in this module's globals via the wildcard
        # import of universal_dependencies_features).
        if isinstance(feature_name, str):
            if feature_name not in globals():
                raise TypeError(feature_name + " is not a morphosyntactic feature")
            feature_name = globals()[feature_name]

        if not issubclass(feature_name, MorphosyntacticFeature):
            raise TypeError(str(feature_name) + " is not a morphosyntactic feature")

        if feature_name in self.features:
            return self.features[feature_name]
        else:
            raise CLTKException(f"{self} unspecified for {feature_name}")

    def __setitem__(
        self,
        feature_name: Union[str, Type[MorphosyntacticFeature]],
        feature_values: Union[MorphosyntacticFeature, List[MorphosyntacticFeature]],
    ) -> "MorphosyntacticFeatureBundle":
        """
        Use dict-type syntax to set the value of features.

        >>> f1 = f(F.pos)
        >>> f1[N] = N.neg
        >>> f1
        {F: [pos], N: [neg]}
        >>> f1['V'] = V.pos
        >>> f1
        {F: [pos], N: [neg], V: [pos]}

        :raises TypeError: if ``feature_name`` is not a feature type or a
            value is not an instance of that type.
        """
        if isinstance(feature_name, str):
            if feature_name not in globals():
                raise TypeError(feature_name + " is not a morphosyntactic feature")
            feature_name = globals()[feature_name]

        if not issubclass(feature_name, MorphosyntacticFeature):
            raise TypeError(str(feature_name) + " is not a morphosyntactic feature")

        # Normalize a single value to a one-element list.
        if not isinstance(feature_values, list):
            feature_values = [feature_values]

        for value in feature_values:
            # Exact type match is intentional: a value of a different
            # feature type must not be stored under this key.
            if value is not None and type(value) != feature_name:
                raise TypeError(str(value) + " is not a " + str(feature_name))

        self.features[feature_name] = feature_values
        return self

    def all(
        self,
    ) -> List[Tuple[Type[MorphosyntacticFeature], List[MorphosyntacticFeature]]]:
        # Materialize to a list so the return value matches the annotation
        # (a dict view was returned before).
        return list(self.features.items())

    def underspecify(self, feature_name: Type[MorphosyntacticFeature]) -> None:
        """
        Underspecify the given feature in the bundle.

        >>> f1 = f(F.pos, N.pos, V.neg)
        >>> f1.underspecify(F)
        >>> f1[F] is Underspecified
        True
        """
        if not issubclass(feature_name, MorphosyntacticFeature):
            raise TypeError(str(feature_name) + " is not a morphosyntactic feature")
        self.features[feature_name] = Underspecified

    def matches(self, other: "MorphosyntacticFeatureBundle") -> bool:
        """
        This feature bundle matches other if other contains all the features of this bundle,
        i.e. if this bundle is an improper subset of other.
        Underspecified features will match.

        >>> f1 = f(F, N.pos, V.neg)
        >>> f2 = f(F.neg, N.pos, V.neg)
        >>> f3 = f(F.pos, N.neg, V.pos)
        >>> f1.matches(f2)
        True
        >>> f1.matches(f3)
        False
        """
        if other is None:
            return False
        for feature_type in self.features.keys():
            if feature_type not in other.features:
                return False
            # An Underspecified value on either side matches anything.
            if (
                self[feature_type] is not Underspecified
                and other[feature_type] is not Underspecified
                and not (self[feature_type] == other[feature_type])
            ):
                return False
        return True

    def __str__(self) -> str:
        return str(self.features)

    __repr__ = __str__
f = MorphosyntacticFeatureBundle
def to_categorial(pos: int) -> "MorphosyntacticFeatureBundle":
    """Maps UD parts of speech to binary categorial feature bundles.
    In some cases these are underspecified, including empty bundles for interjections.
    >>> to_categorial(POS.adjective)
    {F: [neg], N: [pos], V: [pos]}
    >>> to_categorial(POS.particle)
    {F: [pos]}
    >>> to_categorial(POS.interjection)
    {}
    """
    if pos in (POS.adjective, POS.adverb):
        return f(F.neg, N.pos, V.pos)
    if pos == POS.adposition:
        return f(F.pos, N.neg, V.neg)
    if pos == POS.auxiliary:
        return f(F.pos, N.neg, V.pos)
    if pos in (
        POS.coordinating_conjunction,
        POS.subordinating_conjunction,
        POS.particle,
    ):
        return f(F.pos)
    if pos in (POS.determiner, POS.pronoun, POS.numeral):
        return f(F.pos, N.pos, V.neg)
    if pos in (POS.noun, POS.proper_noun):
        return f(F.neg, N.pos, V.neg)
    if pos == POS.verb:
        return f(F.neg, N.neg, V.pos)
    # interjections, punctuation, symbols, `other`: fully underspecified
    return f()
# Maps Universal Dependencies feature names (outer key) and their string
# values (inner key) onto this module's MorphosyntacticFeature enum members.
# Consumed by from_ud() below to decode UD annotations.
FORM_UD_MAP: Dict[str, Dict[str, MorphosyntacticFeature]] = {
    # parts of speech
    "POS": {
        "ADJ": POS.adjective,
        "ADP": POS.adposition,
        "ADV": POS.adverb,
        "AUX": POS.auxiliary,
        "CCONJ": POS.coordinating_conjunction,
        "DET": POS.determiner,
        "INTJ": POS.interjection,
        "NOUN": POS.noun,
        "NUM": POS.numeral,
        "PART": POS.particle,
        "PRON": POS.pronoun,
        "PROPN": POS.proper_noun,
        "PUNCT": POS.punctuation,
        "SCONJ": POS.subordinating_conjunction,
        "SYM": POS.symbol,
        "VERB": POS.verb,
        "X": POS.other,
    },
    # verbal features
    "VerbForm": {
        "Conv": VerbForm.converb,
        "Fin": VerbForm.finite,
        "Gdv": VerbForm.gerundive,
        "Ger": VerbForm.gerund,
        "Inf": VerbForm.infinitive,
        "Part": VerbForm.participle,
        "Sup": VerbForm.supine,
        "Vnoun": VerbForm.masdar,
    },
    "Mood": {
        "Adm": Mood.admirative,
        "Cnd": Mood.conditional,
        "Des": Mood.desiderative,
        "Imp": Mood.imperative,
        "Ind": Mood.indicative,
        "Jus": Mood.jussive,
        "Nec": Mood.necessitative,
        "Opt": Mood.optative,
        "Pot": Mood.potential,
        "Prp": Mood.purposive,
        "Qot": Mood.quotative,
        "Sub": Mood.subjunctive,
    },
    "Tense": {
        "Fut": Tense.future,
        "Imp": Tense.imperfect,
        "Past": Tense.past,
        "Pqp": Tense.pluperfect,
        "Pres": Tense.present,
    },
    "Aspect": {
        "Hab": Aspect.habitual,
        "Imp": Aspect.imperfective,
        "Iter": Aspect.iterative,
        "Perf": Aspect.perfective,
        "Prog": Aspect.progressive,
        "Prosp": Aspect.prospective,
    },
    "Voice": {
        "Act": Voice.active,
        "Antip": Voice.antipassive,
        "Bfoc": Voice.beneficiary_focus,
        "Lfoc": Voice.location_focus,
        "Caus": Voice.causative,
        "Dir": Voice.direct,
        "Inv": Voice.inverse,
        "Mid": Voice.middle,
        "Pass": Voice.passive,
        "Rcp": Voice.reciprocal,
    },
    "Evident": {"Fh": Evidentiality.first_hand, "Nfh": Evidentiality.non_first_hand},
    "Polarity": {"Pos": Polarity.pos, "Neg": Polarity.neg},
    "Person": {
        "0": Person.zeroth,
        "1": Person.first,
        "2": Person.second,
        "3": Person.third,
        "4": Person.fourth,
        "Psor": Person.psor,
        "Subj": Person.subj,
    },
    "Polite": {
        "Elev": Politeness.elevated,
        "Form": Politeness.formal,
        "Humb": Politeness.humble,
        "Infm": Politeness.informal,
    },
    "Clusivity": {"Ex": Clusivity.exclusive, "In": Clusivity.inclusive},
    # nominal
    "Gender": {
        "Com": Gender.common,
        "Fem": Gender.feminine,
        "Masc": Gender.masculine,
        "Neut": Gender.neuter,
        "Psor": Gender.psor,
    },
    "Animacy": {
        "Anim": Animacy.animate,
        "Hum": Animacy.human,
        "Inan": Animacy.inanimate,
        "Nhum": Animacy.non_human,
    },
    "Number": {
        "Coll": Number.collective,
        "Count": Number.count_plural,
        "Dual": Number.dual,
        "Grpa": Number.greater_paucal,
        "Grpl": Number.greater_plural,
        "Inv": Number.inverse_number,
        "Pauc": Number.paucal,
        "Plur": Number.plural,
        "Ptan": Number.plurale_tantum,
        "Sing": Number.singular,
        "Tri": Number.trial,
        "Psor": Number.psor,
    },
    "NumForm": {
        "Word": NumForm.word,
        "Digit": NumForm.digit,
        "Roman": NumForm.roman,
        "Reference": NumForm.reference,
    },
    "Case": {
        # structural cases
        "Nom": Case.nominative,
        "Acc": Case.accusative,
        "Erg": Case.ergative,
        "Abs": Case.absolutive,
        # oblique cases
        "Abe": Case.abessive,
        # NOTE(review): enum member is spelled "befefactive" (sic) in the Case
        # enum; presumably intended as "benefactive" — fix belongs in the enum.
        "Ben": Case.befefactive,
        "Caus": Case.causative,
        "Cmp": Case.comparative,
        "Cns": Case.considerative,
        "Com": Case.comitative,
        "Dat": Case.dative,
        "Dis": Case.distributive,
        "Equ": Case.equative,
        "Gen": Case.genitive,
        "Ins": Case.instrumental,
        "Par": Case.partitive,
        "Voc": Case.vocative,
        # spatiotemporal cases
        "Abl": Case.ablative,
        "Add": Case.additive,
        "Ade": Case.adessive,
        "All": Case.allative,
        "Del": Case.delative,
        "Ela": Case.elative,
        "Ess": Case.essive,
        "Ill": Case.illative,
        "Ine": Case.inessive,
        "Lat": Case.lative,
        "Loc": Case.locative,
        "Per": Case.perlative,
        "Sub": Case.sublative,
        "Sup": Case.superessive,
        "Ter": Case.terminative,
        "Tem": Case.temporal,
        "Tra": Case.translative,
    },
    "Definite": {
        "Com": Definiteness.complex,
        "Cons": Definiteness.construct_state,
        "Def": Definiteness.definite,
        "Ind": Definiteness.indefinite,
        "Spec": Definiteness.specific_indefinite,
    },
    "Degree": {
        "Abs": Degree.absolute_superlative,
        "Cmp": Degree.comparative,
        "Equ": Degree.equative,
        "Pos": Degree.positive,
        "Sup": Degree.superlative,
    },
    # other lexical
    "PronType": {
        "Art": PrononimalType.article,
        "Dem": PrononimalType.demonstrative,
        "Emp": PrononimalType.emphatic,
        "Exc": PrononimalType.exclamative,
        "Ind": PrononimalType.indefinite,
        "Int": PrononimalType.interrogative,
        "Neg": PrononimalType.negative,
        "Prs": PrononimalType.personal,
        "Rcp": PrononimalType.reciprocal,
        "Rel": PrononimalType.relative,
        "Tot": PrononimalType.total,
    },
    "AdpType": {
        "Prep": AdpositionalType.preposition,
        "Post": AdpositionalType.postposition,
        "Circ": AdpositionalType.circumposition,
        "Voc": AdpositionalType.vocalized_adposition,
    },
    "AdvType": {
        "Man": AdverbialType.manner,
        "Loc": AdverbialType.location,
        "Tim": AdverbialType.time,
        "Deg": AdverbialType.degree,
        "Cau": AdverbialType.cause,
        "Mod": AdverbialType.modality,
    },
    "VerbType": {
        "Aux": VerbType.auxiliary,
        "Cop": VerbType.copula,
        "Mod": VerbType.modal,
        "Light": VerbType.light,
    },
    "NumType": {
        "Card": Numeral.cardinal,
        "Dist": Numeral.distributive,
        "Frac": Numeral.fractional,
        "Mult": Numeral.multiplicative,
        "Ord": Numeral.ordinal,
        "Range": Numeral.range,
        "Sets": Numeral.sets,
    },
    "NameType": {
        "Geo": NameType.place,
        "Prs": NameType.person,
        "Giv": NameType.person_given_name,
        "Sur": NameType.person_surname,
        "Nat": NameType.nationality,
        "Com": NameType.company,
        "Pro": NameType.product,
        "Oth": NameType.other,
    },
    "Strength": {"Strong": Strength.strong, "Weak": Strength.weak},
    # boolean ("Yes"-only) UD features
    "Poss": {"Yes": Possessive.pos},
    "Reflex": {"Yes": Reflexive.pos},
    "Foreign": {"Yes": Foreign.pos},
    "Abbr": {"Yes": Abbreviation.pos},
    "Typo": {"Yes": Typo.pos},
}
def from_ud(feature_name: str, feature_value: str) -> Optional[MorphosyntacticFeature]:
    """For a given Universal Dependencies feature name and value,
    return the appropriate feature class/value.

    Returns None (after printing a diagnostic) when the feature name is not
    recognized; raises CLTKException when the name is recognized but the
    value is not.
    >>> from_ud('Case', 'Abl')
    ablative
    >>> from_ud('Abbr', 'Yes')
    pos
    >>> from_ud('PronType', 'Ind')
    indefinite
    """
    # Do cleanup on certain inputs that look like ``"Number[psor]``
    # Thus this is rewritten to ``feature_name = Number``
    # and ``feature_value = psor``.
    if "[" in feature_name and "]" in feature_name:
        feature_name_split: List[str] = feature_name.split("[", maxsplit=1)
        feature_name = feature_name_split[0]
        feature_value = feature_name_split[1][:-1]
        # normalize e.g. "psor" -> "Psor" to match FORM_UD_MAP keys
        feature_value = feature_value.title()
    if feature_name in FORM_UD_MAP:
        feature_map = FORM_UD_MAP[feature_name]
    else:
        msg1: str = f"Unrecognized UD `feature_name` ('{feature_name}') with `feature_value` ('{feature_value}')."
        msg2: str = f"Please raise an issue at <https://github.com/cltk/cltk/issues> and include a small sample to reproduce the error."
        print(msg1)
        print(msg2)
        # raise CLTKException(msg)
        return None
    values = feature_value.split(",")
    # NOTE(review): this loop returns or raises on the very first value, so
    # for comma-separated inputs like "A,B" only the first value is ever
    # consulted — confirm whether multi-valued UD features should instead
    # yield multiple results.
    for value in values:
        if value in feature_map:
            return feature_map[value]
        else:
            raise CLTKException(
                f"{value}: Unrecognized value for UD feature {feature_name}"
            )
|
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import sys
import time
import traceback
import json
import requests
import urllib.parse
try:
from . import paymentrequest_pb2 as pb2
except ImportError:
sys.exit("Error: could not find paymentrequest_pb2.py. Create it with 'protoc --proto_path=electrum/ --python_out=electrum/ electrum/paymentrequest.proto'")
from . import bitcoin, ecc, util, transaction, x509, rsakey
from .util import print_error, bh2u, bfh
from .util import export_meta, import_meta
from .bitcoin import TYPE_ADDRESS
from .transaction import TxOutput
# BIP70 MIME types: what we accept when fetching a payment request, and what
# we send when POSTing a Payment message and expecting a PaymentACK back.
REQUEST_HEADERS = {'Accept': 'application/bitcoin-paymentrequest', 'User-Agent': 'Electrum'}
ACK_HEADERS = {'Content-Type':'application/bitcoin-payment','Accept':'application/bitcoin-paymentack','User-Agent':'Electrum'}

# CA bundle shipped with `requests`; the parsed certificates are loaded
# lazily into the two globals below by load_ca_list().
ca_path = requests.certs.where()
ca_list = None
ca_keyID = None
def load_ca_list():
    """Lazily populate the trusted-CA globals from the bundled CA file."""
    global ca_list, ca_keyID
    if ca_list is not None:
        return
    ca_list, ca_keyID = x509.load_certificates(ca_path)
# status of payment requests
PR_UNPAID = 0   # request created, no payment observed yet
PR_EXPIRED = 1  # expiry timestamp passed before a payment arrived
PR_UNKNOWN = 2  # sent but not propagated
PR_PAID = 3  # send and propagated
def get_payment_request(url):
    """Fetch a serialized BIP70 payment request from `url`.

    Supports http(s):// URLs (fetched with `requests`) and file:// URLs.
    Always returns a PaymentRequest object; on failure its `error`
    attribute describes the problem and its data is None.
    """
    u = urllib.parse.urlparse(url)
    error = None
    if u.scheme in ['http', 'https']:
        try:
            response = requests.request('GET', url, headers=REQUEST_HEADERS)
            response.raise_for_status()
            # Guard against `bitcoin:`-URIs with invalid payment request URLs
            if "Content-Type" not in response.headers \
            or response.headers["Content-Type"] != "application/bitcoin-paymentrequest":
                data = None
                error = "payment URL not pointing to a payment request handling server"
            else:
                data = response.content
                print_error('fetched payment request', url, len(response.content))
        except requests.exceptions.RequestException:
            data = None
            error = "payment URL not pointing to a valid server"
    elif u.scheme == 'file':
        try:
            # BUG FIX: a serialized payment request is binary protobuf data,
            # so it must be read in binary mode ('rb'); reading it as utf-8
            # text would raise UnicodeDecodeError or hand str (not bytes)
            # to ParseFromString in PaymentRequest.parse().
            with open(u.path, 'rb') as f:
                data = f.read()
        except IOError:
            data = None
            error = "payment URL not pointing to a valid file"
    else:
        data = None
        error = "Unknown scheme for payment request. URL: {}".format(url)
    pr = PaymentRequest(data, error)
    return pr
class PaymentRequest:
    """A BIP70 payment request: parsing, signature verification, payment ACK.

    Wraps the protobuf-serialized request bytes. `error` is set (and further
    processing skipped) whenever fetching/parsing/verification fails.
    """

    def __init__(self, data, error=None):
        # raw serialized protobuf bytes (or None on fetch failure)
        self.raw = data
        self.error = error
        self.parse(data)
        self.requestor = None  # known after verify
        self.tx = None  # txid of the payment, set externally once paid

    def __str__(self):
        return str(self.raw)

    def parse(self, r):
        """Decode the serialized request into details/outputs/memo fields."""
        if self.error:
            return
        # request id: first 16 bytes of sha256(raw), hex-encoded
        self.id = bh2u(bitcoin.sha256(r)[0:16])
        try:
            self.data = pb2.PaymentRequest()
            self.data.ParseFromString(r)
        except:
            self.error = "cannot parse payment request"
            return
        self.details = pb2.PaymentDetails()
        self.details.ParseFromString(self.data.serialized_payment_details)
        self.outputs = []
        for o in self.details.outputs:
            addr = transaction.get_address_from_output_script(o.script)[1]
            self.outputs.append(TxOutput(TYPE_ADDRESS, addr, o.amount))
        self.memo = self.details.memo
        self.payment_url = self.details.payment_url

    def is_pr(self):
        # treated as a "real" payment request only if it carries an amount
        return self.get_amount() != 0
        #return self.get_outputs() != [(TYPE_ADDRESS, self.get_requestor(), self.get_amount())]

    def verify(self, contacts):
        """Verify the request signature; dispatches on pki_type.

        Returns True on success (or for unsigned requests); on failure sets
        `self.error` and returns False.
        """
        if self.error:
            return False
        if not self.raw:
            self.error = "Empty request"
            return False
        pr = pb2.PaymentRequest()
        try:
            pr.ParseFromString(self.raw)
        except:
            self.error = "Error: Cannot parse payment request"
            return False
        if not pr.signature:
            # the address will be displayed as requestor
            self.requestor = None
            return True
        if pr.pki_type in ["x509+sha256", "x509+sha1"]:
            return self.verify_x509(pr)
        elif pr.pki_type in ["dnssec+btc", "dnssec+ecdsa"]:
            return self.verify_dnssec(pr, contacts)
        else:
            self.error = "ERROR: Unsupported PKI Type for Message Signature"
            return False

    def verify_x509(self, paymntreq):
        """Verify an x509-signed request against the trusted CA store."""
        load_ca_list()
        if not ca_list:
            self.error = "Trusted certificate authorities list not found"
            return False
        cert = pb2.X509Certificates()
        cert.ParseFromString(paymntreq.pki_data)
        # verify the chain of certificates
        try:
            x, ca = verify_cert_chain(cert.certificate)
        except BaseException as e:
            traceback.print_exc(file=sys.stderr)
            self.error = str(e)
            return False
        # get requestor name (leaf common name, wildcard label stripped)
        self.requestor = x.get_common_name()
        if self.requestor.startswith('*.'):
            self.requestor = self.requestor[2:]
        # verify the BIP70 signature: the signature field must be empty
        # while serializing the message that was signed.
        # NOTE(review): `paymntreq.signature` is cleared and never restored,
        # and `verify` would be unbound for an unexpected pki_type — callers
        # currently gate on the two handled types; confirm before reuse.
        pubkey0 = rsakey.RSAKey(x.modulus, x.exponent)
        sig = paymntreq.signature
        paymntreq.signature = b''
        s = paymntreq.SerializeToString()
        sigBytes = bytearray(sig)
        msgBytes = bytearray(s)
        if paymntreq.pki_type == "x509+sha256":
            hashBytes = bytearray(hashlib.sha256(msgBytes).digest())
            verify = pubkey0.verify(sigBytes, x509.PREFIX_RSA_SHA256 + hashBytes)
        elif paymntreq.pki_type == "x509+sha1":
            verify = pubkey0.hashAndVerify(sigBytes, msgBytes)
        if not verify:
            self.error = "ERROR: Invalid Signature for Payment Request Data"
            return False
        ### SIG Verified
        self.error = 'Signed by Trusted CA: ' + ca.get_common_name()
        return True

    def verify_dnssec(self, pr, contacts):
        """Verify a dnssec+btc-signed request via a DNSSEC-validated alias."""
        sig = pr.signature
        alias = pr.pki_data
        info = contacts.resolve(alias)
        if info.get('validated') is not True:
            self.error = "Alias verification failed (DNSSEC)"
            return False
        if pr.pki_type == "dnssec+btc":
            self.requestor = alias
            address = info.get('address')
            # signature field must be empty while serializing the signed message
            pr.signature = b''
            message = pr.SerializeToString()
            if ecc.verify_message_with_address(address, sig, message):
                self.error = 'Verified with DNSSEC'
                return True
            else:
                self.error = "verify failed"
                return False
        else:
            self.error = "unknown algo"
            return False

    def has_expired(self):
        # expires == 0 means "never expires"
        return self.details.expires and self.details.expires < int(time.time())

    def get_expiration_date(self):
        return self.details.expires

    def get_amount(self):
        # sum of all output amounts, in satoshis
        return sum(map(lambda x:x[2], self.outputs))

    def get_address(self):
        # first output's address; outputs are TYPE_ADDRESS by construction
        o = self.outputs[0]
        assert o.type == TYPE_ADDRESS
        return o.address

    def get_requestor(self):
        return self.requestor if self.requestor else self.get_address()

    def get_verify_status(self):
        # after a successful verify(), self.error holds the human-readable status
        return self.error if self.requestor else "No Signature"

    def get_memo(self):
        return self.memo

    def get_dict(self):
        """Serializable summary used by the invoice list / JSON export."""
        return {
            'requestor': self.get_requestor(),
            'memo':self.get_memo(),
            'exp': self.get_expiration_date(),
            'amount': self.get_amount(),
            'signature': self.get_verify_status(),
            'txid': self.tx,
            'outputs': self.get_outputs()
        }

    def get_id(self):
        # signed requests are keyed by hash; unsigned ones by address
        return self.id if self.requestor else self.get_address()

    def get_outputs(self):
        return self.outputs[:]

    def send_ack(self, raw_tx, refund_addr):
        """POST the BIP70 Payment message; return (ok, memo-or-error)."""
        pay_det = self.details
        if not self.details.payment_url:
            return False, "no url"
        paymnt = pb2.Payment()
        paymnt.merchant_data = pay_det.merchant_data
        paymnt.transactions.append(bfh(raw_tx))
        ref_out = paymnt.refund_to.add()
        ref_out.script = util.bfh(transaction.Transaction.pay_script(TYPE_ADDRESS, refund_addr))
        paymnt.memo = "Paid using Electrum"
        pm = paymnt.SerializeToString()
        payurl = urllib.parse.urlparse(pay_det.payment_url)
        try:
            r = requests.post(payurl.geturl(), data=pm, headers=ACK_HEADERS, verify=ca_path)
        except requests.exceptions.SSLError:
            # fall back to an unverified connection rather than failing outright
            print("Payment Message/PaymentACK verify Failed")
            try:
                r = requests.post(payurl.geturl(), data=pm, headers=ACK_HEADERS, verify=False)
            except Exception as e:
                print(e)
                return False, "Payment Message/PaymentACK Failed"
        if r.status_code >= 500:
            return False, r.reason
        try:
            paymntack = pb2.PaymentACK()
            paymntack.ParseFromString(r.content)
        except Exception:
            return False, "PaymentACK could not be processed. Payment was sent; please manually verify that payment was received."
        print("PaymentACK message received: %s" % paymntack.memo)
        return True, paymntack.memo
def make_unsigned_request(req):
    """Build an unsigned BIP70 PaymentRequest protobuf from a request dict.

    `req` keys used: 'address' (required), 'amount' (may be None),
    'memo' (required), 'time' and 'exp' (optional ints; non-int values
    are coerced to 0). Returns a pb2.PaymentRequest with empty signature.
    """
    from .transaction import Transaction
    addr = req['address']
    # local was previously named `time`, shadowing the stdlib module imported
    # at the top of the file; renamed to avoid the shadowing (same behavior)
    timestamp = req.get('time', 0)
    exp = req.get('exp', 0)
    if timestamp and type(timestamp) != int:
        timestamp = 0
    if exp and type(exp) != int:
        exp = 0
    amount = req['amount']
    if amount is None:
        amount = 0
    memo = req['memo']
    script = bfh(Transaction.pay_script(TYPE_ADDRESS, addr))
    outputs = [(script, amount)]
    pd = pb2.PaymentDetails()
    for script, amount in outputs:
        pd.outputs.add(amount=amount, script=script)
    pd.time = timestamp
    # expires == 0 encodes "never expires"
    pd.expires = timestamp + exp if exp else 0
    pd.memo = memo
    pr = pb2.PaymentRequest()
    pr.serialized_payment_details = pd.SerializeToString()
    pr.signature = util.to_bytes('')
    return pr
def sign_request_with_alias(pr, alias, alias_privkey):
    """Sign PaymentRequest `pr` in place with an alias (OpenAlias-style) key.

    Sets pki_type to 'dnssec+btc', stores the alias in pki_data, and writes
    a bitcoin message signature over the serialized request.
    """
    pr.pki_type = 'dnssec+btc'
    # NOTE(review): protobuf bytes fields normally require bytes —
    # confirm that assigning str(alias) is accepted by this field.
    pr.pki_data = str(alias)
    message = pr.SerializeToString()
    ec_key = ecc.ECPrivkey(alias_privkey)
    compressed = bitcoin.is_compressed(alias_privkey)
    pr.signature = ec_key.sign_message(message, compressed)
def verify_cert_chain(chain):
    """ Verify a chain of certificates. The last certificate is the CA.

    Returns (leaf_cert, ca_cert) on success; raises Exception on any
    validation failure.
    """
    load_ca_list()
    # parse the chain
    cert_num = len(chain)
    x509_chain = []
    for i in range(cert_num):
        x = x509.X509(bytearray(chain[i]))
        x509_chain.append(x)
        if i == 0:
            # only the leaf certificate's validity dates are checked here
            x.check_date()
        else:
            if not x.check_ca():
                raise Exception("ERROR: Supplied CA Certificate Error")
    if not cert_num > 1:
        raise Exception("ERROR: CA Certificate Chain Not Provided by Payment Processor")
    # if the root CA is not supplied, add it to the chain
    ca = x509_chain[cert_num-1]
    if ca.getFingerprint() not in ca_list:
        keyID = ca.get_issuer_keyID()
        f = ca_keyID.get(keyID)
        if f:
            root = ca_list[f]
            x509_chain.append(root)
        else:
            raise Exception("Supplied CA Not Found in Trusted CA Store.")
    # verify the chain of signatures: each certificate must be signed by
    # the next one in the list
    cert_num = len(x509_chain)
    for i in range(1, cert_num):
        x = x509_chain[i]
        prev_x = x509_chain[i-1]
        algo, sig, data = prev_x.get_signature()
        sig = bytearray(sig)
        pubkey = rsakey.RSAKey(x.modulus, x.exponent)
        if algo == x509.ALGO_RSA_SHA1:
            verify = pubkey.hashAndVerify(sig, data)
        elif algo == x509.ALGO_RSA_SHA256:
            hashBytes = bytearray(hashlib.sha256(data).digest())
            verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA256 + hashBytes)
        elif algo == x509.ALGO_RSA_SHA384:
            hashBytes = bytearray(hashlib.sha384(data).digest())
            verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA384 + hashBytes)
        elif algo == x509.ALGO_RSA_SHA512:
            hashBytes = bytearray(hashlib.sha512(data).digest())
            verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA512 + hashBytes)
        else:
            raise Exception("Algorithm not supported: {}".format(algo))
        if not verify:
            raise Exception("Certificate not Signed by Provided CA Certificate Chain")
    # return the leaf certificate and the (possibly appended) root CA
    return x509_chain[0], ca
def check_ssl_config(config):
    """Validate the user's SSL signing configuration.

    Loads the private key ('ssl_privkey') and certificate chain
    ('ssl_chain') referenced by `config`, verifies the chain, checks that
    the private key matches the leaf certificate, and returns the requestor
    name (leaf common name with any leading wildcard label stripped).
    """
    from . import pem
    key_path = config.get('ssl_privkey')
    cert_path = config.get('ssl_chain')
    with open(key_path, 'r', encoding='utf-8') as f:
        params = pem.parse_private_key(f.read())
    with open(cert_path, 'r', encoding='utf-8') as f:
        s = f.read()
    bList = pem.dePemList(s, "CERTIFICATE")
    # verify chain
    x, ca = verify_cert_chain(bList)
    # verify that privkey and pubkey match
    # NOTE(review): `privkey` and `pubkey` are never used afterwards — the
    # actual match check is the two asserts below. Constructing the keys may
    # validate the parameters as a side effect; confirm before removing.
    privkey = rsakey.RSAKey(*params)
    pubkey = rsakey.RSAKey(x.modulus, x.exponent)
    assert x.modulus == params[0]
    assert x.exponent == params[1]
    # return requestor
    requestor = x.get_common_name()
    if requestor.startswith('*.'):
        requestor = requestor[2:]
    return requestor
def sign_request_with_x509(pr, key_path, cert_path):
    """Sign PaymentRequest `pr` in place using an x509 certificate chain.

    Reads the RSA private key and PEM certificate chain from disk, embeds
    the chain in `pr.pki_data`, and stores an RSA/SHA-256 signature over
    the serialized request (whose signature field is empty at this point,
    as produced by make_unsigned_request).
    """
    from . import pem
    with open(key_path, 'r', encoding='utf-8') as f:
        params = pem.parse_private_key(f.read())
    privkey = rsakey.RSAKey(*params)
    with open(cert_path, 'r', encoding='utf-8') as f:
        s = f.read()
    bList = pem.dePemList(s, "CERTIFICATE")
    certificates = pb2.X509Certificates()
    certificates.certificate.extend(map(bytes, bList))
    pr.pki_type = 'x509+sha256'
    pr.pki_data = certificates.SerializeToString()
    msgBytes = bytearray(pr.SerializeToString())
    hashBytes = bytearray(hashlib.sha256(msgBytes).digest())
    sig = privkey.sign(x509.PREFIX_RSA_SHA256 + hashBytes)
    pr.signature = bytes(sig)
def serialize_request(req):
    """Build a PaymentRequest protobuf from `req`, attaching a dnssec+btc
    signature when the dict carries both a 'name' and a 'sig' entry."""
    pr = make_unsigned_request(req)
    requestor = req.get('name')
    signature = req.get('sig')
    if signature and requestor:
        pr.signature = bfh(signature)
        pr.pki_type = 'dnssec+btc'
        pr.pki_data = str(requestor)
    return pr
def make_request(config, req):
    """Build a PaymentRequest from `req`, x509-signing it when the config
    provides both an SSL private key and a certificate chain."""
    pr = make_unsigned_request(req)
    ssl_key = config.get('ssl_privkey')
    ssl_cert = config.get('ssl_chain')
    if ssl_key and ssl_cert:
        sign_request_with_x509(pr, ssl_key, ssl_cert)
    return pr
class InvoiceStore(object):
    """Persistent store of payment requests (invoices), keyed by request id.

    Backed by wallet storage under the 'invoices' key; also keeps a
    txid -> invoice-id index for paid invoices.
    """

    def __init__(self, storage):
        self.storage = storage
        self.invoices = {}  # invoice id -> PaymentRequest
        self.paid = {}      # txid -> invoice id
        d = self.storage.get('invoices', {})
        self.load(d)

    def set_paid(self, pr, txid):
        """Mark `pr` as paid by `txid`, re-adding it if it was deleted."""
        pr.tx = txid
        pr_id = pr.get_id()
        self.paid[txid] = pr_id
        if pr_id not in self.invoices:
            # in case the user had deleted it previously
            self.add(pr)

    def load(self, d):
        """Rebuild the in-memory maps from a serialized dict.

        Entries that fail to deserialize are silently skipped (bare
        except) so one corrupt record cannot block wallet loading.
        """
        for k, v in d.items():
            try:
                pr = PaymentRequest(bfh(v.get('hex')))
                pr.tx = v.get('txid')
                pr.requestor = v.get('requestor')
                self.invoices[k] = pr
                if pr.tx:
                    self.paid[pr.tx] = k
            except:
                continue

    def import_file(self, path):
        """Merge invoices from a JSON file on disk into the store."""
        def validate(data):
            return data  # TODO
        import_meta(path, validate, self.on_import)

    def on_import(self, data):
        self.load(data)
        self.save()

    def export_file(self, filename):
        export_meta(self.dump(), filename)

    def dump(self):
        """Serialize all invoices into a JSON-compatible dict."""
        d = {}
        for k, pr in self.invoices.items():
            d[k] = {
                'hex': bh2u(pr.raw),
                'requestor': pr.requestor,
                'txid': pr.tx
            }
        return d

    def save(self):
        self.storage.put('invoices', self.dump())

    def get_status(self, key):
        """Return PR_PAID / PR_EXPIRED / PR_UNPAID, or None if unknown."""
        pr = self.get(key)
        if pr is None:
            print_error("[InvoiceStore] get_status() can't find pr for", key)
            return
        if pr.tx is not None:
            return PR_PAID
        if pr.has_expired():
            return PR_EXPIRED
        return PR_UNPAID

    def add(self, pr):
        key = pr.get_id()
        self.invoices[key] = pr
        self.save()
        return key

    def remove(self, key):
        self.invoices.pop(key)
        self.save()

    def get(self, k):
        return self.invoices.get(k)

    def sorted_list(self):
        # sort  (TODO: actually sort; currently returns the raw values view)
        return self.invoices.values()

    def unpaid_invoices(self):
        return [ self.invoices[k] for k in filter(lambda x: self.get_status(x)!=PR_PAID, self.invoices.keys())]
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import sys
import signal
import zmq
import conf
import logging
import atexit
import random
import json
from time import sleep
from math import sqrt
from colorsys import rgb_to_hsv
from urllib import urlencode
from urllib2 import urlopen, URLError
from collections import deque
from threading import Thread
from enum import Enum
from ev3dev import *
from ev3dev_utils import *
from graph import *
#from graph import indexof, contains, indexof_many, get_min_dest_direction, filter_graph, add_unknown_edges_to_graph, explored
# [TODO] are all the mails correct?
__authors__ = ["Marco Squarcina <squarcina at dais.unive.it>",
"Enrico Steffinlongo <enrico.steffinlongo at unive.it>",
"Francesco Di Giacomo <fdigiacom at gmail.com>",
"Michele Schiavinato <mschiavi at dais.unive.it>",
"Alan Del Piccolo <alan.delpiccolo at gmail.com>",
"Filippo Cavallin <840031 at stud.unive.it>",
"Eyasu Zemene Mequanint <eyasu201011 at gmail.com>"]
__status__ = "Development"
# global variables

# instances of motors (left/right drive wheels)
motor_left = large_motor(OUTPUT_D)
motor_right = large_motor(OUTPUT_B)
# instances of sensors
col_sensor = color_sensor()
ir_sensor = infrared_sensor()
## other variables
## [MERGE] moved in update()
#ir_buffer = [[deque(), deque()] for _ in range(4)]
#ir_medians = [[None, None] for _ in range(4)]
# mean between the value of the line and the plane; used as the target
# value for the proportional line follower in follow_line()
mid_value = (conf.line_value + conf.plane_value)/2
# queue of the last samples taken by the color sensor (bounded to
# conf.n_col_samples by get_hsv_colors)
last_hsvs = deque()
# zmq context definitions
context = zmq.Context()
# possible states of the robot's main state machine
State = Enum('State', ('begin explore_node_init explore_node '
                       'explore_edge_init explore_edge_before_marker '
                       'explore_edge explore_edge_after_marker escaping_init '
                       'escaping waiting_for_clearance moving_init '
                       'moving_before_marker moving moving_after_marker idling'))
# function definitions
def message_server():
sock = context.socket(zmq.REP)
sock.bind("tcp://0.0.0.0:{}".format(conf.robot_port))
# log incoming messages and reply back
# [TODO] define a poison pill to kill this thread
while True:
message = sock.recv()
sock.send("Echoed: [{}]".format(message))
logging.info(message)
def reset(signal = None, frame = None):
    """Stop the motors. [FIXME] is it possible to break?

    Doubles as a signal handler: when invoked with a signal number the
    process exits with status 1.
    """
    for motor in (motor_left, motor_right):
        motor.reset()
    if signal:
        sys.exit(1)
def stop_motors():
    """Halt both drive motors."""
    for motor in (motor_left, motor_right):
        motor.stop()
def start_motors():
    """(Re)start both drive motors."""
    for motor in (motor_left, motor_right):
        motor.run()
def wait_launch():
    """Block until the game is started (click play on the web interface)."""
    url_to_check = "http://{}:{}/started".format(
        conf.web_server_ip, conf.web_server_port)
    started = False
    while not started:
        try:
            f = urlopen(url_to_check)
            # the endpoint returns '1' once the operator pressed play
            started = True if f.read() == '1' else False
        except URLError:
            # deliberately best-effort: if the web server is unreachable we
            # proceed anyway instead of blocking forever
            logging.error('Unable to connect to the web server, proceeding')
            break
        sleep(0.5)
def json_translate(data):
    """Convert the string keys of a JSON-decoded dict back to ints.

    JSON object keys are always strings, so a dict keyed by robot/node ids
    comes back as {'1': ...}; this restores integer keys. Uses ``items()``
    instead of the Python2-only ``iteritems()`` — identical behavior on
    Python 2, and the helper now also works under Python 3.
    """
    return {int(k): v for k, v in data.items()}
def greet():
    """Say hello before starting the protocol."""
    # set the second parameter to False for non-blocking call
    # (True blocks until the speech synthesis has finished)
    sound.speak("Hello, I am the robot number {}".format(conf.robot_id), True)
def follow_line(value, pulses = conf.base_pulses):
    """Adjust the speed of the two motors to keep up with the line tracking.

    Simple proportional controller: the further the sampled `value` is from
    the line/plane midpoint, the harder the robot steers back.
    """
    start_motors()
    deviation = value - mid_value
    adjustment = int(conf.proportional_const * deviation)
    motor_left.pulses_per_second_setpoint = pulses + adjustment
    motor_right.pulses_per_second_setpoint = pulses - adjustment
def initialize():
    """Configure sensors and motors, then start the motors."""
    # explicitly set the color sensor in RGB mode
    col_sensor.mode = 'RGB-RAW'
    # explicitly set the infrared sensor in SEEK mode
    ir_sensor.mode = infrared_sensor.mode_ir_seeker
    # prepare the motors (enable speed regulation)
    motor_left.regulation_mode = motor.mode_on
    motor_right.regulation_mode = motor.mode_on
    # set motors ready to run
    start_motors()
def get_hsv_colors():
    """Return the Hue, Saturation, Value triple of the sampled color assuming
    that the color sensor is in RAW-RGB mode.

    Also appends the sample to the bounded `last_hsvs` history queue.
    """
    # NOTE(review): raw channel readings are normalized by 1022 into [0, 1];
    # presumably the sensor's maximum raw value — confirm against the
    # ev3dev RGB-RAW documentation.
    hsv = rgb_to_hsv(*[col_sensor.value(i)/1022 for i in range(col_sensor.num_values())])
    # keep only the most recent conf.n_col_samples samples
    if len(last_hsvs) >= conf.n_col_samples:
        last_hsvs.popleft()
    last_hsvs.append(hsv)
    return hsv
'''
def avoid_collision():
# query the ir sensor in SEEK mode to avoid collisions
seek = [ir_sensor.value(i) for i in range(ir_sensor.num_values())]
for robot_id in range(4):
# remove the heads
if len(ir_buffer[robot_id][0]) >= conf.n_ir_samples:
ir_buffer[robot_id][0].popleft()
ir_buffer[robot_id][1].popleft()
# update the angle
ir_buffer[robot_id][0].append(seek[robot_id*2])
# update the distance
ir_buffer[robot_id][1].append(abs(seek[robot_id*2+1]))
# recompute the median
ir_medians[robot_id][0] = median(ir_buffer[robot_id][0])
ir_medians[robot_id][1] = median(ir_buffer[robot_id][1])
if ir_medians[robot_id][1] < 20:
# [TODO] handle collisions
pass
'''
#[MERGE] first part of avoid_collision used to sample the ir values
def update_ir_queue(ir_buffer):
    """Sample the IR sensor (SEEK mode) and push each robot's angle and
    distance readings into its bounded sample buffers."""
    # one (angle, distance) pair per tracked robot beacon
    seek = [ir_sensor.value(i) for i in range(ir_sensor.num_values())]
    for robot_id in range(4):
        angles, distances = ir_buffer[robot_id]
        # drop the oldest sample once the buffer is full
        if len(angles) >= conf.n_ir_samples:
            angles.popleft()
            distances.popleft()
        angles.append(seek[robot_id * 2])
        distances.append(abs(seek[robot_id * 2 + 1]))
#[MERGE] second part of avoid_collision used to check medians of ir_buffer to check for inbound robots
def get_seen_robots(ir_buffer):
    """Return the ids of other robots whose median IR distance is within
    collision range. At most one robot is expected to be that close."""
    ir_medians = [
        [median(ir_buffer[robot_id][0]), median(ir_buffer[robot_id][1])]
        for robot_id in range(4)
    ]
    seen_bots = [
        i for i in range(4)
        if (ir_medians[i][1] <= conf.collision_distance and i != conf.robot_id)
    ]
    assert len(seen_bots) < 2, "WTF? We are colliding with more than one bot??? Consider better invariants! IROS!"
    return seen_bots
def identify_color(hsv_color):
    """Return the string id of the color closest to the provided HSV triple."""
    # compute the distances among the acquired color and all known colors
    # (items() rather than the Python2-only iteritems(): same behavior on
    # Python 2, and the helper also works if ever run under Python 3)
    distances = {k : color_distance(v, hsv_color) for k, v in conf.hsv_colors.items()}
    # return the closest one
    return min(distances, key=distances.get)
def on_border():
    """Use the saturation mean to see if we fall on a border."""
    saturations = [hsv[1] for hsv in last_hsvs]
    return mean(saturations) > conf.border_saturation_thr
'''
def choose_random_direction(edges):
direction = random.choice([i for i in range(4) if edges[i]])
return direction
'''
def move_to_edge(current_orientation, new_orientation):
    """Rotate in place from `current_orientation` to `new_orientation`.

    Orientations are quarter turns encoded modulo 4, so the rotation amount
    is the (mod 4) difference, delegated to rotate().
    """
    rotate(current_orientation, (new_orientation - current_orientation) % 4)
def rotate(starting_direction, direction = -1):
    """Rotate within a node.
    This function can be used to identify all the out edges starting from the
    current node or, when a direction is provided, to perform a rotation until
    the given direction is reached. Return the list of discovered edges in the
    first case, else nothing."""
    print("Rotate args: starting_direction: {}, direction: {}".format(starting_direction, direction))
    # if the direction is 0 we are already in the right place, there's nothing
    # to do
    if direction == 0:
        return
    # reset position
    reset_motor_position()
    # start with a queue made only of white values
    for _ in range(conf.n_col_samples):
        last_hsvs.popleft()
        last_hsvs.append((0, 0, conf.plane_value))
    # ... and obviously assume that the previous color is white
    prev_color = 'white'
    color = 'white'
    # list of edges to be returned in case we are in discovery mode
    edges = [False for _ in range(4)]
    # start rotating at half of the maximum allowed speed
    # (left forward, right backward -> turn in place)
    motor_left.pulses_per_second_setpoint = conf.slow_pulses
    motor_right.pulses_per_second_setpoint = -conf.slow_pulses
    while True:
        # leave if a 360 degrees rotation has been done
        if motor_left.position > conf.full_rotation_degrees:
            break
        # update the queue of sampled color values
        get_hsv_colors()
        # update the current color according to the sampled value
        # NOTE(review): despite the name, this is the median (not the mean)
        # of the recent V components
        mean_value = median([hsv[2] for hsv in last_hsvs])
        if mean_value < conf.line_value + 0.05:
            color = 'black'
        if mean_value > conf.plane_value - 0.05:
            color = 'white'
        # from white we just fallen on a black line
        if prev_color != color and color == 'black':
            #cur_direction = get_orientation(0)
            #print("cur_direction: {}".format(cur_direction))
            # quantize the wheel position to a quarter-turn index (0..4)
            cur_direction = int(round(motor_left.position / (conf.full_rotation_degrees//4)))
            if cur_direction == direction:
                # arrived at destination, it's time to leave ;)
                break
            elif cur_direction <= 3:
                # keep trace of the new edge just found
                edges[cur_direction] = True
                logging.info("FOUND EDGE")
            elif motor_left.position > conf.full_rotation_degrees:
                break
            else:
                # this is the 5th edge, we are back in the starting position on
                # a node with 4 edges, we should stop here
                break
        prev_color = color
    # rotate the discovered edges so they are expressed relative to the
    # node's absolute orientation rather than the robot's starting one
    new_edges = [edges[(i-starting_direction) % 4] for i in range(4)]
    print("starting_direction: {}, edges: {}, new_edges: {}".format(starting_direction, edges, new_edges))
    return new_edges if direction == -1 else None
def cross_bordered_area(marker=True):
    """Cross a bordered colored region and return the color.

    Small state machine: 'border' (just hit the border) -> 'inside'
    (sampling the area's color) -> 'sampled' (driving until the far border
    is cleared). With marker=False the color was already sampled at the
    previous marker, so we start directly in 'sampled'.
    """
    color = conf.Color.unknown
    # assume that we are on a border
    local_state = 'border'
    if not marker:
        # if we are on a node just go straight until the end is reached because
        # we have already sampled the color in the previous marker
        local_state = 'sampled'
    run_for(motor_left, ever=True, power=conf.slow_pulses)
    run_for(motor_right, ever=True, power=conf.slow_pulses)
    count = 0
    while True:
        # sample color
        get_hsv_colors()
        if local_state == 'border':
            # halt!!!
            #stop_motors()
            # slightly move forward so that we are exactly over the color
            # (run_for is not a blocking call, pay attention we need to sleep)
            run_for(motor_left, power=conf.slow_pulses, degrees=27)
            run_for(motor_right, power=conf.slow_pulses, degrees=27)
            sleep(3)
            #stop_motors()
            #start_motors()
            logging.info("Start sampling")
            local_state = 'inside'
            # start moving again
            #run_for(motor_left, ever=True, power=conf.slow_pulses//2)
            #run_for(motor_right, ever=True, power=conf.slow_pulses//2)
        elif local_state == 'inside':
            # time to pick up some samples to identify the color
            count += 1
            if count >= conf.n_col_samples:
                mean_hsv_color = median(list(last_hsvs))
                color = conf.Color[identify_color(mean_hsv_color)]
                local_state = 'sampled'
                logging.info([color, mean_hsv_color])
                run_for(motor_left, power=conf.slow_pulses, ever=True)
                run_for(motor_right, power=conf.slow_pulses, ever=True)
                logging.info("Esco")
                sleep(2)
            sleep(0.01)
        elif local_state == 'sampled':
            # determine the end of the bordered area using the saturation
            if not on_border():
                return color
        else:
            raise Exception("Uh?")
def turn_around():
    """Change direction to avoid collisions and tell if a marker is found.

    Rotates the robot roughly 180 degrees in place and returns True when a
    marker (high saturation) was detected during the rotation.

    NOTE(review): `mid_value` is read from an enclosing/module scope that is
    not visible here — confirm it is initialized before this is called.
    """
    marker_found = False
    reset_motor_position()
    # start with a queue made only of white values
    for _ in range(conf.n_col_samples):
        last_hsvs.popleft()
        last_hsvs.append((0, 0, conf.plane_value))
    while True:
        get_hsv_colors()
        # check if we are on a marker, this is kind of a code duplication, but
        # it's much faster than computing the mean of the same list two times
        # in a row
        _, saturation, value = mean(last_hsvs)
        if saturation > conf.border_saturation_thr:
            marker_found = True
        if motor_left.position > conf.full_rotation_degrees//2:
            # we are performing the rotation over the marker
            logging.info("1: {}".format(motor_left.position))
            break
        elif motor_left.position > conf.full_rotation_degrees*0.38 and value < mid_value:
            # we performed the turn_around and we are back on track
            logging.info("2: {}".format(motor_left.position))
            break
        elif motor_left.position < conf.full_rotation_degrees*0.75:
            # clockwise rotation
            run_for(motor_left, power=conf.slow_pulses, ever=True)
            run_for(motor_right, power=-conf.slow_pulses, ever=True)
            #motor_left.pulses_per_second_setpoint = conf.slow_pulses
            #motor_right.pulses_per_second_setpoint = -conf.slow_pulses
        else:
            raise Exception("Lost the track")
    # drive straight until the border below the sensor is cleared
    while on_border():
        get_hsv_colors()
        motor_left.pulses_per_second_setpoint = conf.slow_pulses
        motor_right.pulses_per_second_setpoint = conf.slow_pulses
    return marker_found
def retire_from_marker():
    """Back away 150 degrees from a marker we are not allowed to enter."""
    for motor in (motor_left, motor_right):
        run_for(motor, power=conf.slow_pulses, degrees=-150)
    # run_for is non-blocking: give the motors time to finish the move
    sleep(4)
def mean(data):
    """Compute the arithmetic mean of the provided data.

    Accepts either a flat sequence of numbers or a sequence of same-length
    tuples/lists (e.g. HSV samples); in the latter case a list with the
    per-component means is returned.

    Raises:
        ZeroDivisionError: if `data` is empty.
    """
    n = len(data)
    try:
        # sequence of tuples: average each component independently
        return [float(sum(l))/len(l) for l in zip(*data)]
    except TypeError:
        # flat sequence of numbers; float() forces true division under
        # Python 2 (the per-component branch above already does this,
        # the original `sum(data)/n` floor-divided for int input)
        return float(sum(data))/n
def median(data):
    """Compute the median of the provided data, used for ir smoothing.

    Raises:
        Exception: if `data` is empty.
    """
    data = sorted(data)
    n = len(data)
    if n == 0:
        raise Exception('No median for an empty list')
    if n%2 == 1:
        return data[n//2]
    else:
        # even count: mean of the two central elements; dividing by 2.0
        # keeps true division under Python 2 for integer samples (the
        # original `/2` floor-divided, e.g. median([1, 2]) gave 1)
        i = n//2
        return (data[i-1] + data[i])/2.0
def color_distance(a, b):
    """Compute the euclidean distance of 2 hsv values.

    This function also accounts for the heuristic corrections of the two
    colors. Color near to red can be recognized with hue component near to 0 or
    1 (due to cylindrical hsv color space). On the other hand, the height of
    the color sensor wrt the surface involves heavily on the value component,
    so we reduce the value by a constant multiplicative factor.
    """
    # red correction on hue component and value reduction; the tuple is
    # parenthesized so the comprehension is valid on Python 3 too (the
    # original `for x in a, b` is Python-2-only syntax)
    a, b = [(0 if x[0] >= 0.9 else x[0], x[1], x[2]*0.3) for x in (a, b)]
    # euclidean distance of all components (hue, saturation, value);
    # u/v avoid shadowing the corrected colors a and b
    return sqrt(sum((u - v)**2 for u, v in zip(a, b)))
def get_orientation(old_orientation):
    """Estimate the new orientation quadrant from the motor position delta.

    The difference between the two wheel encoders grows by
    `conf.turn_rotation_difference` per quarter turn; the result is the
    previous orientation shifted by the measured number of quarter turns,
    modulo 4.
    """
    delta_motors = motor_left.position - motor_right.position
    quarter_turns = round(delta_motors / conf.turn_rotation_difference)
    orientation = int(quarter_turns + old_orientation) % 4
    print(delta_motors, old_orientation, orientation)
    return orientation
# [TODO] the implementation of this trivial function is left to the willing programmer (Ok, I'll help you! :>")
def solve_collision(seen_robots, current_node, orientation):
    """Yield to another robot by dropping the edge we were following.

    Robot 0 always has the right of way, so every other robot removes the
    contested edge from its local view of the graph.

    NOTE(review): this writes to a module-level `graph`, while `update()`
    keeps its own local `graph`, and it indexes by `current_node` rather
    than `current_node.value` as done elsewhere — confirm both are intended.
    """
    if conf.robot_id != 0:
        graph[current_node][orientation] = None
# [TODO] the server should also tell us if we need to explore the node (since
# it's a new undiscovered node) or not
# [TODO] when dest_orient and edge_len are -1, we just discard these values and
# check if the bot can enter the node
# [MERGE] it gives to the bot even the list of all bots positions
# [MERGE] the permission to enter in such node can be deduced using other_position
# returned values are: (the updated graph, the position of all bots, the permission to enter in destination)
def marker_update(destination_node, destination_orientation, edge_length, exploring):
    """Notify the server that a marker was reached and return its reply.

    Retries every 0.5s until the web server can be reached. The reply is a
    list whose first element is the (json-encoded) updated graph.
    """
    payload = {'robot': conf.robot_id,
               'destination_node': destination_node.value,
               'destination_orientation': destination_orientation,
               'edge_length': edge_length,
               'exploring': exploring}
    url_to_check = "http://{}:{}/marker_update".format(
        conf.web_server_ip, conf.web_server_port)
    while True:
        try:
            reply = urlopen(url_to_check, urlencode(payload))
            response_list = json.loads(reply.read())
            break
        except URLError:
            logging.error('Unable to connect to the web server, proceeding')
            sleep(0.5)
    # decode the graph from its json transport representation
    response_list[0] = json_translate(response_list[0])
    return response_list
# return updated graph and bot_positions
def outupdate(graph, current_node, direction):
    """Tell the server we are leaving `current_node` towards `direction`.

    Sends the locally known edges of the node (1 = edge present, 0 = absent)
    and retries every 0.5s until the web server answers.

    Returns:
        list: [updated graph, bot positions].
    """
    # `is not None` instead of `!= None`: identity is the idiomatic and
    # safe way to test for the missing-edge sentinel (PEP 8)
    edges = [1 if e is not None else 0 for e in graph[current_node.value]]
    data = {'robot': conf.robot_id,
            'direction': direction,
            'n': edges[0],
            'e': edges[1],
            's': edges[2],
            'w': edges[3]}
    url_to_check = "http://{}:{}/outupdate".format(
        conf.web_server_ip, conf.web_server_port)
    response_list = []
    sent = False
    while not sent:
        try:
            f = urlopen(url_to_check, urlencode(data))
            response_list = json.loads(f.read())
            sent = True
        except URLError:
            logging.error('Unable to connect to the web server, proceeding')
            sleep(0.5)
    # decode the graph from its json transport representation
    response_list[0] = json_translate(response_list[0])
    return response_list
def reset_motor_position():
    """Zero both wheel encoder counters."""
    for motor in (motor_left, motor_right):
        motor.position = 0
def get_motor_position():
    """Return the travelled distance as the average of both encoders."""
    total = motor_left.position + motor_right.position
    return total / 2
def get_complementary_orientation(orientation):
    """Return the opposite cardinal direction of `orientation`.

    Used to find the outcoming orientation given the incoming one
    (N -> S, E -> W, S -> N, W -> E, encoded as 0..3).
    """
    opposite = orientation + 2
    return opposite % 4
def update(debug=False):
    """OMG our huge state machine!!!!!!! x_X.

    Main control loop of the robot: on every tick it samples the IR and
    color sensors, follows the line while in a moving state, then
    dispatches on the current `State` to explore the graph of colored
    nodes and edges while coordinating with the web server.
    """
    state = State.begin
    if debug:
        state = State.explore_edge
    old_state = State.begin
    orientation = conf.robot_id
    current_node = Color.unknown
    # current edge is a 3-tuple: starting node, starting orientation,
    # destination node (or unknown)
    current_edge = None
    has_to_explore = False
    graph = dict()
    # list containing the last visited node of each robot (even itself)
    bot_positions = []
    # list of last sampled ir measurements
    ir_buffer = [[deque(), deque()] for _ in range(4)]
    # tuple of the states where the bot should follow the line
    moving_states = (State.begin, State.explore_edge_before_marker,
                     State.explore_edge, State.explore_edge_after_marker,
                     State.escaping, State.moving_before_marker, State.moving,
                     State.moving_after_marker)
    while True:
        if state != old_state:
            logging.info("{} -> {}".format(old_state, state))
            old_state = state
        #logging.info(state)
        # we sample every tick the ir values even if it is not used in current
        # state
        update_ir_queue(ir_buffer)
        # update the global color queue every tick as before
        hue, saturation, value = get_hsv_colors()
        # if we are in a moving state we follow the line, this is correct since
        # all the high level moving calls are blocking
        if state in moving_states:
            follow_line(value)
        # BEGIN OF THE STATE MACHINE UPDATE
        # Begin before a marker, update the vertex infos.
        # NEXT_STATE: EXPLORE_EDGE_AFTER_MARKER.
        if state == State.begin:
            if on_border():
                stop_motors()
                orientation = get_orientation(orientation)
                current_node = cross_bordered_area(marker=True)
                stop_motors()
                response = marker_update(current_node, get_complementary_orientation(orientation), -1, True)
                if len(response) == 0:
                    raise Exception('Empty list returned by marker_update')
                graph, bot_positions, has_to_explore, _ = response
                state = State.explore_edge_after_marker
        # Receive the updated graph, identify the node, explore the node if it is unexplored
        # by rotating around and counting the edges under the color sensor.
        # NEXT STATE: EXPLORE_NODE
        elif state == State.explore_node_init:
            cross_bordered_area(marker=False)
            sleep(0.5)
            if has_to_explore:
                has_to_explore = False
                edges = rotate(orientation)
                # local graph updated. Modifications committed to the server in
                # outupdate contained in explore_edge_init
                graph = add_unknown_edges_to_graph(graph, current_node.value, edges)
            state = State.explore_node
        # Find the direction to reach the closest unexplored edge. If the edge is adjacent to
        # the current node then start exploring it, otherwise move to the node in the minimum path.
        # If there is no unexplored reachable edge switch to idle mode.
        # NEXT STATES: IDLING, MOVING_INIT, EXPLORE_EDGE_INIT
        elif state == State.explore_node:
            filtered_graph = filter_graph(graph, conf.robot_id, bot_positions)
            directions = get_min_dest_direction(filtered_graph, current_node.value)
            # NOTE(review): `== None` would be better as `is None` (PEP 8)
            if directions == None:
                state = State.idling
            else:
                dest = random.choice(directions)
                current_edge = (current_node.value, dest[1], dest[0])
                print("Dest: {}".format(dest))
                if dest[0] == Color.unknown.value:
                    state = State.explore_edge_init
                else:
                    state = State.moving_init
        # Update the graph infos on the server when exiting the node. Rotate
        # and align with the edge to explore.
        # Start moving on the edge.
        # NEXT_STATE: EXPLORE_EDGE_BEFORE_MARKER
        elif state == State.explore_edge_init:
            sleep(1)
            # [TODO] not merged... update position and direction of the bot,
            # update the graph on the server. Maybe gets a new graph
            stop_motors()
            graph, bot_positions = outupdate(graph, current_node, current_edge[1])
            start_motors()
            print("current edge {}".format(current_edge))
            move_to_edge(orientation, current_edge[1])
            # always update orientation on turns
            orientation = current_edge[1]
            state = State.explore_edge_before_marker
        # Try to spot a robot. If one exists solve the collision (in this case
        # the robot always has the right of way) and start waiting until the
        # other robot has turned around. If the position is on a marker and no
        # robot has been spotted move past the marker.
        # NEXT STATE: EXPLORE_EDGE
        elif state == State.explore_edge_before_marker:
            seen_robots = get_seen_robots(ir_buffer)
            if len(seen_robots) > 0:
                stop_motors()
                #solve_collision(seen_robots, current_node, orientation)
                state = State.waiting_for_clearance
            if on_border():
                stop_motors()
                sleep(1)
                cross_bordered_area(marker=False)
                reset_motor_position()
                state = State.explore_edge
        # Try to spot a robot. If one exists solve the collision and starts
        # escaping. If no collision exists and it reaches a marker see if the
        # destination is locked. If it is locked update the edge infos and
        # escape. Otherwise lock the destination and unlock the starting node.
        # NEXT_STATES: ESCAPING_INIT, EXPLORE_EDGE_AFTER_MARKER
        elif state == State.explore_edge:
            seen_robots = get_seen_robots(ir_buffer)
            if len(seen_robots) > 0:
                stop_motors()
                solve_collision(seen_robots, current_node, orientation)
                state = State.escaping_init
            elif on_border():
                # we reached the end of the edge
                stop_motors()
                edge_length = int(get_motor_position())
                orientation = get_orientation(orientation)
                marker_color = cross_bordered_area(marker=True)
                stop_motors()
                response = marker_update(marker_color, get_complementary_orientation(orientation), edge_length, True)
                if len(response) == 0:
                    raise Exception('Empty list returned by marker_update')
                graph, bot_positions, has_to_explore, can_enter = response
                if can_enter:
                    current_node = marker_color
                    state = State.explore_edge_after_marker
                else:
                    retire_from_marker()
                    state = State.escaping_init
        # If we find a node we release the lock on the current edge and we
        # start the node exploration.
        # NEXT_STATE: EXPLORE_NODE_INIT
        elif state == State.explore_edge_after_marker:
            if on_border():
                state = State.explore_node_init
        # Start turning. If there is a waiting mate we notify that the way is
        # clear. If we find a marker while turning we simply go back and we run
        # the standard escape code.
        # NEXT_STATES: EXPLORE_EDGE_AFTER_MARKER, ESCAPING
        elif state == State.escaping_init:
            start_motors()
            found_marker = turn_around()
            stop_motors()
            # always update orientation on turns
            orientation = get_complementary_orientation(orientation)
            #if waiting_mate != None:
            #    notify_clearance(waiting_mate) # to be removed if waiting_for_clearance only sleeps for some seconds
            if found_marker:
                state = State.explore_edge_after_marker
            else:
                state = State.escaping
            print(state)
        # We wait until we are on a marker. We identify it and we change state
        # to notify we are past the marker.
        # NEXT_STATE: EXPLORE_EDGE_AFTER_MARKER
        elif state == State.escaping:
            if on_border():
                stop_motors()
                # we have just visited this marker, so even if we are on a
                # marker we want to get past of it
                cross_bordered_area(marker=False)
                # we do not check locks because it's not released yet
                state = State.explore_edge_after_marker
        # We update graph infos. We move towards the edge.
        # NEXT_STATE: MOVING_BEFORE_MARKER
        elif state == State.moving_init:
            stop_motors()
            graph, bot_positions = outupdate(graph, current_node, current_edge[1])
            start_motors()
            move_to_edge(orientation, current_edge[1])
            # always update orientation on turns
            orientation = current_edge[1]
            state = State.moving_before_marker
        # We wait until we are on the marker. We start moving.
        # NEXT_STATE: MOVING
        elif state == State.moving_before_marker:
            if on_border():
                # we have just visited this marker, so even if we are on a
                # marker we want to get past of it
                cross_bordered_area(marker=False)
                reset_motor_position()
                state = State.moving
        # If we are on a node we start exploring it. If we are on a marker and
        # it is locked, we escape. Otherwise we release the lock just as for the
        # edge exploration.
        # NEXT_STATES: ESCAPING_INIT, EXPLORE_EDGE_AFTER_MARKER
        elif state == State.moving:
            if on_border():
                stop_motors()
                orientation = get_orientation(orientation)
                marker_color = cross_bordered_area(marker = True)
                #assert marker_color.value == current_edge[2], 'Unexpected color marker {} found, expecting color {}'.format(marker_color, current_edge[2])
                stop_motors()
                # using edge_update to notify to the server. The server can
                # discard the information, or use the position to correct
                # weight [TODO] we'll decide later on
                response = marker_update(marker_color, -1, -1, False)
                if len(response) == 0:
                    raise Exception('Empty list returned by marker_update')
                graph, bot_positions, _, can_enter = response
                if can_enter:
                    current_node = marker_color
                    state = State.explore_edge_after_marker
                else:
                    retire_from_marker()
                    state = State.escaping_init
        # We sleep for 10 seconds (measured rotation time) and we start the
        # exploration
        # NEXT_STATE: EXPLORE_EDGE_BEFORE_MARKER
        elif state == State.waiting_for_clearance:
            stop_motors()
            t = time.time()
            while time.time() - t < 10:
                # keep the ir queue warm while waiting
                update_ir_queue(ir_buffer)
                sleep(0.01)
            state = State.explore_edge_before_marker
        # We wait for 5 seconds and then we poll the node to see if we can
        # reach an unexplored edge.
        # NEXT_STATE: EXPLORE_NODE
        elif state == State.idling:
            stop_motors()
            t = time.time()
            while time.time() - t < 5:
                update_ir_queue(ir_buffer)
                sleep(0.01)
            state = State.explore_node
        # Enrico did something wrong because my code is always bug free.
        else:
            raise Exception("Undefined state...")
def main():
    """Entry point: install handlers, start helper threads, run the loop."""
    # register anti-panic handlers
    signal.signal(signal.SIGINT, reset)
    atexit.register(reset)
    # configure how logging should be done
    logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] (%(threadName)-10s) %(message)s', )
    greet()
    # parse command line options
    if len(sys.argv) > 1 and sys.argv[1] == '--wait':
        # wait the protocol to be started
        wait_launch()
    # create a thread for reading incoming zmq messages
    server = Thread(name='MessageServer', target=message_server)
    server.setDaemon(True)
    server.start()
    # [TODO] create the socket for sending messages
    initialize()
    # blocking call: runs the state machine until an exception is raised
    update()
    reset()
    # [TODO] join the MessageServer thread
    sys.exit(0)
# run only when executed as a script, not when imported
if __name__ == '__main__':
    main()
|
|
# -*- coding: utf-8 -*-
import json
import logging
import uuid
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import EmailMessage, get_connection
from django.core.mail.message import EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_save
from django.dispatch.dispatcher import receiver
from django.template import Template, RequestContext
from django.template.base import TemplateDoesNotExist
from django.template.loader import get_template
from django.test import RequestFactory
from django.utils.translation import ugettext_lazy as _, get_language, activate
log = logging.getLogger(__name__)
DEFAULT_SENDER_EMAIL = getattr(settings, 'MAILTO_DEFAULT_SENDER_EMAIL', settings.DEFAULT_FROM_EMAIL)
TEMPLATE_CHOICES = getattr(settings, 'MAILTO_TEMPLATES', (
('mailto/default.html', _('Default')),
('mailto/default_2col.html', _('Default 2 column')),
('mailto/default_3col.html', _('Default 3 column')),
))
LANGUAGE_CHOICES = sorted(settings.LANGUAGES, key=lambda i: i[1])
def get_request(url='/'):
    """
    Returns fake HttpRequest object for rendering purpose.

    The server name/port are taken from the current Site's domain so that
    build_absolute_uri() produces correct links.
    """
    domain, _, port = Site.objects.get_current().domain.partition(':')
    defaults = {'SERVER_NAME': str(domain)}
    if port:
        defaults['SERVER_PORT'] = str(port)
    return RequestFactory(**defaults).get(url)
class UserOptin(models.Model):
    """Per-user email opt-in flag with an unsubscribe hash."""
    user = models.OneToOneField(User, related_name='optin')
    optin = models.BooleanField(_('Optin'), default=True)
    date_added = models.DateTimeField(_('Date added'), auto_now_add=True)
    date_changed = models.DateTimeField(_('Date changed'), auto_now=True)
    hash = models.CharField(_('Hash'), max_length=32, unique=True)

    class Meta:
        verbose_name = _('Optin')
        verbose_name_plural = _('Optins')

    def __unicode__(self):
        return self.user.username

    @classmethod
    def get_emails(cls, emails):
        """
        Returns set of recipient emails under consideration of optins.

        Emails without a known optin record are treated as opted in.
        """
        optins = cls.objects.filter(user__email__in=emails)
        known_emails = optins.values_list('user__email', flat=True)
        emails_with_optin = optins.filter(optin=True).values_list('user__email', flat=True)
        unknown_emails = set(emails) - set(known_emails)
        return set(emails_with_optin) | set(unknown_emails)

    def get_optout_url(self):
        """Return the absolute URL a user can visit to opt out."""
        return get_request().build_absolute_uri(reverse('optout', kwargs={'hash': self.hash}))
@receiver(post_save, sender=User)
def create_user_optin(sender, **kwargs):
    """Create an optin record (with a fresh hash) for every new user."""
    if not kwargs.get('created', False):
        return
    optin = UserOptin()
    optin.user = kwargs.get('instance')
    optin.hash = uuid.uuid4().hex
    optin.save()
class Mail(models.Model):
    """A localized, database-configurable email template."""
    active = models.BooleanField(_('Active'), default=False)
    slug = models.SlugField(_('Slug'))
    language_code = models.CharField(_('Language'), max_length=7, choices=LANGUAGE_CHOICES,
                                     default=settings.LANGUAGE_CODE)
    sender_name = models.CharField(_('Sender Name'), max_length=255, blank=True, null=True, default=None)
    sender_email = models.EmailField(_('Sender Email Address'), default=DEFAULT_SENDER_EMAIL)
    reply_to = models.EmailField(_('Reply To'), blank=True, null=True, default=None)
    # TODO: Email validation
    cc = models.CharField(_('CC Recipients'), max_length=1024, blank=True, null=True, default=None,
                          help_text=_('Comma separated list of CC recipients.'))
    # TODO: Email validation
    bcc = models.CharField(_('BCC Recipients'), max_length=1024, blank=True, null=True, default=None,
                           help_text=_('Comma separated list of BCC recipients.'))
    subject = models.CharField(_('Subject'), max_length=512)
    plain = models.TextField(_('Plain Content'))
    html = models.TextField(_('HTML Content'), blank=True, null=True, default=None)
    template = models.CharField(_('Template'), max_length=1024, choices=TEMPLATE_CHOICES, default='mailto/default.html')
    optout = models.BooleanField(_('Optout'), default=True,
                                 help_text=_('Indicates if this Email can be unsubscribed by user.'))

    class Meta:
        unique_together = (('slug', 'language_code'), )

    def __unicode__(self):
        return self.slug

    @classmethod
    def register(cls, slug, language_code=settings.LANGUAGE_CODE):
        """
        Get and/or create Mail object.

        Newly created mails are inactive and use the slug as subject/body
        placeholder until edited.
        """
        try:
            mail = cls.objects.get(slug=slug, language_code=language_code)
        except ObjectDoesNotExist:
            mail = cls()
            mail.slug = slug
            mail.language_code = language_code
            mail.subject = slug
            mail.plain = slug
            mail.save()
            log.info(u'Mail with slug `%s` created.' % slug)
        return mail

    def get_subject(self, context):
        """
        Returns rendered subject.
        """
        tpl = Template(self.subject)
        ctx = RequestContext(get_request(), context)
        return tpl.render(ctx)

    def get_plain_content(self, context):
        """
        Returns rendered plain email body.
        """
        tpl = Template(self.plain)
        ctx = RequestContext(get_request(), context)
        try:
            # Try to get and render base plain template (same name as the
            # HTML template but with a .txt extension)
            base_tpl_name = self.template.split('.')[:-1]
            base_tpl_name.append('txt')
            base_tpl_name = '.'.join(base_tpl_name)
            base_context = context
            base_context.update({
                'body': tpl.render(ctx)
            })
            base_tpl = get_template(base_tpl_name)
            base_ctx = RequestContext(get_request(), base_context)
            return base_tpl.render(base_ctx)
        except TemplateDoesNotExist:
            # no .txt companion template: render the plain body standalone
            return tpl.render(ctx)

    def get_html_content(self, context):
        """
        Returns rendered HTML email body.

        `self.html` is expected to hold a JSON object whose keys feed the
        base template; returns None when there is no HTML content.
        """
        if not self.html:
            return None
        base_context = context
        html_context = json.loads(self.html)
        if not html_context:
            return None
        base_context.update(html_context)
        base_ctx = RequestContext(get_request(), base_context)
        base_tpl = get_template(self.template)
        # render the filled template once more so tags inside the stored
        # HTML fragments are expanded with the caller's context
        tpl = Template(base_tpl.render(base_ctx))
        ctx = RequestContext(get_request(), context)
        return tpl.render(ctx)

    def get_from_email(self):
        """
        Returns email address with name.
        """
        if self.sender_name:
            return '%s <%s>' % (self.sender_name, self.sender_email)
        else:
            return self.sender_email

    def get_cc_recipients(self):
        """
        Returns list of cc recipients.
        """
        if not self.cc:
            return []
        return [cc.strip() for cc in self.cc.split(',')]

    # NOTE(review): method name has a typo ("recipeints") but is kept for
    # backwards compatibility with existing callers.
    def get_bcc_recipeints(self):
        """
        Return list of bcc recipients.
        """
        if not self.bcc:
            return []
        return [bcc.strip() for bcc in self.bcc.split(',')]

    def send(self, recipients, **kwargs):
        """
        Constructs and sends message.

        Inactive mails and fully opted-out recipient lists are skipped.
        One SMTP connection is shared for all recipients.
        """
        if not self.active:
            return
        recipient_emails = UserOptin.get_emails(recipients)
        if not recipient_emails:
            return
        # open one connection for multiple mails
        connection = get_connection()
        connection.open()
        from_email = kwargs.get('from_email', None)
        reply_to = kwargs.get('reply_to', None)
        cc = kwargs.get('cc', [])
        bcc = kwargs.get('bcc', [])
        headers = kwargs.get('headers', {})
        attachments = kwargs.get('attachments', [])
        for recipient_email in recipient_emails:
            # NOTE(review): when no 'context' kwarg is passed this is None and
            # the item assignment below raises TypeError — callers (mailto())
            # always pass one; confirm or default to {}.
            context = kwargs.get('context', None)
            # get user context
            try:
                context['recipient'] = User.objects.get(email__iexact=recipient_email)
            except ObjectDoesNotExist:
                # unknown address: wrap it in an unsaved User instance
                context['recipient'] = User(email=recipient_email)
            mail_kwargs = {
                'from_email': from_email if from_email else self.get_from_email(),
                'to': [recipient_email, ],
                'cc': self.get_cc_recipients() + cc if cc else self.get_cc_recipients(),
                'bcc': self.get_bcc_recipeints() + bcc if bcc else self.get_bcc_recipeints(),
                'headers': headers,
                'attachments': attachments,
                'connection': connection,
            }
            if self.reply_to:
                mail_kwargs['headers']['Reply-To'] = reply_to if reply_to else self.reply_to
            # render subject/body in the mail's language, then restore
            current_language = get_language()
            activate(self.language_code)
            mail_kwargs['subject'] = self.get_subject(context)
            context.update({
                'subject': mail_kwargs['subject']
            })
            mail_kwargs['body'] = self.get_plain_content(context)
            html = self.get_html_content(context)
            if html:
                mail_kwargs['alternatives'] = [(html, 'text/html'), ]
            activate(current_language)
            # NOTE(review): mail_kwargs is never empty, so the else branch
            # below is unreachable; presumably the intent was to check for
            # 'alternatives' — confirm.
            if mail_kwargs:
                email = EmailMultiAlternatives(**mail_kwargs)
            else:
                email = EmailMessage(**mail_kwargs)
            email.send()
        # finally close connection
        connection.close()

    def preview_html(self):
        """
        Returns rendered HTML email body for preview.
        """
        tpl = get_template(self.template)
        ctx = RequestContext(get_request(), {
            'body_html': self.html
        })
        return tpl.render(ctx)
def mailto(recipients, slug, language_code=settings.LANGUAGE_CODE, context={}, **kwargs):
    """Shortcut: look up (or create) the Mail for `slug` and send it."""
    if not recipients:
        return
    Mail.register(slug, language_code).send(recipients, context=context, **kwargs)
|
|
import datetime
import os
from collections import defaultdict
from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe
import basket
import happyforms
from tower import ugettext as _, ugettext_lazy as _lazy
import mkt
from mkt.comm.utils import create_comm_note
from mkt.constants import APP_FEATURES, comm, FREE_PLATFORMS, PAID_PLATFORMS
from mkt.developers.forms import AppSupportFormMixin, verify_app_domain
from mkt.files.models import FileUpload
from mkt.files.utils import parse_webapp
from mkt.reviewers.models import RereviewQueue
from mkt.site.utils import slug_validator
from mkt.tags.models import Tag
from mkt.tags.utils import clean_tags
from mkt.translations.fields import TransField
from mkt.translations.forms import TranslationFormMixin
from mkt.translations.widgets import TransInput, TransTextarea
from mkt.users.models import UserNotification
from mkt.users.notifications import app_surveys
from mkt.webapps.models import AppFeatures, BlockedSlug, Webapp
def mark_for_rereview(webapp, added_devices, removed_devices):
    """Flag `webapp` for re-review after its supported devices changed."""
    # L10n: {0} is the list of device changes.
    msg = _(u'Device(s) changed: {0}').format(', '.join(
        [_(u'Added {0}').format(unicode(mkt.DEVICE_TYPES[d].name))
         for d in added_devices] +
        [_(u'Removed {0}').format(unicode(mkt.DEVICE_TYPES[d].name))
         for d in removed_devices]))
    RereviewQueue.flag(webapp, mkt.LOG.REREVIEW_DEVICES_ADDED, msg)
def mark_for_rereview_features_change(webapp, added_features,
                                      removed_features):
    """Flag `webapp` for re-review after its feature requirements changed."""
    # L10n: {0} is the list of requirements changes.
    msg = _(u'Requirements changed: {0}').format(', '.join(
        [_(u'Added {0}').format(f) for f in added_features] +
        [_(u'Removed {0}').format(f) for f in removed_features]))
    RereviewQueue.flag(webapp, mkt.LOG.REREVIEW_FEATURES_CHANGED, msg)
class DeviceTypeForm(happyforms.Form):
    """Form selecting the (free XOR paid) platforms an app supports."""
    ERRORS = {
        'both': _lazy(u'Cannot be free and paid.'),
        'none': _lazy(u'Please select a device.'),
    }
    free_platforms = forms.MultipleChoiceField(
        choices=FREE_PLATFORMS(), required=False)
    paid_platforms = forms.MultipleChoiceField(
        choices=PAID_PLATFORMS(), required=False)

    def save(self, webapp, is_paid):
        """Sync the webapp's device types with the submitted selection."""
        data = self.cleaned_data[
            'paid_platforms' if is_paid else 'free_platforms']
        # values are "free-xxx"/"paid-xxx": strip the prefix before lookup
        submitted_data = self.get_devices(t.split('-', 1)[1] for t in data)
        new_types = set(dev.id for dev in submitted_data)
        old_types = set(mkt.DEVICE_TYPES[x.id].id for x in webapp.device_types)
        added_devices = new_types - old_types
        removed_devices = old_types - new_types
        for d in added_devices:
            webapp.webappdevicetype_set.create(device_type=d)
        for d in removed_devices:
            webapp.webappdevicetype_set.filter(device_type=d).delete()
        # Send app to re-review queue if public and new devices are added.
        if added_devices and webapp.status in mkt.WEBAPPS_APPROVED_STATUSES:
            mark_for_rereview(webapp, added_devices, removed_devices)

    def _add_error(self, msg):
        # attach the same error to both platform fields
        self._errors['free_platforms'] = self._errors['paid_platforms'] = (
            self.ERRORS[msg])

    def _get_combined(self):
        # platform names from both fields, without the "free-"/"paid-" prefix
        devices = (self.cleaned_data.get('free_platforms', []) +
                   self.cleaned_data.get('paid_platforms', []))
        return set(d.split('-', 1)[1] for d in devices)

    def clean(self):
        data = self.cleaned_data
        paid = data.get('paid_platforms', [])
        free = data.get('free_platforms', [])
        # Check that they didn't select both.
        if free and paid:
            self._add_error('both')
            return data
        # Check that they selected one.
        if not free and not paid:
            self._add_error('none')
            return data
        return super(DeviceTypeForm, self).clean()

    def get_devices(self, source=None):
        """Returns a device based on the requested free or paid."""
        if source is None:
            source = self._get_combined()
        platforms = {'firefoxos': mkt.DEVICE_GAIA,
                     'desktop': mkt.DEVICE_DESKTOP,
                     'android-mobile': mkt.DEVICE_MOBILE,
                     'android-tablet': mkt.DEVICE_TABLET}
        return map(platforms.get, source)

    def is_paid(self):
        """True when any paid platform was selected."""
        return bool(self.cleaned_data.get('paid_platforms', False))

    def get_paid(self):
        """Returns the premium type. Should not be used if the form is used to
        modify an existing app.
        """
        return mkt.WEBAPP_PREMIUM if self.is_paid() else mkt.WEBAPP_FREE
class DevAgreementForm(happyforms.Form):
    """Developer agreement acceptance form with optional newsletter signup."""
    read_dev_agreement = forms.BooleanField(label=_lazy(u'Agree and Continue'),
                                            widget=forms.HiddenInput)
    newsletter = forms.BooleanField(required=False, label=app_surveys.label,
                                    widget=forms.CheckboxInput)

    def __init__(self, *args, **kw):
        # `instance` is the user accepting the agreement; `request` is
        # needed for region/language when subscribing to the newsletter
        self.instance = kw.pop('instance')
        self.request = kw.pop('request')
        super(DevAgreementForm, self).__init__(*args, **kw)

    def save(self):
        """Record the acceptance timestamp and optional newsletter optin."""
        self.instance.read_dev_agreement = datetime.datetime.now()
        self.instance.save()
        if self.cleaned_data.get('newsletter'):
            UserNotification.update_or_create(
                user=self.instance,
                notification_id=app_surveys.id, update={'enabled': True})
            basket.subscribe(self.instance.email,
                             'app-dev',
                             format='H',
                             country=self.request.REGION.slug,
                             lang=self.request.LANG,
                             source_url=os.path.join(settings.SITE_URL,
                                                     'developers/submit'))
class NewWebappVersionForm(happyforms.Form):
    """Validates an uploaded (hosted or packaged) app version."""
    upload_error = _lazy(u'There was an error with your upload. '
                         u'Please try again.')
    upload = forms.ModelChoiceField(
        widget=forms.HiddenInput,
        queryset=FileUpload.objects.filter(valid=True),
        error_messages={'invalid_choice': upload_error})

    def __init__(self, *args, **kw):
        kw.pop('request', None)
        self.webapp = kw.pop('webapp', None)
        self._is_packaged = kw.pop('is_packaged', False)
        super(NewWebappVersionForm, self).__init__(*args, **kw)

    def clean(self):
        data = self.cleaned_data
        if 'upload' not in self.cleaned_data:
            self._errors['upload'] = self.upload_error
            return
        if self.is_packaged():
            # Now run the packaged app check, done in clean, because
            # clean_packaged needs to be processed first.
            try:
                pkg = parse_webapp(data['upload'], self.webapp)
            except forms.ValidationError, e:
                self._errors['upload'] = self.error_class(e.messages)
                return
            # Collect validation errors so we can display them at once.
            errors = []
            ver = pkg.get('version')
            if (ver and self.webapp and
                    self.webapp.versions.filter(version=ver).exists()):
                errors.append(_(u'Version %s already exists.') % ver)
            origin = pkg.get('origin')
            if origin:
                try:
                    verify_app_domain(origin, packaged=True,
                                      exclude=self.webapp)
                except forms.ValidationError, e:
                    errors.append(e.message)
                if self.webapp and origin != self.webapp.app_domain:
                    errors.append(_('Changes to "origin" are not allowed.'))
            if errors:
                self._errors['upload'] = self.error_class(errors)
                return
        else:
            # Throw an error if this is a dupe.
            # (JS sets manifest as `upload.name`.)
            try:
                verify_app_domain(data['upload'].name)
            except forms.ValidationError, e:
                self._errors['upload'] = self.error_class(e.messages)
                return
        return data

    def is_packaged(self):
        return self._is_packaged
class NewWebappForm(DeviceTypeForm, NewWebappVersionForm):
    """Combined platform-selection + upload form for brand new apps."""
    ERRORS = DeviceTypeForm.ERRORS.copy()
    ERRORS['user'] = _lazy('User submitting validation does not match.')
    upload = forms.ModelChoiceField(
        widget=forms.HiddenInput,
        queryset=FileUpload.objects.filter(valid=True),
        error_messages={'invalid_choice': _lazy(
            u'There was an error with your upload. Please try again.')})
    packaged = forms.BooleanField(required=False)

    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request', None)
        super(NewWebappForm, self).__init__(*args, **kwargs)
        if 'paid_platforms' in self.fields:
            # paid choices depend on the requesting user/region
            self.fields['paid_platforms'].choices = PAID_PLATFORMS(
                self.request)

    # NOTE(review): identical to DeviceTypeForm._add_error — looks redundant
    def _add_error(self, msg):
        self._errors['free_platforms'] = self._errors['paid_platforms'] = (
            self.ERRORS[msg])

    def clean(self):
        data = super(NewWebappForm, self).clean()
        if not data:
            return
        upload = data.get('upload')
        if self.request and upload:
            # the upload must belong to the user submitting the form
            if not (upload.user and upload.user.pk == self.request.user.pk):
                self._add_error('user')
        return data

    def is_packaged(self):
        # the "packaged" checkbox can also mark the submission as packaged
        return self._is_packaged or self.cleaned_data.get('packaged', False)
class AppDetailsBasicForm(AppSupportFormMixin, TranslationFormMixin,
                          happyforms.ModelForm):
    """Form for "Details" submission step."""

    # MDN article substituted into the privacy-policy help text in __init__.
    PRIVACY_MDN_URL = (
        'https://developer.mozilla.org/Marketplace/'
        'Publishing/Policies_and_Guidelines/Privacy_policies')

    # Radio choices for `publish_type`: publish immediately vs. keep private.
    PUBLISH_CHOICES = (
        (mkt.PUBLISH_IMMEDIATE,
         _lazy(u'Publish my app and make it visible to everyone in the '
               u'Marketplace and include it in search results.')),
        (mkt.PUBLISH_PRIVATE,
         _lazy(u'Do not publish my app. Notify me and I will adjust app '
               u'visibility after it is approved.')),
    )

    app_slug = forms.CharField(max_length=30,
                               widget=forms.TextInput(attrs={'class': 'm'}))
    description = TransField(
        label=_lazy(u'Description:'),
        help_text=_lazy(u'The app description is one of the fields used to '
                        u'return search results in the Firefox Marketplace. '
                        u'The app description also appears on the app\'s '
                        u'detail page. Be sure to include a description that '
                        u'accurately represents your app.'),
        widget=TransTextarea(attrs={'rows': 4}))
    # NOTE(review): "include a keywords" grammar slip in this user-visible
    # string below; fixing it is a behavior (string) change, not done here.
    tags = forms.CharField(
        label=_lazy(u'Search Keywords:'), required=False,
        widget=forms.Textarea(attrs={'rows': 3}),
        help_text=_lazy(
            u'The search keywords are used to return search results in the '
            u'Firefox Marketplace. Be sure to include a keywords that '
            u'accurately reflect your app.'))
    privacy_policy = TransField(
        label=_lazy(u'Privacy Policy:'),
        widget=TransTextarea(attrs={'rows': 6}),
        help_text=_lazy(
            u'A privacy policy explains how you handle data received '
            u'through your app. For example: what data do you receive? '
            u'How do you use it? Who do you share it with? Do you '
            u'receive personal information? Do you take steps to make '
            u'it anonymous? What choices do users have to control what '
            u'data you and others receive? Enter your privacy policy '
            u'link or text above. If you don\'t have a privacy '
            u'policy, <a href="{url}" target="_blank">learn more on how to '
            u'write one.</a>'))
    homepage = TransField.adapt(forms.URLField)(
        label=_lazy(u'Homepage:'), required=False,
        widget=TransInput(attrs={'class': 'full'}),
        help_text=_lazy(
            u'If your app has another homepage, enter its address here.'))
    support_url = TransField.adapt(forms.URLField)(
        label=_lazy(u'Website:'), required=False,
        widget=TransInput(attrs={'class': 'full'}),
        help_text=_lazy(
            u'If your app has a support website or forum, enter its address '
            u'here.'))
    support_email = TransField.adapt(forms.EmailField)(
        label=_lazy(u'Email:'), required=False,
        widget=TransInput(attrs={'class': 'full'}),
        help_text=_lazy(
            u'This email address will be listed publicly on the Marketplace '
            u'and used by end users to contact you with support issues. This '
            u'email address will be listed publicly on your app details page.'
        ))
    # Not a model field: stored on the app's latest file in save().
    flash = forms.TypedChoiceField(
        label=_lazy(u'Does your app require Flash support?'),
        required=False, coerce=lambda x: bool(int(x)),
        initial=0, widget=forms.RadioSelect,
        choices=((1, _lazy(u'Yes')),
                 (0, _lazy(u'No'))))
    # Not a model field: forwarded to reviewers as a comm note in save().
    notes = forms.CharField(
        label=_lazy(u'Your comments for reviewers:'), required=False,
        widget=forms.Textarea(attrs={'rows': 2}),
        help_text=_lazy(
            u'Your app will be reviewed by Mozilla before it becomes publicly '
            u'listed on the Marketplace. Enter any special instructions for '
            u'the app reviewers here.'))
    publish_type = forms.TypedChoiceField(
        label=_lazy(u'Once your app is approved, choose a publishing option:'),
        choices=PUBLISH_CHOICES, initial=mkt.PUBLISH_IMMEDIATE,
        widget=forms.RadioSelect())
    is_offline = forms.BooleanField(
        label=_lazy(u'My app works without an Internet connection.'),
        required=False)

    class Meta:
        model = Webapp
        fields = ('app_slug', 'description', 'privacy_policy', 'homepage',
                  'support_url', 'support_email', 'publish_type', 'is_offline')

    def __init__(self, *args, **kwargs):
        # `request` is required here (no pop default); used by clean_tags().
        self.request = kwargs.pop('request')
        # TODO: remove this and put it in the field definition above.
        # See https://bugzilla.mozilla.org/show_bug.cgi?id=1072513
        # NOTE(review): this mutates base_fields, which is shared across
        # form instances — appears intentional per the TODO, but confirm.
        privacy_field = self.base_fields['privacy_policy']
        privacy_field.help_text = mark_safe(privacy_field.help_text.format(
            url=self.PRIVACY_MDN_URL))
        if 'instance' in kwargs:
            # Pre-fill the offline checkbox from the instance's heuristic.
            instance = kwargs['instance']
            instance.is_offline = instance.guess_is_offline()
        super(AppDetailsBasicForm, self).__init__(*args, **kwargs)

    def clean_app_slug(self):
        """Validate uniqueness and blocklist; slugs are stored lowercased."""
        slug = self.cleaned_data['app_slug']
        slug_validator(slug, lower=False)

        if slug != self.instance.app_slug:
            if Webapp.objects.filter(app_slug=slug).exists():
                raise forms.ValidationError(
                    _('This slug is already in use. Please choose another.'))

            # NOTE(review): %-interpolation runs before _() here, so the
            # literal slug becomes part of the msgid — confirm intended.
            if BlockedSlug.blocked(slug):
                raise forms.ValidationError(
                    _('The slug cannot be "%s". Please choose another.'
                      % slug))

        return slug.lower()

    def clean_tags(self):
        # Delegates permission-aware tag cleaning to the shared helper.
        return clean_tags(self.request, self.cleaned_data['tags'])

    def save(self, *args, **kw):
        # NOTE(review): reads raw self.data['notes'] rather than
        # cleaned_data — raises KeyError if the key is absent; confirm.
        if self.data['notes']:
            create_comm_note(self.instance, self.instance.versions.latest(),
                             self.request.user, self.data['notes'],
                             note_type=comm.SUBMISSION)
        self.instance = super(AppDetailsBasicForm, self).save(commit=True)

        # `flash` lives on the latest file, not on the Webapp row.
        uses_flash = self.cleaned_data.get('flash')
        af = self.instance.get_latest_file()
        if af is not None:
            af.update(uses_flash=bool(uses_flash))

        for tag_text in self.cleaned_data['tags']:
            Tag(tag_text=tag_text).save_tag(self.instance)

        return self.instance
class AppFeaturesForm(happyforms.ModelForm):
    """Form for editing the feature requirements of an app version."""

    class Meta:
        exclude = ['version']
        model = AppFeatures

    def __init__(self, *args, **kwargs):
        super(AppFeaturesForm, self).__init__(*args, **kwargs)
        # Snapshot the features at load time so save() can detect changes.
        if self.instance:
            self.initial_features = sorted(self.instance.to_keys())
        else:
            self.initial_features = None

    def all_fields(self):
        """
        Degeneratorizes self.__iter__(), the list of fields on the form. This
        allows further manipulation of fields: to display a subset of fields or
        order them in a specific way.
        """
        # list(self) is the idiomatic equivalent of [f for f in self.__iter__()].
        return list(self)

    def required_api_fields(self):
        """
        All fields on the form, alphabetically sorted by help text.
        """
        return sorted(self.all_fields(), key=lambda x: x.help_text)

    def get_tooltip(self, field):
        """Return the feature description for a field, or None if unknown."""
        # Field names look like "has_<FEATURE>"; the suffix keys APP_FEATURES.
        field_id = field.name.split('_', 1)[1].upper()
        return (unicode(APP_FEATURES[field_id].get('description') or '') if
                field_id in APP_FEATURES else None)

    def _changed_features(self):
        """Return (added, removed) feature names vs. the initial snapshot."""
        # A plain dict is all that's needed: the previous
        # defaultdict.fromkeys() produced a defaultdict with no
        # default_factory, which behaved like a dict anyway.
        old_features = dict.fromkeys(self.initial_features, True)
        old_features = set(unicode(f) for f
                           in AppFeatures(**old_features).to_list())
        new_features = set(unicode(f) for f in self.instance.to_list())
        added_features = new_features - old_features
        removed_features = old_features - new_features
        return added_features, removed_features

    def save(self, *args, **kwargs):
        """Save features; may flag the app for re-review on changes.

        Accepts mark_for_rereview=True as an extra keyword argument.
        """
        mark_for_rereview = kwargs.pop('mark_for_rereview', True)
        webapp = self.instance.version.webapp
        rval = super(AppFeaturesForm, self).save(*args, **kwargs)
        # Also save the webapp to update modified date and trigger a reindex.
        webapp.save(update_fields=['modified'])
        # Trigger a re-review if necessary.
        if (self.instance and mark_for_rereview and
                webapp.status in mkt.WEBAPPS_APPROVED_STATUSES and
                sorted(self.instance.to_keys()) != self.initial_features):
            added_features, removed_features = self._changed_features()
            mark_for_rereview_features_change(webapp,
                                              added_features,
                                              removed_features)
        return rval
|
|
__author__ = 'Jonathan Brodie'
import asyncore, socket,asynchat,time,threading,struct,util.util,util.encode,Queue,sys,select
from clientmessage import AuthenticationMessage
from clientmessage import ClientMessage
from hzclient.codec import clientcodec
class ConnectionManager(object):
    """Owns every connection to the cluster, the correlation-id bookkeeping,
    the asyncore I/O thread and the event-dispatch thread.

    Maps maintained per correlation id: messages (raw response bytes),
    sentmessages (the request, kept for retries), corr_conn (the connection
    the request went out on) and messagesignal (an Event set on response).
    """

    def __init__(self,config,smart=False):
        self.config=config
        self.smart=smart
        # correlation id -> raw response frame
        self.messages={}
        # correlation id -> sent ClientMessage (used for retries)
        self.sentmessages={}
        # correlation id -> connection used for the request
        self.corr_conn={}
        self.messagelist=[]
        self.deadconnections=[]
        self.proxies=[]
        self.__correlationid__=0
        self.connections=[]
        # correlation id -> Event set when the response arrives
        self.messagesignal={}
        self.events=[]
        # correlation id -> registered event handler
        self.eventregistry={}
        self.partitiontable=None
        # The first connection performs the authentication handshake.
        firstConnection=HazelConnection(config.gethost(),config.getport(),self)
        self.connections.append(firstConnection)
        self.iothread=threading.Thread(target=self.ioloop)
        self.iothread.start()
        #get the first response from the server
        initialresponse=self.getPackageWithCorrelationId(self.__correlationid__-1,True)
        # NOTE(review): leftover debug print.
        print "did we get here?"
        initialresponse=clientcodec.ClientAuthenticationCodec.decodeResponse(ClientMessage.decodeMessage(initialresponse))
        self.uuid=initialresponse.uuid
        self.owneruuid=initialresponse.ownerUuid
        #if the client is smart, initialize other connections
        if initialresponse is not None:
            print "Connection has been initalized"
        else:
            print "There was an error connecting to the server!"
        if smart:
            # Smart clients fetch the partition table and connect to every
            # member so requests can be routed to partition owners.
            msg=clientcodec.ClientGetPartitionsCodec.encodeRequest()
            self.adjustCorrelationId(msg)
            retryable=msg.retryable
            correlationid=msg.correlation
            self.sendPackage(msg)
            response=self.getPackageWithCorrelationId(correlationid,retryable)
            msg2=ClientMessage.decodeMessage(response)
            response=clientcodec.ClientGetPartitionsCodec.decodeResponse(msg2)
            self.updatePartitionTable(response.index)
            self.updateMemberList(response.members)
        #else:
            #raise Timeout Exception
        self.eventthreadflag=threading.Event()
        self.event_thread=threading.Thread(target= self.eventloop)
        self.event_thread.start()

    def addConnection(self,host, port):
        """Open a new authenticated (non-first) connection to host:port and
        keep it only if the auth response arrives."""
        newconnection=HazelConnection(host,port,self,first=False)
        correlationid=newconnection.initmessage.correlation
        response=self.getPackageWithCorrelationId(correlationid,True)
        if response is not None:
            print "Successfully added new connection"
            self.connections.append(newconnection)

    def sendPackage(self, clientmsg):
        """
        :param clientmsg: client message to send, unencoded
        :return: connection that was used to send the message. This isn't needed unless we want to test a connection to a node.
        """
        sent=False
        corr=clientmsg.correlation
        conn=None
        if self.partitiontable is not None and clientmsg.partition >= 0 and self.smart:
            # NOTE(review): `i != ...` sends on every connection EXCEPT the
            # partition owner; `==` looks like the intent — confirm.
            for i in range(len(self.connections)):
                if i != self.partitiontable[clientmsg.partition]:
                    self.sendPackageOnConnection(clientmsg,self.connections[i])
                    sent=True
        else:
            # Non-smart / no routing info: send on the first connection only.
            for connection in self.connections:
                if not sent:
                    self.sendPackageOnConnection(clientmsg,connection)
                    sent=True
        if not sent:
            print "ERROR: Could not submit to appropriate member! redelegating..."
            # NOTE(review): `sent` is never set True below, so this fallback
            # resends on every connection — confirm intended.
            for connection in self.connections:
                if not sent:
                    self.sendPackageOnConnection(clientmsg,connection)

    def sendPackageOnConnection(self,clientmsg,conn):
        """Register bookkeeping for clientmsg and queue it on conn."""
        self.messagesignal[clientmsg.correlation]=threading.Event()
        self.sentmessages[clientmsg.correlation]=clientmsg
        conn.sendmsg(clientmsg.encodeMessage())
        self.corr_conn[clientmsg.correlation]=conn

    def updateMemberList(self,memberlist):
        """
        This runs in O(n^2), which my algorithms professor would hate me for, but isn't that bad given that a cluster is
        unlikely to have more than like 100 members
        :param memberlist: memberlist to compare against
        :return:
        """
        #non-smart connections need not apply
        if not self.smart:
            return
        currentlist=self.connections
        #connections that we have but the cluster doesn't - aka dead connections
        deletelist=[]
        #given the member list, distinguish live and dead connections we have
        for connection in currentlist:
            connectionfound=False
            for member in memberlist:
                if member.port == connection.memberport and connection.memberaddress == member.host:
                    connectionfound=True
            if not connectionfound:
                deletelist.append(connection)
        #now iterate the other way to determine if the member has nodes we don't
        newmembers=[]
        for member in memberlist:
            memberfound=False
            for connection in currentlist:
                if member.port == connection.memberport and connection.memberaddress == member.host:
                    memberfound=True
            if not memberfound:
                newmembers.append(member)
        #now clear out the dead connections
        if deletelist:
            for connection in deletelist:
                self.removeconnection(connection)
        #add any new live connections
        if newmembers:
            for member in newmembers:
                self.addConnection(member.host,member.port)
        #Finally, sort our connection list so it's sorted the same way the member list is
        newlist=[]
        for member in memberlist:
            for connection in currentlist:
                if member.port == connection.memberport and connection.memberaddress == member.host:
                    newlist.append(connection)
        self.connections=newlist

    def updatePartitionTable(self,partitiontable):
        # Replace the partition-id -> connection-index routing table.
        self.partitiontable=partitiontable

    def adjustCorrelationId(self,clientmsg):
        """Stamp clientmsg with the next correlation id.

        NOTE(review): the counter is incremented without a lock while
        several threads use this manager — confirm callers serialize.
        """
        clientmsg.correlation=self.__correlationid__
        self.__correlationid__ += 1

    def adjustPartitionId(self,clientmsg,opkey):
        """Refresh the partition table from the cluster and stamp clientmsg
        with the partition id that owns opkey."""
        msg=clientcodec.ClientGetPartitionsCodec.encodeRequest()
        self.adjustCorrelationId(msg)
        retryable=msg.retryable
        correlationid=msg.correlation
        self.sendPackage(msg)
        response=self.getPackageWithCorrelationId(correlationid,retryable)
        msg2=ClientMessage.decodeMessage(response)
        response=clientcodec.ClientGetPartitionsCodec.decodeResponse(msg2)
        newpartition=util.util.computepartitionid(response.index,opkey)
        clientmsg.partition=newpartition
        self.updatePartitionTable(response.index)
        self.updateMemberList(response.members)

    def check_connections(self,timeout=15.0,use_poll=False):
        """
        Similar to asyncore.loop(), but times out after the timeout and only loops once to check the connections if we can read or write from them
        :param timeout:
        :param use_poll:
        :return:
        """
        map=asyncore.socket_map
        if use_poll and hasattr(select, 'poll'):
            poll_fun = asyncore.poll2
        else:
            poll_fun = asyncore.poll
        poll_fun(timeout,map)

    def ioloop(self):
        """I/O thread: pump asyncore and prune dead connections forever."""
        while True:
            self.check_connections()
            if self.deadconnections:
                for connection in self.deadconnections:
                    self.removeconnection(connection)
            if not self.connections:
                self.noconnections()

    def eventloop(self):
        """Event thread: dispatch queued server events to registered
        handlers whenever a connection sets the flag."""
        while True:
            self.eventthreadflag.wait(timeout=10)
            if self.eventthreadflag.is_set():
                while len(self.events) > 0:
                    # NOTE(review): self.events is shared with the asyncore
                    # thread and mutated while iterated, without locking —
                    # confirm this is safe enough in practice.
                    for event in self.events:
                        id=event.correlation
                        for ids in self.eventregistry:
                            if id == ids:
                                self.eventregistry[id].handle(event)
                        self.events.remove(event)
                        # NOTE(review): remove() just ran, so this can only
                        # fire for duplicate entries; the "no handler"
                        # message looks misplaced — confirm intent.
                        if event in self.events:
                            print "ERROR: Could not find registered event handler"
                if len(self.events) == 0:
                    self.eventthreadflag.clear()
            time.sleep(0.1)

    def getPackageWithCorrelationId(self,id,retry=False):
        """
        Gets the package with the specified id
        :param id:
        :param retry:
        :return: the response package with the correlationid id
        """
        #first acquire the lock for the manager's received packages
        returnvalue=None
        # Block until the response arrives (or the timeout elapses).
        self.messagesignal[id].wait(timeout=10)
        if id in self.messages.keys():
            returnvalue=self.messages[id]
        if retry and id not in self.messages.keys():
            print "retrying"
            # Wait once more; if still missing, re-send the original request
            # under a fresh correlation id and recurse on the new id.
            self.messagesignal[id].wait(timeout=15)
            if id in self.messages.keys():
                returnvalue=self.messages[id]
            else:
                newmsg=self.sentmessages[id]
                self.adjustCorrelationId(newmsg)
                self.sendPackage(newmsg)
                return self.getPackageWithCorrelationId(newmsg.correlation,retry)
        if returnvalue is None:
            #ping the server to keep the connection alive
            alive=self.ping(self.corr_conn[id])
            #check if the ping wasn't successful
            if not alive:
                self.deadconnections.append(self.corr_conn[id])
                print "The connection seems to be dead..."
            else:
                print "ERROR: The connection is alive but the package could not be found!"
        return returnvalue

    def noconnections(self):
        """
        Raises an exception and quits
        :return: nothing
        """
        raise ValueError("ERROR: NO CONNECTIONS TO CLUSTER FOUND. SHUTTING DOWN")

    def ping(self,connection):
        """
        :return: boolean - whether or not the server responded to ping
        """
        boolean=None
        msg=clientcodec.ClientPingCodec.encodeRequest()
        self.adjustCorrelationId(msg)
        corrid=msg.correlation
        self.sendPackageOnConnection(msg,connection)
        # A ping response within 20s means the connection is alive.
        self.messagesignal[corrid].wait(timeout=20)
        returnvalue=None
        if corrid in self.messages.keys():
            returnvalue=self.messages[corrid]
        if returnvalue is not None:
            boolean=True
        else:
            boolean=False
        return boolean

    def removeconnection(self,conn):
        """
        removes the connection conn and any mappings from correlation id to the connection
        :param conn:
        :return:
        """
        if conn in self.deadconnections:
            self.deadconnections.remove(conn)
        self.connections.remove(conn)
        # Drop every correlation-id mapping that pointed at this connection.
        for correlationid, connection in self.corr_conn.items():
            if connection == conn:
                self.corr_conn.pop(correlationid)
class HazelConnection(asyncore.dispatcher):
    """One asyncore-based socket connection to a single cluster member.

    Outgoing frames are queued via sendmsg() and drained by handle_write();
    incoming bytes are split into frames by process_input().
    """

    def __init__(self,address,port,manager,first=True):
        """
        Init function for a connection to the server
        :param address: address to connect to
        :param port: port to connect to at address
        :param manager: parent manager connection
        :param first: first connection or not
        :return:
        """
        self._writequeue=Queue.Queue()
        self.memberaddress=address
        self.memberport=port
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET,socket.SOCK_STREAM)
        self.manager=manager
        # NOTE(review): blocking mode under asyncore is unusual — confirm
        # this is intended.
        self.setblocking(1)
        username=util.encode.encodestring(self.manager.config.get_username())
        # NOTE(review): hard-coded development password.
        password=util.encode.encodestring("dev-pass")
        if first:
            # First connection authenticates as the owner connection.
            msg=clientcodec.ClientAuthenticationCodec.encodeRequest(username,password,None,None,util.encode.encodeboolean(True))
        else:
            # Later connections reuse the uuid/owner uuid obtained during
            # the initial handshake.
            msg=clientcodec.ClientAuthenticationCodec.encodeRequest(username,password,util.encode.encodestring(self.manager.uuid),util.encode.encodestring(self.manager.owneruuid),util.encode.encodeboolean(False))
        self.manager.adjustCorrelationId(msg)
        self.manager.messagesignal[msg.correlation]=threading.Event()
        self.initmessage=msg
        # "CB2PHY" protocol preamble is prepended before the auth frame.
        self.initbuffer="CB2PHY"+msg.encodeMessage()
        self.connect((address,port))

    def handle_write(self):
        """
        Called whenever the client can write over the cluster
        :return:
        """
        #don't let the queue block the thread!
        if not self._writequeue.empty():
            msg=self._writequeue.get()
            self.send(msg)

    def sendmsg(self,msg):
        # Queue raw bytes; the asyncore loop drains them in handle_write().
        self._writequeue.put(msg)

    def handle_connect(self):
        """
        This function is called when the client connects
        """
        self.sendmsg(self.initbuffer)

    def handle_read(self):
        """
        This function is called when there is data available to be read
        """
        # NOTE(review): assumes each recv() returns whole frames only; a
        # frame split across reads would confuse process_input — confirm.
        data=self.recv(2048)
        self.process_input(data)

    def process_input(self,input):
        """
        Process the input and put it in either received messages or events
        :param input: raw bytes received
        """
        while len(input) > 0:
            # Decode once to learn the frame size, then slice one frame off.
            clientmsg=ClientMessage.decodeMessage(input)
            msgsize=clientmsg.FRAME_SIZE
            currentinput=input[:msgsize]
            currentmsg=ClientMessage.decodeMessage(currentinput)
            if currentmsg.isEvent():
                if not self.manager.eventthreadflag.is_set():
                    self.manager.eventthreadflag.set()
                self.manager.events.append(clientmsg)
            else:
                # Store the raw frame and wake any waiter on this id.
                self.manager.messagesignal[currentmsg.correlation].set()
                self.manager.messages[currentmsg.correlation]=currentinput
            input=input[msgsize:]
|
|
import time
import logging
import pickle
import functools
import warnings
from packaging import version
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from ray.tune.result import DEFAULT_METRIC, TRAINING_ITERATION
from ray.tune.sample import (
Categorical,
Domain,
Float,
Integer,
LogUniform,
Quantized,
Uniform,
)
from ray.tune.suggest.suggestion import (
UNRESOLVED_SEARCH_SPACE,
UNDEFINED_METRIC_MODE,
UNDEFINED_SEARCH_SPACE,
)
from ray.tune.suggest.variant_generator import parse_spec_vars
from ray.tune.utils.util import flatten_dict, unflatten_dict, validate_warmstart
try:
import optuna as ot
from optuna.distributions import BaseDistribution as OptunaDistribution
from optuna.samplers import BaseSampler
from optuna.trial import TrialState as OptunaTrialState
from optuna.trial import Trial as OptunaTrial
except ImportError:
ot = None
OptunaDistribution = None
BaseSampler = None
OptunaTrialState = None
OptunaTrial = None
from ray.tune.suggest import Searcher
logger = logging.getLogger(__name__)
# print a warning if define by run function takes longer than this to execute
DEFINE_BY_RUN_WARN_THRESHOLD_S = 1 # 1 is arbitrary
class _OptunaTrialSuggestCaptor:
    """Utility to capture returned values from Optuna's suggest_ methods.

    This will wrap around the ``optuna.Trial`` object and decorate all
    ``suggest_`` callables with a function capturing the returned value,
    which will be saved in the ``captured_values`` dict.
    """

    def __init__(self, ot_trial: OptunaTrial) -> None:
        self.ot_trial = ot_trial
        # Maps parameter name -> value returned by the suggest_ call.
        self.captured_values: Dict[str, Any] = {}

    def _get_wrapper(self, func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # name is always the first arg for suggest_ methods, but it may
            # also be passed as a keyword. Do not use
            # kwargs.get("name", args[0]): the default is evaluated eagerly
            # and raises IndexError for keyword-only calls.
            name = kwargs["name"] if "name" in kwargs else args[0]
            ret = func(*args, **kwargs)
            self.captured_values[name] = ret
            return ret

        return wrapper

    def __getattr__(self, item_name: str) -> Any:
        item = getattr(self.ot_trial, item_name)
        # Only suggest_* methods are wrapped; everything else passes through.
        if item_name.startswith("suggest_") and callable(item):
            return self._get_wrapper(item)
        return item
class OptunaSearch(Searcher):
"""A wrapper around Optuna to provide trial suggestions.
`Optuna <https://optuna.org/>`_ is a hyperparameter optimization library.
In contrast to other libraries, it employs define-by-run style
hyperparameter definitions.
This Searcher is a thin wrapper around Optuna's search algorithms.
You can pass any Optuna sampler, which will be used to generate
hyperparameter suggestions.
Multi-objective optimization is supported.
Args:
space (dict|Callable): Hyperparameter search space definition for
Optuna's sampler. This can be either a :class:`dict` with
parameter names as keys and ``optuna.distributions`` as values,
or a Callable - in which case, it should be a define-by-run
function using ``optuna.trial`` to obtain the hyperparameter
values. The function should return either a :class:`dict` of
constant values with names as keys, or None.
For more information, see https://optuna.readthedocs.io\
/en/stable/tutorial/10_key_features/002_configurations.html.
.. warning::
No actual computation should take place in the define-by-run
function. Instead, put the training logic inside the function
or class trainable passed to ``tune.run``.
metric (str|list): The training result objective value attribute. If
None but a mode was passed, the anonymous metric ``_metric``
will be used per default. Can be a list of metrics for
multi-objective optimization.
mode (str|list): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute. Can be a list of
modes for multi-objective optimization (corresponding to
``metric``).
points_to_evaluate (list): Initial parameter suggestions to be run
first. This is for when you already have some good parameters
you want to run first to help the algorithm make better suggestions
for future parameters. Needs to be a list of dicts containing the
configurations.
sampler (optuna.samplers.BaseSampler): Optuna sampler used to
draw hyperparameter configurations. Defaults to ``MOTPESampler``
for multi-objective optimization with Optuna<2.9.0, and
``TPESampler`` in every other case.
.. warning::
Please note that with Optuna 2.10.0 and earlier
default ``MOTPESampler``/``TPESampler`` suffer
from performance issues when dealing with a large number of
completed trials (approx. >100). This will manifest as
a delay when suggesting new configurations.
This is an Optuna issue and may be fixed in a future
Optuna release.
seed (int): Seed to initialize sampler with. This parameter is only
used when ``sampler=None``. In all other cases, the sampler
you pass should be initialized with the seed already.
evaluated_rewards (list): If you have previously evaluated the
parameters passed in as points_to_evaluate you can avoid
re-running those trials by passing in the reward attributes
as a list so the optimiser can be told the results without
needing to re-compute the trial. Must be the same length as
points_to_evaluate.
.. warning::
When using ``evaluated_rewards``, the search space ``space``
must be provided as a :class:`dict` with parameter names as
keys and ``optuna.distributions`` instances as values. The
define-by-run search space definition is not yet supported with
this functionality.
Tune automatically converts search spaces to Optuna's format:
.. code-block:: python
from ray.tune.suggest.optuna import OptunaSearch
config = {
"a": tune.uniform(6, 8)
"b": tune.loguniform(1e-4, 1e-2)
}
optuna_search = OptunaSearch(
metric="loss",
mode="min")
tune.run(trainable, config=config, search_alg=optuna_search)
If you would like to pass the search space manually, the code would
look like this:
.. code-block:: python
from ray.tune.suggest.optuna import OptunaSearch
import optuna
space = {
"a": optuna.distributions.UniformDistribution(6, 8),
"b": optuna.distributions.LogUniformDistribution(1e-4, 1e-2),
}
optuna_search = OptunaSearch(
space,
metric="loss",
mode="min")
tune.run(trainable, search_alg=optuna_search)
# Equivalent Optuna define-by-run function approach:
def define_search_space(trial: optuna.Trial):
trial.suggest_float("a", 6, 8)
trial.suggest_float("b", 1e-4, 1e-2, log=True)
# training logic goes into trainable, this is just
# for search space definition
optuna_search = OptunaSearch(
define_search_space,
metric="loss",
mode="min")
tune.run(trainable, search_alg=optuna_search)
Multi-objective optimization is supported:
.. code-block:: python
from ray.tune.suggest.optuna import OptunaSearch
import optuna
space = {
"a": optuna.distributions.UniformDistribution(6, 8),
"b": optuna.distributions.LogUniformDistribution(1e-4, 1e-2),
}
# Note you have to specify metric and mode here instead of
# in tune.run
optuna_search = OptunaSearch(
space,
metric=["loss1", "loss2"],
mode=["min", "max"])
# Do not specify metric and mode here!
tune.run(
trainable,
search_alg=optuna_search
)
You can pass configs that will be evaluated first using
``points_to_evaluate``:
.. code-block:: python
from ray.tune.suggest.optuna import OptunaSearch
import optuna
space = {
"a": optuna.distributions.UniformDistribution(6, 8),
"b": optuna.distributions.LogUniformDistribution(1e-4, 1e-2),
}
optuna_search = OptunaSearch(
space,
            points_to_evaluate=[{"a": 6.5, "b": 5e-4}, {"a": 7.5, "b": 1e-3}],
metric="loss",
mode="min")
tune.run(trainable, search_alg=optuna_search)
Avoid re-running evaluated trials by passing the rewards together with
`points_to_evaluate`:
.. code-block:: python
from ray.tune.suggest.optuna import OptunaSearch
import optuna
space = {
"a": optuna.distributions.UniformDistribution(6, 8),
"b": optuna.distributions.LogUniformDistribution(1e-4, 1e-2),
}
optuna_search = OptunaSearch(
space,
            points_to_evaluate=[{"a": 6.5, "b": 5e-4}, {"a": 7.5, "b": 1e-3}],
            evaluated_rewards=[0.89, 0.42],
metric="loss",
mode="min")
tune.run(trainable, search_alg=optuna_search)
.. versionadded:: 0.8.8
"""
    def __init__(
        self,
        space: Optional[
            Union[
                Dict[str, "OptunaDistribution"],
                List[Tuple],
                Callable[["OptunaTrial"], Optional[Dict[str, Any]]],
            ]
        ] = None,
        metric: Optional[Union[str, List[str]]] = None,
        mode: Optional[Union[str, List[str]]] = None,
        points_to_evaluate: Optional[List[Dict]] = None,
        sampler: Optional["BaseSampler"] = None,
        seed: Optional[int] = None,
        evaluated_rewards: Optional[List] = None,
    ):
        """Initialize the searcher; see the class docstring for arguments."""
        assert ot is not None, "Optuna must be installed! Run `pip install optuna`."
        super(OptunaSearch, self).__init__(
            metric=metric, mode=mode, max_concurrent=None, use_early_stopped_trials=None
        )

        if isinstance(space, dict) and space:
            resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
            if domain_vars or grid_vars:
                # A Tune-style search space was passed: warn and convert it
                # to native Optuna distributions.
                logger.warning(
                    UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self).__name__)
                )
                space = self.convert_search_space(space)
            else:
                # Flatten to support nested dicts
                space = flatten_dict(space, "/")

        self._space = space

        self._points_to_evaluate = points_to_evaluate or []
        self._evaluated_rewards = evaluated_rewards

        self._study_name = "optuna"  # Fixed study name for in-memory storage

        if sampler and seed:
            logger.warning(
                "You passed an initialized sampler to `OptunaSearch`. The "
                "`seed` parameter has to be passed to the sampler directly "
                "and will be ignored."
            )
        elif sampler:
            assert isinstance(sampler, BaseSampler), (
                "You can only pass an instance of "
                "`optuna.samplers.BaseSampler` "
                "as a sampler to `OptunaSearcher`."
            )

        self._sampler = sampler
        self._seed = seed

        # trial_id -> optuna trial, populated lazily by suggest().
        self._ot_trials = {}
        self._ot_study = None
        if self._space:
            # Space known up front: create the study now. Otherwise it is
            # deferred to set_search_properties().
            self._setup_study(mode)
    def _setup_study(self, mode: Union[str, list]):
        """Create the in-memory Optuna study and enqueue warm-start points.

        Args:
            mode: A single "min"/"max" string, or a list of them for
                multi-objective optimization.
        """
        if self._metric is None and self._mode:
            if isinstance(self._mode, list):
                raise ValueError(
                    "If ``mode`` is a list (multi-objective optimization "
                    "case), ``metric`` must be defined."
                )
            # If only a mode was passed, use anonymous metric
            self._metric = DEFAULT_METRIC

        pruner = ot.pruners.NopPruner()
        storage = ot.storages.InMemoryStorage()

        if self._sampler:
            sampler = self._sampler
        elif isinstance(mode, list) and version.parse(ot.__version__) < version.parse(
            "2.9.0"
        ):
            # MOTPESampler deprecated in Optuna>=2.9.0
            sampler = ot.samplers.MOTPESampler(seed=self._seed)
        else:
            sampler = ot.samplers.TPESampler(seed=self._seed)

        if isinstance(mode, list):
            # Multi-objective: one direction per metric.
            study_direction_args = dict(
                directions=["minimize" if m == "min" else "maximize" for m in mode],
            )
        else:
            study_direction_args = dict(
                direction="minimize" if mode == "min" else "maximize",
            )

        self._ot_study = ot.study.create_study(
            storage=storage,
            sampler=sampler,
            pruner=pruner,
            study_name=self._study_name,
            load_if_exists=True,
            **study_direction_args,
        )

        if self._points_to_evaluate:
            validate_warmstart(
                self._space,
                self._points_to_evaluate,
                self._evaluated_rewards,
                validate_point_name_lengths=not callable(self._space),
            )
            if self._evaluated_rewards:
                # Rewards known: register completed trials directly.
                for point, reward in zip(
                    self._points_to_evaluate, self._evaluated_rewards
                ):
                    self.add_evaluated_point(point, reward)
            else:
                # Rewards unknown: just enqueue the configs to run first.
                for point in self._points_to_evaluate:
                    self._ot_study.enqueue_trial(point)
def set_search_properties(
self, metric: Optional[str], mode: Optional[str], config: Dict, **spec
) -> bool:
if self._space:
return False
space = self.convert_search_space(config)
self._space = space
if metric:
self._metric = metric
if mode:
self._mode = mode
self._setup_study(self._mode)
return True
def _suggest_from_define_by_run_func(
self,
func: Callable[["OptunaTrial"], Optional[Dict[str, Any]]],
ot_trial: "OptunaTrial",
) -> Dict:
captor = _OptunaTrialSuggestCaptor(ot_trial)
time_start = time.time()
ret = func(captor)
time_taken = time.time() - time_start
if time_taken > DEFINE_BY_RUN_WARN_THRESHOLD_S:
warnings.warn(
"Define-by-run function passed in the `space` argument "
f"took {time_taken} seconds to "
"run. Ensure that actual computation, training takes "
"place inside Tune's train functions or Trainables "
"passed to `tune.run`."
)
if ret is not None:
if not isinstance(ret, dict):
raise TypeError(
"The return value of the define-by-run function "
"passed in the `space` argument should be "
"either None or a `dict` with `str` keys. "
f"Got {type(ret)}."
)
if not all(isinstance(k, str) for k in ret.keys()):
raise TypeError(
"At least one of the keys in the dict returned by the "
"define-by-run function passed in the `space` argument "
"was not a `str`."
)
return {**captor.captured_values, **ret} if ret else captor.captured_values
    def suggest(self, trial_id: str) -> Optional[Dict]:
        """Return a new hyperparameter configuration for ``trial_id``.

        Raises:
            RuntimeError: If the search space or metric/mode are not set.
        """
        if not self._space:
            raise RuntimeError(
                UNDEFINED_SEARCH_SPACE.format(
                    cls=self.__class__.__name__, space="space"
                )
            )
        if not self._metric or not self._mode:
            raise RuntimeError(
                UNDEFINED_METRIC_MODE.format(
                    cls=self.__class__.__name__, metric=self._metric, mode=self._mode
                )
            )

        if callable(self._space):
            # Define-by-run case
            if trial_id not in self._ot_trials:
                self._ot_trials[trial_id] = self._ot_study.ask()

            ot_trial = self._ot_trials[trial_id]

            params = self._suggest_from_define_by_run_func(self._space, ot_trial)
        else:
            # Use Optuna ask interface (since version 2.6.0)
            if trial_id not in self._ot_trials:
                self._ot_trials[trial_id] = self._ot_study.ask(
                    fixed_distributions=self._space
                )
            ot_trial = self._ot_trials[trial_id]
            params = ot_trial.params

        # The dict space was flattened with "/" separators in __init__;
        # restore the nesting before handing the config to Tune.
        return unflatten_dict(params)
def on_trial_result(self, trial_id: str, result: Dict):
if isinstance(self.metric, list):
# Optuna doesn't support incremental results
# for multi-objective optimization
return
metric = result[self.metric]
step = result[TRAINING_ITERATION]
ot_trial = self._ot_trials[trial_id]
ot_trial.report(metric, step)
def on_trial_complete(
self, trial_id: str, result: Optional[Dict] = None, error: bool = False
):
ot_trial = self._ot_trials[trial_id]
if result:
if isinstance(self.metric, list):
val = [result.get(metric, None) for metric in self.metric]
else:
val = result.get(self.metric, None)
else:
val = None
ot_trial_state = OptunaTrialState.COMPLETE
if val is None:
if error:
ot_trial_state = OptunaTrialState.FAIL
else:
ot_trial_state = OptunaTrialState.PRUNED
try:
self._ot_study.tell(ot_trial, val, state=ot_trial_state)
except ValueError as exc:
logger.warning(exc) # E.g. if NaN was reported
    def add_evaluated_point(
        self,
        parameters: Dict,
        value: float,
        error: bool = False,
        pruned: bool = False,
        intermediate_values: Optional[List[float]] = None,
    ):
        """Register an externally evaluated point with the Optuna study.

        Raises:
            RuntimeError: If the search space or metric/mode are unset.
            TypeError: If the space is a define-by-run function, which
                cannot be combined with pre-evaluated rewards.
        """
        if not self._space:
            raise RuntimeError(
                UNDEFINED_SEARCH_SPACE.format(
                    cls=self.__class__.__name__, space="space"
                )
            )
        if not self._metric or not self._mode:
            raise RuntimeError(
                UNDEFINED_METRIC_MODE.format(
                    cls=self.__class__.__name__, metric=self._metric, mode=self._mode
                )
            )
        if callable(self._space):
            raise TypeError(
                "Define-by-run function passed in `space` argument is not "
                "yet supported when using `evaluated_rewards`. Please provide "
                "an `OptunaDistribution` dict or pass a Ray Tune "
                "search space to `tune.run()`."
            )
        # Map the reported outcome onto an Optuna trial state.
        ot_trial_state = OptunaTrialState.COMPLETE
        if error:
            ot_trial_state = OptunaTrialState.FAIL
        elif pruned:
            ot_trial_state = OptunaTrialState.PRUNED
        if intermediate_values:
            # Optuna expects intermediate values keyed by integer step.
            intermediate_values_dict = {
                i: value for i, value in enumerate(intermediate_values)
            }
        else:
            intermediate_values_dict = None
        trial = ot.trial.create_trial(
            state=ot_trial_state,
            value=value,
            params=parameters,
            distributions=self._space,
            intermediate_values=intermediate_values_dict,
        )
        self._ot_study.add_trial(trial)
def save(self, checkpoint_path: str):
save_object = (
self._sampler,
self._ot_trials,
self._ot_study,
self._points_to_evaluate,
self._evaluated_rewards,
)
with open(checkpoint_path, "wb") as outputFile:
pickle.dump(save_object, outputFile)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "rb") as inputFile:
save_object = pickle.load(inputFile)
if len(save_object) == 5:
(
self._sampler,
self._ot_trials,
self._ot_study,
self._points_to_evaluate,
self._evaluated_rewards,
) = save_object
else:
# Backwards compatibility
(
self._sampler,
self._ot_trials,
self._ot_study,
self._points_to_evaluate,
) = save_object
    @staticmethod
    def convert_search_space(spec: Dict) -> Dict[str, Any]:
        """Convert a Ray Tune search space into Optuna distributions.

        Returns a flat dict mapping "/"-joined parameter paths to
        ``ot.distributions.BaseDistribution`` instances. Grid search
        parameters are not supported and raise ``ValueError``.
        """
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
        if not domain_vars and not grid_vars:
            return {}
        if grid_vars:
            raise ValueError(
                "Grid search parameters cannot be automatically converted "
                "to an Optuna search space."
            )
        # Flatten and resolve again after checking for grid search.
        spec = flatten_dict(spec, prevent_delimiter=True)
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
        def resolve_value(domain: Domain) -> ot.distributions.BaseDistribution:
            # Translate a single Tune Domain (plus optional quantization)
            # into the matching Optuna distribution.
            quantize = None
            sampler = domain.get_sampler()
            if isinstance(sampler, Quantized):
                quantize = sampler.q
                sampler = sampler.sampler
                if isinstance(sampler, LogUniform):
                    logger.warning(
                        "Optuna does not handle quantization in loguniform "
                        "sampling. The parameter will be passed but it will "
                        "probably be ignored."
                    )
            if isinstance(domain, Float):
                if isinstance(sampler, LogUniform):
                    if quantize:
                        logger.warning(
                            "Optuna does not support both quantization and "
                            "sampling from LogUniform. Dropped quantization."
                        )
                    return ot.distributions.LogUniformDistribution(
                        domain.lower, domain.upper
                    )
                elif isinstance(sampler, Uniform):
                    if quantize:
                        return ot.distributions.DiscreteUniformDistribution(
                            domain.lower, domain.upper, quantize
                        )
                    return ot.distributions.UniformDistribution(
                        domain.lower, domain.upper
                    )
            elif isinstance(domain, Integer):
                if isinstance(sampler, LogUniform):
                    return ot.distributions.IntLogUniformDistribution(
                        domain.lower, domain.upper - 1, step=quantize or 1
                    )
                elif isinstance(sampler, Uniform):
                    # Upper bound should be inclusive for quantization and
                    # exclusive otherwise
                    return ot.distributions.IntUniformDistribution(
                        domain.lower,
                        domain.upper - int(bool(not quantize)),
                        step=quantize or 1,
                    )
            elif isinstance(domain, Categorical):
                if isinstance(sampler, Uniform):
                    return ot.distributions.CategoricalDistribution(domain.categories)
            # No supported (domain, sampler) combination matched.
            raise ValueError(
                "Optuna search does not support parameters of type "
                "`{}` with samplers of type `{}`".format(
                    type(domain).__name__, type(domain.sampler).__name__
                )
            )
        # Parameter name is e.g. "a/b/c" for nested dicts
        values = {"/".join(path): resolve_value(domain) for path, domain in domain_vars}
        return values
|
|
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorforce import util
from tensorforce.core.networks import Network
from tensorforce.core.distributions import Distribution, Bernoulli, Categorical, Gaussian, Beta
from tensorforce.models import MemoryModel
class DistributionModel(MemoryModel):
    """
    Base class for models using distributions parametrized by a neural network.
    """
    # Component keys used by get_components() for checkpointing/inspection.
    COMPONENT_NETWORK = "network"
    COMPONENT_DISTRIBUTION = "distribution"
    def __init__(
        self,
        states,
        actions,
        scope,
        device,
        saver,
        summarizer,
        execution,
        batching_capacity,
        variable_noise,
        states_preprocessing,
        actions_exploration,
        reward_preprocessing,
        update_mode,
        memory,
        optimizer,
        discount,
        network,
        distributions,
        entropy_regularization,
        requires_deterministic
    ):
        # network: network spec; distributions: optional per-action
        # distribution specs; entropy_regularization: non-negative weight
        # or None; requires_deterministic: force deterministic sampling.
        self.network_spec = network
        self.distributions_spec = distributions
        # Entropy regularization
        assert entropy_regularization is None or entropy_regularization >= 0.0
        self.entropy_regularization = entropy_regularization
        # For deterministic action sampling (Q vs PG model)
        self.requires_deterministic = requires_deterministic
        # Created later in initialize().
        self.network = None
        self.distributions = None
        self.fn_kl_divergence = None
        super(DistributionModel, self).__init__(
            states=states,
            actions=actions,
            scope=scope,
            device=device,
            saver=saver,
            summarizer=summarizer,
            execution=execution,
            batching_capacity=batching_capacity,
            variable_noise=variable_noise,
            states_preprocessing=states_preprocessing,
            actions_exploration=actions_exploration,
            reward_preprocessing=reward_preprocessing,
            update_mode=update_mode,
            memory=memory,
            optimizer=optimizer,
            discount=discount
        )
    def initialize(self, custom_getter):
        """Create the network, per-action distributions and KL template."""
        # Network
        self.network = Network.from_spec(
            spec=self.network_spec,
            kwargs=dict(summary_labels=self.summary_labels)
        )
        # Before super-call since internals_spec attribute is required subsequently.
        assert len(self.internals_spec) == 0
        self.internals_spec = self.network.internals_spec()
        super(DistributionModel, self).initialize(custom_getter)
        # Distributions
        self.distributions = self.create_distributions()
        # KL divergence function
        self.fn_kl_divergence = tf.make_template(
            name_='kl-divergence',
            func_=self.tf_kl_divergence,
            custom_getter_=custom_getter
        )
    def create_distributions(self):
        """Build one distribution per action, from spec or by action type."""
        distributions = dict()
        for name, action in self.actions_spec.items():
            if self.distributions_spec is not None and name in self.distributions_spec:
                # Explicit spec takes precedence over type-based defaults.
                kwargs = dict(action)
                kwargs['summary_labels'] = self.summary_labels
                distributions[name] = Distribution.from_spec(
                    spec=self.distributions_spec[name],
                    kwargs=kwargs
                )
            elif action['type'] == 'bool':
                distributions[name] = Bernoulli(
                    shape=action['shape'],
                    summary_labels=self.summary_labels
                )
            elif action['type'] == 'int':
                distributions[name] = Categorical(
                    shape=action['shape'],
                    num_actions=action['num_actions'],
                    summary_labels=self.summary_labels
                )
            elif action['type'] == 'float':
                if 'min_value' in action:
                    # Bounded continuous actions use a Beta distribution.
                    distributions[name] = Beta(
                        shape=action['shape'],
                        min_value=action['min_value'],
                        max_value=action['max_value'],
                        summary_labels=self.summary_labels
                    )
                else:
                    distributions[name] = Gaussian(
                        shape=action['shape'],
                        summary_labels=self.summary_labels
                    )
        return distributions
    def tf_actions_and_internals(self, states, internals, deterministic):
        """Sample actions (and next internals) from the parametrized distributions."""
        embedding, internals = self.network.apply(
            x=states,
            internals=internals,
            update=tf.constant(value=False),
            return_internals=True
        )
        actions = dict()
        for name, distribution in self.distributions.items():
            distr_params = distribution.parameterize(x=embedding)
            actions[name] = distribution.sample(
                distr_params=distr_params,
                deterministic=tf.logical_or(x=deterministic, y=self.requires_deterministic)
            )
            # Prefix named variable with "name_" if more than 1 distribution
            if len(self.distributions.items()) > 1:
                name_prefix = name + "_"
            else:
                name_prefix = ""
            # parameterize() returns list as [logits, probabilities, state_value]
            self.network.set_named_tensor(name_prefix + "logits", distr_params[0])
            self.network.set_named_tensor(name_prefix + "probabilities", distr_params[1])
            self.network.set_named_tensor(name_prefix + "state_value", distr_params[2])
        return actions, internals
    def tf_regularization_losses(self, states, internals, update):
        """Collect network, distribution and entropy regularization losses."""
        losses = super(DistributionModel, self).tf_regularization_losses(
            states=states,
            internals=internals,
            update=update
        )
        network_loss = self.network.regularization_loss()
        if network_loss is not None:
            losses['network'] = network_loss
        # Sum regularization losses over all distributions.
        for distribution in self.distributions.values():
            regularization_loss = distribution.regularization_loss()
            if regularization_loss is not None:
                if 'distributions' in losses:
                    losses['distributions'] += regularization_loss
                else:
                    losses['distributions'] = regularization_loss
        if self.entropy_regularization is not None and self.entropy_regularization > 0.0:
            entropies = list()
            embedding = self.network.apply(x=states, internals=internals, update=update)
            for name, distribution in self.distributions.items():
                distr_params = distribution.parameterize(x=embedding)
                entropy = distribution.entropy(distr_params=distr_params)
                collapsed_size = util.prod(util.shape(entropy)[1:])
                entropy = tf.reshape(tensor=entropy, shape=(-1, collapsed_size))
                entropies.append(entropy)
            entropy_per_instance = tf.reduce_mean(input_tensor=tf.concat(values=entropies, axis=1), axis=1)
            entropy = tf.reduce_mean(input_tensor=entropy_per_instance, axis=0)
            if 'entropy' in self.summary_labels:
                summary = tf.summary.scalar(name='entropy', tensor=entropy)
                self.summaries.append(summary)
            # Negated: higher entropy lowers the loss (entropy bonus).
            losses['entropy'] = -self.entropy_regularization * entropy
        return losses
    def tf_kl_divergence(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):
        """Mean KL divergence between fixed (stop-gradient) and current distributions."""
        embedding = self.network.apply(x=states, internals=internals, update=update)
        kl_divergences = list()
        for name, distribution in self.distributions.items():
            distr_params = distribution.parameterize(x=embedding)
            # Fixed copy so gradients only flow through the second argument.
            fixed_distr_params = tuple(tf.stop_gradient(input=value) for value in distr_params)
            kl_divergence = distribution.kl_divergence(distr_params1=fixed_distr_params, distr_params2=distr_params)
            collapsed_size = util.prod(util.shape(kl_divergence)[1:])
            kl_divergence = tf.reshape(tensor=kl_divergence, shape=(-1, collapsed_size))
            kl_divergences.append(kl_divergence)
        kl_divergence_per_instance = tf.reduce_mean(input_tensor=tf.concat(values=kl_divergences, axis=1), axis=1)
        return tf.reduce_mean(input_tensor=kl_divergence_per_instance, axis=0)
    def optimizer_arguments(self, states, internals, actions, terminal, reward, next_states, next_internals):
        """Extend the parent's optimizer arguments with the KL-divergence fn."""
        arguments = super(DistributionModel, self).optimizer_arguments(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward,
            next_states=next_states,
            next_internals=next_internals
        )
        arguments['fn_kl_divergence'] = self.fn_kl_divergence
        return arguments
    def get_variables(self, include_submodules=False, include_nontrainable=False):
        """Return model variables plus network and distribution variables."""
        model_variables = super(DistributionModel, self).get_variables(
            include_submodules=include_submodules,
            include_nontrainable=include_nontrainable
        )
        network_variables = self.network.get_variables(include_nontrainable=include_nontrainable)
        model_variables += network_variables
        # Sorted by name for a deterministic variable order.
        distribution_variables = [
            variable for name in sorted(self.distributions)
            for variable in self.distributions[name].get_variables(include_nontrainable=include_nontrainable)
        ]
        model_variables += distribution_variables
        return model_variables
    def get_summaries(self):
        """Return model, network and distribution summaries."""
        model_summaries = super(DistributionModel, self).get_summaries()
        network_summaries = self.network.get_summaries()
        distribution_summaries = [
            summary for name in sorted(self.distributions)
            for summary in self.distributions[name].get_summaries()
        ]
        return model_summaries + network_summaries + distribution_summaries
    def get_components(self):
        """Return sub-components (network, distributions) keyed by name."""
        result = dict(super(DistributionModel, self).get_components())
        result[DistributionModel.COMPONENT_NETWORK] = self.network
        for action, distribution in self.distributions.items():
            result["%s_%s" % (DistributionModel.COMPONENT_DISTRIBUTION, action)] = distribution
        # Convenience alias when there is exactly one distribution.
        if len(self.distributions) == 1:
            result[DistributionModel.COMPONENT_DISTRIBUTION] = next(iter(self.distributions.values()))
        return result
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for volume and images."""
import datetime
import mock
import os
import tempfile
from oslo_utils import imageutils
from oslo_utils import units
from cinder import db
from cinder import exception
from cinder.message import message_field
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder.tests import fake_driver
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit.image import fake as fake_image
from cinder.tests.unit import utils as tests_utils
from cinder.tests.unit import volume as base
import cinder.volume
from cinder.volume import manager as vol_manager
QUOTAS = quota.QUOTAS
NON_EXISTENT_IMAGE_ID = '003f540f-ec6b-4293-a3f9-7c68646b0f5c'
class FakeImageService(object):
    """Minimal stub image service that always reports a 2 GiB raw image."""

    def __init__(self, db_driver=None, image_service=None):
        # Accept (and ignore) the arguments the real service takes.
        pass

    def show(self, context, image_id):
        # Fixed metadata, independent of the requested image_id.
        image = {
            'size': 2 * units.Gi,
            'disk_format': 'raw',
            'container_format': 'bare',
            'status': 'active',
        }
        return image
class CopyVolumeToImageTestCase(base.BaseVolumeTestCase):
    """Exercise VolumeManager.copy_volume_to_image success and error paths."""
    def fake_local_path(self, volume):
        # Point the driver at the temp file created in setUp().
        return self.dst_path
    def setUp(self):
        super(CopyVolumeToImageTestCase, self).setUp()
        # Temp file standing in for the volume's local device path.
        self.dst_fd, self.dst_path = tempfile.mkstemp()
        self.addCleanup(os.unlink, self.dst_path)
        os.close(self.dst_fd)
        self.mock_object(self.volume.driver, 'local_path',
                         self.fake_local_path)
        self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
        self.image_meta = {
            'id': self.image_id,
            'container_format': 'bare',
            'disk_format': 'raw'
        }
        self.volume_id = fake.VOLUME_ID
        self.addCleanup(db.volume_destroy, self.context, self.volume_id)
        # Base attributes for the volume each test creates in the DB.
        self.volume_attrs = {
            'id': self.volume_id,
            'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
            'display_description': 'Test Desc',
            'size': 20,
            'status': 'uploading',
            'host': 'dummy'
        }
    def test_copy_volume_to_image_status_available(self):
        # creating volume testdata
        self.volume_attrs['instance_uuid'] = None
        db.volume_create(self.context, self.volume_attrs)
        # start test
        self.volume.copy_volume_to_image(self.context,
                                         self.volume_id,
                                         self.image_meta)
        volume = db.volume_get(self.context, self.volume_id)
        # Detached volume returns to 'available' after the upload.
        self.assertEqual('available', volume['status'])
    def test_copy_volume_to_image_over_image_quota(self):
        # creating volume testdata
        self.volume_attrs['instance_uuid'] = None
        volume = db.volume_create(self.context, self.volume_attrs)
        with mock.patch.object(self.volume.driver,
                               'copy_volume_to_image') as driver_copy_mock:
            driver_copy_mock.side_effect = exception.ImageLimitExceeded
            # test with image not in queued state
            self.assertRaises(exception.ImageLimitExceeded,
                              self.volume.copy_volume_to_image,
                              self.context,
                              self.volume_id,
                              self.image_meta)
            # Assert a user message was created
            self.volume.message_api.create.assert_called_once_with(
                self.context,
                message_field.Action.COPY_VOLUME_TO_IMAGE,
                resource_uuid=volume['id'],
                exception=mock.ANY,
                detail=message_field.Detail.FAILED_TO_UPLOAD_VOLUME)
    def test_copy_volume_to_image_instance_deleted(self):
        # During uploading volume to image if instance is deleted,
        # volume should be in available status.
        self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
        # Creating volume testdata
        self.volume_attrs['instance_uuid'] = 'b21f957d-a72f-4b93-b5a5-' \
                                             '45b1161abb02'
        db.volume_create(self.context, self.volume_attrs)
        method = 'volume_update_status_based_on_attachment'
        with mock.patch.object(db, method,
                               wraps=getattr(db, method)) as mock_update:
            # Start test
            self.volume.copy_volume_to_image(self.context,
                                             self.volume_id,
                                             self.image_meta)
            # Check 'volume_update_status_after_copy_volume_to_image'
            # is called 1 time
            self.assertEqual(1, mock_update.call_count)
            # Check volume status has changed to available because
            # instance is deleted
            volume = db.volume_get(self.context, self.volume_id)
            self.assertEqual('available', volume['status'])
    def test_copy_volume_to_image_status_use(self):
        self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
        # creating volume testdata
        db.volume_create(self.context, self.volume_attrs)
        # start test
        self.volume.copy_volume_to_image(self.context,
                                         self.volume_id,
                                         self.image_meta)
        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume['status'])
    def test_copy_volume_to_image_exception(self):
        # Uploading to a non-existent image must raise and reset status.
        self.image_meta['id'] = NON_EXISTENT_IMAGE_ID
        # creating volume testdata
        self.volume_attrs['status'] = 'in-use'
        db.volume_create(self.context, self.volume_attrs)
        # start test
        self.assertRaises(exception.ImageNotFound,
                          self.volume.copy_volume_to_image,
                          self.context,
                          self.volume_id,
                          self.image_meta)
        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume['status'])
    def test_copy_volume_to_image_driver_not_initialized(self):
        # creating volume testdata
        db.volume_create(self.context, self.volume_attrs)
        # set initialized to False
        self.volume.driver._initialized = False
        # start test
        self.assertRaises(exception.DriverNotInitialized,
                          self.volume.copy_volume_to_image,
                          self.context,
                          self.volume_id,
                          self.image_meta)
        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume.status)
    def test_copy_volume_to_image_driver_exception(self):
        # A driver failure should delete queued/saving images but keep
        # images that were already active.
        self.image_meta['id'] = self.image_id
        image_service = fake_image.FakeImageService()
        # create new image in queued state
        queued_image_id = 'd5133f15-f753-41bd-920a-06b8c49275d9'
        queued_image_meta = image_service.show(self.context, self.image_id)
        queued_image_meta['id'] = queued_image_id
        queued_image_meta['status'] = 'queued'
        image_service.create(self.context, queued_image_meta)
        # create new image in saving state
        saving_image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
        saving_image_meta = image_service.show(self.context, self.image_id)
        saving_image_meta['id'] = saving_image_id
        saving_image_meta['status'] = 'saving'
        image_service.create(self.context, saving_image_meta)
        # create volume
        self.volume_attrs['status'] = 'available'
        self.volume_attrs['instance_uuid'] = None
        db.volume_create(self.context, self.volume_attrs)
        with mock.patch.object(self.volume.driver,
                               'copy_volume_to_image') as driver_copy_mock:
            driver_copy_mock.side_effect = exception.VolumeDriverException(
                "Error")
            # test with image not in queued state
            self.assertRaises(exception.VolumeDriverException,
                              self.volume.copy_volume_to_image,
                              self.context,
                              self.volume_id,
                              self.image_meta)
            # Make sure we are passing an OVO instance and not an ORM instance
            # to the driver
            self.assertIsInstance(driver_copy_mock.call_args[0][1],
                                  objects.Volume)
            volume = db.volume_get(self.context, self.volume_id)
            self.assertEqual('available', volume['status'])
            # image shouldn't be deleted if it is not in queued state
            image_service.show(self.context, self.image_id)
            # test with image in queued state
            self.assertRaises(exception.VolumeDriverException,
                              self.volume.copy_volume_to_image,
                              self.context,
                              self.volume_id,
                              queued_image_meta)
            volume = db.volume_get(self.context, self.volume_id)
            self.assertEqual('available', volume['status'])
            # queued image should be deleted
            self.assertRaises(exception.ImageNotFound,
                              image_service.show,
                              self.context,
                              queued_image_id)
            # test with image in saving state
            self.assertRaises(exception.VolumeDriverException,
                              self.volume.copy_volume_to_image,
                              self.context,
                              self.volume_id,
                              saving_image_meta)
            volume = db.volume_get(self.context, self.volume_id)
            self.assertEqual('available', volume['status'])
            # image in saving state should be deleted
            self.assertRaises(exception.ImageNotFound,
                              image_service.show,
                              self.context,
                              saving_image_id)
    @mock.patch.object(QUOTAS, 'reserve')
    @mock.patch.object(QUOTAS, 'commit')
    @mock.patch.object(vol_manager.VolumeManager, 'create_volume')
    @mock.patch.object(fake_driver.FakeLoggingVolumeDriver,
                       'copy_volume_to_image')
    def _test_copy_volume_to_image_with_image_volume(
            self, mock_copy, mock_create, mock_quota_commit,
            mock_quota_reserve):
        # Shared helper for the image-volume upload tests below; returns
        # the resulting image (deleted from the service before returning).
        self.flags(glance_api_version=2)
        self.volume.driver.configuration.image_upload_use_cinder_backend = True
        self.addCleanup(fake_image.FakeImageService_reset)
        image_service = fake_image.FakeImageService()
        def add_location_wrapper(ctx, id, uri, metadata):
            # Verify the image owner is recorded on the volume before the
            # location is registered.
            try:
                volume = db.volume_get(ctx, id)
                self.assertEqual(ctx.project_id,
                                 volume['metadata']['image_owner'])
            except exception.VolumeNotFound:
                pass
            return image_service.add_location_orig(ctx, id, uri, metadata)
        image_service.add_location_orig = image_service.add_location
        image_service.add_location = add_location_wrapper
        image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
        self.image_meta['id'] = image_id
        self.image_meta['status'] = 'queued'
        image_service.create(self.context, self.image_meta)
        # creating volume testdata
        self.volume_attrs['instance_uuid'] = None
        db.volume_create(self.context, self.volume_attrs)
        def fake_create(context, volume, **kwargs):
            db.volume_update(context, volume.id, {'status': 'available'})
        mock_create.side_effect = fake_create
        # start test
        self.volume.copy_volume_to_image(self.context,
                                         self.volume_id,
                                         self.image_meta)
        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume['status'])
        # return create image
        image = image_service.show(self.context, image_id)
        image_service.delete(self.context, image_id)
        return image
    def test_copy_volume_to_image_with_image_volume(self):
        image = self._test_copy_volume_to_image_with_image_volume()
        # Raw upload via the cinder backend registers a cinder:// location.
        self.assertTrue(image['locations'][0]['url'].startswith('cinder://'))
    def test_copy_volume_to_image_with_image_volume_qcow2(self):
        # Non-raw formats cannot use the image-volume path.
        self.image_meta['disk_format'] = 'qcow2'
        image = self._test_copy_volume_to_image_with_image_volume()
        self.assertNotIn('locations', image)
    @mock.patch.object(vol_manager.VolumeManager, 'delete_volume')
    @mock.patch.object(fake_image._FakeImageService, 'add_location',
                       side_effect=exception.Invalid)
    def test_copy_volume_to_image_with_image_volume_failure(
            self, mock_add_location, mock_delete):
        # If registering the location fails, the image volume is cleaned up.
        image = self._test_copy_volume_to_image_with_image_volume()
        self.assertNotIn('locations', image)
        self.assertTrue(mock_delete.called)
class ImageVolumeCacheTestCase(base.BaseVolumeTestCase):
    """Tests for image-volume cache configuration and cache entry cleanup."""
    def setUp(self):
        super(ImageVolumeCacheTestCase, self).setUp()
        self.volume.driver.set_initialized()
    @mock.patch('oslo_utils.importutils.import_object')
    def test_cache_configs(self, mock_import_object):
        # Driver config options that should flow into the cache object.
        opts = {
            'image_volume_cache_enabled': True,
            'image_volume_cache_max_size_gb': 100,
            'image_volume_cache_max_count': 20
        }
        def conf_get(option):
            if option in opts:
                return opts[option]
            else:
                return None
        mock_driver = mock.Mock()
        mock_driver.configuration.safe_get.side_effect = conf_get
        mock_driver.configuration.extra_capabilities = 'null'
        def import_obj(*args, **kwargs):
            # Have the manager instantiate our mock driver.
            return mock_driver
        mock_import_object.side_effect = import_obj
        manager = vol_manager.VolumeManager(volume_driver=mock_driver)
        self.assertIsNotNone(manager)
        self.assertIsNotNone(manager.image_volume_cache)
        self.assertEqual(100, manager.image_volume_cache.max_cache_size_gb)
        self.assertEqual(20, manager.image_volume_cache.max_cache_size_count)
    def test_delete_image_volume(self):
        # Deleting a cached image volume must remove its cache entry.
        volume_params = {
            'status': 'creating',
            'host': 'some_host',
            'cluster_name': 'some_cluster',
            'size': 1
        }
        volume_api = cinder.volume.api.API()
        volume = tests_utils.create_volume(self.context, **volume_params)
        volume.status = 'available'
        volume.save()
        image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
        db.image_volume_cache_create(self.context,
                                     volume['host'],
                                     volume_params['cluster_name'],
                                     image_id,
                                     datetime.datetime.utcnow(),
                                     volume['id'],
                                     volume['size'])
        volume_api.delete(self.context, volume)
        entry = db.image_volume_cache_get_by_volume_id(self.context,
                                                       volume['id'])
        self.assertIsNone(entry)
    def test_delete_volume_with_keymanager_exception(self):
        # A key-manager failure must not block the volume delete.
        volume_params = {
            'host': 'some_host',
            'size': 1
        }
        volume_api = cinder.volume.api.API()
        volume = tests_utils.create_volume(self.context, **volume_params)
        with mock.patch.object(
                volume_api.key_manager, 'delete') as key_del_mock:
            key_del_mock.side_effect = Exception("Key not found")
            volume_api.delete(self.context, volume)
class ImageVolumeTestCases(base.BaseVolumeTestCase):
    @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
                'create_cloned_volume')
    @mock.patch('cinder.quota.QUOTAS.rollback')
    @mock.patch('cinder.quota.QUOTAS.commit')
    @mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"])
    def test_clone_image_volume(self, mock_reserve, mock_commit,
                                mock_rollback, mock_cloned_volume):
        # Successful clone should reserve and then commit quota.
        vol = tests_utils.create_volume(self.context,
                                        **self.volume_params)
        # unnecessary attributes should be removed from image volume
        vol.consistencygroup = None
        result = self.volume._clone_image_volume(self.context, vol,
                                                 {'id': fake.VOLUME_ID})
        self.assertNotEqual(False, result)
        mock_reserve.assert_called_once_with(self.context, volumes=1,
                                             gigabytes=vol.size)
        mock_commit.assert_called_once_with(self.context, ["RESERVATION"],
                                            project_id=vol.project_id)
    @mock.patch('cinder.quota.QUOTAS.rollback')
    @mock.patch('cinder.quota.QUOTAS.commit')
    @mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"])
    def test_clone_image_volume_creation_failure(self, mock_reserve,
                                                 mock_commit, mock_rollback):
        # If creating the clone object fails, the reservation is rolled back.
        vol = tests_utils.create_volume(self.context, **self.volume_params)
        with mock.patch.object(objects, 'Volume', side_effect=ValueError):
            self.assertFalse(self.volume._clone_image_volume(
                self.context, vol, {'id': fake.VOLUME_ID}))
        mock_reserve.assert_called_once_with(self.context, volumes=1,
                                             gigabytes=vol.size)
        mock_rollback.assert_called_once_with(self.context, ["RESERVATION"])
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    def test_create_volume_from_image_cloned_status_available(
            self, mock_qemu_info):
        """Test create volume from image via cloning.
        Verify that after cloning image to volume, it is in available
        state and is bootable.
        """
        image_info = imageutils.QemuImgInfo()
        # Report a 1 GiB virtual size from qemu-img.
        image_info.virtual_size = '1073741824'
        mock_qemu_info.return_value = image_info
        volume = self._create_volume_from_image()
        self.assertEqual('available', volume['status'])
        self.assertTrue(volume['bootable'])
        self.volume.delete_volume(self.context, volume)
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    def test_create_volume_from_image_not_cloned_status_available(
            self, mock_qemu_info):
        """Test create volume from image via full copy.
        Verify that after copying image to volume, it is in available
        state and is bootable.
        """
        image_info = imageutils.QemuImgInfo()
        # Report a 1 GiB virtual size from qemu-img.
        image_info.virtual_size = '1073741824'
        mock_qemu_info.return_value = image_info
        volume = self._create_volume_from_image(fakeout_clone_image=True)
        self.assertEqual('available', volume['status'])
        self.assertTrue(volume['bootable'])
        self.volume.delete_volume(self.context, volume)
    def test_create_volume_from_image_exception(self):
        """Test create volume from a non-existing image.
        Verify that create volume from a non-existing image, the volume
        status is 'error' and is not bootable.
        """
        dst_fd, dst_path = tempfile.mkstemp()
        os.close(dst_fd)
        self.mock_object(self.volume.driver, 'local_path', lambda x: dst_path)
        # creating volume testdata
        kwargs = {'display_description': 'Test Desc',
                  'size': 20,
                  'availability_zone': 'fake_availability_zone',
                  'status': 'creating',
                  'attach_status': fields.VolumeAttachStatus.DETACHED,
                  'host': 'dummy'}
        volume = objects.Volume(context=self.context, **kwargs)
        volume.create()
        self.assertRaises(exception.ImageNotFound,
                          self.volume.create_volume,
                          self.context,
                          volume,
                          {'image_id': NON_EXISTENT_IMAGE_ID})
        # Re-fetch to observe the status set by the failed create.
        volume = objects.Volume.get_by_id(self.context, volume.id)
        self.assertEqual("error", volume['status'])
        self.assertFalse(volume['bootable'])
        # cleanup
        volume.destroy()
        os.unlink(dst_path)
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    def test_create_volume_from_image_copy_exception_rescheduling(
            self, mock_qemu_info):
        """Test create volume with ImageCopyFailure
        This exception should not trigger rescheduling and allocated_capacity
        should be incremented so we're having assert for that here.
        """
        image_info = imageutils.QemuImgInfo()
        image_info.virtual_size = '1073741824'
        mock_qemu_info.return_value = image_info
        def fake_copy_image_to_volume(context, volume, image_service,
                                      image_id):
            # Simulate the driver failing to copy the image data.
            raise exception.ImageCopyFailure()
        self.mock_object(self.volume.driver, 'copy_image_to_volume',
                         fake_copy_image_to_volume)
        mock_delete = self.mock_object(self.volume.driver, 'delete_volume')
        self.assertRaises(exception.ImageCopyFailure,
                          self._create_volume_from_image)
        # NOTE(dulek): Rescheduling should not occur, so lets assert that
        # allocated_capacity is incremented.
        self.assertDictEqual(self.volume.stats['pools'],
                             {'_pool0': {'allocated_capacity_gb': 1}})
        # NOTE(dulek): As we haven't rescheduled, make sure no delete_volume
        # was called.
        self.assertFalse(mock_delete.called)
    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.utils.brick_get_connector')
    @mock.patch('cinder.volume.driver.BaseVD.secure_file_operations_enabled')
    @mock.patch('cinder.volume.driver.BaseVD._detach_volume')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    def test_create_volume_from_image_unavailable(
            self, mock_qemu_info, mock_detach, mock_secure, *args):
        """Test create volume with ImageCopyFailure
        We'll raise an exception inside _connect_device after volume has
        already been attached to confirm that it detaches the volume.
        """
        mock_secure.side_effect = NameError
        image_info = imageutils.QemuImgInfo()
        image_info.virtual_size = '1073741824'
        mock_qemu_info.return_value = image_info
        # Run the real base-class copy path so the attach/detach logic in
        # BaseVD is exercised rather than the fake driver's override.
        unbound_copy_method = cinder.volume.driver.BaseVD.copy_image_to_volume
        bound_copy_method = unbound_copy_method.__get__(self.volume.driver)
        with mock.patch.object(self.volume.driver, 'copy_image_to_volume',
                               side_effect=bound_copy_method):
            self.assertRaises(exception.ImageCopyFailure,
                              self._create_volume_from_image,
                              fakeout_copy_image_to_volume=False)
        # We must have called detach method.
        self.assertEqual(1, mock_detach.call_count)
    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.utils.brick_get_connector')
    @mock.patch('cinder.volume.driver.BaseVD._connect_device')
    @mock.patch('cinder.volume.driver.BaseVD._detach_volume')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    def test_create_volume_from_image_unavailable_no_attach_info(
            self, mock_qemu_info, mock_detach, mock_connect, *args):
        """Test create volume with ImageCopyFailure
        We'll raise an exception on _connect_device call to confirm that it
        detaches the volume even if the exception doesn't have attach_info.
        """
        mock_connect.side_effect = NameError
        image_info = imageutils.QemuImgInfo()
        image_info.virtual_size = '1073741824'
        mock_qemu_info.return_value = image_info
        # Run the real base-class copy path so _connect_device is reached.
        unbound_copy_method = cinder.volume.driver.BaseVD.copy_image_to_volume
        bound_copy_method = unbound_copy_method.__get__(self.volume.driver)
        with mock.patch.object(self.volume.driver, 'copy_image_to_volume',
                               side_effect=bound_copy_method):
            self.assertRaises(exception.ImageCopyFailure,
                              self._create_volume_from_image,
                              fakeout_copy_image_to_volume=False)
        # We must have called detach method.
        self.assertEqual(1, mock_detach.call_count)
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    def test_create_volume_from_image_clone_image_volume(self, mock_qemu_info):
        """Test create volume from image via image volume.
        Verify that after cloning image to volume, it is in available
        state and is bootable.
        """
        image_info = imageutils.QemuImgInfo()
        # 1 GiB virtual size so the image fits the volume being created.
        image_info.virtual_size = '1073741824'
        mock_qemu_info.return_value = image_info
        # clone_image_volume=True exercises the image-volume clone path
        # rather than a plain image download.
        volume = self._create_volume_from_image(clone_image_volume=True)
        self.assertEqual('available', volume['status'])
        self.assertTrue(volume['bootable'])
        # Clean up the volume created by the helper.
        self.volume.delete_volume(self.context, volume)
def test_create_volume_from_exact_sized_image(self):
"""Test create volume from an image of the same size.
Verify that an image which is exactly the same size as the
volume, will work correctly.
"""
try:
volume_id = None
volume_api = cinder.volume.api.API(
image_service=FakeImageService())
volume = volume_api.create(self.context, 2, 'name', 'description',
image_id=self.FAKE_UUID)
volume_id = volume['id']
self.assertEqual('creating', volume['status'])
finally:
# cleanup
db.volume_destroy(self.context, volume_id)
def test_create_volume_from_oversized_image(self):
"""Verify that an image which is too big will fail correctly."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.Gi + 1,
'disk_format': 'raw',
'container_format': 'bare',
'status': 'active'}
volume_api = cinder.volume.api.API(
image_service=_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
def test_create_volume_with_mindisk_error(self):
"""Verify volumes smaller than image minDisk will cause an error."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'min_disk': 5,
'status': 'active'}
volume_api = cinder.volume.api.API(
image_service=_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
def test_create_volume_with_deleted_imaged(self):
"""Verify create volume from image will cause an error."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'min_disk': 5,
'status': 'deleted'}
volume_api = cinder.volume.api.API(
image_service=_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
def test_copy_volume_to_image_maintenance(self):
"""Test copy volume to image in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.copy_volume_to_image,
self.context,
volume,
test_meta1,
force=True)
|
|
import glob
import logging
import os
import sys
from urlparse import urljoin
import requests
from requests.exceptions import ConnectionError
from invoke import ctask as task, Collection
from invocations.testing import test
from tessera import app, db, config
from tessera_client.api.model import Section
from tessera.importer.graphite import GraphiteDashboardImporter
from tessera.importer.json_importer import JsonImporter, JsonExporter
from werkzeug.serving import run_simple
import flask
from flask.ext import migrate
warn = logging.WARN
log = logging.getLogger(__name__)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(levelname)-8s [%(name)s] %(message)s'
)
# Quiet the chattier third-party loggers down to WARN.
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(warn)
logging.getLogger('sqlalchemy.engine').setLevel(warn)
# Task-argument defaults derived from the Tessera app configuration.
DEFAULT_TESSERA_URL = 'http://{0}:{1}'.format(config['SERVER_ADDRESS'], config['SERVER_PORT'])
DEFAULT_GRAPHITE_URL = config['GRAPHITE_URL']
DEFAULT_MIGRATION_DIR = config['MIGRATION_DIR']
@task
def run(c):
    """Launch the server."""
    host = config['SERVER_ADDRESS']
    port = config['SERVER_PORT']
    # use_reloader=True restarts the server when source files change.
    run_simple(host, port, app, use_reloader=True)
# =============================================================================
# db collection
# inv db.init
# inv db.init_migrations
# inv db.current
# inv db.revisions
# inv db.migrate
# inv db.upgrade
# inv db.downgrade
# inv db.stamp
# inv db.history
# =============================================================================
@task
def initdb(c):
    """
    Deprecated, use db.init instead.
    """
    # Kept for backwards compatibility; identical to the db.init task.
    db.create_all()
@task(name='init')
def db_init(c):
    """
    Set up a new, empty database.
    """
    # Creates all tables registered on the SQLAlchemy metadata.
    db.create_all()
@task(name='init_migrations')
def db_init_migrations(c, dir=None):
    """
    Update the project to support migrations.
    """
    with app.app_context():
        # Pass the directory by keyword for consistency with the other db.*
        # tasks. Behavior is unchanged: `directory` is the first positional
        # parameter of flask-migrate's init().
        migrate.init(directory=dir)
@task(name='current')
def db_current(c, dir=DEFAULT_MIGRATION_DIR):
    """Show which migration revision the database is currently at."""
    # Migration commands need an active Flask application context.
    with app.app_context():
        migrate.current(directory=dir)
@task(name='revision')
def db_revision(c, dir=DEFAULT_MIGRATION_DIR):
    """Create a new, empty migration revision script."""
    # Migration commands need an active Flask application context.
    with app.app_context():
        migrate.revision(directory=dir)
@task(name='migrate')
def db_migrate(c, dir=DEFAULT_MIGRATION_DIR):
    """Autogenerate a new migration from detected model changes."""
    # Migration commands need an active Flask application context.
    with app.app_context():
        migrate.migrate(directory=dir)
@task(name='upgrade')
def db_upgrade(c, dir=DEFAULT_MIGRATION_DIR):
    """Apply any pending migrations to bring the database up to date."""
    # Migration commands need an active Flask application context.
    with app.app_context():
        migrate.upgrade(directory=dir)
@task(name='downgrade')
def db_downgrade(c, dir=DEFAULT_MIGRATION_DIR):
    """Revert the database to an earlier migration revision."""
    # Migration commands need an active Flask application context.
    with app.app_context():
        migrate.downgrade(directory=dir)
@task(name='stamp')
def db_stamp(c, dir=DEFAULT_MIGRATION_DIR, revision='head'):
    """
    Set database revision to a specific value.
    """
    # Fixed: the previous implementation passed `directory` to
    # app.app_context() (which accepts no such argument) and never called
    # migrate.stamp() at all, so the task raised TypeError and did nothing.
    # An optional, backward-compatible `revision` parameter is added
    # (flask-migrate's stamp() default is 'head').
    with app.app_context():
        migrate.stamp(directory=dir, revision=revision)
@task(name='history')
def db_history(c, dir=DEFAULT_MIGRATION_DIR):
    """Show the list of known migration revisions."""
    # Migration commands need an active Flask application context.
    with app.app_context():
        migrate.history(directory=dir)
# =============================================================================
# graphite tasks
# inv graphite.import
# inv graphite.export
# =============================================================================
@task(name='import')
def import_graphite_dashboards(
    c, query='', layout=Section.Layout.FLUID, columns=4, overwrite=False,
    graphite=DEFAULT_GRAPHITE_URL, tessera=DEFAULT_TESSERA_URL
):
    """
    Import dashboards from a Graphite vanilla dashboard.
    """
    log.info('Importing dashboards from graphite')
    auth = config['GRAPHITE_AUTH']
    importer = GraphiteDashboardImporter(graphite, tessera, auth)
    importer.import_dashboards(query, overwrite=overwrite, layout=layout,
                               columns=int(columns))
@task(name='dump')
def dump_graphite_dashboards(c, query='', graphite=DEFAULT_GRAPHITE_URL, tessera=DEFAULT_TESSERA_URL):
    """
    Dump Graphite dashboards to stdout in Tessera JSON format.
    """
    # Fixed: the previous message said "Importing" although this task
    # only dumps dashboards to stdout.
    log.info('Dumping dashboards from graphite')
    # NOTE(review): unlike the import task, no GRAPHITE_AUTH credentials are
    # passed to the importer here -- confirm whether that is intentional.
    importer = GraphiteDashboardImporter(graphite, tessera)
    importer.dump_dashboards(query)
# =============================================================================
# json tasks
# inv json.import
# inv json.export
# =============================================================================
@task(name='export')
def export_json(c, dir, tag=None, graphite=DEFAULT_GRAPHITE_URL, tessera=DEFAULT_TESSERA_URL):
    """
    Export dashboards as JSON to a local directory.
    """
    log.info(
        'Exporting dashboards (tagged: {0}) as JSON to directory {1}'.format(
            tag, dir))
    exporter = JsonExporter(graphite, tessera)
    exporter.export(dir, tag)
@task(name='import')
def import_json(c, pattern, graphite=DEFAULT_GRAPHITE_URL, tessera=DEFAULT_TESSERA_URL):
    """
    Import dashboards from a directory previously used for exporting.
    """
    # Fixed: the log format string contained a stray closing parenthesis
    # ('... from {0})').
    log.info('Import dashboards from {0}'.format(pattern))
    files = glob.glob(pattern)
    log.info('Found {0} files to import'.format(len(files)))
    importer = JsonImporter(graphite, tessera)
    importer.import_files(files)
# =============================================================================
# test tasks
# inv test.unit
# inv test.integration
# =============================================================================
@task
def integration(c):
    """
    Run high level integration test suite.
    """
    # Delegates to the generic `test` task from invocations.testing,
    # restricting collection to the integration tests.
    return test(c, opts="--tests=integration")
# Test task collection: `inv test` runs the unit suite by default,
# `inv test.integration` the integration suite.
tests = Collection('test')
tests.add_task(test, name='unit', default=True)
tests.add_task(integration)
@task
def copy(c, source_id, source_uri=None, destination_uri=None):
    """
    Copy a dashboard (via API) between two running Tessera instances.
    :param str source_id:
        Source dashboard ID, e.g. if copying a dashboard that lives at
        ``http://mytessera.com/dashboards/123``, this would simply be ``123``.
    :param str source_uri:
        Source base URI, e.g. ``http://mytessera.com`` or
        ``https://tessera.example.com:8080``. Will pull default value from the
        ``TESSERA_SOURCE_URI`` environment variable if not given.
    :param str destination_uri:
        Destination base URI, similar to ``source_uri``. Will pull default
        value from ``TESSERA_DESTINATION_URI`` if not given.
    """
    # Arg handling junk
    missing = []
    if source_uri is None:
        try:
            source_uri = os.environ['TESSERA_SOURCE_URI']
        except KeyError:
            missing.append("source")
    if destination_uri is None:
        try:
            destination_uri = os.environ['TESSERA_DESTINATION_URI']
        except KeyError:
            missing.append("destination")
    if missing:
        sys.exit("Missing the following URI parameters: {0}".format(
            ', '.join("{0}_uri".format(x) for x in missing)))
    # Actual copy
    endpoint = '/api/dashboard/'
    # NOTE: reduce() is a builtin here because this module targets Python 2
    # (see the `urlparse` import at the top of the file).
    source = reduce(urljoin, (source_uri, endpoint, source_id))
    try:
        original = requests.get(source, params={'definition': 'true'})
    except ConnectionError as e:
        sys.exit("Unable to connect to {0}: {1}".format(source, e))
    # Fail fast on a non-2xx response instead of POSTing an error body to
    # the destination instance (the original silently forwarded it).
    if not original.ok:
        sys.exit("Failed to fetch {0}: HTTP {1}".format(
            source, original.status_code))
    dest = urljoin(destination_uri, endpoint)
    try:
        response = requests.post(dest, data=original.content,
                                 headers={'Content-Type': 'application/json'})
    except ConnectionError as e:
        sys.exit("Unable to connect to {0}: {1}".format(dest, e))
    new_uri = urljoin(dest, response.json()['view_href'])
    print("{0} -> {1}".format(source, new_uri))
# Root task namespace: top-level tasks plus the db/json/graphite/test
# sub-collections that group related commands.
ns = Collection(
    run,
    copy,
    initdb,
    tests,
    Collection('db',
               db_init,
               db_init_migrations,
               db_current,
               db_revision,
               db_migrate,
               db_upgrade,
               db_downgrade,
               db_stamp,
               db_history
               ),
    Collection('json', import_json, export_json),
    Collection('graphite',
               import_graphite_dashboards,
               dump_graphite_dashboards,
               ),
)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import re
import sys
import unicodedata
import six
from ironic.openstack.common.gettextutils import _ # noqa
# Used for looking up extensions of text
# to their 'multiplied' byte amount
BYTE_MULTIPLIERS = {
    '': 1,
    't': 1024 ** 4,
    'g': 1024 ** 3,
    'm': 1024 ** 2,
    'k': 1024,
}
# Matches an optional leading minus plus digits, then any non-digit suffix.
BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)')
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
# Characters stripped, and whitespace/hyphen runs collapsed, by to_slug().
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
def int_from_bool_as_string(subject):
    """Interpret a string as a boolean and return either 1 or 0.
    Any string value in:
        ('True', 'true', 'On', 'on', '1')
    is interpreted as a boolean True.
    Useful for JSON-decoded stuff and config file parsing
    """
    # A conditional expression replaces the legacy `x and 1 or 0` idiom,
    # which is a known trap when the middle operand is falsy.
    return 1 if bool_from_string(subject) else 0
def bool_from_string(subject, strict=False):
    """Interpret a string as a boolean.
    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else is considered False.
    Useful for JSON-decoded stuff and config file parsing.
    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    # Coerce non-strings (including None) to their string representation.
    if not isinstance(subject, six.string_types):
        subject = str(subject)
    normalized = subject.strip().lower()
    if normalized in TRUE_STRINGS:
        return True
    if normalized in FALSE_STRINGS:
        return False
    if not strict:
        # Lenient mode: anything unrecognized is simply False.
        return False
    acceptable = ', '.join(
        "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
    msg = _("Unrecognized value '%(val)s', acceptable values are:"
            " %(acceptable)s") % {'val': subject,
                                  'acceptable': acceptable}
    raise ValueError(msg)
def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming str using `incoming` if they're not already unicode.
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, six.string_types):
        raise TypeError("%s can't be decoded" % type(text))
    if isinstance(text, six.text_type):
        # Already unicode; nothing to decode.
        return text
    if not incoming:
        # Fall back to the interpreter's input/default encoding when the
        # caller did not specify one.
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())
    try:
        return text.decode(incoming, errors)
    except UnicodeDecodeError:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """Encodes incoming str/unicode using `encoding`.
    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)
    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, six.string_types):
        raise TypeError("%s can't be encoded" % type(text))
    if not incoming:
        # Fall back to the interpreter's input/default encoding when the
        # caller did not specify one.
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())
    if isinstance(text, six.text_type):
        # Unicode input: encode directly to the target encoding.
        return text.encode(encoding, errors)
    elif text and encoding != incoming:
        # Decode text before encoding it with `encoding`
        text = safe_decode(text, incoming, errors)
        return text.encode(encoding, errors)
    # Already a byte string in the target encoding (or empty); return as-is.
    return text
def to_bytes(text, default=0):
    """Converts a string into an integer of bytes.
    Looks at the last characters of the text to determine
    what conversion is needed to turn the input text into a byte number.
    Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)
    :param text: String input for bytes size conversion.
    :param default: Default return value when text is blank.
    """
    match = BYTE_REGEX.search(text)
    if not match:
        # No leading integer found: blank input yields the default,
        # anything else is malformed.
        if text:
            raise TypeError(_('Invalid string format: %s') % text)
        return default
    magnitude = int(match.group(1))
    suffix = match.group(2)
    if not suffix:
        # Bare number: already a byte count.
        return magnitude
    # Normalize e.g. 'KB'/'Kb' -> 'k' before the multiplier lookup.
    multiplier = BYTE_MULTIPLIERS.get(suffix.lower().replace('b', '', 1))
    if multiplier is None:
        raise TypeError(_('Unknown byte multiplier: %s') % suffix)
    return magnitude * multiplier
def to_slug(value, incoming=None, errors="strict"):
    """Normalize string.
    Convert to lowercase, remove non-word characters, and convert spaces
    to hyphens.
    Inspired by Django's `slugify` filter.
    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: slugified unicode representation of `value`
    :raises TypeError: If text is not an instance of str
    """
    # Ensure we operate on unicode before normalizing.
    value = safe_decode(value, incoming, errors)
    # NOTE(aababilov): no need to use safe_(encode|decode) here:
    # encodings are always "ascii", error handling is always "ignore"
    # and types are always known (first: unicode; second: str)
    value = unicodedata.normalize("NFKD", value).encode(
        "ascii", "ignore").decode("ascii")
    value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", value)
|
|
"""Module containing the ShearSplink pipelines."""
import logging
from pathlib import Path
from cutadapt import seqio
import pandas as pd
import pysam
from pyim.external.cutadapt import cutadapt, cutadapt_summary
from pyim.external.bowtie2 import bowtie2
from pyim.external.util import flatten_arguments
from pyim.model import Insertion
from pyim.util.path import shorten_path, extract_suffix
from .base import Pipeline, register_pipeline
from ..util import extract_insertions
# Fallback cutadapt matching parameters, used when no per-sequence
# overrides are supplied via ``min_overlaps`` / ``error_rates``.
DEFAULT_OVERLAP = 3
DEFAULT_ERROR_RATE = 0.1
class ShearSplinkPipeline(Pipeline):
    """ShearSplink pipeline.
    Analyzes (single-end) sequencing data that was prepared using the
    ShearSplink protocol. Sequence reads are expected to have the following
    structure::
        [Transposon][Genomic][Linker]
    Here, ``transposon`` refers to the flanking part of the transposon
    sequence, ``linker`` to the flanking linker sequence and ``genomic``
    to the genomic DNA located in between (which varies per insertion).
    The linker sequence is optional and may be omitted if the linker is not
    included in sequencing.
    The pipeline essentially performs the following steps:
        - If contaminants are provided, sequence reads are filtered
          (using Cutadapt) for the contaminant sequences.
        - The remaining reads are trimmed to remove the transposon and
          linker sequences, leaving only genomic sequences. Reads without
          the transposon/linker sequences are dropped, as we cannot be certain
          of their origin. (Note that the linker is optional and is only
          trimmed if a linker is given).
        - The genomic reads are aligned to the reference genome.
        - The resulting alignment is used to identify insertions.
    Note that this pipeline does **NOT** support multiplexed datasets (which is
    the default output of the ShearSplink protocol). For multiplexed datasets,
    use the ``MultiplexedShearSplinkPipeline``.
    Parameters
    ----------
    transposon_path : Path
        Path to the (flanking) transposon sequence (fasta).
    bowtie_index_path : Path
        Path to the bowtie index.
    linker_path : Path
        Path to the linker sequence (fasta).
    contaminant_path : Path
        Path to file containing contaminant sequences (fasta). If provided,
        sequences are filtered for these sequences before extracting genomic
        sequences for alignment.
    min_length : int
        Minimum length for genomic reads to be kept for alignment.
    min_support : int
        Minimum support for insertions to be kept in the final output.
    min_mapq : int
        Minimum mapping quality of alignments to be used for
        identifying insertions.
    merge_distance : int
        Maximum distance within which insertions are merged. Used to merge
        insertions that occur within close vicinity, which is typically due
        to slight variations in alignments.
    bowtie_options : Dict[str, Any]
        Dictionary of extra options for Bowtie.
    min_overlaps : Dict[str, int]
        Minimum overlap required to recognize the transposon, linker and
        contaminant sequences (see Cutadapts documentation for more
        information). Keys of the dictionary indicate to which sequence the
        overlap corresponds and should be one of the following: ``linker``,
        ``transposon`` or ``contaminant``.
    error_rates : Dict[str, float]
        Maximum error rate to use when recognizing transposon, linker and
        contaminant sequences (see Cutadapts documentation for more
        information). Keys should be the same as for ``min_overlaps``.
    """
    def __init__(self,
                 transposon_path,
                 bowtie_index_path,
                 linker_path=None,
                 contaminant_path=None,
                 min_length=15,
                 min_support=2,
                 min_mapq=23,
                 merge_distance=None,
                 bowtie_options=None,
                 min_overlaps=None,
                 error_rates=None):
        super().__init__()
        # Input sequence paths (linker/contaminants are optional).
        self._transposon_path = transposon_path
        self._linker_path = linker_path
        self._contaminant_path = contaminant_path
        # Bowtie2 reference index.
        self._index_path = bowtie_index_path
        # Filtering/merging thresholds.
        self._min_length = min_length
        self._min_support = min_support
        self._min_mapq = min_mapq
        self._merge_distance = merge_distance
        # Optional tool-specific overrides; empty dict means "use defaults".
        self._bowtie_options = bowtie_options or {}
        self._min_overlaps = min_overlaps or {}
        self._error_rates = error_rates or {}
    @classmethod
    def configure_args(cls, parser):
        # Registers the pipeline's CLI arguments on the given parser.
        cls._setup_base_args(parser, paired=False)
        parser.description = 'ShearSplink pipeline'
        # Paths to various sequences.
        seq_options = parser.add_argument_group('Sequences')
        seq_options.add_argument(
            '--transposon',
            type=Path,
            required=True,
            help='Fasta file containing the transposon sequence.')
        seq_options.add_argument(
            '--contaminants',
            type=Path,
            default=None,
            help='Fasta file containing contaminant sequences.')
        seq_options.add_argument(
            '--linker',
            type=Path,
            default=None,
            help='Fasta file containing the linker sequence.')
        # Trimming options (used for cutadapt).
        trim_options = parser.add_argument_group('Trimming')
        trim_options.add_argument(
            '--min_length',
            type=int,
            default=15,
            help='Minimum length for (trimmed) genomic sequences.')
        trim_options.add_argument(
            '--contaminant_error',
            default=0.1,
            type=float,
            help='Maximum error rate for matching contaminants.')
        trim_options.add_argument(
            '--contaminant_overlap',
            default=3,
            type=int,
            help='Minimum overlap for matching contaminants.')
        trim_options.add_argument(
            '--transposon_error',
            default=0.1,
            type=float,
            help='Maximum error rate for matching the transposon.')
        trim_options.add_argument(
            '--transposon_overlap',
            default=3,
            type=int,
            help='Minimum overlap for matching the transposon.')
        trim_options.add_argument(
            '--linker_error',
            default=0.1,
            type=float,
            help='Maximum error rate for matching the linker.')
        trim_options.add_argument(
            '--linker_overlap',
            default=3,
            type=int,
            help='Minimum overlap for matching the linker.')
        align_options = parser.add_argument_group('Alignment')
        align_options.add_argument(
            '--bowtie_index',
            type=Path,
            required=True,
            help='Bowtie2 index to use for alignment.')
        align_options.add_argument(
            '--local',
            default=False,
            action='store_true',
            help='Use local alignment.')
        ins_options = parser.add_argument_group('Insertions')
        ins_options.add_argument(
            '--min_mapq',
            type=int,
            default=23,
            help=('Minimum mapping quality for reads '
                  'used to identify insertions.'))
        ins_options.add_argument(
            '--merge_distance',
            type=int,
            default=None,
            help=('Distance within which insertions (from same '
                  'sample) are merged.'))
        ins_options.add_argument(
            '--min_support',
            type=int,
            default=2,
            help='Minimum support for insertions.')
    @classmethod
    def _extract_args(cls, args):
        # Collects the flat CLI arguments into the keyword arguments
        # expected by __init__.
        bowtie_options = {'--local': args.local}
        min_overlaps = {
            'contaminant': args.contaminant_overlap,
            'transposon': args.transposon_overlap,
            'linker': args.linker_overlap
        }
        error_rates = {
            'contaminant': args.contaminant_error,
            'transposon': args.transposon_error,
            'linker': args.linker_error
        }
        return dict(
            transposon_path=args.transposon,
            bowtie_index_path=args.bowtie_index,
            linker_path=args.linker,
            contaminant_path=args.contaminants,
            min_length=args.min_length,
            min_support=args.min_support,
            min_mapq=args.min_mapq,
            merge_distance=args.merge_distance,
            bowtie_options=bowtie_options,
            min_overlaps=min_overlaps,
            error_rates=error_rates)
    def run(self, read_path, output_dir, read2_path=None):
        # Executes the full pipeline: trim -> align -> extract insertions.
        if read2_path is not None:
            raise ValueError('Pipeline does not support paired-end data')
        logger = logging.getLogger()
        # Ensure output dir exists.
        output_dir.mkdir(exist_ok=True, parents=True)
        # Extract genomic sequences and align to reference.
        genomic_path = self._extract_genomic(read_path, output_dir, logger)
        alignment_path = self._align(genomic_path, output_dir, logger)
        # Extract insertions from bam file.
        bam_file = pysam.AlignmentFile(str(alignment_path))
        try:
            insertions = extract_insertions(
                iter(bam_file),
                func=_process_alignment,
                merge_dist=self._merge_distance,
                min_mapq=self._min_mapq,
                min_support=self._min_support,
                logger=logger)
        finally:
            bam_file.close()
        # Write insertions to output file.
        insertion_path = output_dir / 'insertions.txt'
        ins_frame = Insertion.to_frame(insertions)
        ins_frame.to_csv(str(insertion_path), sep='\t', index=False)
    def _extract_genomic(self, read_path, output_dir, logger):
        """Extracts the genomic part of sequence reads."""
        # Log parameters
        if logger is not None:
            logger.info('Extracting genomic sequences')
            logger.info('  %-18s: %s', 'Transposon',
                        shorten_path(self._transposon_path))
            logger.info('  %-18s: %s', 'Linker',
                        shorten_path(self._linker_path))
            logger.info('  %-18s: %s', 'Contaminants',
                        shorten_path(self._contaminant_path))
            logger.info('  %-18s: %s', 'Minimum length', self._min_length)
        # Get suffix to use for intermediate/genomic files.
        suffix = extract_suffix(read_path)
        # Track interim files for cleaning.
        interim_files = []
        if self._contaminant_path is not None:
            # Remove contaminants.
            contaminant_out_path = output_dir / (
                'trimmed_contaminant' + suffix)
            contaminant_opts = {
                '-g': 'file:' + str(self._contaminant_path),
                '--discard-trimmed': True,
                '-O': self._min_overlaps.get('contaminant', DEFAULT_OVERLAP),
                '-e': self._error_rates.get('contaminant', DEFAULT_ERROR_RATE)
            }
            process = cutadapt(read_path, contaminant_out_path,
                               contaminant_opts)
            if logger is not None:
                summary = cutadapt_summary(process.stdout, padding='   ')
                logger.info('Trimmed contaminant sequences' + summary)
            interim_files.append(contaminant_out_path)
        else:
            # No contaminant filtering; pass the input through unchanged.
            contaminant_out_path = read_path
        if self._linker_path is not None:
            # Remove linker.
            linker_out_path = output_dir / ('trimmed_linker' + suffix)
            linker_opts = {
                '-a': 'file:' + str(self._linker_path),
                '--discard-untrimmed': True,
                '-O': self._min_overlaps.get('linker', DEFAULT_OVERLAP),
                '-e': self._error_rates.get('linker', DEFAULT_ERROR_RATE)
            }
            process = cutadapt(contaminant_out_path, linker_out_path,
                               linker_opts)
            if logger is not None:
                summary = cutadapt_summary(process.stdout, padding='   ')
                logger.info('Trimmed linker sequence' + summary)
            interim_files.append(linker_out_path)
        else:
            # No linker trimming; continue from the previous stage's output.
            linker_out_path = contaminant_out_path
        # Trim transposon and check minimum length.
        transposon_opts = {
            '-g': 'file:' + str(self._transposon_path),
            '--discard-untrimmed': True,
            '-O': self._min_overlaps.get('transposon', DEFAULT_OVERLAP),
            '-e': self._error_rates.get('transposon', DEFAULT_ERROR_RATE)
        }
        if self._min_length is not None:
            transposon_opts['--minimum-length'] = self._min_length
        genomic_path = output_dir / ('genomic' + suffix)
        process = cutadapt(linker_out_path, genomic_path, transposon_opts)
        if logger is not None:
            summary = cutadapt_summary(process.stdout, padding='   ')
            logger.info('Trimmed transposon sequence and filtered '
                        'for length' + summary)
        # Clean-up interim files.
        for file_path in interim_files:
            file_path.unlink()
        return genomic_path
    def _align(self, read_path, output_dir, logger):
        """Aligns genomic reads to the reference genome using Bowtie."""
        # Log parameters
        if logger is not None:
            logger.info('Aligning to reference')
            logger.info('  %-18s: %s', 'Reference',
                        shorten_path(self._index_path))
            logger.info('  %-18s: %s', 'Bowtie options',
                        flatten_arguments(self._bowtie_options))
        alignment_path = output_dir / 'alignment.bam'
        bowtie2(
            [read_path],
            index_path=self._index_path,
            output_path=alignment_path,
            options=self._bowtie_options,
            verbose=True)
        return alignment_path
# Expose this pipeline in the registry under the 'shearsplink' name.
register_pipeline(name='shearsplink', pipeline=ShearSplinkPipeline)
def _process_alignment(aln):
"""Analyzes an alignment to determine the tranposon/linker breakpoints."""
ref = aln.reference_name
if aln.is_reverse:
transposon_pos = aln.reference_end
linker_pos = aln.reference_start
strand = -1
else:
transposon_pos = aln.reference_start
linker_pos = aln.reference_end
strand = 1
return (ref, transposon_pos, strand), linker_pos
class MultiplexedShearSplinkPipeline(ShearSplinkPipeline):
"""ShearSplink pipeline supporting multiplexed reads.
Analyzes multiplexed (single-end) sequencing data that was prepared using
the ShearSplink protocol. Sequence reads are expected to have the following
structure::
[Barcode][Transposon][Genomic][Linker]
Here, the ``transposon``, ``genomic`` and ``linker`` sequences are the
same as for the ``ShearSplinkPipeline``. The ``barcode`` sequence is an
index that indicates which sample the read originated for.
Barcode sequences should be provided using the ``barcode_path`` argument.
The optional ``barcode_mapping`` argument can be used to map barcodes to
sample names.
Parameters
----------
transposon_path : Path
Path to the (flanking) transposon sequence (fasta).
bowtie_index_path : Path
Path to the bowtie index.
barcode_path :
Path to barcode sequences (fasta).
barcode_mapping : Path
Path to a tsv file specifying a mapping from barcodes to sample names.
Should contain ``sample`` and ``barcode`` columns.
linker_path : Path
Path to the linker sequence (fasta).
contaminant_path : Path
Path to file containing contamintant sequences (fasta). If provided,
sequences are filtered for these sequences before extracting genomic
sequences for alignment.
min_length : int
Minimum length for genomic reads to be kept for alignment.
min_support : int
Minimum support for insertions to be kept in the final output.
min_mapq : int
Minimum mapping quality of alignments to be used for
identifying insertions.
merge_distance : int
Maximum distance within which insertions are merged. Used to merge
insertions that occur within close vicinity, which is typically due
to slight variations in alignments.
bowtie_options : Dict[str, Any]
Dictionary of extra options for Bowtie.
min_overlaps : Dict[str, int]
Minimum overlap required to recognize the transposon, linker and
contamintant sequences (see Cutadapts documentation for more
information). Keys of the dictionary indicate to which sequence the
overlap corresponds and should be one of the following: ``linker``,
``transposon`` or ``contaminant``.
error_rates : Dict[str, float]
Maximum error rate to use when recognizing transposon, linker and
contamintant sequences (see Cutadapts documentation for more
information). Keys should be the same as for ``min_overlaps``.
"""
def __init__(self,
transposon_path,
bowtie_index_path,
barcode_path,
barcode_mapping=None,
linker_path=None,
contaminant_path=None,
min_length=15,
min_support=2,
min_mapq=23,
merge_distance=0,
bowtie_options=None,
min_overlaps=None,
error_rates=None):
super().__init__(
transposon_path=transposon_path,
bowtie_index_path=bowtie_index_path,
linker_path=linker_path,
contaminant_path=contaminant_path,
min_length=min_length,
min_support=min_support,
min_mapq=min_mapq,
merge_distance=merge_distance,
bowtie_options=bowtie_options,
min_overlaps=min_overlaps,
error_rates=error_rates)
self._barcode_path = barcode_path
self._barcode_mapping = barcode_mapping
@classmethod
def configure_args(cls, parser):
super().configure_args(parser)
parser.add_argument('--barcodes', required=True, type=Path)
parser.add_argument(
'--barcode_mapping', required=False, type=Path, default=None)
@classmethod
def _extract_args(cls, args):
arg_dict = super()._extract_args(args)
if args.barcode_mapping is not None:
map_df = pd.read_csv(args.barcode_mapping, sep='\t')
arg_dict['barcode_mapping'] = dict(
zip(map_df['barcode'], map_df['sample']))
else:
arg_dict['barcode_mapping'] = None
arg_dict['barcode_path'] = args.barcodes
return arg_dict
def run(self, read_path, output_dir, read2_path=None):
    """Run the pipeline on a single-end read file.

    Extracts genomic sequences, aligns them, assigns reads to
    barcodes/samples and writes the identified insertions to
    ``output_dir / 'insertions.txt'``.

    Raises
    ------
    ValueError
        If a second read file is given (paired-end is unsupported).
    """
    if read2_path is not None:
        raise ValueError('Pipeline does not support paired-end data')

    logger = logging.getLogger()

    # Make sure there is somewhere to write results.
    output_dir.mkdir(exist_ok=True, parents=True)

    # Extract genomic sequences and align them to the reference.
    genomic_path = self._extract_genomic(read_path, output_dir, logger)
    alignment_path = self._align(genomic_path, output_dir, logger)

    # Determine which barcode/sample each read belongs to.
    logger.info('Extracting barcode/sample mapping')
    logger.info(' %-18s: %s', 'Barcodes',
                shorten_path(self._barcode_path))
    read_map = self._get_barcode_mapping(read_path)

    # Identify insertion sites from the alignments, grouping reads by
    # their barcode/sample assignment.
    bam_file = pysam.AlignmentFile(str(alignment_path))
    try:
        insertions = extract_insertions(
            iter(bam_file),
            func=_process_alignment,
            group_func=lambda aln: read_map.get(aln.query_name, None),
            merge_dist=self._merge_distance,
            min_mapq=self._min_mapq,
            min_support=self._min_support,
            logger=logger)
    finally:
        bam_file.close()

    # Persist the insertions as a tab-separated table.
    insertion_path = output_dir / 'insertions.txt'
    Insertion.to_frame(insertions).to_csv(
        str(insertion_path), sep='\t', index=False)
def _get_barcode_mapping(self, read_path):
    """Return the read-name -> barcode/sample mapping for *read_path*."""
    # Load all barcode sequences up front.
    with seqio.open(str(self._barcode_path)) as barcode_file:
        barcode_seqs = list(barcode_file)

    # Scan the reads once, assigning each to a barcode (or sample when a
    # barcode->sample mapping was supplied).
    with seqio.open(str(read_path)) as read_file:
        return _extract_barcode_mapping(read_file, barcode_seqs,
                                        self._barcode_mapping)
# Register this pipeline under the name used to select it on the command line.
register_pipeline(
    name='shearsplink-multiplexed', pipeline=MultiplexedShearSplinkPipeline)
def _extract_barcode_mapping(reads, barcodes, barcode_mapping=None):
    """Assign each read to a barcode (or sample) by substring matching.

    Parameters
    ----------
    reads : iterable
        Read records with ``name`` and ``sequence`` attributes.
    barcodes : iterable
        Barcode records with ``name`` and ``sequence`` attributes.
    barcode_mapping : Dict[str, str] | None
        Optional barcode-name -> sample-name mapping; when given, reads
        map to sample names and only the mapped barcodes are considered.

    Returns
    -------
    Dict[str, str]
        Mapping of read id (first whitespace token of the read name) to
        the matched barcode/sample name. Reads matching zero barcodes are
        omitted; reads matching more than one are skipped with a warning.
    """
    # Sequence lookup keyed by barcode name (or by sample name when a
    # barcode->sample mapping is supplied).
    sequences = {bc.name: bc.sequence for bc in barcodes}
    if barcode_mapping is not None:
        sequences = {sample: sequences[barcode]
                     for barcode, sample in barcode_mapping.items()}

    mapping = {}
    for read in reads:
        # Collect every barcode whose sequence occurs within the read.
        hits = [key for key, seq in sequences.items()
                if seq in read.sequence]
        if len(hits) == 1:
            mapping[read.name.split()[0]] = hits[0]
        elif len(hits) > 1:
            logging.warning('Skipping %s due to multiple matching barcodes',
                            read.name.split()[0])
    return mapping
|
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from oslo import messaging
from tests import utils as test_utils
load_tests = testscenarios.load_tests_apply_scenarios
class TestParseURL(test_utils.BaseTestCase):
    """Scenario-driven tests for ``messaging.TransportURL.parse``.

    Each scenario supplies a URL string (plus optional transport aliases)
    and the components the parsed TransportURL is expected to contain.
    """

    scenarios = [
        ('transport',
         dict(url='foo:', aliases=None,
              expect=dict(transport='foo'))),
        ('transport_aliased',
         dict(url='bar:', aliases=dict(bar='foo'),
              expect=dict(transport='foo'))),
        ('virtual_host_slash',
         dict(url='foo:////', aliases=None,
              expect=dict(transport='foo', virtual_host='/'))),
        ('virtual_host',
         dict(url='foo:///bar', aliases=None,
              expect=dict(transport='foo', virtual_host='bar'))),
        ('host',
         dict(url='foo://host/bar', aliases=None,
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='host'),
                          ]))),
        ('ipv6_host',
         dict(url='foo://[ffff::1]/bar', aliases=None,
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='ffff::1'),
                          ]))),
        ('port',
         dict(url='foo://host:1234/bar', aliases=None,
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='host', port=1234),
                          ]))),
        ('ipv6_port',
         dict(url='foo://[ffff::1]:1234/bar', aliases=None,
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='ffff::1', port=1234),
                          ]))),
        ('username',
         dict(url='foo://u@host:1234/bar', aliases=None,
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='host', port=1234, username='u'),
                          ]))),
        ('password',
         dict(url='foo://u:p@host:1234/bar', aliases=None,
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='host', port=1234,
                                   username='u', password='p'),
                          ]))),
        ('creds_no_host',
         dict(url='foo://u:p@/bar', aliases=None,
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(username='u', password='p'),
                          ]))),
        ('multi_host',
         dict(url='foo://u:p@host1:1234,host2:4321/bar', aliases=None,
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='host1', port=1234,
                                   username='u', password='p'),
                              dict(host='host2', port=4321),
                          ]))),
        ('multi_creds',
         dict(url='foo://u1:p1@host1:1234,u2:p2@host2:4321/bar', aliases=None,
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='host1', port=1234,
                                   username='u1', password='p1'),
                              dict(host='host2', port=4321,
                                   username='u2', password='p2'),
                          ]))),
        ('multi_creds_ipv6',
         dict(url='foo://u1:p1@[ffff::1]:1234,u2:p2@[ffff::2]:4321/bar',
              aliases=None,
              expect=dict(transport='foo',
                          virtual_host='bar',
                          hosts=[
                              dict(host='ffff::1', port=1234,
                                   username='u1', password='p1'),
                              dict(host='ffff::2', port=4321,
                                   username='u2', password='p2'),
                          ]))),
    ]

    def test_parse_url(self):
        # Clear any configured default backend so only the URL determines
        # the parsed transport.
        self.config(rpc_backend=None)

        url = messaging.TransportURL.parse(self.conf, self.url, self.aliases)

        # Build the expected TransportURL from the scenario's plain dicts.
        hosts = []
        for host in self.expect.get('hosts', []):
            hosts.append(messaging.TransportHost(host.get('host'),
                                                 host.get('port'),
                                                 host.get('username'),
                                                 host.get('password')))

        expected = messaging.TransportURL(self.conf,
                                          self.expect.get('transport'),
                                          self.expect.get('virtual_host'),
                                          hosts)

        self.assertEqual(url, expected)
class TestFormatURL(test_utils.BaseTestCase):
    """Scenario-driven tests for formatting a TransportURL back to a string.

    Each scenario supplies TransportURL constructor inputs (and the
    ``rpc_backend`` config option) plus the expected ``str(url)`` output,
    including percent-quoting of credentials and virtual hosts.
    """

    scenarios = [
        ('rpc_backend',
         dict(rpc_backend='testbackend',
              transport=None,
              virtual_host=None,
              hosts=[],
              aliases=None,
              expected='testbackend:///')),
        ('rpc_backend_aliased',
         dict(rpc_backend='testfoo',
              transport=None,
              virtual_host=None,
              hosts=[],
              aliases=dict(testfoo='testbackend'),
              expected='testbackend:///')),
        ('transport',
         dict(rpc_backend=None,
              transport='testtransport',
              virtual_host=None,
              hosts=[],
              aliases=None,
              expected='testtransport:///')),
        ('transport_aliased',
         dict(rpc_backend=None,
              transport='testfoo',
              virtual_host=None,
              hosts=[],
              aliases=dict(testfoo='testtransport'),
              expected='testtransport:///')),
        ('virtual_host',
         dict(rpc_backend=None,
              transport='testtransport',
              virtual_host='/vhost',
              hosts=[],
              aliases=None,
              expected='testtransport:////vhost')),
        ('host',
         dict(rpc_backend=None,
              transport='testtransport',
              virtual_host='/',
              hosts=[
                  dict(hostname='host',
                       port=10,
                       username='bob',
                       password='secret'),
              ],
              aliases=None,
              expected='testtransport://bob:secret@host:10//')),
        ('multi_host',
         dict(rpc_backend=None,
              transport='testtransport',
              virtual_host='',
              hosts=[
                  dict(hostname='h1',
                       port=1000,
                       username='b1',
                       password='s1'),
                  dict(hostname='h2',
                       port=2000,
                       username='b2',
                       password='s2'),
              ],
              aliases=None,
              expected='testtransport://b1:s1@h1:1000,b2:s2@h2:2000/')),
        ('quoting',
         dict(rpc_backend=None,
              transport='testtransport',
              virtual_host='/$',
              hosts=[
                  dict(hostname='host',
                       port=10,
                       username='b$',
                       password='s&'),
              ],
              aliases=None,
              expected='testtransport://b%24:s%26@host:10//%24')),
    ]

    # NOTE(review): the method name says "parse" but this test exercises
    # URL *formatting* (str(url)) — consider renaming to test_format_url.
    def test_parse_url(self):
        self.config(rpc_backend=self.rpc_backend)

        hosts = []
        for host in self.hosts:
            hosts.append(messaging.TransportHost(host.get('hostname'),
                                                 host.get('port'),
                                                 host.get('username'),
                                                 host.get('password')))

        url = messaging.TransportURL(self.conf,
                                     self.transport,
                                     self.virtual_host,
                                     hosts,
                                     self.aliases)

        self.assertEqual(str(url), self.expected)
|
|
import json
from math import log
# Make an Expectation Maximization answer for a task
def make_em_answer(task_obj, model_spec):
example_to_worker_label = {}
worker_to_example_label = {}
label_set = []
answers = []
# Label set
label_set = []
# Build up initial variables for em
all_responses = (model_spec.assignment_model.objects
.filter(task__task_type=task_obj.task_type)
.exclude(content__isnull=True)
.exclude(content__exact='')
.select_related('worker')
.order_by('-finished_at'))[:800]
cur_responses = (task_obj.assignments
.exclude(content__isnull=True)
.exclude(content__exact='')
.exclude(assignment_id__in=all_responses)
.select_related('worker'))
for response in list(all_responses) + list(cur_responses):
try:
answer_list = json.loads(response.content)
except Exception:
continue
for point_id in answer_list.keys():
worker_id = response.worker.worker_id
unique_id = point_id
current_label = answer_list[point_id]
example_to_worker_label.setdefault(unique_id, []).append(
(worker_id, current_label))
worker_to_example_label.setdefault(worker_id, []).append(
(unique_id, current_label))
if current_label not in label_set :
label_set.append(current_label)
# EM algorithm
iterations = 20
ans, b, c = EM(example_to_worker_label,
worker_to_example_label,
label_set).ExpectationMaximization(iterations)
# Gather answer
point_ids = json.loads(task_obj.assignments
.exclude(content__isnull=True)
.exclude(content__exact='')[0].content).keys()
answer_label = {}
for point_id in point_ids:
unique_id = point_id
soft_label = ans[unique_id]
maxv = 0
cur_label = label_set[0]
for label, weight in soft_label.items():
if weight > maxv:
maxv = weight
cur_label = label
answer_label[point_id] = float(cur_label)
return json.dumps(answer_label)
class EM:
    """Dawid-Skene-style Expectation Maximization for label aggregation.

    Estimates, from redundant worker labels, a soft label per example,
    a prior probability per label and a per-worker confusion matrix.

    Attributes
    ----------
    example_to_worker_label : Dict[example, List[(worker, label)]]
    worker_to_example_label : Dict[worker, List[(example, label)]]
    label_set : List[label]
        All distinct labels observed in the input.
    """
    def __init__(self, example_to_worker_label, worker_to_example_label, label_set):
        self.example_to_worker_label = example_to_worker_label
        self.worker_to_example_label = worker_to_example_label
        self.label_set = label_set

    def ConfusionMatrix(self, worker_to_example_label, example_to_softlabel):
        """M-step: re-estimate each worker's confusion matrix.

        Returns worker -> true-label -> worker-label -> probability,
        weighting each worker answer by the current soft labels.
        """
        worker_to_finallabel_weight = {}
        worker_to_finallabel_workerlabel_weight = {}
        for worker, example_label in worker_to_example_label.items():
            if worker not in worker_to_finallabel_weight:
                worker_to_finallabel_weight[worker] = {}
            if worker not in worker_to_finallabel_workerlabel_weight:
                worker_to_finallabel_workerlabel_weight[worker] = {}
            for example, workerlabel in example_label:
                softlabel = example_to_softlabel[example]
                for finallabel, weight in softlabel.items():
                    # Accumulate total soft weight per true label, and soft
                    # weight per (true label, answered label) pair.
                    worker_to_finallabel_weight[worker][finallabel] = worker_to_finallabel_weight[worker].get(finallabel, 0)+weight
                    if finallabel not in worker_to_finallabel_workerlabel_weight[worker]:
                        worker_to_finallabel_workerlabel_weight[worker][finallabel] = {}
                    worker_to_finallabel_workerlabel_weight[worker][finallabel][workerlabel] = worker_to_finallabel_workerlabel_weight[worker][finallabel].get(workerlabel, 0)+weight

        # NOTE: this aliases (not copies) the accumulator dict; the loop
        # below normalizes it in place into a confusion matrix.
        worker_to_confusion_matrix = worker_to_finallabel_workerlabel_weight
        for worker, finallabel_workerlabel_weight in worker_to_finallabel_workerlabel_weight.items():
            for finallabel, workerlabel_weight in finallabel_workerlabel_weight.items():
                if worker_to_finallabel_weight[worker][finallabel] == 0:
                    #approximately no possibility
                    # No evidence for this true label: fall back to the same
                    # 0.7/0.3 smoothing used at initialization.
                    for label in self.label_set:
                        if label==finallabel:
                            worker_to_confusion_matrix[worker][finallabel][label]=0.7
                        else:
                            worker_to_confusion_matrix[worker][finallabel][label]=0.3/(len(self.label_set)-1)
                else:
                    for label in self.label_set:
                        if label in workerlabel_weight:
                            worker_to_confusion_matrix[worker][finallabel][label] = workerlabel_weight[label]*1.0/worker_to_finallabel_weight[worker][finallabel]
                        else:
                            worker_to_confusion_matrix[worker][finallabel][label] = 0.0
        return worker_to_confusion_matrix

    def PriorityProbability(self, example_to_softlabel):
        """M-step: re-estimate label priors as mean soft-label mass."""
        label_to_priority_probability = {}
        for _, softlabel in example_to_softlabel.items():
            for label, probability in softlabel.items():
                label_to_priority_probability[label] = label_to_priority_probability.get(label,0)+probability
        for label, count in label_to_priority_probability.items():
            label_to_priority_probability[label] = count*1.0/len(example_to_softlabel)
        return label_to_priority_probability

    def ProbabilityMajorityVote(self, example_to_worker_label, label_to_priority_probability, worker_to_confusion_matrix):
        """E-step: compute the posterior soft label for every example."""
        example_to_sortlabel = {}
        for example, worker_label_set in example_to_worker_label.items():
            sortlabel = {}
            total_weight = 0
            # can use worker
            for final_label, priority_probability in label_to_priority_probability.items():
                # Unnormalized posterior: prior times the product of each
                # worker's confusion-matrix likelihood for its answer.
                weight = priority_probability
                for (worker, worker_label) in worker_label_set:
                    weight *= worker_to_confusion_matrix[worker][final_label][worker_label]
                total_weight += weight
                sortlabel[final_label] = weight

            for final_label, weight in sortlabel.items():
                if total_weight == 0:
                    assert weight == 0
                    #approximately less probability
                    # Degenerate case: fall back to a uniform soft label.
                    sortlabel[final_label]=1.0/len(self.label_set)
                else:
                    sortlabel[final_label] = weight*1.0/total_weight
            example_to_sortlabel[example] = sortlabel
        return example_to_sortlabel

    #Pj
    def InitPriorityProbability(self, label_set):
        """Uniform initial label priors."""
        label_to_priority_probability = {}
        for label in label_set:
            label_to_priority_probability[label] = 1.0/len(label_set)
        return label_to_priority_probability

    #Pi
    def InitConfusionMatrix(self, workers, label_set):
        """Initial confusion matrices: 0.7 on the diagonal, rest uniform."""
        worker_to_confusion_matrix = {}
        for worker in workers:
            if worker not in worker_to_confusion_matrix:
                worker_to_confusion_matrix[worker] = {}
            for label1 in label_set:
                if label1 not in worker_to_confusion_matrix[worker]:
                    worker_to_confusion_matrix[worker][label1] = {}
                for label2 in label_set:
                    if label1 == label2:
                        worker_to_confusion_matrix[worker][label1][label2] = 0.7
                    else:
                        worker_to_confusion_matrix[worker][label1][label2] = 0.3/(len(label_set)-1)
        return worker_to_confusion_matrix

    def ExpectationMaximization(self, iterr = 10):
        """Run *iterr* EM iterations.

        Returns (example_to_softlabel, label_to_priority_probability,
        worker_to_confusion_matrix) from the final iteration.
        """
        example_to_worker_label = self.example_to_worker_label
        worker_to_example_label = self.worker_to_example_label
        label_set = self.label_set
        label_to_priority_probability = self.InitPriorityProbability(label_set)
        worker_to_confusion_matrix = self.InitConfusionMatrix(worker_to_example_label.keys(), label_set)
        while iterr>0:
            # E-step then M-steps.
            example_to_softlabel = self.ProbabilityMajorityVote(example_to_worker_label, label_to_priority_probability, worker_to_confusion_matrix)
            label_to_priority_probability = self.PriorityProbability(example_to_softlabel)
            worker_to_confusion_matrix = self.ConfusionMatrix(worker_to_example_label, example_to_softlabel)

            # compute the likelihood
            #lh=self.computelikelihood(worker_to_confusion_matrix,label_to_priority_probability,example_to_worker_label); # can be omitted
            #print alliter-iterr,':',lh;
            #print alliter-iterr,'\t',lh-prelh
            iterr -= 1
        return example_to_softlabel,label_to_priority_probability,worker_to_confusion_matrix

    def computelikelihood(self,w2cm,l2pd,e2wl):
        """Log-likelihood of the worker labels under the current model.

        NOTE(review): raises ValueError (log of 0) if some example's
        total probability mass is zero — presumably never hit in practice.
        """
        lh=0;
        for _,wl in e2wl.items():
            temp=0;
            for truelabel,prior in l2pd.items():
                inner=1;
                for workerlabel in wl:
                    worker=workerlabel[0]
                    label=workerlabel[1]
                    inner*=w2cm[worker][truelabel][label]
                temp+=inner*prior
            lh+=log(temp)
        return lh
def getaccuracy(e2lpd, label_set):
    """Return the fraction of examples whose most likely label matches
    the ground-truth label encoded in the example id (after the '_')."""
    correct = 0
    total = 0
    for example in e2lpd.keys():
        distribution = e2lpd[example]
        # Find the argmax label; '<=' keeps the original tie-break
        # (later labels in label_set win ties).
        best_label = 0
        best_value = -1
        for label in label_set:
            if best_value <= distribution[label]:
                best_label = label
                best_value = distribution[label]
        # Ground truth is embedded in the id as '<prefix>_<label>'.
        if best_label == example.split('_')[1]:
            correct += 1
        total += 1
    return correct * 1.0 / total
def gete2wlandw2el(filename):
    """Parse a tab-separated answer file into EM input structures.

    Each non-empty line must be ``worker<TAB>example<TAB>label``.

    Parameters
    ----------
    filename : str
        Path to the answer file.

    Returns
    -------
    (example_to_worker_label, worker_to_example_label, label_set)
        Dicts of example -> [(worker, label)], worker -> [(example,
        label)], and the list of distinct labels in first-seen order.
    """
    example_to_worker_label = {}
    worker_to_example_label = {}
    label_set = []

    # file.xreadlines() is Python 2 only (removed in Python 3); iterating
    # the file object streams lines the same way, and the 'with' block
    # closes the handle (the original leaked it).
    with open(filename) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            items = line.split("\t")
            example_to_worker_label.setdefault(items[1], []).append((items[0], items[2]))
            worker_to_example_label.setdefault(items[0], []).append((items[1], items[2]))
            if items[2] not in label_set:
                label_set.append(items[2])
    return example_to_worker_label, worker_to_example_label, label_set
#if __name__ == "__main__":
# filename=r'filename'
# example_to_worker_label,worker_to_example_label,label_set=gete2wlandw2el(filename)
# iterations=20 # EM iteration number
# EM(example_to_worker_label,worker_to_example_label,label_set).ExpectationMaximization(iterations)
|
|
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import math
import time
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _, _LW
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
LOG = logging.getLogger(__name__)
@six.add_metaclass(utils.TraceWrapperMetaclass)
class Client(client_base.Client):
    """ZAPI client for NetApp 7-mode Data ONTAP filers."""

    def __init__(self, volume_list=None, **kwargs):
        """Create a 7-mode client.

        :param volume_list: optional list of volume names to which LUN
            listing is restricted; None means all volumes.

        Remaining kwargs (including an optional ``vfiler``) are passed to
        the base client, which sets up ``self.connection``.
        """
        super(Client, self).__init__(**kwargs)
        vfiler = kwargs.get('vfiler', None)
        self.connection.set_vfiler(vfiler)

        # Negotiate the ONTAPI version up front so later calls use it.
        (major, minor) = self.get_ontapi_version(cached=False)
        self.connection.set_api_version(major, minor)

        self.volume_list = volume_list
        self._init_features()

    def _init_features(self):
        """Register the feature flags supported by this ONTAPI version."""
        super(Client, self)._init_features()

        ontapi_version = self.get_ontapi_version()   # major, minor
        ontapi_1_20 = ontapi_version >= (1, 20)
        self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_20)

    def _invoke_vfiler_api(self, na_element, vfiler):
        """Invoke *na_element* in the context of a specific vfiler."""
        # Copy the connection so the vfiler override does not leak into
        # other callers sharing self.connection.
        server = copy.copy(self.connection)
        server.set_vfiler(vfiler)
        result = server.invoke_successfully(na_element, True)
        return result

    def _invoke_7mode_iterator_getter(self, start_api_name, next_api_name,
                                      end_api_name, record_container_tag_name,
                                      maximum=100):
        """Invoke a 7-mode iterator-style getter API."""
        data = []

        start_api = netapp_api.NaElement(start_api_name)
        start_result = self.connection.invoke_successfully(start_api)
        tag = start_result.get_child_content('tag')
        if not tag:
            return data

        try:
            # Page through the results 'maximum' records at a time until
            # the filer reports an empty page.
            while True:
                next_api = netapp_api.NaElement(next_api_name)
                next_api.add_new_child('tag', tag)
                next_api.add_new_child('maximum', six.text_type(maximum))
                next_result = self.connection.invoke_successfully(next_api)

                records = next_result.get_child_content('records') or 0
                if int(records) == 0:
                    break

                record_container = next_result.get_child_by_name(
                    record_container_tag_name) or netapp_api.NaElement('none')
                data.extend(record_container.get_children())
        finally:
            # Always release the iterator tag on the filer.
            end_api = netapp_api.NaElement(end_api_name)
            end_api.add_new_child('tag', tag)
            self.connection.invoke_successfully(end_api)

        return data

    def get_iscsi_target_details(self):
        """Gets the iSCSI target portal details."""
        iscsi_if_iter = netapp_api.NaElement('iscsi-portal-list-info')
        result = self.connection.invoke_successfully(iscsi_if_iter, True)
        tgt_list = []
        portal_list_entries = result.get_child_by_name(
            'iscsi-portal-list-entries')
        if portal_list_entries:
            portal_list = portal_list_entries.get_children()
            for iscsi_if in portal_list:
                d = dict()
                d['address'] = iscsi_if.get_child_content('ip-address')
                d['port'] = iscsi_if.get_child_content('ip-port')
                d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag')
                tgt_list.append(d)
        return tgt_list

    def get_fc_target_wwpns(self):
        """Gets the FC target details."""
        wwpns = []
        port_name_list_api = netapp_api.NaElement('fcp-port-name-list-info')
        result = self.connection.invoke_successfully(port_name_list_api)
        port_names = result.get_child_by_name('fcp-port-names')
        if port_names:
            for port_name_info in port_names.get_children():
                wwpn = port_name_info.get_child_content('port-name').lower()
                wwpns.append(wwpn)
        return wwpns

    def get_iscsi_service_details(self):
        """Returns iscsi iqn."""
        iscsi_service_iter = netapp_api.NaElement('iscsi-node-get-name')
        result = self.connection.invoke_successfully(iscsi_service_iter, True)
        return result.get_child_content('node-name')

    def get_lun_list(self):
        """Gets the list of LUNs on filer.

        When ``self.volume_list`` is set, only those volumes are
        consulted; volumes that fail are skipped with a warning.
        """
        lun_list = []
        if self.volume_list:
            for vol in self.volume_list:
                try:
                    luns = self._get_vol_luns(vol)
                    if luns:
                        lun_list.extend(luns)
                except netapp_api.NaApiError:
                    LOG.warning(_LW("Error finding LUNs for volume %s."
                                    " Verify volume exists."), vol)
        else:
            luns = self._get_vol_luns(None)
            lun_list.extend(luns)
        return lun_list

    def _get_vol_luns(self, vol_name):
        """Gets the LUNs for a volume (all volumes when vol_name is None)."""
        api = netapp_api.NaElement('lun-list-info')
        if vol_name:
            api.add_new_child('volume-name', vol_name)
        result = self.connection.invoke_successfully(api, True)
        luns = result.get_child_by_name('luns')
        return luns.get_children()

    def get_igroup_by_initiators(self, initiator_list):
        """Get igroups exactly matching a set of initiators."""
        igroup_list = []
        if not initiator_list:
            return igroup_list

        initiator_set = set(initiator_list)

        igroup_list_info = netapp_api.NaElement('igroup-list-info')
        result = self.connection.invoke_successfully(igroup_list_info, True)

        initiator_groups = result.get_child_by_name(
            'initiator-groups') or netapp_api.NaElement('none')
        for initiator_group_info in initiator_groups.get_children():

            # Only igroups whose initiator set matches *exactly* qualify.
            initiator_set_for_igroup = set()
            initiators = initiator_group_info.get_child_by_name(
                'initiators') or netapp_api.NaElement('none')
            for initiator_info in initiators.get_children():
                initiator_set_for_igroup.add(
                    initiator_info.get_child_content('initiator-name'))

            if initiator_set == initiator_set_for_igroup:
                igroup = {'initiator-group-os-type':
                          initiator_group_info.get_child_content(
                              'initiator-group-os-type'),
                          'initiator-group-type':
                          initiator_group_info.get_child_content(
                              'initiator-group-type'),
                          'initiator-group-name':
                          initiator_group_info.get_child_content(
                              'initiator-group-name')}
                igroup_list.append(igroup)

        return igroup_list

    def clone_lun(self, path, clone_path, name, new_name,
                  space_reserved='true', src_block=0,
                  dest_block=0, block_count=0):
        """Clone a LUN, splitting large block ranges across ZAPI calls.

        NOTE(review): ``space_reserved`` is accepted but never used here —
        confirm whether it should be passed to clone-start.
        """
        # zAPI can only handle 2^24 blocks per range
        bc_limit = 2 ** 24  # 8GB
        # zAPI can only handle 32 block ranges per call
        br_limit = 32
        z_limit = br_limit * bc_limit  # 256 GB
        z_calls = int(math.ceil(block_count / float(z_limit)))
        zbc = block_count
        if z_calls == 0:
            z_calls = 1
        for _call in range(0, z_calls):
            if zbc > z_limit:
                block_count = z_limit
                zbc -= z_limit
            else:
                block_count = zbc

            clone_start = netapp_api.NaElement.create_node_with_children(
                'clone-start', **{'source-path': path,
                                  'destination-path': clone_path,
                                  'no-snap': 'true'})
            if block_count > 0:
                block_ranges = netapp_api.NaElement("block-ranges")
                # bc_limit already bounds a single range (see above); split
                # this call's blocks into ranges of at most that size.
                segments = int(math.ceil(block_count / float(bc_limit)))
                bc = block_count
                for _segment in range(0, segments):
                    if bc > bc_limit:
                        block_count = bc_limit
                        bc -= bc_limit
                    else:
                        block_count = bc
                    block_range = (
                        netapp_api.NaElement.create_node_with_children(
                            'block-range',
                            **{'source-block-number':
                                six.text_type(src_block),
                               'destination-block-number':
                                six.text_type(dest_block),
                               'block-count':
                                six.text_type(block_count)}))
                    block_ranges.add_child_elem(block_range)
                    src_block += int(block_count)
                    dest_block += int(block_count)
                clone_start.add_child_elem(block_ranges)
            result = self.connection.invoke_successfully(clone_start, True)
            clone_id_el = result.get_child_by_name('clone-id')
            cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
            vol_uuid = cl_id_info.get_child_content('volume-uuid')
            clone_id = cl_id_info.get_child_content('clone-op-id')
            if vol_uuid:
                self._check_clone_status(clone_id, vol_uuid, name, new_name)

    def _check_clone_status(self, clone_id, vol_uuid, name, new_name):
        """Checks for the job till completed."""
        clone_status = netapp_api.NaElement('clone-list-status')
        cl_id = netapp_api.NaElement('clone-id')
        clone_status.add_child_elem(cl_id)
        cl_id.add_node_with_children('clone-id-info',
                                     **{'clone-op-id': clone_id,
                                        'volume-uuid': vol_uuid})
        running = True
        clone_ops_info = None
        # Poll until the operation leaves the 'running' state; while/else
        # runs the completion check once the loop exits normally.
        while running:
            result = self.connection.invoke_successfully(clone_status, True)
            status = result.get_child_by_name('status')
            ops_info = status.get_children()
            if ops_info:
                for info in ops_info:
                    if info.get_child_content('clone-state') == 'running':
                        time.sleep(1)
                        break
                    else:
                        running = False
                        clone_ops_info = info
                        break
        else:
            if clone_ops_info:
                fmt = {'name': name, 'new_name': new_name}
                if (clone_ops_info.get_child_content('clone-state') ==
                        'completed'):
                    LOG.debug("Clone operation with src %(name)s"
                              " and dest %(new_name)s completed", fmt)
                else:
                    LOG.debug("Clone operation with src %(name)s"
                              " and dest %(new_name)s failed", fmt)
                    raise netapp_api.NaApiError(
                        clone_ops_info.get_child_content('error'),
                        clone_ops_info.get_child_content('reason'))

    def get_lun_by_args(self, **args):
        """Retrieves LUNs with specified args."""
        lun_info = netapp_api.NaElement.create_node_with_children(
            'lun-list-info', **args)
        result = self.connection.invoke_successfully(lun_info, True)
        luns = result.get_child_by_name('luns')
        return luns.get_children()

    def get_filer_volumes(self, volume=None):
        """Returns list of filer volumes in API format.

        NOTE(review): the ``volume`` parameter is accepted but unused —
        all volumes are always returned.
        """
        vol_request = netapp_api.NaElement('volume-list-info')
        res = self.connection.invoke_successfully(vol_request, True)
        volumes = res.get_child_by_name('volumes')
        if volumes:
            return volumes.get_children()
        return []

    def get_lun_map(self, path):
        """Returns the igroup mappings for the LUN at *path*."""
        lun_map_list = netapp_api.NaElement.create_node_with_children(
            'lun-map-list-info',
            **{'path': path})
        return self.connection.invoke_successfully(lun_map_list, True)

    def set_space_reserve(self, path, enable):
        """Sets the space reserve info."""
        space_res = netapp_api.NaElement.create_node_with_children(
            'lun-set-space-reservation-info',
            **{'path': path, 'enable': enable})
        self.connection.invoke_successfully(space_res, True)

    def get_actual_path_for_export(self, export_path):
        """Gets the actual path on the filer for export path."""
        storage_path = netapp_api.NaElement.create_node_with_children(
            'nfs-exportfs-storage-path', **{'pathname': export_path})
        result = self.connection.invoke_successfully(storage_path,
                                                     enable_tunneling=True)
        if result.get_child_content('actual-pathname'):
            return result.get_child_content('actual-pathname')
        raise exception.NotFound(_('No storage path found for export path %s')
                                 % (export_path))

    def clone_file(self, src_path, dest_path):
        """Clone a file via clone-start and wait for it to finish."""
        LOG.debug("Cloning with src %(src_path)s, dest %(dest_path)s",
                  {'src_path': src_path, 'dest_path': dest_path})
        clone_start = netapp_api.NaElement.create_node_with_children(
            'clone-start',
            **{'source-path': src_path,
               'destination-path': dest_path,
               'no-snap': 'true'})
        result = self.connection.invoke_successfully(clone_start,
                                                     enable_tunneling=True)
        clone_id_el = result.get_child_by_name('clone-id')
        cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
        vol_uuid = cl_id_info.get_child_content('volume-uuid')
        clone_id = cl_id_info.get_child_content('clone-op-id')
        if vol_uuid:
            try:
                self._wait_for_clone_finish(clone_id, vol_uuid)
            except netapp_api.NaApiError as e:
                if e.code != 'UnknownCloneId':
                    # Best effort: clear the failed clone before re-raising.
                    self._clear_clone(clone_id)
                raise

    def _wait_for_clone_finish(self, clone_op_id, vol_uuid):
        """Waits till a clone operation is complete or errored out."""
        clone_ls_st = netapp_api.NaElement('clone-list-status')
        clone_id = netapp_api.NaElement('clone-id')
        clone_ls_st.add_child_elem(clone_id)
        clone_id.add_node_with_children('clone-id-info',
                                        **{'clone-op-id': clone_op_id,
                                           'volume-uuid': vol_uuid})
        task_running = True
        while task_running:
            result = self.connection.invoke_successfully(clone_ls_st,
                                                         enable_tunneling=True)
            status = result.get_child_by_name('status')
            ops_info = status.get_children()
            if ops_info:
                state = ops_info[0].get_child_content('clone-state')
                if state == 'completed':
                    task_running = False
                elif state == 'failed':
                    code = ops_info[0].get_child_content('error')
                    reason = ops_info[0].get_child_content('reason')
                    raise netapp_api.NaApiError(code, reason)
                else:
                    time.sleep(1)
            else:
                # Bug fix: the original interpolated the clone_id NaElement
                # object into the message instead of the operation id.
                raise netapp_api.NaApiError(
                    'UnknownCloneId',
                    'No clone operation for clone id %s found on the filer'
                    % (clone_op_id))

    def _clear_clone(self, clone_id):
        """Clear the clone information.

        Invoke this in case of failed clone.
        """
        clone_clear = netapp_api.NaElement.create_node_with_children(
            'clone-clear',
            **{'clone-id': clone_id})
        retry = 3
        while retry:
            try:
                self.connection.invoke_successfully(clone_clear,
                                                    enable_tunneling=True)
                break
            except netapp_api.NaApiError:
                # Filer might be rebooting
                time.sleep(5)
            retry = retry - 1

    def get_file_usage(self, path):
        """Gets the file unique bytes."""
        LOG.debug('Getting file usage for %s', path)
        file_use = netapp_api.NaElement.create_node_with_children(
            'file-usage-get', **{'path': path})
        res = self.connection.invoke_successfully(file_use)
        # Renamed from 'bytes' to avoid shadowing the builtin.
        unique_bytes = res.get_child_content('unique-bytes')
        LOG.debug('file-usage for path %(path)s is %(bytes)s',
                  {'path': path, 'bytes': unique_bytes})
        return unique_bytes

    def get_ifconfig(self):
        """Returns the filer's network interface configuration."""
        ifconfig = netapp_api.NaElement('net-ifconfig-get')
        return self.connection.invoke_successfully(ifconfig)

    def get_flexvol_capacity(self, flexvol_path):
        """Gets total capacity and free capacity, in bytes, of the flexvol."""
        api_args = {'volume': flexvol_path, 'verbose': 'false'}

        result = self.send_request('volume-list-info', api_args)

        flexvol_info_list = result.get_child_by_name('volumes')
        flexvol_info = flexvol_info_list.get_children()[0]

        total_bytes = float(
            flexvol_info.get_child_content('size-total'))
        available_bytes = float(
            flexvol_info.get_child_content('size-available'))

        return total_bytes, available_bytes

    def get_performance_instance_names(self, object_name):
        """Get names of performance instances for a node."""
        api_args = {'objectname': object_name}

        result = self.send_request('perf-object-instance-list-info',
                                   api_args,
                                   enable_tunneling=False)

        instance_names = []

        instances = result.get_child_by_name(
            'instances') or netapp_api.NaElement('None')

        for instance_info in instances.get_children():
            instance_names.append(instance_info.get_child_content('name'))

        return instance_names

    def get_performance_counters(self, object_name, instance_names,
                                 counter_names):
        """Gets one or more 7-mode Data ONTAP performance counters."""
        api_args = {
            'objectname': object_name,
            'instances': [
                {'instance': instance} for instance in instance_names
            ],
            'counters': [
                {'counter': counter} for counter in counter_names
            ],
        }

        result = self.send_request('perf-object-get-instances',
                                   api_args,
                                   enable_tunneling=False)

        counter_data = []

        timestamp = result.get_child_content('timestamp')

        instances = result.get_child_by_name(
            'instances') or netapp_api.NaElement('None')
        for instance in instances.get_children():

            instance_name = instance.get_child_content('name')

            counters = instance.get_child_by_name(
                'counters') or netapp_api.NaElement('None')
            for counter in counters.get_children():

                counter_name = counter.get_child_content('name')
                counter_value = counter.get_child_content('value')

                counter_data.append({
                    'instance-name': instance_name,
                    'timestamp': timestamp,
                    counter_name: counter_value,
                })

        return counter_data

    def get_system_name(self):
        """Get the name of the 7-mode Data ONTAP controller."""
        result = self.send_request('system-get-info',
                                   {},
                                   enable_tunneling=False)

        system_info = result.get_child_by_name('system-info')
        system_name = system_info.get_child_content('system-name')
        return system_name
|
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import itertools
import os
import re
import sys
import tarfile
from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
# Location of the test262 checkout ("data" next to this file) and the
# tarball it ships as on swarming bots (extracted by PrepareSources).
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
ARCHIVE = DATA + ".tar"
# Harness files prepended to every test command line. detachArrayBuffer.js
# is V8-local and lives in the suite root (see Test262TestSuite.BasePath).
TEST_262_HARNESS_FILES = ["sta.js", "assert.js"]
TEST_262_NATIVE_FILES = ["detachArrayBuffer.js"]
# Relative path components (joined with os.path.join) for the suite's
# tests, harness, bundled tools and V8's local-tests overrides.
TEST_262_SUITE_PATH = ["data", "test"]
TEST_262_HARNESS_PATH = ["data", "harness"]
TEST_262_TOOLS_PATH = ["harness", "src"]
TEST_262_LOCAL_TESTS_PATH = ["local-tests", "test"]
# Captures the (...) in '/path/to/v8/test/test262/subdir/test/(...).js';
# in practice, subdir is "data" or "local-tests".
TEST_262_RELPATH_REGEXP = re.compile(
    r'.*[\\/]test[\\/]test262[\\/][^\\/]+[\\/]test[\\/](.*)\.js')
# Make the bundled test262 tools (e.g. parseTestRecord) importable.
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             *TEST_262_TOOLS_PATH))
# Variant flag sets with "--use-strict" appended to every flag list.
ALL_VARIANT_FLAGS_STRICT = dict(
    (v, [flags + ["--use-strict"] for flags in flag_sets])
    for v, flag_sets in testsuite.ALL_VARIANT_FLAGS.iteritems()
)
FAST_VARIANT_FLAGS_STRICT = dict(
    (v, [flags + ["--use-strict"] for flags in flag_sets])
    for v, flag_sets in testsuite.FAST_VARIANT_FLAGS.iteritems()
)
# Variant flag sets containing both the sloppy and the strict variants.
ALL_VARIANT_FLAGS_BOTH = dict(
    (v, [flags for flags in testsuite.ALL_VARIANT_FLAGS[v] +
         ALL_VARIANT_FLAGS_STRICT[v]])
    for v in testsuite.ALL_VARIANT_FLAGS
)
FAST_VARIANT_FLAGS_BOTH = dict(
    (v, [flags for flags in testsuite.FAST_VARIANT_FLAGS[v] +
         FAST_VARIANT_FLAGS_STRICT[v]])
    for v in testsuite.FAST_VARIANT_FLAGS
)
# Strictness mode (from the test record) -> variant flag sets to run.
ALL_VARIANTS = {
  'nostrict': testsuite.ALL_VARIANT_FLAGS,
  'strict': ALL_VARIANT_FLAGS_STRICT,
  'both': ALL_VARIANT_FLAGS_BOTH,
}
FAST_VARIANTS = {
  'nostrict': testsuite.FAST_VARIANT_FLAGS,
  'strict': FAST_VARIANT_FLAGS_STRICT,
  'both': FAST_VARIANT_FLAGS_BOTH,
}
class Test262VariantGenerator(testsuite.VariantGenerator):
  def GetFlagSets(self, testcase, variant):
    """Picks the flag sets for a variant, honoring the test record's
    strictness metadata and fast-variant-only status entries."""
    if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
      flags_by_mode = FAST_VARIANTS
    else:
      flags_by_mode = ALL_VARIANTS

    record = self.suite.GetTestRecord(testcase)
    if "noStrict" in record:
      return flags_by_mode["nostrict"][variant]
    if "onlyStrict" in record:
      return flags_by_mode["strict"][variant]
    return flags_by_mode["both"][variant]
class Test262TestSuite(testsuite.TestSuite):
  """Test suite combining the upstream test262 checkout ("data") with
  V8's overrides in "local-tests"."""

  def __init__(self, name, root):
    super(Test262TestSuite, self).__init__(name, root)
    self.testroot = os.path.join(self.root, *TEST_262_SUITE_PATH)
    self.harnesspath = os.path.join(self.root, *TEST_262_HARNESS_PATH)
    self.harness = [os.path.join(self.harnesspath, f)
                    for f in TEST_262_HARNESS_FILES]
    self.harness += [os.path.join(self.root, "harness-adapt.js")]
    self.localtestroot = os.path.join(self.root, *TEST_262_LOCAL_TESTS_PATH)
    # Lazily loaded parseTestRecord function; see LoadParseTestRecord.
    self.ParseTestRecord = None

  def ListTests(self, context):
    """Walks both test roots and returns de-duplicated TestCase objects.

    Using a set of relative names makes a local-tests file shadow its
    upstream counterpart with the same path.
    """
    testnames = set()
    for dirname, dirs, files in itertools.chain(os.walk(self.testroot),
                                                os.walk(self.localtestroot)):
      for dotted in [x for x in dirs if x.startswith(".")]:
        dirs.remove(dotted)
      if context.noi18n and "intl402" in dirs:
        dirs.remove("intl402")
      dirs.sort()
      files.sort()
      for filename in files:
        if not filename.endswith(".js"):
          continue
        if filename.endswith("_FIXTURE.js"):
          continue
        fullpath = os.path.join(dirname, filename)
        # Match the (...) in '/path/to/v8/test/test262/subdir/test/(...).js';
        # in practice, subdir is data or local-tests.
        relpath = re.match(TEST_262_RELPATH_REGEXP, fullpath).group(1)
        testnames.add(relpath.replace(os.path.sep, "/"))
    return [testcase.TestCase(self, testname) for testname in testnames]

  def GetFlagsForTestCase(self, testcase, context):
    """Assembles the d8 command line: harness files, includes, the test
    file itself, plus flags from the test record and status file."""
    return (testcase.flags + context.mode_flags + self.harness +
            self.GetIncludesForTest(testcase) +
            (["--module"] if "module" in self.GetTestRecord(testcase) else []) +
            [self.GetPathForTest(testcase)] +
            (["--throws"] if "negative" in self.GetTestRecord(testcase)
             else []) +
            (["--allow-natives-syntax"]
             if "detachArrayBuffer.js" in
             self.GetTestRecord(testcase).get("includes", [])
             else []) +
            ([flag for flag in testcase.outcomes if flag.startswith("--")]))

  def _VariantGeneratorFactory(self):
    return Test262VariantGenerator

  def LoadParseTestRecord(self):
    """Loads and caches parseTestRecord from the bundled test262 tools."""
    if not self.ParseTestRecord:
      root = os.path.join(self.root, *TEST_262_TOOLS_PATH)
      f = None
      try:
        (f, pathname, description) = imp.find_module("parseTestRecord", [root])
        module = imp.load_module("parseTestRecord", f, pathname, description)
        self.ParseTestRecord = module.parseTestRecord
      except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are not converted into an ImportError.
        raise ImportError("Cannot load parseTestRecord; you may need to "
                          "gclient sync for test262")
      finally:
        if f:
          f.close()
    return self.ParseTestRecord

  def GetTestRecord(self, testcase):
    """Parses (and caches on the testcase) the test's metadata record."""
    if not hasattr(testcase, "test_record"):
      ParseTestRecord = self.LoadParseTestRecord()
      testcase.test_record = ParseTestRecord(self.GetSourceForTest(testcase),
                                             testcase.path)
    return testcase.test_record

  def BasePath(self, filename):
    """V8-native helper files live in the suite root; the rest in harness."""
    return self.root if filename in TEST_262_NATIVE_FILES else self.harnesspath

  def GetIncludesForTest(self, testcase):
    """Returns absolute paths for the helper files a test 'includes'."""
    test_record = self.GetTestRecord(testcase)
    # .get() already yields [] when there is no "includes" key.
    return [os.path.join(self.BasePath(filename), filename)
            for filename in test_record.get("includes", [])]

  def GetPathForTest(self, testcase):
    """Prefers the local-tests override over the upstream test file."""
    filename = os.path.join(self.localtestroot, testcase.path + ".js")
    if not os.path.exists(filename):
      filename = os.path.join(self.testroot, testcase.path + ".js")
    return filename

  def GetSourceForTest(self, testcase):
    with open(self.GetPathForTest(testcase)) as f:
      return f.read()

  def _ParseException(self, string):
    """Extracts the error name from output shaped like
    'somefile:somelinenumber: someerror[: sometext]'."""
    # Renamed the parameter from 'str' to avoid shadowing the builtin.
    match = re.search('^[^: ]*:[0-9]+: ([^ ]+?)($|: )', string, re.MULTILINE)
    return match.group(1)

  def IsFailureOutput(self, testcase):
    """A run fails on nonzero exit, a mismatched expected exception type,
    or an explicit FAILED! marker from the harness."""
    output = testcase.output
    test_record = self.GetTestRecord(testcase)
    if output.exit_code != 0:
      return True
    if "negative" in test_record and \
       "type" in test_record["negative"] and \
       self._ParseException(output.stdout) != test_record["negative"]["type"]:
      return True
    return "FAILED!" in output.stdout

  def HasUnexpectedOutput(self, testcase):
    """Compares the actual outcome against the expected outcomes, with a
    special case for tests expected to fail only in sloppy mode."""
    outcome = self.GetOutcome(testcase)
    if (statusfile.FAIL_SLOPPY in testcase.outcomes and
        "--use-strict" not in testcase.flags):
      return outcome != statusfile.FAIL
    # Flags (entries starting with '--') are not outcomes; no explicit
    # expectation means PASS.
    expected = [o for o in testcase.outcomes if not o.startswith('--')]
    return outcome not in (expected or [statusfile.PASS])

  def PrepareSources(self):
    """Extracts the tarred test262 data when needed.

    The archive is created only on swarming; local checkouts already
    have the data folder.
    """
    if os.path.exists(ARCHIVE) and not os.path.exists(DATA):
      print("Extracting archive...")
      with tarfile.open(ARCHIVE) as tar:
        tar.extractall(path=os.path.dirname(ARCHIVE))
def GetSuite(name, root):
  """Entry point used by the test runner to instantiate this suite."""
  suite = Test262TestSuite(name, root)
  return suite
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import eventlet
import Queue
import six
import time
from st2actions.container.service import RunnerContainerService
from st2common.runners import get_runner
from st2common import log as logging
from st2common.constants import action as action_constants
from st2common.persistence.executionstate import ActionExecutionState
from st2common.persistence.liveaction import LiveAction
from st2common.services import executions
from st2common.util.action_db import (get_action_by_ref, get_runnertype_by_name)
from st2common.util import date as date_utils
# Module-level logger.
LOG = logging.getLogger(__name__)
# Public names exported by this module.
__all__ = [
    'Querier',
    'QueryContext'
]
@six.add_metaclass(abc.ABCMeta)
class Querier(object):
    """Abstract base for pollers that query an external service for the
    results of previously dispatched live actions and write them back to
    the database.

    Subclasses implement ``query``. ``start`` runs an endless loop that
    drains ``_query_contexts`` into an eventlet GreenPool, one worker per
    due context; unfinished contexts are re-queued.
    """

    def __init__(self, threads_pool_size=10, query_interval=1, empty_q_sleep_time=5,
                 no_workers_sleep_time=1, container_service=None):
        # Maximum number of concurrent query green threads.
        self._query_threads_pool_size = threads_pool_size
        # Queue of (last_query_time, QueryContext) tuples awaiting a poll.
        self._query_contexts = Queue.Queue()
        self._thread_pool = eventlet.GreenPool(self._query_threads_pool_size)
        # Seconds to sleep when the queue is empty / when no workers are free.
        self._empty_q_sleep_time = empty_q_sleep_time
        self._no_workers_sleep_time = no_workers_sleep_time
        # Minimum seconds between two polls of the same context.
        self._query_interval = query_interval
        if not container_service:
            container_service = RunnerContainerService()
        self.container_service = container_service
        self._started = False

    def start(self):
        """Run the polling loop. Blocks forever; never returns."""
        self._started = True
        while True:
            while self._query_contexts.empty():
                eventlet.greenthread.sleep(self._empty_q_sleep_time)
            while self._thread_pool.free() <= 0:
                eventlet.greenthread.sleep(self._no_workers_sleep_time)
            self._fire_queries()

    def add_queries(self, query_contexts=None):
        """Enqueue QueryContext objects for polling."""
        if query_contexts is None:
            query_contexts = []
        LOG.debug('Adding queries to querier: %s' % query_contexts)
        for query_context in query_contexts:
            self._query_contexts.put((time.time(), query_context))

    def is_started(self):
        """Return True once start() has been entered."""
        return self._started

    def _fire_queries(self):
        """Spawn a worker for every context that is due for a query.

        Contexts polled less than ``_query_interval`` seconds ago are put
        back on the queue untouched.
        """
        if self._thread_pool.free() <= 0:
            return
        while not self._query_contexts.empty() and self._thread_pool.free() > 0:
            (last_query_time, query_context) = self._query_contexts.get_nowait()
            if time.time() - last_query_time < self._query_interval:
                self._query_contexts.put((last_query_time, query_context))
                continue
            else:
                self._thread_pool.spawn(self._query_and_save_results, query_context)

    def _query_and_save_results(self, query_context):
        """Run one query and persist its outcome.

        On query or DB-update failure the state object is deleted and
        polling for this context stops. Otherwise the context is
        re-queued until the action reaches a completed state.
        """
        execution_id = query_context.execution_id
        actual_query_context = query_context.query_context
        LOG.debug('Querying external service for results. Context: %s' % actual_query_context)
        try:
            (status, results) = self.query(execution_id, actual_query_context)
        except:
            # NOTE(review): bare except also swallows SystemExit and
            # KeyboardInterrupt; consider narrowing to Exception.
            LOG.exception('Failed querying results for liveaction_id %s.', execution_id)
            self._delete_state_object(query_context)
            LOG.debug('Remove state object %s.', query_context)
            return
        liveaction_db = None
        try:
            liveaction_db = self._update_action_results(execution_id, status, results)
        except Exception:
            LOG.exception('Failed updating action results for liveaction_id %s', execution_id)
            self._delete_state_object(query_context)
            return
        if status in action_constants.LIVEACTION_COMPLETED_STATES:
            action_db = get_action_by_ref(liveaction_db.action)
            if not action_db:
                LOG.exception('Unable to invoke post run. Action %s '
                              'no longer exists.' % liveaction_db.action)
                self._delete_state_object(query_context)
                return
            if status != action_constants.LIVEACTION_STATUS_CANCELED:
                self._invoke_post_run(liveaction_db, action_db)
            self._delete_state_object(query_context)
            return
        # Not finished yet: re-queue with a fresh timestamp.
        self._query_contexts.put((time.time(), query_context))

    def _update_action_results(self, execution_id, status, results):
        """Write status/results to the LiveAction and its execution record."""
        liveaction_db = LiveAction.get_by_id(execution_id)
        if not liveaction_db:
            raise Exception('No DB model for liveaction_id: %s' % execution_id)
        # A canceled action keeps its canceled status; results still update.
        if liveaction_db.status != action_constants.LIVEACTION_STATUS_CANCELED:
            liveaction_db.status = status
        liveaction_db.result = results
        # Action has completed, record end_timestamp
        if (liveaction_db.status in action_constants.LIVEACTION_COMPLETED_STATES and
                not liveaction_db.end_timestamp):
            liveaction_db.end_timestamp = date_utils.get_datetime_utc_now()
        # update liveaction, update actionexecution and then publish update.
        updated_liveaction = LiveAction.add_or_update(liveaction_db, publish=False)
        executions.update_execution(updated_liveaction)
        LiveAction.publish_update(updated_liveaction)
        return updated_liveaction

    def _invoke_post_run(self, actionexec_db, action_db):
        """Run the runner's post_run hook for a completed action."""
        LOG.info('Invoking post run for action execution %s. Action=%s; Runner=%s',
                 actionexec_db.id, action_db.name, action_db.runner_type['name'])
        # Get an instance of the action runner.
        runnertype_db = get_runnertype_by_name(action_db.runner_type['name'])
        runner = get_runner(runnertype_db.runner_module)
        # Configure the action runner.
        runner.container_service = RunnerContainerService()
        runner.action = action_db
        runner.action_name = action_db.name
        runner.action_execution_id = str(actionexec_db.id)
        runner.entry_point = RunnerContainerService.get_entry_point_abs_path(
            pack=action_db.pack, entry_point=action_db.entry_point)
        runner.context = getattr(actionexec_db, 'context', dict())
        runner.callback = getattr(actionexec_db, 'callback', dict())
        runner.libs_dir_path = RunnerContainerService.get_action_libs_abs_path(
            pack=action_db.pack, entry_point=action_db.entry_point)
        # Invoke the post_run method.
        runner.post_run(actionexec_db.status, actionexec_db.result)

    def _delete_state_object(self, query_context):
        """Best-effort removal of the persisted ActionExecutionState."""
        state_db = ActionExecutionState.get_by_id(query_context.id)
        if state_db is not None:
            try:
                LOG.info('Clearing state object: %s', state_db)
                ActionExecutionState.delete(state_db)
            except:
                # Deliberately best-effort: failures are logged, not raised.
                LOG.exception('Failed clearing state object: %s', state_db)

    def query(self, execution_id, query_context):
        """
        This is the method individual queriers must implement.
        This method should return a tuple of (status, results).
        status should be one of LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_RUNNING,
        LIVEACTION_STATUS_FAILED defined in st2common.constants.action.
        """
        pass

    def print_stats(self):
        """Log the querier name and its pending-queue depth."""
        # NOTE(review): 'queuries' is a typo for 'queries' in the log text.
        LOG.info('\t --- Name: %s, pending queuries: %d', self.__class__.__name__,
                 self._query_contexts.qsize())
class QueryContext(object):
    """Value object describing one execution to poll for results."""

    def __init__(self, obj_id, execution_id, query_context, query_module):
        self.id = obj_id
        self.execution_id = execution_id
        self.query_context = query_context
        self.query_module = query_module

    @classmethod
    def from_model(cls, model):
        """Build a QueryContext from a persisted state model."""
        return QueryContext(str(model.id), str(model.execution_id),
                            model.query_context, model.query_module)

    def __repr__(self):
        return ('<QueryContext id={0},execution_id={1},query_context={2}>'
                .format(self.id, self.execution_id, self.query_context))
|
|
import mock
from osf.models import AdminLogEntry, OSFUser, Node, NodeLog
from admin.nodes.views import (
NodeDeleteView,
NodeRemoveContributorView,
NodeView,
NodeReindexShare,
NodeReindexElastic,
NodeFlaggedSpamList,
NodeKnownSpamList,
NodeKnownHamList,
)
from admin_tests.utilities import setup_log_view, setup_view
from nose import tools as nt
from django.test import RequestFactory
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import Permission
from tests.base import AdminTestCase
from osf_tests.factories import AuthUserFactory, ProjectFactory, RegistrationFactory
class TestNodeView(AdminTestCase):
    """Tests for NodeView and the admin spam list views."""

    def _get_as_superuser(self, url_name, view_cls):
        """GET ``url_name`` as a fresh superuser and return the response."""
        superuser = AuthUserFactory()
        superuser.is_superuser = True
        superuser.save()
        request = RequestFactory().get(reverse(url_name))
        request.user = superuser
        return view_cls.as_view()(request)

    def test_get_flagged_spam(self):
        response = self._get_as_superuser('nodes:flagged-spam', NodeFlaggedSpamList)
        nt.assert_equal(response.status_code, 200)

    def test_get_known_spam(self):
        response = self._get_as_superuser('nodes:known-spam', NodeKnownSpamList)
        nt.assert_equal(response.status_code, 200)

    def test_get_known_ham(self):
        response = self._get_as_superuser('nodes:known-ham', NodeKnownHamList)
        nt.assert_equal(response.status_code, 200)

    def test_no_guid(self):
        # Without a guid kwarg, get_object cannot resolve a node.
        view = setup_view(NodeView(), RequestFactory().get('/fake_path'))
        with nt.assert_raises(AttributeError):
            view.get_object()

    def test_load_data(self):
        project = ProjectFactory()
        view = setup_view(NodeView(), RequestFactory().get('/fake_path'),
                          guid=project._id)
        nt.assert_is_instance(view.get_object(), dict)

    def test_name_data(self):
        project = ProjectFactory()
        view = setup_view(NodeView(), RequestFactory().get('/fake_path'),
                          guid=project._id)
        node_data = view.get_object()
        view.object = node_data
        context = view.get_context_data()
        nt.assert_equal(context[NodeView.context_object_name], node_data)

    def test_no_user_permissions_raises_error(self):
        plain_user = AuthUserFactory()
        guid = ProjectFactory()._id
        request = RequestFactory().get(reverse('nodes:node', kwargs={'guid': guid}))
        request.user = plain_user
        with nt.assert_raises(PermissionDenied):
            NodeView.as_view()(request, guid=guid)

    def test_correct_view_permissions(self):
        viewer = AuthUserFactory()
        guid = ProjectFactory()._id
        viewer.user_permissions.add(Permission.objects.get(codename='view_node'))
        viewer.save()
        request = RequestFactory().get(reverse('nodes:node', kwargs={'guid': guid}))
        request.user = viewer
        response = NodeView.as_view()(request, guid=guid)
        nt.assert_equal(response.status_code, 200)
class TestNodeDeleteView(AdminTestCase):
    """Tests for NodeDeleteView: soft-deleting and restoring a node."""

    def setUp(self):
        super(TestNodeDeleteView, self).setUp()
        self.node = ProjectFactory()
        self.request = RequestFactory().post('/fake_path')
        self.plain_view = NodeDeleteView
        self.view = setup_log_view(self.plain_view(), self.request,
                                   guid=self.node._id)
        self.url = reverse('nodes:remove', kwargs={'guid': self.node._id})

    def test_get_object(self):
        nt.assert_is_instance(self.view.get_object(), Node)

    def test_get_context(self):
        context = self.view.get_context_data(object=self.node)
        nt.assert_in('guid', context)
        nt.assert_equal(context.get('guid'), self.node._id)

    def test_remove_node(self):
        logs_before = AdminLogEntry.objects.count()
        self.view.delete(self.request)
        self.node.refresh_from_db()
        nt.assert_true(self.node.is_deleted)
        nt.assert_equal(AdminLogEntry.objects.count(), logs_before + 1)

    def test_restore_node(self):
        # Delete once, then a second delete call toggles the node back.
        self.view.delete(self.request)
        self.node.refresh_from_db()
        nt.assert_true(self.node.is_deleted)
        logs_before = AdminLogEntry.objects.count()
        self.view.delete(self.request)
        self.node.reload()
        nt.assert_false(self.node.is_deleted)
        nt.assert_equal(AdminLogEntry.objects.count(), logs_before + 1)

    def test_no_user_permissions_raises_error(self):
        request = RequestFactory().get(self.url)
        request.user = AuthUserFactory()
        with nt.assert_raises(PermissionDenied):
            self.plain_view.as_view()(request, guid=self.node._id)

    def test_correct_view_permissions(self):
        privileged = AuthUserFactory()
        for codename in ('delete_node', 'view_node'):
            privileged.user_permissions.add(
                Permission.objects.get(codename=codename))
        privileged.save()
        request = RequestFactory().get(self.url)
        request.user = privileged
        response = self.plain_view.as_view()(request, guid=self.node._id)
        nt.assert_equal(response.status_code, 200)
class TestRemoveContributor(AdminTestCase):
    """Tests for NodeRemoveContributorView."""

    def setUp(self):
        super(TestRemoveContributor, self).setUp()
        self.user = AuthUserFactory()
        self.node = ProjectFactory(creator=self.user)
        self.user_2 = AuthUserFactory()
        self.node.add_contributor(self.user_2)
        self.node.save()
        self.view = NodeRemoveContributorView
        self.request = RequestFactory().post('/fake_path')
        self.url = reverse('nodes:remove_user', kwargs={'node_id': self.node._id, 'user_id': self.user._id})

    def test_get_object(self):
        view = setup_log_view(self.view(), self.request, node_id=self.node._id,
                              user_id=self.user._id)
        node, user = view.get_object()
        nt.assert_is_instance(node, Node)
        nt.assert_is_instance(user, OSFUser)

    @mock.patch('admin.nodes.views.Node.remove_contributor')
    def test_remove_contributor(self, mock_remove_contributor):
        user_id = self.user_2._id
        node_id = self.node._id
        view = setup_log_view(self.view(), self.request, node_id=node_id,
                              user_id=user_id)
        view.delete(self.request)
        mock_remove_contributor.assert_called_with(self.user_2, None, log=False)

    def test_integration_remove_contributor(self):
        nt.assert_in(self.user_2, self.node.contributors)
        view = setup_log_view(self.view(), self.request, node_id=self.node._id,
                              user_id=self.user_2._id)
        count = AdminLogEntry.objects.count()
        view.delete(self.request)
        nt.assert_not_in(self.user_2, self.node.contributors)
        nt.assert_equal(AdminLogEntry.objects.count(), count + 1)

    def test_do_not_remove_last_admin(self):
        nt.assert_equal(
            len(list(self.node.get_admin_contributors(self.node.contributors))),
            1
        )
        view = setup_log_view(self.view(), self.request, node_id=self.node._id,
                              user_id=self.user._id)
        count = AdminLogEntry.objects.count()
        view.delete(self.request)
        self.node.reload()  # Reloads instance to show that nothing was removed
        nt.assert_equal(len(list(self.node.contributors)), 2)
        nt.assert_equal(
            len(list(self.node.get_admin_contributors(self.node.contributors))),
            1
        )
        nt.assert_equal(AdminLogEntry.objects.count(), count)

    def test_no_log(self):
        view = setup_log_view(self.view(), self.request, node_id=self.node._id,
                              user_id=self.user_2._id)
        view.delete(self.request)
        nt.assert_not_equal(self.node.logs.latest().action, NodeLog.CONTRIB_REMOVED)

    def test_no_user_permissions_raises_error(self):
        guid = self.node._id
        request = RequestFactory().get(self.url)
        request.user = self.user
        with nt.assert_raises(PermissionDenied):
            # Pass the user's guid string (was self.user, the model object);
            # every other call site passes <user>._id, and the URL kwarg
            # contract expects a guid.
            self.view.as_view()(request, node_id=guid, user_id=self.user._id)

    def test_correct_view_permissions(self):
        change_permission = Permission.objects.get(codename='change_node')
        view_permission = Permission.objects.get(codename='view_node')
        self.user.user_permissions.add(change_permission)
        self.user.user_permissions.add(view_permission)
        self.user.save()
        request = RequestFactory().get(self.url)
        request.user = self.user
        response = self.view.as_view()(request, node_id=self.node._id, user_id=self.user._id)
        nt.assert_equal(response.status_code, 200)
class TestNodeReindex(AdminTestCase):
    """Tests for the SHARE and Elasticsearch reindex admin views."""

    def setUp(self):
        super(TestNodeReindex, self).setUp()
        self.request = RequestFactory().post('/fake_path')
        self.user = AuthUserFactory()
        self.node = ProjectFactory(creator=self.user)
        self.registration = RegistrationFactory(project=self.node, creator=self.user)

    @mock.patch('website.project.tasks.format_node')
    @mock.patch('website.project.tasks.format_registration')
    @mock.patch('website.project.tasks.settings.SHARE_URL', 'ima_real_website')
    @mock.patch('website.project.tasks.settings.SHARE_API_TOKEN', 'totaly_real_token')
    @mock.patch('website.project.tasks.send_share_node_data')
    def test_reindex_node_share(self, mock_update_share, mock_format_registration, mock_format_node):
        # A project must be serialized with format_node (not
        # format_registration) before being pushed to SHARE, and the
        # action must be admin-logged.
        count = AdminLogEntry.objects.count()
        view = NodeReindexShare()
        view = setup_log_view(view, self.request, guid=self.node._id)
        view.delete(self.request)
        nt.assert_true(mock_update_share.called)
        nt.assert_true(mock_format_node.called)
        nt.assert_false(mock_format_registration.called)
        nt.assert_equal(AdminLogEntry.objects.count(), count + 1)

    @mock.patch('website.project.tasks.format_node')
    @mock.patch('website.project.tasks.format_registration')
    @mock.patch('website.project.tasks.settings.SHARE_URL', 'ima_real_website')
    @mock.patch('website.project.tasks.settings.SHARE_API_TOKEN', 'totaly_real_token')
    @mock.patch('website.project.tasks.send_share_node_data')
    def test_reindex_registration_share(self, mock_update_share, mock_format_registration, mock_format_node):
        # Mirror of the test above: registrations use format_registration.
        count = AdminLogEntry.objects.count()
        view = NodeReindexShare()
        view = setup_log_view(view, self.request, guid=self.registration._id)
        view.delete(self.request)
        nt.assert_true(mock_update_share.called)
        nt.assert_false(mock_format_node.called)
        nt.assert_true(mock_format_registration.called)
        nt.assert_equal(AdminLogEntry.objects.count(), count + 1)

    @mock.patch('website.search.search.update_node')
    def test_reindex_node_elastic(self, mock_update_search):
        # Elastic reindex must hit search.update_node and be admin-logged.
        count = AdminLogEntry.objects.count()
        view = NodeReindexElastic()
        view = setup_log_view(view, self.request, guid=self.node._id)
        view.delete(self.request)
        nt.assert_true(mock_update_search.called)
        nt.assert_equal(AdminLogEntry.objects.count(), count + 1)

    @mock.patch('website.search.search.update_node')
    def test_reindex_registration_elastic(self, mock_update_search):
        count = AdminLogEntry.objects.count()
        view = NodeReindexElastic()
        view = setup_log_view(view, self.request, guid=self.registration._id)
        view.delete(self.request)
        nt.assert_true(mock_update_search.called)
        nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
|
|
import unittest
from django.test import TransactionTestCase
from django.conf import settings
from django.utils import timezone
from datetime import timedelta, datetime
import sys
# Python 3 removed the ``unicode`` builtin; alias it to ``str`` so the
# __unicode__ tests below run on both major versions.
if sys.version_info >= (3, 0):
    unicode = str
from background_task.tasks import tasks, TaskSchedule, TaskProxy
from background_task.models import Task
from background_task import background
# Module-level log of calls made to record_task, inspected by the tests.
_recorded = []


def empty_task():
    """Task body that does nothing; exercises the decorator plumbing."""


def record_task(*arg, **kw):
    """Task body that logs its positional/keyword args into ``_recorded``."""
    _recorded.append((arg, kw))
class TestBackgroundDecorator(unittest.TestCase):
    """Tests for the ``tasks.background`` decorator factory."""

    def test_get_proxy(self):
        """Both call forms wrap the function in a TaskProxy."""
        proxy = tasks.background()(empty_task)
        self.assertNotEqual(proxy, empty_task)
        self.assertTrue(isinstance(proxy, TaskProxy))
        # and alternate form
        proxy = tasks.background(empty_task)
        self.assertNotEqual(proxy, empty_task)
        self.assertTrue(isinstance(proxy, TaskProxy))

    def test_default_name(self):
        """The task name defaults to the function's dotted module path."""
        proxy = tasks.background()(empty_task)
        self.assertEqual(proxy.name, 'background_task.tests.task_tests.empty_task')
        proxy = tasks.background()(record_task)
        self.assertEqual(proxy.name, 'background_task.tests.task_tests.record_task')
        proxy = tasks.background(empty_task)
        self.assertTrue(isinstance(proxy, TaskProxy))
        self.assertEqual(proxy.name, 'background_task.tests.task_tests.empty_task')

    def test_specified_name(self):
        proxy = tasks.background(name='mytask')(empty_task)
        self.assertEqual(proxy.name, 'mytask')

    def test_task_function(self):
        proxy = tasks.background()(empty_task)
        self.assertEqual(proxy.task_function, empty_task)
        proxy = tasks.background()(record_task)
        self.assertEqual(proxy.task_function, record_task)

    def test_default_schedule(self):
        proxy = tasks.background()(empty_task)
        self.assertEqual(TaskSchedule(), proxy.schedule)

    def test_schedule(self):
        proxy = tasks.background(schedule=10)(empty_task)
        self.assertEqual(TaskSchedule(run_at=10), proxy.schedule)

    def test__unicode__(self):
        proxy = tasks.background()(empty_task)
        self.assertEqual(u'TaskProxy(background_task.tests.task_tests.empty_task)',
                         unicode(proxy))

    def test_shortcut(self):
        '''check shortcut to decorator works'''
        proxy = background()(empty_task)
        # assertNotEqual replaces the deprecated failIfEqual alias
        # (removed in Python 3.12) for consistency with the rest of
        # this class.
        self.assertNotEqual(proxy, empty_task)
        self.assertEqual(proxy.task_function, empty_task)
class TestTaskProxy(unittest.TestCase):
    """Tests that run_task dispatches with the right args/kwargs."""

    def setUp(self):
        super(TestTaskProxy, self).setUp()
        self.proxy = tasks.background()(record_task)

    def test_run_task(self):
        cases = [([], {}),
                 (['hi'], {}),
                 ([], {'kw': 1})]
        for args, kwargs in cases:
            tasks.run_task(self.proxy.name, args, kwargs)
            self.assertEqual((tuple(args), kwargs), _recorded.pop())
class TestTaskSchedule(unittest.TestCase):
    """Tests for TaskSchedule construction, normalization and merging."""

    def test_priority(self):
        self.assertEqual(0, TaskSchedule().priority)
        self.assertEqual(0, TaskSchedule(priority=0).priority)
        self.assertEqual(1, TaskSchedule(priority=1).priority)
        self.assertEqual(2, TaskSchedule(priority=2).priority)

    def _within_one_second(self, d1, d2):
        """Assert both values are datetimes at most one second apart."""
        # assertIsInstance/assertLessEqual replace the deprecated
        # failUnless alias (removed in Python 3.12) and give better
        # failure messages.
        self.assertIsInstance(d1, datetime)
        self.assertIsInstance(d2, datetime)
        self.assertLessEqual(abs(d1 - d2), timedelta(seconds=1))

    def test_run_at(self):
        # None, 0 and a zero timedelta all mean "run now".
        for schedule in [None, 0, timedelta(seconds=0)]:
            now = timezone.now()
            run_at = TaskSchedule(run_at=schedule).run_at
            self._within_one_second(run_at, now)

        now = timezone.now()
        run_at = TaskSchedule(run_at=now).run_at
        self._within_one_second(run_at, now)

        # An explicit datetime is used as-is.
        fixed_dt = timezone.now() + timedelta(seconds=60)
        run_at = TaskSchedule(run_at=fixed_dt).run_at
        self._within_one_second(run_at, fixed_dt)

        # Integers and timedeltas are offsets from now.
        run_at = TaskSchedule(run_at=90).run_at
        self._within_one_second(run_at, timezone.now() + timedelta(seconds=90))

        run_at = TaskSchedule(run_at=timedelta(seconds=35)).run_at
        self._within_one_second(run_at, timezone.now() + timedelta(seconds=35))

    def test_create(self):
        """TaskSchedule.create accepts dicts, numbers and TaskSchedules."""
        fixed_dt = timezone.now() + timedelta(seconds=10)
        schedule = TaskSchedule.create({'run_at': fixed_dt})
        self.assertEqual(schedule.run_at, fixed_dt)
        self.assertEqual(0, schedule.priority)
        self.assertEqual(TaskSchedule.SCHEDULE, schedule.action)

        schedule = {'run_at': fixed_dt, 'priority': 2,
                    'action': TaskSchedule.RESCHEDULE_EXISTING}
        schedule = TaskSchedule.create(schedule)
        self.assertEqual(schedule.run_at, fixed_dt)
        self.assertEqual(2, schedule.priority)
        self.assertEqual(TaskSchedule.RESCHEDULE_EXISTING, schedule.action)

        schedule = TaskSchedule.create(0)
        self._within_one_second(schedule.run_at, timezone.now())

        schedule = TaskSchedule.create(10)
        self._within_one_second(schedule.run_at,
                                timezone.now() + timedelta(seconds=10))

        schedule = TaskSchedule.create(TaskSchedule(run_at=fixed_dt))
        self.assertEqual(schedule.run_at, fixed_dt)
        self.assertEqual(0, schedule.priority)
        self.assertEqual(TaskSchedule.SCHEDULE, schedule.action)

    def test_merge(self):
        """merge() fills in unset fields from the default schedule."""
        default = TaskSchedule(run_at=10, priority=2,
                               action=TaskSchedule.RESCHEDULE_EXISTING)
        schedule = TaskSchedule.create(20).merge(default)
        self._within_one_second(timezone.now() + timedelta(seconds=20),
                                schedule.run_at)
        self.assertEqual(2, schedule.priority)
        self.assertEqual(TaskSchedule.RESCHEDULE_EXISTING, schedule.action)

        schedule = TaskSchedule.create({'priority': 0}).merge(default)
        self._within_one_second(timezone.now() + timedelta(seconds=10),
                                schedule.run_at)
        self.assertEqual(0, schedule.priority)
        self.assertEqual(TaskSchedule.RESCHEDULE_EXISTING, schedule.action)

        action = TaskSchedule.CHECK_EXISTING
        schedule = TaskSchedule.create({'action': action}).merge(default)
        self._within_one_second(timezone.now() + timedelta(seconds=10),
                                schedule.run_at)
        self.assertEqual(2, schedule.priority)
        self.assertEqual(action, schedule.action)

    def test_repr(self):
        self.assertEqual('TaskSchedule(run_at=10, priority=0)',
                         repr(TaskSchedule(run_at=10, priority=0)))
class TestSchedulingTasks(TransactionTestCase):
    """End-to-end tests that calling a proxied task creates DB records."""

    def test_background_gets_scheduled(self):
        self.result = None

        @tasks.background(name='test_background_gets_scheduled')
        def set_result(result):
            self.result = result

        # calling set_result should now actually create a record in the db
        set_result(1)
        all_tasks = Task.objects.all()
        self.assertEqual(1, all_tasks.count())
        task = all_tasks[0]
        self.assertEqual('test_background_gets_scheduled', task.task_name)
        self.assertEqual('[[1], {}]', task.task_params)

    def test_reschedule_existing(self):
        reschedule_existing = TaskSchedule.RESCHEDULE_EXISTING

        @tasks.background(name='test_reschedule_existing',
                          schedule=TaskSchedule(action=reschedule_existing))
        def reschedule_fn():
            pass

        # this should only end up with one task
        # and it should be scheduled for the later time
        reschedule_fn()
        reschedule_fn(schedule=90)
        all_tasks = Task.objects.all()
        self.assertEqual(1, all_tasks.count())
        task = all_tasks[0]
        self.assertEqual('test_reschedule_existing', task.task_name)
        # check task is scheduled for later on; assertLess/assertGreater
        # replace the deprecated failUnless alias (removed in Python 3.12).
        now = timezone.now()
        self.assertLess(now + timedelta(seconds=89), task.run_at)
        self.assertGreater(now + timedelta(seconds=91), task.run_at)

    def test_check_existing(self):
        check_existing = TaskSchedule.CHECK_EXISTING

        @tasks.background(name='test_check_existing',
                          schedule=TaskSchedule(action=check_existing))
        def check_fn():
            pass

        # this should only end up with the first call
        # scheduled
        check_fn()
        check_fn(schedule=90)
        all_tasks = Task.objects.all()
        self.assertEqual(1, all_tasks.count())
        task = all_tasks[0]
        self.assertEqual('test_check_existing', task.task_name)
        # check new task is scheduled for the earlier time (roughly now)
        now = timezone.now()
        self.assertLess(now - timedelta(seconds=1), task.run_at)
        self.assertGreater(now + timedelta(seconds=1), task.run_at)
class TestTaskRunner(TransactionTestCase):
    """Tests for the runner's task-claiming (locking) behaviour."""

    def setUp(self):
        super(TestTaskRunner, self).setUp()
        # The shared runner instance used by the tasks module.
        self.runner = tasks._runner

    def test_get_task_to_run_no_tasks(self):
        # assertFalse and the assertIsNone/assertIsNotNone checks below
        # replace the deprecated failIf/failUnless aliases (removed in
        # Python 3.12).
        self.assertFalse(self.runner.get_task_to_run())

    def test_get_task_to_run(self):
        # NOTE(review): (1) is just the int 1, not a one-tuple; (1,) or
        # [1] may have been intended -- confirm against new_task's
        # expected params shape before changing it.
        task = Task.objects.new_task('mytask', (1), {})
        task.save()
        self.assertIsNone(task.locked_by)
        self.assertIsNone(task.locked_at)
        locked_task = self.runner.get_task_to_run()
        self.assertIsNotNone(locked_task)
        self.assertIsNotNone(locked_task.locked_by)
        # The claimed task must be locked by this worker.
        self.assertEqual(self.runner.worker_name, locked_task.locked_by)
        self.assertIsNotNone(locked_task.locked_at)
        self.assertEqual('mytask', locked_task.task_name)
class TestTaskModel(TransactionTestCase):
    """Tests for Task's row-level locking and string representation."""

    def test_lock_uncontested(self):
        task = Task.objects.new_task('mytask')
        task.save()
        self.assertIsNone(task.locked_by)
        self.assertIsNone(task.locked_at)
        locked_task = task.lock('mylock')
        self.assertEqual('mylock', locked_task.locked_by)
        self.assertIsNotNone(locked_task.locked_at)
        self.assertEqual(task.pk, locked_task.pk)

    def test_lock_contested(self):
        # locking should actually look at db, not object
        # in memory
        task = Task.objects.new_task('mytask')
        task.save()
        self.assertIsNotNone(task.lock('mylock'))
        self.assertIsNone(task.lock('otherlock'))

    def test_lock_expired(self):
        settings.MAX_RUN_TIME = 60
        task = Task.objects.new_task('mytask')
        task.save()
        locked_task = task.lock('mylock')
        # force expire the lock
        expire_by = timedelta(seconds=(settings.MAX_RUN_TIME + 2))
        locked_task.locked_at = locked_task.locked_at - expire_by
        locked_task.save()
        # now try to get the lock again
        self.assertIsNotNone(task.lock('otherlock'))

    def test__unicode__(self):
        task = Task.objects.new_task('mytask')
        self.assertEqual(u'Task(mytask)', unicode(task))
class TestTasks(TransactionTestCase):
    """End-to-end tests for scheduling and running tasks via tasks.run_next_task().

    Uses the deprecated-alias-free assert methods (failUnless/failIf were
    removed in Python 3.12).
    """

    def setUp(self):
        super(TestTasks, self).setUp()
        settings.MAX_RUN_TIME = 60
        settings.MAX_ATTEMPTS = 25

        @tasks.background(name='set_fields')
        def set_fields(**fields):
            # records the task's side effect on the test instance itself
            for key, value in fields.items():
                setattr(self, key, value)

        @tasks.background(name='throws_error')
        def throws_error():
            raise RuntimeError("an error")

        self.set_fields = set_fields
        self.throws_error = throws_error

    def test_run_next_task_nothing_scheduled(self):
        self.assertFalse(tasks.run_next_task())

    def test_run_next_task_one_task_scheduled(self):
        self.set_fields(worked=True)
        # scheduling alone must not run the task
        self.assertFalse(hasattr(self, 'worked'))
        self.assertTrue(tasks.run_next_task())
        self.assertTrue(hasattr(self, 'worked'))
        self.assertTrue(self.worked)

    def test_run_next_task_several_tasks_scheduled(self):
        self.set_fields(one='1')
        self.set_fields(two='2')
        self.set_fields(three='3')
        for i in range(3):
            self.assertTrue(tasks.run_next_task())
        self.assertFalse(tasks.run_next_task())  # everything should have been run
        for field, value in [('one', '1'), ('two', '2'), ('three', '3')]:
            self.assertTrue(hasattr(self, field))
            self.assertEqual(value, getattr(self, field))

    def test_run_next_task_error_handling(self):
        self.throws_error()
        all_tasks = Task.objects.all()
        self.assertEqual(1, all_tasks.count())
        original_task = all_tasks[0]
        # should run, but trigger error
        self.assertTrue(tasks.run_next_task())
        all_tasks = Task.objects.all()
        self.assertEqual(1, all_tasks.count())
        failed_task = all_tasks[0]
        # should have an error recorded
        self.assertNotEqual('', failed_task.last_error)
        self.assertIsNone(failed_task.failed_at)
        self.assertEqual(1, failed_task.attempts)
        # should have been rescheduled for the future
        # and no longer locked
        self.assertTrue(failed_task.run_at > original_task.run_at)
        self.assertIsNone(failed_task.locked_by)
        self.assertIsNone(failed_task.locked_at)

    def test_run_next_task_does_not_run_locked(self):
        self.set_fields(locked=True)
        self.assertFalse(hasattr(self, 'locked'))
        all_tasks = Task.objects.all()
        self.assertEqual(1, all_tasks.count())
        original_task = all_tasks[0]
        original_task.lock('lockname')
        # a task locked by another worker must be left alone
        self.assertFalse(tasks.run_next_task())
        self.assertFalse(hasattr(self, 'locked'))
        all_tasks = Task.objects.all()
        self.assertEqual(1, all_tasks.count())

    def test_run_next_task_unlocks_after_MAX_RUN_TIME(self):
        self.set_fields(lock_overridden=True)
        all_tasks = Task.objects.all()
        self.assertEqual(1, all_tasks.count())
        original_task = all_tasks[0]
        locked_task = original_task.lock('lockname')
        self.assertFalse(tasks.run_next_task())
        self.assertFalse(hasattr(self, 'lock_overridden'))
        # put the lock time into the past
        expire_by = timedelta(seconds=(settings.MAX_RUN_TIME + 2))
        locked_task.locked_at = locked_task.locked_at - expire_by
        locked_task.save()
        # so now we should be able to override the lock
        # and run the task
        self.assertTrue(tasks.run_next_task())
        self.assertEqual(0, Task.objects.count())
        self.assertTrue(hasattr(self, 'lock_overridden'))
        self.assertTrue(self.lock_overridden)

    def test_default_schedule_used_for_run_at(self):
        @tasks.background(name='default_schedule_used_for_run_at', schedule=60)
        def default_schedule_used_for_time():
            pass

        now = timezone.now()
        default_schedule_used_for_time()
        all_tasks = Task.objects.all()
        self.assertEqual(1, all_tasks.count())
        task = all_tasks[0]
        # run_at should be ~60s in the future (1s slack either side)
        self.assertTrue(now < task.run_at)
        self.assertTrue((task.run_at - now) <= timedelta(seconds=61))
        self.assertTrue((task.run_at - now) >= timedelta(seconds=59))

    def test_default_schedule_used_for_priority(self):
        @tasks.background(name='default_schedule_used_for_priority',
                          schedule={'priority': 2})
        def default_schedule_used_for_priority():
            pass

        now = timezone.now()
        default_schedule_used_for_priority()
        all_tasks = Task.objects.all()
        self.assertEqual(1, all_tasks.count())
        task = all_tasks[0]
        self.assertEqual(2, task.priority)

    def test_non_default_schedule_used(self):
        default_run_at = timezone.now() + timedelta(seconds=90)

        @tasks.background(name='non_default_schedule_used',
                          schedule={'run_at': default_run_at, 'priority': 2})
        def default_schedule_used_for_priority():
            pass

        # an explicit schedule on the call must override the decorator default
        run_at = timezone.now().replace(microsecond=0) + timedelta(seconds=60)
        default_schedule_used_for_priority(schedule=run_at)
        all_tasks = Task.objects.all()
        self.assertEqual(1, all_tasks.count())
        task = all_tasks[0]
        self.assertEqual(run_at, task.run_at)

    def test_failed_at_set_after_MAX_ATTEMPTS(self):
        @tasks.background(name='test_failed_at_set_after_MAX_ATTEMPTS')
        def failed_at_set_after_MAX_ATTEMPTS():
            raise RuntimeError('failed')

        failed_at_set_after_MAX_ATTEMPTS()
        available = Task.objects.find_available()
        self.assertEqual(1, available.count())
        task = available[0]
        self.assertIsNone(task.failed_at)
        task.attempts = settings.MAX_ATTEMPTS
        task.save()
        # task should be scheduled to run now
        # but will be marked as failed straight away
        self.assertTrue(tasks.run_next_task())
        available = Task.objects.find_available()
        self.assertEqual(0, available.count())
        all_tasks = Task.objects.all()
        self.assertEqual(1, all_tasks.count())
        task = all_tasks[0]
        self.assertIsNotNone(task.failed_at)
|
|
# -*- coding: utf-8 -*-
"""
Django settings for {{ cookiecutter.project_name }} project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# NOTE: this is a cookiecutter template; {{ ... }} and {% ... %} markers are
# rendered by Jinja2 when the project is generated.
# Deployment-specific values are read from the environment (or a .env file)
# via python-decouple's config().

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from decouple import config, Csv

BASE_DIR = os.path.dirname(__file__)

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)

ALLOWED_HOSTS = config('ALLOWED_HOSTS', default='*', cast=Csv())

# Application definition
INSTALLED_APPS = (
    'flat',
    # django
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # 3rd
    {% if cookiecutter.use_compressor == 'y' %}'compressor',{% endif %}
    'crispy_forms',
    'debug_toolbar',
    'django_extensions',
    'django_nose',
    'floppyforms',
    {% if cookiecutter.use_elasticsearch == 'y' %}'haystack',{% endif %}
    'imagekit',
    'rest_framework',
    'rest_framework.authtoken',
    # project
    '{{ cookiecutter.repo_name }}.accounts',
    '{{ cookiecutter.repo_name }}.base',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = '{{ cookiecutter.repo_name }}.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        # APP_DIRS must be omitted when custom 'loaders' are defined below
        {% if cookiecutter.use_jade != 'y' %}'APP_DIRS': True,{% endif %}
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            {% if cookiecutter.use_jade == 'y' %}
            'loaders': [
                ('pyjade.ext.django.Loader', (
                    'django.template.loaders.filesystem.Loader',
                    'django.template.loaders.app_directories.Loader',
                ))
            ],
            'builtins': [
                'pyjade.ext.django.templatetags',
            ],
            {% endif %}
        },
    },
]

WSGI_APPLICATION = '{{ cookiecutter.repo_name }}.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
from dj_database_url import parse as db_url

DATABASES = {'default': config('DATABASE_URL', cast=db_url)}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR, '..', 'static-root')
MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'media-root')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
    os.path.join(BASE_DIR, '..', 'bower_components'),
)

{% if cookiecutter.use_compressor == 'y' %}
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)
# node-sass is given the bower_components dir as an extra @import load path
COMPRESS_PRECOMPILERS = (
    ('text/x-scss', 'node-sass --scss --load-path=%s {infile} {outfile}' % STATICFILES_DIRS[1]),
)
COMPRESS_ENABLED = config('COMPRESS_ENABLED', cast=bool, default=False)
COMPRESS_OFFLINE = config('COMPRESS_OFFLINE', cast=bool, default=True)
{% endif %}

# Crispy forms
CRISPY_TEMPLATE_PACK = 'bootstrap3'
CRISPY_FAIL_SILENTLY = not DEBUG

from django.contrib.messages import constants as messages

# map Django's ERROR message level onto Bootstrap's "danger" CSS class
MESSAGE_TAGS = {
    messages.ERROR: 'danger'
}

# Auth
AUTH_USER_MODEL = 'accounts.User'
LOGIN_URL = '/auth/login/'
LOGOUT_URL = '/auth/logout/'
LOGIN_REDIRECT_URL = '/'

{% if cookiecutter.use_bcrypt == 'y' %}
PASSWORD_HASHERS = (
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
)
{% endif %}

{% if cookiecutter.use_memcached == 'y' %}
# Cache
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': config('CACHE_LOCATION'),
    }
}
{% endif %}

{% if cookiecutter.use_elasticsearch == 'y' %}
# Haystack / Elasticsearch
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
        'URL': config('HAYSTACK_URL'),
        'INDEX_NAME': 'haystack',
    },
}
{% endif %}

# Tests
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'

# rest_framework
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.DjangoModelPermissions',
    ),
    'DEFAULT_FILTER_BACKENDS': (
        'rest_framework.filters.DjangoFilterBackend',
    ),
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
        'rest_framework_xml.renderers.XMLRenderer',
    ),
    'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',
    'PAGINATE_BY': 50,
    'MAX_PAGINATE_BY': 300,
    'PAGINATE_BY_PARAM': 'page_size',
}

{% if cookiecutter.use_celery == 'y' %}
# celery
BROKER_URL = config('CELERY_BROKER_URL')
CELERY_IGNORE_RESULT = config('CELERY_IGNORE_RESULT', cast=bool)
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
CELERY_TIMEZONE = TIME_ZONE
{% endif %}
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a Salesforce Hook
which allows you to connect to your Salesforce instance,
retrieve data from it, and write that data to a file
for other uses.
NOTE: this hook also relies on the simple_salesforce package:
https://github.com/simple-salesforce/simple-salesforce
"""
from simple_salesforce import Salesforce
from airflow.hooks.base_hook import BaseHook
import json
import pandas as pd
import time
from airflow.utils.log.logging_mixin import LoggingMixin
class SalesforceHook(BaseHook, LoggingMixin):
    """
    Hook that connects to a Salesforce instance, runs SOQL queries,
    and can dump the results to a local file (csv, json or ndjson).

    NOTE: relies on the simple_salesforce package:
    https://github.com/simple-salesforce/simple-salesforce
    """

    def __init__(
        self,
        conn_id,
        *args,
        **kwargs
    ):
        """
        Create new connection to Salesforce
        and allows you to pull data out of SFDC and save it to a file.
        You can then use that file with other
        Airflow operators to move the data into another data source.

        :param conn_id: the name of the connection that has the parameters
            we need to connect to Salesforce.
            The connection should be type `http` and include a
            user's security token in the `Extras` field.

        .. note::
            For the HTTP connection type, you can include a
            JSON structure in the `Extras` field.
            We need a user's security token to connect to Salesforce.
            So we define it in the `Extras` field as:
            `{"security_token":"YOUR_SECURITY_TOKEN"}`
        """
        self.conn_id = conn_id
        self._args = args
        self._kwargs = kwargs

        # get the connection parameters
        self.connection = self.get_connection(conn_id)
        self.extras = self.connection.extra_dejson

    def sign_in(self):
        """
        Sign into Salesforce.
        If we have already signed in, this will just return the original object.
        """
        if hasattr(self, 'sf'):
            return self.sf

        # connect to Salesforce
        sf = Salesforce(
            username=self.connection.login,
            password=self.connection.password,
            security_token=self.extras['security_token'],
            instance_url=self.connection.host
        )
        self.sf = sf
        return sf

    def make_query(self, query):
        """
        Make a query to Salesforce. Returns result in dictionary.

        :param query: The SOQL query to send to Salesforce.
        """
        self.sign_in()

        self.log.info("Querying for all objects")
        query = self.sf.query_all(query)

        self.log.info(
            "Received results: Total size: %s; Done: %s",
            query['totalSize'], query['done']
        )

        # round-trip through JSON to normalize the response into plain
        # JSON-compatible dicts/lists
        query = json.loads(json.dumps(query))
        return query

    def describe_object(self, obj):
        """
        Get the description of an object from Salesforce.
        This description is the object's schema
        and some extra metadata that Salesforce stores for each object.

        :param obj: Name of the Salesforce object
            that we are getting a description of.
        """
        self.sign_in()

        return json.loads(json.dumps(self.sf.__getattr__(obj).describe()))

    def get_available_fields(self, obj):
        """
        Get a list of all available fields for an object.
        This only returns the names of the fields.
        """
        self.sign_in()

        desc = self.describe_object(obj)

        return [f['name'] for f in desc['fields']]

    def _build_field_list(self, fields):
        # join all of the fields in a comma separated list
        return ",".join(fields)

    def get_object_from_salesforce(self, obj, fields):
        """
        Get all instances of the `object` from Salesforce.
        For each model, only get the fields specified in fields.

        All we really do underneath the hood is run:
            SELECT <fields> FROM <obj>;
        """
        field_string = self._build_field_list(fields)

        query = "SELECT {0} FROM {1}".format(field_string, obj)
        # truncate long queries in the log message
        self.log.info(
            "Making query to Salesforce: %s",
            query if len(query) < 30 else " ... ".join([query[:15], query[-15:]])
        )
        return self.make_query(query)

    @classmethod
    def _to_timestamp(cls, col):
        """
        Convert a column of a dataframe to UNIX timestamps if applicable.

        :param col: A Series object representing a column of a dataframe.
        """
        # try and convert the column to datetimes
        # the column MUST have a four digit year somewhere in the string
        # there should be a better way to do this,
        # but just letting pandas try and convert every column without a format
        # caused it to convert floats as well
        # For example, a column of integers
        # between 0 and 10 are turned into timestamps
        # if the column cannot be converted,
        # just return the original column untouched
        try:
            col = pd.to_datetime(col)
        except ValueError:
            log = LoggingMixin().log
            log.warning(
                "Could not convert field to timestamps: %s", col.name
            )
            return col

        # now convert the newly created datetimes into timestamps
        # we have to be careful here
        # because NaT cannot be converted to a timestamp
        # so we have to return NaN
        converted = []
        for i in col:
            try:
                converted.append(i.timestamp())
            except (ValueError, AttributeError):
                # NaT raises on .timestamp(), and non-datetime values have no
                # .timestamp() at all; represent both as NaN.
                # (pd.np was removed in pandas 2.0, so use float("nan").)
                converted.append(float("nan"))

        # return a new series that maintains the same index as the original
        return pd.Series(converted, index=col.index)

    def write_object_to_file(
        self,
        query_results,
        filename,
        fmt="csv",
        coerce_to_timestamp=False,
        record_time_added=False
    ):
        """
        Write query results to file.

        Acceptable formats are:
            - csv:
                comma-separated-values file.  This is the default format.
            - json:
                JSON array.  Each element in the array is a different row.
            - ndjson:
                JSON array but each element is new-line delimited
                instead of comma delimited like in `json`

        This requires a significant amount of cleanup.
        Pandas doesn't handle output to CSV and json in a uniform way.
        This is especially painful for datetime types.
        Pandas wants to write them as strings in CSV,
        but as millisecond Unix timestamps.

        By default, this function will try and leave all values as
        they are represented in Salesforce.
        You use the `coerce_to_timestamp` flag to force all datetimes
        to become Unix timestamps (UTC).
        This can be greatly beneficial as it will make all of your
        datetime fields look the same,
        and makes it easier to work with in other database environments.

        :param query_results: the results from a SQL query
        :param filename: the name of the file where the data
            should be dumped to
        :param fmt: the format you want the output in.
            *Default:* csv.
        :param coerce_to_timestamp: True if you want all datetime fields to be
            converted into Unix timestamps.
            False if you want them to be left in the
            same format as they were in Salesforce.
            Leaving the value as False will result
            in datetimes being strings.
            *Defaults to False*
        :param record_time_added: *(optional)* True if you want to add a
            Unix timestamp field to the resulting data
            that marks when the data
            was fetched from Salesforce.
            *Default: False*.
        :raises ValueError: if `fmt` is not one of csv/json/ndjson.
        """
        fmt = fmt.lower()
        if fmt not in ['csv', 'json', 'ndjson']:
            raise ValueError("Format value is not recognized: {0}".format(fmt))

        # this line right here will convert all integers to floats if there are
        # any None/np.nan values in the column
        # that's because None/np.nan cannot exist in an integer column
        # we should write all of our timestamps as FLOATS in our final schema
        df = pd.DataFrame.from_records(query_results, exclude=["attributes"])

        df.columns = [c.lower() for c in df.columns]

        # convert columns with datetime strings to datetimes
        # not all strings will be datetimes, so we ignore any errors that occur
        # we get the object's definition at this point and only consider
        # features that are DATE or DATETIME
        if coerce_to_timestamp and df.shape[0] > 0:
            # get the object name out of the query results
            # it's stored in the "attributes" dictionary
            # for each returned record
            object_name = query_results[0]['attributes']['type']
            self.log.info("Coercing timestamps for: %s", object_name)

            schema = self.describe_object(object_name)

            # possible columns that can be converted to timestamps
            # are the ones that are either date or datetime types
            # strings are too general and we risk unintentional conversion
            possible_timestamp_cols = [
                i['name'].lower()
                for i in schema['fields']
                if i['type'] in ["date", "datetime"] and
                i['name'].lower() in df.columns
            ]
            df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(
                lambda x: self._to_timestamp(x)
            )

        if record_time_added:
            fetched_time = time.time()
            df["time_fetched_from_salesforce"] = fetched_time

        # write the CSV or JSON file depending on the option
        # NOTE:
        #   datetimes here are an issue.
        #   There is no good way to manage the difference
        #   for to_json, the options are an epoch or a ISO string
        #   but for to_csv, it will be a string output by datetime
        #   For JSON we decided to output the epoch timestamp in seconds
        #   (as is fairly standard for JavaScript)
        #   And for csv, we do a string
        if fmt == "csv":
            # there are also a ton of newline objects
            # that mess up our ability to write to csv
            # we remove these newlines so that the output is a valid CSV format
            self.log.info("Cleaning data and writing to CSV")
            possible_strings = df.columns[df.dtypes == "object"]
            df[possible_strings] = df[possible_strings].apply(
                lambda x: x.str.replace("\r\n", "")
            )
            df[possible_strings] = df[possible_strings].apply(
                lambda x: x.str.replace("\n", "")
            )

            # write the dataframe
            df.to_csv(filename, index=False)
        elif fmt == "json":
            # pass orient by keyword: positional args beyond the path are
            # deprecated in recent pandas
            df.to_json(filename, orient="records", date_unit="s")
        elif fmt == "ndjson":
            df.to_json(filename, orient="records", lines=True, date_unit="s")

        return df
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PHP devappserver2 runtime."""
import base64
import cStringIO
import httplib
import logging
import os
import subprocess
import sys
import time
import urllib
import google
from google.appengine.api import appinfo
from google.appengine.tools.devappserver2 import environ_utils
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import php
from google.appengine.tools.devappserver2 import request_rewriter
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import safe_subprocess
from google.appengine.tools.devappserver2 import wsgi_server
# Locate the PHP SDK shipped next to the launcher script.  First resolve
# symlinks on the script path; if that location doesn't exist, fall back to
# the unresolved path.
SDK_PATH = os.path.abspath(
    os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'php/sdk'))

if not os.path.exists(SDK_PATH):
    SDK_PATH = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]),
                                            'php/sdk'))

# Bootstrap script executed by php-cgi before each user script (see __call__).
SETUP_PHP_PATH = os.path.join(os.path.dirname(php.__file__), 'setup.php')
class PHPRuntime(object):
  """A WSGI application that runs PHP scripts using the PHP CGI binary."""

  def __init__(self, config):
    """Initializer for PHPRuntime.

    Args:
      config: A runtime_config_pb2.Config message for this app/instance.
    """
    logging.debug('Initializing runtime with %s', config)
    self.config = config
    # version_id is either "version" or "module:version".
    if appinfo.MODULE_SEPARATOR not in config.version_id:
      module_id = appinfo.DEFAULT_MODULE
      version_id = config.version_id
    else:
      module_id, version_id = config.version_id.split(appinfo.MODULE_SEPARATOR)
    # Base CGI environment for every request; __call__ copies it and layers
    # the per-request values on top.
    self.environ_template = {
        'APPLICATION_ID': str(config.app_id),
        'CURRENT_MODULE_ID': module_id,
        'CURRENT_VERSION_ID': version_id,
        'DATACENTER': str(config.datacenter),
        'INSTANCE_ID': str(config.instance_id),
        'APPENGINE_RUNTIME': 'php',
        'AUTH_DOMAIN': str(config.auth_domain),
        'HTTPS': 'off',
        # By default php-cgi does not allow .php files to be run directly so
        # REDIRECT_STATUS must be set. See:
        # http://php.net/manual/en/security.cgi-bin.force-redirect.php
        'REDIRECT_STATUS': '1',
        'REMOTE_API_HOST': str(config.api_host),
        'REMOTE_API_PORT': str(config.api_port),
        'SERVER_SOFTWARE': http_runtime_constants.SERVER_SOFTWARE,
        'TZ': 'UTC',
        }
    # User-configured environment variables override/extend the defaults.
    self.environ_template.update((env.key, env.value) for env in config.environ)

  def __call__(self, environ, start_response):
    """Handles an HTTP request for the runtime using a PHP executable.

    Args:
      environ: An environ dict for the request as defined in PEP-333.
      start_response: A function with semantics defined in PEP-333.

    Returns:
      An iterable over strings containing the body of the HTTP response.
    """
    user_environ = self.environ_template.copy()
    environ_utils.propagate_environs(environ, user_environ)
    user_environ['REQUEST_METHOD'] = environ.get('REQUEST_METHOD', 'GET')
    user_environ['PATH_INFO'] = environ['PATH_INFO']
    user_environ['QUERY_STRING'] = environ['QUERY_STRING']

    # Construct the partial URL that PHP expects for REQUEST_URI
    # (http://php.net/manual/en/reserved.variables.server.php) using part of
    # the process described in PEP-333
    # (http://www.python.org/dev/peps/pep-0333/#url-reconstruction).
    user_environ['REQUEST_URI'] = urllib.quote(user_environ['PATH_INFO'])
    if user_environ['QUERY_STRING']:
      user_environ['REQUEST_URI'] += '?' + user_environ['QUERY_STRING']

    # Modify the SCRIPT_FILENAME to specify the setup script that readies the
    # PHP environment. Put the user script in REAL_SCRIPT_FILENAME.
    user_environ['REAL_SCRIPT_FILENAME'] = os.path.normpath(
        os.path.join(self.config.application_root,
                     environ[http_runtime_constants.SCRIPT_HEADER]))
    user_environ['SCRIPT_FILENAME'] = SETUP_PHP_PATH
    user_environ['REMOTE_REQUEST_ID'] = environ[
        http_runtime_constants.REQUEST_ID_ENVIRON]

    # Pass the APPLICATION_ROOT so we can use it in the setup script. We will
    # remove it from the environment before we execute the user script.
    user_environ['APPLICATION_ROOT'] = self.config.application_root

    if 'CONTENT_TYPE' in environ:
      user_environ['CONTENT_TYPE'] = environ['CONTENT_TYPE']
      user_environ['HTTP_CONTENT_TYPE'] = environ['CONTENT_TYPE']

    if 'CONTENT_LENGTH' in environ:
      user_environ['CONTENT_LENGTH'] = environ['CONTENT_LENGTH']
      user_environ['HTTP_CONTENT_LENGTH'] = environ['CONTENT_LENGTH']
      # Read the request body so it can be piped to php-cgi's stdin below.
      content = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
    else:
      content = ''

    # On Windows, in order to run a side-by-side assembly the specified env
    # must include a valid SystemRoot.
    if 'SYSTEMROOT' in os.environ:
      user_environ['SYSTEMROOT'] = os.environ['SYSTEMROOT']

    # See http://www.php.net/manual/en/ini.core.php#ini.include-path.
    include_paths = [self.config.application_root, SDK_PATH]
    if sys.platform == 'win32':
      # See https://bugs.php.net/bug.php?id=46034 for quoting requirements.
      include_path = 'include_path="%s"' % ';'.join(include_paths)
    else:
      include_path = 'include_path=%s' % ':'.join(include_paths)

    args = [self.config.php_config.php_executable_path, '-d', include_path]

    # Load php.ini from application's root.
    args.extend(['-c', self.config.application_root])

    if self.config.php_config.enable_debugger:
      args.extend(['-d', 'xdebug.remote_enable="1"'])
      user_environ['XDEBUG_CONFIG'] = os.environ.get('XDEBUG_CONFIG', '')

    # Interactive (shell) requests disable HTML error formatting.
    request_type = environ.pop(http_runtime_constants.REQUEST_TYPE_HEADER, None)
    if request_type == 'interactive':
      args.extend(['-d', 'html_errors="0"'])
      user_environ[http_runtime_constants.REQUEST_TYPE_HEADER] = request_type

    try:
      p = safe_subprocess.start_process(args,
                                        input_string=content,
                                        env=user_environ,
                                        cwd=self.config.application_root,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
      stdout, stderr = p.communicate()
    except Exception as e:
      logging.exception('Failure to start PHP with: %s', args)
      start_response('500 Internal Server Error',
                     [(http_runtime_constants.ERROR_CODE_HEADER, '1')])
      return ['Failure to start the PHP subprocess with %r:\n%s' % (args, e)]

    if p.returncode:
      if request_type == 'interactive':
        # For interactive requests surface the raw output to the user.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        message = httplib.HTTPMessage(cStringIO.StringIO(stdout))
        return [message.fp.read()]
      else:
        logging.error('php failure (%r) with:\nstdout:\n%sstderr:\n%s',
                      p.returncode, stdout, stderr)
        start_response('500 Internal Server Error',
                       [(http_runtime_constants.ERROR_CODE_HEADER, '1')])
        message = httplib.HTTPMessage(cStringIO.StringIO(stdout))
        return [message.fp.read()]

    # Parse the CGI-style response: headers first, then the body.
    message = httplib.HTTPMessage(cStringIO.StringIO(stdout))
    if 'Status' in message:
      status = message['Status']
      del message['Status']
    else:
      status = '200 OK'

    # Ensures that we avoid merging repeat headers into a single header,
    # allowing use of multiple Set-Cookie headers.
    headers = []
    for name in message:
      for value in message.getheaders(name):
        headers.append((name, value))

    start_response(status, headers)
    return [message.fp.read()]
def main():
  """Reads a base64-encoded runtime config from stdin and serves forever."""
  config = runtime_config_pb2.Config()
  config.ParseFromString(base64.b64decode(sys.stdin.read()))
  server = wsgi_server.WsgiServer(
      ('localhost', 0),
      request_rewriter.runtime_rewriter_middleware(PHPRuntime(config)))
  server.start()
  # The devappserver harness reads the chosen port from our stdout; after
  # announcing it, redirect stdout to stderr so stray prints can't corrupt
  # the protocol.
  print server.port
  sys.stdout.close()
  sys.stdout = sys.stderr
  try:
    # Serve until interrupted; the wsgi server runs on background threads.
    while True:
      time.sleep(1)
  except KeyboardInterrupt:
    pass
  finally:
    server.quit()


if __name__ == '__main__':
  main()
|
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
from chaco.scatterplot import ScatterPlot
from numpy import Inf
from traits.api import Bool, on_trait_change, Event, Int
# =============local library imports ==========================
from pychron.graph.graph import Graph
# =============standard library imports ========================
class StackedGraph(Graph):
    """Graph whose plots are stacked vertically in one container and share
    their x (index) range."""

    # indices=List
    bind_index = Bool(True)  # bind new series' index range to siblings
    bind_padding = Bool(True)  # propagate left/right padding changes to all plots
    bind_y_title_spacing = Bool(True)  # propagate value-axis title spacing to all plots
    bind_grids = Bool(True)  # propagate grid trait changes to all plots
    equi_stack = True  # when True, every plot gets an equal share of the height
    panel_height = 100  # default height for plots added after the first
    _has_title = False  # guard so only one plot in the stack carries a title
    # padding_bottom = 40
    fixed_bounds = Bool(False)  # when True, skip automatic vertical resizing
    metadata_updated = Event  # NOTE(review): appears to fire on bound-index
    # metadata changes (see _update_metadata) — confirm against callers
    vertical_resize = Bool(True)  # resize plots when the container bounds change
def get_rescale_actions(self):
return [("Rescale All Y", "rescale_all_y", {})]
def rescale_all_y(self):
ymi = Inf
yma = -Inf
for plot in self.plots:
yma = max(yma, plot.value_range.high)
ymi = min(ymi, plot.value_range.low)
for i, plot in enumerate(self.plots):
self.set_y_limits(ymi, yma, plotid=i)
self.refresh()
@on_trait_change("plots:value_axis:title_spacing")
def _update_value_axis(self, obj, name, old, new):
if self.bind_y_title_spacing:
for p in self.plots:
p.value_axis.trait_set(**{name: new})
@on_trait_change("plots:[x_grid:[visible,line_+], y_grid:[visible,line_+]]")
def _update_grids(self, obj, name, old, new):
if self.bind_grids:
grid = "x_grid" if obj.orientation == "vertical" else "y_grid"
for p in self.plots:
setattr(getattr(p, grid), name, new)
# getattr(p, grid).visible = new
@on_trait_change("plots:[padding_left, padding_right]")
def _update_padding(self, obj, name, old, new):
if self.bind_padding:
for p in self.plots:
p.trait_set(**{name: new})
    def clear_has_title(self):
        # Reset the title guard so the next new_plot() call may carry a title.
        self._has_title = False
def add_minor_xticks(self, plotid=0, **kw):
if plotid != 0:
kw["aux_component"] = self.plots[0]
super(StackedGraph, self).add_minor_xticks(plotid=plotid, **kw)
def add_minor_yticks(self, plotid=0, **kw):
if plotid != 0:
kw["aux_component"] = self.plots[0]
super(StackedGraph, self).add_minor_yticks(plotid=plotid, **kw)
def container_factory(self, *args, **kw):
c = super(StackedGraph, self).container_factory(*args, **kw)
"""
bind to self.plotcontainer.bounds
allows stacked graph to resize vertically
"""
c.on_trait_change(self._bounds_changed, "bounds")
return c
def new_plot(self, **kw):
if "title" in kw:
if self._has_title:
kw.pop("title")
self._has_title = True
n = len(self.plotcontainer.components)
if n > 0:
if "resizable" not in kw:
kw["resizable"] = "h"
# kw['resizable'] = 'h'
if "bounds" not in kw:
kw["bounds"] = (1, self.panel_height)
p = super(StackedGraph, self).new_plot(**kw)
# p.value_axis.ensure_labels_bounded = True
# p.value_axis.title_spacing = 50
if n >= 1:
pm = self.plotcontainer.components[0]
pind = pm.index_range
for pi in self.plotcontainer.components[1:]:
pi.index_range = pind
self.set_paddings()
self._bounds_changed(self.plotcontainer.bounds)
# p.fill_padding=True
# p.bgcolor='green'
return p
def set_paddings(self):
pc = self.plotcontainer
n = len(pc.components)
bottom = pc.stack_order == "bottom_to_top"
comps = pc.components
if not bottom:
comps = reversed(comps)
if n > 1:
for i, pi in enumerate(comps):
if i < n - 1:
pi.padding_top = 0
if i == 0:
pi.index_axis.visible = True
else:
pi.index_axis.visible = False
pi.padding_bottom = 0
def new_series(self, *args, **kw):
s, _p = super(StackedGraph, self).new_series(*args, **kw)
if self.bind_index:
bind_id = kw.get("bind_id")
if isinstance(s, ScatterPlot):
s.bind_id = bind_id
self._bind_index(s, bind_id=bind_id)
return s, _p
def _bounds_changed(self, bounds):
"""
vertically resizes the stacked graph.
the plots are sized equally
"""
if self.vertical_resize:
self._update_bounds(bounds, self.plotcontainer.components)
def _update_bounds(self, bounds, comps):
if self.fixed_bounds:
return
padding_top = sum([getattr(p, "padding_top") for p in comps])
padding_bottom = sum([getattr(p, "padding_bottom") for p in comps])
#
pt = (
self.plotcontainer.padding_top
+ self.plotcontainer.padding_bottom
+ padding_top
+ padding_bottom
)
n = len(self.plotcontainer.components)
if self.equi_stack:
for p in self.plotcontainer.components:
p.bounds[1] = (bounds[1] - pt) / n
else:
try:
self.plots[0].bounds[1] = (bounds[1] - pt) / max(1, (n - 1))
except IndexError:
pass
def _update_metadata(self, bind_id, obj, name, old, new):
if obj:
if hasattr(obj, "suppress_update") and obj.suppress_update:
return
elif hasattr(obj, "suppress_hover_update") and obj.suppress_hover_update:
return
obj.suppress_update = True
for plot in self.plots:
for k, ps in plot.plots.items():
si = ps[0]
if si.index is not obj:
if hasattr(si, "bind_id"):
if si.bind_id == bind_id:
si.index.suppress_update = True
si.index.metadata = obj.metadata
si.index.suppress_update = False
obj.suppress_update = False
def _bind_index(self, scatter, bind_id=0, bind_selection=True, **kw):
if bind_selection:
def func(obj, name, old, new):
self._update_metadata(bind_id, obj, name, old, new)
scatter.index.on_trait_change(func, "metadata_changed")
class ColumnStackedGraph(StackedGraph):
    """A stacked graph laid out as a grid: ``ncols`` columns of ``nrows``
    stacked panels, using a grid container instead of a vertical stack.
    """

    # Grid shape used by container_factory.
    ncols = Int
    nrows = Int

    def _update_bounds(self, bounds, comps):
        # Same height calculation as StackedGraph but per row and without
        # the container's own padding terms.
        padding_top = sum([getattr(p, "padding_top") for p in comps])
        padding_bottom = sum([getattr(p, "padding_bottom") for p in comps])
        pt = padding_bottom + padding_top
        n = self.nrows
        if self.equi_stack:
            for p in self.plotcontainer.components:
                p.bounds = (1, (bounds[1] - pt) / n)
        else:
            try:
                self.plots[0].bounds[1] = (bounds[1] - pt) / max(1, (n - 1))
            except IndexError:
                pass

    def set_paddings(self):
        """Zero paddings per column; only each column's last panel shows x."""
        pc = self.plotcontainer
        n = self.nrows
        comps = pc.components

        def colsplit(l, ncols):
            # Split the flat component list into ``ncols`` interleaved columns.
            nn = len(l)
            return [l[i:nn:ncols] for i in range(ncols)]

        cols = colsplit(comps, self.ncols)
        if n > 1:
            for col in cols:
                n = len(col)
                for i, pi in enumerate(col):
                    pi.padding_top = 0
                    pi.padding_bottom = 0
                    if i == n - 1:
                        # Last panel in the column keeps the visible x axis.
                        pi.index_axis.visible = True
                    else:
                        pi.index_axis.visible = False

    def container_factory(self, *args, **kw):
        """Create a grid ('g') container shaped (nrows, ncols)."""
        kw["kind"] = "g"
        kw["shape"] = (self.nrows, self.ncols)
        # kw['spacing'] = (0, 0)
        c = super(ColumnStackedGraph, self).container_factory(*args, **kw)
        return c
if __name__ == "__main__":
    # Demo: a fixed-bounds stacked graph with three green panels, each
    # holding one simple series.
    g = StackedGraph(
        resizable=True,
        fixed_bounds=True,
        container_dict={"padding_top": 15 * 4, "spacing": 10, "padding_bottom": 40},
    )
    for i in range(3):
        p = g.new_plot(padding=[80, 10, 10, 40], resizable="", bounds=(100, 100))
        p.fill_padding = True
        p.bgcolor = "green"
        # p=g.new_plot()
        g.new_series([1, 2, 3], [4, 5, 10 * i])
    g.configure_traits()
# ============= EOF ====================================
|
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
# NOTE: these flags come from `from tests_config import *` above.
if 'ENABLE_WALLET' not in vars():
    ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
    ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
    ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
    ENABLE_ZMQ=0

# Coverage is opt-in via the --coverage flag parsed below.
ENABLE_COVERAGE=0

#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")

# ANSI bold on/off escape codes (POSIX terminals only).
bold = ("","")
if (os.name == 'posix'):
    bold = ('\033[0m', '\033[1m')

for arg in sys.argv[1:]:
    if arg == '--coverage':
        ENABLE_COVERAGE = 1
    elif (p.match(arg) or arg == "-h"):
        # Unrecognized long options (and -h) are forwarded to every test script.
        passOn += " " + arg
    else:
        # Everything else selects scripts or toggles runner behavior
        # (-extended, -win, script names).
        opts.add(arg)

#Set env vars
buildDir = BUILDDIR
if "ETPD" not in os.environ:
    os.environ["ETPD"] = buildDir + '/src/etpd' + EXEEXT
if "ETPCLI" not in os.environ:
    os.environ["ETPCLI"] = buildDir + '/src/etp-cli' + EXEEXT

if EXEEXT == ".exe" and "-win" not in opts:
    # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
    # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
    print "Win tests currently disabled by default. Use -win option to enable"
    sys.exit(0)

if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
    print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
    sys.exit(0)

# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
    try:
        import zmq
    except ImportError as e:
        print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
            "to run zmq tests, see dependency info in /qa/README.md.")
        raise e

#Tests
# Basic suite, run by default (see runtests() selection rules).
testScripts = [
    'bip68-112-113-p2p.py',
    'wallet.py',
    'listtransactions.py',
    'receivedby.py',
    'mempool_resurrect_test.py',
    'txn_doublespend.py --mineblock',
    'txn_clone.py',
    'getchaintips.py',
    'rawtransactions.py',
    'rest.py',
    'mempool_spendcoinbase.py',
    'mempool_reorg.py',
    'mempool_limit.py',
    'httpbasics.py',
    'multi_rpc.py',
    'zapwallettxes.py',
    'proxy_test.py',
    'merkle_blocks.py',
    'fundrawtransaction.py',
    'signrawtransactions.py',
    'walletbackup.py',
    'nodehandling.py',
    'reindex.py',
    'addressindex.py',
    'timestampindex.py',
    'spentindex.py',
    'decodescript.py',
    'p2p-fullblocktest.py', # NOTE: needs etp_hash to pass
    'blockchain.py',
    'disablewallet.py',
    'sendheaders.py', # NOTE: needs etp_hash to pass
    'keypool.py',
    'prioritise_transaction.py',
    'invalidblockrequest.py', # NOTE: needs etp_hash to pass
    'invalidtxrequest.py', # NOTE: needs etp_hash to pass
    'abandonconflict.py',
    'p2p-versionbits-warning.py',
]
if ENABLE_ZMQ:
    testScripts.append('zmq_test.py')

# Extended suite, only run with -extended or when named explicitly.
testScriptsExt = [
    'bip9-softforks.py',
    'bip65-cltv.py',
    'bip65-cltv-p2p.py', # NOTE: needs etp_hash to pass
    'bip68-sequence.py',
    'bipdersig-p2p.py', # NOTE: needs etp_hash to pass
    'bipdersig.py',
    'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
    'getblocktemplate_proposals.py',
    'txn_doublespend.py',
    'txn_clone.py --mineblock',
    # 'pruning.py', # Prune mode is incompatible with -txindex.
    'forknotify.py',
    'invalidateblock.py',
    # 'rpcbind_test.py', #temporary, bug in libevent, see #6655
    'smartfees.py',
    'maxblocksinflight.py',
    'p2p-acceptblock.py', # NOTE: needs etp_hash to pass
    'mempool_packages.py',
    'maxuploadtarget.py',
    # 'replace-by-fee.py', # RBF is disabled in Etp Core
]
def runtests():
    """Run the selected basic tests, then the extended suite if requested.

    Selection rules (unchanged from the original implementation):
      * a basic script runs when no selection args were given, when only
        ``-win`` was given, when ``-extended`` was given, or when the script
        (with or without the ``.py`` suffix) was named on the command line;
      * an extended script runs when ``-extended`` was given or when named.

    Relies on the module-level ``opts``, ``passOn``, ``buildDir``, ``bold``,
    ``testScripts``/``testScriptsExt`` and ``ENABLE_COVERAGE`` globals set up
    at import time.
    """
    coverage = None

    if ENABLE_COVERAGE:
        coverage = RPCCoverage()
        print("Initializing coverage directory at %s\n" % coverage.dir)

    rpcTestDir = buildDir + '/qa/rpc-tests/'
    run_extended = '-extended' in opts
    cov_flag = coverage.flag if coverage else ''
    flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)

    def selected(script):
        # A script is explicitly selected by its filename or by its stem.
        return script in opts or re.sub(".py$", "", script) in opts

    def run_one(script, level=""):
        # Run a single test script through the shell, timing its duration.
        print("Running %stestscript %s%s%s ..." % (level, bold[1], script, bold[0]))
        time0 = time.time()
        subprocess.check_call(
            rpcTestDir + script + flags, shell=True)
        print("Duration: %s s\n" % (int(time.time() - time0)))

    # Run Tests
    for script in testScripts:
        if (len(opts) == 0
                or (len(opts) == 1 and "-win" in opts)
                or run_extended
                or selected(script)):
            run_one(script)

    # exit if help is called so we print just one set of instructions
    if re.match(" -h| --help", passOn):
        sys.exit(0)

    # Run Extended Tests
    for script in testScriptsExt:
        if run_extended or selected(script):
            run_one(script, "2nd level ")

    if coverage:
        coverage.report_rpc_coverage()
        print("Cleaning up coverage data")
        coverage.cleanup()
class RPCCoverage(object):
    """
    Coverage reporting utilities for pull-tester.

    Each test-script subprocess appends the RPC commands it invoked to
    `coverage.*` files inside a shared temporary directory, alongside a
    reference listing of every RPC command (`rpc_interface.txt`) taken
    from `bitcoin-cli help`.  Once all tests have finished, the union of
    the invoked commands is diffed against the reference to report which
    RPC commands were never exercised.

    See also: qa/rpc-tests/test_framework/coverage.py
    """

    def __init__(self):
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir %s' % self.dir

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()

        if not uncovered:
            print("All RPC commands covered.")
            return

        print("Uncovered RPC commands:")
        print("".join((" - %s\n" % i) for i in sorted(uncovered)))

    def cleanup(self):
        """Delete the temporary coverage directory."""
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `qa/rpc-tests/test-framework/coverage.py`
        reference_path = os.path.join(self.dir, 'rpc_interface.txt')

        if not os.path.isfile(reference_path):
            raise RuntimeError("No coverage reference found")

        with open(reference_path, 'r') as ref_file:
            all_cmds = set(line.strip() for line in ref_file)

        covered_cmds = set()
        for root, dirs, files in os.walk(self.dir):
            for fname in files:
                if not fname.startswith('coverage.'):
                    continue
                with open(os.path.join(root, fname), 'r') as cov_file:
                    covered_cmds.update(line.strip() for line in cov_file)

        return all_cmds - covered_cmds
if __name__ == '__main__':
    # Entry point: run the selected RPC regression tests.
    runtests()
|
|
"""Support for Xiaomi Miio."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import timedelta
import logging
import async_timeout
from miio import (
AirFresh,
AirFreshA1,
AirFreshT2017,
AirHumidifier,
AirHumidifierMiot,
AirHumidifierMjjsq,
AirPurifier,
AirPurifierMB4,
AirPurifierMiot,
CleaningDetails,
CleaningSummary,
ConsumableStatus,
DeviceException,
DNDStatus,
Fan,
Fan1C,
FanP5,
FanP9,
FanP10,
FanP11,
FanZA5,
RoborockVacuum,
Timer,
VacuumStatus,
)
from miio.gateway.gateway import GatewayException
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_TOKEN, Platform
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
ATTR_AVAILABLE,
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_GATEWAY,
CONF_MODEL,
DOMAIN,
KEY_COORDINATOR,
KEY_DEVICE,
MODEL_AIRFRESH_A1,
MODEL_AIRFRESH_T2017,
MODEL_AIRPURIFIER_3C,
MODEL_FAN_1C,
MODEL_FAN_P5,
MODEL_FAN_P9,
MODEL_FAN_P10,
MODEL_FAN_P11,
MODEL_FAN_ZA5,
MODELS_AIR_MONITOR,
MODELS_FAN,
MODELS_FAN_MIIO,
MODELS_HUMIDIFIER,
MODELS_HUMIDIFIER_MIIO,
MODELS_HUMIDIFIER_MIOT,
MODELS_HUMIDIFIER_MJJSQ,
MODELS_LIGHT,
MODELS_PURIFIER_MIOT,
MODELS_SWITCH,
MODELS_VACUUM,
ROBOROCK_GENERIC,
ROCKROBO_GENERIC,
AuthException,
SetupException,
)
from .gateway import ConnectXiaomiGateway
_LOGGER = logging.getLogger(__name__)

# Upper bound (seconds) on a single blocking device poll.
POLLING_TIMEOUT_SEC = 10
# Coordinator poll cadence; only polled while there are subscribers.
UPDATE_INTERVAL = timedelta(seconds=15)

# Platform lists forwarded per device category during setup.
GATEWAY_PLATFORMS = [
    Platform.ALARM_CONTROL_PANEL,
    Platform.LIGHT,
    Platform.SENSOR,
    Platform.SWITCH,
]
SWITCH_PLATFORMS = [Platform.SWITCH]
FAN_PLATFORMS = [
    Platform.BINARY_SENSOR,
    Platform.FAN,
    Platform.NUMBER,
    Platform.SELECT,
    Platform.SENSOR,
    Platform.SWITCH,
]
HUMIDIFIER_PLATFORMS = [
    Platform.BINARY_SENSOR,
    Platform.HUMIDIFIER,
    Platform.NUMBER,
    Platform.SELECT,
    Platform.SENSOR,
    Platform.SWITCH,
]
LIGHT_PLATFORMS = [Platform.LIGHT]
VACUUM_PLATFORMS = [Platform.BINARY_SENSOR, Platform.SENSOR, Platform.VACUUM]
AIR_MONITOR_PLATFORMS = [Platform.AIR_QUALITY, Platform.SENSOR]

# Pedestal-fan models that are handled by a dedicated python-miio class.
MODEL_TO_CLASS_MAP = {
    MODEL_FAN_1C: Fan1C,
    MODEL_FAN_P10: FanP10,
    MODEL_FAN_P11: FanP11,
    MODEL_FAN_P5: FanP5,
    MODEL_FAN_P9: FanP9,
    MODEL_FAN_ZA5: FanZA5,
}
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up the Xiaomi Miio components from a config entry."""
    hass.data.setdefault(DOMAIN, {})

    flow_type = entry.data[CONF_FLOW_TYPE]
    if flow_type == CONF_GATEWAY:
        await async_setup_gateway_entry(hass, entry)
        return True
    if flow_type == CONF_DEVICE:
        # Device entries may legitimately fail setup (unsupported model).
        return bool(await async_setup_device_entry(hass, entry))

    # Unknown flow types are accepted without doing anything, matching the
    # historical behavior of this integration.
    return True
@callback
def get_platforms(config_entry):
    """Return the platforms belonging to a config_entry."""
    model = config_entry.data[CONF_MODEL]
    flow_type = config_entry.data[CONF_FLOW_TYPE]

    if flow_type == CONF_GATEWAY:
        return GATEWAY_PLATFORMS

    if flow_type == CONF_DEVICE:
        # Exact-match model groups, checked in priority order.
        exact_groups = (
            (MODELS_SWITCH, SWITCH_PLATFORMS),
            (MODELS_HUMIDIFIER, HUMIDIFIER_PLATFORMS),
            (MODELS_FAN, FAN_PLATFORMS),
            (MODELS_LIGHT, LIGHT_PLATFORMS),
        )
        for models, platforms in exact_groups:
            if model in models:
                return platforms
        # Prefix-match groups: vacuums and air monitors.
        if any(model.startswith(prefix) for prefix in MODELS_VACUUM):
            return VACUUM_PLATFORMS
        if any(model.startswith(prefix) for prefix in MODELS_AIR_MONITOR):
            return AIR_MONITOR_PLATFORMS

    _LOGGER.error(
        "Unsupported device found! Please create an issue at "
        "https://github.com/syssi/xiaomi_airpurifier/issues "
        "and provide the following data: %s",
        model,
    )
    return []
def _async_update_data_default(hass, device):
    """Build the coordinator update method for a simple status-polling device."""

    async def update():
        """Fetch data from the device using async_add_executor_job."""

        async def _async_fetch_data():
            # Run the blocking miio call off the event loop, bounded by a timeout.
            async with async_timeout.timeout(POLLING_TIMEOUT_SEC):
                state = await hass.async_add_executor_job(device.status)
                _LOGGER.debug("Got new state: %s", state)
                return state

        try:
            return await _async_fetch_data()
        except DeviceException as ex:
            # Any error other than -9999 is fatal for this refresh cycle.
            if getattr(ex, "code", None) != -9999:
                raise UpdateFailed(ex) from ex
            _LOGGER.info("Got exception while fetching the state, trying again: %s", ex)

        # Try to fetch the data a second time after error code -9999
        try:
            return await _async_fetch_data()
        except DeviceException as ex:
            raise UpdateFailed(ex) from ex

    return update
@dataclass(frozen=True)
class VacuumCoordinatorData:
    """A class that holds the vacuum data retrieved by the coordinator."""

    # Each field mirrors one miio query; see _async_update_data_vacuum().
    status: VacuumStatus
    dnd_status: DNDStatus
    last_clean_details: CleaningDetails
    consumable_status: ConsumableStatus
    clean_history_status: CleaningSummary
    timers: list[Timer]
    # Preset name -> raw fan speed value.
    fan_speeds: dict[str, int]
    # Reverse lookup of fan_speeds: raw speed value -> preset name.
    fan_speeds_reverse: dict[int, str]
@dataclass(init=False, frozen=True)
class VacuumCoordinatorDataAttributes:
    """
    A class that holds attribute names for VacuumCoordinatorData.

    These attributes can be used in methods like `getattr` when a generic
    solutions is needed.

    See homeassistant.components.xiaomi_miio.device.XiaomiCoordinatedMiioEntity
    ._extract_value_from_attribute for
    an example.
    """

    status: str = "status"
    dnd_status: str = "dnd_status"
    last_clean_details: str = "last_clean_details"
    consumable_status: str = "consumable_status"
    clean_history_status: str = "clean_history_status"
    # Fixed: VacuumCoordinatorData names this field ``timers``, so the old
    # value "timer" could never resolve through getattr() on the data object.
    timer: str = "timers"
    fan_speeds: str = "fan_speeds"
    fan_speeds_reverse: str = "fan_speeds_reverse"
def _async_update_data_vacuum(hass, device: RoborockVacuum):
    """Build the coordinator update method collecting the full vacuum state."""

    def update() -> VacuumCoordinatorData:
        """Gather every vacuum status blob in one synchronous pass."""
        # See https://github.com/home-assistant/core/issues/38285 for reason on
        # Why timers must be fetched separately.
        try:
            timers = device.timer()
        except DeviceException as ex:
            _LOGGER.debug(
                "Unable to fetch timers, this may happen on some devices: %s", ex
            )
            timers = []

        presets = device.fan_speed_presets()
        return VacuumCoordinatorData(
            device.status(),
            device.dnd_status(),
            device.last_clean_details(),
            device.consumable_status(),
            device.clean_history(),
            timers,
            presets,
            {speed: preset for preset, speed in presets.items()},
        )

    async def update_async():
        """Fetch data from the device using async_add_executor_job."""

        async def execute_update():
            async with async_timeout.timeout(POLLING_TIMEOUT_SEC):
                state = await hass.async_add_executor_job(update)
                _LOGGER.debug("Got new vacuum state: %s", state)
                return state

        try:
            return await execute_update()
        except DeviceException as ex:
            # Anything other than -9999 fails this refresh outright.
            if getattr(ex, "code", None) != -9999:
                raise UpdateFailed(ex) from ex
            _LOGGER.info("Got exception while fetching the state, trying again: %s", ex)

        # Try to fetch the data a second time after error code -9999
        try:
            return await execute_update()
        except DeviceException as ex:
            raise UpdateFailed(ex) from ex

    return update_async
async def async_create_miio_device_and_coordinator(
    hass: HomeAssistant, entry: ConfigEntry
) -> None:
    """Set up a data coordinator and one miio device to service multiple entities.

    Picks the python-miio class from the entry's model string, optionally
    migrates a legacy humidifier fan entity, then stores the device and its
    coordinator in ``hass.data[DOMAIN][entry.entry_id]``.
    """
    model: str = entry.data[CONF_MODEL]
    host = entry.data[CONF_HOST]
    token = entry.data[CONF_TOKEN]
    name = entry.title
    device = None
    migrate = False
    update_method = _async_update_data_default
    coordinator_class = DataUpdateCoordinator

    # Only humidifiers, fans and vacuums use this shared coordinator path.
    # NOTE(review): the purifier/airfresh branches below are only reachable
    # if those models appear in MODELS_FAN — confirm against .const.
    if (
        model not in MODELS_HUMIDIFIER
        and model not in MODELS_FAN
        and model not in MODELS_VACUUM
        and not model.startswith(ROBOROCK_GENERIC)
        and not model.startswith(ROCKROBO_GENERIC)
    ):
        return

    _LOGGER.debug("Initializing with host %s (token %s...)", host, token[:5])

    # Branch order matters: specific models (e.g. MODEL_AIRPURIFIER_3C) must
    # be matched before their more generic group tests.
    # Humidifiers
    if model in MODELS_HUMIDIFIER_MIOT:
        device = AirHumidifierMiot(host, token)
        migrate = True
    elif model in MODELS_HUMIDIFIER_MJJSQ:
        device = AirHumidifierMjjsq(host, token, model=model)
        migrate = True
    elif model in MODELS_HUMIDIFIER_MIIO:
        device = AirHumidifier(host, token, model=model)
        migrate = True
    # Airpurifiers and Airfresh
    elif model == MODEL_AIRPURIFIER_3C:
        device = AirPurifierMB4(host, token)
    elif model in MODELS_PURIFIER_MIOT:
        device = AirPurifierMiot(host, token)
    elif model.startswith("zhimi.airpurifier."):
        device = AirPurifier(host, token)
    elif model.startswith("zhimi.airfresh."):
        device = AirFresh(host, token)
    elif model == MODEL_AIRFRESH_A1:
        device = AirFreshA1(host, token)
    elif model == MODEL_AIRFRESH_T2017:
        device = AirFreshT2017(host, token)
    elif (
        model in MODELS_VACUUM
        or model.startswith(ROBOROCK_GENERIC)
        or model.startswith(ROCKROBO_GENERIC)
    ):
        # Vacuums get a richer update method and a typed coordinator.
        device = RoborockVacuum(host, token)
        update_method = _async_update_data_vacuum
        coordinator_class = DataUpdateCoordinator[VacuumCoordinatorData]
    # Pedestal fans
    elif model in MODEL_TO_CLASS_MAP:
        device = MODEL_TO_CLASS_MAP[model](host, token)
    elif model in MODELS_FAN_MIIO:
        device = Fan(host, token, model=model)
    else:
        _LOGGER.error(
            "Unsupported device found! Please create an issue at "
            "https://github.com/syssi/xiaomi_airpurifier/issues "
            "and provide the following data: %s",
            model,
        )
        return

    if migrate:
        # Removing fan platform entity for humidifiers and migrate the name to the config entry for migration
        entity_registry = er.async_get(hass)
        entity_id = entity_registry.async_get_entity_id("fan", DOMAIN, entry.unique_id)
        if entity_id:
            # This check is entities that have a platform migration only and should be removed in the future
            if migrate_entity_name := entity_registry.async_get(entity_id).name:
                hass.config_entries.async_update_entry(entry, title=migrate_entity_name)
            entity_registry.async_remove(entity_id)

    # Create update miio device and coordinator
    coordinator = coordinator_class(
        hass,
        _LOGGER,
        name=name,
        update_method=update_method(hass, device),
        # Polling interval. Will only be polled if there are subscribers.
        update_interval=UPDATE_INTERVAL,
    )
    hass.data[DOMAIN][entry.entry_id] = {
        KEY_DEVICE: device,
        KEY_COORDINATOR: coordinator,
    }

    # Trigger first data fetch
    await coordinator.async_config_entry_first_refresh()
async def async_setup_gateway_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Set up the Xiaomi Gateway component from a config entry."""
    host = entry.data[CONF_HOST]
    token = entry.data[CONF_TOKEN]
    name = entry.title
    # NOTE(review): captured before the unique_id rewrite below, so legacy
    # "-gateway" ids are what get registered — confirm this is intended.
    gateway_id = entry.unique_id

    # For backwards compat
    if entry.unique_id.endswith("-gateway"):
        hass.config_entries.async_update_entry(entry, unique_id=entry.data["mac"])

    entry.async_on_unload(entry.add_update_listener(update_listener))

    # Connect to gateway
    gateway = ConnectXiaomiGateway(hass, entry)
    try:
        await gateway.async_connect_gateway(host, token)
    except AuthException as error:
        raise ConfigEntryAuthFailed() from error
    except SetupException as error:
        raise ConfigEntryNotReady() from error
    gateway_info = gateway.gateway_info

    # Register the gateway itself in the device registry.
    device_registry = dr.async_get(hass)
    device_registry.async_get_or_create(
        config_entry_id=entry.entry_id,
        connections={(dr.CONNECTION_NETWORK_MAC, gateway_info.mac_address)},
        identifiers={(DOMAIN, gateway_id)},
        manufacturer="Xiaomi",
        name=name,
        model=gateway_info.model,
        sw_version=gateway_info.firmware_version,
        hw_version=gateway_info.hardware_version,
    )

    def update_data():
        """Fetch data from the subdevice."""
        data = {}
        for sub_device in gateway.gateway_device.devices.values():
            try:
                sub_device.update()
            except GatewayException as ex:
                # A failing subdevice is reported unavailable, not fatal.
                _LOGGER.error("Got exception while fetching the state: %s", ex)
                data[sub_device.sid] = {ATTR_AVAILABLE: False}
            else:
                data[sub_device.sid] = {ATTR_AVAILABLE: True}
        return data

    async def async_update_data():
        """Fetch data from the subdevice using async_add_executor_job."""
        return await hass.async_add_executor_job(update_data)

    # Create update coordinator
    coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name=name,
        update_method=async_update_data,
        # Polling interval. Will only be polled if there are subscribers.
        update_interval=UPDATE_INTERVAL,
    )

    hass.data[DOMAIN][entry.entry_id] = {
        CONF_GATEWAY: gateway.gateway_device,
        KEY_COORDINATOR: coordinator,
    }

    for platform in GATEWAY_PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, platform)
        )
async def async_setup_device_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up the Xiaomi Miio device component from a config entry."""
    supported_platforms = get_platforms(entry)

    # The device/coordinator is created even when no platform supports the
    # model, preserving the original setup order.
    await async_create_miio_device_and_coordinator(hass, entry)

    if not supported_platforms:
        return False

    entry.async_on_unload(entry.add_update_listener(update_listener))
    hass.config_entries.async_setup_platforms(entry, supported_platforms)
    return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    unload_ok = await hass.config_entries.async_unload_platforms(
        config_entry, get_platforms(config_entry)
    )

    # Only drop our stored device/coordinator once every platform unloaded.
    if unload_ok:
        hass.data[DOMAIN].pop(config_entry.entry_id)

    return unload_ok
async def update_listener(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
    """Handle options update."""
    # Reload the entry so changed options take effect immediately.
    await hass.config_entries.async_reload(config_entry.entry_id)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_factory_ops.constant."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import ragged
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedConstOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
  @parameterized.parameters(
      #=========================================================================
      # 0-dimensional tensors.
      dict(pylist=b'x', expected_shape=()),
      #=========================================================================
      # 1-dimensional tensors.
      dict(pylist=[1, 2, 3], expected_shape=(3,)),
      #=========================================================================
      # 2-dimensional tensors.
      dict(pylist=[[1, 2, 3], [4], [5, 6]], expected_shape=(3, None)),
      dict(pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], expected_shape=(3, None)),
      #=========================================================================
      # 3-dimensional tensors.
      dict(
          pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
          expected_shape=(3, None, None)),
      dict(
          pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
          ragged_rank=1,
          expected_shape=(3, None, 2)),
      dict(
          pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
          inner_shape=(2,),
          expected_shape=(3, None, 2)),
      dict(
          pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
          ragged_rank=1,
          inner_shape=(2,),
          expected_shape=(3, None, 2)),
      #=========================================================================
      # 4-dimensional tensors.
      dict(
          pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                  [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
          expected_shape=(2, None, None, None)),
      dict(
          pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                  [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
          ragged_rank=1,
          expected_shape=(2, None, 2, 2)),
      dict(
          pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                  [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
          inner_shape=(2,),
          expected_shape=(2, None, None, 2)),
      dict(
          pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                  [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
          inner_shape=(2, 2),
          expected_shape=(2, None, 2, 2)),
      #=========================================================================
      # Empty tensors (no scalar values) w/ default ragged_rank and inner_shape
      dict(pylist=[], expected_shape=(0,)),
      dict(pylist=[[], [], []], expected_shape=(3, None)),
      dict(
          pylist=[[[], []], [], [[], [[]]]],
          expected_shape=(3, None, None, None)),
      #=========================================================================
      # Empty tensors (no scalar values) w/ explicit ragged_rank or inner_shape
      dict(pylist=[], ragged_rank=1, expected_shape=(0, None)),
      dict(pylist=[], ragged_rank=2, expected_shape=(0, None, None)),
      dict(pylist=[], inner_shape=(0, 100, 20), expected_shape=(0, 100, 20)),
      dict(
          pylist=[],
          ragged_rank=1,
          inner_shape=(100, 20),
          expected_shape=(0, None, 100, 20)),
      dict(
          pylist=[],
          ragged_rank=2,
          inner_shape=(100, 20),
          expected_shape=(0, None, None, 100, 20)),
      dict(pylist=[[], [], []], ragged_rank=2, expected_shape=(3, None, None)),
      dict(pylist=[], inner_shape=(0,), expected_shape=(0,)),
      dict(pylist=[[]], inner_shape=(1, 0), expected_shape=(1, 0)),
      #=========================================================================
      # default/inferred dtypes
      dict(pylist=[], expected_dtype=dtypes.float32),
      dict(pylist=[[[], [[[]], []]]], expected_dtype=dtypes.float32),
      dict(pylist=[[1, 2], [3], [4, 5, 6]], expected_dtype=dtypes.int32),
      dict(pylist=[[1., 2.], [], [4., 5., 6.]], expected_dtype=dtypes.float32),
      dict(pylist=[[1, 2], [3.], [4, 5, 6]], expected_dtype=dtypes.float32),
      dict(pylist=[[b'a', b'b'], [b'c']], expected_dtype=dtypes.string),
      dict(pylist=[[True]], expected_dtype=dtypes.bool),
      #=========================================================================
      # explicit dtypes
      dict(pylist=[], dtype=dtypes.float32),
      dict(pylist=[], dtype=dtypes.string),
      dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=dtypes.int64),
      dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=dtypes.int32),
      dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=dtypes.float32),
      dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=dtypes.float16),
      dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=dtypes.float32),
      dict(pylist=[[b'a', b'b'], [b'c'], [b'd', b'e', b'f']],
           dtype=dtypes.string),
  )
  def testRaggedConst(self,
                      pylist,
                      dtype=None,
                      ragged_rank=None,
                      inner_shape=None,
                      expected_shape=None,
                      expected_dtype=None):
    """Tests that `ragged_const(pylist).eval().tolist() == pylist`.

    Args:
      pylist: The `pylist` argument for `ragged_const()`.
      dtype: The `dtype` argument for `ragged_const()`.  If not None, then also
        test that the resulting ragged tensor has this `dtype`.
      ragged_rank: The `ragged_rank` argument for `ragged_const()`.  If not
        None, then also test that the resulting ragged tensor has this
        `ragged_rank`.
      inner_shape: The `inner_shape` argument for `ragged_const()`.  If not
        None, then also test that the resulting ragged tensor has this
        `inner_shape`.
      expected_shape: The expected shape for the resulting ragged tensor.
      expected_dtype: The expected dtype for the resulting ragged tensor (used
        to test default/inferred types when dtype=None).
    """
    rt = ragged_factory_ops.constant(
        pylist, dtype=dtype, ragged_rank=ragged_rank, inner_shape=inner_shape)

    # If dtype was explicitly specified, check it.
    if dtype is not None:
      self.assertEqual(rt.dtype, dtype)
    if expected_dtype is not None:
      self.assertEqual(rt.dtype, expected_dtype)

    # If ragged_rank was explicitly specified, check it.
    if ragged_rank is not None:
      if isinstance(rt, ragged_tensor.RaggedTensor):
        self.assertEqual(rt.ragged_rank, ragged_rank)
      else:
        # constant() may return a dense Tensor when ragged_rank is 0.
        self.assertEqual(0, ragged_rank)

    # If inner_shape was explicitly specified, check it.
    if inner_shape is not None:
      if isinstance(rt, ragged_tensor.RaggedTensor):
        self.assertEqual(rt.flat_values.shape.as_list()[1:], list(inner_shape))
      else:
        self.assertEqual(rt.shape.as_list(), list(inner_shape))

    if expected_shape is not None:
      self.assertEqual(tuple(rt.shape.as_list()), expected_shape)

    # Round-trip check: the ragged tensor's values match the python list.
    self.assertRaggedEqual(rt, pylist)
@parameterized.parameters(
dict(
pylist=12,
ragged_rank=1,
exception=ValueError,
message='Invalid pylist=12: incompatible with ragged_rank=1'),
dict(
pylist=12,
inner_shape=(1,),
exception=ValueError,
message='Invalid pylist=12: incompatible with '
'dim\\(inner_shape\\)=1'),
dict(
pylist=[[[1], [2]]],
ragged_rank=-1,
exception=ValueError,
message='Invalid ragged_rank=-1: must be nonnegative'),
dict(
pylist=[[1, [2]]],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
dict(
pylist=[[[1]], [[[2]]]],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
dict(
pylist=[[1], [[]]],
exception=ValueError,
message='Invalid pylist=.*: empty list nesting is greater '
'than scalar value nesting'),
dict(
pylist=[1, 2, 3],
ragged_rank=1,
exception=ValueError,
message='pylist has scalar values depth 1, but ragged_rank=1 '
'requires scalar value depth greater than 1'),
dict(
pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
ragged_rank=2,
exception=ValueError,
message='pylist has scalar values depth 2, but ragged_rank=2 '
'requires scalar value depth greater than 2'),
dict(pylist=[1, 2, 3], inner_shape=(1, 1), exception=TypeError),
dict(
pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
inner_shape=(2, 2),
ragged_rank=1,
exception=ValueError,
message='Invalid pylist=.*: incompatible with ragged_rank=1 and '
'dim\\(inner_shape\\)=2'),
dict(
pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8, 9]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
dict(
pylist=[[[], [[]]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
)
def testRaggedConstError(self,
pylist,
dtype=None,
ragged_rank=None,
inner_shape=None,
exception=None,
message=None):
"""Tests that `ragged_const()` raises an expected exception."""
self.assertRaisesRegexp(
exception,
message,
ragged_factory_ops.constant,
pylist,
dtype=dtype,
ragged_rank=ragged_rank,
inner_shape=inner_shape)
@parameterized.parameters([
dict(pylist=9, scalar_depth=0, max_depth=0),
dict(pylist=[9], scalar_depth=1, max_depth=1),
dict(pylist=[1, 2, 3], scalar_depth=1, max_depth=1),
dict(pylist=[[1], [2]], scalar_depth=2, max_depth=2),
dict(pylist=[[[1], [2]], [[3]]], scalar_depth=3, max_depth=3),
dict(pylist=[], scalar_depth=None, max_depth=1),
dict(pylist=[[]], scalar_depth=None, max_depth=2),
dict(pylist=[[], [], []], scalar_depth=None, max_depth=2),
dict(pylist=[[[], []], [[], [[[]]]], []], scalar_depth=None, max_depth=5),
dict(
pylist=[1, [2]],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
dict(
pylist=[[1], 2],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
dict(
pylist=[[[[1]], []], [[2]]],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
])
def testScalarAndMaxDepthHelper(self,
pylist,
scalar_depth=None,
max_depth=None,
exception=None,
message=None):
"""Tests for the _find_scalar_and_max_depth helper function."""
if exception is not None:
self.assertRaisesRegexp(exception, message,
ragged_factory_ops._find_scalar_and_max_depth,
pylist)
else:
self.assertEqual(
ragged_factory_ops._find_scalar_and_max_depth(pylist),
(scalar_depth, max_depth))
@parameterized.parameters([
dict(pylist=[[1], [2, 3]], ragged_rank=1, inner_shape=()),
dict(
pylist=[[[1], [2]], [[3], [4], [5]]], ragged_rank=1,
inner_shape=(1,)),
dict(pylist=[[[1], [2]], [[3], [4], [5]]], ragged_rank=2, inner_shape=()),
dict(
pylist=[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]]],
ragged_rank=1,
inner_shape=(2, 3)),
dict(
pylist=[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]]],
ragged_rank=2,
inner_shape=(3,)),
dict(
pylist=[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]]],
ragged_rank=3,
inner_shape=()),
dict(
pylist=[[[1], [2, 3]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
dict(
pylist=[[[1], [[2]]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
dict(
pylist=[[[[1]], [2]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
])
def testDefaultInnerShapeForPylistHelper(self,
pylist,
ragged_rank,
inner_shape=None,
exception=None,
message=None):
"""Tests for the _default_inner_shape_for_pylist helper function."""
if exception is not None:
self.assertRaisesRegexp(
exception, message,
ragged.ragged_factory_ops._default_inner_shape_for_pylist, pylist,
ragged_rank)
else:
self.assertEqual(
ragged.ragged_factory_ops._default_inner_shape_for_pylist(
pylist, ragged_rank), inner_shape)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  googletest.main()
|
|
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import netaddr
import re
import time
import xml.etree.ElementTree as ET
import ciscoconfparse
from ncclient import manager
from oslo_config import cfg
from neutron.i18n import _LE, _LI, _LW
from networking_cisco.plugins.cisco.cfg_agent import cfg_exceptions as cfg_exc
from networking_cisco.plugins.cisco.cfg_agent.device_drivers import (
devicedriver_api)
from networking_cisco.plugins.cisco.cfg_agent.device_drivers.csr1kv import (
cisco_csr1kv_snippets as snippets)
LOG = logging.getLogger(__name__)
# N1kv constants
T1_PORT_NAME_PREFIX = 't1_p:' # T1 port/network is for VXLAN
T2_PORT_NAME_PREFIX = 't2_p:' # T2 port/network is for VLAN
class CSR1kvRoutingDriver(devicedriver_api.RoutingDriverBase):
"""CSR1kv Routing Driver.
This driver encapsulates the configuration logic via NETCONF protocol to
configure a CSR1kv Virtual Router (IOS-XE based) for implementing
Neutron L3 services. These services include routing, NAT and floating
IPs (as per Neutron terminology).
"""
DEV_NAME_LEN = 14
def __init__(self, **device_params):
try:
self._csr_host = device_params['management_ip_address']
self._csr_ssh_port = device_params['protocol_port']
credentials = device_params['credentials']
if credentials:
self._csr_user = credentials['username']
self._csr_password = credentials['password']
self._timeout = cfg.CONF.cfg_agent.device_connection_timeout
self._csr_conn = None
self._intfs_enabled = False
except KeyError as e:
LOG.error(_LE("Missing device parameter:%s. Aborting "
"CSR1kvRoutingDriver initialization"), e)
raise cfg_exc.CSR1kvInitializationException()
###### Public Functions ########
def router_added(self, ri):
self._csr_create_vrf(ri)
def router_removed(self, ri):
self._csr_remove_vrf(ri)
def internal_network_added(self, ri, port):
self._csr_create_subinterface(ri, port)
if port.get('ha_info') is not None and ri.ha_info['ha:enabled']:
self._csr_add_ha(ri, port)
def internal_network_removed(self, ri, port):
self._csr_remove_subinterface(port)
def external_gateway_added(self, ri, ex_gw_port):
self._csr_create_subinterface(ri, ex_gw_port)
ex_gw_ip = ex_gw_port['subnets'][0]['gateway_ip']
if ex_gw_ip:
#Set default route via this network's gateway ip
self._csr_add_default_route(ri, ex_gw_ip)
def external_gateway_removed(self, ri, ex_gw_port):
ex_gw_ip = ex_gw_port['subnets'][0]['gateway_ip']
if ex_gw_ip:
#Remove default route via this network's gateway ip
self._csr_remove_default_route(ri, ex_gw_ip)
#Finally, remove external network subinterface
self._csr_remove_subinterface(ex_gw_port)
def enable_internal_network_NAT(self, ri, port, ex_gw_port):
self._csr_add_internalnw_nat_rules(ri, port, ex_gw_port)
def disable_internal_network_NAT(self, ri, port, ex_gw_port):
self._csr_remove_internalnw_nat_rules(ri, [port], ex_gw_port)
def floating_ip_added(self, ri, ex_gw_port, floating_ip, fixed_ip):
self._csr_add_floating_ip(ri, floating_ip, fixed_ip)
def floating_ip_removed(self, ri, ex_gw_port, floating_ip, fixed_ip):
self._csr_remove_floating_ip(ri, ex_gw_port, floating_ip, fixed_ip)
def routes_updated(self, ri, action, route):
self._csr_update_routing_table(ri, action, route)
    def clear_connection(self):
        # Drop the cached ncclient session; the next _get_connection()
        # call will reconnect.
        self._csr_conn = None
##### Internal Functions ####
def _csr_create_subinterface(self, ri, port):
vrf_name = self._csr_get_vrf_name(ri)
ip_cidr = port['ip_cidr']
netmask = netaddr.IPNetwork(ip_cidr).netmask
gateway_ip = ip_cidr.split('/')[0]
subinterface = self._get_interface_name_from_hosting_port(port)
vlan = self._get_interface_vlan_from_hosting_port(port)
self._create_subinterface(subinterface, vlan, vrf_name,
gateway_ip, netmask)
def _csr_remove_subinterface(self, port):
subinterface = self._get_interface_name_from_hosting_port(port)
self._remove_subinterface(subinterface)
def _csr_add_ha(self, ri, port):
func_dict = {
'HSRP': CSR1kvRoutingDriver._csr_add_ha_HSRP,
'VRRP': CSR1kvRoutingDriver._csr_add_ha_VRRP,
'GBLP': CSR1kvRoutingDriver._csr_add_ha_GBLP
}
#Invoke the right function for the ha type
func_dict[ri.ha_info['ha:type']](self, ri, port)
def _csr_add_ha_HSRP(self, ri, port):
priority = ri.ha_info['priority']
port_ha_info = port['ha_info']
group = port_ha_info['group']
ip = port_ha_info['virtual_port']['fixed_ips'][0]['ip_address']
if ip and group and priority:
vrf_name = self._csr_get_vrf_name(ri)
subinterface = self._get_interface_name_from_hosting_port(port)
self._set_ha_HSRP(subinterface, vrf_name, priority, group, ip)
def _csr_add_ha_VRRP(self, ri, port):
raise NotImplementedError()
def _csr_add_ha_GBLP(self, ri, port):
raise NotImplementedError()
def _csr_remove_ha(self, ri, port):
pass
def _csr_add_internalnw_nat_rules(self, ri, port, ex_port):
vrf_name = self._csr_get_vrf_name(ri)
in_vlan = self._get_interface_vlan_from_hosting_port(port)
acl_no = 'acl_' + str(in_vlan)
internal_cidr = port['ip_cidr']
internal_net = netaddr.IPNetwork(internal_cidr).network
netmask = netaddr.IPNetwork(internal_cidr).hostmask
inner_intfc = self._get_interface_name_from_hosting_port(port)
outer_intfc = self._get_interface_name_from_hosting_port(ex_port)
self._nat_rules_for_internet_access(acl_no, internal_net,
netmask, inner_intfc,
outer_intfc, vrf_name)
def _csr_remove_internalnw_nat_rules(self, ri, ports, ex_port):
acls = []
#First disable nat in all inner ports
for port in ports:
in_intfc_name = self._get_interface_name_from_hosting_port(port)
inner_vlan = self._get_interface_vlan_from_hosting_port(port)
acls.append("acl_" + str(inner_vlan))
self._remove_interface_nat(in_intfc_name, 'inside')
#Wait for two second
LOG.debug("Sleep for 2 seconds before clearing NAT rules")
time.sleep(2)
#Clear the NAT translation table
self._remove_dyn_nat_translations()
# Remove dynamic NAT rules and ACLs
vrf_name = self._csr_get_vrf_name(ri)
ext_intfc_name = self._get_interface_name_from_hosting_port(ex_port)
for acl in acls:
self._remove_dyn_nat_rule(acl, ext_intfc_name, vrf_name)
def _csr_add_default_route(self, ri, gw_ip):
vrf_name = self._csr_get_vrf_name(ri)
self._add_default_static_route(gw_ip, vrf_name)
def _csr_remove_default_route(self, ri, gw_ip):
vrf_name = self._csr_get_vrf_name(ri)
self._remove_default_static_route(gw_ip, vrf_name)
def _csr_add_floating_ip(self, ri, floating_ip, fixed_ip):
vrf_name = self._csr_get_vrf_name(ri)
self._add_floating_ip(floating_ip, fixed_ip, vrf_name)
def _csr_remove_floating_ip(self, ri, ex_gw_port, floating_ip, fixed_ip):
vrf_name = self._csr_get_vrf_name(ri)
out_intfc_name = self._get_interface_name_from_hosting_port(ex_gw_port)
# First remove NAT from outer interface
self._remove_interface_nat(out_intfc_name, 'outside')
#Clear the NAT translation table
self._remove_dyn_nat_translations()
#Remove the floating ip
self._remove_floating_ip(floating_ip, fixed_ip, vrf_name)
#Enable NAT on outer interface
self._add_interface_nat(out_intfc_name, 'outside')
def _csr_update_routing_table(self, ri, action, route):
vrf_name = self._csr_get_vrf_name(ri)
destination_net = netaddr.IPNetwork(route['destination'])
dest = destination_net.network
dest_mask = destination_net.netmask
next_hop = route['nexthop']
if action is 'replace':
self._add_static_route(dest, dest_mask, next_hop, vrf_name)
elif action is 'delete':
self._remove_static_route(dest, dest_mask, next_hop, vrf_name)
else:
LOG.error(_LE('Unknown route command %s'), action)
def _csr_create_vrf(self, ri):
vrf_name = self._csr_get_vrf_name(ri)
self._create_vrf(vrf_name)
def _csr_remove_vrf(self, ri):
vrf_name = self._csr_get_vrf_name(ri)
self._remove_vrf(vrf_name)
    def _csr_get_vrf_name(self, ri):
        # The VRF name is the router name truncated to the IOS device-name
        # length limit (DEV_NAME_LEN).
        return ri.router_name()[:self.DEV_NAME_LEN]
def _get_connection(self):
"""Make SSH connection to the CSR.
The external ncclient library is used for creating this connection.
This method keeps state of any existing connections and reuses them if
already connected. Also CSR1kv's interfaces (except management) are
disabled by default when it is booted. So if connecting for the first
time, driver will enable all other interfaces and keep that status in
the `_intfs_enabled` flag.
"""
try:
if self._csr_conn and self._csr_conn.connected:
return self._csr_conn
else:
self._csr_conn = manager.connect(host=self._csr_host,
port=self._csr_ssh_port,
username=self._csr_user,
password=self._csr_password,
device_params={'name': "csr"},
timeout=self._timeout)
if not self._intfs_enabled:
self._intfs_enabled = self._enable_intfs(self._csr_conn)
return self._csr_conn
except Exception as e:
conn_params = {'host': self._csr_host, 'port': self._csr_ssh_port,
'user': self._csr_user,
'timeout': self._timeout, 'reason': e.message}
raise cfg_exc.CSR1kvConnectionException(**conn_params)
def _get_interface_name_from_hosting_port(self, port):
vlan = self._get_interface_vlan_from_hosting_port(port)
int_no = self._get_interface_no_from_hosting_port(port)
intfc_name = 'GigabitEthernet%s.%s' % (int_no, vlan)
return intfc_name
@staticmethod
def _get_interface_vlan_from_hosting_port(port):
return port['hosting_info']['segmentation_id']
@staticmethod
def _get_interface_no_from_hosting_port(port):
"""Calculate interface number from the hosting port's name.
Interfaces in the CSR1kv are created in pairs (T1 and T2) where
T1 interface is used for VLAN and T2 interface for VXLAN traffic
respectively. On the neutron side these are named T1 and T2 ports and
follows the naming convention: <Tx_PORT_NAME_PREFIX>:<PAIR_INDEX>
where the `PORT_NAME_PREFIX` indicates either VLAN or VXLAN and
`PAIR_INDEX` is the pair number. `PAIR_INDEX` starts at 1.
In CSR1kv, GigabitEthernet 0 is not present and GigabitEthernet 1
is used as a management interface (Note: this might change in
future). So the first (T1,T2) pair corresponds to
(GigabitEthernet 2, GigabitEthernet 3) and so forth. This function
extracts the `PAIR_INDEX` and calculates the corresponding interface
number.
:param port: neutron port corresponding to the interface.
:return: number of the interface (eg: 1 in case of GigabitEthernet1)
"""
_name = port['hosting_info']['hosting_port_name']
if_type = _name.split(':')[0] + ':'
if if_type == T1_PORT_NAME_PREFIX:
return str(int(_name.split(':')[1]) * 2)
elif if_type == T2_PORT_NAME_PREFIX:
return str(int(_name.split(':')[1]) * 2 + 1)
else:
params = {'attribute': 'hosting_port_name', 'value': _name}
raise cfg_exc.CSR1kvUnknownValueException(**params)
def _get_interfaces(self):
"""Get a list of interfaces on this hosting device.
:return: List of the interfaces
"""
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
intfs_raw = parse.find_lines("^interface GigabitEthernet")
intfs = [raw_if.strip().split(' ')[1] for raw_if in intfs_raw]
LOG.info(_LI("Interfaces:%s"), intfs)
return intfs
def _get_interface_ip(self, interface_name):
"""Get the ip address for an interface.
:param interface_name: interface_name as a string
:return: ip address of interface as a string
"""
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
children = parse.find_children("^interface %s" % interface_name)
for line in children:
if 'ip address' in line:
ip_address = line.strip().split(' ')[2]
LOG.info(_LI("IP Address:%s"), ip_address)
return ip_address
LOG.warning(_LW("Cannot find interface: %s"), interface_name)
return None
def _interface_exists(self, interface):
"""Check whether interface exists."""
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
intfs_raw = parse.find_lines("^interface " + interface)
return len(intfs_raw) > 0
def _enable_intfs(self, conn):
"""Enable the interfaces of a CSR1kv Virtual Router.
When the virtual router first boots up, all interfaces except
management are down. This method will enable all data interfaces.
Note: In CSR1kv, GigabitEthernet 0 is not present. GigabitEthernet 1
is used as management and GigabitEthernet 2 and up are used for data.
This might change in future releases.
Currently only the second and third Gig interfaces corresponding to a
single (T1,T2) pair and configured as trunk for VLAN and VXLAN
is enabled.
:param conn: Connection object
:return: True or False
"""
#ToDo(Hareesh): Interfaces are hard coded for now. Make it dynamic.
interfaces = ['GigabitEthernet 2', 'GigabitEthernet 3']
try:
for i in interfaces:
confstr = snippets.ENABLE_INTF % i
rpc_obj = conn.edit_config(target='running', config=confstr)
if self._check_response(rpc_obj, 'ENABLE_INTF'):
LOG.info(_LI("Enabled interface %s "), i)
time.sleep(1)
except Exception:
return False
return True
def _get_vrfs(self):
"""Get the current VRFs configured in the device.
:return: A list of vrf names as string
"""
vrfs = []
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
vrfs_raw = parse.find_lines("^ip vrf")
for line in vrfs_raw:
# raw format ['ip vrf <vrf-name>',....]
vrf_name = line.strip().split(' ')[2]
vrfs.append(vrf_name)
LOG.info(_LI("VRFs:%s"), vrfs)
return vrfs
def _get_capabilities(self):
"""Get the servers NETCONF capabilities.
:return: List of server capabilities.
"""
conn = self._get_connection()
capabilities = []
for c in conn.server_capabilities:
capabilities.append(c)
LOG.debug("Server capabilities: %s", capabilities)
return capabilities
    def _get_running_config(self):
        """Get the CSR's current running config.
        :return: Current IOS running config as multiline string
        """
        # NOTE(review): despite the docstring, this returns a *list* of
        # config lines (callers feed it straight into CiscoConfParse), and
        # it implicitly returns None when get_config() yields a falsy
        # reply — confirm callers tolerate that.
        conn = self._get_connection()
        config = conn.get_config(source="running")
        if config:
            # The config text is two levels inside the NETCONF reply XML;
            # only the positional indexing root[0][0] is visible here —
            # presumably <rpc-reply><data><config text>; TODO confirm.
            root = ET.fromstring(config._raw)
            running_config = root[0][0]
            # Split into lines, folding any '\r' into the line break.
            rgx = re.compile("\r*\n+")
            ioscfg = rgx.split(running_config.text)
            return ioscfg
    def _check_acl(self, acl_no, network, netmask):
        """Check a ACL config exists in the running config.
        :param acl_no: access control list (ACL) number
        :param network: network which this ACL permits
        :param netmask: netmask of the network
        :return: True if the ACL exists with exactly the expected permit
            line, False if it is absent or its permit line differs
        """
        # The two config lines the ACL should consist of: the header and
        # its single standard permit entry (note the leading space on the
        # child line, matching IOS running-config formatting).
        exp_cfg_lines = ['ip access-list standard ' + str(acl_no),
                         ' permit ' + str(network) + ' ' + str(netmask)]
        ioscfg = self._get_running_config()
        parse = ciscoconfparse.CiscoConfParse(ioscfg)
        acls_raw = parse.find_children(exp_cfg_lines[0])
        if acls_raw:
            if exp_cfg_lines[1] in acls_raw:
                return True
            # ACL header exists but the permit entry doesn't match.
            LOG.error(_LE("Mismatch in ACL configuration for %s"), acl_no)
            return False
        LOG.debug("%s is not present in config", acl_no)
        return False
def _cfg_exists(self, cfg_str):
"""Check a partial config string exists in the running config.
:param cfg_str: config string to check
:return : True or False
"""
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
cfg_raw = parse.find_lines("^" + cfg_str)
LOG.debug("_cfg_exists(): Found lines %s", cfg_raw)
return len(cfg_raw) > 0
def _set_interface(self, name, ip_address, mask):
conn = self._get_connection()
confstr = snippets.SET_INTC % (name, ip_address, mask)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'SET_INTC')
def _create_vrf(self, vrf_name):
try:
conn = self._get_connection()
confstr = snippets.CREATE_VRF % vrf_name
rpc_obj = conn.edit_config(target='running', config=confstr)
if self._check_response(rpc_obj, 'CREATE_VRF'):
LOG.info(_LI("VRF %s successfully created"), vrf_name)
except Exception:
LOG.exception(_LE("Failed creating VRF %s"), vrf_name)
def _remove_vrf(self, vrf_name):
if vrf_name in self._get_vrfs():
conn = self._get_connection()
confstr = snippets.REMOVE_VRF % vrf_name
rpc_obj = conn.edit_config(target='running', config=confstr)
if self._check_response(rpc_obj, 'REMOVE_VRF'):
LOG.info(_LI("VRF %s removed"), vrf_name)
else:
LOG.warning(_LW("VRF %s not present"), vrf_name)
def _create_subinterface(self, subinterface, vlan_id, vrf_name, ip, mask):
if vrf_name not in self._get_vrfs():
LOG.error(_LE("VRF %s not present"), vrf_name)
confstr = snippets.CREATE_SUBINTERFACE % (subinterface, vlan_id,
vrf_name, ip, mask)
self._edit_running_config(confstr, 'CREATE_SUBINTERFACE')
def _remove_subinterface(self, subinterface):
#Optional : verify this is the correct subinterface
if self._interface_exists(subinterface):
confstr = snippets.REMOVE_SUBINTERFACE % subinterface
self._edit_running_config(confstr, 'REMOVE_SUBINTERFACE')
def _set_ha_HSRP(self, subinterface, vrf_name, priority, group, ip):
if vrf_name not in self._get_vrfs():
LOG.error(_LE("VRF %s not present"), vrf_name)
confstr = snippets.SET_INTC_HSRP % (subinterface, vrf_name, group,
priority, group, ip)
action = "SET_INTC_HSRP (Group: %s, Priority: % s)" % (group, priority)
self._edit_running_config(confstr, action)
def _remove_ha_HSRP(self, subinterface, group):
confstr = snippets.REMOVE_INTC_HSRP % (subinterface, group)
action = ("REMOVE_INTC_HSRP (subinterface:%s, Group:%s)"
% (subinterface, group))
self._edit_running_config(confstr, action)
def _get_interface_cfg(self, interface):
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
return parse.find_children('interface ' + interface)
def _nat_rules_for_internet_access(self, acl_no, network,
netmask,
inner_intfc,
outer_intfc,
vrf_name):
"""Configure the NAT rules for an internal network.
Configuring NAT rules in the CSR1kv is a three step process. First
create an ACL for the IP range of the internal network. Then enable
dynamic source NATing on the external interface of the CSR for this
ACL and VRF of the neutron router. Finally enable NAT on the
interfaces of the CSR where the internal and external networks are
connected.
:param acl_no: ACL number of the internal network.
:param network: internal network
:param netmask: netmask of the internal network.
:param inner_intfc: (name of) interface connected to the internal
network
:param outer_intfc: (name of) interface connected to the external
network
:param vrf_name: VRF corresponding to this virtual router
:return: True if configuration succeeded
:raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
CSR1kvConfigException
"""
conn = self._get_connection()
# Duplicate ACL creation throws error, so checking
# it first. Remove it in future as this is not common in production
acl_present = self._check_acl(acl_no, network, netmask)
if not acl_present:
confstr = snippets.CREATE_ACL % (acl_no, network, netmask)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'CREATE_ACL')
confstr = snippets.SET_DYN_SRC_TRL_INTFC % (acl_no, outer_intfc,
vrf_name)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'CREATE_SNAT')
confstr = snippets.SET_NAT % (inner_intfc, 'inside')
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'SET_NAT')
confstr = snippets.SET_NAT % (outer_intfc, 'outside')
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'SET_NAT')
def _add_interface_nat(self, intfc_name, intfc_type):
conn = self._get_connection()
confstr = snippets.SET_NAT % (intfc_name, intfc_type)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'SET_NAT ' + intfc_type)
def _remove_interface_nat(self, intfc_name, intfc_type):
conn = self._get_connection()
confstr = snippets.REMOVE_NAT % (intfc_name, intfc_type)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'REMOVE_NAT ' + intfc_type)
def _remove_dyn_nat_rule(self, acl_no, outer_intfc_name, vrf_name):
conn = self._get_connection()
confstr = snippets.SNAT_CFG % (acl_no, outer_intfc_name, vrf_name)
if self._cfg_exists(confstr):
confstr = snippets.REMOVE_DYN_SRC_TRL_INTFC % (acl_no,
outer_intfc_name,
vrf_name)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'REMOVE_DYN_SRC_TRL_INTFC')
confstr = snippets.REMOVE_ACL % acl_no
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'REMOVE_ACL')
def _remove_dyn_nat_translations(self):
conn = self._get_connection()
confstr = snippets.CLEAR_DYN_NAT_TRANS
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'CLEAR_DYN_NAT_TRANS')
def _add_floating_ip(self, floating_ip, fixed_ip, vrf):
conn = self._get_connection()
confstr = snippets.SET_STATIC_SRC_TRL % (fixed_ip, floating_ip, vrf)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'SET_STATIC_SRC_TRL')
def _remove_floating_ip(self, floating_ip, fixed_ip, vrf):
conn = self._get_connection()
confstr = snippets.REMOVE_STATIC_SRC_TRL % (fixed_ip, floating_ip, vrf)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'REMOVE_STATIC_SRC_TRL')
def _get_floating_ip_cfg(self):
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
res = parse.find_lines('ip nat inside source static')
return res
def _add_static_route(self, dest, dest_mask, next_hop, vrf):
conn = self._get_connection()
confstr = snippets.SET_IP_ROUTE % (vrf, dest, dest_mask, next_hop)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'SET_IP_ROUTE')
def _remove_static_route(self, dest, dest_mask, next_hop, vrf):
conn = self._get_connection()
confstr = snippets.REMOVE_IP_ROUTE % (vrf, dest, dest_mask, next_hop)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'REMOVE_IP_ROUTE')
def _get_static_route_cfg(self):
ioscfg = self._get_running_config()
parse = ciscoconfparse.CiscoConfParse(ioscfg)
return parse.find_lines('ip route')
def _add_default_static_route(self, gw_ip, vrf):
conn = self._get_connection()
confstr = snippets.DEFAULT_ROUTE_CFG % (vrf, gw_ip)
if not self._cfg_exists(confstr):
confstr = snippets.SET_DEFAULT_ROUTE % (vrf, gw_ip)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'SET_DEFAULT_ROUTE')
def _remove_default_static_route(self, gw_ip, vrf):
conn = self._get_connection()
confstr = snippets.DEFAULT_ROUTE_CFG % (vrf, gw_ip)
if self._cfg_exists(confstr):
confstr = snippets.REMOVE_DEFAULT_ROUTE % (vrf, gw_ip)
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, 'REMOVE_DEFAULT_ROUTE')
def _edit_running_config(self, confstr, snippet):
conn = self._get_connection()
rpc_obj = conn.edit_config(target='running', config=confstr)
self._check_response(rpc_obj, snippet)
    @staticmethod
    def _check_response(rpc_obj, snippet_name):
        """Check the RPC reply object for the status of a config operation.
        A successful edit produces a reply whose XML contains an
        ``<ok />`` element. On failure the CSR1kv returns an
        ``<rpc-error>`` element carrying ``error-type`` and ``error-tag``
        children, which are extracted and reported via the raised
        exception.
        :param rpc_obj: RPC reply object returned by ncclient edit_config
        :param snippet_name: name of the config snippet that was executed
        :return: True if the config operation completed successfully
        :raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
            CSR1kvConfigException
        """
        LOG.debug("RPCReply for %(snippet_name)s is %(rpc_obj)s",
                  {'snippet_name': snippet_name, 'rpc_obj': rpc_obj.xml})
        xml_str = rpc_obj.xml
        if "<ok />" in xml_str:
            LOG.debug("RPCReply for %s is OK", snippet_name)
            LOG.info(_LI("%s successfully executed"), snippet_name)
            return True
        # Not Ok, we throw a ConfigurationException
        # NOTE(review): reads ncclient's private _root attribute and
        # assumes the first two children of the <rpc-error> element are
        # error-type and error-tag, in that order — confirm against the
        # ncclient version in use.
        e_type = rpc_obj._root[0][0].text
        e_tag = rpc_obj._root[0][1].text
        params = {'snippet': snippet_name, 'type': e_type, 'tag': e_tag}
        raise cfg_exc.CSR1kvConfigException(**params)
|
|
from casexml.apps.stock.models import StockTransaction
from corehq.apps.commtrack.models import StockState
from corehq.apps.sms.api import incoming
from corehq.util.translation import localize
from custom.ilsgateway.models import SupplyPointStatus, SupplyPointStatusTypes, SupplyPointStatusValues
from custom.ilsgateway.tanzania.reminders import SOH_CONFIRM, SOH_BAD_FORMAT, LANGUAGE_CONFIRM
from custom.ilsgateway.tests.handlers.utils import ILSTestScript
class ILSSoHTest(ILSTestScript):
    def setUp(self):
        """No SOH-specific fixtures; defer entirely to ILSTestScript.setUp."""
        super(ILSSoHTest, self).setUp()
    def test_stock_on_hand(self):
        """A full 'Hmk' report creates transactions, states and statuses.
        Reporting stock on hand for three products should create one
        StockTransaction and one StockState per product, mark the SOH
        status SUBMITTED, and record a REMINDER_SENT loss/adjustment
        status for the reporter's location.
        """
        with localize('sw'):
            response = unicode(SOH_CONFIRM)
        script = """
            5551234 > Hmk Id 400 Dp 569 Ip 678
            5551234 < %(soh_confirm)s
            """ % {"soh_confirm": response}
        self.run_script(script)
        # One transaction and one state row per reported product.
        self.assertEqual(StockTransaction.objects.all().count(), 3)
        self.assertEqual(StockState.objects.all().count(), 3)
        quantities = [400, 569, 678]
        self.assertEqual(2, SupplyPointStatus.objects.count())
        soh_status = SupplyPointStatus.objects.get(status_type=SupplyPointStatusTypes.SOH_FACILITY)
        self.assertEqual(self.user1.location_id, soh_status.location_id)
        self.assertEqual(SupplyPointStatusValues.SUBMITTED, soh_status.status_value)
        la_status = SupplyPointStatus.objects.get(status_type=SupplyPointStatusTypes.LOSS_ADJUSTMENT_FACILITY)
        self.assertEqual(self.user1.location_id, la_status.location_id)
        self.assertEqual(SupplyPointStatusValues.REMINDER_SENT, la_status.status_value)
        # Transactions in pk order should match the report order above.
        for idx, stock_transaction in enumerate(StockTransaction.objects.all().order_by('pk')):
            self.assertEqual(stock_transaction.stock_on_hand, quantities[idx])
def test_stock_on_hand_stockouts(self):
with localize('sw'):
response = unicode(SOH_CONFIRM)
script = """
5551234 > Hmk Id 0 Dp 0 Ip 0
5551234 < %(soh_confirm)s
""" % {"soh_confirm": response}
self.run_script(script)
self.assertEqual(StockTransaction.objects.filter(case_id=self.facility_sp_id).count(), 3)
self.assertEqual(StockState.objects.filter(case_id=self.facility_sp_id).count(), 3)
for stock_transaction in StockTransaction.objects.filter(case_id=self.facility_sp_id):
self.assertTrue(stock_transaction.stock_on_hand == 0)
    def test_stock_on_hand_update(self):
        """Repeated SOH reports update StockState and append transactions.
        Each iteration submits a fresh report for the same three products;
        `pkmax` tracks the highest transaction pk seen before the current
        report so the per-iteration assertions only consider rows created
        by that report.
        """
        with localize('sw'):
            response = unicode(SOH_CONFIRM)
        prod_amt_configs = [
            (('Id', 100), ('Dp', 200), ('Ip', 300)),
            (('Id', 0), ('Dp', 100), ('Ip', 200)),
            (('Id', 100), ('Dp', 0), ('Ip', 0)),
            (('Id', 50), ('Dp', 150), ('Ip', 250)),
            (('Id', 0), ('Dp', 0), ('Ip', 0)),
        ]
        pkmax = -1
        for prod_amt_config in prod_amt_configs:
            this_pkmax = pkmax
            product_string = ' '.join(['{p} {v}'.format(p=p, v=v) for p, v in prod_amt_config])
            script = """
                5551234 > Hmk {products}
                5551234 < {soh_confirm}
                """.format(
                products=product_string,
                soh_confirm=response
            )
            self.run_script(script)
            # Exactly three new stockonhand/stockout transactions (pk >
            # pkmax) per report.
            self.assertEqual(
                StockTransaction.objects.filter(
                    case_id=self.facility_sp_id,
                    type__in=['stockonhand', 'stockout'],
                    pk__gt=pkmax
                ).count(), 3
            )
            # StockState rows are updated in place, never duplicated.
            self.assertEqual(StockState.objects.count(), 3)
            for code, amt in prod_amt_config:
                ps = StockState.objects.get(
                    sql_product__code__iexact=code,
                    case_id=self.facility_sp_id
                )
                self.assertEqual(amt, ps.stock_on_hand)
                pr = StockTransaction.objects.get(
                    case_id=self.facility_sp_id,
                    pk__gt=pkmax, sql_product__code__iexact=code, type__in=['stockonhand', 'stockout']
                )
                self.assertEqual(amt, pr.stock_on_hand)
                this_pkmax = max(this_pkmax, pr.pk)
            # Advance the watermark only after all products are verified.
            pkmax = max(this_pkmax, pkmax)
def test_product_aliases(self):
    """Product alias codes (iucd, Depo, IMPL) are accepted in reports."""
    with localize('sw'):
        confirmation = unicode(SOH_CONFIRM)
    for report_line in ('Hmk iucd 400', 'Hmk Depo 569', 'Hmk IMPL 678'):
        self.run_script("""
            5551234 > {0}
            5551234 < {1}
            """.format(report_line, confirmation))
def test_stock_on_hand_delimiter_standard(self):
    """A report using standard single-space separators is confirmed."""
    with localize('sw'):
        confirmation = unicode(SOH_CONFIRM)
    # standard spacing
    self.run_script("""
        5551234 > hmk fs100 md100 ff100 dx100 bp100 pc100 qi100
        5551234 < {0}
        """.format(confirmation))
def test_stock_on_hand_delimiter_no_spaces(self):
    """A report with no whitespace between entries is still parsed."""
    with localize('sw'):
        confirmation = unicode(SOH_CONFIRM)
    # no spaces
    self.run_script("""
        5551234 > hmk fs100md100ff100dx100bp100pc100qi100
        5551234 < {0}
        """.format(confirmation))
def test_stock_on_hand_delimiters_mixed_spacing(self):
    """A report with inconsistent spacing between entries is still parsed."""
    with localize('sw'):
        confirmation = unicode(SOH_CONFIRM)
    # mixed spacing between code and quantity
    self.run_script("""
        5551234 > hmk fs100 md 100 ff100 dx 100bp 100 pc100 qi100
        5551234 < {0}
        """.format(confirmation))
def test_stock_on_hand_delimiters_all_spaced_out(self):
    """A fully space-delimited report is still parsed."""
    with localize('sw'):
        confirmation = unicode(SOH_CONFIRM)
    # all space delimited
    self.run_script("""
        5551234 > hmk fs 100 md 100 ff 100 dx 100 bp 100 pc 100 qi 100
        5551234 < {0}
        """.format(confirmation))
def test_stock_on_hand_delimiters_commas(self):
    """A comma-delimited report is still parsed."""
    with localize('sw'):
        confirmation = unicode(SOH_CONFIRM)
    # commas
    self.run_script("""
        5551234 > hmk fs100,md100,ff100
        5551234 < {0}
        """.format(confirmation))
def test_stock_on_hand_delimiters_commas_and_spaces(self):
    """A report delimited by commas followed by spaces is still parsed."""
    with localize('sw'):
        confirmation = unicode(SOH_CONFIRM)
    # commas plus spaces
    self.run_script("""
        5551234 > hmk fs100, md100, ff100
        5551234 < {0}
        """.format(confirmation))
def test_stock_on_hand_delimiters_extra_spaces(self):
    """A report containing runs of extra spaces is still parsed."""
    with localize('sw'):
        confirmation = unicode(SOH_CONFIRM)
    # extra spaces
    self.run_script("""
        5551234 > hmk fs  100   md 100 ff 100  pc  100
        5551234 < {0}
        """.format(confirmation))
def test_stock_on_hand_mixed_delimiters_and_spacing(self):
    """A report mixing commas and arbitrary spacing is still parsed."""
    with localize('sw'):
        confirmation = unicode(SOH_CONFIRM)
    # mixed - commas, spacing
    self.run_script("""
        5551234 > hmk fs100 , md100,ff 100 pc 100 qi, 1000,bp, 100, dx,100
        5551234 < {0}
        """.format(confirmation))
def test_stock_on_hand_invalid_code(self):
    """An unknown product code yields the bad-format reply, but valid
    codes in the same message are still recorded."""
    with localize('sw'):
        error_message = unicode(SOH_BAD_FORMAT)
    self.run_script("""
        5551234 > hmk asdds 100 ff 100
        5551234 < {0}
        """.format(error_message))
    ff_state = StockState.objects.get(
        sql_product__code='ff',
        case_id=self.facility_sp_id
    )
    self.assertEqual(100, ff_state.stock_on_hand)
def test_stock_on_hand_language_swahili(self):
    """The confirmation for a report is sent in Swahili by default."""
    with localize('sw'):
        confirmation = unicode(SOH_CONFIRM)
    self.run_script("""
        5551234 > hmk fs100md100
        5551234 < {0}
        """.format(confirmation))
def test_stock_on_hand_language_english(self):
    """Switching the preferred language changes the confirmation text."""
    with localize('en'):
        language_confirm = unicode(LANGUAGE_CONFIRM)
        soh_confirm = unicode(SOH_CONFIRM)
    # Switch the user to English, then submit a report.
    self.run_script("""
        5551234 > language en
        5551234 < {0}
        """.format(unicode(language_confirm % dict(language='English'))))
    self.run_script("""
        5551234 > hmk fs100md100
        5551234 < {0}
        """.format(soh_confirm))
    # Switch back to Swahili so later tests see the default language.
    with localize('sw'):
        language_confirm = unicode(LANGUAGE_CONFIRM)
    self.run_script("""
        5551234 > language sw
        5551234 < {0}
        """.format(unicode(language_confirm % dict(language='Swahili'))))
def test_multiline_message(self):
    """A report split across several lines is still parsed."""
    expected = {
        'fs': 100,
        'md': 100,
        'ff': 100,
        'pc': 100
    }
    verified_number = self.user1.get_verified_number()
    msg = incoming(
        verified_number.phone_number,
        """
        hmk
        fs 100 md 100 ff 100 pc 100
        """,
        verified_number.backend_id
    )
    self.assertIsNotNone(msg)
    reported = StockState.objects.filter(
        case_id=self.facility_sp_id
    ).values_list('sql_product__code', 'stock_on_hand')
    for product_code, quantity in reported:
        self.assertEqual(expected[product_code], quantity)
|
|
#
# Copyright 2009-2015 Oktie Hassanzadeh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import hashlib
import urllib2
import urllib
import csv
from django import http
from django import shortcuts
from django import template
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.core import urlresolvers
from django.db.models import Q
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import ListView
from django.views.decorators.cache import never_cache
from django.views.decorators.vary import vary_on_headers
import databrowse
from databrowse.datastructures import *
import forms
from models import *
import models
import operations
import rdf
# Project-wide configuration mapping taken from Django settings; keys such
# as ROOT, HOME, RDF_SERVER, D2R_SERVER and SOURCES_FILE_LOC are read
# throughout this module.
CONFIG = settings.CONFIG
class HttpResponseSeeOther(http.HttpResponseRedirect):
    """HTTP redirect response using status 303 (See Other) instead of 302."""
    status_code = 303
def homepage(request):
    """Render the databrowse homepage listing every browsable model."""
    browsable_models = [
        Trial, Intervention, Condition,
        Country, City, State, Location,
        Eligibility, Keyword, Mesh_term,
        Condition_browse, Intervention_browse,
        Reference, Link, Investigator, Responsible_party,
        Outcome, Arm_group,
        Contact, Address, Facility, Oversight_info,
        Overall_official, Sponsor, Sponsor_group,
        Provenance,
    ]
    databrowse.site.root_url = CONFIG['ROOT']
    easy_models = [EasyModel(databrowse.site, model) for model in browsable_models]
    return shortcuts.render_to_response(
        'databrowse/homepage.html',
        {'model_list': easy_models,
         'root_url': databrowse.site.root_url,
         'flat_page_model': FlatPage})
#def gen_view(request, **kwargs):
# return list_detail.object_detail(request, **kwargs)
@vary_on_headers('Accept', 'User-Agent')
def multi_format_object_detail(request, **kwargs):
    """A view that handles multiple format output.

    By default, the view uses the object_detail generic view. If the HTTP
    request parameter ``format`` is set to ``rdf`` (or the client's Accept
    header mentions RDF), the output is served from the D2R server instead.
    """
    # For pubentry, the type is passed in but we really don't need it. If we
    # don't remove it, object_detail is going to complain about the extra
    # field 'type'.
    if kwargs.get('type', None) is not None:
        kwargs.pop('type')
    if 'uid' in kwargs:
        uid_or_slug = kwargs.pop('uid')
    else:
        uid_or_slug = kwargs['slug']
    # Serve the RDF page if the user explicitly wants RDF.
    if request.GET.get('format', '').lower() == 'rdf':
        rdf_url = '%s%s/%s' % (CONFIG['RDF_SERVER'],
                               kwargs['extra_context']['model'].name(),
                               uid_or_slug)
        # For debugging purposes, if redirect=1 is specified, we redirect
        # to the d2r server. Otherwise, we load the RDF output from the d2r
        # server and return it to the user.
        if request.GET.get('redirect'):
            # BUG FIX: HttpResponse() does not accept a ``status_code``
            # keyword argument (it raised TypeError); ``status`` alone sets
            # the response code.
            response = http.HttpResponse(content="", status=303)
            response["Location"] = rdf_url
            return response
        else:
            rdf_content = rdf.load_rdf_from_url(rdf_url)
            # BUG FIX: dropped the invalid ``status_code`` keyword here too.
            return http.HttpResponse(rdf_content, mimetype='text/rdf+xml',
                                     status=303)
    # If the client is an RDF browser, serve the RDF format page.
    if request.META.get('HTTP_ACCEPT', '').lower().find('rdf') != -1:
        # TODO: the following is a temporary solution until we find a better way.
        return rdf_view(request, request.path.replace('/resource/', ''))
    # If template_name is not set, use the default base_detail.html template.
    if not kwargs.get('template_name'):
        kwargs['template_name'] = 'base_detail.html'
    # NOTE(review): ``list_detail`` is not imported in this module; confirm
    # that ``from django.views.generic import list_detail`` exists upstream.
    return list_detail.object_detail(request, **kwargs)
#return gen_view(request, **kwargs)
#def cfxml(request, modelname, slug):
# """show xml view for a given slug. (currently not working)"""
# rtv = ""
# t = template.loader.get_template('trial_xml.xml')
#
# if modelname == "trial":
# trials = Trial.objects.filter(author__slug=slug)
## else:
## papers = PubAuthor.objects.filter(author__slug=slug)
# for t in trials:
# trial = t.pubentry
# if not trial.pid:
# trial.pid = trial.title.replace(" ", "")
# c = template.Context({ 'object': trial })
# rtv += t.render(c) + "\n\n"
# else:
# if modelname == "provenance":
# papers = Trial.objects.filter(provenance__slug=slug)
# elif modelname == "xxx":
# papers = xxx.objects.filter(series__slug=slug)
# else:
# return http.HttpResponse("not yet implemented for " + modelname)
#
# for t in trials:
# trial = t.pubentry
# if not trial.pid:
# trial.pid = trial.title.replace(" ", "")
# c = template.Context({ 'object': trial })
# rtv += t.render(c) + "\n\n"
#
# return http.HttpResponse(rtv, mimetype="text/plain")
# Displaying a search form is simple enough that we should disable cross-site
# attack protection.
@csrf_exempt
def search_form(request, object_type):
    """Display a form so that the user can perform search actions."""
    if request.method != 'POST':
        form = forms.SearchForm()
    else:
        form = forms.SearchForm(request.POST)
        if form.is_valid():
            # Valid submission: jump straight to the search results page.
            return http.HttpResponseRedirect(
                urlresolvers.reverse(
                    'search', args=[object_type, form.cleaned_data['keyword']]))
    # GET, or an invalid POST: (re-)display the form.
    return shortcuts.render_to_response(
        'search_form.html',
        {'form': form, 'root_url': CONFIG['ROOT']},
        context_instance=template.RequestContext(request))
class SearchResultView(ListView):
    """List view showing model instances whose label matches a keyword."""
    template_name = 'databrowse/model_detail.html'
    def get_queryset(self):
        '''
        object_type: One of (pub, author, journal, series, school, keyword).
        keyword: The keyword to search for.
        '''
        object_type = self.kwargs['object_type']
        keyword = self.kwargs['keyword']
        # Resolve the model class by capitalising the URL's object_type.
        model = getattr(models, object_type.capitalize())
        matched_objects = model.objects.filter(label__icontains=keyword)
        return matched_objects
    def get_context_data(self, **kwargs):
        """Augment the context with databrowse EasyModel wrappers so the
        databrowse model_detail template can render the results."""
        context = super(SearchResultView, self).get_context_data(**kwargs)
        self.queryset = self.get_queryset()
        model = getattr(models, self.kwargs['object_type'].capitalize())
        easy_model = EasyModel(databrowse.site, model)
        # Re-class the queryset so iteration yields databrowse EasyInstance
        # objects rather than plain model instances.
        easy_qs = self.queryset._clone(klass=EasyQuerySet)
        easy_qs._easymodel = easy_model
        databrowse.site.root_url = CONFIG['ROOT']
        extra_context = {'model': easy_model,
                         'root_url': databrowse.site.root_url,
                         'request': self.request,
                         'objectlist': easy_qs}
        context.update(extra_context)
        return context
@csrf_exempt
def upload_xml(request):
    """Display a form so the user can select an XML file.

    When the form is submitted, redirects to process_xml to process the
    XML file.
    """
    if request.method == 'POST':
        form = forms.XMLSelectForm(request.POST)
        if form.is_valid():
            # Hand off to the processxml view, forwarding the encoding.
            target = urlresolvers.reverse(
                'processxml', args=[form.cleaned_data['url']])
            return http.HttpResponseRedirect(
                target + '?encoding=' + form.cleaned_data['encoding'])
    else:
        form = forms.XMLSelectForm()
    return shortcuts.render_to_response(
        'form_upload_xml.html',
        {'form': form, 'root_url': CONFIG['ROOT']},
        context_instance=template.RequestContext(request))
def load_external_source(request, source_name):
    """Load external linked-data resources for one source.

    Supported sources: dbpedia, drugbank, dailymed, diseasome. Existing
    External_resource rows for the source are deleted, then the source's
    CSV export files (under CONFIG['SOURCES_FILE_LOC']) are re-read and
    External_resource / Alt_name rows are recreated. Returns a plain-text
    pseudo-JSON status response.

    NOTE(review): ``if not csv_reader:`` below can never be true (a csv
    reader object is always truthy), so the "Error reading csv file"
    branches are dead code. The bare ``except:`` clauses and the
    builtin-shadowing local name ``id`` are preserved as-is.
    """
    ## Loading DBpedia
    if source_name == 'dbpedia':
        # Drop any previously loaded dbpedia resources before re-importing.
        for m in External_resource.objects.filter(source_name='dbpedia'):
            m.delete()
        ## Loading diseases
        inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'dbpedia_disease.csv'
        inputfile = file(inputfilename,'r')
        csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
        # First row is the header; used to locate the relevant columns.
        row = csv_reader.next()
        if not csv_reader:
            return http.HttpResponse("Error reading csv file")
        else:
            id_index = 0
            try:
                id_index = row.index('id')
            except:
                return http.HttpResponse("Error finding the right column in the csv file")
            # Iterate the data rows; StopIteration ends the loop below.
            while row:
                try:
                    row = csv_reader.next()
                    id = row[id_index]
                    resource_url = 'http://dbpedia.org/resource/' + id
                    resource_label = id.replace('_',' ')
                    resource_format = 'RDF_HTML'
                    related_model_name = 'Condition'
                    label = id + ' (dbpedia disease resource)'
                    # The label column is length-limited; fall back to an
                    # md5 digest for over-long labels.
                    if len(label)>127:
                        label = hashlib.md5(label).hexdigest()
                    external_resource, created = models.External_resource.objects.get_or_create(
                        label = label,
                        source_id = id,
                        source_label = resource_label,
                        source_name = source_name,
                        source_url = resource_url,
                        source_format = resource_format,
                        related_model_name =related_model_name,
                    )
                except StopIteration:
                    # End of file: terminate the while loop.
                    row = None
        ## Loading drugs
        inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'dbpedia_drugs.csv'
        inputfile = file(inputfilename,'r')
        csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
        row = csv_reader.next()
        if not csv_reader:
            return http.HttpResponse("Error reading csv file")
        else:
            id_index = 0
            try:
                id_index = row.index('id')
            except:
                return http.HttpResponse("Error finding the right column in the csv file")
            while row:
                try:
                    row = csv_reader.next()
                    id = row[id_index]
                    resource_url = 'http://dbpedia.org/resource/' + id
                    resource_label = id.replace('_',' ')
                    resource_format = 'RDF_HTML'
                    related_model_name = 'Intervention'
                    label = id + ' (dbpedia drug resource)'
                    if len(label)>127:
                        label = hashlib.md5(label).hexdigest()
                    external_resource, created = models.External_resource.objects.get_or_create(
                        label = label,
                        source_label = resource_label,
                        source_id = id,
                        source_name = source_name,
                        source_url = resource_url,
                        source_format = resource_format,
                        related_model_name =related_model_name,
                    )
                except StopIteration:
                    row = None
        return http.HttpResponse("{'status':'OK'}")
    ## Loading Drugbank
    elif source_name == 'drugbank':
        # Drop any previously loaded drugbank resources before re-importing.
        for m in External_resource.objects.filter(source_name='drugbank'):
            m.delete()
        inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'drugbank_drugs.csv'
        inputfile = file(inputfilename,'r')
        csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
        row = csv_reader.next()
        if not csv_reader:
            return http.HttpResponse("Error reading csv file")
        else:
            id_index = 0
            name_index = 1
            try:
                id_index = row.index('id')
                name_index = row.index('name')
            except:
                return http.HttpResponse("Error finding the right column(s) in the csv file")
            while row:
                try:
                    row = csv_reader.next()
                    id = row[id_index]
                    name = row[name_index]
                    resource_url = 'http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugs/' + id
                    resource_label = name
                    resource_format = 'RDF_HTML'
                    related_model_name = 'Intervention'
                    label = id + ' (drugbank drug resource)'
                    if len(label)>127:
                        label = hashlib.md5(label).hexdigest()
                    external_resource, created = models.External_resource.objects.get_or_create(
                        label = label,
                        source_label = resource_label,
                        source_url = resource_url,
                        source_format = resource_format,
                        source_name = source_name,
                        related_model_name =related_model_name,
                    )
                except StopIteration:
                    row = None
        # alternative names (brand names)
        inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'drugbank_drug_brandnames.csv'
        inputfile = file(inputfilename,'r')
        csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
        row = csv_reader.next()
        if not csv_reader:
            return http.HttpResponse("Error reading csv file")
        else:
            id_index = 0
            name_index = 1
            try:
                id_index = row.index('id')
                name_index = row.index('name')
            except:
                return http.HttpResponse("Error finding the right column(s) in the csv file")
            while row:
                try:
                    row = csv_reader.next()
                    id = row[id_index]
                    # Decode defensively; malformed bytes are dropped.
                    altname = unicode(row[name_index],errors='ignore')
                    alt_name, created = models.Alt_name.objects.get_or_create(
                        label = hashlib.md5(source_name+id+altname).hexdigest(),
                        source = source_name,
                        id = id,
                        altname = altname,
                    )
                except StopIteration:
                    row = None
        # alternative names (synonyms)
        inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'drugbank_drug_synonyms.csv'
        inputfile = file(inputfilename,'r')
        csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
        row = csv_reader.next()
        if not csv_reader:
            return http.HttpResponse("Error reading csv file")
        else:
            id_index = 0
            name_index = 1
            try:
                id_index = row.index('id')
                name_index = row.index('name')
            except:
                return http.HttpResponse("Error finding the right column(s) in the csv file")
            while row:
                try:
                    row = csv_reader.next()
                    id = row[id_index]
                    altname = unicode(row[name_index],errors='ignore')
                    alt_name, created = models.Alt_name.objects.get_or_create(
                        label = hashlib.md5(source_name+id+altname).hexdigest(),
                        source = source_name,
                        id = id,
                        altname = altname,
                    )
                except StopIteration:
                    row = None
        return http.HttpResponse("{'status':'OK'}")
    ## Loading Dailymed
    elif source_name == 'dailymed':
        # Drop any previously loaded dailymed resources before re-importing.
        for m in External_resource.objects.filter(source_name='dailymed'):
            m.delete()
        inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'dailymed_drugs.csv'
        inputfile = file(inputfilename,'r')
        csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
        row = csv_reader.next()
        if not csv_reader:
            return http.HttpResponse("Error reading csv file")
        else:
            id_index = 0
            name_index = 1
            try:
                id_index = row.index('id')
                name_index = row.index('name')
                fullName_index = row.index('fullName')
                activeIngridient_index = row.index('activeIngridient')
                drugbank_id_index = row.index('drugbank_id')
                genericMedicine_index = row.index('genericMedicine')
            except:
                return http.HttpResponse("Error finding the right column(s) in the csv file")
            while row:
                try:
                    row = csv_reader.next()
                    id = row[id_index]
                    name = row[name_index]
                    resource_url = 'http://www4.wiwiss.fu-berlin.de/dailymed/resource/drugs/' + id
                    resource_label = name
                    resource_format = 'RDF_HTML'
                    related_model_name = 'Intervention'
                    label = id + ' (dailymed drug resource)'
                    if len(label)>127:
                        label = hashlib.md5(label).hexdigest()
                    external_resource, created = models.External_resource.objects.get_or_create(
                        label = label,
                        source_label = resource_label,
                        source_url = resource_url,
                        source_format = resource_format,
                        source_name = source_name,
                        related_model_name =related_model_name,
                    )
                    # alternative names: one Alt_name row per aliasing column.
                    altname = row[genericMedicine_index]
                    alt_name, created = models.Alt_name.objects.get_or_create(
                        label = hashlib.md5(source_name+id+altname).hexdigest(),
                        source = source_name,
                        id = id,
                        altname = altname,
                    )
                    altname = row[fullName_index]
                    alt_name, created = models.Alt_name.objects.get_or_create(
                        label = hashlib.md5(source_name+id+altname).hexdigest(),
                        source = source_name,
                        id = id,
                        altname = altname,
                    )
                    altname = row[activeIngridient_index]
                    alt_name, created = models.Alt_name.objects.get_or_create(
                        label = hashlib.md5(source_name+id+altname).hexdigest(),
                        source = source_name,
                        id = id,
                        altname = altname,
                    )
                    altname = row[drugbank_id_index]
                    alt_name, created = models.Alt_name.objects.get_or_create(
                        label = hashlib.md5(source_name+id+altname).hexdigest(),
                        source = source_name,
                        id = id,
                        altname = altname,
                    )
                except StopIteration:
                    row = None
        return http.HttpResponse("{'status':'OK'}")
    ## Loading diseasome
    elif source_name == 'diseasome':
        # Drop any previously loaded diseasome resources before re-importing.
        for m in External_resource.objects.filter(source_name='diseasome'):
            m.delete()
        inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'diseasome_disease.csv'
        inputfile = file(inputfilename,'r')
        csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
        row = csv_reader.next()
        if not csv_reader:
            return http.HttpResponse("Error reading csv file")
        else:
            id_index = 0
            name_index = 1
            try:
                id_index = row.index('id')
                name_index = row.index('name')
            except:
                return http.HttpResponse("Error finding the right column(s) in the csv file")
            while row:
                try:
                    row = csv_reader.next()
                    id = row[id_index]
                    name = row[name_index]
                    resource_url = 'http://www4.wiwiss.fu-berlin.de/diseasome/resource/diseases/' + id
                    resource_label = name
                    resource_format = 'RDF_HTML'
                    related_model_name = 'Condition'
                    label = id + ' (diseasome disease resource)'
                    if len(label)>127:
                        label = hashlib.md5(label).hexdigest()
                    external_resource, created = models.External_resource.objects.get_or_create(
                        label = label,
                        source_label = resource_label,
                        source_url = resource_url,
                        source_format = resource_format,
                        source_name = source_name,
                        related_model_name =related_model_name,
                    )
                except StopIteration:
                    row = None
        return http.HttpResponse("{'status':'OK'}")
    ## Other sources
    else:
        return http.HttpResponse("{'status':'FAIL', 'reason':'Source %s not found'}" % source_name)
def reprocess_xml(request, url):
    """Re-process a previously loaded XML file at *url*.

    Delegates to operations.process_xml with the reprocess flag set and
    returns a plain "OK" response on success, or renders the error page
    with the exception text.
    """
    try:
        operations.process_xml(
            url, CONFIG['XML_SIZE_LIMIT'],
            request.META['REMOTE_ADDR'], request.GET.get('encoding'),
            True)  # True => force re-processing of an already-seen file
        return http.HttpResponse("OK")
    # BUG FIX: FileSizeLimitExceededException was listed twice in the
    # exception tuple; deduplicated. Also switched to the "as e" syntax
    # (valid from Python 2.6) for Python 3 compatibility.
    except (operations.FileSizeLimitExceededException,
            operations.XMLFileFormatException) as e:
        return shortcuts.render_to_response('error.html', {'content': str(e)})
def process_xml(request, url):
    """Process an XML file at *url* for the first time.

    Delegates to operations.process_xml with the reprocess flag cleared and
    returns a plain "OK" response on success, or renders the error page
    with the exception text.
    """
    try:
        operations.process_xml(
            url, CONFIG['XML_SIZE_LIMIT'],
            request.META['REMOTE_ADDR'], request.GET.get('encoding'),
            False)  # False => first-time processing
        return http.HttpResponse("OK")
    # BUG FIX: FileSizeLimitExceededException was listed twice in the
    # exception tuple; deduplicated. Also switched to the "as e" syntax
    # (valid from Python 2.6) for Python 3 compatibility.
    except (operations.FileSizeLimitExceededException,
            operations.XMLFileFormatException) as e:
        return shortcuts.render_to_response('error.html', {'content': str(e)})
def rdf_view(request, url):
    """Provides RDF view by redirection to D2R server.

    Fetches the RDF for *url* from the D2R server and rewrites the server
    URLs in the payload so links resolve against this application.
    """
    try:
        home_url = CONFIG["HOME"]
        d2rserver_url = CONFIG['D2R_SERVER']
        rdf_url = url
        # Strip a trailing slash before building the D2R data URL.
        if url.endswith('/'):
            rdf_url = url[:-1]
        rdf_url = d2rserver_url + 'data/' + rdf_url
        try:
            req = urllib2.Request(url=rdf_url)
            req.add_header("Accept","application/rdf+xml")
            request = urllib2.urlopen(req)
            # Rewrite D2R server URLs, the /vocab/resource/ path and the
            # D2R mapping prefix so the returned RDF points at this site.
            response = http.HttpResponse(request.read().replace(d2rserver_url,home_url+'/').replace('/vocab/resource/','/vocab/').replace(CONFIG['D2RMAP'],''), mimetype='text/rdf+xml')
            filename = url.replace('/','-') +'.rdf'
            response['Content-Disposition'] = 'inline; filename='+ filename.replace('-.','.')
            response['Content-Type'] = 'application/rdf+xml'
            return response
        except Exception, e:
            return shortcuts.render_to_response('error.html', {'content': str(e)})
    # NOTE(review): FileSizeLimitExceededException is listed twice here, and
    # the inner handler already catches Exception, so this outer clause looks
    # unreachable in practice — confirm before removing.
    except (operations.FileSizeLimitExceededException,
            operations.XMLFileFormatException,
            operations.FileSizeLimitExceededException), e:
        return shortcuts.render_to_response('error.html', {'content': str(e)})
def vocab_view(request, url):
    """Provides view for Vocabulary by redirection to D2R server."""
    try:
        home_url = CONFIG["HOME"]
        d2r_url = CONFIG['D2R_SERVER']
        # Strip a trailing slash before building the D2R vocab URL.
        path = url[:-1] if url.endswith('/') else url
        target = d2r_url + 'vocab/data/' + path
        try:
            req = urllib2.Request(url=target)
            req.add_header("Accept", "application/rdf+xml")
            upstream = urllib2.urlopen(req)
            # Rewrite D2R server links so they point back at this site.
            body = upstream.read().replace(d2r_url, home_url + '/')
            response = http.HttpResponse(body, mimetype='text/rdf+xml')
            filename = (url.replace('/', '-') + '.rdf').replace('-.', '.')
            response['Content-Disposition'] = 'inline; filename=' + filename
            response['Content-Type'] = 'application/rdf+xml'
            return response
        except Exception as e:
            return shortcuts.render_to_response('error.html', {'content': str(e)})
    except Exception as e:
        return shortcuts.render_to_response('error.html', {'content': str(e)})
def sparql_view(request, url):
    """Provides view for the SPARQL endpoint (proxied to the D2R server)."""
    try:
        home_url = CONFIG["HOME"]
        d2r_url = CONFIG['D2R_SERVER']
        query = request.GET.get('query')
        if not query:
            return shortcuts.render_to_response(
                'error.html', {'content': 'Query not given.'})
        try:
            upstream = urllib.urlopen(d2r_url + 'sparql?query=' + query)
            # Rewrite D2R server links so they point back at this site.
            response = http.HttpResponse(
                upstream.read().replace(d2r_url, home_url + '/'),
                mimetype='text/rdf+n3')
            response['Content-Disposition'] = 'inline; filename=query_result.rdf'
            return response
        except Exception as e:
            return shortcuts.render_to_response('error.html', {'content': str(e)})
    except Exception as e:
        return shortcuts.render_to_response('error.html', {'content': str(e)})
def snorql_view(request, url):
    """Provides view for snorql interface (proxied from the D2R server)."""
    try:
        home_url = CONFIG["HOME"]
        d2rserver_url = CONFIG['D2R_SERVER']
        # NOTE(review): this reads a GET *parameter* named QUERY_STRING, not
        # request.META['QUERY_STRING'] — confirm this is intended.
        query = request.GET.get('QUERY_STRING')
        if query:
            rdf_url = d2rserver_url + 'snorql/index.html?' + query + '/'
        else:
            rdf_url = d2rserver_url + 'snorql/' + url
        try:
            request = urllib.urlopen(rdf_url)
            # Rewrite D2R server links so they point back at this site.
            response = http.HttpResponse(request.read().replace(d2rserver_url,home_url+'/'))
            return response
        except Exception, e:
            return shortcuts.render_to_response('error.html', {'content': str(e)})
    except Exception, e:
        return shortcuts.render_to_response('error.html', {'content': str(e)})
def generate_object_list(model, queryset=None):
    """Generates a context dict for generic view use.

    Defaults *queryset* to all objects of *model* and wraps the model in a
    databrowse EasyModel for template rendering.
    """
    if queryset is None:
        queryset = model.objects.all()
    # BUG FIX: EasyModel is constructed as EasyModel(site, model) everywhere
    # else in this module (see homepage and SearchResultView); the previous
    # single-argument call did not match that usage.
    return {
        'queryset': queryset,
        'extra_context': {'model': EasyModel(databrowse.site, model)}}
|
|
"""This module implements a generic object wrapper for use in performing
monkey patching, helper functions to perform monkey patching and general
purpose decorators and wrapper functions for various basic tasks one can
make use of when doing monkey patching.
"""
import sys
import inspect
import functools
from ..packages.wrapt import (ObjectProxy as _ObjectProxy,
FunctionWrapper as _FunctionWrapper,
BoundFunctionWrapper as _BoundFunctionWrapper)
from ..packages.wrapt.wrappers import _FunctionWrapperBase
# We previously had our own pure Python implementation of the generic
# object wrapper but we now defer to using the wrapt module as its C
# implementation has less than ten percent of the overhead for the common
# case of instance methods. Even the wrapt pure Python implementation
# of wrapt has about fifty percent of the overhead. The wrapt module
# implementation is also much more comprehensive as far as being a
# transparent object proxy. The only problem is that until we can cut
# over completely to a generic API, we need to maintain the existing API
# we used. This requires the fiddles below where we need to customise by
# what names everything is accessed. Note that with the code below, the
# _ObjectWrapperBase class must come first in the base class list of
# the derived class to ensure correct precedence order on base class
# method lookup for __setattr__(), __getattr__() and __delattr__(). Also
# the intention eventually is that ObjectWrapper is deprecated. Either
# ObjectProxy or FunctionWrapper should be used going forward.
class _ObjectWrapperBase(object):
    """Mixin mapping the legacy ``_nr_*`` attribute namespace onto wrapt's
    ``_self_*`` namespace so existing callers keep working on top of the
    wrapt object proxies. Must come first in the bases of derived classes
    so its __setattr__/__getattr__/__delattr__ take precedence.
    """
    def __setattr__(self, name, value):
        if name.startswith('_nr_'):
            # Translate the legacy prefix, then re-enter setattr so the
            # renamed attribute is stored via the normal proxy machinery.
            name = name.replace('_nr_', '_self_', 1)
            setattr(self, name, value)
        else:
            _ObjectProxy.__setattr__(self, name, value)
    def __getattr__(self, name):
        if name.startswith('_nr_'):
            name = name.replace('_nr_', '_self_', 1)
            return getattr(self, name)
        else:
            return _ObjectProxy.__getattr__(self, name)
    def __delattr__(self, name):
        if name.startswith('_nr_'):
            name = name.replace('_nr_', '_self_', 1)
            delattr(self, name)
        else:
            _ObjectProxy.__delattr__(self, name)
    @property
    def _nr_next_object(self):
        # The immediately wrapped object (one unwrap level).
        return self.__wrapped__
    @property
    def _nr_last_object(self):
        # The innermost wrapped object, unwrapping nested wrappers; the
        # result is cached on first access.
        try:
            return self._self_last_object
        except AttributeError:
            self._self_last_object = getattr(self.__wrapped__,
                    '_nr_last_object', self.__wrapped__)
            return self._self_last_object
    @property
    def _nr_instance(self):
        # Instance the wrapped function is bound to (None if unbound).
        return self._self_instance
    @property
    def _nr_wrapper(self):
        # The user-supplied wrapper function.
        return self._self_wrapper
    @property
    def _nr_parent(self):
        # The parent FunctionWrapper a bound wrapper was created from.
        return self._self_parent
class _NRBoundFunctionWrapper(_ObjectWrapperBase, _BoundFunctionWrapper):
    # Bound-method counterpart produced when FunctionWrapper is accessed
    # through an instance; inherits the _nr_* attribute translation.
    pass
class FunctionWrapper(_ObjectWrapperBase, _FunctionWrapper):
    """wrapt FunctionWrapper with the legacy _nr_* attribute API mixed in."""
    # Binding through an instance/class yields our bound variant so the
    # _nr_* namespace keeps working on bound wrappers too.
    __bound_function_wrapper__ = _NRBoundFunctionWrapper
class ObjectProxy(_ObjectProxy):
    """wrapt ObjectProxy exposing the legacy ``_nr_*`` attribute namespace.

    Duplicates the translation logic from _ObjectWrapperBase because this
    class derives directly from the wrapt proxy rather than the mixin.
    """
    def __setattr__(self, name, value):
        if name.startswith('_nr_'):
            # Translate the legacy prefix onto wrapt's _self_* namespace.
            name = name.replace('_nr_', '_self_', 1)
            setattr(self, name, value)
        else:
            _ObjectProxy.__setattr__(self, name, value)
    def __getattr__(self, name):
        if name.startswith('_nr_'):
            name = name.replace('_nr_', '_self_', 1)
            return getattr(self, name)
        else:
            return _ObjectProxy.__getattr__(self, name)
    def __delattr__(self, name):
        if name.startswith('_nr_'):
            name = name.replace('_nr_', '_self_', 1)
            delattr(self, name)
        else:
            _ObjectProxy.__delattr__(self, name)
    @property
    def _nr_next_object(self):
        # The immediately wrapped object (one unwrap level).
        return self.__wrapped__
    @property
    def _nr_last_object(self):
        # The innermost wrapped object, unwrapping nested wrappers; the
        # result is cached on first access.
        try:
            return self._self_last_object
        except AttributeError:
            self._self_last_object = getattr(self.__wrapped__,
                    '_nr_last_object', self.__wrapped__)
            return self._self_last_object
class CallableObjectProxy(ObjectProxy):
    """ObjectProxy variant that forwards calls to the wrapped object."""
    def __call__(self, *args, **kwargs):
        return self.__wrapped__(*args, **kwargs)
# The ObjectWrapper class needs to be deprecated and removed once all our
# own code no longer uses it. It currently reaches down into wrapt
# internals, which it shouldn't be doing.
class ObjectWrapper(_ObjectWrapperBase, _FunctionWrapperBase):
    """Deprecated function wrapper kept for the legacy API.

    Builds directly on wrapt's _FunctionWrapperBase internals, supplying
    the binding type explicitly instead of letting FunctionWrapper derive it.
    """
    __bound_function_wrapper__ = _NRBoundFunctionWrapper
    def __init__(self, wrapped, instance, wrapper):
        # Work out how the wrapped object binds so wrapt can mimic it.
        if isinstance(wrapped, classmethod):
            binding = 'classmethod'
        elif isinstance(wrapped, staticmethod):
            binding = 'staticmethod'
        else:
            binding = 'function'
        super(ObjectWrapper, self).__init__(wrapped, instance, wrapper,
                binding=binding)
# The wrap_callable() alias needs to be deprecated and usage of it removed;
# new code should name FunctionWrapper directly.
wrap_callable = FunctionWrapper
# Helper functions for performing monkey patching.
def resolve_path(module, name):
    """Walk the dotted attribute path *name* starting from *module*.

    *module* may be a module object or an importable module name. Returns
    a ``(parent, attribute, original)`` triple where ``original`` is the
    object found at the end of the path and ``parent`` is the object that
    holds it under ``attribute``.
    """
    if not inspect.ismodule(module):
        # Accept a module name as well as a module object.
        __import__(module)
        module = sys.modules[module]
    parent = original = module
    attribute = None
    for attribute in name.split('.'):
        parent, original = original, getattr(original, attribute)
    return (parent, attribute, original)
def apply_patch(parent, attribute, replacement):
    """Install *replacement* onto *parent* under *attribute*."""
    setattr(parent, attribute, replacement)
def wrap_object(module, name, factory, args=(), kwargs={}):
    """Monkey patch ``module.name`` with ``factory(original, *args, **kwargs)``.

    Returns the wrapper object that was installed.
    """
    parent, attribute, original = resolve_path(module, name)
    replacement = factory(original, *args, **kwargs)
    apply_patch(parent, attribute, replacement)
    return replacement
# Function for applying a proxy object to an attribute of a class instance.
# The wrapper works by defining an attribute of the same name on the
# class which is a descriptor and which intercepts access to the
# instance attribute. Note that this cannot be used on attributes which
# are themselves defined by a property object.
class AttributeWrapper(object):
    """Data descriptor that wraps reads of an instance attribute.

    Installed on a class under the same name as the instance attribute it
    shadows. Writes store the raw value in the instance ``__dict__``; reads
    return ``factory(value, *args, **kwargs)``. Cannot be used on attributes
    that are themselves implemented as property objects.
    """

    def __init__(self, attribute, factory, args, kwargs):
        self.attribute = attribute
        self.factory = factory
        self.args = args
        self.kwargs = kwargs

    def __get__(self, instance, owner):
        value = instance.__dict__[self.attribute]
        return self.factory(value, *self.args, **self.kwargs)

    def __set__(self, instance, value):
        instance.__dict__[self.attribute] = value

    def __delete__(self, instance):
        # BUG FIX: this hook was previously named __del__ (the finalizer,
        # which takes no extra arguments), so ``del obj.attr`` raised
        # AttributeError instead of removing the attribute. The descriptor
        # deletion hook is __delete__.
        del instance.__dict__[self.attribute]
def wrap_object_attribute(module, name, factory, args=(), kwargs={}):
    """Proxy an *instance* attribute of a class via an AttributeWrapper.

    ``name`` is a dotted path ending in the attribute; the class is resolved
    from everything before the final dot, and a data descriptor is installed
    on it so reads of the attribute go through ``factory``.
    """
    owner_path, attribute = name.rsplit('.', 1)
    owner = resolve_path(module, owner_path)[2]
    descriptor = AttributeWrapper(attribute, factory, args, kwargs)
    apply_patch(owner, attribute, descriptor)
    return descriptor
# Function for creating a decorator for applying to functions, as well as
# short cut functions for applying wrapper functions via monkey patching.
def function_wrapper(wrapper):
    """Turn *wrapper* into a decorator that wraps its target in FunctionWrapper.

    When the decorated object is later applied to a target callable, the
    wrapper is re-bound against the instance (or class) the decoration is
    being applied through, so it observes the same binding as the target.
    """
    def _outer(wrapped, instance, args, kwargs):
        target = args[0]
        if instance is None:
            bound = wrapper
        else:
            # Rebind through the class (unbound) or through the instance.
            is_cls = inspect.isclass(instance)
            owner = instance if is_cls else type(instance)
            receiver = None if is_cls else instance
            bound = wrapper.__get__(receiver, owner)
        return FunctionWrapper(target, bound)
    return FunctionWrapper(wrapper, _outer)
def wrap_function_wrapper(module, name, wrapper):
    """Monkey patch the target at *name* in *module* with a FunctionWrapper."""
    factory_args = (wrapper,)
    return wrap_object(module, name, FunctionWrapper, factory_args)
def patch_function_wrapper(module, name):
    """Decorator variant of wrap_function_wrapper().

    The decorated function becomes the wrapper applied to the target at
    *name* in *module*; the installed FunctionWrapper is returned.
    """
    def _decorator(wrapper):
        return wrap_object(module, name, FunctionWrapper, (wrapper,))
    return _decorator
def transient_function_wrapper(module, name):
    """Decorator factory: patch ``module``:``name`` only for the call's duration.

    The decorated function is wrapped so that, on each call, the target at
    *name* in *module* is temporarily replaced by a FunctionWrapper combining
    the original target with the supplied wrapper; the original is restored
    in a ``finally`` block once the call returns (or raises).
    """
    def _decorator(wrapper):
        def _wrapper(wrapped, instance, args, kwargs):
            target_wrapped = args[0]
            # Rebind the wrapper against the instance/class the decoration
            # is being applied through, mirroring function_wrapper().
            if instance is None:
                target_wrapper = wrapper
            elif inspect.isclass(instance):
                target_wrapper = wrapper.__get__(None, instance)
            else:
                target_wrapper = wrapper.__get__(instance, type(instance))
            def _execute(wrapped, instance, args, kwargs):
                # Patch the target for the duration of this single call.
                (parent, attribute, original) = resolve_path(module, name)
                replacement = FunctionWrapper(original, target_wrapper)
                setattr(parent, attribute, replacement)
                try:
                    return wrapped(*args, **kwargs)
                finally:
                    # Always restore the original, even if the call raises.
                    setattr(parent, attribute, original)
            return FunctionWrapper(target_wrapped, _execute)
        return FunctionWrapper(wrapper, _wrapper)
    return _decorator
# Generic decorators for performing actions before and after a wrapped
# function is called, or modifying the inbound arguments or return value.
def pre_function(function):
    """Decorator: invoke *function* with the call arguments before the call.

    The wrapped callable's return value is passed through untouched. When
    bound to an instance, the instance is prepended to *function*'s args.
    """
    @function_wrapper
    def _wrapper(wrapped, instance, args, kwargs):
        if instance is None:
            function(*args, **kwargs)
        else:
            function(instance, *args, **kwargs)
        return wrapped(*args, **kwargs)
    return _wrapper
def PreFunctionWrapper(wrapped, function):
    """Factory form of pre_function() suitable for use with wrap_object()."""
    decorator = pre_function(function)
    return decorator(wrapped)
def wrap_pre_function(module, object_path, function):
    """Monkey patch the target at *object_path* to call *function* first."""
    factory_args = (function,)
    return wrap_object(module, object_path, PreFunctionWrapper, factory_args)
def post_function(function):
    """Decorator: invoke *function* with the call arguments after the call.

    *function* receives the original call arguments (not the result); the
    wrapped callable's return value is passed through untouched.
    """
    @function_wrapper
    def _wrapper(wrapped, instance, args, kwargs):
        result = wrapped(*args, **kwargs)
        if instance is None:
            function(*args, **kwargs)
        else:
            function(instance, *args, **kwargs)
        return result
    return _wrapper
def PostFunctionWrapper(wrapped, function):
    """Factory form of post_function() suitable for use with wrap_object()."""
    decorator = post_function(function)
    return decorator(wrapped)
def wrap_post_function(module, object_path, function):
    """Monkey patch the target at *object_path* to call *function* afterwards."""
    factory_args = (function,)
    return wrap_object(module, object_path, PostFunctionWrapper, factory_args)
def in_function(function):
    """Decorator: pass the inbound call arguments through *function*.

    *function* receives the call arguments and must return a (possibly
    modified) ``(args, kwargs)`` pair, which is then used to invoke the
    wrapped callable.
    """
    @function_wrapper
    def _wrapper(wrapped, instance, args, kwargs):
        if instance is not None:
            args, kwargs = function(instance, *args, **kwargs)
            # The instance is passed into the supplied function and for
            # consistency it is also expected to also be returned
            # otherwise it gets fiddly for the supplied function to
            # remove it. It is expected that the instance returned in
            # the arguments is the same value, as it is simply dropped
            # here (args[1:]) after being returned. This is necessary as
            # the instance is not passed through in the arguments to the
            # wrapped function: it is already bound to the wrapped
            # function at this point and supplied automatically.
            return wrapped(*args[1:], **kwargs)
        args, kwargs = function(*args, **kwargs)
        return wrapped(*args, **kwargs)
    return _wrapper
def InFunctionWrapper(wrapped, function):
    """Factory form of in_function() suitable for use with wrap_object()."""
    decorator = in_function(function)
    return decorator(wrapped)
def wrap_in_function(module, object_path, function):
    """Monkey patch the target at *object_path* to filter inbound arguments."""
    factory_args = (function,)
    return wrap_object(module, object_path, InFunctionWrapper, factory_args)
def out_function(function):
    """Decorator: filter the wrapped callable's return value through *function*."""
    @function_wrapper
    def _wrapper(wrapped, instance, args, kwargs):
        result = wrapped(*args, **kwargs)
        return function(result)
    return _wrapper
def OutFunctionWrapper(wrapped, function):
    """Factory form of out_function() suitable for use with wrap_object()."""
    decorator = out_function(function)
    return decorator(wrapped)
def wrap_out_function(module, object_path, function):
    """Monkey patch the target at *object_path* to filter its return value."""
    factory_args = (function,)
    return wrap_object(module, object_path, OutFunctionWrapper, factory_args)
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Drop the Alert and AlertRelatedGroup tables (models removed).

        The unique constraint on AlertRelatedGroup is removed before the
        tables themselves are deleted.
        """
        # Removing unique constraint on 'AlertRelatedGroup', fields ['group', 'alert']
        db.delete_unique(u'sentry_alertrelatedgroup', ['group_id', 'alert_id'])
        # Deleting model 'Alert'
        db.delete_table(u'sentry_alert')
        # Deleting model 'AlertRelatedGroup'
        db.delete_table(u'sentry_alertrelatedgroup')
    def backwards(self, orm):
        """Recreate the Alert and AlertRelatedGroup tables dropped by forwards().

        Tables are created first, then the unique constraint on
        ('group', 'alert') is restored.
        """
        # Adding model 'Alert'
        db.create_table(u'sentry_alert', (
            ('project', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Project'])),
            ('status', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(default=0, db_index=True)),
            ('group', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Group'], null=True)),
            ('message', self.gf('django.db.models.fields.TextField')()),
            ('data', self.gf('sentry.db.models.fields.gzippeddict.GzippedDictField')(null=True)),
            ('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
            ('datetime', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        db.send_create_signal('sentry', ['Alert'])
        # Adding model 'AlertRelatedGroup'
        db.create_table(u'sentry_alertrelatedgroup', (
            ('group', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Group'])),
            ('data', self.gf('sentry.db.models.fields.gzippeddict.GzippedDictField')(null=True)),
            ('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
            ('alert', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Alert'])),
        ))
        db.send_create_signal('sentry', ['AlertRelatedGroup'])
        # Adding unique constraint on 'AlertRelatedGroup', fields ['group', 'alert']
        db.create_unique(u'sentry_alertrelatedgroup', ['group_id', 'alert_id'])
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'storage_options': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.group': {
'Meta': {'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'object_name': 'UserReport', 'index_together': "(('project', 'event_id'),)"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry']
|
|
#!/usr/bin/env python2.5
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper classes which help converting a url to a list of SB expressions."""
import array
import logging
import re
import string
import urllib
import urlparse
import util
class UrlParseError(Exception):
  """Raised when a URL cannot be canonicalized into SafeBrowsing expressions."""
def GenerateSafeChars():
  """Return a string of all 'safe' characters for url encoding.

  These are the characters that shouldn't be escaped when quoting a URL for
  the SafeBrowsing protocol: all printable characters except '%', '#' and
  whitespace.

  Returns:
    A str containing every character urllib.quote should leave unescaped.
  """
  unfiltered_chars = string.digits + string.ascii_letters + string.punctuation
  # ''.join is the idiomatic way to build the string (and, unlike the former
  # array.array('c', ...).tostring() trick, also works on Python 3). The
  # produced bytes are identical.
  return ''.join(c for c in unfiltered_chars if c not in '%#')
class ExpressionGenerator(object):
  """Class does the conversion url -> list of SafeBrowsing expressions.

  This class converts a given url into the list of all SafeBrowsing host-suffix,
  path-prefix expressions for that url. These are expressions that are on the
  SafeBrowsing lists.
  """

  # Regexps recognizing the textual encodings of a single IP component.
  HEX = re.compile(r'^0x([a-fA-F0-9]+)$')
  OCT = re.compile(r'^0([0-7]+)$')
  DEC = re.compile(r'^(\d+)$')
  # Dotted-quad IP followed by a space (Windows-resolver quirk, see
  # CanonicalizeIp below).
  IP_WITH_TRAILING_SPACE = re.compile(r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) ')
  POSSIBLE_IP = re.compile(r'^(?i)((?:0x[0-9a-f]+|[0-9\\.])+)$')
  # Matches a dotted component that looks octal (leading 0) but contains an
  # illegal octal digit (8 or 9), e.g. "08" or "09".
  FIND_BAD_OCTAL_REGEXP = re.compile(r'(^|\.)0\d*[89]')
  # This regular expression parses the host and port from a hostname. Note: any
  # user and password are removed from the hostname.
  HOST_PORT_REGEXP = re.compile(r'^(?:.*@)?(?P<host>[^:]*)(:(?P<port>\d+))?$')
  SAFE_CHARS = GenerateSafeChars()
  # Dict that maps supported schemes to their default port number.
  DEFAULT_PORTS = {'http': '80', 'https': '443', 'ftp': '21'}

  def __init__(self, url):
    """Canonicalize url and precompute its host lists and path expressions.

    Args:
      url: the URL (string) to generate expressions for.

    Raises:
      UrlParseError: if url cannot be canonicalized.
    """
    parse_exception = UrlParseError('failed to parse URL "%s"' % (url,))
    canonical_url = ExpressionGenerator.CanonicalizeUrl(url)
    if not canonical_url:
      raise parse_exception
    # Each element is a list of host components used to build expressions.
    self._host_lists = []
    # A list of paths used to build expressions.
    self._path_exprs = []
    url_split = urlparse.urlsplit(canonical_url)
    canonical_host, canonical_path = url_split[1], url_split[2]
    self._MakeHostLists(canonical_host, parse_exception)
    if url_split[3]:
      # Include canonicalized path with query arguments
      self._path_exprs.append(canonical_path + '?' + url_split[3])
    self._path_exprs.append(canonical_path)
    # Get the first three directory path components and create the 4 path
    # expressions starting at the root (/) and successively appending directory
    # path components, including the trailing slash. E.g.:
    # /a/b/c/d.html -> [/, /a/, /a/b/, /a/b/c/]
    path_parts = canonical_path.rstrip('/').lstrip('/').split('/')[:3]
    if canonical_path.count('/') < 4:
      # If the last component is not a directory we remove it.
      path_parts.pop()
    while path_parts:
      self._path_exprs.append('/' + '/'.join(path_parts) + '/')
      path_parts.pop()
    # Always include the root path (unless the canonical path already is '/',
    # in which case it was appended above).
    if canonical_path != '/':
      self._path_exprs.append('/')

  @staticmethod
  def CanonicalizeUrl(url):
    """Canonicalize the given URL for the SafeBrowsing protocol.

    Args:
      url: URL to canonicalize.

    Returns:
      A canonical URL or None if the URL could not be canonicalized.
    """
    # Start by stripping off the fragment identifier.
    tmp_pos = url.find('#')
    if tmp_pos >= 0:
      url = url[0:tmp_pos]
    # Stripping off leading and trailing white spaces.
    url = url.lstrip().rstrip()
    # Remove any embedded tabs and CR/LF characters which aren't escaped.
    url = url.replace('\t', '').replace('\r', '').replace('\n', '')
    # Un-escape and re-escape the URL just in case there are some encoded
    # characters in the url scheme for example.
    url = ExpressionGenerator._Escape(url)
    url_split = urlparse.urlsplit(url)
    if not url_split[0]:
      # URL had no scheme. In this case we assume it is http://.
      url = 'http://' + url
      url_split = urlparse.urlsplit(url)
    url_scheme = url_split[0].lower()
    if url_scheme not in ExpressionGenerator.DEFAULT_PORTS:
      return None  # Unsupported scheme.
    # Note: applying HOST_PORT_REGEXP also removes any user and password.
    m = ExpressionGenerator.HOST_PORT_REGEXP.match(url_split[1])
    if not m:
      return None
    host, port = m.group('host'), m.group('port')
    canonical_host = ExpressionGenerator.CanonicalizeHost(host)
    if not canonical_host:
      return None
    # Now that the host is canonicalized we add the port back if it's not the
    # default port for that url scheme.
    if port and port != ExpressionGenerator.DEFAULT_PORTS[url_scheme]:
      canonical_host += ':' + port
    canonical_path = ExpressionGenerator.CanonicalizePath(url_split[2])
    # If the URL ends with ? we want to keep the ?.
    canonical_url = url_split[0] + '://' + canonical_host + canonical_path
    if url_split[3] != '' or url.endswith('?'):
      canonical_url += '?' + url_split[3]
    return canonical_url

  @staticmethod
  def CanonicalizePath(path):
    """Canonicalize the given path."""
    if not path:
      return '/'
    # There are some cases where the path will not start with '/'. Example:
    # "ftp://host.com?q" -- the hostname is 'host.com' and the path '%3Fq'.
    # Browsers typically do prepend a leading slash to the path in this case,
    # we'll do the same.
    if path[0] != '/':
      path = '/' + path
    path = ExpressionGenerator._Escape(path)
    path_components = []
    for path_component in path.split('/'):
      # If the path component is '..' we skip it and remove the preceding path
      # component if there are any.
      if path_component == '..':
        if len(path_components) > 0:
          path_components.pop()
      # We skip empty path components to remove successive slashes (i.e.,
      # // -> /). Note: this means that the leading and trailing slash will
      # also be removed and need to be re-added afterwards.
      #
      # If the path component is '.' we also skip it (i.e., /./ -> /).
      elif path_component != '.' and path_component != '':
        path_components.append(path_component)
    # Put the path components back together and re-add the leading slash which
    # got stripped by removing empty path components.
    canonical_path = '/' + '/'.join(path_components)
    # If necessary we also re-add the trailing slash.
    if path.endswith('/') and not canonical_path.endswith('/'):
      canonical_path += '/'
    return canonical_path

  @staticmethod
  def CanonicalizeHost(host):
    """Canonicalize the given host. Returns None in case of an error."""
    if not host:
      return None
    host = ExpressionGenerator._Escape(host.lower())
    ip = ExpressionGenerator.CanonicalizeIp(host)
    if ip:
      # Host is an IP address.
      host = ip
    else:
      # Host is a normal hostname.
      # Skip trailing, leading and consecutive dots.
      host_split = [part for part in host.split('.') if part]
      if len(host_split) < 2:
        return None
      host = '.'.join(host_split)
    return host

  @staticmethod
  def CanonicalizeIp(host):
    """
    Return a canonicalized IP if host can represent an IP and None otherwise.
    """
    if len(host) <= 15:
      # The Windows resolver allows a 4-part dotted decimal IP address to have a
      # space followed by any old rubbish, so long as the total length of the
      # string doesn't get above 15 characters. So, "10.192.95.89 xy" is
      # resolved to 10.192.95.89.
      # If the string length is greater than 15 characters,
      # e.g. "10.192.95.89 xy.wildcard.example.com", it will be resolved through
      # DNS.
      m = ExpressionGenerator.IP_WITH_TRAILING_SPACE.match(host)
      if m:
        host = m.group(1)
    if not ExpressionGenerator.POSSIBLE_IP.match(host):
      return None
    # Basically we should parse octal if we can, but if there are illegal octal
    # numbers, i.e. 08 or 09, then we should just look at decimal and hex.
    allow_octal = not ExpressionGenerator.FIND_BAD_OCTAL_REGEXP.search(host)
    # Skip trailing, leading and consecutive dots.
    host_split = [part for part in host.split('.') if part]
    if len(host_split) > 4:
      return None
    ip = []
    for i in xrange(len(host_split)):
      # Try hex first, then octal (when legal), then decimal.
      m = ExpressionGenerator.HEX.match(host_split[i])
      if m:
        base = 16
      else:
        m = ExpressionGenerator.OCT.match(host_split[i])
        if m and allow_octal:
          base = 8
        else:
          m = ExpressionGenerator.DEC.match(host_split[i])
          if m:
            base = 10
          else:
            return None
      n = long(m.group(1), base)
      if n > 255:
        if i < len(host_split) - 1:
          # An oversized non-final component wraps to its low byte.
          n &= 0xff
          ip.append(n)
        else:
          # The final component may encode several trailing bytes at once
          # (e.g. a single 32-bit number); split it into bytes.
          bytes = []
          shift = 0  # NOTE(review): unused; kept for byte-identical code.
          while n > 0 and len(bytes) < 4:
            bytes.append(n & 0xff)
            n >>= 8
          if len(ip) + len(bytes) > 4:
            return None
          bytes.reverse()
          ip.extend(bytes)
      else:
        ip.append(n)
    # Pad with zero bytes up to a full 4-byte address.
    while len(ip) < 4:
      ip.append(0)
    return '%u.%u.%u.%u' % tuple(ip)

  def Expressions(self):
    """
    A generator of the possible expressions.
    """
    # Cross product of every host suffix with every path prefix.
    for host_parts in self._host_lists:
      host = '.'.join(host_parts)
      for p in self._path_exprs:
        yield Expression(host, p)

  @staticmethod
  def _Escape(unescaped_str):
    """Fully unescape the given string, then re-escape once.

    Args:
      unescaped_str: string that should be escaped.

    Returns:
      Escaped string according to the SafeBrowsing protocol.
    """
    # Unquote repeatedly until a fixed point so that doubly-escaped input
    # (e.g. '%2561' -> '%61' -> 'a') is fully decoded before the single
    # re-escape below.
    unquoted = urllib.unquote(unescaped_str)
    while unquoted != unescaped_str:
      unescaped_str = unquoted
      unquoted = urllib.unquote(unquoted)
    return urllib.quote(unquoted, ExpressionGenerator.SAFE_CHARS)

  def _MakeHostLists(self, host, parse_exception):
    """
    Canonicalize host and build self._host_lists.

    Raises:
      parse_exception (the caller-provided UrlParseError) if the host has
      fewer than two components and is not an IP.
    """
    ip = ExpressionGenerator.CanonicalizeIp(host)
    if ip is not None:
      # Is an IP.
      self._host_lists.append([ip])
      return
    # Is a hostname.
    # Skip trailing, leading and consecutive dots.
    host_split = [part for part in host.split('.') if part]
    if len(host_split) < 2:
      raise parse_exception
    # Besides the full host, keep suffixes of at most the last 5 components,
    # each retaining at least the final 2 components.
    start = len(host_split) - 5
    stop = len(host_split) - 1
    if start <= 0:
      start = 1
    self._host_lists.append(host_split)
    for i in xrange(start, stop):
      self._host_lists.append(host_split[i:])
class Expression(object):
  """A single host-suffix, path-prefix SafeBrowsing expression."""

  def __init__(self, host, path):
    self._host = host
    self._path = path
    # The raw expression is simply host followed by path.
    self._value = '%s%s' % (host, path)
    self._hash_value = util.GetHash256(self._value)

  def __str__(self):
    return self.Value()

  def __repr__(self):
    # Debug-friendly representation; intentionally mirrors __str__.
    return self.Value()

  def Value(self):
    """Return the raw host + path expression string."""
    return self._value

  def HashValue(self):
    """Return the hash digest of the expression string."""
    return self._hash_value
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RBF code."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
# Maximum number of transactions a single replacement may evict; mirrors the
# node-side policy limit exercised by the "too many potential replacements"
# checks below.
MAX_REPLACEMENT_LIMIT = 100
def txToHex(tx):
    """Serialize a transaction object and return it as a hex string."""
    raw = tx.serialize()
    return bytes_to_hex_str(raw)
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
    """Create and return a COutPoint for a txout with a given amount/scriptPubKey.

    Mines coins as needed.

    confirmed - txouts created will be confirmed in the blockchain;
                unconfirmed otherwise.
    """
    fee = 1*COIN
    # Mine until the wallet balance covers amount + fee.
    while node.getbalance() < satoshi_round((amount + fee)/COIN):
        node.generate(100)
    new_addr = node.getnewaddress()
    funding_txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
    funding_tx = node.getrawtransaction(funding_txid, 1)
    # Locate the funding output that pays our fresh address.
    vout_index = None
    for vout_index, txout in enumerate(funding_tx['vout']):
        if txout['scriptPubKey']['addresses'] == [new_addr]:
            break
    assert vout_index is not None
    # Spend the funding output into a txout with exactly the requested
    # amount and scriptPubKey.
    spend_tx = CTransaction()
    spend_tx.vin = [CTxIn(COutPoint(int(funding_txid, 16), vout_index))]
    spend_tx.vout = [CTxOut(amount, scriptPubKey)]
    spend_tx.rehash()
    signed = node.signrawtransaction(txToHex(spend_tx))
    final_txid = node.sendrawtransaction(signed['hex'], True)
    # If requested, mine until the mempool drains so the txout is confirmed.
    if confirmed:
        mempool_size = len(node.getrawmempool())
        while mempool_size > 0:
            node.generate(1)
            new_size = len(node.getrawmempool())
            # Error out if we have something stuck in the mempool, as this
            # would likely be a bug.
            assert new_size < mempool_size
            mempool_size = new_size
    return COutPoint(int(final_txid, 16), 0)
class ReplaceByFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args= [["-maxorphantx=1000",
"-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101"],
["-mempoolreplacement=0"]]
def run_test(self):
# Leave IBD
self.nodes[0].generate(1)
make_utxo(self.nodes[0], 1*COIN)
# Ensure nodes are synced
self.sync_all()
self.log.info("Running test simple doublespend...")
self.test_simple_doublespend()
self.log.info("Running test doublespend chain...")
self.test_doublespend_chain()
self.log.info("Running test doublespend tree...")
self.test_doublespend_tree()
self.log.info("Running test replacement feeperkb...")
self.test_replacement_feeperkb()
self.log.info("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
self.log.info("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
self.log.info("Running test too many replacements...")
self.test_too_many_replacements()
self.log.info("Running test opt-in...")
self.test_opt_in()
self.log.info("Running test RPC...")
self.test_rpc()
self.log.info("Running test prioritised transactions...")
self.test_prioritised_transactions()
self.log.info("Passed")
    def test_simple_doublespend(self):
        """Simple doublespend: same fee rejected, higher fee accepted on node0 only."""
        tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
        # make_utxo may have generated a bunch of blocks, so we need to sync
        # before we can spend the coins generated, or else the resulting
        # transactions might not be accepted by our peers.
        self.sync_all()
        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
        self.sync_all()
        # Should fail because we haven't changed the fee
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
        tx1b_hex = txToHex(tx1b)
        # This will raise an exception due to insufficient fee
        assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
        # This will raise an exception due to transaction replacement being disabled
        assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
        # Extra 0.1 BTC fee
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
        tx1b_hex = txToHex(tx1b)
        # Replacement still disabled even with "enough fee"
        assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
        # Works when enabled
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        # On node0 the replacement must have evicted the original.
        mempool = self.nodes[0].getrawmempool()
        assert (tx1a_txid not in mempool)
        assert (tx1b_txid in mempool)
        assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
        # Second node is running mempoolreplacement=0, will not replace originally-seen txn
        mempool = self.nodes[1].getrawmempool()
        assert tx1a_txid in mempool
        assert tx1b_txid not in mempool
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1]))]
tx_hex = txToHex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 BTC - so this attempt is rejected.
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert(doublespent_txid not in mempool)
    def test_doublespend_tree(self):
        """Doublespend of a big tree of transactions"""
        initial_nValue = 50*COIN
        tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
        def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
            # Recursive generator: fan prevout out into tree_width children,
            # broadcast each tx and yield it, stopping once max_txs
            # transactions have been created overall. _total_txs is a shared
            # one-element list used as a mutable counter across the recursion.
            if _total_txs is None:
                _total_txs = [0]
            if _total_txs[0] >= max_txs:
                return
            txout_value = (initial_value - fee) // tree_width
            if txout_value < fee:
                # Not enough value left to keep splitting.
                return
            vout = [CTxOut(txout_value, CScript([i+1]))
                    for i in range(tree_width)]
            tx = CTransaction()
            tx.vin = [CTxIn(prevout, nSequence=0)]
            tx.vout = vout
            tx_hex = txToHex(tx)
            assert(len(tx.serialize()) < 100000)
            txid = self.nodes[0].sendrawtransaction(tx_hex, True)
            yield tx
            _total_txs[0] += 1
            txid = int(txid, 16)
            # Recurse into each of this tx's outputs.
            for i, txout in enumerate(tx.vout):
                for x in branch(COutPoint(txid, i), txout_value,
                                max_txs,
                                tree_width=tree_width, fee=fee,
                                _total_txs=_total_txs):
                    yield x
        fee = int(0.0001*COIN)
        n = MAX_REPLACEMENT_LIMIT
        tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
        assert_equal(len(tree_txs), n)
        # Attempt double-spend, will fail because too little fee paid
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        # This will raise an exception due to insufficient fee
        assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
        # 1 BTC fee is enough
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
        # Every transaction in the tree must have been evicted.
        mempool = self.nodes[0].getrawmempool()
        for tx in tree_txs:
            tx.rehash()
            assert (tx.hash not in mempool)
        # Try again, but with more total transactions than the "max txs
        # double-spent at once" anti-DoS limit.
        for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
            fee = int(0.0001*COIN)
            tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
            tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
            assert_equal(len(tree_txs), n)
            dbl_tx = CTransaction()
            dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
            dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
            dbl_tx_hex = txToHex(dbl_tx)
            # This will raise an exception
            assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
            # The tree must be untouched: every tx still retrievable.
            for tx in tree_txs:
                tx.rehash()
                self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
tx1b_hex = txToHex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
    def test_spends_of_conflicting_outputs(self):
        """Replacements that spend conflicting tx outputs are rejected.

        Covers both the direct case (the replacement itself spends an output
        of the tx it conflicts with) and the indirect case (it spends an
        output of a child of the conflicting tx).
        """
        utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
        utxo2 = make_utxo(self.nodes[0], 3*COIN)
        tx1a = CTransaction()
        tx1a.vin = [CTxIn(utxo1, nSequence=0)]
        tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
        tx1a_txid = int(tx1a_txid, 16)
        # Direct spend an output of the transaction we're replacing.
        tx2 = CTransaction()
        tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
        tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
        tx2.vout = tx1a.vout
        tx2_hex = txToHex(tx2)
        # This will raise an exception
        assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
        # Spend tx1a's output to test the indirect case.
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
        tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1b_hex = txToHex(tx1b)
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        tx1b_txid = int(tx1b_txid, 16)
        # The replacement conflicts with tx1a but spends grandchild tx1b's
        # output -- still rejected.
        tx2 = CTransaction()
        tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
                   CTxIn(COutPoint(tx1b_txid, 0))]
        tx2.vout = tx1a.vout
        tx2_hex = txToHex(tx2)
        # This will raise an exception
        assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1_hex = txToHex(tx1)
self.nodes[0].sendrawtransaction(tx1_hex, True)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True)
    def test_too_many_replacements(self):
        """Replacements that evict too many transactions are rejected"""
        # Try directly replacing more than MAX_REPLACEMENT_LIMIT
        # transactions
        # Start by creating a single transaction with many outputs
        initial_nValue = 10*COIN
        utxo = make_utxo(self.nodes[0], initial_nValue)
        fee = int(0.0001*COIN)
        split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
        outputs = []
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            outputs.append(CTxOut(split_value, CScript([1])))
        splitting_tx = CTransaction()
        splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
        splitting_tx.vout = outputs
        splitting_tx_hex = txToHex(splitting_tx)
        txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
        txid = int(txid, 16)
        # Now spend each of those outputs individually
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            tx_i = CTransaction()
            tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
            tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
            tx_i_hex = txToHex(tx_i)
            self.nodes[0].sendrawtransaction(tx_i_hex, True)
        # Now create doublespend of the whole lot; should fail.
        # Need a big enough fee to cover all spending transactions and have
        # a higher fee rate
        double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
        inputs = []
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
        double_tx = CTransaction()
        double_tx.vin = inputs
        double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
        double_tx_hex = txToHex(double_tx)
        # This will raise an exception: MAX_REPLACEMENT_LIMIT+1 direct
        # conflicts exceeds the anti-DoS eviction limit.
        assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True)
        # If we remove an input, it should pass
        double_tx = CTransaction()
        double_tx.vin = inputs[0:-1]
        double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
        double_tx_hex = txToHex(double_tx)
        self.nodes[0].sendrawtransaction(double_tx_hex, True)
    def test_opt_in(self):
        """Replacing should only work if orig tx opted in.

        A transaction signals replaceability by having at least one input
        with nSequence below 0xfffffffe; both 0xffffffff and 0xfffffffe are
        non-signaling and must block replacement.
        """
        tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
        # Create a non-opting in transaction
        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
        tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
        # Shouldn't be able to double-spend
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
        tx1b_hex = txToHex(tx1b)
        # This will raise an exception
        assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True)
        tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
        # Create a different non-opting in transaction
        # (0xfffffffe is still above the opt-in threshold)
        tx2a = CTransaction()
        tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
        tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx2a_hex = txToHex(tx2a)
        tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
        # Still shouldn't be able to double-spend
        tx2b = CTransaction()
        tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
        tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
        tx2b_hex = txToHex(tx2b)
        # This will raise an exception
        assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True)
        # Now create a new transaction that spends from tx1a and tx2a
        # opt-in on one of the inputs
        # Transaction should be replaceable on either input
        tx1a_txid = int(tx1a_txid, 16)
        tx2a_txid = int(tx2a_txid, 16)
        tx3a = CTransaction()
        tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
                    CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
        tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))]
        tx3a_hex = txToHex(tx3a)
        self.nodes[0].sendrawtransaction(tx3a_hex, True)
        # Replacement spending only tx1a's output (the non-signaling input of
        # tx3a) must still be allowed, since tx3a as a whole opted in.
        tx3b = CTransaction()
        tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
        tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))]
        tx3b_hex = txToHex(tx3b)
        tx3c = CTransaction()
        tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
        tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))]
        tx3c_hex = txToHex(tx3c)
        self.nodes[0].sendrawtransaction(tx3b_hex, True)
        # If tx3b was accepted, tx3c won't look like a replacement,
        # but make sure it is accepted anyway
        self.nodes[0].sendrawtransaction(tx3c_hex, True)
def test_prioritised_transactions(self):
    """Verify RBF honors fee deltas applied via prioritisetransaction."""
    # Ensure that fee deltas used via prioritisetransaction are
    # correctly used by replacement logic

    # 1. Check that feeperkb uses modified fees
    tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))

    tx1a = CTransaction()
    tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
    tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
    tx1a_hex = txToHex(tx1a)
    tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)

    # Higher fee, but the actual fee per KB is much lower.
    tx1b = CTransaction()
    tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
    # Large scriptPubKey pads the tx so its feerate drops below tx1a's.
    tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))]
    tx1b_hex = txToHex(tx1b)

    # Verify tx1b cannot replace tx1a.
    assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)

    # Use prioritisetransaction to set tx1a's fee to 0.
    self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1*COIN))

    # Now tx1b should be able to replace tx1a
    tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
    assert(tx1b_txid in self.nodes[0].getrawmempool())

    # 2. Check that absolute fee checks use modified fee.
    tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))

    tx2a = CTransaction()
    tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
    tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
    tx2a_hex = txToHex(tx2a)
    self.nodes[0].sendrawtransaction(tx2a_hex, True)

    # Lower fee, but we'll prioritise it
    tx2b = CTransaction()
    tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
    tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))]
    tx2b.rehash()
    tx2b_hex = txToHex(tx2b)

    # Verify tx2b cannot replace tx2a.
    assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True)

    # Now prioritise tx2b to have a higher modified fee
    self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1*COIN))

    # tx2b should now be accepted
    tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
    assert(tx2b_txid in self.nodes[0].getrawmempool())
def test_rpc(self):
    """Verify the replaceable flag in createrawtransaction/fundrawtransaction.

    Opt-in RBF inputs are given nSequence 0xfffffffd (4294967293);
    non-replaceable inputs use 0xffffffff / 0xfffffffe.
    """
    us0 = self.nodes[0].listunspent()[0]
    ins = [us0]
    outs = {self.nodes[0].getnewaddress() : Decimal(1.0000000)}
    rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True)
    rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False)
    json0 = self.nodes[0].decoderawtransaction(rawtx0)
    json1 = self.nodes[0].decoderawtransaction(rawtx1)
    assert_equal(json0["vin"][0]["sequence"], 4294967293)
    assert_equal(json1["vin"][0]["sequence"], 4294967295)

    # fundrawtransaction's "replaceable" option should control the same flag.
    rawtx2 = self.nodes[0].createrawtransaction([], outs)
    frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True})
    frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False})

    json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex'])
    json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex'])
    assert_equal(json0["vin"][0]["sequence"], 4294967293)
    assert_equal(json1["vin"][0]["sequence"], 4294967294)
if __name__ == '__main__':
    # Run the replace-by-fee functional test when executed directly.
    ReplaceByFeeTest().main()
|
|
"""Support for Konnected devices."""
import asyncio
import copy
import hmac
import json
import logging
from aiohttp.hdrs import AUTHORIZATION
from aiohttp.web import Request, Response
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.components.http import HomeAssistantView
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_ACCESS_TOKEN,
CONF_BINARY_SENSORS,
CONF_DEVICES,
CONF_DISCOVERY,
CONF_HOST,
CONF_ID,
CONF_NAME,
CONF_PIN,
CONF_PORT,
CONF_REPEAT,
CONF_SENSORS,
CONF_SWITCHES,
CONF_TYPE,
CONF_ZONE,
HTTP_BAD_REQUEST,
HTTP_NOT_FOUND,
HTTP_UNAUTHORIZED,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv
from .config_flow import ( # Loading the config flow file will register the flow
CONF_DEFAULT_OPTIONS,
CONF_IO,
CONF_IO_BIN,
CONF_IO_DIG,
CONF_IO_SWI,
OPTIONS_SCHEMA,
)
from .const import (
CONF_ACTIVATION,
CONF_API_HOST,
CONF_BLINK,
CONF_INVERSE,
CONF_MOMENTARY,
CONF_PAUSE,
CONF_POLL_INTERVAL,
DOMAIN,
PIN_TO_ZONE,
STATE_HIGH,
STATE_LOW,
UNDO_UPDATE_LISTENER,
UPDATE_ENDPOINT,
ZONE_TO_PIN,
ZONES,
)
from .handlers import HANDLERS
from .panel import AlarmPanel
_LOGGER = logging.getLogger(__name__)
def ensure_pin(value):
    """Validate that *value* names a known Konnected pin; return it as str.

    Raises:
        vol.Invalid: if value is None or not a key of PIN_TO_ZONE.
    """
    if value is None:
        raise vol.Invalid("pin value is None")
    pin = str(value)
    if pin not in PIN_TO_ZONE:
        raise vol.Invalid("pin not valid")
    return pin
def ensure_zone(value):
    """Validate that *value* names a known Konnected zone; return it as str.

    Raises:
        vol.Invalid: if value is None or not one of the known ZONES.
    """
    if value is None:
        raise vol.Invalid("zone value is None")
    # BUG FIX: the original `if str(value) not in ZONES is None:` is a
    # chained comparison equivalent to
    # `(str(value) not in ZONES) and (ZONES is None)`, which is always
    # False because ZONES is a constant collection — invalid zone ids were
    # silently accepted instead of raising.
    if str(value) not in ZONES:
        raise vol.Invalid("zone not valid")
    return str(value)
def import_device_validator(config):
    """Validate zones and reformat a YAML device config for import.

    Converts legacy pin addressing to zone addressing, builds the
    default_options structure, and strips the fields that moved into options.
    """
    device_config = copy.deepcopy(config)
    zone_io_map = {}

    # Replace legacy pin references with their zone equivalents and record
    # each zone's IO type for the options schema.
    for platform_key, io_type in (
        (CONF_BINARY_SENSORS, CONF_IO_BIN),
        (CONF_SENSORS, CONF_IO_DIG),
        (CONF_SWITCHES, CONF_IO_SWI),
    ):
        for zone_cfg in device_config.get(platform_key, []):
            if zone_cfg.get(CONF_PIN):
                zone_cfg[CONF_ZONE] = PIN_TO_ZONE[zone_cfg[CONF_PIN]]
                del zone_cfg[CONF_PIN]
            zone_io_map[zone_cfg[CONF_ZONE]] = io_type

    # Migrate config_entry data into default_options structure
    device_config[CONF_IO] = zone_io_map
    device_config[CONF_DEFAULT_OPTIONS] = OPTIONS_SCHEMA(device_config)

    # clean up fields migrated to options
    for migrated_key in (
        CONF_BINARY_SENSORS,
        CONF_SENSORS,
        CONF_SWITCHES,
        CONF_BLINK,
        CONF_DISCOVERY,
        CONF_API_HOST,
        CONF_IO,
    ):
        device_config.pop(migrated_key, None)

    return device_config
def import_validator(config):
    """Reformat the YAML config for import.

    Copies the top-level api_host down into each device config so each
    device can be imported as an independent config entry.
    """
    full_config = copy.deepcopy(config)
    api_host = full_config.get(CONF_API_HOST, "")
    for device_config in full_config.get(CONF_DEVICES, []):
        device_config[CONF_API_HOST] = api_host
    return full_config
# configuration.yaml schemas (legacy)
# A zone and a pin are mutually exclusive ways to address an IO point;
# every entry must supply at least one of them.
BINARY_SENSOR_SCHEMA_YAML = vol.All(
    vol.Schema(
        {
            vol.Exclusive(CONF_ZONE, "s_io"): ensure_zone,
            vol.Exclusive(CONF_PIN, "s_io"): ensure_pin,
            vol.Required(CONF_TYPE): DEVICE_CLASSES_SCHEMA,
            vol.Optional(CONF_NAME): cv.string,
            vol.Optional(CONF_INVERSE, default=False): cv.boolean,
        }
    ),
    cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)

# Digital sensors: only DHT and DS18B20 probe types are supported.
SENSOR_SCHEMA_YAML = vol.All(
    vol.Schema(
        {
            vol.Exclusive(CONF_ZONE, "s_io"): ensure_zone,
            vol.Exclusive(CONF_PIN, "s_io"): ensure_pin,
            vol.Required(CONF_TYPE): vol.All(vol.Lower, vol.In(["dht", "ds18b20"])),
            vol.Optional(CONF_NAME): cv.string,
            vol.Optional(CONF_POLL_INTERVAL, default=3): vol.All(
                vol.Coerce(int), vol.Range(min=1)
            ),
        }
    ),
    cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)

# Switch/actuator outputs, with optional momentary/pause/repeat pulsing.
SWITCH_SCHEMA_YAML = vol.All(
    vol.Schema(
        {
            vol.Exclusive(CONF_ZONE, "s_io"): ensure_zone,
            vol.Exclusive(CONF_PIN, "s_io"): ensure_pin,
            vol.Optional(CONF_NAME): cv.string,
            vol.Optional(CONF_ACTIVATION, default=STATE_HIGH): vol.All(
                vol.Lower, vol.Any(STATE_HIGH, STATE_LOW)
            ),
            vol.Optional(CONF_MOMENTARY): vol.All(vol.Coerce(int), vol.Range(min=10)),
            vol.Optional(CONF_PAUSE): vol.All(vol.Coerce(int), vol.Range(min=10)),
            vol.Optional(CONF_REPEAT): vol.All(vol.Coerce(int), vol.Range(min=-1)),
        }
    ),
    cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)

# One panel device; the 12-hex-digit id is the device's MAC address.
# import_device_validator reformats this for the config-entry import flow.
DEVICE_SCHEMA_YAML = vol.All(
    vol.Schema(
        {
            vol.Required(CONF_ID): cv.matches_regex("[0-9a-f]{12}"),
            vol.Optional(CONF_BINARY_SENSORS): vol.All(
                cv.ensure_list, [BINARY_SENSOR_SCHEMA_YAML]
            ),
            vol.Optional(CONF_SENSORS): vol.All(cv.ensure_list, [SENSOR_SCHEMA_YAML]),
            vol.Optional(CONF_SWITCHES): vol.All(cv.ensure_list, [SWITCH_SCHEMA_YAML]),
            vol.Inclusive(CONF_HOST, "host_info"): cv.string,
            vol.Inclusive(CONF_PORT, "host_info"): cv.port,
            vol.Optional(CONF_BLINK, default=True): cv.boolean,
            vol.Optional(CONF_API_HOST, default=""): vol.Any("", cv.url),
            vol.Optional(CONF_DISCOVERY, default=True): cv.boolean,
        }
    ),
    import_device_validator,
)

# pylint: disable=no-value-for-parameter
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            import_validator,
            vol.Schema(
                {
                    vol.Required(CONF_ACCESS_TOKEN): cv.string,
                    vol.Optional(CONF_API_HOST): vol.Url(),
                    vol.Optional(CONF_DEVICES): vol.All(
                        cv.ensure_list, [DEVICE_SCHEMA_YAML]
                    ),
                }
            ),
        )
    },
    extra=vol.ALLOW_EXTRA,
)

YAML_CONFIGS = "yaml_configs"
PLATFORMS = ["binary_sensor", "sensor", "switch"]
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Konnected platform.

    Registers the push-update HTTP view and imports any YAML-configured
    devices as config entries.
    """
    cfg = config.get(DOMAIN)
    if cfg is None:
        cfg = {}

    if DOMAIN not in hass.data:
        hass.data[DOMAIN] = {
            CONF_ACCESS_TOKEN: cfg.get(CONF_ACCESS_TOKEN),
            CONF_API_HOST: cfg.get(CONF_API_HOST),
            CONF_DEVICES: {},
        }

    hass.http.register_view(KonnectedView)

    # Check if they have yaml configured devices
    if CONF_DEVICES not in cfg:
        return True

    for device in cfg.get(CONF_DEVICES, []):
        # Import each YAML device via the config-entry flow. Scheduling with
        # hass.async_create_task (rather than awaiting inline) avoids a
        # deadlock during setup.
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=device
            )
        )

    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up panel from a config entry."""
    client = AlarmPanel(hass, entry)
    # creates a panel data store in hass.data[DOMAIN][CONF_DEVICES]
    await client.async_save_data()

    # if the cfg entry was created we know we could connect to the panel at
    # some point; async_connect will handle retries until it establishes a
    # connection
    await client.async_connect()

    for platform in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, platform)
        )

    # config entry specific data to enable unload; the stored callback
    # detaches the options-update listener in async_unload_entry
    hass.data[DOMAIN][entry.entry_id] = {
        UNDO_UPDATE_LISTENER: entry.add_update_listener(async_entry_updated)
    }
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry."""
    # Unload every platform concurrently; the entry unloads cleanly only
    # when all platforms do.
    unload_tasks = [
        hass.config_entries.async_forward_entry_unload(entry, platform)
        for platform in PLATFORMS
    ]
    results = await asyncio.gather(*unload_tasks)
    unload_ok = all(results)

    # Detach the options-update listener registered in async_setup_entry.
    hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()

    if unload_ok:
        hass.data[DOMAIN][CONF_DEVICES].pop(entry.data[CONF_ID])
        hass.data[DOMAIN].pop(entry.entry_id)

    return unload_ok
async def async_entry_updated(hass: HomeAssistant, entry: ConfigEntry):
    """Reload the config entry when options change."""
    # Full reload is the simplest way to apply new options to the panel.
    await hass.config_entries.async_reload(entry.entry_id)
class KonnectedView(HomeAssistantView):
    """View creates an endpoint to receive push updates from the device."""

    url = UPDATE_ENDPOINT
    name = "api:konnected"
    requires_auth = False  # Uses access token from configuration

    def __init__(self):
        """Initialize the view."""

    @staticmethod
    def binary_value(state, activation):
        """Return binary value for GPIO based on state and activation."""
        if activation == STATE_HIGH:
            return 1 if state == STATE_ON else 0
        return 0 if state == STATE_ON else 1

    async def update_sensor(self, request: Request, device_id) -> Response:
        """Process a put or post.

        Validates the bearer token, resolves the addressed zone and
        dispatches the payload to the matching attribute handler.
        """
        hass = request.app["hass"]
        data = hass.data[DOMAIN]

        # Valid tokens: the YAML-configured one (if any) plus the token of
        # every config entry.
        auth = request.headers.get(AUTHORIZATION)
        tokens = []
        if hass.data[DOMAIN].get(CONF_ACCESS_TOKEN):
            tokens.extend([hass.data[DOMAIN][CONF_ACCESS_TOKEN]])
        tokens.extend(
            [
                entry.data[CONF_ACCESS_TOKEN]
                for entry in hass.config_entries.async_entries(DOMAIN)
                if entry.data.get(CONF_ACCESS_TOKEN)
            ]
        )
        # Constant-time comparison against every known token.
        if auth is None or not next(
            (True for token in tokens if hmac.compare_digest(f"Bearer {token}", auth)),
            False,
        ):
            return self.json_message("unauthorized", status_code=HTTP_UNAUTHORIZED)

        try:  # Konnected 2.2.0 and above supports JSON payloads
            payload = await request.json()
        except json.decoder.JSONDecodeError:
            _LOGGER.error(
                "Your Konnected device software may be out of "
                "date. Visit https://help.konnected.io for "
                "updating instructions"
            )
            # BUG FIX: the original handler only logged and fell through,
            # leaving `payload` unbound and raising UnboundLocalError on the
            # next access. Reject the malformed request explicitly instead.
            return self.json_message(
                "unable to parse JSON payload", status_code=HTTP_BAD_REQUEST
            )

        device = data[CONF_DEVICES].get(device_id)
        if device is None:
            return self.json_message(
                "unregistered device", status_code=HTTP_BAD_REQUEST
            )

        panel = device.get("panel")
        if panel is not None:
            # connect if we haven't already
            hass.async_create_task(panel.async_connect())

        try:
            # The device may address an update by zone (newer firmware) or by
            # pin (older firmware); normalize to a zone id before lookup.
            zone_num = str(payload.get(CONF_ZONE) or PIN_TO_ZONE[payload[CONF_PIN]])
            payload[CONF_ZONE] = zone_num
            zone_data = (
                device[CONF_BINARY_SENSORS].get(zone_num)
                or next(
                    (s for s in device[CONF_SWITCHES] if s[CONF_ZONE] == zone_num), None
                )
                or next(
                    (s for s in device[CONF_SENSORS] if s[CONF_ZONE] == zone_num), None
                )
            )
        except KeyError:
            zone_data = None

        if zone_data is None:
            return self.json_message(
                "unregistered sensor/actuator", status_code=HTTP_BAD_REQUEST
            )

        zone_data["device_id"] = device_id

        # Dispatch each recognized payload attribute to its handler.
        for attr in ["state", "temp", "humi", "addr"]:
            value = payload.get(attr)
            handler = HANDLERS.get(attr)
            if value is not None and handler:
                hass.async_create_task(handler(hass, zone_data, payload))

        return self.json_message("ok")

    async def get(self, request: Request, device_id) -> Response:
        """Return the current binary state of a switch."""
        hass = request.app["hass"]
        data = hass.data[DOMAIN]

        device = data[CONF_DEVICES].get(device_id)
        if not device:
            return self.json_message(
                f"Device {device_id} not configured", status_code=HTTP_NOT_FOUND
            )

        panel = device.get("panel")
        if panel is not None:
            # connect if we haven't already
            hass.async_create_task(panel.async_connect())

        # Our data model is based on zone ids but we convert from/to pin ids
        # based on whether they are specified in the request
        try:
            zone_num = str(
                request.query.get(CONF_ZONE) or PIN_TO_ZONE[request.query[CONF_PIN]]
            )
            zone = next(
                switch
                for switch in device[CONF_SWITCHES]
                if switch[CONF_ZONE] == zone_num
            )
        except StopIteration:
            # Zone id was valid but no switch is configured on it.
            zone = None
        except KeyError:
            # Neither a zone nor a known pin was supplied.
            zone = None
            zone_num = None

        if not zone:
            target = request.query.get(
                CONF_ZONE, request.query.get(CONF_PIN, "unknown")
            )
            return self.json_message(
                f"Switch on zone or pin {target} not configured",
                status_code=HTTP_NOT_FOUND,
            )

        # Answer in the same addressing scheme the caller used.
        resp = {}
        if request.query.get(CONF_ZONE):
            resp[CONF_ZONE] = zone_num
        else:
            resp[CONF_PIN] = ZONE_TO_PIN[zone_num]

        # Make sure entity is setup
        zone_entity_id = zone.get(ATTR_ENTITY_ID)
        if zone_entity_id:
            resp["state"] = self.binary_value(
                hass.states.get(zone_entity_id).state, zone[CONF_ACTIVATION]
            )
            return self.json(resp)

        _LOGGER.warning("Konnected entity not yet setup, returning default")
        resp["state"] = self.binary_value(STATE_OFF, zone[CONF_ACTIVATION])
        return self.json(resp)

    async def put(self, request: Request, device_id) -> Response:
        """Receive a sensor update via PUT request and async set state."""
        return await self.update_sensor(request, device_id)

    async def post(self, request: Request, device_id) -> Response:
        """Receive a sensor update via POST request and async set state."""
        return await self.update_sensor(request, device_id)
|
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""settings module tests."""
import os
import random
import re
import sys
import types
import mox
import stubout
from google.apputils import app
from google.apputils import basetest
# Set an environment variable so the settings module knows when it's being
# tested directly, versus used for testing other modules.
os.environ['____TESTING_SETTINGS_MODULE'] = 'yes'
from simian import settings
# Remove the marker immediately so it cannot leak into other test runs.
del(os.environ['____TESTING_SETTINGS_MODULE'])
class SettingsModuleTest(mox.MoxTestBase):
  """Tests for the module-level flags exposed by the settings module."""

  def setUp(self):
    mox.MoxTestBase.setUp(self)
    self.stubs = stubout.StubOutForTesting()

  def tearDown(self):
    self.mox.UnsetStubs()
    self.stubs.UnsetAll()

  def testConstants(self):
    """The module was imported under ____TESTING_SETTINGS_MODULE.

    So the testing flags should be on and the GAE/runtime flags off.
    """
    self.assertTrue(hasattr(settings, 'GAE'))
    self.assertTrue(hasattr(settings, 'DEV_APPSERVER'))
    self.assertTrue(hasattr(settings, 'DEBUG'))
    self.assertTrue(hasattr(settings, 'TESTING'))
    self.assertTrue(hasattr(settings, 'SETTINGS_TESTING'))
    self.assertFalse(settings.GAE)
    self.assertFalse(settings.DEV_APPSERVER)
    self.assertFalse(settings.DEBUG)
    self.assertTrue(settings.TESTING)
    self.assertTrue(settings.SETTINGS_TESTING)
class BaseSettingsTestBase(mox.MoxTestBase):
  """Base test class for all BaseSettings derived class tests."""

  def setUp(self):
    mox.MoxTestBase.setUp(self)
    self.stubs = stubout.StubOutForTesting()

    # The base class itself has no class under test; only subclasses
    # build a settings instance.
    if self.__class__.__name__ == 'BaseSettingsTestBase':
      return

    self.module = self._GenerateModule()
    self.settings_class = self._GetSettingsClassUnderTest()

    # Derive a class from this class that plugs in the test _Globals()
    # function. This makes testing more predictable as the contents
    # of the settings module may change at any time.
    class DerivedSettingsClass(self.settings_class):
      def _Globals(xself):  # pylint: disable=no-self-argument
        """Returns globals dict like globals()."""
        return self._Globals()

    self.settings = DerivedSettingsClass(self.module)

  def tearDown(self):
    self.mox.UnsetStubs()
    self.stubs.UnsetAll()

  def _GetSettingsClassUnderTest(self):
    """Override to return the class under test."""
    raise NotImplementedError

  def _Globals(self):
    """Returns globals dict like globals()."""
    return globals()

  def _GenerateModule(self):
    """Return a module instance to pass to the settings class under test."""
    self.module_name = self._GetSettingsClassUnderTest().__name__
    return types.ModuleType(self.module_name)

  def _TestNotImplemented(self, method_name, *args, **kwargs):
    """Helper function to test NotImplementedError on a method.

    Args:
      method_name: str, method name on self.settings to call
      args: args to pass
      kwargs: kwargs to pass
    """
    self.assertRaises(
        NotImplementedError,
        getattr(self.settings, method_name),
        *args,
        **kwargs)
class BaseSettingsTest(BaseSettingsTestBase):
  """Test BaseSettings."""

  def _GetSettingsClassUnderTest(self):
    return settings.BaseSettings

  def testInitialize(self):
    """Test _Initialize()."""
    # BaseSettings._Initialize is a no-op hook; nothing to assert.

  def testPopulateGlobal(self):
    """Test _PopulateGlobals()."""
    global_vars = {
        'FOO': 1,
        'bar': 2,
    }
    self.mox.StubOutWithMock(self.settings, '_Set')
    globals_ = self.mox.CreateMockAnything()
    globals_().AndReturn(global_vars)
    globals_().AndReturn(global_vars)
    globals_().AndReturn(global_vars)
    # Only the upper-case name is propagated (lower-cased); 'bar' is skipped.
    self.settings._Set('foo', 1)
    self.mox.ReplayAll()
    self.settings._PopulateGlobals(globals_=globals_)
    self.mox.VerifyAll()

  def testPopulateGlobalWithSetFunc(self):
    """Test _PopulateGlobals() with set_func."""
    global_vars = {
        'FOO': 1,
        'bar': 2,
    }
    set_func = self.mox.CreateMockAnything()
    globals_ = self.mox.CreateMockAnything()
    globals_().AndReturn(global_vars)
    globals_().AndReturn(global_vars)
    globals_().AndReturn(global_vars)
    # When a set_func is supplied it is used instead of _Set.
    set_func('foo', 1)
    self.mox.ReplayAll()
    self.settings._PopulateGlobals(set_func=set_func, globals_=globals_)
    self.mox.VerifyAll()

  def testGet(self):
    """Test _Get()."""
    self._TestNotImplemented('_Get', 'k')

  def testSet(self):
    """Test _Set()."""
    self._TestNotImplemented('_Set', 'k', 'v')

  def testDir(self):
    """Test _Dir()."""
    self._TestNotImplemented('_Dir')

  def testCheckValueRegex(self):
    """Test _CheckValueRegex()."""
    # Both a pattern string and a precompiled regex must be accepted.
    self.settings._CheckValueRegex('k', 'foo', '^foo$')
    self.settings._CheckValueRegex('k', 'foo', settings.re.compile('^foo$'))
    self.assertRaises(
        ValueError,
        self.settings._CheckValueRegex,
        'k', 'bar', '^foo$')

  def testCheckValueFunc(self):
    """Test _CheckValueFunc()."""
    func_foo = lambda k, v: v == 'foo'
    func_bar = lambda k, v: v == 'bar'
    # Non-callable validator -> TypeError; failing validator -> ValueError.
    self.assertRaises(
        TypeError, self.settings._CheckValueFunc, 'k', 'foo', 'not callable')
    self.settings._CheckValueFunc('k', 'foo', func_foo)
    self.assertRaises(
        ValueError, self.settings._CheckValueFunc, 'k', 'foo', func_bar)

  def testCheckValuePemX509Cert(self):
    """Test CheckValuePemX509Cert()."""
    k = 'k'
    pem_cert = 'pem'
    self.mox.StubOutWithMock(settings.x509, 'LoadCertificateFromPEM')
    settings.x509.LoadCertificateFromPEM(pem_cert).AndReturn('cert')
    self.mox.ReplayAll()
    # A parseable cert validates silently (returns None).
    self.assertTrue(self.settings.CheckValuePemX509Cert(k, pem_cert) is None)
    self.mox.VerifyAll()

  def testCheckValuePemX509CertWhenBadlyFormed(self):
    """Test CheckValuePemX509Cert() when the PEM cannot be parsed."""
    k = 'k'
    pem_cert = 'pem'
    self.mox.StubOutWithMock(settings.x509, 'LoadCertificateFromPEM')
    settings.x509.LoadCertificateFromPEM(pem_cert).AndRaise(
        settings.x509.Error)
    self.mox.ReplayAll()
    self.assertRaises(
        ValueError, self.settings.CheckValuePemX509Cert, k, pem_cert)
    self.mox.VerifyAll()

  def testCheckValuePemRsaPrivateKey(self):
    """Test CheckValuePemRsaPrivateKey()."""
    k = 'k'
    pem_cert = 'pem'
    self.mox.StubOutWithMock(settings.x509, 'LoadRSAPrivateKeyFromPEM')
    settings.x509.LoadRSAPrivateKeyFromPEM(pem_cert).AndReturn('key')
    self.mox.ReplayAll()
    self.assertTrue(
        self.settings.CheckValuePemRsaPrivateKey(k, pem_cert) is None)
    self.mox.VerifyAll()

  def testCheckValuePemRsaPrivateKeyWhenBadlyFormed(self):
    """Test CheckValuePemRsaPrivateKey() when the PEM cannot be parsed."""
    k = 'k'
    pem_cert = 'pem'
    self.mox.StubOutWithMock(settings.x509, 'LoadRSAPrivateKeyFromPEM')
    settings.x509.LoadRSAPrivateKeyFromPEM(pem_cert).AndRaise(
        settings.x509.Error)
    self.mox.ReplayAll()
    self.assertRaises(
        ValueError, self.settings.CheckValuePemRsaPrivateKey, k, pem_cert)
    self.mox.VerifyAll()

  def testCheckValidation(self):
    """Test _CheckValidation()."""
    # NOTE(review): _VALIDATION_REGEX appears to hold the name of the
    # checker method ('_CheckValueRegex'); the stub below relies on that.
    self.mox.StubOutWithMock(self.settings, self.settings._VALIDATION_REGEX)
    self.settings._validation = {
        'foo': {
            self.settings._VALIDATION_REGEX: ['^bar$'],
        }}
    self.settings._CheckValueRegex('foo', 'bar', '^bar$').AndReturn(None)
    self.mox.ReplayAll()
    # Keys without validation config pass through untouched.
    self.settings._CheckValidation('dne', 'other crap')
    self.settings._CheckValidation('foo', 'bar')
    self.mox.VerifyAll()

  def testSetValidation(self):
    """Test SetValidation()."""
    k = 'foo'
    v = 'arg'
    # An unknown validation type must be rejected.
    validation_type = self.settings._VALIDATION_TYPES[0] + 'junk'
    self.assertFalse(validation_type in self.settings._VALIDATION_TYPES)
    self.assertRaises(
        ValueError,
        self.settings._SetValidation, validation_type, 'junk')
    validation_type = self.settings._VALIDATION_TYPES[0]
    self.settings._SetValidation(k, validation_type, v)
    self.assertEqual(
        self.settings._validation,
        {k: {validation_type: (v,)}})

  def testGetValidationRegex(self):
    """Test GetValidationRegex()."""
    regex = 'REGEX'
    self.settings._validation = {
        'foo': {self.settings._VALIDATION_REGEX: [regex]},
        'bar': {},
    }
    # Missing key or missing regex config -> None; otherwise the pattern.
    self.assertTrue(self.settings.GetValidationRegex('dne') is None)
    self.assertTrue(self.settings.GetValidationRegex('bar') is None)
    self.assertEqual(self.settings.GetValidationRegex('foo'), regex)

  def testGetattr(self):
    """Test __getattr__()."""
    self.mox.StubOutWithMock(self.settings, '_Get')
    self.settings._Get('foo').AndReturn(2)
    self.settings._Get('dne').AndRaise(AttributeError('DNE'))
    self.settings._Get('dne').AndRaise(AttributeError('non conform'))
    self.mox.ReplayAll()
    # NOTE(review): expects BaseSettings to expose _is_class == 1 --
    # cannot be confirmed from this file.
    self.assertEqual(self.settings._is_class, 1)
    self.assertEqual(self.settings.foo, 2)
    self.assertRaises(
        AttributeError,
        getattr,
        self.settings,
        'DNE')
    self.assertRaises(
        AttributeError,
        getattr,
        self.settings,
        'DNE')
    # Underscore-prefixed names bypass _Get entirely.
    self.assertRaises(
        AttributeError,
        getattr,
        self.settings,
        '_foobar')
    self.mox.VerifyAll()

  def testSetattr(self):
    """Test __setattr__()."""
    self.mox.StubOutWithMock(self.settings, '_Set')
    self.settings._Set('foo', 2)
    self.mox.ReplayAll()
    # Upper-case names route through _Set; underscore names are stored
    # directly on the instance __dict__.
    self.settings.FOO = 2
    self.settings._bar = 1
    self.assertEqual(self.settings.__dict__['_bar'], 1)  # eh?
    self.mox.VerifyAll()

  def testDirPython(self):
    """Test __dir__()."""
    self.mox.StubOutWithMock(self.settings, '_Dir')
    self.settings._Dir().AndReturn(['foo', 'bar'])
    self.mox.ReplayAll()
    # NOTE(user): I am not sure why the return order is backwards.
    self.assertEqual(['FOO', 'BAR'], self.settings.__dir__())
    self.mox.VerifyAll()
class ModuleSettingsTest(BaseSettingsTestBase):
  """Test ModuleSettings."""

  def _GetSettingsClassUnderTest(self):
    # Make a light subclass of ModuleSettings that overrides
    # methods. Goal: make testing of the important parts easier.
    class ModuleSettingsTestModule(settings.ModuleSettings):
      def _LoadSettingsModule(xself):  # pylint: disable=no-self-argument
        # Register the generated test module under a random name so
        # repeated tests never collide in sys.modules.
        k = random.randint(0, 100000)
        self.module_name = 'FOO%s' % k
        sys.modules[self.module_name] = self.module
        return self.module_name
    return ModuleSettingsTestModule

  def testLoadSettingsModule(self):
    """Test _LoadSettingsModule()."""
    # The raw ModuleSettings base does not implement _LoadSettingsModule.
    self.assertRaises(
        NotImplementedError,
        settings.ModuleSettings,
        self.module)

  def testInitialize(self):
    """Test _Initialize()."""
    self.assertEqual(self.settings._module_name, self.module_name)

  def testGet(self):
    """Test _Get()."""
    # Lower-case key 'foo' reads upper-case module attribute FOO.
    self.settings._module.FOO = 'bar'
    self.mox.ReplayAll()
    self.assertEqual('bar', self.settings._Get('foo'))
    self.assertRaises(
        AttributeError,
        self.settings._Get,
        'dne')
    self.mox.VerifyAll()

  def testSet(self):
    """Test _Set()."""
    self.mox.ReplayAll()
    self.settings._Set('foo', 'bar')
    self.assertEqual(self.settings._module.FOO, 'bar')
    self.mox.VerifyAll()
class TestModuleSettingsTest(BaseSettingsTestBase):
  """Test TestModuleSettings."""

  def _GetSettingsClassUnderTest(self):
    return settings.TestModuleSettings

  # NOTE(user): Skip unit tests for this class because its operation
  # is clear and testing will be a PITA.
class DictSettingsTest(BaseSettingsTestBase):
  """Test DictSettings."""

  def _GetSettingsClassUnderTest(self):
    return settings.DictSettings

  def _Globals(self):
    """Returns globals dict like globals()."""
    # Deterministic globals so _Initialize picks up exactly one setting.
    return {'FOO': 1}

  def testInitialize(self):
    """Test _Initialize()."""
    # Keys are stored lower-cased in the backing dict.
    self.assertEqual(self.settings._settings, {'foo': 1})

  def testGet(self):
    """Test _Get()."""
    self.assertEqual(self.settings._Get('foo'), 1)
    self.assertRaises(AttributeError, self.settings._Get, 'dne')

  def testSet(self):
    """Test _Set()."""
    self.mox.ReplayAll()
    self.settings._Set('bar', 2)
    self.assertEqual(self.settings._settings['bar'], 2)
    self.mox.VerifyAll()

  def testDir(self):
    """Test _Dir()."""
    self.assertEqual(['foo'], self.settings._Dir())
class SimianDictSettingsTest(BaseSettingsTestBase):
  """Test SimianDictSettings."""

  def _GetSettingsClassUnderTest(self):
    return settings.SimianDictSettings

  def _Globals(self):
    """Returns globals dict like globals()."""
    return {'SERVER_HOSTNAME': 'example.appspot.com'}

  def _CheckSetValidation(self, k, t):
    """Helper to set that validation is set for k with type t.

    Also, validate that _VALIDATION_REGEX type validations compile to
    real well formed regexes.
    """
    self.assertTrue(k in self.settings._validation)
    self.assertTrue(t in self.settings._validation[k])
    self.assertTrue(self.settings._validation[k][t] is not None)
    if t == self.settings._VALIDATION_REGEX:
      unused = re.compile(self.settings._validation[k][t][0])

  def testInitialize(self):
    """Test _Initialize()."""
    # Every key listed here must come pre-wired with a regex validation.
    regex_key_validations = [
        'email_domain', 'email_sender', 'email_reply_to', 'uuid_lookup_url',
        'owner_lookup_url']
    for k in regex_key_validations:
      self._CheckSetValidation(k, self.settings._VALIDATION_REGEX)

  def testIsCaIdValid(self):
    """Test _IsCaIdValid(): None or an upper-case string is valid."""
    k = 'k'
    self.assertTrue(self.settings._IsCaIdValid(k, None))
    self.assertTrue(self.settings._IsCaIdValid(k, 'FOO'))
    self.assertFalse(self.settings._IsCaIdValid(k, '9'))
    self.assertFalse(self.settings._IsCaIdValid(k, ''))
    self.assertFalse(self.settings._IsCaIdValid(k, 10))
class FilesystemSettingsTest(BaseSettingsTestBase):
"""Test FilesystemSettings class."""
def _GetSettingsClassUnderTest(self):
    # FilesystemSettings is the class exercised by this test case.
    return settings.FilesystemSettings
def testTranslateValue(self):
    """Test _TranslateValue().

    Raw config strings are coerced: ints, booleans, quoted strings
    (either quote style), bracketed lists, and empty string passthrough.
    """
    self.assertEqual(1, self.settings._TranslateValue('1'))
    self.assertEqual(True, self.settings._TranslateValue('True'))
    self.assertEqual('foo', self.settings._TranslateValue('\"foo\"'))
    self.assertEqual('foo', self.settings._TranslateValue('\'foo\''))
    self.assertEqual(
        ['hi', 'there'], self.settings._TranslateValue('[hi, there]'))
    self.assertEqual('', self.settings._TranslateValue(''))
def testGetExternalConfigurationAsFile(self):
    """Test _GetExternalConfiguration() when as_file=True."""
    mock_open = self.mox.CreateMockAnything()
    mock_fh = self.mox.CreateMockAnything()
    mock_isdir = self.mox.CreateMockAnything()
    mock_join = self.mox.CreateMockAnything()
    # Expect: dir check, path join, read whole file, close.
    mock_isdir(self.settings._path).AndReturn(True)
    mock_join(self.settings._path, 'name').AndReturn('/path/name')
    mock_open('/path/name', 'r').AndReturn(mock_fh)
    mock_fh.read().AndReturn('value\n')
    mock_fh.close()
    self.mox.ReplayAll()
    # Trailing newline is stripped from the file contents.
    self.assertEqual(
        'value', self.settings._GetExternalConfiguration(
            'name', as_file=True,
            open_=mock_open, isdir_=mock_isdir, join_=mock_join))
    self.mox.VerifyAll()
def testGetExternalConfigurationAsFileWhenNotIsdir(self):
    """Test _GetExternalConfiguration() as_file=True when path is not a dir."""
    mock_open = self.mox.CreateMockAnything()
    mock_fh = self.mox.CreateMockAnything()
    mock_isdir = self.mox.CreateMockAnything()
    mock_join = self.mox.CreateMockAnything()
    # A missing config directory short-circuits to None with no file I/O.
    mock_isdir(self.settings._path).AndReturn(False)
    self.mox.ReplayAll()
    self.assertEqual(
        None, self.settings._GetExternalConfiguration(
            'name', as_file=True,
            open_=mock_open, isdir_=mock_isdir, join_=mock_join))
    self.mox.VerifyAll()
def testGetExternalConfigurationAsFileWhenIoError(self):
    """Test _GetExternalConfiguration() when as_file=True when IOError."""
    mock_open = self.mox.CreateMockAnything()
    mock_fh = self.mox.CreateMockAnything()
    mock_isdir = self.mox.CreateMockAnything()
    mock_join = self.mox.CreateMockAnything()
    mock_isdir(self.settings._path).AndReturn(True)
    mock_join(self.settings._path, 'name').AndReturn('/path/name')
    mock_open('/path/name', 'r').AndReturn(mock_fh)
    # Read failure is swallowed and reported as "no configuration" (None).
    mock_fh.read().AndRaise(IOError)
    self.mox.ReplayAll()
    self.assertEqual(
        None, self.settings._GetExternalConfiguration(
            'name', as_file=True,
            open_=mock_open, isdir_=mock_isdir, join_=mock_join))
    self.mox.VerifyAll()
def testGetExternalConfiguration(self):
    """Test _GetExternalConfiguration() when as_file=False."""
    mock_open = self.mox.CreateMockAnything()
    mock_fh = self.mox.CreateMockAnything()
    mock_cp = self.mox.CreateMockAnything()
    mock_isdir = self.mox.CreateMockAnything()
    mock_join = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(settings.ConfigParser, 'ConfigParser', True)
    self.mox.StubOutWithMock(self.settings, '_TranslateValue')
    mock_isdir(self.settings._path).AndReturn(True)
    mock_join(self.settings._path, 'name').AndReturn('/path/name')
    # In non-file mode a ".cfg" suffix is appended and the file is parsed
    # as an ini-style config; each value passes through _TranslateValue.
    mock_open('/path/name.cfg', 'r').AndReturn(mock_fh)
    settings.ConfigParser.ConfigParser().AndReturn(mock_cp)
    mock_cp.readfp(mock_fh)
    mock_fh.close()
    mock_cp.items('settings').AndReturn(
        (('name2', 'value2'), ('name', 'value')))
    self.settings._TranslateValue('value2').AndReturn('value2')
    self.settings._TranslateValue('value').AndReturn('value')
    self.mox.ReplayAll()
    self.assertEqual(
        {'name2': 'value2', 'name': 'value'},
        self.settings._GetExternalConfiguration(
            'name', as_file=False,
            open_=mock_open, isdir_=mock_isdir, join_=mock_join))
    self.mox.VerifyAll()
def testGetExternalConfigurationWhenConfigParserError(self):
    """Test _GetExternalConfiguration() when as_file=False and CP.Error."""
    mock_open = self.mox.CreateMockAnything()
    mock_fh = self.mox.CreateMockAnything()
    mock_cp = self.mox.CreateMockAnything()
    mock_isdir = self.mox.CreateMockAnything()
    mock_join = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(settings.ConfigParser, 'ConfigParser', True)
    mock_isdir(self.settings._path).AndReturn(True)
    mock_join(self.settings._path, 'name').AndReturn('/path/name')
    mock_open('/path/name.cfg', 'r').AndReturn(mock_fh)
    settings.ConfigParser.ConfigParser().AndReturn(mock_cp)
    # Malformed config file is swallowed and reported as None.
    mock_cp.readfp(mock_fh).AndRaise(settings.ConfigParser.Error)
    self.mox.ReplayAll()
    self.assertEqual(
        None,
        self.settings._GetExternalConfiguration(
            'name', as_file=False,
            open_=mock_open, isdir_=mock_isdir, join_=mock_join))
    self.mox.VerifyAll()
def testGetExternalConfigurationWhenIoError(self):
    """Test _GetExternalConfiguration() when as_file=False and IOError."""
    mock_open = self.mox.CreateMockAnything()
    mock_isdir = self.mox.CreateMockAnything()
    mock_join = self.mox.CreateMockAnything()
    mock_isdir(self.settings._path).AndReturn(True)
    mock_join(self.settings._path, 'name').AndReturn('/path/name')
    # Unreadable config file is swallowed and reported as None.
    mock_open('/path/name.cfg', 'r').AndRaise(IOError)
    self.mox.ReplayAll()
    self.assertEqual(
        None,
        self.settings._GetExternalConfiguration(
            'name', as_file=False,
            open_=mock_open, isdir_=mock_isdir, join_=mock_join))
    self.mox.VerifyAll()
def testGetExternalPem(self):
  """Test _GetExternalPem()."""
  self.mox.StubOutWithMock(self.settings, '_GetExternalConfiguration')
  path = os.path.join(self.settings._path, 'ssl')
  # Only the 'new_pem' lookup should hit _GetExternalConfiguration; the
  # predefined value must be served straight from the _settings cache.
  self.settings._GetExternalConfiguration(
      'new.pem', as_file=True, path=path).AndReturn('new')
  self.settings._settings['predefined'] = 'pre'
  self.mox.ReplayAll()
  self.assertEqual('pre', self.settings._GetExternalPem('predefined'))
  self.assertEqual('new', self.settings._GetExternalPem('new_pem'))
  # The freshly loaded pem is cached back into _settings.
  self.assertEqual(self.settings._settings['new_pem'], 'new')
  self.mox.VerifyAll()
def testGetExternalPemWhenMissing(self):
  """_GetExternalPem() raises AttributeError when the pem is not found."""
  self.mox.StubOutWithMock(self.settings, '_GetExternalConfiguration')
  ssl_dir = os.path.join(self.settings._path, 'ssl')
  # The external lookup comes back empty, so the attribute access fails.
  self.settings._GetExternalConfiguration(
      'new.pem', as_file=True, path=ssl_dir).AndReturn(None)
  self.mox.ReplayAll()
  self.assertRaises(
      AttributeError, self.settings._GetExternalPem, 'new_pem')
  self.mox.VerifyAll()
def testGetExternalValue(self):
  """Test _GetExternalValue()."""
  self.mox.StubOutWithMock(self.settings, '_GetExternalConfiguration')
  # mox verifies calls in recording order; the four phases below must be
  # recorded in the same order they are exercised after ReplayAll().
  # 1: value already present in _settings; no external lookup occurs.
  self.settings._settings['predefined'] = 'pre'
  # 2: external config loads, but lacks the requested key.
  self.settings._GetExternalConfiguration('settings').AndReturn({'new2': 1})
  # 3: external config loads and contains the requested key.
  self.settings._GetExternalConfiguration('settings').AndReturn({'new': 1})
  # 4: external config is missing entirely.
  self.settings._GetExternalConfiguration('settings').AndReturn(None)
  self.mox.ReplayAll()
  # 1
  self.assertEqual('pre', self.settings._GetExternalValue('predefined'))
  # 2
  self.assertRaises(
      AttributeError,
      self.settings._GetExternalValue,
      'not-new')
  # 3
  self.assertEqual(1, self.settings._GetExternalValue('new'))
  # The fetched value is cached back into _settings.
  self.assertEqual(self.settings._settings['new'], 1)
  # 4
  self.assertRaises(
      AttributeError,
      self.settings._GetExternalValue,
      'other')
  self.mox.VerifyAll()
def testGet(self):
  """Test _Get()."""
  self.mox.StubOutWithMock(self.settings, '_GetExternalPem')
  self.mox.StubOutWithMock(self.settings, '_GetExternalValue')
  self.settings._GetExternalPem('foo_pem').AndReturn(0)
  self.settings._GetExternalValue('foo_item').AndReturn(1)
  self.mox.ReplayAll()
  # Per the expectations above, attribute access routes '*_pem' names to
  # _GetExternalPem and other names to _GetExternalValue.
  self.assertEqual(0, getattr(self.settings, 'foo_pem'))
  self.assertEqual(1, getattr(self.settings, 'foo_item'))
  self.mox.VerifyAll()
def testDir(self):
  """Test _Dir()."""
  # NOTE(review): _TestNotImplemented is a shared helper defined earlier
  # in this test class (outside this chunk); presumably it asserts that
  # the named method is unimplemented — confirm against its definition.
  self._TestNotImplemented('_Dir')
def main(unused_argv):
  """Entry point: delegate to the basetest test runner."""
  basetest.main()


if __name__ == '__main__':
  # NOTE(review): app.run() parses flags and then dispatches to main() —
  # the usual google-apputils convention; confirm against the file's
  # imports (outside this chunk).
  app.run()
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add the nullable ``allowed_origins`` text field to
    the ``sentry_apikey`` table (ApiKey model)."""

    def forwards(self, orm):
        """Apply the migration."""
        # Adding field 'ApiKey.allowed_origins'
        db.add_column('sentry_apikey', 'allowed_origins',
                      self.gf('django.db.models.fields.TextField')(null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration."""
        # Deleting field 'ApiKey.allowed_origins'
        db.delete_column('sentry_apikey', 'allowed_origins')

    # Frozen ORM snapshot auto-generated by South. It records the full model
    # state at the time this migration was created and must not be edited by
    # hand; South uses it to build the `orm` object passed to forwards() and
    # backwards().
    models = {
        'sentry.accessgroup': {
            'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
            'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
            'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
        },
        'sentry.activity': {
            'Meta': {'object_name': 'Activity'},
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
        },
        'sentry.alert': {
            'Meta': {'object_name': 'Alert'},
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
        },
        'sentry.alertrelatedgroup': {
            'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
            'alert': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
        },
        'sentry.apikey': {
            'Meta': {'object_name': 'ApiKey'},
            # New field added by this migration.
            'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
            'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
            'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
        },
        'sentry.auditlogentry': {
            'Meta': {'object_name': 'AuditLogEntry'},
            'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'audit_actors'", 'to': "orm['sentry.User']"}),
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
            'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
            'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
            'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
        },
        'sentry.authidentity': {
            'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
            'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
            'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
        },
        'sentry.authprovider': {
            'Meta': {'object_name': 'AuthProvider'},
            'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
            'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
            'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
        },
        'sentry.broadcast': {
            'Meta': {'object_name': 'Broadcast'},
            'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'sentry.event': {
            'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
        },
        'sentry.eventmapping': {
            'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
        },
        'sentry.file': {
            'Meta': {'object_name': 'File'},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
            'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
            'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'storage_options': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'sentry.group': {
            'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
            'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
            'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
            'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
        },
        'sentry.groupassignee': {
            'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
        },
        'sentry.groupbookmark': {
            'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
        },
        'sentry.grouphash': {
            'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
            'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
        },
        'sentry.groupmeta': {
            'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'value': ('django.db.models.fields.TextField', [], {})
        },
        'sentry.grouprulestatus': {
            'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
            'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
        },
        'sentry.groupseen': {
            'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
        },
        'sentry.grouptagkey': {
            'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
        },
        'sentry.grouptagvalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
            'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.helppage': {
            'Meta': {'object_name': 'HelpPage'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
            'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'sentry.lostpasswordhash': {
            'Meta': {'object_name': 'LostPasswordHash'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
        },
        'sentry.option': {
            'Meta': {'object_name': 'Option'},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
        },
        'sentry.organization': {
            'Meta': {'object_name': 'Organization'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
        },
        'sentry.organizationmember': {
            'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
            'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
            'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
            'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
        },
        'sentry.pendingteammember': {
            'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
            'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
        },
        'sentry.project': {
            'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
        },
        'sentry.projectkey': {
            'Meta': {'object_name': 'ProjectKey'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
            'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
            'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
            'user_added': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
        },
        'sentry.projectoption': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
        },
        'sentry.release': {
            'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'sentry.releasefile': {
            'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
            'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'name': ('django.db.models.fields.TextField', [], {}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
        },
        'sentry.rule': {
            'Meta': {'object_name': 'Rule'},
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
        },
        'sentry.tagkey': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
            'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
        },
        'sentry.tagvalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.team': {
            'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
            'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
        },
        'sentry.teammember': {
            'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
            'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
        },
        'sentry.user': {
            'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        'sentry.useroption': {
            'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
            'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
        }
    }

    complete_apps = ['sentry']
|
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""End to end tests for ChromeDriver."""
import base64
import json
import math
import optparse
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import threading
import time
import unittest
import urllib
import urllib2
import uuid
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(1, os.path.join(_THIS_DIR, os.pardir))
sys.path.insert(1, os.path.join(_THIS_DIR, os.pardir, 'client'))
sys.path.insert(1, os.path.join(_THIS_DIR, os.pardir, 'server'))
import chrome_paths
import chromedriver
import unittest_util
import util
import server
from webelement import WebElement
import webserver
_TEST_DATA_DIR = os.path.join(chrome_paths.GetTestData(), 'chromedriver')
if util.IsLinux():
sys.path.insert(0, os.path.join(chrome_paths.GetSrc(), 'third_party',
'catapult', 'devil'))
from devil.android import device_utils
from devil.android import forwarder
sys.path.insert(0, os.path.join(chrome_paths.GetSrc(), 'build', 'android'))
import devil_chromium
from pylib import constants
# Tests skipped in every configuration (desktop and Android); each entry
# carries the reason or a tracking bug.
_NEGATIVE_FILTER = [
    # This test is flaky since it uses setTimeout.
    # Re-enable once crbug.com/177511 is fixed and we can remove setTimeout.
    'ChromeDriverTest.testAlert',
    # This test is too flaky on the bots, but seems to run perfectly fine
    # on developer workstations.
    'ChromeDriverTest.testEmulateNetworkConditionsNameSpeed',
    'ChromeDriverTest.testEmulateNetworkConditionsSpeed',
    # crbug.com/469947
    'ChromeDriverTest.testTouchPinch',
    'ChromeDriverTest.testReturningAFunctionInJavascript',
]

# Extra exclusions applied only when testing a particular Chrome version,
# keyed by version name (e.g. 'HEAD').
_VERSION_SPECIFIC_FILTER = {}
_VERSION_SPECIFIC_FILTER['HEAD'] = [
    # https://code.google.com/p/chromedriver/issues/detail?id=992
    'ChromeDownloadDirTest.testDownloadDirectoryOverridesExistingPreferences',
]

# Extra exclusions applied only on a particular host OS, keyed by the
# platform name returned by util.GetPlatformName().
_OS_SPECIFIC_FILTER = {}
_OS_SPECIFIC_FILTER['win'] = [
    # https://code.google.com/p/chromedriver/issues/detail?id=299
    'ChromeLogPathCapabilityTest.testChromeLogPath',
]
_OS_SPECIFIC_FILTER['linux'] = [
    # Xvfb doesn't support maximization.
    'ChromeDriverTest.testWindowMaximize',
    # https://bugs.chromium.org/p/chromedriver/issues/detail?id=1302
    'ChromeDriverTest.testShadowDomStaleReference',
]
_OS_SPECIFIC_FILTER['mac'] = [
    # https://bugs.chromium.org/p/chromedriver/issues/detail?id=1302
    'ChromeDriverTest.testShadowDomStaleReference',
]

# Additional exclusions for desktop (non-Android) runs.
_DESKTOP_NEGATIVE_FILTER = [
    # Desktop doesn't support touch (without --touch-events).
    'ChromeDriverTest.testTouchSingleTapElement',
    'ChromeDriverTest.testTouchDownMoveUpElement',
    'ChromeDriverTest.testTouchScrollElement',
    'ChromeDriverTest.testTouchDoubleTapElement',
    'ChromeDriverTest.testTouchLongPressElement',
    'ChromeDriverTest.testTouchFlickElement',
    'ChromeDriverTest.testTouchPinch',
    'ChromeDriverAndroidTest.*',
]
def _GetDesktopNegativeFilter(version_name):
  """Builds the list of tests to exclude for a desktop Chrome run.

  Combines the global negative filter with the desktop-specific filter,
  then appends any OS- and version-specific exclusions.

  Args:
    version_name: Chrome version name string (e.g. 'HEAD') used to look up
        version-specific exclusions.

  Returns:
    A list of unittest-style test name patterns to skip.
  """
  # Locals renamed from 'filter'/'os', which shadowed the builtin and the
  # imported os module.
  excluded = _NEGATIVE_FILTER + _DESKTOP_NEGATIVE_FILTER
  platform = util.GetPlatformName()
  if platform in _OS_SPECIFIC_FILTER:
    excluded += _OS_SPECIFIC_FILTER[platform]
  if version_name in _VERSION_SPECIFIC_FILTER:
    excluded += _VERSION_SPECIFIC_FILTER[version_name]
  return excluded
# Per-package exclusions for Android runs, keyed by the package name passed
# on the command line (see _ANDROID_PACKAGE_KEY usage).
_ANDROID_NEGATIVE_FILTER = {}
_ANDROID_NEGATIVE_FILTER['chrome'] = (
    _NEGATIVE_FILTER + [
        # TODO(chrisgao): fix hang of tab crash test on android.
        'ChromeDriverTest.testTabCrash',
        # Android doesn't support switches and extensions.
        'ChromeSwitchesCapabilityTest.*',
        'ChromeExtensionsCapabilityTest.*',
        'MobileEmulationCapabilityTest.*',
        'ChromeDownloadDirTest.*',
        # https://crbug.com/274650
        'ChromeDriverTest.testCloseWindow',
        # https://code.google.com/p/chromedriver/issues/detail?id=298
        'ChromeDriverTest.testWindowPosition',
        'ChromeDriverTest.testWindowSize',
        'ChromeDriverTest.testWindowMaximize',
        'ChromeLogPathCapabilityTest.testChromeLogPath',
        'RemoteBrowserTest.*',
        # Don't enable perf testing on Android yet.
        'PerfTest.testSessionStartTime',
        'PerfTest.testSessionStopTime',
        'PerfTest.testColdExecuteScript',
        # Android doesn't support multiple sessions on one device.
        'SessionHandlingTest.testGetSessions',
        # Android doesn't use the chrome://print dialog.
        'ChromeDriverTest.testCanSwitchToPrintPreviewDialog',
        # https://code.google.com/p/chromedriver/issues/detail?id=1175
        'ChromeDriverTest.testChromeDriverSendLargeData',
        # Chrome 44+ for Android doesn't dispatch the dblclick event
        'ChromeDriverTest.testMouseDoubleClick',
        # Page cannot be loaded from file:// URI in Android unless it
        # is stored in device.
        'ChromeDriverTest.testCanClickAlertInIframes',
    ]
)
# Stable and beta channel packages share the base Chrome exclusions.
_ANDROID_NEGATIVE_FILTER['chrome_stable'] = (
    _ANDROID_NEGATIVE_FILTER['chrome'])
_ANDROID_NEGATIVE_FILTER['chrome_beta'] = (
    _ANDROID_NEGATIVE_FILTER['chrome'])
_ANDROID_NEGATIVE_FILTER['chromium'] = (
    _ANDROID_NEGATIVE_FILTER['chrome'] + [
        'ChromeDriverTest.testSwitchToWindow',
    ]
)
# The WebView shell is the most restricted target; it inherits the Chrome
# exclusions plus everything its embedded WebView cannot do.
_ANDROID_NEGATIVE_FILTER['chromedriver_webview_shell'] = (
    _ANDROID_NEGATIVE_FILTER['chrome'] + [
        'PerformanceLoggerTest.testPerformanceLogger',
        'ChromeDriverTest.testShadowDom*',
        # WebView doesn't support emulating network conditions.
        'ChromeDriverTest.testEmulateNetworkConditions',
        'ChromeDriverTest.testEmulateNetworkConditionsNameSpeed',
        'ChromeDriverTest.testEmulateNetworkConditionsOffline',
        'ChromeDriverTest.testEmulateNetworkConditionsSpeed',
        'ChromeDriverTest.testEmulateNetworkConditionsName',
        # The WebView shell that we test against (on KitKat) does not yet
        # support Synthetic Gesture DevTools commands.
        # TODO(samuong): reenable when it does.
        'ChromeDriverTest.testHasTouchScreen',
        'ChromeDriverTest.testTouchScrollElement',
        'ChromeDriverTest.testTouchDoubleTapElement',
        'ChromeDriverTest.testTouchLongPressElement',
        'ChromeDriverTest.testTouchPinch',
        # WebView shell doesn't support popups or popup blocking.
        'ChromeDriverTest.testPopups',
        'ChromeDriverTest.testDontGoBackOrGoForward',
        # ChromeDriver WebView shell doesn't support multiple tabs.
        'ChromeDriverTest.testGetWindowHandles',
        'ChromeDriverTest.testSwitchToWindow',
        'ChromeDriverTest.testShouldHandleNewWindowLoadingProperly',
        # https://bugs.chromium.org/p/chromedriver/issues/detail?id=1295
        # TODO(gmanikpure): re-enable this test when we stop supporting
        # WebView on KitKat.
        'ChromeDriverTest.testGetUrlOnInvalidUrl',
    ]
)
class ChromeDriverBaseTest(unittest.TestCase):
  """Base class for testing chromedriver functionalities."""

  def __init__(self, *args, **kwargs):
    super(ChromeDriverBaseTest, self).__init__(*args, **kwargs)
    # Drivers created via CreateDriver; all quit in tearDown.
    self._drivers = []

  def tearDown(self):
    for driver in self._drivers:
      try:
        driver.Quit()
      except Exception:
        # Best-effort cleanup: a session may already be gone (e.g. after a
        # tab-crash test).  Narrowed from a bare 'except:' so that
        # KeyboardInterrupt/SystemExit still propagate.
        pass

  def CreateDriver(self, server_url=None, download_dir=None, **kwargs):
    """Creates a ChromeDriver session and registers it for tearDown cleanup.

    Args:
      server_url: ChromeDriver server URL; defaults to the module-level
          _CHROMEDRIVER_SERVER_URL.
      download_dir: optional download directory passed to the session.
      **kwargs: forwarded to chromedriver.ChromeDriver.

    Returns:
      The new chromedriver.ChromeDriver instance.
    """
    if server_url is None:
      server_url = _CHROMEDRIVER_SERVER_URL

    # Android-specific session parameters, derived from the package under
    # test when one was given on the command line.
    android_package = None
    android_activity = None
    android_process = None
    if _ANDROID_PACKAGE_KEY:
      android_package = constants.PACKAGE_INFO[_ANDROID_PACKAGE_KEY].package
      if _ANDROID_PACKAGE_KEY == 'chromedriver_webview_shell':
        android_activity = constants.PACKAGE_INFO[_ANDROID_PACKAGE_KEY].activity
        android_process = '%s:main' % android_package

    driver = chromedriver.ChromeDriver(server_url,
                                       chrome_binary=_CHROME_BINARY,
                                       android_package=android_package,
                                       android_activity=android_activity,
                                       android_process=android_process,
                                       download_dir=download_dir,
                                       **kwargs)
    self._drivers.append(driver)
    return driver

  def WaitForNewWindow(self, driver, old_handles, check_closed_windows=True):
    """Wait for at least one new window to show up in 20 seconds.

    Args:
      driver: the ChromeDriver session to poll.
      old_handles: Handles to all old windows before the new window is added.
      check_closed_windows: If True, assert that no windows are closed before
          the new window is added.

    Returns:
      Handle to a new window. None if timeout.
    """
    deadline = time.time() + 20
    while time.time() < deadline:
      handles = driver.GetWindowHandles()
      if check_closed_windows:
        self.assertTrue(set(old_handles).issubset(handles))
      new_handles = set(handles).difference(set(old_handles))
      if len(new_handles) > 0:
        return new_handles.pop()
      time.sleep(0.01)
    return None

  def WaitForCondition(self, predicate, timeout=5, timestep=0.1):
    """Wait for a condition to become true.

    Args:
      predicate: A function that returns a boolean value.
      timeout: seconds to keep polling before giving up.
      timestep: seconds to sleep between polls.

    Returns:
      True as soon as predicate() is truthy, False on timeout.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
      if predicate():
        return True
      time.sleep(timestep)
    return False
class ChromeDriverTest(ChromeDriverBaseTest):
"""End to end tests for ChromeDriver."""
@staticmethod
def GlobalSetUp():
  """Starts the shared test HTTP/sync servers; on Android, forwards ports.

  Must run once before any ChromeDriverTest executes.
  """
  ChromeDriverTest._http_server = webserver.WebServer(
      chrome_paths.GetTestData())
  ChromeDriverTest._sync_server = webserver.SyncWebServer()
  if _ANDROID_PACKAGE_KEY:
    ChromeDriverTest._device = device_utils.DeviceUtils.HealthyDevices()[0]
    http_host_port = ChromeDriverTest._http_server._server.server_port
    sync_host_port = ChromeDriverTest._sync_server._server.server_port
    # Forward the host server ports to the device so the browser on the
    # device can reach them at the same port numbers.
    forwarder.Forwarder.Map(
        [(http_host_port, http_host_port), (sync_host_port, sync_host_port)],
        ChromeDriverTest._device)
@staticmethod
def GlobalTearDown():
  """Removes Android port forwards and shuts down the HTTP server.

  NOTE(review): _sync_server is never shut down here -- confirm whether
  SyncWebServer requires an explicit shutdown.
  """
  if _ANDROID_PACKAGE_KEY:
    forwarder.Forwarder.UnmapAllDevicePorts(ChromeDriverTest._device)
  ChromeDriverTest._http_server.Shutdown()
@staticmethod
def GetHttpUrlForFile(file_path):
  """Returns the URL that serves file_path from the shared test HTTP server."""
  return ChromeDriverTest._http_server.GetUrl() + file_path
def setUp(self):
  # Fresh ChromeDriver session per test; quit by the base class tearDown.
  self._driver = self.CreateDriver()
def testStartStop(self):
pass
def testLoadUrl(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
def testGetCurrentWindowHandle(self):
self._driver.GetCurrentWindowHandle()
def testCloseWindow(self):
  """Closes a popup window and verifies the remaining windows still work."""
  self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
  old_handles = self._driver.GetWindowHandles()
  self._driver.FindElement('id', 'link').Click()
  new_window_handle = self.WaitForNewWindow(self._driver, old_handles)
  self.assertNotEqual(None, new_window_handle)
  self._driver.SwitchToWindow(new_window_handle)
  self.assertEquals(new_window_handle, self._driver.GetCurrentWindowHandle())
  # The popup must not contain the link from the original page.
  self.assertRaises(chromedriver.NoSuchElement,
                    self._driver.FindElement, 'id', 'link')
  self._driver.CloseWindow()
  # After closing the current window there is no current window handle.
  self.assertRaises(chromedriver.NoSuchWindow,
                    self._driver.GetCurrentWindowHandle)
  new_handles = self._driver.GetWindowHandles()
  for old_handle in old_handles:
    self.assertTrue(old_handle in new_handles)
  # Every remaining window must still be switchable and closable.
  for handle in new_handles:
    self._driver.SwitchToWindow(handle)
    self.assertEquals(handle, self._driver.GetCurrentWindowHandle())
    self._driver.CloseWindow()
def testGetWindowHandles(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
old_handles = self._driver.GetWindowHandles()
self._driver.FindElement('id', 'link').Click()
self.assertNotEqual(None, self.WaitForNewWindow(self._driver, old_handles))
def testSwitchToWindow(self):
  """Switches between windows both by handle and by window name."""
  self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
  # Name the first window so we can switch back to it by name later.
  self.assertEquals(
      1, self._driver.ExecuteScript('window.name = "oldWindow"; return 1;'))
  window1_handle = self._driver.GetCurrentWindowHandle()
  old_handles = self._driver.GetWindowHandles()
  self._driver.FindElement('id', 'link').Click()
  new_window_handle = self.WaitForNewWindow(self._driver, old_handles)
  self.assertNotEqual(None, new_window_handle)
  self._driver.SwitchToWindow(new_window_handle)
  self.assertEquals(new_window_handle, self._driver.GetCurrentWindowHandle())
  self.assertRaises(chromedriver.NoSuchElement,
                    self._driver.FindElement, 'id', 'link')
  # Switching by the window.name set above must land on the first window.
  self._driver.SwitchToWindow('oldWindow')
  self.assertEquals(window1_handle, self._driver.GetCurrentWindowHandle())
def testEvaluateScript(self):
  """Synchronous scripts return their value; an empty script returns None."""
  returned_value = self._driver.ExecuteScript('return 1')
  self.assertEquals(1, returned_value)
  empty_result = self._driver.ExecuteScript('')
  self.assertEquals(None, empty_result)
def testEvaluateScriptWithArgs(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
script = ('document.body.innerHTML = "<div>b</div><div>c</div>";'
'return {stuff: document.querySelectorAll("div")};')
stuff = self._driver.ExecuteScript(script)['stuff']
script = 'return arguments[0].innerHTML + arguments[1].innerHTML'
self.assertEquals(
'bc', self._driver.ExecuteScript(script, stuff[0], stuff[1]))
def testEvaluateInvalidScript(self):
self.assertRaises(chromedriver.ChromeDriverException,
self._driver.ExecuteScript, '{{{')
def testExecuteAsyncScript(self):
  """Async scripts honor the script timeout and return the callback value."""
  self._driver.SetTimeout('script', 3000)
  # A callback fired after 10s must hit the 3s script timeout.
  self.assertRaises(
      chromedriver.ScriptTimeout,
      self._driver.ExecuteAsyncScript,
      'var callback = arguments[0];'
      'setTimeout(function(){callback(1);}, 10000);')
  # A callback fired within the timeout returns its argument.
  self.assertEquals(
      2,
      self._driver.ExecuteAsyncScript(
          'var callback = arguments[0];'
          'setTimeout(function(){callback(2);}, 300);'))
def testSwitchToFrame(self):
  """Switches into an iframe by id, by name, by index, and by element."""
  self._driver.ExecuteScript(
      'var frame = document.createElement("iframe");'
      'frame.id="id";'
      'frame.name="name";'
      'document.body.appendChild(frame);')
  # window.top == window holds only while we are in the main frame.
  self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
  self._driver.SwitchToFrame('id')
  self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
  self._driver.SwitchToMainFrame()
  self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
  self._driver.SwitchToFrame('name')
  self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
  self._driver.SwitchToMainFrame()
  self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
  self._driver.SwitchToFrameByIndex(0)
  self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
  self._driver.SwitchToMainFrame()
  self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
  # Switching by WebElement reference.
  self._driver.SwitchToFrame(self._driver.FindElement('tag name', 'iframe'))
  self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
def testSwitchToParentFrame(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/nested.html'))
self.assertTrue('One' in self._driver.GetPageSource())
self._driver.SwitchToFrameByIndex(0)
self.assertTrue('Two' in self._driver.GetPageSource())
self._driver.SwitchToFrameByIndex(0)
self.assertTrue('Three' in self._driver.GetPageSource())
self._driver.SwitchToParentFrame()
self.assertTrue('Two' in self._driver.GetPageSource())
self._driver.SwitchToParentFrame()
self.assertTrue('One' in self._driver.GetPageSource())
def testExecuteInRemovedFrame(self):
  """When the current frame is removed, context falls back to the top frame."""
  self._driver.ExecuteScript(
      'var frame = document.createElement("iframe");'
      'frame.id="id";'
      'frame.name="name";'
      'document.body.appendChild(frame);'
      'window.addEventListener("message",'
      ' function(event) { document.body.removeChild(frame); });')
  self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
  self._driver.SwitchToFrame('id')
  self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
  # Posting to the parent triggers its message listener, which removes the
  # iframe we are currently executing in.
  self._driver.ExecuteScript('parent.postMessage("remove", "*");')
  self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
def testGetTitle(self):
  """Sets document.title via script and reads it back through GetTitle."""
  set_title_script = 'document.title = "title"; return 1;'
  self.assertEquals(1, self._driver.ExecuteScript(set_title_script))
  self.assertEquals('title', self._driver.GetTitle())
def testGetPageSource(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
self.assertTrue('Link to empty.html' in self._driver.GetPageSource())
def testFindElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>a</div><div>b</div>";')
self.assertTrue(
isinstance(self._driver.FindElement('tag name', 'div'), WebElement))
def testNoSuchElementExceptionMessage(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>a</div><div>b</div>";')
self.assertRaisesRegexp(chromedriver.NoSuchElement,
'no such element: Unable '
'to locate element: {"method":"tag name",'
'"selector":"divine"}',
self._driver.FindElement,
'tag name', 'divine')
def testUnexpectedAlertOpenExceptionMessage(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript('window.alert("Hi");')
self.assertRaisesRegexp(chromedriver.UnexpectedAlertOpen,
'unexpected alert open: {Alert text : Hi}',
self._driver.FindElement, 'tag name', 'divine')
def testFindElements(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>a</div><div>b</div>";')
divs = self._driver.FindElements('tag name', 'div')
self.assertTrue(isinstance(divs, list))
self.assertEquals(2, len(divs))
for div in divs:
self.assertTrue(isinstance(div, WebElement))
def testFindChildElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div><br><br></div><div><a></a></div>";')
element = self._driver.FindElement('tag name', 'div')
self.assertTrue(
isinstance(element.FindElement('tag name', 'br'), WebElement))
def testFindChildElements(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div><br><br></div><div><br></div>";')
element = self._driver.FindElement('tag name', 'div')
brs = element.FindElements('tag name', 'br')
self.assertTrue(isinstance(brs, list))
self.assertEquals(2, len(brs))
for br in brs:
self.assertTrue(isinstance(br, WebElement))
def testHoverOverElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("mouseover", function() {'
' document.body.appendChild(document.createElement("br"));'
'});'
'return div;')
div.HoverOver()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testClickElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("click", function() {'
' div.innerHTML="new<br>";'
'});'
'return div;')
div.Click()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testClickElementInSubFrame(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/frame_test.html'))
frame = self._driver.FindElement('tag name', 'iframe')
self._driver.SwitchToFrame(frame)
# Test clicking element in the sub frame.
self.testClickElement()
def testClickElementAfterNavigation(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/link_nav.html'))
link = self._driver.FindElement('id', 'l1')
link.Click()
alert_button = self._driver.FindElement('id', 'aa1')
alert_button.Click()
self.assertTrue(self._driver.IsAlertOpen())
def testClearElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
text = self._driver.ExecuteScript(
'document.body.innerHTML = \'<input type="text" value="abc">\';'
'return document.getElementsByTagName("input")[0];')
value = self._driver.ExecuteScript('return arguments[0].value;', text)
self.assertEquals('abc', value)
text.Clear()
value = self._driver.ExecuteScript('return arguments[0].value;', text)
self.assertEquals('', value)
def testSendKeysToElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
text = self._driver.ExecuteScript(
'document.body.innerHTML = \'<input type="text">\';'
'var input = document.getElementsByTagName("input")[0];'
'input.addEventListener("change", function() {'
' document.body.appendChild(document.createElement("br"));'
'});'
'return input;')
text.SendKeys('0123456789+-*/ Hi')
text.SendKeys(', there!')
value = self._driver.ExecuteScript('return arguments[0].value;', text)
self.assertEquals('0123456789+-*/ Hi, there!', value)
def testGetElementAttribute(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/attribute_colon_test.html'))
elem = self._driver.FindElement("name", "phones")
self.assertEquals('3', elem.GetAttribute('size'))
def testGetElementSpecialCharAttribute(self):
self._driver.Load(self.GetHttpUrlForFile(
'/chromedriver/attribute_colon_test.html'))
elem = self._driver.FindElement("name", "phones")
self.assertEquals('colonvalue', elem.GetAttribute('ext:qtip'))
def testGetCurrentUrl(self):
url = self.GetHttpUrlForFile('/chromedriver/frame_test.html')
self._driver.Load(url)
self.assertEquals(url, self._driver.GetCurrentUrl())
self._driver.SwitchToFrame(self._driver.FindElement('tagName', 'iframe'))
self.assertEquals(url, self._driver.GetCurrentUrl())
def testGoBackAndGoForward(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.GoBack()
self._driver.GoForward()
def testDontGoBackOrGoForward(self):
# We need to run this test in a new tab so that it is isolated from previous
# test runs.
old_windows = self._driver.GetWindowHandles()
self._driver.ExecuteScript('window.open("about:blank")')
new_window = self.WaitForNewWindow(self._driver, old_windows)
self._driver.SwitchToWindow(new_window)
self.assertEquals('about:blank', self._driver.GetCurrentUrl())
self._driver.GoBack()
self.assertEquals('about:blank', self._driver.GetCurrentUrl())
self._driver.GoForward()
self.assertEquals('about:blank', self._driver.GetCurrentUrl())
def testBackNavigationAfterClickElement(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/link_nav.html'))
link = self._driver.FindElement('id', 'l1')
link.Click()
self._driver.GoBack()
self.assertNotEqual('data:,', self._driver.GetCurrentUrl())
self.assertEquals(self.GetHttpUrlForFile('/chromedriver/link_nav.html'),
self._driver.GetCurrentUrl())
def testRefresh(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.Refresh()
def testMouseMoveTo(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("mouseover", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
self._driver.MouseMoveTo(div, 10, 10)
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testMoveToElementAndClick(self):
  """Clicking a link wrapped across two lines lands inside the element."""
  # This page gets rendered differently depending on which platform the test
  # is running on, and what window size is being used. So we need to do some
  # sanity checks to make sure that the <a> element is split across two lines
  # of text.
  self._driver.Load(self.GetHttpUrlForFile('/chromedriver/multiline.html'))

  # Check that link element spans two lines and that the first ClientRect is
  # above the second.
  link = self._driver.FindElements('tag name', 'a')[0]
  client_rects = self._driver.ExecuteScript(
      'return arguments[0].getClientRects();', link)
  self.assertEquals(2, len(client_rects))
  self.assertTrue(client_rects[0]['bottom'] <= client_rects[1]['top'])

  # Check that the center of the link's bounding ClientRect is outside the
  # element.
  bounding_client_rect = self._driver.ExecuteScript(
      'return arguments[0].getBoundingClientRect();', link)
  center = bounding_client_rect['left'] + bounding_client_rect['width'] / 2
  self.assertTrue(client_rects[1]['right'] < center)
  self.assertTrue(center < client_rects[0]['left'])

  self._driver.MouseMoveTo(link)
  self._driver.MouseClick()
  # A successful click navigates to the link's #top anchor.
  self.assertTrue(self._driver.GetCurrentUrl().endswith('#top'))
def testMouseClick(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("click", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
self._driver.MouseMoveTo(div)
self._driver.MouseClick()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testMouseButtonDownAndUp(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("mousedown", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new1<br>";'
'});'
'div.addEventListener("mouseup", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new2<a></a>";'
'});')
self._driver.MouseMoveTo(None, 50, 50)
self._driver.MouseButtonDown()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
self._driver.MouseButtonUp()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'a')))
def testMouseDoubleClick(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.style["width"] = "100px";'
'div.style["height"] = "100px";'
'div.addEventListener("dblclick", function() {'
' var div = document.getElementsByTagName("div")[0];'
' div.innerHTML="new<br>";'
'});'
'return div;')
self._driver.MouseMoveTo(div, 1, 1)
self._driver.MouseDoubleClick()
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
def testAlert(self):
  """Detects, reads, and dismisses a confirm() dialog."""
  self.assertFalse(self._driver.IsAlertOpen())
  # The confirm is raised from a 0ms timeout so ExecuteScript can return.
  self._driver.ExecuteScript(
      'window.setTimeout('
      ' function() { window.confirmed = confirm(\'HI\'); },'
      ' 0);')
  self.assertTrue(self._driver.IsAlertOpen())
  self.assertEquals('HI', self._driver.GetAlertMessage())
  # Dismiss (cancel) the confirm; confirm() then returns false.
  self._driver.HandleAlert(False)
  self.assertFalse(self._driver.IsAlertOpen())
  self.assertEquals(False,
                    self._driver.ExecuteScript('return window.confirmed'))
def testShouldHandleNewWindowLoadingProperly(self):
"""Tests that ChromeDriver determines loading correctly for new windows."""
self._http_server.SetDataForPath(
'/newwindow',
"""
<html>
<body>
<a href='%s' target='_blank'>new window/tab</a>
</body>
</html>""" % self._sync_server.GetUrl())
self._driver.Load(self._http_server.GetUrl() + '/newwindow')
old_windows = self._driver.GetWindowHandles()
self._driver.FindElement('tagName', 'a').Click()
new_window = self.WaitForNewWindow(self._driver, old_windows)
self.assertNotEqual(None, new_window)
self.assertFalse(self._driver.IsLoading())
self._driver.SwitchToWindow(new_window)
self.assertTrue(self._driver.IsLoading())
self._sync_server.RespondWithContent('<html>new window</html>')
self._driver.ExecuteScript('return 1') # Shouldn't hang.
def testPopups(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
old_handles = self._driver.GetWindowHandles()
self._driver.ExecuteScript('window.open("about:blank")')
new_window_handle = self.WaitForNewWindow(self._driver, old_handles)
self.assertNotEqual(None, new_window_handle)
def testNoSuchFrame(self):
self.assertRaises(chromedriver.NoSuchFrame,
self._driver.SwitchToFrame, 'nosuchframe')
self.assertRaises(chromedriver.NoSuchFrame,
self._driver.SwitchToFrame,
self._driver.FindElement('tagName', 'body'))
def testWindowPosition(self):
position = self._driver.GetWindowPosition()
self._driver.SetWindowPosition(position[0], position[1])
self.assertEquals(position, self._driver.GetWindowPosition())
# Resize so the window isn't moved offscreen.
# See https://code.google.com/p/chromedriver/issues/detail?id=297.
self._driver.SetWindowSize(300, 300)
self._driver.SetWindowPosition(100, 200)
self.assertEquals([100, 200], self._driver.GetWindowPosition())
def testWindowSize(self):
size = self._driver.GetWindowSize()
self._driver.SetWindowSize(size[0], size[1])
self.assertEquals(size, self._driver.GetWindowSize())
self._driver.SetWindowSize(600, 400)
self.assertEquals([600, 400], self._driver.GetWindowSize())
def testWindowMaximize(self):
  """Maximizing changes position and size; both can then be restored."""
  self._driver.SetWindowPosition(100, 200)
  self._driver.SetWindowSize(600, 400)
  self._driver.MaximizeWindow()

  self.assertNotEqual([100, 200], self._driver.GetWindowPosition())
  self.assertNotEqual([600, 400], self._driver.GetWindowSize())
  # Set size first so that the window isn't moved offscreen.
  # See https://code.google.com/p/chromedriver/issues/detail?id=297.
  self._driver.SetWindowSize(600, 400)
  self._driver.SetWindowPosition(100, 200)
  self.assertEquals([100, 200], self._driver.GetWindowPosition())
  self.assertEquals([600, 400], self._driver.GetWindowSize())
def testConsoleLogSources(self):
  """Browser log entries carry the expected 'source' fields."""
  self._driver.Load(self.GetHttpUrlForFile('/chromedriver/console_log.html'))
  logs = self._driver.GetLog('browser')
  # The page raises a TypeError (javascript source) and 404s on
  # nonexistent.png (network source).
  self.assertEqual('javascript', logs[0]['source'])
  self.assertTrue('TypeError' in logs[0]['message'])
  self.assertEqual('network', logs[1]['source'])
  self.assertTrue('nonexistent.png' in logs[1]['message'])
  self.assertTrue('404' in logs[1]['message'])
  # Sometimes, we also get an error for a missing favicon.
  if len(logs) > 2:
    self.assertEqual('network', logs[2]['source'])
    self.assertTrue('favicon.ico' in logs[2]['message'])
    self.assertTrue('404' in logs[2]['message'])
    self.assertEqual(3, len(logs))
  else:
    self.assertEqual(2, len(logs))
def testAutoReporting(self):
self.assertFalse(self._driver.IsAutoReporting())
self._driver.SetAutoReporting(True)
self.assertTrue(self._driver.IsAutoReporting())
url = self.GetHttpUrlForFile('/chromedriver/console_log.html')
self.assertRaisesRegexp(
chromedriver.UnknownError,
".*Uncaught TypeError: Cannot read property 'y' of undefined.*",
self._driver.Load, url)
def testContextMenuEventFired(self):
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/context_menu.html'))
self._driver.MouseMoveTo(self._driver.FindElement('tagName', 'div'))
self._driver.MouseClick(2)
self.assertTrue(self._driver.ExecuteScript('return success'))
def testHasFocusOnStartup(self):
# Some pages (about:blank) cause Chrome to put the focus in URL bar.
# This breaks tests depending on focus.
self.assertTrue(self._driver.ExecuteScript('return document.hasFocus()'))
def testTabCrash(self):
# If a tab is crashed, the session will be deleted.
# When 31 is released, will reload the tab instead.
# https://code.google.com/p/chromedriver/issues/detail?id=547
self.assertRaises(chromedriver.UnknownError,
self._driver.Load, 'chrome://crash')
self.assertRaises(chromedriver.NoSuchSession,
self._driver.GetCurrentUrl)
def testDoesntHangOnDebugger(self):
self._driver.Load('about:blank')
self._driver.ExecuteScript('debugger;')
def testMobileEmulationDisabledByDefault(self):
self.assertFalse(self._driver.capabilities['mobileEmulationEnabled'])
def testChromeDriverSendLargeData(self):
  """Round-trips a ~10MB script result through ChromeDriver intact."""
  script = 's = ""; for (i = 0; i < 10e6; i++) s += "0"; return s;'
  expected = '0' * int(10e6)
  received = self._driver.ExecuteScript(script)
  self.assertEquals(expected, received)
def testEmulateNetworkConditions(self):
# Network conditions must be set before it can be retrieved.
self.assertRaises(chromedriver.UnknownError,
self._driver.GetNetworkConditions)
# DSL: 2Mbps throughput, 5ms RTT
latency = 5
throughput = 2048 * 1024
self._driver.SetNetworkConditions(latency, throughput, throughput)
network = self._driver.GetNetworkConditions()
self.assertEquals(latency, network['latency']);
self.assertEquals(throughput, network['download_throughput']);
self.assertEquals(throughput, network['upload_throughput']);
self.assertEquals(False, network['offline']);
# Network Conditions again cannot be retrieved after they've been deleted.
self._driver.DeleteNetworkConditions()
self.assertRaises(chromedriver.UnknownError,
self._driver.GetNetworkConditions)
def testEmulateNetworkConditionsName(self):
# DSL: 2Mbps throughput, 5ms RTT
#latency = 5
#throughput = 2048 * 1024
self._driver.SetNetworkConditionsName('DSL')
network = self._driver.GetNetworkConditions()
self.assertEquals(5, network['latency']);
self.assertEquals(2048*1024, network['download_throughput']);
self.assertEquals(2048*1024, network['upload_throughput']);
self.assertEquals(False, network['offline']);
def testEmulateNetworkConditionsSpeed(self):
  """Measured page-load throughput stays near the emulated 2Mbps limit."""
  # Warm up the browser.
  self._http_server.SetDataForPath(
      '/', "<html><body>blank</body></html>")
  self._driver.Load(self._http_server.GetUrl() + '/')

  # DSL: 2Mbps throughput, 5ms RTT
  latency = 5
  throughput_kbps = 2048
  throughput = throughput_kbps * 1024
  self._driver.SetNetworkConditions(latency, throughput, throughput)

  # Serve roughly 1MB of content so transfer time dominates the load.
  _32_bytes = " 0 1 2 3 4 5 6 7 8 9 A B C D E F"
  _1_megabyte = _32_bytes * 32768
  self._http_server.SetDataForPath(
      '/1MB',
      "<html><body>%s</body></html>" % _1_megabyte)
  start = time.time()
  self._driver.Load(self._http_server.GetUrl() + '/1MB')
  finish = time.time()
  duration = finish - start
  actual_throughput_kbps = 1024 / duration
  # Allow a 1.5x margin in both directions around the configured rate.
  self.assertLessEqual(actual_throughput_kbps, throughput_kbps * 1.5)
  self.assertGreaterEqual(actual_throughput_kbps, throughput_kbps / 1.5)
def testEmulateNetworkConditionsNameSpeed(self):
# Warm up the browser.
self._http_server.SetDataForPath(
'/', "<html><body>blank</body></html>")
self._driver.Load(self._http_server.GetUrl() + '/')
# DSL: 2Mbps throughput, 5ms RTT
throughput_kbps = 2048
throughput = throughput_kbps * 1024
self._driver.SetNetworkConditionsName('DSL')
_32_bytes = " 0 1 2 3 4 5 6 7 8 9 A B C D E F"
_1_megabyte = _32_bytes * 32768
self._http_server.SetDataForPath(
'/1MB',
"<html><body>%s</body></html>" % _1_megabyte)
start = time.time()
self._driver.Load(self._http_server.GetUrl() + '/1MB')
finish = time.time()
duration = finish - start
actual_throughput_kbps = 1024 / duration
self.assertLessEqual(actual_throughput_kbps, throughput_kbps * 1.5)
self.assertGreaterEqual(actual_throughput_kbps, throughput_kbps / 1.5)
  def testEmulateNetworkConditionsOffline(self):
    """Checks that emulated offline mode makes page loads fail."""
    # A workaround for crbug.com/177511; when setting offline, the throughputs
    # must be 0.
    self._driver.SetNetworkConditions(0, 0, 0, offline=True)
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/page_test.html'))
    # The "X is not available" title is set after the page load event fires, so
    # we have to explicitly wait for this to change. We can't rely on the
    # navigation tracker to block the call to Load() above.
    self.WaitForCondition(lambda: 'is not available' in self._driver.GetTitle())
  def testShadowDomFindElementWithSlashDeep(self):
    """Checks that chromedriver can find elements in a shadow DOM using /deep/
    css selectors."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    # FindElement raises if nothing matches, so a truthy return means success.
    self.assertTrue(self._driver.FindElement("css", "* /deep/ #olderTextBox"))
  def testShadowDomFindChildElement(self):
    """Checks that chromedriver can find child elements from a shadow DOM
    element."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._driver.FindElement("css", "* /deep/ #olderChildDiv")
    # Searching relative to a shadow-DOM element should reach its children.
    self.assertTrue(elem.FindElement("id", "olderTextBox"))
  def testShadowDomFindElementFailsFromRootWithoutSlashDeep(self):
    """Checks that chromedriver can't find elements in a shadow DOM without
    /deep/."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    # can't find element from the root without /deep/
    with self.assertRaises(chromedriver.NoSuchElement):
      self._driver.FindElement("id", "#olderTextBox")
  def testShadowDomFindElementFailsBetweenShadowRoots(self):
    """Checks that chromedriver can't find elements in other shadow DOM
    trees."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._driver.FindElement("css", "* /deep/ #youngerChildDiv")
    # An element in one shadow tree must not see elements in a sibling tree.
    with self.assertRaises(chromedriver.NoSuchElement):
      elem.FindElement("id", "#olderTextBox")
  def testShadowDomText(self):
    """Checks that chromedriver can extract the text from a shadow DOM
    element."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._driver.FindElement("css", "* /deep/ #olderHeading")
    self.assertEqual("Older Child", elem.GetText())
  def testShadowDomSendKeys(self):
    """Checks that chromedriver can call SendKeys on a shadow DOM element."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._driver.FindElement("css", "* /deep/ #olderTextBox")
    elem.SendKeys("bar")
    # The text box starts with "foo" in the test page, so typing appends.
    self.assertEqual("foobar", self._driver.ExecuteScript(
        'return document.querySelector("* /deep/ #olderTextBox").value;'))
  def testShadowDomClear(self):
    """Checks that chromedriver can call Clear on a shadow DOM element."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._driver.FindElement("css", "* /deep/ #olderTextBox")
    elem.Clear()
    # Clearing should leave the input empty regardless of its initial value.
    self.assertEqual("", self._driver.ExecuteScript(
        'return document.querySelector("* /deep/ #olderTextBox").value;'))
  def testShadowDomClick(self):
    """Checks that chromedriver can call Click on an element in a shadow DOM."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._driver.FindElement("css", "* /deep/ #olderButton")
    elem.Click()
    # the button's onClicked handler changes the text box's value
    self.assertEqual("Button Was Clicked", self._driver.ExecuteScript(
        'return document.querySelector("* /deep/ #olderTextBox").value;'))
  def testShadowDomHover(self):
    """Checks that chromedriver can call HoverOver on an element in a
    shadow DOM."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._driver.FindElement("css", "* /deep/ #olderButton")
    elem.HoverOver()
    # the button's onMouseOver handler changes the text box's value
    self.assertEqual("Button Was Hovered Over", self._driver.ExecuteScript(
        'return document.querySelector("* /deep/ #olderTextBox").value;'))
  def testShadowDomStaleReference(self):
    """Checks that trying to manipulate shadow DOM elements that are detached
    from the document raises a StaleElementReference exception"""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._driver.FindElement("css", "* /deep/ #olderButton")
    # Detach the element by replacing its container's contents.
    self._driver.ExecuteScript(
        'document.querySelector("#outerDiv").innerHTML="<div/>";')
    with self.assertRaises(chromedriver.StaleElementReference):
      elem.Click()
  def testShadowDomDisplayed(self):
    """Checks that IsDisplayed tracks the visibility of an element inside a
    shadow DOM when an enclosing element is hidden."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/shadow_dom_test.html'))
    elem = self._driver.FindElement("css", "* /deep/ #olderButton")
    self.assertTrue(elem.IsDisplayed())
    # Hiding the enclosing div should make the shadow-DOM element not
    # displayed.
    self._driver.ExecuteScript(
        'document.querySelector("#outerDiv").style.display="None";')
    self.assertFalse(elem.IsDisplayed())
  def testTouchSingleTapElement(self):
    """Checks that SingleTap fires touchstart followed by touchend."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/touch_action_tests.html'))
    target = self._driver.FindElement('id', 'target')
    target.SingleTap()
    # The test page records received touch events into the #events element.
    events = self._driver.FindElement('id', 'events')
    self.assertEquals('events: touchstart touchend', events.GetText())
  def testTouchDownMoveUpElement(self):
    """Checks that TouchDown/TouchMove/TouchUp each fire the matching event."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/touch_action_tests.html'))
    target = self._driver.FindElement('id', 'target')
    location = target.GetLocation()
    self._driver.TouchDown(location['x'], location['y'])
    # The test page records received touch events into the #events element.
    events = self._driver.FindElement('id', 'events')
    self.assertEquals('events: touchstart', events.GetText())
    self._driver.TouchMove(location['x'] + 1, location['y'] + 1)
    self.assertEquals('events: touchstart touchmove', events.GetText())
    self._driver.TouchUp(location['x'] + 1, location['y'] + 1)
    self.assertEquals('events: touchstart touchmove touchend', events.GetText())
  def testTouchScrollElement(self):
    """Checks that TouchScroll scrolls the page by about the requested
    offsets."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/touch_action_tests.html'))
    scroll_left = 'return document.body.scrollLeft;'
    scroll_top = 'return document.body.scrollTop;'
    self.assertEquals(0, self._driver.ExecuteScript(scroll_left))
    self.assertEquals(0, self._driver.ExecuteScript(scroll_top))
    target = self._driver.FindElement('id', 'target')
    self._driver.TouchScroll(target, 47, 53)
    # Off-by-one tolerance, see
    # https://code.google.com/p/chromedriver/issues/detail?id=1179
    self.assertAlmostEqual(47, self._driver.ExecuteScript(scroll_left), delta=1)
    self.assertAlmostEqual(53, self._driver.ExecuteScript(scroll_top), delta=1)
  def testTouchDoubleTapElement(self):
    """Checks that DoubleTap fires two touchstart/touchend pairs."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/touch_action_tests.html'))
    target = self._driver.FindElement('id', 'target')
    target.DoubleTap()
    # The test page records received touch events into the #events element.
    events = self._driver.FindElement('id', 'events')
    self.assertEquals('events: touchstart touchend touchstart touchend',
                      events.GetText())
  def testTouchLongPressElement(self):
    """Checks that LongPress fires touchstart followed by touchcancel."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/touch_action_tests.html'))
    target = self._driver.FindElement('id', 'target')
    target.LongPress()
    # The test page records received touch events into the #events element.
    events = self._driver.FindElement('id', 'events')
    self.assertEquals('events: touchstart touchcancel', events.GetText())
def testTouchFlickElement(self):
dx = 3
dy = 4
speed = 5
flickTouchEventsPerSecond = 30
moveEvents = int(
math.sqrt(dx * dx + dy * dy) * flickTouchEventsPerSecond / speed)
self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
div = self._driver.ExecuteScript(
'document.body.innerHTML = "<div>old</div>";'
'var div = document.getElementsByTagName("div")[0];'
'div.addEventListener("touchstart", function() {'
' div.innerHTML = "preMove0";'
'});'
'div.addEventListener("touchmove", function() {'
' res = div.innerHTML.match(/preMove(\d+)/);'
' if (res != null) {'
' div.innerHTML = "preMove" + (parseInt(res[1], 10) + 1);'
' }'
'});'
'div.addEventListener("touchend", function() {'
' if (div.innerHTML == "preMove' + str(moveEvents) + '") {'
' div.innerHTML = "new<br>";'
' }'
'});'
'return div;')
self._driver.TouchFlick(div, dx, dy, speed)
self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
  def testTouchPinch(self):
    """Checks that TouchPinch with scale 2.0 roughly halves the viewport
    width."""
    self._driver.Load(self.GetHttpUrlForFile(
        '/chromedriver/touch_action_tests.html'))
    width_before_pinch = self._driver.ExecuteScript('return window.innerWidth;')
    height_before_pinch = self._driver.ExecuteScript(
        'return window.innerHeight;')
    # Pinch at the viewport midpoint (Python 2 integer division).
    self._driver.TouchPinch(width_before_pinch / 2,
                            height_before_pinch / 2,
                            2.0)
    width_after_pinch = self._driver.ExecuteScript('return window.innerWidth;')
    self.assertAlmostEqual(2.0, float(width_before_pinch) / width_after_pinch)
def testBrowserDoesntSupportSyntheticGestures(self):
# WebView on KitKat does not support synthetic gesture commands in DevTools,
# so touch action tests have been disabled for chromedriver_webview_shell.
# TODO(samuong): when this test starts failing, re-enable touch tests and
# delete this test.
if _ANDROID_PACKAGE_KEY:
if _ANDROID_PACKAGE_KEY == 'chromedriver_webview_shell':
self.assertFalse(self._driver.capabilities['hasTouchScreen'])
  def testHasTouchScreen(self):
    """Checks the hasTouchScreen capability: true on Android, false
    otherwise."""
    self.assertIn('hasTouchScreen', self._driver.capabilities)
    if _ANDROID_PACKAGE_KEY:
      self.assertTrue(self._driver.capabilities['hasTouchScreen'])
    else:
      self.assertFalse(self._driver.capabilities['hasTouchScreen'])
  def testSwitchesToTopFrameAfterNavigation(self):
    """Checks that navigating resets the frame focus to the top frame."""
    self._driver.Load('about:blank')
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/outer.html'))
    frame = self._driver.FindElement('tag name', 'iframe')
    self._driver.SwitchToFrame(frame)
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/outer.html'))
    # 'Two' is the top frame's <p> text; finding it proves we left the iframe.
    p = self._driver.FindElement('tag name', 'p')
    self.assertEquals('Two', p.GetText())
  def testSwitchesToTopFrameAfterRefresh(self):
    """Checks that refreshing resets the frame focus to the top frame."""
    self._driver.Load('about:blank')
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/outer.html'))
    frame = self._driver.FindElement('tag name', 'iframe')
    self._driver.SwitchToFrame(frame)
    self._driver.Refresh()
    # 'Two' is the top frame's <p> text; finding it proves we left the iframe.
    p = self._driver.FindElement('tag name', 'p')
    self.assertEquals('Two', p.GetText())
  def testSwitchesToTopFrameAfterGoingBack(self):
    """Checks that history navigation resets frame focus to the top frame."""
    self._driver.Load('about:blank')
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/outer.html'))
    frame = self._driver.FindElement('tag name', 'iframe')
    self._driver.SwitchToFrame(frame)
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/inner.html'))
    self._driver.GoBack()
    # 'Two' is the top frame's <p> text; finding it proves we left the iframe.
    p = self._driver.FindElement('tag name', 'p')
    self.assertEquals('Two', p.GetText())
  def testCanSwitchToPrintPreviewDialog(self):
    """Checks that the print preview dialog appears as a switchable window."""
    old_handles = self._driver.GetWindowHandles()
    self.assertEquals(1, len(old_handles))
    # Trigger print preview asynchronously so the script call itself returns.
    self._driver.ExecuteScript('setTimeout(function(){window.print();}, 0);')
    new_window_handle = self.WaitForNewWindow(self._driver, old_handles)
    self.assertNotEqual(None, new_window_handle)
    self._driver.SwitchToWindow(new_window_handle)
    self.assertEquals('chrome://print/', self._driver.GetCurrentUrl())
  def testCanClickInIframes(self):
    """Checks that clicks land correctly both in the top frame and inside a
    nested iframe."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/nested.html'))
    a = self._driver.FindElement('tag name', 'a')
    a.Click()
    # The link navigates within the frame to a '#one' fragment.
    frame_url = self._driver.ExecuteScript('return window.location.href')
    self.assertTrue(frame_url.endswith('#one'))
    frame = self._driver.FindElement('tag name', 'iframe')
    self._driver.SwitchToFrame(frame)
    a = self._driver.FindElement('tag name', 'a')
    a.Click()
    # Inside the iframe, the link navigates to a '#two' fragment.
    frame_url = self._driver.ExecuteScript('return window.location.href')
    self.assertTrue(frame_url.endswith('#two'))
  def testDoesntHangOnFragmentNavigation(self):
    """Checks that a same-page fragment navigation does not hang Load()."""
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
    self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html#x'))
def SetCookie(self, request):
return {'Set-Cookie': 'x=y; HttpOnly'}, "<!DOCTYPE html><html></html>"
  def testGetHttpOnlyCookie(self):
    """Checks that GetCookies reports the httpOnly flag correctly for both
    server-set and WebDriver-added cookies."""
    self._http_server.SetCallbackForPath('/setCookie', self.SetCookie)
    self._driver.Load(self.GetHttpUrlForFile('/setCookie'))
    self._driver.AddCookie({'name': 'a', 'value': 'b'})
    cookies = self._driver.GetCookies()
    self.assertEquals(2, len(cookies))
    for cookie in cookies:
      self.assertIn('name', cookie)
      if cookie['name'] == 'a':
        # Added via the WebDriver API, so it must not be httpOnly.
        self.assertFalse(cookie['httpOnly'])
      elif cookie['name'] == 'x':
        # Set by the server with the HttpOnly attribute.
        self.assertTrue(cookie['httpOnly'])
      else:
        self.fail('unexpected cookie: %s' % json.dumps(cookie))
  def testGetUrlOnInvalidUrl(self):
    """Checks that GetCurrentUrl returns the attempted URL for a failed
    navigation."""
    # Make sure we don't return 'data:text/html,chromewebdata' (see
    # https://bugs.chromium.org/p/chromedriver/issues/detail?id=1272). RFC 6761
    # requires domain registrars to keep 'invalid.' unregistered (see
    # https://tools.ietf.org/html/rfc6761#section-6.4).
    self._driver.Load('http://invalid./')
    self.assertEquals('http://invalid./', self._driver.GetCurrentUrl())
  def testCanClickAlertInIframes(self):
    """Checks that an alert raised from within an iframe can be accepted."""
    # This test requires that the page be loaded from a file:// URI, rather than
    # the test HTTP server.
    path = os.path.join(chrome_paths.GetTestData(), 'chromedriver',
      'page_with_frame.html')
    url = 'file://' + urllib.pathname2url(path)
    self._driver.Load(url)
    frame = self._driver.FindElement('id', 'frm')
    self._driver.SwitchToFrame(frame)
    a = self._driver.FindElement('id', 'btn')
    a.Click()
    # The alert may appear asynchronously after the click returns.
    self.WaitForCondition(lambda: self._driver.IsAlertOpen())
    self._driver.HandleAlert(True)
class ChromeDriverAndroidTest(ChromeDriverBaseTest):
  """End to end tests for Android-specific tests."""
  def testLatestAndroidAppInstalled(self):
    """Checks the device's Chrome is at least the latest released version.
    Fetches release data from omahaproxy; network failures are logged, not
    fatal. Only runs for the stable and beta packages.
    """
    if ('stable' not in _ANDROID_PACKAGE_KEY and
        'beta' not in _ANDROID_PACKAGE_KEY):
      return
    self._driver = self.CreateDriver()
    try:
      omaha_list = json.loads(
          urllib2.urlopen('http://omahaproxy.appspot.com/all.json').read())
      for l in omaha_list:
        if l['os'] != 'android':
          continue
        for v in l['versions']:
          if (('stable' in v['channel'] and 'stable' in _ANDROID_PACKAGE_KEY) or
              ('beta' in v['channel'] and 'beta' in _ANDROID_PACKAGE_KEY)):
            # Compare dotted version strings component-wise as int lists.
            omaha = map(int, v['version'].split('.'))
            device = map(int, self._driver.capabilities['version'].split('.'))
            self.assertTrue(omaha <= device)
            return
      raise RuntimeError('Malformed omaha JSON')
    except urllib2.URLError as e:
      print 'Unable to fetch current version info from omahaproxy (%s)' % e
  def testDeviceManagement(self):
    """Checks one session per healthy device, and that Quit frees a device."""
    self._drivers = [self.CreateDriver()
                     for _ in device_utils.DeviceUtils.HealthyDevices()]
    # With every device claimed, another session must fail...
    self.assertRaises(chromedriver.UnknownError, self.CreateDriver)
    # ...until one is released.
    self._drivers[0].Quit()
    self._drivers[0] = self.CreateDriver()
class ChromeDownloadDirTest(ChromeDriverBaseTest):
  """End to end tests that exercise downloading into a custom directory."""

  def __init__(self, *args, **kwargs):
    super(ChromeDownloadDirTest, self).__init__(*args, **kwargs)
    # Temp dirs created by tests; removed in tearDown.
    self._temp_dirs = []

  def CreateTempDir(self):
    """Creates a temp dir that is deleted when the test finishes."""
    temp_dir = tempfile.mkdtemp()
    self._temp_dirs.append(temp_dir)
    return temp_dir

  def RespondWithCsvFile(self, request):
    """HTTP handler that serves a small CSV payload."""
    return {'Content-Type': 'text/csv'}, 'a,b,c\n1,2,3\n'

  def WaitForFileToDownload(self, path):
    """Polls until |path| exists, failing the test after a 60s deadline."""
    deadline = time.time() + 60
    while True:
      time.sleep(0.1)
      if os.path.isfile(path) or time.time() > deadline:
        break
    self.assertTrue(os.path.isfile(path), "Failed to download file!")

  def tearDown(self):
    # Call the superclass tearDown() method before deleting temp dirs, so that
    # Chrome has a chance to exit before its user data dir is blown away from
    # underneath it.
    super(ChromeDownloadDirTest, self).tearDown()
    for temp_dir in self._temp_dirs:
      shutil.rmtree(temp_dir)

  def testFileDownloadWithClick(self):
    """Clicking a download link saves the file without navigating away."""
    download_dir = self.CreateTempDir()
    download_name = os.path.join(download_dir, 'a_red_dot.png')
    driver = self.CreateDriver(download_dir=download_dir)
    driver.Load(ChromeDriverTest.GetHttpUrlForFile(
        '/chromedriver/download.html'))
    driver.FindElement('id', 'red-dot').Click()
    self.WaitForFileToDownload(download_name)
    self.assertEqual(
        ChromeDriverTest.GetHttpUrlForFile('/chromedriver/download.html'),
        driver.GetCurrentUrl())

  def testFileDownloadWithGet(self):
    """Navigating directly to a downloadable URL saves the file."""
    ChromeDriverTest._http_server.SetCallbackForPath(
        '/abc.csv', self.RespondWithCsvFile)
    download_dir = self.CreateTempDir()
    download_name = os.path.join(download_dir, 'abc.csv')
    driver = self.CreateDriver(download_dir=download_dir)
    original_url = driver.GetCurrentUrl()
    driver.Load(ChromeDriverTest.GetHttpUrlForFile('/abc.csv'))
    # Reuse the precomputed download path (it was previously computed but
    # unused, and the path was rebuilt inline here).
    self.WaitForFileToDownload(download_name)
    major_version = int(driver.capabilities['version'].split('.')[0])
    if major_version > 43:
      # For some reason, the URL in M43 changes from 'data:,' to '', so we
      # need to avoid doing this assertion unless we're on M44+.
      # TODO(samuong): Assert unconditionally once we stop supporting M43.
      self.assertEqual(original_url, driver.GetCurrentUrl())

  def testDownloadDirectoryOverridesExistingPreferences(self):
    """The download_dir capability must override the profile's saved
    preference while leaving unrelated preferences untouched."""
    user_data_dir = self.CreateTempDir()
    download_dir = self.CreateTempDir()
    sub_dir = os.path.join(user_data_dir, 'Default')
    os.mkdir(sub_dir)
    prefs_file_path = os.path.join(sub_dir, 'Preferences')
    prefs = {
      'test': 'this should not be changed',
      'download': {
        'default_directory': '/old/download/directory'
      }
    }
    with open(prefs_file_path, 'w') as f:
      json.dump(prefs, f)
    driver = self.CreateDriver(
        chrome_switches=['user-data-dir=' + user_data_dir],
        download_dir=download_dir)
    with open(prefs_file_path) as f:
      prefs = json.load(f)
    self.assertEqual('this should not be changed', prefs['test'])
    download = prefs['download']
    self.assertEqual(download['default_directory'], download_dir)
class ChromeSwitchesCapabilityTest(ChromeDriverBaseTest):
  """Tests that chromedriver properly processes chromeOptions.args capabilities.
  Makes sure the switches are passed to Chrome.
  """
  def testSwitchWithoutArgument(self):
    """Tests that switch --dom-automation can be passed to Chrome.
    Unless --dom-automation is specified, window.domAutomationController
    is undefined.
    """
    # NOTE(review): the switch is given without leading dashes -- presumably
    # chromedriver prepends them; confirm against its capability handling.
    driver = self.CreateDriver(chrome_switches=['dom-automation'])
    self.assertNotEqual(
        None,
        driver.ExecuteScript('return window.domAutomationController'))
class ChromeExtensionsCapabilityTest(ChromeDriverBaseTest):
  """Tests that chromedriver properly processes chromeOptions.extensions."""

  def _PackExtension(self, ext_path):
    """Returns the base64-encoded contents of the extension at |ext_path|."""
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection.
    with open(ext_path, 'rb') as f:
      return base64.b64encode(f.read())

  def testExtensionsInstall(self):
    """Checks that chromedriver can take the extensions in crx format."""
    crx_1 = os.path.join(_TEST_DATA_DIR, 'ext_test_1.crx')
    crx_2 = os.path.join(_TEST_DATA_DIR, 'ext_test_2.crx')
    self.CreateDriver(chrome_extensions=[self._PackExtension(crx_1),
                                         self._PackExtension(crx_2)])

  def testExtensionsInstallZip(self):
    """Checks that chromedriver can take the extensions in zip format."""
    zip_1 = os.path.join(_TEST_DATA_DIR, 'ext_test_1.zip')
    self.CreateDriver(chrome_extensions=[self._PackExtension(zip_1)])

  def testWaitsForExtensionToLoad(self):
    """Checks that session creation blocks until extensions finish loading."""
    did_load_event = threading.Event()
    server = webserver.SyncWebServer()
    def RunServer():
      # Delay the response so we can tell whether CreateDriver waited.
      time.sleep(5)
      server.RespondWithContent('<html>iframe</html>')
      did_load_event.set()

    thread = threading.Thread(target=RunServer)
    thread.daemon = True
    thread.start()
    crx = os.path.join(_TEST_DATA_DIR, 'ext_slow_loader.crx')
    driver = self.CreateDriver(
        chrome_switches=['user-agent=' + server.GetUrl()],
        chrome_extensions=[self._PackExtension(crx)])
    # If CreateDriver returned before the server responded, the extension was
    # not fully loaded when the session was created.
    self.assertTrue(did_load_event.is_set())

  def testCanLaunchApp(self):
    """Checks that a packaged app can be launched and its window driven."""
    app_path = os.path.join(_TEST_DATA_DIR, 'test_app')
    driver = self.CreateDriver(chrome_switches=['load-extension=%s' % app_path])
    old_handles = driver.GetWindowHandles()
    self.assertEqual(1, len(old_handles))
    driver.LaunchApp('gegjcdcfeiojglhifpmibkadodekakpc')
    new_window_handle = self.WaitForNewWindow(driver, old_handles)
    driver.SwitchToWindow(new_window_handle)
    body_element = driver.FindElement('tag name', 'body')
    self.assertEqual('It works!', body_element.GetText())

  def testCanInspectBackgroundPage(self):
    """Checks that an app's generated background page can be switched to and
    scripted when windowTypes includes 'background_page'."""
    app_path = os.path.join(_TEST_DATA_DIR, 'test_app')
    driver = self.CreateDriver(
        chrome_switches=['load-extension=%s' % app_path],
        experimental_options={'windowTypes': ['background_page']})
    old_handles = driver.GetWindowHandles()
    driver.LaunchApp('gegjcdcfeiojglhifpmibkadodekakpc')
    new_window_handle = self.WaitForNewWindow(
        driver, old_handles, check_closed_windows=False)
    handles = driver.GetWindowHandles()
    for handle in handles:
      driver.SwitchToWindow(handle)
      if driver.GetCurrentUrl() == ('chrome-extension://'
          'gegjcdcfeiojglhifpmibkadodekakpc/_generated_background_page.html'):
        self.assertEqual(42, driver.ExecuteScript('return magic;'))
        return
    self.fail("couldn't find generated background page for test app")

  def testDontExecuteScriptsInContentScriptContext(self):
    # This test extension has a content script which runs in all frames (see
    # https://developer.chrome.com/extensions/content_scripts) which causes each
    # frame on the page to be associated with multiple JS execution contexts.
    # Make sure that ExecuteScript operates on the page's context, rather than
    # the extension's content script's one.
    extension_path = os.path.join(_TEST_DATA_DIR, 'all_frames')
    driver = self.CreateDriver(
        chrome_switches=['load-extension=%s' % extension_path])
    driver.Load(
        ChromeDriverTest._http_server.GetUrl() + '/chromedriver/container.html')
    driver.SwitchToMainFrame()
    self.assertEqual('one', driver.ExecuteScript("return window['global_var']"))
    driver.SwitchToFrame('iframe')
    self.assertEqual('two', driver.ExecuteScript("return window['iframe_var']"))
class ChromeLogPathCapabilityTest(ChromeDriverBaseTest):
  """Tests that chromedriver properly processes chromeOptions.logPath."""

  LOG_MESSAGE = 'Welcome to ChromeLogPathCapabilityTest!'

  def testChromeLogPath(self):
    """Checks that user can specify the path of the chrome log.
    Verifies that a log message is written into the specified log file.
    """
    tmp_log_path = tempfile.NamedTemporaryFile()
    driver = self.CreateDriver(chrome_log_path=tmp_log_path.name)
    driver.ExecuteScript('console.info("%s")' % self.LOG_MESSAGE)
    driver.Quit()
    # Read via a context manager so the extra file handle is closed instead
    # of leaked (the original left the handle from open() unclosed).
    with open(tmp_log_path.name) as f:
      self.assertTrue(self.LOG_MESSAGE in f.read())
class MobileEmulationCapabilityTest(ChromeDriverBaseTest):
  """Tests that ChromeDriver processes chromeOptions.mobileEmulation.
  Makes sure the device metrics are overridden in DevTools and user agent is
  overridden in Chrome.
  """
  @staticmethod
  def GlobalSetUp():
    # Handlers echo the request's User-Agent header back in the page body so
    # tests can assert on the user agent Chrome actually sent.
    def respondWithUserAgentString(request):
      return {}, """
        <html>
        <body>%s</body>
        </html>""" % request.GetHeader('User-Agent')
    def respondWithUserAgentStringUseDeviceWidth(request):
      return {}, """
        <html>
        <head>
        <meta name="viewport" content="width=device-width,minimum-scale=1.0">
        </head>
        <body>%s</body>
        </html>""" % request.GetHeader('User-Agent')
    MobileEmulationCapabilityTest._http_server = webserver.WebServer(
        chrome_paths.GetTestData())
    MobileEmulationCapabilityTest._http_server.SetCallbackForPath(
        '/userAgent', respondWithUserAgentString)
    MobileEmulationCapabilityTest._http_server.SetCallbackForPath(
        '/userAgentUseDeviceWidth', respondWithUserAgentStringUseDeviceWidth)
  @staticmethod
  def GlobalTearDown():
    MobileEmulationCapabilityTest._http_server.Shutdown()
  def testDeviceMetricsWithStandardWidth(self):
    """Checks deviceMetrics override window.screen on a fixed-width page."""
    driver = self.CreateDriver(
        mobile_emulation = {
          'deviceMetrics': {'width': 360, 'height': 640, 'pixelRatio': 3},
          'userAgent': 'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Bui'
                       'ld/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chr'
                       'ome/18.0.1025.166 Mobile Safari/535.19'
          })
    driver.SetWindowSize(600, 400)
    driver.Load(self._http_server.GetUrl() + '/userAgent')
    self.assertTrue(driver.capabilities['mobileEmulationEnabled'])
    self.assertEqual(360, driver.ExecuteScript('return window.screen.width'))
    self.assertEqual(640, driver.ExecuteScript('return window.screen.height'))
  def testDeviceMetricsWithDeviceWidth(self):
    """Checks deviceMetrics override window.screen on a device-width page."""
    driver = self.CreateDriver(
        mobile_emulation = {
          'deviceMetrics': {'width': 360, 'height': 640, 'pixelRatio': 3},
          'userAgent': 'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Bui'
                       'ld/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chr'
                       'ome/18.0.1025.166 Mobile Safari/535.19'
          })
    driver.Load(self._http_server.GetUrl() + '/userAgentUseDeviceWidth')
    self.assertTrue(driver.capabilities['mobileEmulationEnabled'])
    self.assertEqual(360, driver.ExecuteScript('return window.screen.width'))
    self.assertEqual(640, driver.ExecuteScript('return window.screen.height'))
  def testUserAgent(self):
    """Checks that the userAgent option overrides the User-Agent header."""
    driver = self.CreateDriver(
        mobile_emulation = {'userAgent': 'Agent Smith'})
    driver.Load(self._http_server.GetUrl() + '/userAgent')
    # The test server echoes the User-Agent header into the page body.
    body_tag = driver.FindElement('tag name', 'body')
    self.assertEqual("Agent Smith", body_tag.GetText())
  def testDeviceName(self):
    """Checks that deviceName selects both metrics and user agent presets."""
    driver = self.CreateDriver(
        mobile_emulation = {'deviceName': 'Google Nexus 5'})
    driver.Load(self._http_server.GetUrl() + '/userAgentUseDeviceWidth')
    self.assertEqual(360, driver.ExecuteScript('return window.screen.width'))
    self.assertEqual(640, driver.ExecuteScript('return window.screen.height'))
    body_tag = driver.FindElement('tag name', 'body')
    self.assertEqual(
        'Mozilla/5.0 (Linux; Android 4.4.4; Nexus 5 Build/KTU84P) '
        'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.114 Mobile '
        'Safari/537.36',
        body_tag.GetText())
  def testSendKeysToElement(self):
    """Checks SendKeys works on an input while mobile emulation is active."""
    driver = self.CreateDriver(
        mobile_emulation = {'deviceName': 'Google Nexus 5'})
    text = driver.ExecuteScript(
        'document.body.innerHTML = \'<input type="text">\';'
        'var input = document.getElementsByTagName("input")[0];'
        'input.addEventListener("change", function() {'
        '  document.body.appendChild(document.createElement("br"));'
        '});'
        'return input;')
    text.SendKeys('0123456789+-*/ Hi')
    text.SendKeys(', there!')
    value = driver.ExecuteScript('return arguments[0].value;', text)
    self.assertEquals('0123456789+-*/ Hi, there!', value)
  def testClickElement(self):
    """Checks Click works (as a tap) while mobile emulation is active."""
    driver = self.CreateDriver(
        mobile_emulation = {'deviceName': 'Google Nexus 5'})
    driver.Load('about:blank')
    div = driver.ExecuteScript(
        'document.body.innerHTML = "<div>old</div>";'
        'var div = document.getElementsByTagName("div")[0];'
        'div.addEventListener("click", function() {'
        '  div.innerHTML="new<br>";'
        '});'
        'return div;')
    div.Click()
    self.assertEquals(1, len(driver.FindElements('tag name', 'br')))
  def testHasTouchScreen(self):
    """Checks that mobile emulation reports a touch screen capability."""
    driver = self.CreateDriver(
        mobile_emulation = {'deviceName': 'Google Nexus 5'})
    self.assertIn('hasTouchScreen', driver.capabilities)
    self.assertTrue(driver.capabilities['hasTouchScreen'])
class ChromeDriverLogTest(ChromeDriverBaseTest):
  """Tests that chromedriver produces the expected log file."""
  # A capability chromedriver does not recognize, used to force a warning.
  UNEXPECTED_CHROMEOPTION_CAP = 'unexpected_chromeoption_capability'
  LOG_MESSAGE = 'unrecognized chrome option: %s' % UNEXPECTED_CHROMEOPTION_CAP
  def testChromeDriverLog(self):
    """Checks that an unrecognized chrome option is reported in the log
    file."""
    _, tmp_log_path = tempfile.mkstemp(prefix='chromedriver_log_')
    chromedriver_server = server.Server(
        _CHROMEDRIVER_BINARY, log_path=tmp_log_path)
    try:
      driver = chromedriver.ChromeDriver(
          chromedriver_server.GetUrl(), chrome_binary=_CHROME_BINARY,
          experimental_options={ self.UNEXPECTED_CHROMEOPTION_CAP : 1 })
      driver.Quit()
    except chromedriver.ChromeDriverException, e:
      # Session creation may fail; the warning must still be in the message.
      self.assertTrue(self.LOG_MESSAGE in e.message)
    finally:
      chromedriver_server.Kill()
    with open(tmp_log_path, 'r') as f:
      self.assertTrue(self.LOG_MESSAGE in f.read())
  def testDisablingDriverLogsSuppressesChromeDriverLog(self):
    """Checks that logging_prefs driver=OFF keeps session data out of the
    log."""
    _, tmp_log_path = tempfile.mkstemp(prefix='chromedriver_log_')
    chromedriver_server = server.Server(
        _CHROMEDRIVER_BINARY, log_path=tmp_log_path)
    try:
      driver = self.CreateDriver(
          chromedriver_server.GetUrl(), logging_prefs={'driver':'OFF'})
      driver.Load(
          ChromeDriverTest._http_server.GetUrl() + '/chromedriver/empty.html')
      # 'bosco' would appear in the log if driver logging were enabled.
      driver.AddCookie({'name': 'secret_code', 'value': 'bosco'})
      driver.Quit()
    finally:
      chromedriver_server.Kill()
    with open(tmp_log_path, 'r') as f:
      self.assertNotIn('bosco', f.read())
class PerformanceLoggerTest(ChromeDriverBaseTest):
  """Tests chromedriver tracing support and Inspector event collection."""

  def testPerformanceLogger(self):
    """Checks that console.time marks appear in trace data and that the
    performance log contains exactly the expected DevTools domains."""
    driver = self.CreateDriver(
        experimental_options={'perfLoggingPrefs': {
            'traceCategories': 'webkit.console,blink.console'
          }}, logging_prefs={'performance':'ALL'})
    driver.Load(
        ChromeDriverTest._http_server.GetUrl() + '/chromedriver/empty.html')
    # Mark the timeline; later we will verify the marks appear in the trace.
    driver.ExecuteScript('console.time("foobar")')
    driver.ExecuteScript('console.timeEnd("foobar")')
    logs = driver.GetLog('performance')
    driver.Quit()
    marked_timeline_events = []
    # Use a real set rather than a dict with dummy True values.
    seen_log_domains = set()
    for entry in logs:
      devtools_message = json.loads(entry['message'])['message']
      method = devtools_message['method']
      domain = method[:method.find('.')]
      seen_log_domains.add(domain)
      if method != 'Tracing.dataCollected':
        continue
      self.assertTrue('params' in devtools_message)
      self.assertTrue(isinstance(devtools_message['params'], dict))
      cat = devtools_message['params'].get('cat', '')
      # Depending on Chrome version, the events may occur for the webkit.console
      # or blink.console category. They will only occur for one of them.
      if (cat == 'blink.console' or cat == 'webkit.console'):
        self.assertTrue(devtools_message['params']['name'] == 'foobar')
        marked_timeline_events.append(devtools_message)
    # One event for console.time and one for console.timeEnd.
    self.assertEquals(2, len(marked_timeline_events))
    self.assertEquals({'Network', 'Page', 'Tracing'}, seen_log_domains)
class SessionHandlingTest(ChromeDriverBaseTest):
  """Tests for session operations."""
  def testQuitASessionMoreThanOnce(self):
    """Quitting an already-quit session should not raise."""
    driver = self.CreateDriver()
    driver.Quit()
    driver.Quit()
  def testGetSessions(self):
    """GetSessions should report every live session on the server."""
    driver = self.CreateDriver()
    response = driver.GetSessions()
    self.assertEqual(1, len(response))
    driver2 = self.CreateDriver()
    response = driver2.GetSessions()
    self.assertEqual(2, len(response))
class RemoteBrowserTest(ChromeDriverBaseTest):
  """Tests for ChromeDriver remote browser capability."""
  def setUp(self):
    self.assertTrue(_CHROME_BINARY is not None,
                    'must supply a chrome binary arg')
  def testConnectToRemoteBrowser(self):
    """Checks that ChromeDriver can attach to an already-running Chrome via
    its remote debugging port."""
    port = self.FindFreePort()
    temp_dir = util.MakeTempDir()
    process = subprocess.Popen([_CHROME_BINARY,
                                '--remote-debugging-port=%d' % port,
                                '--user-data-dir=%s' % temp_dir,
                                '--use-mock-keychain'])
    if process is None:
      raise RuntimeError('Chrome could not be started with debugging port')
    try:
      driver = self.CreateDriver(debugger_address='127.0.0.1:%d' % port)
      driver.ExecuteScript('console.info("%s")' % 'connecting at %d!' % port)
      driver.Quit()
    finally:
      process.terminate()
  def FindFreePort(self):
    """Returns a port in [10000, 10100) that refused a TCP connection.
    NOTE(review): this probe is inherently racy -- the port may be claimed
    between the check and its use by the caller.
    """
    for port in range(10000, 10100):
      try:
        # A successful connection means the port is in use; keep scanning.
        socket.create_connection(('127.0.0.1', port), 0.2).close()
      except socket.error:
        return port
    raise RuntimeError('Cannot find open port')
class PerfTest(ChromeDriverBaseTest):
  """Tests for ChromeDriver perf."""
  def setUp(self):
    self.assertTrue(_REFERENCE_CHROMEDRIVER is not None,
                    'must supply a reference-chromedriver arg')
  def _RunDriverPerfTest(self, name, test_func):
    """Runs a perf test comparing a reference and new ChromeDriver server.
    Args:
      name: The name of the perf test.
      test_func: Called with the server url to perform the test action. Must
          return the time elapsed.
    """
    class Results(object):
      # Elapsed-time samples for the reference and the new server.
      ref = []
      new = []
    ref_server = server.Server(_REFERENCE_CHROMEDRIVER)
    results = Results()
    result_url_pairs = zip([results.new, results.ref],
                           [_CHROMEDRIVER_SERVER_URL, ref_server.GetUrl()])
    for iteration in range(30):
      for result, url in result_url_pairs:
        result += [test_func(url)]
      # Reverse the order for the next run.
      result_url_pairs = result_url_pairs[::-1]
    def PrintResult(build, result):
      # Report mean and mean absolute deviation of the samples.
      mean = sum(result) / len(result)
      avg_dev = sum([abs(sample - mean) for sample in result]) / len(result)
      print 'perf result', build, name, mean, avg_dev, result
      util.AddBuildStepText('%s %s: %.3f+-%.3f' % (
          build, name, mean, avg_dev))
    # Discard first result, which may be off due to cold start.
    PrintResult('new', results.new[1:])
    PrintResult('ref', results.ref[1:])
  def testSessionStartTime(self):
    """Measures the time to create a new session."""
    def Run(url):
      start = time.time()
      driver = self.CreateDriver(url)
      end = time.time()
      driver.Quit()
      return end - start
    self._RunDriverPerfTest('session start', Run)
  def testSessionStopTime(self):
    """Measures the time to quit an existing session."""
    def Run(url):
      driver = self.CreateDriver(url)
      start = time.time()
      driver.Quit()
      end = time.time()
      return end - start
    self._RunDriverPerfTest('session stop', Run)
  def testColdExecuteScript(self):
    """Measures the time of the first ExecuteScript in a fresh session."""
    def Run(url):
      driver = self.CreateDriver(url)
      start = time.time()
      driver.ExecuteScript('return 1')
      end = time.time()
      driver.Quit()
      return end - start
    self._RunDriverPerfTest('cold exe js', Run)
if __name__ == '__main__':
  # Command-line interface for the test runner.
  parser = optparse.OptionParser()
  parser.add_option(
      '', '--chromedriver',
      help='Path to chromedriver server (REQUIRED!)')
  parser.add_option(
      '', '--log-path',
      help='Output verbose server logs to this file')
  parser.add_option(
      '', '--reference-chromedriver',
      help='Path to the reference chromedriver server')
  parser.add_option(
      '', '--chrome', help='Path to a build of the chrome binary')
  parser.add_option(
      '', '--chrome-version', default='HEAD',
      help='Version of chrome. Default is \'HEAD\'.')
  parser.add_option(
      '', '--filter', type='string', default='*',
      help=('Filter for specifying what tests to run, "*" will run all. E.g., '
            '*testStartStop'))
  parser.add_option(
      '', '--android-package',
      help=('Android package key. Possible values: ' +
            str(_ANDROID_NEGATIVE_FILTER.keys())))
  options, args = parser.parse_args()
  options.chromedriver = util.GetAbsolutePathOfUserPath(options.chromedriver)
  if not options.chromedriver or not os.path.exists(options.chromedriver):
    # NOTE(review): these two literals concatenate without a separating
    # space ("...invalid.Please run..."); left as-is in this doc-only pass.
    parser.error('chromedriver is required or the given path is invalid.' +
                 'Please run "%s --help" for help' % __file__)
  # `global` at module scope is a no-op; kept to document that these names
  # are read by the test classes as module globals.
  global _CHROMEDRIVER_BINARY
  _CHROMEDRIVER_BINARY = options.chromedriver
  if (options.android_package and
      options.android_package not in _ANDROID_NEGATIVE_FILTER):
    parser.error('Invalid --android-package')
  # Start the server under test once; all tests share its URL.
  chromedriver_server = server.Server(_CHROMEDRIVER_BINARY, options.log_path)
  global _CHROMEDRIVER_SERVER_URL
  _CHROMEDRIVER_SERVER_URL = chromedriver_server.GetUrl()
  global _REFERENCE_CHROMEDRIVER
  _REFERENCE_CHROMEDRIVER = util.GetAbsolutePathOfUserPath(
      options.reference_chromedriver)
  global _CHROME_BINARY
  if options.chrome:
    _CHROME_BINARY = util.GetAbsolutePathOfUserPath(options.chrome)
  else:
    _CHROME_BINARY = None
  global _ANDROID_PACKAGE_KEY
  _ANDROID_PACKAGE_KEY = options.android_package
  if _ANDROID_PACKAGE_KEY:
    devil_chromium.Initialize()
  # With the default filter, exclude the known-bad tests for the selected
  # platform (Android package or desktop Chrome version).
  if options.filter == '*':
    if _ANDROID_PACKAGE_KEY:
      negative_filter = _ANDROID_NEGATIVE_FILTER[_ANDROID_PACKAGE_KEY]
    else:
      negative_filter = _GetDesktopNegativeFilter(options.chrome_version)
    # Expands to '*-:__main__.test1:__main__.test2:...' (gtest-style).
    options.filter = '*-' + ':__main__.'.join([''] + negative_filter)
  all_tests_suite = unittest.defaultTestLoader.loadTestsFromModule(
      sys.modules[__name__])
  tests = unittest_util.FilterTestSuite(all_tests_suite, options.filter)
  ChromeDriverTest.GlobalSetUp()
  MobileEmulationCapabilityTest.GlobalSetUp()
  result = unittest.TextTestRunner(stream=sys.stdout, verbosity=2).run(tests)
  ChromeDriverTest.GlobalTearDown()
  MobileEmulationCapabilityTest.GlobalTearDown()
  # Exit nonzero when anything failed so build bots notice.
  sys.exit(len(result.failures) + len(result.errors))
|
|
"""Object-oriented interface to sparse polynomial representation. """
from sympy.polys.polyclasses import GenericPoly
class SparsePoly(GenericPoly):
    """Sparse polynomial over an arbitrary domain.

    Object-oriented wrapper around the functional ``smp_*`` sparse
    polynomial routines (expected to be in scope at module level).
    Each instance stores:

    * ``rep`` -- the raw sparse representation,
    * ``lev`` -- the number of variables minus one,
    * ``ord`` -- the monomial ordering used by ``rep``,
    * ``dom`` -- the ground domain of the coefficients.
    """

    __slots__ = ['rep', 'lev', 'ord', 'dom', '_hash']

    def __init__(self, rep, ord, dom, lev=None):
        """Initialize from a representation, an ordering and a domain."""
        if lev is None:
            # Derive (and validate) the level from the representation.
            rep, lev = smp_validate(rep)

        self.rep = rep
        self.lev = lev
        self.ord = ord
        self.dom = dom

        # Hash is computed lazily and cached by __hash__.
        self._hash = None

    def __repr__(self):
        return "%s(%s, %s, %s)" % (self.__class__.__name__, self.rep, self.ord, self.dom)

    def __hash__(self):
        _hash = self._hash

        if _hash is None:
            self._hash = _hash = hash((self.__class__.__name__, repr(self.rep), self.ord, self.dom))

        return _hash

    def unify(f, g):
        """Unify representations of two sparse polynomials.

        ``g`` may be a single polynomial or an iterable of polynomials;
        in the latter case a list of raw representations is returned.
        """
        if not hasattr(g, '__iter__'):
            if f.lev == g.lev and f.ord == g.ord and f.dom == g.dom:
                return f.lev, f.ord, f.dom, f.per, f.rep, g.rep
            else:
                raise UnificationFailed("can't unify %s with %s" % (f, g))
        else:
            lev, ord, dom, reps = f.lev, f.ord, f.dom, []

            for gg in g:
                if gg.lev == lev and gg.ord == ord and gg.dom == dom:
                    reps.append(gg.rep)
                else:
                    raise UnificationFailed("can't unify %s with %s" % (f, g))

            return lev, ord, dom, f.per, f.rep, reps

    def per(f, rep, ord=None, dom=None, lower=False):
        """Create a sparse polynomial out of the given representation. """
        lev = f.lev

        if lower:
            if not lev:
                # Ground level reached: return the bare coefficient.
                return rep
            else:
                lev -= 1

        if dom is None:
            dom = f.dom

        if ord is None:
            ord = f.ord

        # Fix: the constructor signature is (rep, ord, dom, lev); the
        # previous code passed ``dom`` and ``ord`` swapped.
        return SparsePoly(rep, ord, dom, lev)

    @classmethod
    def zero(cls, lev, ord, dom):
        """Construct a zero-polynomial with appropriate properties. """
        return cls(smp_zero(lev), ord, dom, lev)

    @classmethod
    def one(cls, lev, ord, dom):
        """Construct a one-polynomial with appropriate properties. """
        return cls(smp_one(lev, dom), ord, dom, lev)

    @classmethod
    def from_ground(cls, rep, lev, ord, dom):
        """Create sparse representation from an element of the ground domain. """
        return cls(smp_from_ground(rep, lev, ord, dom), ord, dom, lev)

    @classmethod
    def from_dict(cls, rep, lev, ord, dom):
        """Create sparse representation from a ``dict`` with native coefficients. """
        return cls(smp_from_dict(rep, lev, ord, dom), ord, dom, lev)

    @classmethod
    def from_sympy_dict(cls, rep, lev, ord, dom):
        """Create sparse representation from a ``dict`` with SymPy's coefficients. """
        return cls(smp_from_sympy_dict(rep, lev, ord, dom), ord, dom, lev)

    @classmethod
    def from_list(cls, rep, lev, ord, dom):
        """Create sparse representation from a ``list`` with native coefficients. """
        # NOTE(review): delegates to the dict-based converter; confirm that a
        # dedicated smp_from_list backend is not intended here.
        return cls(smp_from_dict(rep, lev, ord, dom), ord, dom, lev)

    @classmethod
    def from_sympy_list(cls, rep, lev, ord, dom):
        """Create sparse representation from a ``list`` with SymPy's coefficients. """
        # NOTE(review): delegates to the dict-based converter (see from_list).
        return cls(smp_from_sympy_dict(rep, lev, ord, dom), ord, dom, lev)

    def to_ground(f):
        """Convert sparse representation to an element of the ground domain. """
        return smp_to_ground(f.rep, f.lev, f.ord, f.dom)

    def to_dict(f):
        """Convert sparse representation to a ``dict`` with native coefficients. """
        return smp_to_dict(f.rep, f.lev, f.ord, f.dom)

    def to_sympy_dict(f):
        """Convert sparse representation to a ``dict`` with SymPy's coefficients. """
        return smp_to_sympy_dict(f.rep, f.lev, f.ord, f.dom)

    def to_list(f):
        """Convert sparse representation to a ``list`` with native coefficients. """
        # NOTE(review): delegates to the dict-based converter (see from_list).
        return smp_to_dict(f.rep, f.lev, f.ord, f.dom)

    def to_sympy_list(f):
        """Convert sparse representation to a ``list`` with SymPy's coefficients. """
        # NOTE(review): delegates to the dict-based converter (see from_list).
        return smp_to_sympy_dict(f.rep, f.lev, f.ord, f.dom)

    def set_order(f, ord):
        """Set the ordering of monomials in `f` to ``ord``. """
        if f.ord == ord:
            return f
        else:
            return f.per(smp_set_order(f.rep, f.lev, ord, f.dom), ord=ord)

    def set_domain(f, dom):
        """Set the ground domain in `f` to ``dom``. """
        if f.dom == dom:
            return f
        else:
            return f.per(smp_set_domain(f.rep, f.lev, f.ord, f.dom, dom), dom=dom)

    def LC(f):
        """Return the leading coefficient of `f`. """
        return smp_ground_LC(f.rep, f.lev, f.ord, f.dom)

    def LM(f):
        """Return the leading monomial of `f`. """
        return smp_ground_LM(f.rep, f.lev, f.ord, f.dom)

    def LT(f):
        """Return the leading term of `f`. """
        return smp_ground_LT(f.rep, f.lev, f.ord, f.dom)

    def TC(f):
        """Return the trailing coefficient of `f`. """
        return smp_ground_TC(f.rep, f.lev, f.ord, f.dom)

    def TM(f):
        """Return the trailing monomial of `f`. """
        return smp_ground_TM(f.rep, f.lev, f.ord, f.dom)

    def TT(f):
        """Return the trailing term of `f`. """
        return smp_ground_TT(f.rep, f.lev, f.ord, f.dom)

    def EC(f):
        """Return the last non-zero coefficient of `f`. """
        return smp_ground_EC(f.rep, f.lev, f.ord, f.dom)

    def EM(f):
        """Return the last non-zero monomial of `f`. """
        return smp_ground_EM(f.rep, f.lev, f.ord, f.dom)

    def ET(f):
        """Return the last non-zero term of `f`. """
        return smp_ground_ET(f.rep, f.lev, f.ord, f.dom)

    def nth(f, *N):
        """Return `n`-th coefficient of `f`. """
        # NOTE(review): unlike the other ground accessors this backend takes
        # no ordering argument; confirm against the smp_ground_nth signature.
        return smp_ground_nth(f.rep, N, f.lev, f.dom)

    def coeffs(f):
        """Return all non-zero coefficients of `f`. """
        return smp_coeffs(f.rep, f.lev, f.ord, f.dom)

    def monoms(f):
        """Return all non-zero monomials of `f`. """
        return smp_monoms(f.rep, f.lev, f.ord, f.dom)

    def terms(f):
        """Return all non-zero terms from `f`. """
        return smp_terms(f.rep, f.lev, f.ord, f.dom)

    def all_coeffs(f):
        """Return all coefficients of `f`. """
        return smp_all_coeffs(f.rep, f.lev, f.ord, f.dom)

    def all_monoms(f):
        """Return all monomials of `f`. """
        return smp_all_monoms(f.rep, f.lev, f.ord, f.dom)

    def all_terms(f):
        """Return all terms of `f`. """
        return smp_all_terms(f.rep, f.lev, f.ord, f.dom)

    def degree(f, j=0):
        """Return the degree of `f` in `x_j`. """
        return smp_degree(f.rep, j, f.lev)

    def degrees(f):
        """Return the list of degrees of `f`. """
        return smp_degrees(f.rep, f.lev)

    def total_degree(f):
        """Return the total degree of `f`. """
        return smp_total_degree(f.rep, f.lev)

    def deflate(f):
        """Reduce degree of `f` by mapping `x_i^m` to `y_i`. """
        M, F = smp_deflate(f.rep, f.lev, f.ord, f.dom)
        return M, f.per(F)

    def inflate(f, M):
        """Revert :func:`deflate` by mapping `y_i` to `x_i^m`. """
        return f.per(smp_inflate(f.rep, M, f.lev, f.ord, f.dom))

    def terms_gcd(f):
        """Remove GCD of terms from the polynomial `f`. """
        J, F = smp_terms_gcd(f.rep, f.lev, f.ord, f.dom)
        return J, f.per(F)

    def add_ground(f, c):
        """Add an element of the ground domain to `f`. """
        return f.per(smp_add_ground(f.rep, f.dom.convert(c), f.lev, f.ord, f.dom))

    def sub_ground(f, c):
        """Subtract an element of the ground domain from `f`. """
        return f.per(smp_sub_ground(f.rep, f.dom.convert(c), f.lev, f.ord, f.dom))

    def mul_ground(f, c):
        """Multiply `f` by an element of the ground domain. """
        return f.per(smp_mul_ground(f.rep, f.dom.convert(c), f.lev, f.ord, f.dom))

    def quo_ground(f, c):
        """Quotient of `f` by an element of the ground domain. """
        return f.per(smp_quo_ground(f.rep, f.dom.convert(c), f.lev, f.ord, f.dom))

    def exquo_ground(f, c):
        """Exact quotient of `f` by an element of the ground domain. """
        return f.per(smp_exquo_ground(f.rep, f.dom.convert(c), f.lev, f.ord, f.dom))

    def abs(f):
        """Make all coefficients in `f` positive. """
        return f.per(smp_abs(f.rep, f.lev, f.ord, f.dom))

    def neg(f):
        """Negate all coefficients in `f`. """
        return f.per(smp_neg(f.rep, f.lev, f.ord, f.dom))

    def add(f, g):
        """Add two multivariate polynomials `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        return per(smp_add(F, G, lev, ord, dom))

    def sub(f, g):
        """Subtract two multivariate polynomials `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        return per(smp_sub(F, G, lev, ord, dom))

    def mul(f, g):
        """Multiply two multivariate polynomials `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        return per(smp_mul(F, G, lev, ord, dom))

    def sqr(f):
        """Square a multivariate polynomial `f`. """
        return f.per(smp_sqr(f.rep, f.lev, f.ord, f.dom))

    def pow(f, n):
        """Raise `f` to a non-negative power `n`. """
        return f.per(smp_pow(f.rep, n, f.lev, f.ord, f.dom))

    def pdiv(f, g):
        """Polynomial pseudo-division of `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        q, r = smp_pdiv(F, G, lev, ord, dom)
        return per(q), per(r)

    def prem(f, g):
        """Polynomial pseudo-remainder of `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        # Fix: pass ``ord`` like the sibling pseudo-division methods
        # (pdiv, pquo, pexquo); it was previously omitted.
        return per(smp_prem(F, G, lev, ord, dom))

    def pquo(f, g):
        """Polynomial pseudo-quotient of `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        return per(smp_pquo(F, G, lev, ord, dom))

    def pexquo(f, g):
        """Polynomial exact pseudo-quotient of `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        return per(smp_pexquo(F, G, lev, ord, dom))

    def div(f, g):
        """Polynomial division with remainder of `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        q, r = smp_div(F, G, lev, ord, dom)
        return per(q), per(r)

    def rem(f, g):
        """Compute polynomial remainder of `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        return per(smp_rem(F, G, lev, ord, dom))

    def quo(f, g):
        """Compute polynomial quotient of `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        return per(smp_quo(F, G, lev, ord, dom))

    def exquo(f, g):
        """Compute polynomial exact quotient of `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        return per(smp_exquo(F, G, lev, ord, dom))

    def reduced(f, G):
        """Reduce `f` modulo a set of polynomials `G`. """
        # Avoid rebinding ``f`` (the receiver) to its raw representation.
        lev, ord, dom, per, F, reps = f.unify(G)
        return per(smp_reduced(F, reps, lev, ord, dom))

    def max_norm(f):
        """Returns maximum norm of `f`. """
        return smp_max_norm(f.rep, f.lev, f.ord, f.dom)

    def l1_norm(f):
        """Returns l1 norm of `f`. """
        return smp_l1_norm(f.rep, f.lev, f.ord, f.dom)

    def clear_denoms(f, convert=False):
        """Clear denominators in `f`, but keep the ground domain. """
        coeff, F = smp_clear_denoms(f.rep, f.lev, f.ord, f.dom, convert=convert)
        return coeff, f.per(F)

    def lift(f):
        """Convert algebraic coefficients to rationals. """
        return f.per(smp_lift(f.rep, f.lev, f.ord, f.dom), dom=f.dom.dom)

    def half_gcdex(f, g):
        """Half extended Euclidean algorithm. """
        lev, ord, dom, per, F, G = f.unify(g)
        # Fix: pass ``lev`` for consistency with gcdex(); it was dropped.
        s, h = smp_half_gcdex(F, G, lev, ord, dom)
        return per(s), per(h)

    def gcdex(f, g):
        """Extended Euclidean algorithm. """
        lev, ord, dom, per, F, G = f.unify(g)
        s, t, h = smp_gcdex(F, G, lev, ord, dom)
        return per(s), per(t), per(h)

    def invert(f, g):
        """Invert `f` modulo `g`, if possible. """
        lev, ord, dom, per, F, G = f.unify(g)
        return per(smp_invert(F, G, lev, ord, dom))

    def subresultants(f, g):
        """Compute subresultant PRS sequence of `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        R = smp_subresultants(F, G, lev, ord, dom)
        # List comprehension keeps the Python 2 behavior (a list) and is
        # also correct under Python 3, where map() returns an iterator.
        return [per(r) for r in R]

    def resultant(f, g):
        """Compute resultant of `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        return per(smp_resultant(F, G, lev, ord, dom), lower=True)

    def discriminant(f):
        """Compute discriminant of `f`. """
        return f.per(smp_discriminant(f.rep, f.lev, f.ord, f.dom), lower=True)

    def cofactors(f, g):
        """Compute GCD of `f` and `g` and their cofactors. """
        lev, ord, dom, per, F, G = f.unify(g)
        h, cff, cfg = smp_cofactors(F, G, lev, ord, dom)
        return per(h), per(cff), per(cfg)

    def gcd(f, g):
        """Compute polynomial GCD of `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        return per(smp_gcd(F, G, lev, ord, dom))

    def lcm(f, g):
        """Compute polynomial LCM of `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        return per(smp_lcm(F, G, lev, ord, dom))

    def trunc(f, p):
        """Reduce `f` modulo an element of the ground domain. """
        return f.per(smp_ground_trunc(f.rep, f.dom.convert(p), f.lev, f.ord, f.dom))

    def monic(f):
        """Divide all coefficients by the leading coefficient of `f`. """
        return f.per(smp_ground_monic(f.rep, f.lev, f.ord, f.dom))

    def content(f):
        """Compute GCD of all coefficients of `f`. """
        return smp_ground_content(f.rep, f.lev, f.ord, f.dom)

    def primitive(f):
        """Compute content and the primitive form of `f`. """
        cont, F = smp_ground_primitive(f.rep, f.lev, f.ord, f.dom)
        return cont, f.per(F)

    def integrate(f, m=1, j=0):
        """Compute `m`-th order indefinite integral of `f` in `x_j`. """
        return f.per(smp_integrate_in(f.rep, m, j, f.lev, f.ord, f.dom))

    def diff(f, m=1, j=0):
        """Compute `m`-th order derivative of `f` in `x_j`. """
        return f.per(smp_diff_in(f.rep, m, j, f.lev, f.ord, f.dom))

    def eval(f, a, j=0):
        """Evaluate `f` at the given point `a` in `x_j`. """
        return f.per(smp_eval_in(f.rep, f.dom.convert(a), j, f.lev, f.ord, f.dom), lower=True)

    def mirror(f, j=0):
        """Evaluate efficiently composition `f(-x_j)`. """
        return f.per(smp_mirror_in(f.rep, j, f.lev, f.ord, f.dom))

    def scale(f, a, j=0):
        """Evaluate efficiently composition `f(a x_j)`. """
        return f.per(smp_scale_in(f.rep, f.dom.convert(a), j, f.lev, f.ord, f.dom))

    def taylor(f, a, j=0):
        """Evaluate efficiently Taylor shift `f(x_j + a)`. """
        return f.per(smp_taylor_in(f.rep, f.dom.convert(a), j, f.lev, f.ord, f.dom))

    def transform(f, p, q, j=0):
        r"""Evaluate functional transformation `q^n \cdot f(p/q)`. """
        lev, ord, dom, per, F, (P, Q) = f.unify((p, q))
        return per(smp_transform_in(F, P, Q, j, lev, ord, dom))

    def compose(f, g):
        """Compute functional composition of `f` and `g`. """
        lev, ord, dom, per, F, G = f.unify(g)
        return per(smp_compose(F, G, lev, ord, dom))

    def decompose(f):
        """Computes functional decomposition of `f`. """
        return [f.per(g) for g in smp_decompose(f.rep, f.lev, f.ord, f.dom)]

    def sturm(f):
        """Computes the Sturm sequence of `f`. """
        return [f.per(g) for g in smp_sturm(f.rep, f.lev, f.ord, f.dom)]

    def sqf_norm(f):
        """Compute square-free norm of `f`. """
        s, g, r = smp_sqf_norm(f.rep, f.lev, f.ord, f.dom)
        return s, f.per(g), f.per(r, dom=f.dom.dom)

    def sqf_part(f):
        """Compute square-free part of `f`. """
        return f.per(smp_sqf_part(f.rep, f.lev, f.ord, f.dom))

    def sqf_list(f, all=False, include=False):
        """Return a list of square-free factors of `f`. """
        result = smp_sqf_list(f.rep, f.lev, f.ord, f.dom, all=all, include=include)
        return f._perify_factors(result, include)

    def factor_list(f, include=False):
        """Return a list of irreducible factors of `f`. """
        result = smp_factor_list(f.rep, f.lev, f.ord, f.dom, include=include)
        # Fix: _perify_factors is a bound method, so ``f`` is passed
        # implicitly; the extra ``f.per`` argument (absent in sqf_list)
        # was spurious.
        return f._perify_factors(result, include)

    def real_intervals(f, eps=None, inf=None, sup=None, fast=False, sqf=False):
        """Compute isolating intervals for real roots of `f`. """
        return smp_real_intervals(f.rep, f.lev, f.ord, f.dom, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)

    def complex_intervals(f, eps=None, inf=None, sup=None, fast=False, sqf=False):
        """Compute isolating rectangles for complex roots of `f`. """
        return smp_complex_intervals(f.rep, f.lev, f.ord, f.dom, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)

    def refine_real_root(f, s, t, eps=None, steps=None, fast=False, sqf=False):
        """Refine a real root isolating interval to the given precision. """
        return smp_refine_real_root(f.rep, s, t, f.lev, f.ord, f.dom, eps=eps, steps=steps, fast=fast, sqf=sqf)

    def refine_complex_root(f, s, t, eps=None, steps=None, fast=False, sqf=False):
        """Refine a complex root isolating rectangle to the given precision. """
        return smp_refine_complex_root(f.rep, s, t, f.lev, f.ord, f.dom, eps=eps, steps=steps, fast=fast, sqf=sqf)

    def count_real_roots(f, inf=None, sup=None):
        """Return the number of real roots of `f` in the ``[inf, sup]`` interval. """
        return smp_count_real_roots(f.rep, f.lev, f.ord, f.dom, inf=inf, sup=sup)

    def count_complex_roots(f, inf=None, sup=None):
        """Return the number of complex roots of `f` in the ``[inf, sup]`` rectangle. """
        return smp_count_complex_roots(f.rep, f.lev, f.ord, f.dom, inf=inf, sup=sup)

    @property
    def is_zero(f):
        """Returns ``True`` if `f` is equivalent to zero. """
        return smp_zero_p(f.rep, f.lev)

    @property
    def is_one(f):
        """Return ``True`` if `f` is equivalent to one. """
        return smp_one_p(f.rep, f.lev, f.dom)

    @property
    def is_ground(f):
        """Return ``True`` if `f` is an element of the ground domain. """
        return smp_ground_p(f.rep, f.lev)

    @property
    def is_sqf(f):
        """Return ``True`` if `f` is a square-free polynomial. """
        return smp_sqf_p(f.rep, f.lev, f.ord, f.dom)

    @property
    def is_monic(f):
        """Return ``True`` if the leading coefficient of `f` is one. """
        return smp_monic_p(f.rep, f.lev, f.ord, f.dom)

    @property
    def is_primitive(f):
        """Return ``True`` if GCD of coefficients of `f` is one. """
        return smp_primitive_p(f.rep, f.lev, f.ord, f.dom)

    @property
    def is_linear(f):
        """Return ``True`` if `f` is linear in all its variables. """
        return smp_linear_p(f.rep, f.lev, f.ord, f.dom)

    @property
    def is_homogeneous(f):
        """Return ``True`` if `f` has zero trailing coefficient. """
        return smp_homogeneous_p(f.rep, f.lev, f.ord, f.dom)

    def __abs__(f):
        return f.abs()

    def __neg__(f):
        return f.neg()

    def __add__(f, g):
        if not isinstance(g, SparsePoly):
            return f.add_ground(g)
        else:
            return f.add(g)

    def __radd__(f, g):
        return f.__add__(g)

    def __sub__(f, g):
        if not isinstance(g, SparsePoly):
            return f.sub_ground(g)
        else:
            return f.sub(g)

    def __rsub__(f, g):
        return (-f).__add__(g)

    def __mul__(f, g):
        if not isinstance(g, SparsePoly):
            return f.mul_ground(g)
        else:
            return f.mul(g)

    def __rmul__(f, g):
        return f.__mul__(g)

    def __pow__(f, n):
        return f.pow(n)

    def __divmod__(f, g):
        return f.div(g)

    def __mod__(f, g):
        return f.rem(g)

    def __floordiv__(f, g):
        if not isinstance(g, SparsePoly):
            return f.exquo_ground(g)
        else:
            return f.exquo(g)

    def __eq__(f, g):
        # NOTE(review): compares representations only (not ord/dom), so
        # equal reps over different domains compare equal -- confirm intended.
        return isinstance(g, SparsePoly) and f.rep == g.rep

    def __ne__(f, g):
        return not f.__eq__(g)

    def __nonzero__(f):
        return not f.is_zero

    # Python 3 spells the truth-value hook __bool__; alias it so the same
    # semantics apply on both interpreters.
    __bool__ = __nonzero__
|
|
"""
GANs for the super-resolution application.
- Example data is MNIST.
- Input is subsampled MNIST data
- Target is recovered high-resolution MNIST data
"""
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import UpSampling1D, Convolution1D, MaxPooling1D
from keras.layers.core import Activation, Flatten, Dropout
from keras.optimizers import SGD
from keras.datasets import mnist
from keras.layers.advanced_activations import LeakyReLU
from keras import backend as K
K.set_image_dim_ordering('th')
import numpy as np
from PIL import Image
import argparse
import math
from sklearn import model_selection
import time
import os
# Length of one (flattened) sample vector handled by the models below.
INPUT_LN = 56 * 2
# Per-stage expansion factors -- presumably the upsampling plan of an
# earlier convolutional variant (TODO confirm); only np.prod(N_GEN_l) is
# actually used, to size the latent code.
N_GEN_l = [4, 4]
# Latent-code length: input length divided by the total expansion factor.
CODE_LN = int(INPUT_LN / np.prod(N_GEN_l))
# kera (Underdeveloping)
class Seq(Sequential):
    """Experimental chainable wrapper around ``keras.models.Sequential``.

    Lets model-building calls be chained, e.g. ``Seq().act()``.
    NOTE(review): marked "Underdeveloping" by the original author.
    """

    def __init__(self):
        # Fix: the original body was ``pass``, which skipped Sequential's
        # own initialisation and left its layer bookkeeping unset.
        super().__init__()

    def act(self, activation='relu'):
        """Append an Activation layer and return ``self`` for chaining.

        Args:
            activation: activation function name (default 'relu').
        """
        # Fix: the original referenced the undefined name ``Activiation``
        # and passed Activation() no required activation argument.
        self.add(Activation(activation))
        return self
def generator_model():
    """Build the generator: maps a CODE_LN latent code to an INPUT_LN vector.

    Architecture: Dense -> BatchNorm -> ReLU -> Dense (linear output).
    Reads the module globals INPUT_LN, N_GEN_l and CODE_LN.
    """
    print(INPUT_LN, N_GEN_l, CODE_LN)
    layer_stack = [
        Dense(int(INPUT_LN), input_dim=CODE_LN, init='uniform'),
        BatchNormalization(),
        Activation('relu'),
        Dense(INPUT_LN, init='uniform'),
    ]
    net = Sequential()
    for layer in layer_stack:
        net.add(layer)
    return net
def discriminator_model():
    """Build the discriminator: INPUT_LN vector -> probability of 'real'.

    Two LeakyReLU + Dropout hidden layers, then a single sigmoid unit.
    """
    layer_stack = [
        Dense(int(INPUT_LN / 2), input_dim=INPUT_LN, init='uniform'),
        LeakyReLU(0.2),
        Dropout(0.2),
        Dense(int(INPUT_LN / 4), init='uniform'),
        LeakyReLU(0.2),
        Dropout(0.2),
        Dense(1),
        Activation('sigmoid'),
    ]
    net = Sequential()
    for layer in layer_stack:
        net.add(layer)
    return net
def generator_containing_discriminator(generator, discriminator):
    """Stack generator -> discriminator for training the generator.

    Marks the discriminator non-trainable so that compiling the stacked
    model updates only the generator's weights.
    """
    discriminator.trainable = False
    stacked = Sequential()
    stacked.add(generator)
    stacked.add(discriminator)
    return stacked
def combine_images(generated_images):
    """Tile a batch of channel-first images into one near-square mosaic.

    Expects input of shape (num, 1, H, W) and returns a 2-D array of shape
    (rows*H, cols*W) in the input dtype, filled row-major in batch order.
    """
    num = generated_images.shape[0]
    cols = int(math.sqrt(num))
    rows = int(math.ceil(float(num) / cols))
    tile = generated_images.shape[2:]
    mosaic = np.zeros((rows * tile[0], cols * tile[1]),
                      dtype=generated_images.dtype)
    for index, img in enumerate(generated_images):
        r, c = divmod(index, cols)
        mosaic[r * tile[0]:(r + 1) * tile[0],
               c * tile[1]:(c + 1) * tile[1]] = img[0, :, :]
    return mosaic
def train_VI(BATCH_SIZE, V, I=None, no_epoch = 100, disp=False):
    """
    Train a GAN on the rows of V (optionally concatenated with I).

    Latent codes are drawn from uniform(-1, 1). Weights are written to the
    files 'generator' and 'discriminator' in the current directory at the
    end of every epoch.

    Developing
    ==========
    [2017-3-14]
    - To include saving the progress: tsplot of input and output
    - Input can be saved separately.
    """
    # Training matrix: V alone, or V and I joined column-wise.
    if I is None:
        X_train = V
    else:
        X_train = np.concatenate([V, I], axis=1)
    # X_train = X_train.astype(np.float32)
    # X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
    # Three graphs: D alone, G alone, and D stacked on (frozen) G for the
    # generator updates.
    discriminator = discriminator_model()
    generator = generator_model()
    discriminator_on_generator = \
        generator_containing_discriminator(generator, discriminator)
    d_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
    g_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    discriminator_on_generator.compile(
        loss='binary_crossentropy', optimizer=g_optim)
    discriminator.trainable = True
    discriminator.compile(loss='binary_crossentropy', optimizer=d_optim)
    # Reused buffer for one batch of latent codes.
    noise = np.zeros((BATCH_SIZE, CODE_LN))
    for epoch in range(no_epoch):
        if disp:
            print("Epoch is", epoch)
            print("Number of batches", int(X_train.shape[0] / BATCH_SIZE))
        elif epoch % 100 == 0:
            print("Epoch is", epoch)
            print("Number of batches", int(X_train.shape[0] / BATCH_SIZE))
        no_index = int(X_train.shape[0] / BATCH_SIZE)
        for index in range(no_index):
            # =============================================
            # Training Discriminator after generation
            # =============================================
            for i in range(BATCH_SIZE):
                noise[i, :] = np.random.uniform(-1, 1, CODE_LN)
            #print('noise.shape -->', noise.shape)
            generated_images = generator.predict(noise, verbose=0)
            if index % 20 == 0:
                # Placeholder branch: the bare string below is a no-op
                # "Saving data" stub; only the optional progress print runs.
                """
                Saving data
                """
                if disp:
                    print(index)
            image_batch = X_train[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]
            if disp:
                print('image_batch.shape, generated_images.shape -->',
                      image_batch.shape, generated_images.shape)
            # Real samples labelled 1, generated samples labelled 0.
            X = np.concatenate((image_batch, generated_images))
            y = [1] * BATCH_SIZE + [0] * BATCH_SIZE
            d_loss = discriminator.train_on_batch(X, y)
            if disp:
                print("batch %d d_loss : %f" % (index, d_loss))
            # =============================================
            # Training Generator using discriminator information
            # =============================================
            for i in range(BATCH_SIZE):
                noise[i, :] = np.random.uniform(-1, 1, CODE_LN)
            # Freeze D so only G's weights move, then unfreeze it.
            discriminator.trainable = False
            g_loss = discriminator_on_generator.train_on_batch(
                noise, [1] * BATCH_SIZE)
            discriminator.trainable = True
            if disp:
                print("batch %d g_loss : %f" % (index, g_loss))
            if index == no_index - 1:
                if disp:
                    print('Save generator and discriminator!')
                # Overwrite weight files at the end of each epoch.
                generator.save_weights('generator', True)
                discriminator.save_weights('discriminator', True)
def train_VI_randn(BATCH_SIZE, V, I=None, no_epoch = 100, disp=False):
    """
    Same GAN training loop as train_VI(), except latent codes are drawn
    from the standard normal distribution instead of uniform(-1, 1).
    Weights are written to 'generator' and 'discriminator' in the current
    directory at the end of every epoch.

    Developing
    ==========
    [2017-3-14]
    - To include saving the progress: tsplot of input and output
    - Input can be saved separately.
    """
    # Training matrix: V alone, or V and I joined column-wise.
    if I is None:
        X_train = V
    else:
        X_train = np.concatenate([V, I], axis=1)
    # X_train = X_train.astype(np.float32)
    # X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
    discriminator = discriminator_model()
    generator = generator_model()
    discriminator_on_generator = \
        generator_containing_discriminator(generator, discriminator)
    d_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
    g_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    discriminator_on_generator.compile(
        loss='binary_crossentropy', optimizer=g_optim)
    discriminator.trainable = True
    discriminator.compile(loss='binary_crossentropy', optimizer=d_optim)
    # Reused buffer for one batch of latent codes.
    noise = np.zeros((BATCH_SIZE, CODE_LN))
    for epoch in range(no_epoch):
        if disp:
            print("Epoch is", epoch)
            print("Number of batches", int(X_train.shape[0] / BATCH_SIZE))
        elif epoch % 100 == 0:
            print("Epoch is", epoch)
            print("Number of batches", int(X_train.shape[0] / BATCH_SIZE))
        no_index = int(X_train.shape[0] / BATCH_SIZE)
        for index in range(no_index):
            # =============================================
            # Training Discriminator after generation
            # =============================================
            for i in range(BATCH_SIZE):
                noise[i, :] = np.random.randn(CODE_LN)
            #print('noise.shape -->', noise.shape)
            generated_images = generator.predict(noise, verbose=0)
            if index % 20 == 0:
                # Placeholder branch: the bare string below is a no-op
                # "Saving data" stub; only the optional progress print runs.
                """
                Saving data
                """
                if disp:
                    print(index)
            image_batch = X_train[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]
            if disp:
                print('image_batch.shape, generated_images.shape -->',
                      image_batch.shape, generated_images.shape)
            # Real samples labelled 1, generated samples labelled 0.
            X = np.concatenate((image_batch, generated_images))
            y = [1] * BATCH_SIZE + [0] * BATCH_SIZE
            d_loss = discriminator.train_on_batch(X, y)
            if disp:
                print("batch %d d_loss : %f" % (index, d_loss))
            # =============================================
            # Training Generator using discriminator information
            # =============================================
            for i in range(BATCH_SIZE):
                noise[i, :] = np.random.randn(CODE_LN)
            # Freeze D so only G's weights move, then unfreeze it.
            discriminator.trainable = False
            g_loss = discriminator_on_generator.train_on_batch(
                noise, [1] * BATCH_SIZE)
            discriminator.trainable = True
            if disp:
                print("batch %d g_loss : %f" % (index, g_loss))
            if index == no_index - 1:
                if disp:
                    print('Save generator and discriminator!')
                # Overwrite weight files at the end of each epoch.
                generator.save_weights('generator', True)
                discriminator.save_weights('discriminator', True)
def get_args():
    """Parse command-line options: --mode, --batch_size (default 128), --nice."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", type=str)
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--nice", dest="nice", action="store_true",
                        default=False)
    return parser.parse_args()
def generate(no_images=100):
    """Sample no_images outputs from the saved generator with uniform codes.

    Loads weights from the file 'generator' in the current directory.
    """
    generator = generator_model()
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    generator.load_weights('generator')
    # Drawing the whole (no_images, CODE_LN) block at once consumes the
    # legacy RandomState stream in the same order as a row-by-row loop.
    codes = np.random.uniform(-1, 1, (no_images, CODE_LN))
    return generator.predict(codes, verbose=1)
def generate_randn(no_images=100):
    """Sample no_images outputs from the saved generator with N(0,1) codes.

    Loads weights from the file 'generator' in the current directory.
    """
    generator = generator_model()
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    generator.load_weights('generator')
    # One block draw consumes the RNG stream in the same order as the
    # original per-row np.random.randn(CODE_LN) loop.
    codes = np.random.randn(no_images, CODE_LN)
    return generator.predict(codes, verbose=1)
class GAN1DV_DNN():
    """1-D DNN GAN driver wired to the module-level model builders.

    NOTE(review): the constructor rebinds the module globals
    ``generator_model``, ``INPUT_LN`` and ``CODE_LN``, so creating an
    instance changes the behaviour of every other function in this module.
    """
    def __init__(self,
                 a_INPUT_LN=56,
                 a_generator_model=generator_model,
                 rand='uniform',
                 foldname='tmp',
                 disp=False):
        """
        Input
        =====
        foldname, string
            Folder where generator/discriminator weights are saved.
        """
        global generator_model
        global INPUT_LN, CODE_LN
        self.rand = rand
        self.disp = disp
        self.foldname = foldname
        # Rebind the module globals so the free functions above pick up
        # the configured model builder and sizes.
        generator_model = a_generator_model
        INPUT_LN = a_INPUT_LN
        CODE_LN = int(a_INPUT_LN/2)
        if self.disp:
            print(INPUT_LN, CODE_LN)
        os.makedirs(self.foldname, exist_ok=True)
    def train_r0(self, BATCH_SIZE, V, no_epoch=100):
        """
        Train via the module-level routines (legacy path).

        Input
        =====
        rand can be 'uniform', 'randn'

        NOTE(review): train_VI/train_VI_randn save weights into the current
        directory rather than self.foldname, so generate() will not find
        them afterwards -- confirm this path is still intended.
        """
        if self.disp:
            print(INPUT_LN, CODE_LN)
        s = time.time()
        if self.rand == 'randn':
            r = train_VI_randn(BATCH_SIZE, V, no_epoch=no_epoch, disp=self.disp)
        else:
            r = train_VI(BATCH_SIZE, V, no_epoch=no_epoch, disp=self.disp)
        e = time.time()
        print('Elasped time: {}s'.format(e - s))
        return r
    def train(self, BATCH_SIZE, V, no_epoch=100):
        """
        Train with the instance loop below (saves into self.foldname).

        Input
        =====
        rand can be 'uniform', 'randn'
        """
        if self.disp:
            print(INPUT_LN, CODE_LN)
        s = time.time()
        r = self.train_VI(BATCH_SIZE, V, no_epoch=no_epoch)
        e = time.time()
        print('Elasped time: {}s'.format(e - s))
        return r
    def generate(self, no_images):
        # Rebuild the generator graph and load the trained weights from
        # the configured folder.
        generator = generator_model()
        generator.compile(loss='binary_crossentropy', optimizer="SGD")
        generator.load_weights(self.foldname+'/generator')
        noise = np.zeros((no_images, CODE_LN))
        for i in range(no_images):
            noise[i, :] = self.random(CODE_LN)
        generated_images = generator.predict(noise, verbose=1)
        return generated_images
    def random(self, n):
        # Draw one latent code of length n using the configured scheme.
        if self.rand == 'randn':
            return np.random.randn(n)
        else:
            return np.random.uniform(-1, 1, n)
    def train_VI(self, BATCH_SIZE, V, I=None, no_epoch = 100):
        """
        GAN training loop (instance version of the module-level train_VI).

        Latent codes come from self.random(); weights are saved under
        self.foldname at the end of every epoch.

        Developing
        ==========
        [2017-3-14]
        - To include saving the progress: tsplot of input and output
        - Input can be saved separately.
        """
        disp = self.disp
        # Training matrix: V alone, or V and I joined column-wise.
        if I is None:
            X_train = V
        else:
            X_train = np.concatenate([V, I], axis=1)
        # X_train = X_train.astype(np.float32)
        # X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
        discriminator = discriminator_model()
        generator = generator_model()
        discriminator_on_generator = \
            generator_containing_discriminator(generator, discriminator)
        d_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
        g_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
        generator.compile(loss='binary_crossentropy', optimizer="SGD")
        discriminator_on_generator.compile(
            loss='binary_crossentropy', optimizer=g_optim)
        discriminator.trainable = True
        discriminator.compile(loss='binary_crossentropy', optimizer=d_optim)
        # Reused buffer for one batch of latent codes.
        noise = np.zeros((BATCH_SIZE, CODE_LN))
        for epoch in range(no_epoch):
            if disp:
                print("Epoch is", epoch)
                print("Number of batches", int(X_train.shape[0] / BATCH_SIZE))
            elif epoch % 100 == 0:
                print("Epoch is", epoch)
                print("Number of batches", int(X_train.shape[0] / BATCH_SIZE))
            no_index = int(X_train.shape[0] / BATCH_SIZE)
            for index in range(no_index):
                # =============================================
                # Training Discriminator after generation
                # =============================================
                for i in range(BATCH_SIZE):
                    noise[i, :] = self.random(CODE_LN)
                #print('noise.shape -->', noise.shape)
                generated_images = generator.predict(noise, verbose=0)
                if index % 20 == 0:
                    # Placeholder branch: the bare string below is a no-op
                    # "Saving data" stub; only the optional print runs.
                    """
                    Saving data
                    """
                    if disp:
                        print(index)
                image_batch = X_train[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]
                if disp:
                    print('image_batch.shape, generated_images.shape -->',
                          image_batch.shape, generated_images.shape)
                # Real samples labelled 1, generated samples labelled 0.
                X = np.concatenate((image_batch, generated_images))
                y = [1] * BATCH_SIZE + [0] * BATCH_SIZE
                d_loss = discriminator.train_on_batch(X, y)
                if disp:
                    print("batch %d d_loss : %f" % (index, d_loss))
                # =============================================
                # Training Generator using discriminator information
                # =============================================
                for i in range(BATCH_SIZE):
                    noise[i, :] = self.random(CODE_LN)
                # Freeze D so only G's weights move, then unfreeze it.
                discriminator.trainable = False
                g_loss = discriminator_on_generator.train_on_batch(
                    noise, [1] * BATCH_SIZE)
                discriminator.trainable = True
                if disp:
                    print("batch %d g_loss : %f" % (index, g_loss))
                if index == no_index - 1:
                    if disp:
                        print('Save generator and discriminator!')
                    # Overwrite weight files at the end of each epoch.
                    generator.save_weights(self.foldname + '/generator', True)
                    discriminator.save_weights(self.foldname + '/discriminator', True)
|
|
# Copyright (C) 2011, 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import collections
import logging
import struct
import sys
import functools
from ryu import exception
from ryu import utils
from ryu.lib import stringify
from . import ofproto_common
LOG = logging.getLogger('ryu.ofproto.ofproto_parser')
def header(buf):
    """Unpack the common OpenFlow header from *buf*.

    Returns the tuple ``(version, msg_type, msg_len, xid)`` as laid out
    by ``OFP_HEADER_PACK_STR`` (see MsgBase._serialize_header for the
    matching pack).  NOTE(review): relies on the Python 2 builtin
    ``buffer``; not Python 3 compatible.
    """
    assert len(buf) >= ofproto_common.OFP_HEADER_SIZE
    # LOG.debug('len %d bufsize %d', len(buf), ofproto.OFP_HEADER_SIZE)
    return struct.unpack_from(ofproto_common.OFP_HEADER_PACK_STR, buffer(buf))
_MSG_PARSERS = {}
def register_msg_parser(version):
def register(msg_parser):
_MSG_PARSERS[version] = msg_parser
return msg_parser
return register
def msg(datapath, version, msg_type, msg_len, xid, buf):
    """Parse one received OpenFlow message and return the message object.

    Looks up the parser registered for *version* (see
    register_msg_parser) and delegates to it.  Returns None after
    logging when the parser raises, which indicates a malformed packet
    from the switch.

    Raises exception.OFPUnknownVersion when no parser is registered for
    *version*.
    """
    assert len(buf) >= msg_len
    msg_parser = _MSG_PARSERS.get(version)
    if msg_parser is None:
        raise exception.OFPUnknownVersion(version=version)
    try:
        return msg_parser(datapath, version, msg_type, msg_len, xid, buf)
    except Exception:
        # Was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors.  The message
        # fragments now end with a space so the logged sentences do not
        # run together.
        LOG.exception(
            'Encounter an error during parsing OpenFlow packet from switch. '
            'This implies switch sending a malformed OpenFlow packet. '
            'version 0x%02x msg_type %d msg_len %d xid %d buf %s',
            version, msg_type, msg_len, xid, utils.hex_array(buf))
    return None
def create_list_of_base_attributes(f):
    """Decorator (meant for __init__) that snapshots instance attributes.

    After the wrapped function returns, the set of names visible on the
    instance is stored once on the class as ``_base_attributes`` so
    later code can tell base attributes apart from ones added
    afterwards.
    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        result = f(self, *args, **kwargs)
        klass = self.__class__
        # Look only at the class's own __dict__: hasattr() would also
        # find a snapshot taken by a superclass, which must not be
        # reused for this class.
        if '_base_attributes' not in vars(klass):
            klass._base_attributes = set(dir(self))
        return result
    return wrapper
def ofp_msg_from_jsondict(dp, jsondict):
    """
    This function instantiates an appropriate OpenFlow message class
    from the given JSON style dictionary.
    The objects created by following two code fragments are equivalent.
    Code A::
        jsonstr = '{ "OFPSetConfig": { "flags": 0, "miss_send_len": 128 } }'
        jsondict = json.loads(jsonstr)
        o = ofp_msg_from_jsondict(dp, jsondict)
    Code B::
        o = dp.ofproto_parser.OFPSetConfig(flags=0, miss_send_len=128)
    This function takes the following arguments.
    ======== =======================================
    Argument Description
    ======== =======================================
    dp       An instance of ryu.controller.Datapath.
    jsondict A JSON style dict.
    ======== =======================================
    """
    parser = dp.ofproto_parser
    # Exactly one key: the message class name mapping to its arguments.
    assert len(jsondict) == 1
    (key, body), = jsondict.iteritems()
    cls = getattr(parser, key)
    assert issubclass(cls, MsgBase)
    return cls.from_jsondict(body, datapath=dp)
class StringifyMixin(stringify.StringifyMixin):
    """Base mixin giving ofproto classes JSON dict conversion helpers."""
    # Class-name prefixes used when resolving sibling classes from JSON
    # dictionary keys (consumed by stringify.StringifyMixin).
    _class_prefixes = ["OFP", "ONF", "MT"]
    @classmethod
    def cls_from_jsondict_key(cls, k):
        # NOTE(review): this override just delegates to the superclass;
        # it appears to exist only as a hook point for subclasses.
        obj_cls = super(StringifyMixin, cls).cls_from_jsondict_key(k)
        return obj_cls
class MsgBase(StringifyMixin):
    """
    This is a base class for OpenFlow message classes.
    An instance of this class has at least the following attributes.
    ========= ==============================
    Attribute Description
    ========= ==============================
    datapath A ryu.controller.controller.Datapath instance for this message
    version OpenFlow protocol version
    msg_type Type of OpenFlow message
    msg_len Length of the message
    xid Transaction id
    buf Raw data
    ========= ==============================
    """
    @create_list_of_base_attributes
    def __init__(self, datapath):
        # Header fields stay None until set_headers() (receive path) or
        # serialize() (send path) fills them in.
        super(MsgBase, self).__init__()
        self.datapath = datapath
        self.version = None
        self.msg_type = None
        self.msg_len = None
        self.xid = None
        self.buf = None
    def set_headers(self, version, msg_type, msg_len, xid):
        """Record the already-parsed OpenFlow header fields."""
        # Subclasses declare their expected type as cls_msg_type.
        assert msg_type == self.cls_msg_type
        self.version = version
        self.msg_type = msg_type
        self.msg_len = msg_len
        self.xid = xid
    def set_xid(self, xid):
        # The transaction id may be assigned only once.
        assert self.xid is None
        self.xid = xid
    def set_buf(self, buf):
        # NOTE(review): Python 2 builtin ``buffer`` keeps a read-only
        # view over the raw bytes; not Python 3 compatible.
        self.buf = buffer(buf)
    def __str__(self):
        def hexify(x):
            # None stays readable; numbers are shown in hex.
            return str(None) if x is None else '0x%x' % x
        buf = 'version: %s msg_type %s xid %s ' % (hexify(self.version),
                                                   hexify(self.msg_type),
                                                   hexify(self.xid))
        return buf + StringifyMixin.__str__(self)
    @classmethod
    def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
        """Default parser: attach header fields and the raw buffer.

        Subclasses typically extend this to decode their body.
        """
        msg_ = cls(datapath)
        msg_.set_headers(version, msg_type, msg_len, xid)
        msg_.set_buf(buf)
        return msg_
    def _serialize_pre(self):
        # Reserve space for the header; the body is appended after it.
        self.version = self.datapath.ofproto.OFP_VERSION
        self.msg_type = self.cls_msg_type
        self.buf = bytearray(self.datapath.ofproto.OFP_HEADER_SIZE)
    def _serialize_header(self):
        # buffer length is determined after trailing data is formatted.
        assert self.version is not None
        assert self.msg_type is not None
        assert self.buf is not None
        assert len(self.buf) >= self.datapath.ofproto.OFP_HEADER_SIZE
        self.msg_len = len(self.buf)
        if self.xid is None:
            self.xid = 0
        struct.pack_into(self.datapath.ofproto.OFP_HEADER_PACK_STR,
                         self.buf, 0,
                         self.version, self.msg_type, self.msg_len, self.xid)
    def _serialize_body(self):
        # Subclasses override this to append their body to self.buf.
        pass
    def serialize(self):
        """Serialize the message into self.buf.

        Order matters: the header is written last because msg_len
        depends on the final body length.
        """
        self._serialize_pre()
        self._serialize_body()
        self._serialize_header()
class MsgInMsgBase(MsgBase):
    """Message base whose JSON binary payloads are base64-encoded.

    NOTE(review): judging by the name, intended for messages that embed
    other messages — verify against subclasses elsewhere in the tree.
    """
    @classmethod
    def _decode_value(cls, k, json_value, decode_string=base64.b64decode,
                      **additional_args):
        # Delegates to the decoder registry with base64 as the default
        # string decoder (counterpart of the b64-encoding on dump).
        return cls._get_decoder(k, decode_string)(json_value,
                                                  **additional_args)
def namedtuple(typename, fields, **kwargs):
    """Like collections.namedtuple, but with StringifyMixin mixed in.

    The returned class therefore also supports the JSON dictionary
    conversion helpers provided by StringifyMixin.
    """
    tuple_cls = collections.namedtuple(typename, fields, **kwargs)
    class _namedtuple(StringifyMixin, tuple_cls):
        pass
    return _namedtuple
def msg_str_attr(msg_, buf, attr_list=None):
    """Append ``' name value'`` pairs for msg_'s attributes to *buf*.

    When *attr_list* is None the attribute names are discovered via
    stringify.obj_attrs().  Attributes that are missing or None are
    skipped.  Returns the extended string.
    """
    names = stringify.obj_attrs(msg_) if attr_list is None else attr_list
    pieces = [buf]
    for name in names:
        value = getattr(msg_, name, None)
        if value is not None:
            pieces.append(' %s %s' % (name, value))
    return ''.join(pieces)
|
|
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Logical units dealing with instances."""
import logging
import os
from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import utils
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
from ganeti.cmdlib.common import \
INSTANCE_NOT_RUNNING, CheckNodeOnline, \
ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
LoadNodeEvacResult, \
ExpandInstanceUuidAndName, \
CheckInstanceState, ExpandNodeUuidAndName, \
CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
ComputeDisks, \
StartInstanceDisks, ShutdownInstanceDisks, \
AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import \
BuildInstanceHookEnvByObject,\
CheckNodeNotDrained, RemoveInstance, CopyLockList, \
CheckNodeVmCapable, CheckTargetNodeIPolicy, \
GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
CheckInstanceBridgesExist, \
CheckInstanceExistence, \
CheckHostnameSane, CheckOpportunisticLocking, ComputeFullBeParams, \
ComputeNics, CreateInstanceAllocRequest
import ganeti.masterd.instance
class LUInstanceRename(LogicalUnit):
  """Rename an instance.
  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
  def CheckArguments(self):
    """Check arguments.
    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)
    # Guard so _PerformChecksAndResolveNewName (called from both
    # ExpandNames and CheckPrereq) resolves the name only once.
    self._new_name_resolved = False
  def BuildHooksEnv(self):
    """Build hooks env.
    This runs on master, primary and secondary nodes of the instance.
    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env
  def BuildHooksNodes(self):
    """Build hooks nodes.
    """
    nl = [self.cfg.GetMasterNode()] + \
      list(self.cfg.GetInstanceNodes(self.instance.uuid))
    return (nl, nl)
  def _PerformChecksAndResolveNewName(self):
    """Checks and resolves the new name, storing the FQDN, if permitted.
    """
    # No-op when already resolved or when the opcode disabled name
    # checking.
    if self._new_name_resolved or not self.op.name_check:
      return
    hostname = CheckHostnameSane(self, self.op.new_name)
    self.op.new_name = hostname.name
    if (self.op.ip_check and
        netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
      raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                 (hostname.ip, self.op.new_name),
                                 errors.ECODE_NOTUNIQUE)
    self._new_name_resolved = True
  def CheckPrereq(self):
    """Check prerequisites.
    This checks that the instance is in the cluster and is not running.
    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None
    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    for disk in self.cfg.GetInstanceDisks(instance.uuid):
      if (disk.dev_type in constants.DTS_FILEBASED and
          self.op.new_name != instance.name):
        # TODO: when disks are separate objects, this should check for disk
        # types, not disk templates.
        CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(), disk.dev_type)
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance
    self._PerformChecksAndResolveNewName()
    if self.op.new_name != instance.name:
      CheckInstanceExistence(self, self.op.new_name)
  def ExpandNames(self):
    self._ExpandAndLockInstance(allow_forthcoming=True)
    # Note that this call might not resolve anything if name checks have been
    # disabled in the opcode. In this case, we might have a renaming collision
    # if a shortened name and a full name are used simultaneously, as we will
    # have two different locks. However, at that point the user has taken away
    # the tools necessary to detect this issue.
    self._PerformChecksAndResolveNewName()
    # Used to prevent instance namespace collisions.
    if self.op.new_name != self.op.instance_name:
      CheckInstanceExistence(self, self.op.new_name)
    self.add_locks[locking.LEVEL_INSTANCE] = self.op.new_name
  def Exec(self, feedback_fn):
    """Rename the instance.
    """
    old_name = self.instance.name
    rename_file_storage = False
    # Gluster file storage keeps its path; all other file-based disks
    # live in a directory named after the instance and must be moved.
    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    renamed_storage = [d for d in disks
                       if (d.dev_type in constants.DTS_FILEBASED and
                           d.dev_type != constants.DT_GLUSTER)]
    if (renamed_storage and self.op.new_name != self.instance.name):
      disks = self.cfg.GetInstanceDisks(self.instance.uuid)
      old_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
      rename_file_storage = True
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Assert that we have both the locks needed
    assert old_name in self.owned_locks(locking.LEVEL_INSTANCE)
    assert self.op.new_name in self.owned_locks(locking.LEVEL_INSTANCE)
    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
    disks = self.cfg.GetInstanceDisks(renamed_inst.uuid)
    if self.instance.forthcoming:
      # Forthcoming instances have no storage or OS to touch yet.
      return renamed_inst.name
    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))
    StartInstanceDisks(self, renamed_inst, None)
    renamed_inst = self.cfg.GetInstanceInfo(renamed_inst.uuid)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(disks):
      for node_uuid in self.cfg.GetInstanceNodes(renamed_inst.uuid):
        result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                (disk, renamed_inst), info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      # OS script failure is only warned about: the configuration-level
      # rename already happened and must not be rolled back here.
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      # Disks were started above solely for the rename script; always
      # shut them down again.
      ShutdownInstanceDisks(self, renamed_inst)
    return renamed_inst.name
class LUInstanceRemove(LogicalUnit):
  """Remove an instance.
  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
  def ExpandNames(self):
    self._ExpandAndLockInstance(allow_forthcoming=True)
    # Node locks are resolved per level in DeclareLocks.
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    self.dont_collate_locks[locking.LEVEL_NODE] = True
    self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
  def BuildHooksEnv(self):
    """Build hooks env.
    This runs on master, primary and secondary nodes of the instance.
    """
    env = BuildInstanceHookEnvByObject(self, self.instance,
                                       secondary_nodes=self.secondary_nodes,
                                       disks=self.inst_disks)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env
  def BuildHooksNodes(self):
    """Build hooks nodes.
    """
    # Post hooks additionally run on the instance's (former) nodes.
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.cfg.GetInstanceNodes(self.instance.uuid)) + nl
    return (nl, nl_post)
  def CheckPrereq(self):
    """Check prerequisites.
    This checks that the instance is in the cluster.
    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    # Cached for BuildHooksEnv, which runs without further config reads.
    self.secondary_nodes = \
      self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
    self.inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
  def Exec(self, feedback_fn):
    """Remove the instance.
    """
    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.cfg.GetInstanceNodes(self.instance.uuid)) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"
    if not self.instance.forthcoming:
      logging.info("Shutting down instance %s on node %s", self.instance.name,
                   self.cfg.GetNodeName(self.instance.primary_node))
      result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                               self.instance,
                                               self.op.shutdown_timeout,
                                               self.op.reason)
      # With ignore_failures a failed shutdown only warns; removal
      # proceeds either way below.
      if self.op.ignore_failures:
        result.Warn("Warning: can't shutdown instance", feedback_fn)
      else:
        result.Raise("Could not shutdown instance %s on node %s" %
                     (self.instance.name,
                      self.cfg.GetNodeName(self.instance.primary_node)))
    else:
      logging.info("Instance %s on node %s is forthcoming; not shutting down",
                   self.instance.name,
                   self.cfg.GetNodeName(self.instance.primary_node))
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.
  This LU is only used if the instance needs to be moved by copying the data
  from one node in the cluster to another. The instance is shut down and
  the data is copied to the new node and the configuration change is propagated,
  then the instance is started again.
  See also:
  L{LUInstanceFailover} for moving an instance on shared storage (no copying
  required).
  L{LUInstanceMigrate} for the live migration of an instance (no shutdown
  required).
  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
  def ExpandNames(self):
    # Lock the instance and the resolved target node; the source node
    # lock is appended in DeclareLocks.
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
  def BuildHooksEnv(self):
    """Build hooks env.
    This runs on master, primary and target nodes of the instance.
    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env
  def BuildHooksNodes(self):
    """Build hooks nodes.
    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)
  def CheckPrereq(self):
    """Check prerequisites.
    This checks that the instance is in the cluster.
    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    # All disks must be of a template that supports raw data copying.
    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    for idx, dsk in enumerate(disks):
      if dsk.dev_type not in constants.DTS_COPYABLE:
        raise errors.OpPrereqError("Instance disk %d has disk type %s and is"
                                   " not suitable for copying"
                                   % (idx, dsk.dev_type), errors.ECODE_STATE)
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node
    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)
    cluster = self.cfg.GetClusterInfo()
    bep = cluster.FillBE(self.instance)
    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)
    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(
        self, target_node.uuid, "failing over instance %s" %
        self.instance.name, bep[constants.BE_MAXMEM],
        self.instance.hypervisor,
        cluster.hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")
    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
  def Exec(self, feedback_fn):
    """Move an instance.
    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.
    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)
    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))
    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      # Roll back any DRBD minors reserved for the new disks.
      self.LogWarning("Device creation failed")
      for disk_uuid in self.instance.disks:
        self.cfg.ReleaseDRBDMinors(disk_uuid)
      raise
    errs = []
    transfers = []
    # activate, get path, create transfer jobs
    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    for idx, disk in enumerate(disks):
      # FIXME: pass debug option from opcode to backend
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         None)
      transfers.append(dt)
      self.cfg.Update(disk, feedback_fn)
    import_result = \
      masterd.instance.TransferInstanceData(self, feedback_fn,
                                            source_node.uuid,
                                            target_node.uuid,
                                            target_node.secondary_ip,
                                            self.op.compress,
                                            self.instance, transfers)
    if not compat.all(import_result):
      errs.append("Failed to transfer instance data")
    if errs:
      # Copy failed: delete the half-created target disks and abort;
      # the source disks are left untouched.
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        for disk_uuid in self.instance.disks:
          self.cfg.ReleaseDRBDMinors(disk_uuid)
      raise errors.OpExecError("Errors during disk copy: %s" %
                               (",".join(errs),))
    # Point the configuration at the new node before removing the old
    # copies.
    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)
    for disk in disks:
      self.cfg.SetDiskNodes(disk.uuid, [target_node.uuid])
    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)
      disks_ok, _, _ = AssembleInstanceDisks(self, self.instance,
                                             ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")
      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))
class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.
  """
  REQ_BGL = False
  def CheckArguments(self):
    """Check arguments.
    """
    # Either all instance specs name their nodes, or none do (and an
    # iallocator places them all).
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))
    has_nodes = compat.any(nodes)
    # XOR of any() and all(): true exactly when the specs are mixed.
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)
    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)
    CheckOpportunisticLocking(self.op)
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
  def ExpandNames(self):
    """Calculate the locks.
    """
    self.share_locks = ShareAll()
    self.needed_locks = {}
    if self.op.iallocator:
      # The allocator may place instances anywhere: lock all nodes.
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      # Explicit node placement: lock only the named nodes.
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode_uuid)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidential modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
  def CheckPrereq(self):
    """Check prerequisite.
    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()
      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = self.cfg.GetNodeNames(
          set(self.owned_locks(locking.LEVEL_NODE)) &
          set(self.owned_locks(locking.LEVEL_NODE_RES)))
      else:
        node_whitelist = None
      insts = [CreateInstanceAllocRequest(op, ComputeDisks(op.disks,
                                                           op.disk_template,
                                                           default_vg),
                                          ComputeNics(op, cluster, None,
                                                      self.cfg, ec_id),
                                          ComputeFullBeParams(op, cluster),
                                          node_whitelist)
               for op in self.op.instances]
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
      ial.Run(self.op.iallocator)
      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)
      # Pair (allocatable, failed); consumed by _ConstructPartialResult
      # and Exec.
      self.ia_result = ial.result
    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })
  def _ConstructPartialResult(self):
    """Constructs the partial result.
    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      # Without an allocator every requested instance is allocatable.
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []
    return {
      constants.ALLOCATABLE_KEY: allocatable_insts,
      constants.FAILED_KEY: failed_insts,
      }
  def Exec(self, feedback_fn):
    """Executes the opcode.
    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result
      # Write the allocator's node choices back into each opcode and
      # submit one job per placed instance.
      for (name, node_names) in allocatable:
        op = op2inst.pop(name)
        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
        jobs.append([op])
      # Everything not placed must have been reported as failed.
      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator did return incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
class LUInstanceChangeGroup(LogicalUnit):
HPATH = "instance-change-group"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def ExpandNames(self):
self.share_locks = ShareAll()
self.needed_locks = {
locking.LEVEL_NODEGROUP: [],
locking.LEVEL_NODE: [],
}
self._ExpandAndLockInstance()
if self.op.target_groups:
self.req_target_uuids = map(self.cfg.LookupNodeGroup,
self.op.target_groups)
else:
self.req_target_uuids = None
self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
def DeclareLocks(self, level):
if level == locking.LEVEL_NODEGROUP:
assert not self.needed_locks[locking.LEVEL_NODEGROUP]
if self.req_target_uuids:
lock_groups = set(self.req_target_uuids)
# Lock all groups used by instance optimistically; this requires going
# via the node before it's locked, requiring verification later on
instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
lock_groups.update(instance_groups)
else:
# No target groups, need to lock all of them
lock_groups = locking.ALL_SET
self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
elif level == locking.LEVEL_NODE:
if self.req_target_uuids:
# Lock all nodes used by instances
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
self._LockInstancesNodes()
# Lock all nodes in all potential target groups
lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) |
self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
member_nodes = [node_uuid
for group in lock_groups
for node_uuid in self.cfg.GetNodeGroup(group).members]
self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
else:
# Lock all nodes as all groups are potential targets
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
def CheckPrereq(self):
owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
assert (self.req_target_uuids is None or
owned_groups.issuperset(self.req_target_uuids))
assert owned_instance_names == set([self.op.instance_name])
# Get instance information
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
# Check if node groups for locked instance are still correct
instance_all_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
assert owned_nodes.issuperset(instance_all_nodes), \
("Instance %s's nodes changed while we kept the lock" %
self.op.instance_name)
inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
owned_groups)
if self.req_target_uuids:
# User requested specific target groups
self.target_uuids = frozenset(self.req_target_uuids)
else:
# All groups except those used by the instance are potential targets
self.target_uuids = owned_groups - inst_groups
conflicting_groups = self.target_uuids & inst_groups
if conflicting_groups:
raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
" used by the instance '%s'" %
(utils.CommaJoin(conflicting_groups),
self.op.instance_name),
errors.ECODE_INVAL)
if not self.target_uuids:
raise errors.OpPrereqError("There are no possible target groups",
errors.ECODE_INVAL)
def BuildHooksEnv(self):
"""Build hooks env.
"""
assert self.target_uuids
env = {
"TARGET_GROUPS": " ".join(self.target_uuids),
}
env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
def BuildHooksNodes(self):
  """Return the nodes on which hooks run: master node only, both phases."""
  master = self.cfg.GetMasterNode()
  return ([master], [master])
def Exec(self, feedback_fn):
  """Run the instance allocator and convert its solution into jobs."""
  locked_instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
  assert locked_instances == [self.op.instance_name], "Instance not locked"

  # Ask the iallocator for a way to move the instance to the target groups
  request = iallocator.IAReqGroupChange(instances=locked_instances,
                                        target_groups=list(self.target_uuids))
  allocator = iallocator.IAllocator(self.cfg, self.rpc, request)
  allocator.Run(self.op.iallocator)

  if not allocator.success:
    raise errors.OpPrereqError("Can't compute solution for changing group of"
                               " instance '%s' using iallocator '%s': %s" %
                               (self.op.instance_name, self.op.iallocator,
                                allocator.info), errors.ECODE_NORES)

  jobs = LoadNodeEvacResult(self, allocator.result, self.op.early_release,
                            False)

  self.LogInfo("Iallocator returned %s job(s) for changing group of"
               " instance '%s'", len(jobs), self.op.instance_name)

  return ResultWithJobs(jobs)
|
|
#!/usr/bin/env python
"""
The MIT License
Copyright (c) 2014 Dennis Hoppe
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import codecs
from cStringIO import StringIO
from getopt import getopt
from getopt import GetoptError
from lxml import etree
from sys import argv
from sys import exit
from time import time
import os
import zipfile
__author__ = 'Dennis Hoppe'
__email__ = 'hoppe.dennis@ymail.com'
__status__ = 'Development'
class FileHandler():
  """Split a concatenated stream of XML documents into single documents.

  USPTO bulk files contain many XML documents appended into one file; a
  new document starts at each XML declaration line.
  """

  def __init__(self, zfile):
    # zfile: any object exposing readline() (e.g. zipfile.ZipExtFile).
    self.zfile = zfile

  def readline(self):
    """Return the next line of the wrapped stream ('' at end of file)."""
    return self.zfile.readline()

  def listXmls(self):
    """Yield a StringIO per XML document found in the stream.

    The XML declaration that starts the next document is carried over as
    the first content of the next buffer.
    """
    output = StringIO()
    line = self.readline()
    output.write(line)
    line = self.readline()
    # BUG FIX: was "while line is not ''" - an identity comparison with a
    # string literal that only worked because CPython interns ''.  Compare
    # by value instead.
    while line != '':
      if '<?xml version="1.0" encoding="UTF-8"?>' in line:
        # Declaration found: flush the current document, start a new one.
        line = line.replace('<?xml version="1.0" encoding="UTF-8"?>', '')
        output.write(line)
        output.seek(0)
        yield output
        output = StringIO()
        output.write('<?xml version="1.0" encoding="UTF-8"?>')
      elif '<?xml version="1.0"?>' in line:
        line = line.replace('<?xml version="1.0"?>', '')
        output.write(line)
        output.seek(0)
        yield output
        output = StringIO()
        output.write('<?xml version="1.0"?>')
      else:
        output.write(line)
      try:
        line = self.readline()
      except StopIteration:
        break
    # Flush the final (possibly partial) document.
    output.seek(0)
    yield output
class SimpleXMLHandler(object):
  """lxml target parser extracting searchable index fields from USPTO XML.

  Collects query tokens such as 'INV/"John Doe"', 'AS/"Acme"', 'TTL/"..."'
  into write_buffer; close() returns the accumulated list.
  """

  def __init__(self):
    # xml element state flags: 1 while inside the corresponding element
    self.last_name = 0
    self.first_name = 0
    self.given_name = 0
    self.middle_name = 0
    self.family_name = 0
    self.organization_name = 0
    self.inventor = 0
    self.assignee = 0
    self.country_code = 0
    self.document_date = 0
    self.pct_application = 0
    self.ipc = 0
    self.primary_ipc = 0
    self.primary_uspc = 0
    self.title = 0
    self.abstract = 0
    self.document_id = 0
    self.set_id = False  # only the first doc-number becomes the ID
    self.code_class = 0
    self.subclass = 0
    self.section = 0
    self.group = 0
    self.subgroup = 0
    self.references_cited = 0
    self.publication_reference = 0
    self.addressbook = 0
    self.classification_search = 0
    # character-data accumulators for the elements above
    self.buffer = ''
    self.firstname_buffer = ''
    self.lastname_buffer = ''
    self.organization_buffer = ''
    self.given_name_buffer = ''
    self.middle_name_buffer = ''
    self.family_name_buffer = ''
    self.country_buffer = ''
    self.date_buffer = ''
    self.ipc_buffer = ''
    self.uspc_buffer = ''
    self.title_buffer = ''
    self.abstract_buffer = ''
    self.id_buffer = ''
    # storage: collected query tokens, returned by close()
    self.write_buffer = []

  def start(self, tag, attributes):
    """Set the state flag for the element being opened."""
    if tag == 'last-name':
      self.last_name = 1
    elif tag == 'first-name':
      self.first_name = 1
    elif tag == 'given-name':
      self.given_name = 1
    elif tag == 'middle-name':
      self.middle_name = 1
    elif tag == 'family-name':
      self.family_name = 1
    elif tag == 'organization-name' or tag == 'orgname':
      self.organization_name = 1
    elif tag == 'inventors' or tag == 'applicants':
      self.inventor = 1
    elif tag == 'assignees':
      self.assignee = 1
    elif tag == 'country-code' or tag == 'country':
      self.country_code = 1
    elif tag == 'classification-ipc-primary' or tag == 'classification-ipcr':
      self.primary_ipc = 1
    elif tag == 'ipc':
      self.ipc = 1
    elif tag == 'class':
      self.code_class = 1
    elif tag == 'subclass':
      self.subclass = 1
    elif tag == 'section':
      self.section = 1
    elif tag == 'main-group':
      self.group = 1
    elif tag == 'subgroup' or tag == 'main-classification':
      self.subgroup = 1
    elif tag == 'references-cited':
      self.references_cited = 1
    elif tag == 'classification-us-primary' or tag == 'classification-national':
      self.primary_uspc = 1
    elif tag == 'pct-application':
      self.pct_application = 1
    elif tag == 'publication-reference':
      # Dates/ids inside the publication reference are the real ones.
      self.pct_application = 0
    elif tag == 'addressbook':
      self.addressbook = 1
    elif tag == 'document-date' or tag == 'date':
      self.document_date = 1
    elif tag == 'title-of-invention' or tag == 'invention-title':
      self.title = 1
    elif tag == 'subdoc-abstract' or tag == 'abstract':
      self.abstract = 1
    elif tag == 'doc-number':
      self.document_id = 1
    elif tag == 'us-field-of-classification-search':
      self.classification_search = 1

  def data(self, data):
    """Accumulate character data into the buffer of the active element."""
    if self.last_name == 1:
      self.lastname_buffer += data
    elif self.first_name == 1:
      self.firstname_buffer += data
    elif self.given_name == 1:
      self.given_name_buffer += data
    elif self.middle_name == 1:
      self.middle_name_buffer += data
    elif self.family_name == 1:
      self.family_name_buffer += data
    elif self.organization_name == 1:
      self.organization_buffer += data
    elif self.country_code == 1:
      self.country_buffer += data
    elif self.document_date == 1:
      self.date_buffer += data
    elif self.ipc == 1:
      self.ipc_buffer += data
    elif self.code_class == 1:
      # Class codes feed both the IPC and the USPC buffer.
      self.ipc_buffer += data
      self.uspc_buffer += data
    elif self.subclass == 1:
      self.ipc_buffer += data
    # BUG FIX: an unreachable duplicate "elif self.code_class == 1" branch
    # (shadowed by the one above) was removed here.
    elif self.section == 1:
      self.ipc_buffer += data
    elif self.group == 1:
      self.ipc_buffer += data
    elif self.subgroup == 1:
      self.ipc_buffer += data
      self.uspc_buffer += data
    elif self.title == 1:
      self.title_buffer += data
    elif self.abstract == 1:
      self.abstract_buffer += data
    elif self.document_id == 1:
      self.id_buffer += data

  def end(self, tag):
    """Emit collected tokens and clear flags/buffers for the closed tag."""
    if tag == 'last-name':
      self.last_name = 0
    elif tag == 'first-name':
      if self.addressbook == 1:
        if self.inventor == 1:
          self.firstname_buffer += ' '
          self.write_buffer.append("INV/\"" + self.firstname_buffer + self.lastname_buffer + "\"")
      self.firstname_buffer = ''
      self.lastname_buffer = ''
      self.first_name = 0
    elif tag == 'given-name':
      self.given_name = 0
    elif tag == 'middle-name':
      self.middle_name = 0
    elif tag == 'family-name':
      if self.addressbook == 1:
        if self.inventor == 1:
          self.given_name_buffer += ' '
          if self.middle_name_buffer != '':
            self.given_name_buffer += self.middle_name_buffer
            self.given_name_buffer += ' '
          self.write_buffer.append("INV/\"" + self.given_name_buffer + self.family_name_buffer + "\"")
      self.given_name_buffer = ''
      self.middle_name_buffer = ''
      self.family_name_buffer = ''
      self.family_name = 0
    elif tag == 'organization-name' or tag == 'orgname':
      if self.addressbook == 1:
        if self.assignee == 1:
          self.write_buffer.append("AS/\"" + self.organization_buffer.strip() + "\"")
      self.organization_buffer = ''
      self.organization_name = 0
    elif tag == 'inventors' or tag == 'applicants':
      self.inventor = 0
    elif tag == 'assignees':
      self.assignee = 0
    elif tag == 'addressbook':
      self.addressbook = 0
    elif tag == 'country-code' or tag == 'country':
      if self.addressbook == 1:
        if self.inventor == 1:
          self.write_buffer.append("ICN/" + self.country_buffer)
        elif self.assignee == 1:
          self.write_buffer.append("ACN/" + self.country_buffer)
      self.country_buffer = ''
      self.country_code = 0
    elif tag == 'doc-number':
      # Only the first doc-number outside a PCT application is the ID.
      if self.set_id == False and self.pct_application == 0:
        self.write_buffer.append("ID/" + self.id_buffer)
        self.set_id = True
      self.id_buffer = ''
      self.document_id = 0
    elif tag == 'document-date' or tag == 'date':
      if self.pct_application == 0:
        self.write_buffer.append("APD/" + self.date_buffer)
      self.date_buffer = ''
      self.document_date = 0
    elif tag == 'pct-application':
      self.pct_application = 0
    elif tag == 'publication-reference':
      self.pct_application = 1
    elif tag == 'classification-ipc-primary' or tag == 'classification-ipcr':
      self.primary_ipc = 0
    elif tag == 'ipc':
      if self.primary_ipc == 1:
        self.write_buffer.append("ICL/" + self.ipc_buffer)
      self.ipc_buffer = ''
      self.ipc = 0
    elif tag == 'classification-us-primary' or tag == 'classification-national':
      self.primary_uspc = 0
    elif tag == 'class':
      self.code_class = 0
    elif tag == 'subclass':
      if self.primary_uspc == 1:
        self.write_buffer.append("CCL/" + self.uspc_buffer)
      if self.ipc != 1:
        self.uspc_buffer = ''
      self.subclass = 0
    elif tag == 'section':
      self.section = 0
    elif tag == 'main-group':
      self.group = 0
    elif tag == 'subgroup' or tag == 'main-classification':
      # Ignore classifications listed in citations or search fields.
      if self.references_cited == 0 and self.classification_search == 0:
        if self.primary_uspc == 1:
          self.write_buffer.append("CCL/" + self.uspc_buffer.strip())
        elif self.primary_ipc == 1:
          self.write_buffer.append("ICL/" + self.ipc_buffer.strip())
      self.uspc_buffer = ''
      self.ipc_buffer = ''
      self.subgroup = 0
    elif tag == 'title-of-invention' or tag == 'invention-title':
      self.write_buffer.append("TTL/\"" + self.title_buffer + "\"")
      self.title_buffer = ''
      self.title = 0
    elif tag == 'subdoc-abstract' or tag == 'abstract':
      if self.abstract_buffer != '':
        # Normalize whitespace: tabs and newlines become single spaces.
        self.abstract_buffer = self.abstract_buffer.expandtabs(1)
        self.abstract_buffer = ' '.join(self.abstract_buffer.splitlines())
        self.abstract_buffer = self.abstract_buffer.strip()
        self.write_buffer.append("ABST/\"" + self.abstract_buffer + "\"")
      self.abstract_buffer = ''
      self.abstract = 0
    elif tag == 'references-cited':
      self.references_cited = 0
    elif tag == 'us-field-of-classification-search':
      self.classification_search = 0

  def close(self):
    """Called by lxml when parsing finishes; return the collected tokens."""
    return self.write_buffer
def main(argv):
  """ XML Parser for USPTO patent files
  Usage: python uspto-xml-parser.py [options] [source]
  Options:
    -h, --help             show this help
    -f .., --file=...
    -p .., --path=...      path to USPTO patent applications
    -o .., --output=...    file to write results
    -l .., --limit-to=...  limit parsing to a specific year
    -d, --load-dtd         if set, then the DTD is loaded (e.g., 2001)
  Examples:
    uspto-xml-parser.py -p /media/backup_/patents/upsto-pair/appl_full_texts
  Issues:
    The file '2003/pa030501.zip' is corrupt. While parsing, one encounters
    a long sequence of chars 0x0 ending not well-formed. The patent is currently
    discarded.
    Parsing XML is very slow due to '<?xml version' checking. Implement
    a character-based check instead of line-based checking.
  """
  dtd = False
  limit = None
  folder_in = None
  file_in = None
  out_file = 'names-in-patents.txt'
  try:
    # BUG FIX: long options that take a value must be declared with a
    # trailing '=' (e.g. 'output='); without it getopt treats them as flags.
    opts, args = getopt(argv, 'dl:o:p:f:h',
                        ['load-dtd', 'limit-to=', 'output=', 'file=',
                         'path=', 'help'])
  except GetoptError:
    usage()
    exit(2)
  for opt, arg in opts:
    if opt in ('-h', '--help'):
      usage()
      exit()
    if opt in ('-p', '--path'):
      if not os.path.isdir(arg):
        usage()
        exit(2)
      folder_in = arg
    if opt in ('-f', '--file'):
      if not os.path.isfile(arg):
        usage()
        exit(2)
      file_in = arg
    if opt in ('-o', '--output'):
      if os.path.isdir(arg):
        usage()
        exit(2)
      out_file = arg
    if opt in ('-l', '--limit-to'):
      limit = arg
    if opt in ('-d', '--load-dtd'):
      dtd = True
  write_buffer = codecs.open(out_file, 'w', 'utf-8')
  patents_count = 0
  if file_in is not None:
    # Parse a single XML file.
    start = time()
    f = open(file_in, 'rU')
    parser = etree.XMLParser(target=SimpleXMLHandler(),
                             resolve_entities=False, load_dtd=dtd)
    result = etree.parse(f, parser)
    write_buffer.write('FILE/' + os.path.basename(file_in) + '\t')
    for applicant in result:
      write_buffer.write(applicant)
      write_buffer.write('\t')
    write_buffer.write('\n')
    write_buffer.flush()
    elapsed = (time() - start)
    print(str(elapsed) + ' seconds elapsed in total.')
  elif folder_in is not None:
    # Walk year folders and parse every zip archive within.
    file_count = 0
    for folder in os.listdir(folder_in):
      folder = folder_in + '/' + folder
      if os.path.isdir(folder):
        if limit is not None:
          if not folder.endswith(limit):
            continue
        for zip_file in os.listdir(folder):
          zip_file = folder + '/' + zip_file
          if not zip_file.endswith('zip'):
            continue
          if zip_file.endswith('pa030501.zip'):
            continue # that file is corrupt
          try:
            zfile = zipfile.ZipFile(zip_file, 'r')
          except zipfile.BadZipfile:
            continue
          print('process ' + str(zip_file))
          patents_within_document = 0
          start = time()
          for name in zfile.namelist():
            # BUG FIX: the original test used 'or' ("not .xml or not
            # .sgml"), which is true for every name, so ALL entries were
            # skipped.  Skip only names that are neither .xml nor .sgml.
            if not name.endswith(('.xml', '.sgml')):
              continue
            f = FileHandler(zfile.open(name, 'rU'))
            for elem in f.listXmls():
              parser = etree.XMLParser(target=SimpleXMLHandler(),
                                       resolve_entities=False, load_dtd=dtd)
              result = etree.parse(elem, parser)
              write_buffer.write('FILE/' + os.path.basename(name) + '\t')
              for applicant in result:
                write_buffer.write(applicant)
                write_buffer.write('\t')
              write_buffer.write('\n')
              write_buffer.flush()
              patents_count = patents_count + 1
              patents_within_document = patents_within_document + 1
          zfile.close()
          print(str(patents_within_document) + ' patents parsed.')
          elapsed = (time() - start)
          print(str(elapsed) + ' seconds elapsed in total.')
  print(str(patents_count) + ' patents examined.')
def usage():
  """Print the command-line help text (main's docstring)."""
  # Parenthesized print works identically under Python 2 and 3.
  print(main.__doc__)
# Script entry point: forward all command-line arguments except the
# program name.
if __name__ == "__main__":
  main(argv[1:])
# EOF
|
|
# coding=utf-8
# Copyright {{cookiecutter.authors}} and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for {{cookiecutter.modelname}}."""
{%- if cookiecutter.tokenizer_type == "Based on BERT" %}
from ...utils import logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"{{cookiecutter.checkpoint_identifier}}": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"{{cookiecutter.checkpoint_identifier}}": {"do_lower_case": False},
}
class {{cookiecutter.camelcase_modelname}}Tokenizer(BertTokenizer):
    r"""
    Construct a {{cookiecutter.modelname}} tokenizer.
    :class:`~transformers.{{cookiecutter.camelcase_modelname}}Tokenizer` is identical to :class:`~transformers.BertTokenizer` and runs end-to-end
    tokenization: punctuation splitting and wordpiece.
    Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning
    parameters.
    """

    # Pretrained-resource wiring; all behavior is inherited from BertTokenizer.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
{%- elif cookiecutter.tokenizer_type == "Based on BART" %}
from ...utils import logging
from ..bart.tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.json",
},
"merges_file": {
"{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/merges.txt",
},
"tokenizer_file": {
"{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"{{cookiecutter.checkpoint_identifier}}": 1024,
}
class {{cookiecutter.camelcase_modelname}}Tokenizer(BartTokenizer):
    """
    Construct a {{cookiecutter.modelname}} tokenizer.
    :class:`~transformers.{{cookiecutter.camelcase_modelname}}Tokenizer` is identical to :class:`~transformers.BartTokenizer` and runs end-to-end
    tokenization: punctuation splitting and wordpiece.
    Refer to superclass :class:`~transformers.BartTokenizer` for usage examples and documentation concerning
    parameters.
    """

    # Pretrained-resource wiring; all behavior is inherited from BartTokenizer.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
{%- elif cookiecutter.tokenizer_type == "Standalone" %}
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"{{cookiecutter.checkpoint_identifier}}": 1024,
}
class {{cookiecutter.camelcase_modelname}}Tokenizer(PreTrainedTokenizer):
    """
    Construct a {{cookiecutter.modelname}} tokenizer. Based on byte-level Byte-Pair-Encoding.
    Args:
        vocab_file (:obj:`str`):
            Path to the vocabulary file.
    """

    # Resource wiring used by `from_pretrained`.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        **kwargs
    ):
        # Wrap plain-string special tokens in AddedToken (no lstrip/rstrip)
        # so their surrounding whitespace handling is explicit.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs)
        "Initialisation"

    # NOTE(template): the bare-string bodies below are placeholders the
    # author of the generated model is expected to implement.
    @property
    def vocab_size(self):
        "Returns vocab size"

    def get_vocab(self):
        "Returns vocab as a dict"

    def _tokenize(self, text):
        """ Returns a tokenized string. """

    def _convert_token_to_id(self, token):
        """ Converts a token (str) in an id using the vocab. """

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""

    def convert_tokens_to_string(self, tokens):
        """ Converts a sequence of tokens (string) in a single string. """

    def save_vocabulary(self, save_directory):
        """
        Save the vocabulary and special tokens file to a directory.
        Args:
            save_directory (:obj:`str`):
                The directory in which to save the vocabulary.
        Returns:
            :obj:`Tuple(str)`: Paths to the files saved.
        """

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks
        by concatenating and adding special tokens.
        A {{cookiecutter.modelname}} sequence has the following format:
        - single sequence: ``<s> X </s>``
        - pair of sequences: ``<s> A </s></s> B </s>``
        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
        Returns:
            :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` method.
        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the token list is already formatted with special tokens for the model.
        Returns:
            :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task.
        {{cookiecutter.modelname}} does not make use of token type ids, therefore a list of zeros is returned.
        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
        Returns:
            :obj:`List[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        # Byte-level BPE needs a leading space to treat the first word like
        # any other word when requested via add_prefix_space.
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
class {{cookiecutter.camelcase_modelname}}TokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" {{cookiecutter.modelname}} tokenizer (backed by HuggingFace's `tokenizers` library).
    Args:
        vocab_file (:obj:`str`):
            Path to the vocabulary file.
    """

    # Resource wiring used by `from_pretrained`.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs
    ):
        # Build the backing byte-level BPE tokenizer and hand it to the
        # fast-tokenizer base class.
        super().__init__(
            ByteLevelBPETokenizer(
                vocab_file=vocab_file,
                merges_file=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Single sequence: <s> A </s>; pair: <s> A </s></s> B </s>.
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task.
        {{cookiecutter.modelname}} does not make use of token type ids, therefore a list of zeros is returned.
        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
        Returns:
            :obj:`List[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
{% endif %}
|
|
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Suppressions for V8 correctness fuzzer failures.
We support three types of suppressions:
1. Ignore test case by pattern.
Map a regular expression to a bug entry. A new failure will be reported
when the pattern matches a JS test case.
   Subsequent matches will be recorded under the first failure.
2. Ignore test run by output pattern:
Map a regular expression to a bug entry. A new failure will be reported
when the pattern matches the output of a particular run.
   Subsequent matches will be recorded under the first failure.
3. Relax line-to-line comparisons with expressions of lines to ignore and
lines to be normalized (i.e. ignore only portions of lines).
These are not tied to bugs, be careful to not silently switch off this tool!
Alternatively, think about adding a behavior change to v8_suppressions.js
to silence a particular class of problems.
"""
import itertools
import re
# Max line length for regular expressions checking for lines to ignore.
MAX_LINE_LENGTH = 512
# For ignoring lines before carets and to ignore caret positions.
CARET_RE = re.compile(r'^\s*\^\s*$')
# Ignore by test case pattern. Map from bug->regexp.
# Regular expressions are assumed to be compiled. We use regexp.match.
IGNORE_TEST_CASES = {
'crbug.com/662907':
re.compile(r'.*new Array.*\[\d+\] =.*'
r'((Array)|(Object)).prototype.__defineSetter__.*', re.S),
'crbug.com/663340':
re.compile(r'.*\.shift\(\).*', re.S),
'crbug.com/666308':
re.compile(r'.*End stripped down and modified version.*'
r'\.prototype.*instanceof.*.*', re.S),
}
# Ignore by output pattern. Map from config->bug->regexp. Config '' is used
# to match all configurations. Otherwise use either a compiler configuration,
# e.g. fullcode or validate_asm or an architecture, e.g. x64 or ia32 or a
# comma-separated combination, e.g. x64,fullcode, for more specific
# suppressions.
# Bug is preferred to be a crbug.com/XYZ, but can be any short distinguishable
# label.
# Regular expressions are assumed to be compiled. We use regexp.search.
IGNORE_OUTPUT = {
'': {
'crbug.com/664068':
re.compile(r'RangeError', re.S),
'crbug.com/669017':
re.compile(r'SyntaxError', re.S),
},
'validate_asm': {
'validate_asm':
re.compile(r'TypeError'),
},
}
# Lines matching any of the following regular expressions will be ignored
# if appearing on both sides. The capturing groups need to match exactly.
# Use uncompiled regular expressions - they'll be compiled later.
ALLOWED_LINE_DIFFS = [
# Ignore caret position in stack traces.
r'^\s*\^\s*$',
# Ignore some stack trace headers as messages might not match.
r'^(.*)TypeError: .* is not a function$',
r'^(.*)TypeError: .* is not a constructor$',
r'^(.*)TypeError: (.*) is not .*$',
r'^(.*)ReferenceError: .* is not defined$',
r'^(.*):\d+: ReferenceError: .* is not defined$',
# These are rarely needed. It includes some cases above.
r'^\w*Error: .* is not .*$',
r'^(.*) \w*Error: .* is not .*$',
r'^(.*):\d+: \w*Error: .* is not .*$',
# Some test cases just print the message.
r'^.* is not a function(.*)$',
r'^(.*) is not a .*$',
# crbug.com/669017
r'^(.*)SyntaxError: .*$',
# Ignore lines of stack traces as character positions might not match.
r'^ at (?:new )?([^:]*):\d+:\d+(.*)$',
r'^(.*):\d+:(.*)$',
# crbug.com/662840
r"^.*(?:Trying to access ')?(\w*)(?:(?:' through proxy)|"
r"(?: is not defined))$",
]
# Lines matching any of the following regular expressions will be ignored.
# Use uncompiled regular expressions - they'll be compiled later.
IGNORE_LINES = [
r'^Validation of asm\.js module failed: .+$',
r'^.*:\d+: Invalid asm.js: .*$',
r'^Warning: unknown flag .*$',
r'^Warning: .+ is deprecated.*$',
r'^Try --help for options$',
# crbug.com/677032
r'^.*:\d+:.*asm\.js.*: success$',
]
###############################################################################
# Implementation - you should not need to change anything below this point.
# Compile regular expressions.
ALLOWED_LINE_DIFFS = [re.compile(exp) for exp in ALLOWED_LINE_DIFFS]
IGNORE_LINES = [re.compile(exp) for exp in IGNORE_LINES]
def line_pairs(lines):
  # Yield (line, next_line) pairs; the final pair is (last_line, None).
  # Python 2 izip_longest; `lines` is iterated twice, so it must be a
  # sequence (a list of output lines), not a one-shot iterator.
  return itertools.izip_longest(
      lines, itertools.islice(lines, 1, None), fillvalue=None)
def caret_match(line1, line2):
  """Return True if both lines are short caret-marker lines (e.g. '  ^')."""
  if not (line1 and line2):
    return False
  if len(line1) > MAX_LINE_LENGTH or len(line2) > MAX_LINE_LENGTH:
    return False
  return bool(CARET_RE.match(line1) and CARET_RE.match(line2))
def short_line_output(line):
  """Truncate overly long lines for display, appending an ellipsis."""
  if len(line) > MAX_LINE_LENGTH:
    return line[:MAX_LINE_LENGTH] + '...'
  # Short enough - return unchanged to avoid copying.
  return line
def ignore_by_regexp(line1, line2, allowed):
  """Return True if some regexp in `allowed` matches both lines with
  identical capture groups (i.e. the difference is suppressible)."""
  if len(line1) > MAX_LINE_LENGTH or len(line2) > MAX_LINE_LENGTH:
    return False
  for exp in allowed:
    first = exp.match(line1)
    if not first:
      continue
    second = exp.match(line2)
    # If the regexp captures groups, both lines must capture the same text.
    if second and first.groups() == second.groups():
      return True
  return False
def diff_output(output1, output2, allowed, ignore1, ignore2):
  """Return the first relevant difference between two lists of output lines.

  Args:
    output1, output2: lists of output lines from the two runs.
    allowed: compiled regexps; lines matching one with equal groups may
        differ without being reported.
    ignore1, ignore2: compiled regexps; matching lines are dropped from the
        respective side before comparison.

  Returns a '-'/'+'-prefixed diff snippet, or None if outputs are
  considered equivalent.
  """
  def useful_line(ignore):
    # Predicate factory: keep only lines matching none of `ignore`.
    def fun(line):
      return all(not e.match(line) for e in ignore)
    return fun
  lines1 = filter(useful_line(ignore1), output1)
  lines2 = filter(useful_line(ignore2), output2)
  # Walk both outputs in lockstep with one line of lookahead (Python 2
  # izip_longest; the exhausted side yields (None, None)).
  for ((line1, lookahead1), (line2, lookahead2)) in itertools.izip_longest(
      line_pairs(lines1), line_pairs(lines2), fillvalue=(None, None)):
    # Only one of the two iterators should run out.
    assert not (line1 is None and line2 is None)
    # One iterator ends earlier.
    if line1 is None:
      return '+ %s' % short_line_output(line2)
    if line2 is None:
      return '- %s' % short_line_output(line1)
    # If lines are equal, no further checks are necessary.
    if line1 == line2:
      continue
    # Look ahead. If next line is a caret, ignore this line.
    if caret_match(lookahead1, lookahead2):
      continue
    # Check if a regexp allows these lines to be different.
    if ignore_by_regexp(line1, line2, allowed):
      continue
    # Lines are different.
    return '- %s\n+ %s' % (short_line_output(line1), short_line_output(line2))
  # No difference found.
  return None
def get_suppression(arch1, config1, arch2, config2):
  """Factory returning the V8-specific suppression checker."""
  suppression = V8Suppression(arch1, config1, arch2, config2)
  return suppression
class Suppression(object):
  """No-op base class: reports no diffs and ignores nothing."""

  def diff(self, output1, output2):
    """Return a diff snippet or None; the base class never finds one."""
    return None

  def ignore(self, testcase):
    """Return a bug label if the test case is suppressed; never here."""
    return False

  def ignore_by_output1(self, output):
    """Return a bug label if run 1's output is suppressed; never here."""
    return False

  def ignore_by_output2(self, output):
    """Return a bug label if run 2's output is suppressed; never here."""
    return False
class V8Suppression(Suppression):
  """Concrete suppression rules for V8 correctness-fuzzer comparisons."""

  def __init__(self, arch1, config1, arch2, config2):
    # Architecture/configuration of the two compared runs.
    self.arch1 = arch1
    self.config1 = config1
    self.arch2 = arch2
    self.config2 = config2

  def diff(self, output1, output2):
    # Compare line by line using the module-level allow/ignore lists.
    return diff_output(
        output1.splitlines(),
        output2.splitlines(),
        ALLOWED_LINE_DIFFS,
        IGNORE_LINES,
        IGNORE_LINES,
    )

  def ignore(self, testcase):
    # Return the bug label of the first matching test-case pattern,
    # or False if none matches (Python 2 iteritems).
    for bug, exp in IGNORE_TEST_CASES.iteritems():
      if exp.match(testcase):
        return bug
    return False

  def ignore_by_output1(self, output):
    return self.ignore_by_output(output, self.arch1, self.config1)

  def ignore_by_output2(self, output):
    return self.ignore_by_output(output, self.arch2, self.config2)

  def ignore_by_output(self, output, arch, config):
    # Check output against suppressions for: all configurations (''),
    # the architecture, the configuration, and the "arch,config"
    # combination - the first matching bug label wins.
    def check(mapping):
      for bug, exp in mapping.iteritems():
        if exp.search(output):
          return bug
      return None
    bug = check(IGNORE_OUTPUT.get('', {}))
    if bug:
      return bug
    bug = check(IGNORE_OUTPUT.get(arch, {}))
    if bug:
      return bug
    bug = check(IGNORE_OUTPUT.get(config, {}))
    if bug:
      return bug
    bug = check(IGNORE_OUTPUT.get('%s,%s' % (arch, config), {}))
    if bug:
      return bug
    return None
|
|
#!/usr/bin/env python
"""
<Program Name>
test_util.py
<Author>
Konstantin Andrianov.
<Started>
February 1, 2013.
<Copyright>
See LICENSE for licensing information.
<Purpose>
Unit test for 'util.py'
"""
import os
import sys
import shutil
import logging
import tempfile
import unittest
import timeit
import securesystemslib.settings
import securesystemslib.hash
import securesystemslib.util
import securesystemslib.unittest_toolbox as unittest_toolbox
import securesystemslib.exceptions as exceptions
logger = logging.getLogger(__name__)
class TestUtil(unittest_toolbox.Modified_TestCase):
  """Unit tests for the helper functions in securesystemslib.util."""

  def setUp(self):
    unittest_toolbox.Modified_TestCase.setUp(self)
    self.temp_fileobj = tempfile.TemporaryFile()

  def tearDown(self):
    unittest_toolbox.Modified_TestCase.tearDown(self)
    self.temp_fileobj.close()

  def test_B1_get_file_details(self):
    """get_file_details() returns (length, hash dict) for an existing file
    and raises StorageError / FormatError for bad paths / bad types."""
    # Making a temporary file.
    filepath = self.make_temp_data_file()

    # Computing the hash and length of the tempfile.
    digest_object = securesystemslib.hash.digest_filename(filepath, algorithm='sha256')
    file_hash = {'sha256' : digest_object.hexdigest()}
    file_length = os.path.getsize(filepath)

    # Test: Expected input.
    self.assertEqual(securesystemslib.util.get_file_details(filepath),
        (file_length, file_hash))

    # Test: Incorrect input.
    bogus_inputs = [self.random_string(), 1234, [self.random_string()],
        {'a': 'b'}, None]

    for bogus_input in bogus_inputs:
      if isinstance(bogus_input, str):
        # A string that does not name an existing file fails in storage.
        self.assertRaises(securesystemslib.exceptions.StorageError,
            securesystemslib.util.get_file_details, bogus_input)
      else:
        # Non-string arguments are rejected by schema validation first.
        self.assertRaises(securesystemslib.exceptions.FormatError,
            securesystemslib.util.get_file_details, bogus_input)

  def test_B2_get_file_hashes(self):
    """get_file_hashes() returns only the hash dict; bad input raises."""
    # Making a temporary file.
    filepath = self.make_temp_data_file()

    # Computing the hash of the tempfile.
    digest_object = securesystemslib.hash.digest_filename(filepath, algorithm='sha256')
    file_hash = {'sha256' : digest_object.hexdigest()}

    # Test: Expected input.
    self.assertEqual(securesystemslib.util.get_file_hashes(filepath),
        file_hash)

    # Test: Incorrect input.
    bogus_inputs = [self.random_string(), 1234, [self.random_string()],
        {'a': 'b'}, None]

    for bogus_input in bogus_inputs:
      if isinstance(bogus_input, str):
        self.assertRaises(securesystemslib.exceptions.StorageError,
            securesystemslib.util.get_file_hashes, bogus_input)
      else:
        self.assertRaises(securesystemslib.exceptions.FormatError,
            securesystemslib.util.get_file_hashes, bogus_input)

  def test_B3_get_file_length(self):
    """get_file_length() returns the OS-reported size; bad input raises."""
    # Making a temporary file.
    filepath = self.make_temp_data_file()

    # Computing the length of the tempfile.  (A SHA-256 digest was also
    # computed here originally but never used; the wasted hashing pass has
    # been removed.)
    file_length = os.path.getsize(filepath)

    # Test: Expected input.
    self.assertEqual(securesystemslib.util.get_file_length(filepath), file_length)

    # Test: Incorrect input.
    bogus_inputs = [self.random_string(), 1234, [self.random_string()],
        {'a': 'b'}, None]

    for bogus_input in bogus_inputs:
      if isinstance(bogus_input, str):
        self.assertRaises(securesystemslib.exceptions.StorageError,
            securesystemslib.util.get_file_length, bogus_input)
      else:
        self.assertRaises(securesystemslib.exceptions.FormatError,
            securesystemslib.util.get_file_length, bogus_input)

  def test_B4_ensure_parent_dir(self):
    """ensure_parent_dir() creates missing parent directories, validates
    its argument, and tolerates filenames without a directory part."""
    existing_parent_dir = self.make_temp_directory()
    non_existing_parent_dir = os.path.join(existing_parent_dir, 'a', 'b')

    for parent_dir in [existing_parent_dir, non_existing_parent_dir, 12, [3]]:
      if isinstance(parent_dir, str):
        securesystemslib.util.ensure_parent_dir(os.path.join(parent_dir, 'a.txt'))
        self.assertTrue(os.path.isdir(parent_dir))
      else:
        self.assertRaises(securesystemslib.exceptions.FormatError,
            securesystemslib.util.ensure_parent_dir, parent_dir)

    # Check that when a folder cannot be created a StorageError is thrown.
    # NOTE(review): assumes the test is not running with permissions to
    # create '/a' (e.g. as root) -- confirm in CI.
    with self.assertRaises(securesystemslib.exceptions.StorageError):
      securesystemslib.util.ensure_parent_dir("/a/file.txt")

    # When we call ensure_parent_dir with filepath arg like "a.txt",
    # then the directory of that filepath will be an empty string.
    # We want to make sure that securesystemslib.storage.create_folder()
    # won't be called with an empty string and thus raise an exception.
    # If an exception is thrown the test will fail.
    securesystemslib.util.ensure_parent_dir('a.txt')

  def test_B5_file_in_confined_directories(self):
    """file_in_confined_directories() validates input and matches only
    direct children of the confined directories."""
    # Goal: Provide invalid input for 'filepath' and 'confined_directories'.
    # Include inputs like: '[1, 2, "a"]' and such...
    # Reference to 'file_in_confined_directories()' to improve readability.
    in_confined_directory = securesystemslib.util.file_in_confined_directories
    list_of_confined_directories = ['a', 12, {'a':'a'}, [1]]
    list_of_filepaths = [12, ['a'], {'a':'a'}, 'a']

    # Every pairing contains at least one schema-invalid argument.
    for bogus_confined_directory in list_of_confined_directories:
      for filepath in list_of_filepaths:
        self.assertRaises(securesystemslib.exceptions.FormatError,
            in_confined_directory, filepath, bogus_confined_directory)

    # Test: Inputs that evaluate to False.
    confined_directories = ['a/b/', 'a/b/c/d/']
    self.assertFalse(in_confined_directory('a/b/c/1.txt', confined_directories))

    confined_directories = ['a/b/c/d/e/']
    self.assertFalse(in_confined_directory('a', confined_directories))
    self.assertFalse(in_confined_directory('a/b', confined_directories))
    self.assertFalse(in_confined_directory('a/b/c', confined_directories))
    self.assertFalse(in_confined_directory('a/b/c/d', confined_directories))
    # Below, 'e' is a file in the 'a/b/c/d/' directory.
    self.assertFalse(in_confined_directory('a/b/c/d/e', confined_directories))

    # Test: Inputs that evaluate to True.
    self.assertTrue(in_confined_directory('a/b/c.txt', ['']))
    self.assertTrue(in_confined_directory('a/b/c.txt', ['a/b/']))
    self.assertTrue(in_confined_directory('a/b/c.txt', ['x', '']))
    self.assertTrue(in_confined_directory('a/b/c/..', ['a/']))

  def test_B7_load_json_string(self):
    """load_json_string() round-trips JSON and raises Error on bad input."""
    # Test normal case.
    data = ['a', {'b': ['c', None, 30.3, 29]}]
    json_string = securesystemslib.util.json.dumps(data)
    self.assertEqual(data, securesystemslib.util.load_json_string(json_string))

    # Test invalid arguments.
    self.assertRaises(securesystemslib.exceptions.Error,
        securesystemslib.util.load_json_string, 8)
    invalid_json_string = json_string + '.'
    self.assertRaises(securesystemslib.exceptions.Error,
        securesystemslib.util.load_json_string, invalid_json_string)

  def test_B8_load_json_file(self):
    """load_json_file() parses a JSON file and raises FormatError,
    StorageError or Error for bad args, missing files and bad content."""
    data = ['a', {'b': ['c', None, 30.3, 29]}]
    filepath = self.make_temp_file()
    # 'with' guarantees the file handle is closed even if dump() raises.
    with open(filepath, 'wt') as fileobj:
      securesystemslib.util.json.dump(data, fileobj)

    self.assertEqual(data, securesystemslib.util.load_json_file(filepath))

    # Improperly formatted arguments.
    for bogus_arg in [1, [b'a'], {'a':b'b'}]:
      self.assertRaises(securesystemslib.exceptions.FormatError,
          securesystemslib.util.load_json_file, bogus_arg)

    # Non-existent path.
    self.assertRaises(securesystemslib.exceptions.StorageError,
        securesystemslib.util.load_json_file, 'non-existent.json')

    # Invalid JSON content.
    filepath_bad_data = self.make_temp_file()
    with open(filepath_bad_data, 'wt') as fileobj:
      fileobj.write('junk data')

    self.assertRaises(securesystemslib.exceptions.Error,
        securesystemslib.util.load_json_file, filepath_bad_data)

  def test_B9_persist_temp_file(self):
    """persist_temp_file() writes the temp file to its destination and,
    by default, closes the temp file object."""
    # Destination directory to save the temporary file in.
    dest_temp_dir = self.make_temp_directory()

    # Test the default of persisting the file and closing the tmpfile.
    dest_path = os.path.join(dest_temp_dir, self.random_string())
    tmpfile = tempfile.TemporaryFile()
    tmpfile.write(self.random_string().encode('utf-8'))
    securesystemslib.util.persist_temp_file(tmpfile, dest_path)
    self.assertTrue(dest_path)
    self.assertTrue(tmpfile.closed)

    # Test persisting a file without automatically closing the tmpfile.
    dest_path2 = os.path.join(dest_temp_dir, self.random_string())
    tmpfile = tempfile.TemporaryFile()
    tmpfile.write(self.random_string().encode('utf-8'))
    securesystemslib.util.persist_temp_file(tmpfile, dest_path2,
        should_close=False)
    self.assertFalse(tmpfile.closed)

    # Test persisting a file with an empty filename.
    with self.assertRaises(exceptions.StorageError):
      securesystemslib.util.persist_temp_file(tmpfile, "")

    tmpfile.close()

  def test_C5_unittest_toolbox_make_temp_directory(self):
    # Verify that the tearDown function does not fail when
    # unittest_toolbox.make_temp_directory deletes the generated temp directory
    # here.
    temp_directory = self.make_temp_directory()
    os.rmdir(temp_directory)

  def test_c5_unittest_toolbox_random_path(self):
    # Verify that a random path can be generated with unittest_toolbox.
    random_path = self.random_path(length=10)
    # NOTE(review): `securesystemslib.formats` is not imported at the top of
    # this module; this only works if importing securesystemslib.util makes
    # the submodule reachable -- confirm and import it explicitly.
    self.assertTrue(securesystemslib.formats.PATH_SCHEMA.matches(random_path))
    # NOTE(review): assertTrue(10, len(...)) does not compare the values --
    # it only checks that 10 is truthy.  assertEqual was probably intended;
    # left unchanged here to avoid silently tightening the test.
    self.assertTrue(10, len(random_path))

  def test_digests_are_equal(self):
    """digests_are_equal() compares hex digests in constant time."""
    digest = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'

    # Normal case: test for digests that are equal.
    self.assertTrue(securesystemslib.util.digests_are_equal(digest, digest))

    # Normal case: test for digests that are unequal.
    self.assertFalse(securesystemslib.util.digests_are_equal(digest, '0a8df1'))

    # Test for invalid arguments.
    self.assertRaises(securesystemslib.exceptions.FormatError,
        securesystemslib.util.digests_are_equal, 7, digest)
    self.assertRaises(securesystemslib.exceptions.FormatError,
        securesystemslib.util.digests_are_equal, digest, 7)

    # Test that digests_are_equal() takes the same amount of time to compare
    # equal and unequal arguments.
    runtime = timeit.timeit('digests_are_equal("ab8df", "ab8df")',
        setup='from securesystemslib.util import digests_are_equal',
        number=100000)
    runtime2 = timeit.timeit('digests_are_equal("ab8df", "1b8df")',
        setup='from securesystemslib.util import digests_are_equal',
        number=100000)
    runtime3 = timeit.timeit('"ab8df" == "ab8df"', number=100000)
    # NOTE(review): runtime4 runs 1,000,000 iterations while runtime3 runs
    # 100,000, so ratio_variable_compare is inflated ~10x by iteration count
    # alone (which matches the "at or near 10?" comment below) -- confirm
    # whether the mismatch is intentional before "fixing" it.
    runtime4 = timeit.timeit('"ab8df" == "1b8df"', number=1000000)

    # The ratio for the 'digest_are_equal' runtimes should be at or near 1.
    ratio_digests_are_equal = abs(runtime2 / runtime)

    # The ratio for the variable-time runtimes should be (>1) & at or near 10?
    ratio_variable_compare = abs(runtime4 / runtime3)

    self.assertTrue(ratio_digests_are_equal < ratio_variable_compare)
# Run unit test.
# Entry point when this file is executed directly (not imported).
if __name__ == '__main__':
  unittest.main()
|
|
# Copyright 2008-2014 Software freedom conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The WebDriver implementation.
"""
import base64
import warnings
from .command import Command
from .webelement import WebElement
from .remote_connection import RemoteConnection
from .errorhandler import ErrorHandler
from .switch_to import SwitchTo
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidSelectorException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.html5.application_cache import ApplicationCache
# Python 2 compatibility shim: rebind the module-global name `str` to
# `basestring` so isinstance checks below accept both str and unicode.
# On Python 3 `basestring` does not exist, the NameError is swallowed,
# and the builtin `str` is used unchanged.
try:
    str = basestring
except NameError:
    pass
class WebDriver(object):
    """
    Controls a browser by sending commands to a remote server.
    This server is expected to be running the WebDriver wire protocol as defined
    here: http://code.google.com/p/selenium/wiki/JsonWireProtocol

    :Attributes:
     - command_executor - The command.CommandExecutor object used to execute commands.
     - error_handler - errorhandler.ErrorHandler object used to verify that the server did not return an error.
     - session_id - The session ID to send with every command.
     - capabilities - A dictionary of capabilities of the underlying browser for this instance's session.
     - proxy - A selenium.webdriver.common.proxy.Proxy object, to specify a proxy for the browser to use.
    """

    def __init__(self, command_executor='http://127.0.0.1:4444/wd/hub',
        desired_capabilities=None, browser_profile=None, proxy=None, keep_alive=False):
        """
        Create a new driver that will issue commands using the wire protocol.

        :Args:
         - command_executor - Either a command.CommandExecutor object or a string that specifies the URL of a remote server to send commands to.
         - desired_capabilities - Dictionary holding predefined values for starting a browser
         - browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested.
        """
        if desired_capabilities is None:
            raise WebDriverException("Desired Capabilities can't be None")
        if not isinstance(desired_capabilities, dict):
            raise WebDriverException("Desired Capabilities must be a dictionary")
        if proxy is not None:
            # The proxy settings travel inside the capabilities dict.
            proxy.add_to_capabilities(desired_capabilities)
        self.command_executor = command_executor
        # A plain URL (str/bytes) is wrapped in a RemoteConnection; an
        # already-constructed CommandExecutor object is used as-is.
        # (`str` may be `basestring` here -- see the shim above the class.)
        if type(self.command_executor) is bytes or type(self.command_executor) is str:
            self.command_executor = RemoteConnection(command_executor, keep_alive=keep_alive)
        self._is_remote = True
        self.session_id = None
        self.capabilities = {}
        self.error_handler = ErrorHandler()
        # Subclass hook runs before the session is created on the server.
        self.start_client()
        self.start_session(desired_capabilities, browser_profile)
        self._switch_to = SwitchTo(self)

    @property
    def name(self):
        """Returns the name of the underlying browser for this instance.

        :Usage:
         - driver.name
        """
        if 'browserName' in self.capabilities:
            return self.capabilities['browserName']
        else:
            raise KeyError('browserName not specified in session capabilities')

    def start_client(self):
        """
        Called before starting a new session. This method may be overridden
        to define custom startup behavior.
        """
        pass

    def stop_client(self):
        """
        Called after executing a quit command. This method may be overridden
        to define custom shutdown behavior.
        """
        pass

    def start_session(self, desired_capabilities, browser_profile=None):
        """
        Creates a new session with the desired capabilities.

        :Args:
         - browser_name - The name of the browser to request.
         - version - Which browser version to request.
         - platform - Which platform to request the browser on.
         - javascript_enabled - Whether the new session should support JavaScript.
         - browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested.
        """
        if browser_profile:
            # The profile is sent base64-zip-encoded inside the capabilities.
            desired_capabilities['firefox_profile'] = browser_profile.encoded

        # NEW_SESSION returns both the session id and the server-granted
        # capabilities, which may differ from the requested ones.
        response = self.execute(Command.NEW_SESSION, {
            'desiredCapabilities': desired_capabilities,
        })
        self.session_id = response['sessionId']
        self.capabilities = response['value']

    def _wrap_value(self, value):
        # Recursively convert outgoing parameters into wire-protocol form:
        # WebElement instances become {'ELEMENT': id} references; dicts and
        # lists are converted element-wise; everything else passes through.
        if isinstance(value, dict):
            converted = {}
            for key, val in value.items():
                converted[key] = self._wrap_value(val)
            return converted
        elif isinstance(value, WebElement):
            return {'ELEMENT': value.id}
        elif isinstance(value, list):
            return list(self._wrap_value(item) for item in value)
        else:
            return value

    def create_web_element(self, element_id):
        """
        Creates a web element with the specified element_id.
        """
        return WebElement(self, element_id)

    def _unwrap_value(self, value):
        # Inverse of _wrap_value: turn {'ELEMENT': id} references from the
        # server back into WebElement objects, recursing into lists.
        if isinstance(value, dict) and 'ELEMENT' in value:
            return self.create_web_element(value['ELEMENT'])
        elif isinstance(value, list):
            return list(self._unwrap_value(item) for item in value)
        else:
            return value

    def execute(self, driver_command, params=None):
        """
        Sends a command to be executed by a command.CommandExecutor.

        :Args:
         - driver_command: The name of the command to execute as a string.
         - params: A dictionary of named parameters to send with the command.

        :Returns:
          The command's JSON response loaded into a dictionary object.
        """
        # Every command carries the session id; inject it if absent.
        if not params:
            params = {'sessionId': self.session_id}
        elif 'sessionId' not in params:
            params['sessionId'] = self.session_id

        params = self._wrap_value(params)
        response = self.command_executor.execute(driver_command, params)
        if response:
            # check_response raises the appropriate selenium exception if
            # the server reported an error; otherwise unwrap the payload.
            self.error_handler.check_response(response)
            response['value'] = self._unwrap_value(
                response.get('value', None))
            return response
        # If the server doesn't send a response, assume the command was
        # a success
        return {'success': 0, 'value': None, 'sessionId': self.session_id}

    def get(self, url):
        """
        Loads a web page in the current browser session.
        """
        self.execute(Command.GET, {'url': url})

    @property
    def title(self):
        """Returns the title of the current page.

        :Usage:
            driver.title
        """
        resp = self.execute(Command.GET_TITLE)
        # Normalize a null title to the empty string for callers.
        return resp['value'] if resp['value'] is not None else ""

    def find_element_by_id(self, id_):
        """Finds an element by id.

        :Args:
         - id\_ - The id of the element to be found.

        :Usage:
            driver.find_element_by_id('foo')
        """
        return self.find_element(by=By.ID, value=id_)

    def find_elements_by_id(self, id_):
        """
        Finds multiple elements by id.

        :Args:
         - id\_ - The id of the elements to be found.

        :Usage:
            driver.find_element_by_id('foo')
        """
        return self.find_elements(by=By.ID, value=id_)

    def find_element_by_xpath(self, xpath):
        """
        Finds an element by xpath.

        :Args:
         - xpath - The xpath locator of the element to find.

        :Usage:
            driver.find_element_by_xpath('//div/td[1]')
        """
        return self.find_element(by=By.XPATH, value=xpath)

    def find_elements_by_xpath(self, xpath):
        """
        Finds multiple elements by xpath.

        :Args:
         - xpath - The xpath locator of the elements to be found.

        :Usage:
            driver.find_elements_by_xpath("//div[contains(@class, 'foo')]")
        """
        return self.find_elements(by=By.XPATH, value=xpath)

    def find_element_by_link_text(self, link_text):
        """
        Finds an element by link text.

        :Args:
         - link_text: The text of the element to be found.

        :Usage:
            driver.find_element_by_link_text('Sign In')
        """
        return self.find_element(by=By.LINK_TEXT, value=link_text)

    def find_elements_by_link_text(self, text):
        """
        Finds elements by link text.

        :Args:
         - link_text: The text of the elements to be found.

        :Usage:
            driver.find_elements_by_link_text('Sign In')
        """
        return self.find_elements(by=By.LINK_TEXT, value=text)

    def find_element_by_partial_link_text(self, link_text):
        """
        Finds an element by a partial match of its link text.

        :Args:
         - link_text: The text of the element to partially match on.

        :Usage:
            driver.find_element_by_partial_link_text('Sign')
        """
        return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)

    def find_elements_by_partial_link_text(self, link_text):
        """
        Finds elements by a partial match of their link text.

        :Args:
         - link_text: The text of the element to partial match on.

        :Usage:
            driver.find_element_by_partial_link_text('Sign')
        """
        return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)

    def find_element_by_name(self, name):
        """
        Finds an element by name.

        :Args:
         - name: The name of the element to find.

        :Usage:
            driver.find_element_by_name('foo')
        """
        return self.find_element(by=By.NAME, value=name)

    def find_elements_by_name(self, name):
        """
        Finds elements by name.

        :Args:
         - name: The name of the elements to find.

        :Usage:
            driver.find_elements_by_name('foo')
        """
        return self.find_elements(by=By.NAME, value=name)

    def find_element_by_tag_name(self, name):
        """
        Finds an element by tag name.

        :Args:
         - name: The tag name of the element to find.

        :Usage:
            driver.find_element_by_tag_name('foo')
        """
        return self.find_element(by=By.TAG_NAME, value=name)

    def find_elements_by_tag_name(self, name):
        """
        Finds elements by tag name.

        :Args:
         - name: The tag name the use when finding elements.

        :Usage:
            driver.find_elements_by_tag_name('foo')
        """
        return self.find_elements(by=By.TAG_NAME, value=name)

    def find_element_by_class_name(self, name):
        """
        Finds an element by class name.

        :Args:
         - name: The class name of the element to find.

        :Usage:
            driver.find_element_by_class_name('foo')
        """
        return self.find_element(by=By.CLASS_NAME, value=name)

    def find_elements_by_class_name(self, name):
        """
        Finds elements by class name.

        :Args:
         - name: The class name of the elements to find.

        :Usage:
            driver.find_elements_by_class_name('foo')
        """
        return self.find_elements(by=By.CLASS_NAME, value=name)

    def find_element_by_css_selector(self, css_selector):
        """
        Finds an element by css selector.

        :Args:
         - css_selector: The css selector to use when finding elements.

        :Usage:
            driver.find_element_by_css_selector('#foo')
        """
        return self.find_element(by=By.CSS_SELECTOR, value=css_selector)

    def find_elements_by_css_selector(self, css_selector):
        """
        Finds elements by css selector.

        :Args:
         - css_selector: The css selector to use when finding elements.

        :Usage:
            driver.find_elements_by_css_selector('.foo')
        """
        return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)

    def execute_script(self, script, *args):
        """
        Synchronously Executes JavaScript in the current window/frame.

        :Args:
         - script: The JavaScript to execute.
         - \*args: Any applicable arguments for your JavaScript.

        :Usage:
            driver.execute_script('document.title')
        """
        converted_args = list(args)
        return self.execute(Command.EXECUTE_SCRIPT,
            {'script': script, 'args':converted_args})['value']

    def execute_async_script(self, script, *args):
        """
        Asynchronously Executes JavaScript in the current window/frame.

        :Args:
         - script: The JavaScript to execute.
         - \*args: Any applicable arguments for your JavaScript.

        :Usage:
            driver.execute_async_script('document.title')
        """
        converted_args = list(args)
        return self.execute(Command.EXECUTE_ASYNC_SCRIPT,
            {'script': script, 'args':converted_args})['value']

    @property
    def current_url(self):
        """
        Gets the URL of the current page.

        :Usage:
            driver.current_url
        """
        return self.execute(Command.GET_CURRENT_URL)['value']

    @property
    def page_source(self):
        """
        Gets the source of the current page.

        :Usage:
            driver.page_source
        """
        return self.execute(Command.GET_PAGE_SOURCE)['value']

    def close(self):
        """
        Closes the current window.

        :Usage:
            driver.close()
        """
        self.execute(Command.CLOSE)

    def quit(self):
        """
        Quits the driver and closes every associated window.

        :Usage:
            driver.quit()
        """
        try:
            self.execute(Command.QUIT)
        finally:
            # stop_client() runs even if QUIT fails (e.g. dead server).
            self.stop_client()

    @property
    def current_window_handle(self):
        """
        Returns the handle of the current window.

        :Usage:
            driver.current_window_handle
        """
        return self.execute(Command.GET_CURRENT_WINDOW_HANDLE)['value']

    @property
    def window_handles(self):
        """
        Returns the handles of all windows within the current session.

        :Usage:
            driver.window_handles
        """
        return self.execute(Command.GET_WINDOW_HANDLES)['value']

    def maximize_window(self):
        """
        Maximizes the current window that webdriver is using
        """
        self.execute(Command.MAXIMIZE_WINDOW, {"windowHandle": "current"})

    @property
    def switch_to(self):
        # Preferred entry point for frame/window/alert switching.
        return self._switch_to

    #Target Locators
    def switch_to_active_element(self):
        """ Deprecated use driver.switch_to.active_element
        """
        warnings.warn("use driver.switch_to.active_element instead", DeprecationWarning)
        return self._switch_to.active_element

    def switch_to_window(self, window_name):
        """ Deprecated use driver.switch_to.window
        """
        warnings.warn("use driver.switch_to.window instead", DeprecationWarning)
        self._switch_to.window(window_name)

    def switch_to_frame(self, frame_reference):
        """ Deprecated use driver.switch_to.frame
        """
        warnings.warn("use driver.switch_to.frame instead", DeprecationWarning)
        self._switch_to.frame(frame_reference)

    def switch_to_default_content(self):
        """ Deprecated use driver.switch_to.default_content
        """
        warnings.warn("use driver.switch_to.default_content instead", DeprecationWarning)
        self._switch_to.default_content()

    def switch_to_alert(self):
        """ Deprecated use driver.switch_to.alert
        """
        warnings.warn("use driver.switch_to.alert instead", DeprecationWarning)
        return self._switch_to.alert

    #Navigation
    def back(self):
        """
        Goes one step backward in the browser history.

        :Usage:
            driver.back()
        """
        self.execute(Command.GO_BACK)

    def forward(self):
        """
        Goes one step forward in the browser history.

        :Usage:
            driver.forward()
        """
        self.execute(Command.GO_FORWARD)

    def refresh(self):
        """
        Refreshes the current page.

        :Usage:
            driver.refresh()
        """
        self.execute(Command.REFRESH)

    # Options
    def get_cookies(self):
        """
        Returns a set of dictionaries, corresponding to cookies visible in the current session.

        :Usage:
            driver.get_cookies()
        """
        return self.execute(Command.GET_ALL_COOKIES)['value']

    def get_cookie(self, name):
        """
        Get a single cookie by name. Returns the cookie if found, None if not.

        :Usage:
            driver.get_cookie('my_cookie')
        """
        # Linear scan over the full cookie list; the wire protocol has no
        # "get one cookie" command.
        cookies = self.get_cookies()
        for cookie in cookies:
            if cookie['name'] == name:
                return cookie
        return None

    def delete_cookie(self, name):
        """
        Deletes a single cookie with the given name.

        :Usage:
            driver.delete_cookie('my_cookie')
        """
        self.execute(Command.DELETE_COOKIE, {'name': name})

    def delete_all_cookies(self):
        """
        Delete all cookies in the scope of the session.

        :Usage:
            driver.delete_all_cookies()
        """
        self.execute(Command.DELETE_ALL_COOKIES)

    def add_cookie(self, cookie_dict):
        """
        Adds a cookie to your current session.

        :Args:
         - cookie_dict: A dictionary object, with required keys - "name" and "value";
            optional keys - "path", "domain", "secure", "expiry"

        Usage:
            driver.add_cookie({'name' : 'foo', 'value' : 'bar'})
            driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/'})
            driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/', 'secure':True})
        """
        self.execute(Command.ADD_COOKIE, {'cookie': cookie_dict})

    # Timeouts
    def implicitly_wait(self, time_to_wait):
        """
        Sets a sticky timeout to implicitly wait for an element to be found,
        or a command to complete. This method only needs to be called one
        time per session. To set the timeout for calls to
        execute_async_script, see set_script_timeout.

        :Args:
         - time_to_wait: Amount of time to wait (in seconds)

        :Usage:
            driver.implicitly_wait(30)
        """
        # The wire protocol expects milliseconds.
        self.execute(Command.IMPLICIT_WAIT, {'ms': float(time_to_wait) * 1000})

    def set_script_timeout(self, time_to_wait):
        """
        Set the amount of time that the script should wait during an
        execute_async_script call before throwing an error.

        :Args:
         - time_to_wait: The amount of time to wait (in seconds)

        :Usage:
            driver.set_script_timeout(30)
        """
        self.execute(Command.SET_SCRIPT_TIMEOUT,
            {'ms': float(time_to_wait) * 1000})

    def set_page_load_timeout(self, time_to_wait):
        """
        Set the amount of time to wait for a page load to complete
        before throwing an error.

        :Args:
         - time_to_wait: The amount of time to wait

        :Usage:
            driver.set_page_load_timeout(30)
        """
        self.execute(Command.SET_TIMEOUTS,
            {'ms': float(time_to_wait) * 1000, 'type':'page load'})

    def find_element(self, by=By.ID, value=None):
        """
        'Private' method used by the find_element_by_* methods.

        :Usage:
            Use the corresponding find_element_by_* instead of this.

        :rtype: WebElement
        """
        # `str` may be `basestring` here (see shim above the class), so this
        # accepts unicode locators on Python 2 as well.
        if not By.is_valid(by) or not isinstance(value, str):
            raise InvalidSelectorException("Invalid locator values passed in")
        return self.execute(Command.FIND_ELEMENT,
            {'using': by, 'value': value})['value']

    def find_elements(self, by=By.ID, value=None):
        """
        'Private' method used by the find_elements_by_* methods.

        :Usage:
            Use the corresponding find_elements_by_* instead of this.

        :rtype: list of WebElement
        """
        if not By.is_valid(by) or not isinstance(value, str):
            raise InvalidSelectorException("Invalid locator values passed in")
        return self.execute(Command.FIND_ELEMENTS,
            {'using': by, 'value': value})['value']

    @property
    def desired_capabilities(self):
        """
        returns the drivers current desired capabilities being used
        """
        return self.capabilities

    def get_screenshot_as_file(self, filename):
        """
        Gets the screenshot of the current window. Returns False if there is
        any IOError, else returns True. Use full paths in your filename.

        :Args:
         - filename: The full path you wish to save your screenshot to.

        :Usage:
            driver.get_screenshot_as_file('/Screenshots/foo.png')
        """
        png = self.get_screenshot_as_png()
        try:
            with open(filename, 'wb') as f:
                f.write(png)
        except IOError:
            return False
        finally:
            # Drop the (potentially large) decoded PNG bytes promptly.
            del png
        return True

    save_screenshot = get_screenshot_as_file

    def get_screenshot_as_png(self):
        """
        Gets the screenshot of the current window as a binary data.

        :Usage:
            driver.get_screenshot_as_png()
        """
        return base64.b64decode(self.get_screenshot_as_base64().encode('ascii'))

    def get_screenshot_as_base64(self):
        """
        Gets the screenshot of the current window as a base64 encoded string
        which is useful in embedded images in HTML.

        :Usage:
            driver.get_screenshot_as_base64()
        """
        return self.execute(Command.SCREENSHOT)['value']

    def set_window_size(self, width, height, windowHandle='current'):
        """
        Sets the width and height of the current window. (window.resizeTo)

        :Args:
         - width: the width in pixels to set the window to
         - height: the height in pixels to set the window to

        :Usage:
            driver.set_window_size(800,600)
        """
        self.execute(Command.SET_WINDOW_SIZE, {'width': int(width), 'height': int(height),
            'windowHandle': windowHandle})

    def get_window_size(self, windowHandle='current'):
        """
        Gets the width and height of the current window.

        :Usage:
            driver.get_window_size()
        """
        return self.execute(Command.GET_WINDOW_SIZE,
            {'windowHandle': windowHandle})['value']

    def set_window_position(self, x, y, windowHandle='current'):
        """
        Sets the x,y position of the current window. (window.moveTo)

        :Args:
         - x: the x-coordinate in pixels to set the window position
         - y: the y-coordinate in pixels to set the window position

        :Usage:
            driver.set_window_position(0,0)
        """
        self.execute(Command.SET_WINDOW_POSITION, {'x': int(x), 'y': int(y),
            'windowHandle': windowHandle})

    def get_window_position(self, windowHandle='current'):
        """
        Gets the x,y position of the current window.

        :Usage:
            driver.get_window_position()
        """
        return self.execute(Command.GET_WINDOW_POSITION,
            {'windowHandle': windowHandle})['value']

    @property
    def orientation(self):
        """
        Gets the current orientation of the device

        :Usage:
            orientation = driver.orientation
        """
        return self.execute(Command.GET_SCREEN_ORIENTATION)['value']

    @orientation.setter
    def orientation(self, value):
        """
        Sets the current orientation of the device

        :Args:
         - value: orientation to set it to.

        :Usage:
            driver.orientation = 'landscape'
        """
        allowed_values = ['LANDSCAPE', 'PORTRAIT']
        # Comparison is case-insensitive, but the original (possibly
        # lowercase) value is what gets sent to the server.
        if value.upper() in allowed_values:
            self.execute(Command.SET_SCREEN_ORIENTATION, {'orientation': value})['value']
        else:
            raise WebDriverException("You can only set the orientation to 'LANDSCAPE' and 'PORTRAIT'")

    def is_online(self):
        """ Returns a boolean if the browser is online or offline"""
        return self.execute(Command.IS_BROWSER_ONLINE)['value']

    @property
    def application_cache(self):
        """ Returns a ApplicationCache Object to interact with the browser app cache"""
        return ApplicationCache(self)

    @property
    def log_types(self):
        """
        Gets a list of the available log types

        :Usage:
            driver.log_types
        """
        return self.execute(Command.GET_AVAILABLE_LOG_TYPES)['value']

    def get_log(self, log_type):
        """
        Gets the log for a given log type

        :Args:
         - log_type: type of log that which will be returned

        :Usage:
            driver.get_log('browser')
            driver.get_log('driver')
            driver.get_log('client')
            driver.get_log('server')
        """
        return self.execute(Command.GET_LOG, {'type': log_type})['value']
|
|
# -*- coding: utf-8 -*-
'''
Management of the Salt scheduler
==============================================
.. code-block:: yaml
job3:
schedule.present:
- function: test.ping
- seconds: 3600
- splay: 10
This will schedule the command: test.ping every 3600 seconds
(every hour) splaying the time between 0 and 10 seconds
job2:
schedule.present:
- function: test.ping
- seconds: 15
- splay:
- start: 10
- end: 20
This will schedule the command: test.ping every 15 seconds
splaying the time between 10 and 20 seconds
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- when:
- Monday 5:00pm
- Tuesday 3:00pm
- Wednesday 5:00pm
- Thursday 3:00pm
- Friday 5:00pm
This will schedule the command: state.sls httpd test=True at 5pm on Monday,
Wednesday and Friday, and 3pm on Tuesday and Thursday. Requires that
python-dateutil is installed on the minion.
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- cron: '*/5 * * * *'
Scheduled jobs can also be specified using the format used by cron. This will
schedule the command: state.sls httpd test=True to run every 5 minutes. Requires
that python-croniter is installed on the minion.
job1:
schedule.present:
- function: state.sls
- job_args:
- httpd
- job_kwargs:
test: True
- when:
- Monday 5:00pm
- Tuesday 3:00pm
- Wednesday 5:00pm
- Thursday 3:00pm
- Friday 5:00pm
- returner: xmpp
- return_config: xmpp_state_run
This will schedule the command: state.sls httpd test=True at 5pm on Monday,
Wednesday and Friday, and 3pm on Tuesday and Thursday. Using the xmpp returner
to return the results of the scheduled job, with the alternative configuration
options found in the xmpp_state_run section.
'''
def present(name,
            **kwargs):
    '''
    Ensure a job is present in the schedule

    name
        The unique name that is given to the scheduled job.

    seconds
        The scheduled job will be executed after the specified
        number of seconds have passed.

    minutes
        The scheduled job will be executed after the specified
        number of minutes have passed.

    hours
        The scheduled job will be executed after the specified
        number of hours have passed.

    days
        The scheduled job will be executed after the specified
        number of days have passed.

    when
        This will schedule the job at the specified time(s).
        The when parameter must be a single value or a dictionary
        with the date string(s) using the dateutil format.
        Requires python-dateutil.

    cron
        This will schedule the job at the specified time(s)
        using the crontab format.
        Requires python-croniter.

    function
        The function that should be executed by the scheduled job.

    job_args
        The arguments that will be used by the scheduled job.

    job_kwargs
        The keyword arguments that will be used by the scheduled job.

    maxrunning
        Ensure that there are no more than N copies of a particular job running.

    jid_include
        Include the job into the job cache.

    splay
        The amount of time in seconds to splay a scheduled job.
        Can be specified as a single value in seconds or as a dictionary
        range with 'start' and 'end' values.

    range
        This will schedule the command within the range specified.
        The range parameter must be a dictionary with the date strings
        using the dateutil format. Requires python-dateutil.

    once
        This will schedule a job to run once on the specified date.

    once_fmt
        The default date format is ISO 8601 but can be overridden by
        also specifying the ``once_fmt`` option.

    enabled
        Whether the job should be enabled or disabled. Value should be a boolean.

    return_job
        Whether to return information to the Salt master upon job completion.

    metadata
        Using the metadata parameter special values can be associated with
        a scheduled job. These values are not used in the execution of the job,
        but can be used to search for specific jobs later if combined with the
        return_job parameter. The metadata parameter must be specified as a
        dictionary, otherwise it will be ignored.

    returner
        The returner to use to return the results of the scheduled job.

    return_config
        The alternative configuration to use for returner configuration options.

    persist
        Whether the job should persist between minion restarts, defaults to True.
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}

    # Fetch the full schedule (including disabled jobs) as a dict keyed by name.
    current_schedule = __salt__['schedule.list'](show_all=True, return_yaml=False)

    if name in current_schedule:
        # Job already exists: build the candidate item and compare.
        new_item = __salt__['schedule.build_schedule_item'](name, **kwargs)

        # See if the new_item is valid; a dict with result=False carries an error.
        if isinstance(new_item, dict):
            if 'result' in new_item and not new_item['result']:
                ret['result'] = new_item['result']
                ret['comment'] = new_item['comment']
                return ret

        # The schedule.list gives us an item that is guaranteed to have an
        # 'enabled' argument. Before comparing, add 'enabled' if it's not
        # available (assume True, like schedule.list does)
        if 'enabled' not in new_item:
            new_item['enabled'] = True

        if new_item == current_schedule[name]:
            ret['comment'].append('Job {0} in correct state'.format(name))
        else:
            if 'test' in __opts__ and __opts__['test']:
                # Dry run: schedule.modify reports what would change.
                kwargs['test'] = True
                result = __salt__['schedule.modify'](name, **kwargs)
                ret['comment'].append(result['comment'])
                ret['changes'] = result['changes']
            else:
                result = __salt__['schedule.modify'](name, **kwargs)
                if not result['result']:
                    ret['result'] = result['result']
                    ret['comment'] = result['comment']
                    return ret
                else:
                    ret['comment'].append('Modifying job {0} in schedule'.format(name))
                    ret['changes'] = result['changes']
    else:
        # Job does not exist yet: add it (or report in test mode).
        if 'test' in __opts__ and __opts__['test']:
            kwargs['test'] = True
            result = __salt__['schedule.add'](name, **kwargs)
            ret['comment'].append(result['comment'])
        else:
            result = __salt__['schedule.add'](name, **kwargs)
            if not result['result']:
                ret['result'] = result['result']
                ret['comment'] = result['comment']
                return ret
            else:
                ret['comment'].append('Adding new job {0} to schedule'.format(name))

    ret['comment'] = '\n'.join(ret['comment'])
    return ret
def absent(name, **kwargs):
    '''
    Ensure a job is absent from the schedule

    name
        The unique name that is given to the scheduled job.

    persist
        Whether the job should persist between minion restarts, defaults to True.
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}

    # Look up the complete schedule, including disabled jobs.
    schedule_items = __salt__['schedule.list'](show_all=True, return_yaml=False)

    if name not in schedule_items:
        # Nothing to do; the job is already gone.
        ret['comment'].append('Job {0} not present in schedule'.format(name))
        ret['comment'] = '\n'.join(ret['comment'])
        return ret

    if 'test' in __opts__ and __opts__['test']:
        # Test mode: ask schedule.delete for its dry-run comment only.
        kwargs['test'] = True
        outcome = __salt__['schedule.delete'](name, **kwargs)
        ret['comment'].append(outcome['comment'])
    else:
        outcome = __salt__['schedule.delete'](name, **kwargs)
        if not outcome['result']:
            # Deletion failed; propagate the error verbatim.
            ret['result'] = outcome['result']
            ret['comment'] = outcome['comment']
            return ret
        ret['comment'].append('Removed job {0} from schedule'.format(name))

    ret['comment'] = '\n'.join(ret['comment'])
    return ret
def enabled(name, **kwargs):
    '''
    Ensure a job is enabled in the schedule

    name
        The unique name that is given to the scheduled job.

    persist
        Whether the job should persist between minion restarts, defaults to True.
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}

    # Look up the complete schedule, including disabled jobs.
    schedule_items = __salt__['schedule.list'](show_all=True, return_yaml=False)

    if name not in schedule_items:
        ret['comment'].append('Job {0} not present in schedule'.format(name))
        ret['comment'] = '\n'.join(ret['comment'])
        return ret

    if 'test' in __opts__ and __opts__['test']:
        # Test mode: report what enable_job would do without applying it.
        kwargs['test'] = True
        outcome = __salt__['schedule.enable_job'](name, **kwargs)
        ret['comment'].append(outcome['comment'])
    else:
        outcome = __salt__['schedule.enable_job'](name, **kwargs)
        if not outcome['result']:
            # Enabling failed; propagate the error verbatim.
            ret['result'] = outcome['result']
            ret['comment'] = outcome['comment']
            return ret
        ret['comment'].append('Enabled job {0} from schedule'.format(name))

    ret['comment'] = '\n'.join(ret['comment'])
    return ret
def disabled(name, **kwargs):
    '''
    Ensure a job is disabled in the schedule

    name
        The unique name that is given to the scheduled job.

    persist
        Whether the job should persist between minion restarts, defaults to True.
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}

    # Look up the complete schedule, including disabled jobs.
    schedule_items = __salt__['schedule.list'](show_all=True, return_yaml=False)

    if name not in schedule_items:
        ret['comment'].append('Job {0} not present in schedule'.format(name))
        ret['comment'] = '\n'.join(ret['comment'])
        return ret

    if 'test' in __opts__ and __opts__['test']:
        # Test mode: report what disable_job would do without applying it.
        kwargs['test'] = True
        outcome = __salt__['schedule.disable_job'](name, **kwargs)
        ret['comment'].append(outcome['comment'])
    else:
        outcome = __salt__['schedule.disable_job'](name, **kwargs)
        if not outcome['result']:
            # Disabling failed; propagate the error verbatim.
            ret['result'] = outcome['result']
            ret['comment'] = outcome['comment']
            return ret
        ret['comment'].append('Disabled job {0} from schedule'.format(name))

    ret['comment'] = '\n'.join(ret['comment'])
    return ret
|
|
from __future__ import annotations
from collections import defaultdict
import csv
import datetime
from enum import Enum
import itertools
from typing import (
Any,
Callable,
DefaultDict,
Iterable,
Sequence,
cast,
)
import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas._libs.ops as libops
import pandas._libs.parsers as parsers
from pandas._libs.parsers import STR_NA_VALUES
from pandas._libs.tslibs import parsing
from pandas._typing import (
DtypeArg,
FilePathOrBuffer,
final,
)
from pandas.errors import (
ParserError,
ParserWarning,
)
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
ensure_object,
ensure_str,
is_bool_dtype,
is_categorical_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms
from pandas.core.arrays import Categorical
from pandas.core.indexes.api import (
Index,
MultiIndex,
ensure_index_from_sequences,
)
from pandas.core.series import Series
from pandas.core.tools import datetimes as tools
from pandas.io.common import (
IOHandles,
get_handle,
)
from pandas.io.date_converters import generic_parser
# Default values shared by every parser engine; individual readers override
# these with user-supplied keyword arguments.
parser_defaults = {
    "delimiter": None,
    "escapechar": None,
    "quotechar": '"',
    "quoting": csv.QUOTE_MINIMAL,
    "doublequote": True,
    "skipinitialspace": False,
    "lineterminator": None,
    "header": "infer",
    "index_col": None,
    "names": None,
    "prefix": None,
    "skiprows": None,
    "skipfooter": 0,
    "nrows": None,
    "na_values": None,
    "keep_default_na": True,
    "true_values": None,
    "false_values": None,
    "converters": None,
    "dtype": None,
    "cache_dates": True,
    "thousands": None,
    "comment": None,
    "decimal": ".",
    # 'engine': 'c',
    "parse_dates": False,
    "keep_date_col": False,
    "dayfirst": False,
    "date_parser": None,
    "usecols": None,
    # 'iterator': False,
    "chunksize": None,
    "verbose": False,
    "encoding": None,
    "squeeze": False,
    "compression": None,
    "mangle_dupe_cols": True,
    "infer_datetime_format": False,
    "skip_blank_lines": True,
    "encoding_errors": "strict",
    "on_bad_lines": "error",
}
class ParserBase:
    """Common functionality shared by the parser engines (C and Python)."""

    class BadLineHandleMethod(Enum):
        # How to react when a row has an unexpected number of fields.
        ERROR = 0
        WARN = 1
        SKIP = 2

    # True when index columns are implied rather than given via index_col.
    _implicit_index: bool = False
    # Flag toggled after the first chunk has been read; set in __init__.
    _first_chunk: bool
    def __init__(self, kwds):
        """Initialize shared parser state from the read_csv-style kwargs dict."""
        # Column naming configuration.
        self.names = kwds.get("names")
        self.orig_names: list | None = None
        self.prefix = kwds.pop("prefix", None)

        self.index_col = kwds.get("index_col", None)
        self.unnamed_cols: set = set()
        self.index_names: list | None = None
        self.col_names = None

        # Date-parsing configuration; parse_dates is validated/normalized here.
        self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
        self.date_parser = kwds.pop("date_parser", None)
        self.dayfirst = kwds.pop("dayfirst", False)
        self.keep_date_col = kwds.pop("keep_date_col", False)

        # NA handling configuration.
        self.na_values = kwds.get("na_values")
        self.na_fvalues = kwds.get("na_fvalues")
        self.na_filter = kwds.get("na_filter", False)
        self.keep_default_na = kwds.get("keep_default_na", True)

        self.true_values = kwds.get("true_values")
        self.false_values = kwds.get("false_values")
        self.mangle_dupe_cols = kwds.get("mangle_dupe_cols", True)
        self.infer_datetime_format = kwds.pop("infer_datetime_format", False)
        self.cache_dates = kwds.pop("cache_dates", True)

        # Converter used for all date parsing performed by this parser.
        self._date_conv = _make_date_converter(
            date_parser=self.date_parser,
            dayfirst=self.dayfirst,
            infer_datetime_format=self.infer_datetime_format,
            cache_dates=self.cache_dates,
        )

        # validate header options for mi
        self.header = kwds.get("header")
        if isinstance(self.header, (list, tuple, np.ndarray)):
            # Multi-index header: every entry must be a non-negative integer,
            # and usecols/names are incompatible with it.
            if not all(map(is_integer, self.header)):
                raise ValueError("header must be integer or list of integers")
            if any(i < 0 for i in self.header):
                raise ValueError(
                    "cannot specify multi-index header with negative integers"
                )
            if kwds.get("usecols"):
                raise ValueError(
                    "cannot specify usecols when specifying a multi-index header"
                )
            if kwds.get("names"):
                raise ValueError(
                    "cannot specify names when specifying a multi-index header"
                )

            # validate index_col that only contains integers
            if self.index_col is not None:
                is_sequence = isinstance(self.index_col, (list, tuple, np.ndarray))
                if not (
                    is_sequence
                    and all(map(is_integer, self.index_col))
                    or is_integer(self.index_col)
                ):
                    raise ValueError(
                        "index_col must only contain row numbers "
                        "when specifying a multi-index header"
                    )
        elif self.header is not None:
            # GH 27394
            if self.prefix is not None:
                raise ValueError(
                    "Argument prefix must be None if argument header is not None"
                )
            # GH 16338
            elif not is_integer(self.header):
                raise ValueError("header must be integer or list of integers")
            # GH 27779
            elif self.header < 0:
                raise ValueError(
                    "Passing negative integer to header is invalid. "
                    "For no header, use header=None instead"
                )

        self._name_processed = False

        self._first_chunk = True

        self.usecols, self.usecols_dtype = self._validate_usecols_arg(kwds["usecols"])

        self.handles: IOHandles | None = None

        # Fallback to error to pass a sketchy test(test_override_set_noconvert_columns)
        # Normally, this arg would get pre-processed earlier on
        self.on_bad_lines = kwds.get("on_bad_lines", self.BadLineHandleMethod.ERROR)
def _open_handles(self, src: FilePathOrBuffer, kwds: dict[str, Any]) -> None:
"""
Let the readers open IOHandles after they are done with their potential raises.
"""
self.handles = get_handle(
src,
"r",
encoding=kwds.get("encoding", None),
compression=kwds.get("compression", None),
memory_map=kwds.get("memory_map", False),
storage_options=kwds.get("storage_options", None),
errors=kwds.get("encoding_errors", "strict"),
)
def _validate_parse_dates_presence(self, columns: list[str]) -> None:
"""
Check if parse_dates are in columns.
If user has provided names for parse_dates, check if those columns
are available.
Parameters
----------
columns : list
List of names of the dataframe.
Raises
------
ValueError
If column to parse_date is not in dataframe.
"""
cols_needed: Iterable
if is_dict_like(self.parse_dates):
cols_needed = itertools.chain(*self.parse_dates.values())
elif is_list_like(self.parse_dates):
# a column in parse_dates could be represented
# ColReference = Union[int, str]
# DateGroups = List[ColReference]
# ParseDates = Union[DateGroups, List[DateGroups],
# Dict[ColReference, DateGroups]]
cols_needed = itertools.chain.from_iterable(
col if is_list_like(col) else [col] for col in self.parse_dates
)
else:
cols_needed = []
# get only columns that are references using names (str), not by index
missing_cols = ", ".join(
sorted(
{
col
for col in cols_needed
if isinstance(col, str) and col not in columns
}
)
)
if missing_cols:
raise ValueError(
f"Missing column provided to 'parse_dates': '{missing_cols}'"
)
def close(self):
if self.handles is not None:
self.handles.close()
@final
@property
def _has_complex_date_col(self) -> bool:
return isinstance(self.parse_dates, dict) or (
isinstance(self.parse_dates, list)
and len(self.parse_dates) > 0
and isinstance(self.parse_dates[0], list)
)
@final
def _should_parse_dates(self, i: int) -> bool:
if isinstance(self.parse_dates, bool):
return self.parse_dates
else:
if self.index_names is not None:
name = self.index_names[i]
else:
name = None
j = i if self.index_col is None else self.index_col[i]
if is_scalar(self.parse_dates):
return (j == self.parse_dates) or (
name is not None and name == self.parse_dates
)
else:
return (j in self.parse_dates) or (
name is not None and name in self.parse_dates
)
    @final
    def _extract_multi_indexer_columns(
        self, header, index_names, col_names, passed_names: bool = False
    ):
        """
        extract and return the names, index_names, col_names
        header is a list-of-lists returned from the parsers
        """
        if len(header) < 2:
            # A single header row: nothing multi-index about the columns.
            return header[0], index_names, col_names, passed_names

        # the names are the tuples of the header that are not the index cols
        # 0 is the name of the index, assuming index_col is a list of column
        # numbers
        ic = self.index_col
        if ic is None:
            ic = []

        if not isinstance(ic, (list, tuple, np.ndarray)):
            ic = [ic]
        sic = set(ic)

        # clean the index_names
        # NOTE: this mutates `header` — the last row is consumed as index names.
        index_names = header.pop(-1)
        index_names, _, _ = self._clean_index_names(
            index_names, self.index_col, self.unnamed_cols
        )

        # extract the columns
        field_count = len(header[0])

        # Drop the positions that belong to the index when forming column tuples.
        def extract(r):
            return tuple(r[i] for i in range(field_count) if i not in sic)

        columns = list(zip(*(extract(r) for r in header)))
        names = ic + columns

        # If we find unnamed columns all in a single
        # level, then our header was too long.
        for n in range(len(columns[0])):
            if all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
                header = ",".join(str(x) for x in self.header)
                raise ParserError(
                    f"Passed header=[{header}] are too many rows "
                    "for this multi_index of columns"
                )

        # Clean the column names (if we have an index_col).
        if len(ic):
            col_names = [
                r[0] if ((r[0] is not None) and r[0] not in self.unnamed_cols) else None
                for r in header
            ]
        else:
            col_names = [None] * len(header)

        passed_names = True

        return names, index_names, col_names, passed_names
    @final
    def _maybe_dedup_names(self, names):
        """Mangle duplicate column names (``x`` -> ``x.1``) when configured."""
        # see gh-7160 and gh-9424: this helps to provide
        # immediate alleviation of the duplicate names
        # issue and appears to be satisfactory to users,
        # but ultimately, not needing to butcher the names
        # would be nice!
        if self.mangle_dupe_cols:
            names = list(names)  # so we can index
            counts: DefaultDict[int | str | tuple, int] = defaultdict(int)
            is_potential_mi = _is_potential_multi_index(names, self.index_col)

            for i, col in enumerate(names):
                cur_count = counts[col]

                # Keep appending ".<count>" until the mangled name itself
                # has not been seen before.
                while cur_count > 0:
                    counts[col] = cur_count + 1

                    if is_potential_mi:
                        # Mangle only the last level of the column tuple.
                        col = col[:-1] + (f"{col[-1]}.{cur_count}",)
                    else:
                        col = f"{col}.{cur_count}"
                    cur_count = counts[col]

                names[i] = col
                counts[col] = cur_count + 1

        return names
@final
def _maybe_make_multi_index_columns(self, columns, col_names=None):
# possibly create a column mi here
if _is_potential_multi_index(columns):
columns = MultiIndex.from_tuples(columns, names=col_names)
return columns
    @final
    def _make_index(self, data, alldata, columns, indexnamerow=False):
        """Build the row index for a chunk and possibly MultiIndex-ify columns."""
        if not is_index_col(self.index_col) or not self.index_col:
            index = None

        elif not self._has_complex_date_col:
            # Plain index columns referenced by position.
            index = self._get_simple_index(alldata, columns)
            index = self._agg_index(index)
        elif self._has_complex_date_col:
            # Index built from (possibly combined) date columns.
            if not self._name_processed:
                (self.index_names, _, self.index_col) = self._clean_index_names(
                    list(columns), self.index_col, self.unnamed_cols
                )
                self._name_processed = True
            index = self._get_complex_date_index(data, columns)
            # Dates were already parsed above; don't parse again.
            index = self._agg_index(index, try_parse_dates=False)

        # add names for the index
        if indexnamerow:
            coffset = len(indexnamerow) - len(columns)
            assert index is not None
            index = index.set_names(indexnamerow[:coffset])

        # maybe create a mi on the columns
        columns = self._maybe_make_multi_index_columns(columns, self.col_names)

        return index, columns
@final
def _get_simple_index(self, data, columns):
def ix(col):
if not isinstance(col, str):
return col
raise ValueError(f"Index {col} invalid")
to_remove = []
index = []
for idx in self.index_col:
i = ix(idx)
to_remove.append(i)
index.append(data[i])
# remove index items from content and columns, don't pop in
# loop
for i in sorted(to_remove, reverse=True):
data.pop(i)
if not self._implicit_index:
columns.pop(i)
return index
@final
def _get_complex_date_index(self, data, col_names):
def _get_name(icol):
if isinstance(icol, str):
return icol
if col_names is None:
raise ValueError(f"Must supply column order to use {icol!s} as index")
for i, c in enumerate(col_names):
if i == icol:
return c
to_remove = []
index = []
for idx in self.index_col:
name = _get_name(idx)
to_remove.append(name)
index.append(data[name])
# remove index items from content and columns, don't pop in
# loop
for c in sorted(to_remove, reverse=True):
data.pop(c)
col_names.remove(c)
return index
    @final
    def _agg_index(self, index, try_parse_dates: bool = True) -> Index:
        """Convert raw index arrays into an Index, applying date and NA handling."""
        arrays = []

        for i, arr in enumerate(index):
            if try_parse_dates and self._should_parse_dates(i):
                arr = self._date_conv(arr)

            if self.na_filter:
                col_na_values = self.na_values
                col_na_fvalues = self.na_fvalues
            else:
                col_na_values = set()
                col_na_fvalues = set()

            if isinstance(self.na_values, dict):
                # Per-column NA values take precedence over the global set.
                assert self.index_names is not None
                col_name = self.index_names[i]
                if col_name is not None:
                    col_na_values, col_na_fvalues = _get_na_values(
                        col_name, self.na_values, self.na_fvalues, self.keep_default_na
                    )

            arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
            arrays.append(arr)

        names = self.index_names
        index = ensure_index_from_sequences(arrays, names)

        return index
    @final
    def _convert_to_ndarrays(
        self,
        dct: dict,
        na_values,
        na_fvalues,
        verbose: bool = False,
        converters=None,
        dtypes=None,
    ):
        """Convert raw column data to ndarrays, applying converters, NA and dtype."""
        result = {}
        for c, values in dct.items():
            conv_f = None if converters is None else converters.get(c, None)
            if isinstance(dtypes, dict):
                cast_type = dtypes.get(c, None)
            else:
                # single dtype or None
                cast_type = dtypes

            if self.na_filter:
                col_na_values, col_na_fvalues = _get_na_values(
                    c, na_values, na_fvalues, self.keep_default_na
                )
            else:
                col_na_values, col_na_fvalues = set(), set()

            if conv_f is not None:
                # conv_f applied to data before inference
                if cast_type is not None:
                    warnings.warn(
                        (
                            "Both a converter and dtype were specified "
                            f"for column {c} - only the converter will be used"
                        ),
                        ParserWarning,
                        stacklevel=7,
                    )

                try:
                    values = lib.map_infer(values, conv_f)
                except ValueError:
                    # error: Argument 2 to "isin" has incompatible type "List[Any]";
                    # expected "Union[Union[ExtensionArray, ndarray], Index, Series]"
                    mask = algorithms.isin(
                        values, list(na_values)  # type: ignore[arg-type]
                    ).view(np.uint8)
                    values = lib.map_infer_mask(values, conv_f, mask)

                # Converter output: infer type but never coerce to numeric/bool.
                cvals, na_count = self._infer_types(
                    values, set(col_na_values) | col_na_fvalues, try_num_bool=False
                )
            else:
                is_ea = is_extension_array_dtype(cast_type)
                is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type)
                # skip inference if specified dtype is object
                # or casting to an EA
                try_num_bool = not (cast_type and is_str_or_ea_dtype)

                # general type inference and conversion
                cvals, na_count = self._infer_types(
                    values, set(col_na_values) | col_na_fvalues, try_num_bool
                )

                # type specified in dtype param or cast_type is an EA
                if cast_type and (
                    not is_dtype_equal(cvals, cast_type)
                    or is_extension_array_dtype(cast_type)
                ):
                    if not is_ea and na_count > 0:
                        try:
                            if is_bool_dtype(cast_type):
                                raise ValueError(
                                    f"Bool column has NA values in column {c}"
                                )
                        except (AttributeError, TypeError):
                            # invalid input to is_bool_dtype
                            pass
                    cast_type = pandas_dtype(cast_type)
                    cvals = self._cast_types(cvals, cast_type, c)

            result[c] = cvals
            if verbose and na_count:
                print(f"Filled {na_count} NA values in column {c!s}")
        return result
@final
def _set_noconvert_dtype_columns(
self, col_indices: list[int], names: list[int | str | tuple]
) -> set[int]:
"""
Set the columns that should not undergo dtype conversions.
Currently, any column that is involved with date parsing will not
undergo such conversions. If usecols is specified, the positions of the columns
not to cast is relative to the usecols not to all columns.
Parameters
----------
col_indices: The indices specifying order and positions of the columns
names: The column names which order is corresponding with the order
of col_indices
Returns
-------
A set of integers containing the positions of the columns not to convert.
"""
usecols: list[int] | list[str] | None
noconvert_columns = set()
if self.usecols_dtype == "integer":
# A set of integers will be converted to a list in
# the correct order every single time.
usecols = sorted(self.usecols)
elif callable(self.usecols) or self.usecols_dtype not in ("empty", None):
# The names attribute should have the correct columns
# in the proper order for indexing with parse_dates.
usecols = col_indices
else:
# Usecols is empty.
usecols = None
def _set(x) -> int:
if usecols is not None and is_integer(x):
x = usecols[x]
if not is_integer(x):
x = col_indices[names.index(x)]
return x
if isinstance(self.parse_dates, list):
for val in self.parse_dates:
if isinstance(val, list):
for k in val:
noconvert_columns.add(_set(k))
else:
noconvert_columns.add(_set(val))
elif isinstance(self.parse_dates, dict):
for val in self.parse_dates.values():
if isinstance(val, list):
for k in val:
noconvert_columns.add(_set(k))
else:
noconvert_columns.add(_set(val))
elif self.parse_dates:
if isinstance(self.index_col, list):
for k in self.index_col:
noconvert_columns.add(_set(k))
elif self.index_col is not None:
noconvert_columns.add(_set(self.index_col))
return noconvert_columns
    def _infer_types(self, values, na_values, try_num_bool=True):
        """
        Infer types of values, possibly casting

        Parameters
        ----------
        values : ndarray
        na_values : set
        try_num_bool : bool, default try
           try to cast values to numeric (first preference) or boolean

        Returns
        -------
        converted : ndarray
        na_count : int
        """
        na_count = 0
        if issubclass(values.dtype.type, (np.number, np.bool_)):
            # Already numeric/bool: only mask configured NA values.
            # error: Argument 2 to "isin" has incompatible type "List[Any]"; expected
            # "Union[Union[ExtensionArray, ndarray], Index, Series]"
            mask = algorithms.isin(values, list(na_values))  # type: ignore[arg-type]
            na_count = mask.sum()
            if na_count > 0:
                if is_integer_dtype(values):
                    # Upcast so NaN can be represented.
                    values = values.astype(np.float64)
                np.putmask(values, mask, np.nan)
            return values, na_count

        if try_num_bool and is_object_dtype(values.dtype):
            # exclude e.g DatetimeIndex here
            try:
                result, _ = lib.maybe_convert_numeric(values, na_values, False)
            except (ValueError, TypeError):
                # e.g. encountering datetime string gets ValueError
                # TypeError can be raised in floatify
                result = values
                na_count = parsers.sanitize_objects(result, na_values, False)
            else:
                na_count = isna(result).sum()
        else:
            result = values
            if values.dtype == np.object_:
                na_count = parsers.sanitize_objects(values, na_values, False)

        if result.dtype == np.object_ and try_num_bool:
            # Still object: try boolean conversion using true/false value lists.
            result, _ = libops.maybe_convert_bool(
                np.asarray(values),
                true_values=self.true_values,
                false_values=self.false_values,
            )

        return result, na_count
    def _cast_types(self, values, cast_type, column):
        """
        Cast values to specified type

        Parameters
        ----------
        values : ndarray
        cast_type : string or np.dtype
           dtype to cast values to
        column : string
            column name - used only for error reporting

        Returns
        -------
        converted : ndarray
        """
        if is_categorical_dtype(cast_type):
            known_cats = (
                isinstance(cast_type, CategoricalDtype)
                and cast_type.categories is not None
            )

            if not is_object_dtype(values) and not known_cats:
                # TODO: this is for consistency with
                # c-parser which parses all categories
                # as strings
                values = astype_nansafe(values, np.dtype(str))

            cats = Index(values).unique().dropna()
            values = Categorical._from_inferred_categories(
                cats, cats.get_indexer(values), cast_type, true_values=self.true_values
            )

        # use the EA's implementation of casting
        elif is_extension_array_dtype(cast_type):
            # ensure cast_type is an actual dtype and not a string
            cast_type = pandas_dtype(cast_type)
            array_type = cast_type.construct_array_type()
            try:
                if is_bool_dtype(cast_type):
                    # Boolean EAs need the configured true/false string values.
                    return array_type._from_sequence_of_strings(
                        values,
                        dtype=cast_type,
                        true_values=self.true_values,
                        false_values=self.false_values,
                    )
                else:
                    return array_type._from_sequence_of_strings(values, dtype=cast_type)
            except NotImplementedError as err:
                raise NotImplementedError(
                    f"Extension Array: {array_type} must implement "
                    "_from_sequence_of_strings in order to be used in parser methods"
                ) from err

        else:
            try:
                values = astype_nansafe(values, cast_type, copy=True, skipna=True)
            except ValueError as err:
                raise ValueError(
                    f"Unable to convert column {column} to type {cast_type}"
                ) from err
        return values
def _do_date_conversions(self, names, data):
# returns data, columns
if self.parse_dates is not None:
data, names = _process_date_conversion(
data,
self._date_conv,
self.parse_dates,
self.index_col,
self.index_names,
names,
keep_date_col=self.keep_date_col,
)
return names, data
def _evaluate_usecols(self, usecols, names):
"""
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
"""
if callable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols
def _validate_usecols_names(self, usecols, names):
"""
Validates that all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
"""
missing = [c for c in usecols if c not in names]
if len(missing) > 0:
raise ValueError(
f"Usecols do not match columns, columns expected but not found: "
f"{missing}"
)
return usecols
def _validate_usecols_arg(self, usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains all integers
(column selection by index), strings (column by name) or is a callable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : list-like, callable, or None
List of columns to use when parsing or a callable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a callable or None is passed in.
'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
is passed in or None if a callable or None is passed in.
"""
msg = (
"'usecols' must either be list-like of all strings, all unicode, "
"all integers or a callable."
)
if usecols is not None:
if callable(usecols):
return usecols, None
if not is_list_like(usecols):
# see gh-20529
#
# Ensure it is iterable container but not string.
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
    def _clean_index_names(self, columns, index_col, unnamed_cols):
        """Resolve index_col names to positions; return (index_names, columns, index_col)."""
        if not is_index_col(index_col):
            return None, columns, index_col

        columns = list(columns)

        # In case of no rows and multiindex columns we have to set index_names to
        # list of Nones GH#38292
        if not columns:
            return [None] * len(index_col), columns, index_col

        cp_cols = list(columns)
        index_names: list[str | int | None] = []

        # don't mutate
        index_col = list(index_col)

        for i, c in enumerate(index_col):
            if isinstance(c, str):
                # Named index column: record its position and drop it from columns.
                index_names.append(c)
                for j, name in enumerate(cp_cols):
                    if name == c:
                        index_col[i] = j
                        columns.remove(name)
                        break
            else:
                # Positional index column: look up its name in the original order.
                name = cp_cols[c]
                columns.remove(name)
                index_names.append(name)

        # Only clean index names that were placeholders.
        for i, name in enumerate(index_names):
            if isinstance(name, str) and name in unnamed_cols:
                index_names[i] = None

        return index_names, columns, index_col
    def _get_empty_meta(
        self, columns, index_col, index_names, dtype: DtypeArg | None = None
    ):
        """Build (index, columns, col_dict) for an empty result with the right dtypes."""
        columns = list(columns)

        # Convert `dtype` to a defaultdict of some kind.
        # This will enable us to write `dtype[col_name]`
        # without worrying about KeyError issues later on.
        if not is_dict_like(dtype):
            # if dtype == None, default will be object.
            default_dtype = dtype or object
            # error: Argument 1 to "defaultdict" has incompatible type "Callable[[],
            # Union[ExtensionDtype, str, dtype[Any], Type[object], Dict[Hashable,
            # Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float],
            # Type[int], Type[complex], Type[bool], Type[object]]]]]"; expected
            # "Optional[Callable[[], Union[ExtensionDtype, str, dtype[Any],
            # Type[object]]]]"
            # error: Incompatible return value type (got "Union[ExtensionDtype, str,
            # dtype[Any], Type[object], Dict[Hashable, Union[ExtensionDtype, Union[str,
            # dtype[Any]], Type[str], Type[float], Type[int], Type[complex], Type[bool],
            # Type[object]]]]", expected "Union[ExtensionDtype, str, dtype[Any],
            # Type[object]]")
            dtype = defaultdict(
                lambda: default_dtype  # type: ignore[arg-type, return-value]
            )
        else:
            dtype = cast(dict, dtype)
            # Normalize positional keys to column names.
            dtype = defaultdict(
                lambda: object,
                {columns[k] if is_integer(k) else k: v for k, v in dtype.items()},
            )

        # Even though we have no data, the "index" of the empty DataFrame
        # could for example still be an empty MultiIndex. Thus, we need to
        # check whether we have any index columns specified, via either:
        #
        # 1) index_col (column indices)
        # 2) index_names (column names)
        #
        # Both must be non-null to ensure a successful construction. Otherwise,
        # we have to create a generic empty Index.
        if (index_col is None or index_col is False) or index_names is None:
            index = Index([])
        else:
            data = [Series([], dtype=dtype[name]) for name in index_names]
            index = ensure_index_from_sequences(data, names=index_names)
            index_col.sort()

            # Remove index columns from the remaining column list; subtract i
            # because each earlier pop shifts later positions left by one.
            for i, n in enumerate(index_col):
                columns.pop(n - i)

        col_dict = {col_name: Series([], dtype=dtype[col_name]) for col_name in columns}

        return index, columns, col_dict
def _make_date_converter(
    date_parser=None, dayfirst=False, infer_datetime_format=False, cache_dates=True
):
    """Return a ``converter(*date_cols)`` callable implementing parse_dates logic."""

    def converter(*date_cols):
        if date_parser is None:
            # No user parser: concatenate columns and let to_datetime infer.
            strs = parsing.concat_date_cols(date_cols)

            try:
                return tools.to_datetime(
                    ensure_object(strs),
                    utc=None,
                    dayfirst=dayfirst,
                    errors="ignore",
                    infer_datetime_format=infer_datetime_format,
                    cache=cache_dates,
                ).to_numpy()

            except ValueError:
                # Fall back to element-wise parsing.
                return tools.to_datetime(
                    parsing.try_parse_dates(strs, dayfirst=dayfirst), cache=cache_dates
                )
        else:
            try:
                # First attempt: vectorized call of the user parser.
                result = tools.to_datetime(
                    date_parser(*date_cols), errors="ignore", cache=cache_dates
                )
                if isinstance(result, datetime.datetime):
                    # A scalar result means the parser does not vectorize;
                    # force the fallback path below.
                    raise Exception("scalar parser")
                return result
            except Exception:
                try:
                    # Second attempt: parse concatenated strings element-wise
                    # using the user parser.
                    return tools.to_datetime(
                        parsing.try_parse_dates(
                            parsing.concat_date_cols(date_cols),
                            parser=date_parser,
                            dayfirst=dayfirst,
                        ),
                        errors="ignore",
                    )
                except Exception:
                    # Last resort: call the user parser row by row.
                    return generic_parser(date_parser, *date_cols)

    return converter
def _process_date_conversion(
    data_dict,
    converter: Callable,
    parse_spec,
    index_col,
    index_names,
    columns,
    keep_date_col: bool = False,
):
    """Apply datetime conversion to the columns named by ``parse_spec``.

    ``parse_spec`` may be a list (convert columns in place, or combine
    column groups into new columns) or a dict (map new column name ->
    list of source columns).  Mutates ``data_dict`` in place and returns
    it together with the updated column list.
    """
    def _isindex(colspec):
        # True when the column is (part of) the index, by position or name.
        return (isinstance(index_col, list) and colspec in index_col) or (
            isinstance(index_names, list) and colspec in index_names
        )
    new_cols = []
    new_data = {}
    orig_names = columns
    columns = list(columns)
    # Source columns consumed by combined date columns; dropped later
    # unless keep_date_col is set.
    date_cols = set()
    if parse_spec is None or isinstance(parse_spec, bool):
        # Nothing to convert (parse_dates=False/True handled elsewhere).
        return data_dict, columns
    if isinstance(parse_spec, list):
        # list of column lists
        for colspec in parse_spec:
            if is_scalar(colspec):
                # Integer specs refer to positions in the original names.
                if isinstance(colspec, int) and colspec not in data_dict:
                    colspec = orig_names[colspec]
                if _isindex(colspec):
                    # Index columns are converted elsewhere; skip here.
                    continue
                data_dict[colspec] = converter(data_dict[colspec])
            else:
                new_name, col, old_names = _try_convert_dates(
                    converter, colspec, data_dict, orig_names
                )
                if new_name in data_dict:
                    raise ValueError(f"New date column already in dict {new_name}")
                new_data[new_name] = col
                new_cols.append(new_name)
                date_cols.update(old_names)
    elif isinstance(parse_spec, dict):
        # dict of new name to column list
        for new_name, colspec in parse_spec.items():
            if new_name in data_dict:
                raise ValueError(f"Date column {new_name} already in dict")
            _, col, old_names = _try_convert_dates(
                converter, colspec, data_dict, orig_names
            )
            new_data[new_name] = col
            new_cols.append(new_name)
            date_cols.update(old_names)
    # New combined columns come first, followed by the original columns.
    data_dict.update(new_data)
    new_cols.extend(columns)
    if not keep_date_col:
        # Drop the source columns that were merged into new date columns.
        for c in list(date_cols):
            data_dict.pop(c)
            new_cols.remove(c)
    return data_dict, new_cols
def _try_convert_dates(parser: Callable, colspec, data_dict, columns):
colset = set(columns)
colnames = []
for c in colspec:
if c in colset:
colnames.append(c)
elif isinstance(c, int) and c not in columns:
colnames.append(columns[c])
else:
colnames.append(c)
new_name = "_".join(str(x) for x in colnames)
to_parse = [data_dict[c] for c in colnames if c in data_dict]
new_col = parser(*to_parse)
return new_name, new_col, colnames
def _get_na_values(col, na_values, na_fvalues, keep_default_na):
"""
Get the NaN values for a given column.
Parameters
----------
col : str
The name of the column.
na_values : array-like, dict
The object listing the NaN values as strings.
na_fvalues : array-like, dict
The object listing the NaN values as floats.
keep_default_na : bool
If `na_values` is a dict, and the column is not mapped in the
dictionary, whether to return the default NaN values or the empty set.
Returns
-------
nan_tuple : A length-two tuple composed of
1) na_values : the string NaN values for that column.
2) na_fvalues : the float NaN values for that column.
"""
if isinstance(na_values, dict):
if col in na_values:
return na_values[col], na_fvalues[col]
else:
if keep_default_na:
return STR_NA_VALUES, set()
return set(), set()
else:
return na_values, na_fvalues
def _is_potential_multi_index(
columns, index_col: bool | Sequence[int] | None = None
) -> bool:
"""
Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
index_col : None, bool or list, optional
Column or columns to use as the (possibly hierarchical) index
Returns
-------
bool : Whether or not columns could become a MultiIndex
"""
if index_col is None or isinstance(index_col, bool):
index_col = []
return bool(
len(columns)
and not isinstance(columns, MultiIndex)
and all(isinstance(c, tuple) for c in columns if c not in list(index_col))
)
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a ValueError if
that is the case.
"""
msg = (
"Only booleans, lists, and dictionaries are accepted "
"for the 'parse_dates' parameter"
)
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not isinstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
def is_index_col(col) -> bool:
    """Return True when *col* actually designates an index column.

    Both ``None`` and ``False`` mean "no index column"; anything else
    (including 0 or an empty list) counts as a real specification.
    """
    return not (col is None or col is False)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
class IsiFieldofstudy():
    '''
    Maps the MAG field of studies to the ISI field of studies
    '''
    # Level-0 (top-level) MAG fields -> ISI subject categories.
    # Every MAG level-0 field present here has a direct ISI equivalent.
    mappingLevel0 = {
        'Engineering': 'Engineering',
        'History': 'Social Sciences, general',
        'Environmental science': 'Environment/Ecology',
        'Psychology': 'Psychiatry/Psychology',
        'Biology': 'Biology & Biochemistry',
        'Materials Science': 'Materials Science',
        'Geography': 'Geosciences',
        'Mathematics': 'Mathematics',
        'Political Science': 'Social Sciences, general',
        'Computer Science': 'Computer Science',
        'Business': 'Economics & Business',
        'Geology': 'Geosciences',
        'Chemistry': 'Chemistry',
        'Physics': 'Physics',
        'Sociology': 'Social Sciences, general',
        'Economics': 'Economics & Business'
    }
    # Level-1 (sub-discipline) MAG fields -> ISI subject categories.
    # NOTE: commented-out entries are MAG level-1 fields with no agreed
    # ISI mapping (they would map to None) and are deliberately excluded
    # so lookups for them raise KeyError rather than returning None.
    mappingLevel1 = {
        'Quantum mechanics':'Physics',
        'Applied psychology':'Psychiatry/Psychology',
        #'Physical therapy':None,
        'Anthropology':'Social Sciences, general',
        'Mathematical analysis':'Mathematics',
        #'Marketing':None,
        #'Dentistry':None,
        'Finance':'Economics & Business',
        'Social science':'Social Sciences, general',
        'Astrophysics':'Physics',
        'Environmental chemistry':'Chemistry',
        'Demography':'Social Sciences, general',
        'Ceramic materials':'Materials Science',
        'Human computer interaction':'Computer Science',
        'Internet privacy':'Computer Science',
        'Seismology':'Geosciences',
        'Marine engineering':'Engineering',
        'Applied mathematics':'Mathematics',
        'Climatology':'Geosciences',
        'Biochemical engineering':'Engineering',
        #'Dermatology':None,
        #'Environmental ethics':None,
        'Operating system':'Computer Science',
        'Composite material':'Materials Science',
        'Management':'Economics & Business',
        #'Pulp and paper industry':None,
        'Agronomy':'Agricultural Sciences',
        #'Telecommunications':None,
        'Environmental planning':'Environment/Ecology',
        'Surgery':'Clinical Medicine',
        'Mathematical Economics':'Economics & Business',
        'Automotive engineering':'Engineering',
        'Transport engineering':'Engineering',
        'Chromatography':'Chemistry',
        'Economic policy':'Social Sciences, general',
        #'Aesthetics':None,
        'Emergency medicine':'Clinical Medicine',
        #'Cancer research':None,
        'Monetary economics':'Economics & Business',
        #'Alternative medicine':None,
        'Nuclear chemistry':'Chemistry',
        #'Engineering ethics':None,
        'Stereochemistry':'Chemistry',
        'Topology':'Mathematics',
        'Molecular biology':'Molecular Biology & Genetics',
        #'Traditional medicine':None,
        'Information retrieval':'Computer Science',
        'Manufacturing engineering':'Engineering',
        'Computer network':'Computer Science',
        'Archaeology':'Social Sciences, general',
        #'Religious Studies':None,
        #'Theology':None,
        'Nuclear magnetic resonance':'Physics',
        'Actuarial science':'Mathematics',
        'Astrobiology':'Space Science',
        'Electrical engineering':'Engineering',
        'Geodesy':'Engineering',
        'Accounting':'Economics & Business',
        'Calculus':'Mathematics',
        #'Gerontology':None,
        'Agricultural engineering':'Engineering',
        'Construction engineering':'Engineering',
        #'Audiology':None,
        'Physical Chemistry':'Chemistry',
        #'Performance art':None,
        #'Diabetes mellitus':None,
        'Crystallography':'Geosciences',
        'Remote sensing':'Geosciences',
        'Statistics':'Mathematics',
        'Engineering Management':'Engineering',
        'Inorganic chemistry':'Chemistry',
        'International Economics':'Economics & Business',
        'Hydrology':'Geosciences',
        'Arithmetic':'Mathematics',
        #'Urology':None,
        'Developmental psychology':'Psychiatry/Psychology',
        'Artificial intelligence':'Computer Science',
        'Astronomy':'Space Science',
        'Geochemistry':'Chemistry',
        'Agroforestry':'Agricultural Sciences',
        #'Physical medicine and rehabilitation':None,
        #'Pathology':None,
        #'Pediatrics':None,
        'Industrial engineering':'Engineering',
        'Physical geography':'Geosciences',
        'Operations research':'Mathematics',
        'Biochemistry':'Biology & Biochemistry',
        'Mechanics':'Physics',
        'Law and economics':'Economics & Business',
        'Reliability engineering':'Engineering',
        'Geometry':'Mathematics',
        'Pharmacology':'Pharmacology & Toxicology',
        'Software Engineering':'Computer Science',
        'Agricultural science':'Agricultural Sciences',
        #'Ophthalmology':None,
        'Distributed computing':'Computer Science',
        'Metallurgy':'Materials Science',
        'Mathematical physics':'Mathematics',
        'Development economics':'Economics & Business',
        #'Endocrinology':None,
        #'Nanotechnology':None,
        'Radiochemistry':'Chemistry',
        'Ancient history':'Social Sciences, general',
        'Algorithm':'Computer Science',
        'Combinatorial chemistry':'Chemistry',
        'Particle physics':'Physics',
        'Computer graphics (images)':'Computer Science',
        'Earth science':'Geosciences',
        'Ethnology':'Social Sciences, general',
        'Nuclear engineering':'Engineering',
        'Econometrics':'Economics & Business',
        'Parallel computing':'Computer Science',
        'Natural resource economics':'Economics & Business',
        #'Aeronautics':None,
        #'Cartography':None,
        'Structural engineering':'Engineering',
        'Social psychology':'Psychiatry/Psychology',
        'Forestry':'Environment/Ecology',
        'Psychotherapist':'Psychiatry/Psychology',
        'Clinical psychology':'Psychiatry/Psychology',
        'Photochemistry':'Chemistry',
        #'Epistemology':None,
        'Chemical physics':'Physics',
        'Zoology':'Plant & Animal Science',
        #'Biotechnology':None,
        'Industrial organization':'Economics & Business',
        'Forensic engineering':'Engineering',
        'Economic geography':'Economics & Business',
        'Polymer science':'Materials Science',
        'Embedded system':'Computer Science',
        #'Cognitive science':None,
        #'Nuclear medicine':None,
        'Toxicology':'Pharmacology & Toxicology',
        'Criminology':'Social Sciences, general',
        #'Biological engineering':None,
        #'Intensive Care Medicine':None,
        'Computer security':'Computer Science',
        'Bioinformatics':'Computer Science',
        'Statistical physics':'Physics',
        'Welfare economics':'Economics & Business',
        'Economic system':'Economics & Business',
        'Environmental Engineering':'Engineering',
        #'Oncology':None,
        'Organic chemistry':'Chemistry',
        'Law':'Social Sciences, general',
        #'Food science':None,
        'Public Relations':'Economics & Business',
        #'Optoelectronics':None,
        'Labour economics':'Economics & Business',
        'Civil Engineering':'Engineering',
        'Regional science':'Social Sciences, general',
        'Neuroscience':'Neuroscience & Behavior',
        'Horticulture':'Agricultural Sciences',
        'Natural language processing':'Computer Science',
        #'Fishery':None,
        'Linguistics':'Social Sciences, general',
        'Theoretical computer science':'Computer Science',
        #'Medical emergency':None,
        'Financial system':'Economics & Business',
        'Combinatorics':'Mathematics',
        'Classical economics':'Economics & Business',
        'Pattern recognition':'Computer Science',
        #'Orthodontics':None,
        #'Anesthesia':None,
        'Discrete mathematics':'Mathematics',
        'Environmental economics':'Economics & Business',
        'Atomic physics':'Physics',
        'Process management':'Economics & Business',
        'Engineering drawing':'Engineering',
        #'Computational biology':None,
        'Microbiology':'Microbiology',
        'Mathematical optimization':'Mathematics',
        #'Paleontology':None,
        'Meteorology':'Geosciences',
        'Biophysics':'Biology & Biochemistry',
        #'Humanities':None,
        'Cognitive psychology':'Psychiatry/Psychology',
        'Immunology':'Immunology',
        'Water resource management':'Environment/Ecology',
        #'Knowledge management':None,
        'Financial economics':'Economics & Business',
        #'Visual arts':None,
        #'Nursing':None,
        'Microeconomics':'Economics & Business',
        'Nuclear physics':'Physics',
        'Atmospheric sciences':'Geosciences',
        'Gender studies':'Social Sciences, general',
        'Geomorphology':'Geosciences',
        'Optics':'Physics',
        'Control engineering':'Engineering',
        #'Family medicine':None,
        'Computer hardware':'Computer Science',
        #'Optometry':None,
        'Machine learning':'Computer Science',
        #'Obstetrics':None,
        'Mathematics education':'Social Sciences, general',
        'Polymer chemistry':'Chemistry',
        'Waste management':'Environment/Ecology',
        'Computational science':'Computer Science',
        'Electronic engineering':'Engineering',
        'Physiology':'Biology & Biochemistry',
        'Economic growth':'Economics & Business',
        #'Control theory':None,
        'Molecular physics':'Physics',
        #'Simulation':None,
        'Thermodynamics':'Physics',
        'Pedagogy':'Social Sciences, general',
        'Anatomy':'Biology & Biochemistry',
        'Analytical chemistry':'Chemistry',
        'Soil science':'Geosciences',
        #'Risk analysis':None,
        #'Literature':None,
        'Aerospace Engineering':'Engineering',
        'Geotechnical engineering':'Engineering',
        'Mineralogy':'Geosciences',
        'Quantum electrodynamics':'Physics',
        'Petroleum Engineering':'Engineering',
        'Medical education':'Social Sciences, general',
        'Geophysics':'Geosciences',
        #'Process engineering':None,
        'Botany':'Plant & Animal Science',
        'Petrology':'Geosciences',
        'Mechanical Engineering':'Engineering',
        'Speech recognition':'Computer Science',
        'Real-time computing':'Computer Science',
        'Public administration':'Social Sciences, general',
        'Socioeconomics':'Social Sciences, general',
        'Communication':'Social Sciences, general',
        'Programming language':'Computer Science',
        'Environmental protection':'Environment/Ecology',
        #'Management science':None,
        #'Gynecology':None,
        'Oceanography':'Geosciences',
        'Psychiatry':'Psychiatry/Psychology',
        'Medicinal chemistry':'Chemistry',
        #'Cardiology':None,
        'Systems engineering':'Engineering',
        'Veterinary medicine':'Plant & Animal Science',
        #'Advertising':None,
        'Macroeconomics':'Economics & Business',
        'Theoretical physics':'Physics',
        #'Engineering physics':None,
        #'Classics':None,
        #'Medical physics':None,
        'Market economy':'Economics & Business',
        #'Art history':None,
        'Classical mechanics':'Physics',
        'Environmental health':'Environment/Ecology',
        'Public economics':'Economics & Business',
        'Pure mathematics':'Mathematics',
        'Cell biology':'Molecular Biology & Genetics',
        'Data mining':'Computer Science',
        'Computational chemistry':'Chemistry',
        #'Acoustics':None,
        'Computational physics':'Physics',
        'Genealogy':'Social Sciences, general',
        'Computer Engineering':'Computer Science',
        'Algebra':'Mathematics',
        'Neoclassical economics':'Economics & Business',
        'International trade':'Economics & Business',
        'Business Administration':'Economics & Business',
        'Evolutionary biology':'Biology & Biochemistry',
        'Political Economy':'Economics & Business',
        'Psychoanalysis':'Psychiatry/Psychology',
        'Ecology':'Environment/Ecology',
        'Computer vision':'Computer Science',
        'Environmental resource management':'Environment/Ecology',
        'Animal science':'Plant & Animal Science',
        'Condensed matter physics':'Physics',
        'Media studies':'Social Sciences, general',
        'Commerce':'Economics & Business',
        'Database':'Computer Science',
        'Computer architecture':'Computer Science',
        'Library science':'Social Sciences, general',
        'Economic history':'Economics & Business',
        'Mining engineering':'Engineering',
        'Chemical Engineering':'Engineering',
        #'Radiology':None,
        #'Virology':None,
        'Keynesian economics':'Economics & Business',
        'General surgery':'Clinical Medicine',
        #'Internal medicine':None,
        'Economy':'Economics & Business',
        'Positive economics':'Economics & Business',
        'World Wide Web':'Computer Science',
        'Architectural engineering':'Engineering',
        'Biological system':'Biology & Biochemistry',
        'Operations management':'Economics & Business',
        'Genetics':'Molecular Biology & Genetics',
        #'Gastroenterology':None
    }
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the assignment service."""
import abc
import six
from keystone import clean
from keystone.common import cache
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import manager
from keystone import config
from keystone import exception
from keystone import notifications
from keystone.openstack.common import log
# Module-level handles shared by the assignment Manager below.
CONF = config.CONF
LOG = log.getLogger(__name__)
# Predicate deciding whether a given assignment result should be cached.
SHOULD_CACHE = cache.should_cache_fn('assignment')
# NOTE(blk-u): The config option is not available at import time.
EXPIRATION_TIME = lambda: CONF.assignment.cache_time
def calc_default_domain():
    """Return the reference dict describing the v2-compatible default domain.

    The default domain owns all users and tenants visible through the
    Identity API v2, which has no notion of domains.
    """
    return {
        'id': CONF.identity.default_domain_id,
        'name': u'Default',
        'enabled': True,
        'description': (u'Owns users and tenants (i.e. projects)'
                        ' available on Identity API v2.'),
    }
@dependency.provider('assignment_api')
@dependency.requires('credential_api', 'identity_api', 'token_api')
class Manager(manager.Manager):
"""Default pivot point for the Assignment backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
assignment.Manager() and identity.Manager() have a circular dependency.
The late import works around this. The if block prevents creation of the
api object by both managers.
"""
    def __init__(self):
        # Use the explicitly configured assignment driver if one is set;
        # otherwise let the identity driver choose a compatible default
        # (the two backends must agree on storage).
        assignment_driver = CONF.assignment.driver
        if assignment_driver is None:
            identity_driver = dependency.REGISTRY['identity_api'].driver
            assignment_driver = identity_driver.default_assignment_driver()
        super(Manager, self).__init__(assignment_driver)
    @notifications.created('project')
    def create_project(self, tenant_id, tenant):
        """Create a project, normalizing defaults and priming the cache."""
        # Copy so the caller's dict is not mutated by the normalization.
        tenant = tenant.copy()
        tenant.setdefault('enabled', True)
        tenant['enabled'] = clean.project_enabled(tenant['enabled'])
        tenant.setdefault('description', '')
        ret = self.driver.create_project(tenant_id, tenant)
        # Pre-populate both lookup caches with the driver's canonical ref.
        if SHOULD_CACHE(ret):
            self.get_project.set(ret, self, tenant_id)
            self.get_project_by_name.set(ret, self, ret['name'],
                                         ret['domain_id'])
        return ret
    @notifications.updated('project')
    def update_project(self, tenant_id, tenant):
        """Update a project, revoking tokens when it is being disabled."""
        tenant = tenant.copy()
        if 'enabled' in tenant:
            tenant['enabled'] = clean.project_enabled(tenant['enabled'])
        # Disabling a project invalidates every token scoped to it.
        if not tenant.get('enabled', True):
            self.token_api.delete_tokens_for_users(
                self.list_user_ids_for_project(tenant_id),
                project_id=tenant_id)
        ret = self.driver.update_project(tenant_id, tenant)
        # Drop stale cache entries for both lookup paths.
        self.get_project.invalidate(self, tenant_id)
        self.get_project_by_name.invalidate(self, ret['name'],
                                            ret['domain_id'])
        return ret
    @notifications.deleted('project')
    def delete_project(self, tenant_id):
        """Delete a project plus its tokens, caches and credentials."""
        # Fetch the ref first: its name/domain are needed for cache
        # invalidation after the row is gone.
        project = self.driver.get_project(tenant_id)
        user_ids = self.list_user_ids_for_project(tenant_id)
        self.token_api.delete_tokens_for_users(user_ids, project_id=tenant_id)
        ret = self.driver.delete_project(tenant_id)
        self.get_project.invalidate(self, tenant_id)
        self.get_project_by_name.invalidate(self, project['name'],
                                            project['domain_id'])
        self.credential_api.delete_credentials_for_project(tenant_id)
        return ret
    def get_roles_for_user_and_project(self, user_id, tenant_id):
        """Get the roles associated with a user within given project.

        This includes roles directly assigned to the user on the
        project, as well as those by virtue of group membership. If
        the OS-INHERIT extension is enabled, then this will also
        include roles inherited from the domain.

        :returns: a list of role ids.
        :raises: keystone.exception.UserNotFound,
                 keystone.exception.ProjectNotFound

        """
        # Helper: roles granted to any of the user's groups on the project
        # (plus, under OS-INHERIT, group roles inherited from the domain).
        def _get_group_project_roles(user_id, project_ref):
            role_list = []
            group_refs = self.identity_api.list_groups_for_user(user_id)
            for x in group_refs:
                try:
                    metadata_ref = self._get_metadata(
                        group_id=x['id'], tenant_id=project_ref['id'])
                    role_list += self._roles_from_role_dicts(
                        metadata_ref.get('roles', {}), False)
                except exception.MetadataNotFound:
                    # no group grant, skip
                    pass
                if CONF.os_inherit.enabled:
                    # Now get any inherited group roles for the owning domain
                    try:
                        metadata_ref = self._get_metadata(
                            group_id=x['id'],
                            domain_id=project_ref['domain_id'])
                        role_list += self._roles_from_role_dicts(
                            metadata_ref.get('roles', {}), True)
                    except (exception.MetadataNotFound,
                            exception.NotImplemented):
                        # NotImplemented: not all backends support domains.
                        pass
            return role_list
        # Helper: roles granted directly to the user on the project
        # (plus, under OS-INHERIT, user roles inherited from the domain).
        def _get_user_project_roles(user_id, project_ref):
            role_list = []
            try:
                metadata_ref = self._get_metadata(user_id=user_id,
                                                  tenant_id=project_ref['id'])
                role_list = self._roles_from_role_dicts(
                    metadata_ref.get('roles', {}), False)
            except exception.MetadataNotFound:
                pass
            if CONF.os_inherit.enabled:
                # Now get any inherited roles for the owning domain
                try:
                    metadata_ref = self._get_metadata(
                        user_id=user_id, domain_id=project_ref['domain_id'])
                    role_list += self._roles_from_role_dicts(
                        metadata_ref.get('roles', {}), True)
                except (exception.MetadataNotFound, exception.NotImplemented):
                    pass
            return role_list
        # Raises ProjectNotFound if the project does not exist.
        project_ref = self.get_project(tenant_id)
        user_role_list = _get_user_project_roles(user_id, project_ref)
        group_role_list = _get_group_project_roles(user_id, project_ref)
        # Use set() to process the list to remove any duplicates
        return list(set(user_role_list + group_role_list))
    def get_roles_for_user_and_domain(self, user_id, domain_id):
        """Get the roles associated with a user within given domain.

        :returns: a list of role ids.
        :raises: keystone.exception.UserNotFound,
                 keystone.exception.DomainNotFound

        """
        # Helper: roles granted to any of the user's groups on the domain.
        def _get_group_domain_roles(user_id, domain_id):
            role_list = []
            group_refs = self.identity_api.list_groups_for_user(user_id)
            for x in group_refs:
                try:
                    metadata_ref = self._get_metadata(group_id=x['id'],
                                                      domain_id=domain_id)
                    role_list += self._roles_from_role_dicts(
                        metadata_ref.get('roles', {}), False)
                except (exception.MetadataNotFound, exception.NotImplemented):
                    # MetadataNotFound implies no group grant, so skip.
                    # Ignore NotImplemented since not all backends support
                    # domains.
                    pass
            return role_list
        # Helper: roles granted directly to the user on the domain.
        def _get_user_domain_roles(user_id, domain_id):
            metadata_ref = {}
            try:
                metadata_ref = self._get_metadata(user_id=user_id,
                                                  domain_id=domain_id)
            except (exception.MetadataNotFound, exception.NotImplemented):
                # MetadataNotFound implies no user grants.
                # Ignore NotImplemented since not all backends support
                # domains
                pass
            return self._roles_from_role_dicts(
                metadata_ref.get('roles', {}), False)
        # Raises DomainNotFound if the domain does not exist.
        self.get_domain(domain_id)
        user_role_list = _get_user_domain_roles(user_id, domain_id)
        group_role_list = _get_group_domain_roles(user_id, domain_id)
        # Use set() to process the list to remove any duplicates
        return list(set(user_role_list + group_role_list))
def add_user_to_project(self, tenant_id, user_id):
"""Add user to a tenant by creating a default role relationship.
:raises: keystone.exception.ProjectNotFound,
keystone.exception.UserNotFound
"""
try:
self.driver.add_role_to_user_and_project(
user_id,
tenant_id,
config.CONF.member_role_id)
except exception.RoleNotFound:
LOG.info(_("Creating the default role %s "
"because it does not exist."),
config.CONF.member_role_id)
role = {'id': CONF.member_role_id,
'name': CONF.member_role_name}
self.driver.create_role(config.CONF.member_role_id, role)
#now that default role exists, the add should succeed
self.driver.add_role_to_user_and_project(
user_id,
tenant_id,
config.CONF.member_role_id)
def remove_user_from_project(self, tenant_id, user_id):
"""Remove user from a tenant
:raises: keystone.exception.ProjectNotFound,
keystone.exception.UserNotFound
"""
roles = self.get_roles_for_user_and_project(user_id, tenant_id)
if not roles:
raise exception.NotFound(tenant_id)
for role_id in roles:
try:
self.driver.remove_role_from_user_and_project(user_id,
tenant_id,
role_id)
except exception.RoleNotFound:
LOG.debug(_("Removing role %s failed because it does not "
"exist."),
role_id)
def list_projects_for_user(self, user_id, hints=None):
# NOTE(henry-nash): In order to get a complete list of user projects,
# the driver will need to look at group assignments. To avoid cross
# calling between the assignment and identity driver we get the group
# list here and pass it in. The rest of the detailed logic of listing
# projects for a user is pushed down into the driver to enable
# optimization with the various backend technologies (SQL, LDAP etc.).
group_ids = [x['id'] for
x in self.identity_api.list_groups_for_user(user_id)]
return self.driver.list_projects_for_user(
user_id, group_ids, hints or driver_hints.Hints())
    @cache.on_arguments(should_cache_fn=SHOULD_CACHE,
                        expiration_time=EXPIRATION_TIME)
    def get_domain(self, domain_id):
        """Return the domain ref for *domain_id* (result is cached)."""
        return self.driver.get_domain(domain_id)
    @cache.on_arguments(should_cache_fn=SHOULD_CACHE,
                        expiration_time=EXPIRATION_TIME)
    def get_domain_by_name(self, domain_name):
        """Return the domain ref for *domain_name* (result is cached)."""
        return self.driver.get_domain_by_name(domain_name)
    def create_domain(self, domain_id, domain):
        """Create a domain and prime both lookup caches with the result."""
        ret = self.driver.create_domain(domain_id, domain)
        if SHOULD_CACHE(ret):
            self.get_domain.set(ret, self, domain_id)
            self.get_domain_by_name.set(ret, self, ret['name'])
        return ret
def list_domains(self, hints=None):
return self.driver.list_domains(hints or driver_hints.Hints())
    def update_domain(self, domain_id, domain):
        """Update a domain, revoking its tokens when it is being disabled."""
        ret = self.driver.update_domain(domain_id, domain)
        # disable owned users & projects when the API user specifically set
        # enabled=False
        if not domain.get('enabled', True):
            self.token_api.delete_tokens_for_domain(domain_id)
        # Drop stale cache entries for both lookup paths.
        self.get_domain.invalidate(self, domain_id)
        self.get_domain_by_name.invalidate(self, ret['name'])
        return ret
    def delete_domain(self, domain_id):
        """Delete a (previously disabled, non-default) domain entirely."""
        # explicitly forbid deleting the default domain (this should be a
        # carefully orchestrated manual process involving configuration
        # changes, etc)
        if domain_id == CONF.identity.default_domain_id:
            raise exception.ForbiddenAction(action=_('delete the default '
                                                     'domain'))
        # Raises DomainNotFound if it does not exist.
        domain = self.driver.get_domain(domain_id)
        # To help avoid inadvertent deletes, we insist that the domain
        # has been previously disabled.  This also prevents a user deleting
        # their own domain since, once it is disabled, they won't be able
        # to get a valid token to issue this delete.
        if domain['enabled']:
            raise exception.ForbiddenAction(
                action=_('delete a domain that is not disabled'))
        # Remove owned projects/groups/users before the domain itself.
        self._delete_domain_contents(domain_id)
        self.driver.delete_domain(domain_id)
        self.get_domain.invalidate(self, domain_id)
        self.get_domain_by_name.invalidate(self, domain['name'])
    def _delete_domain_contents(self, domain_id):
        """Delete the contents of a domain.

        Before we delete a domain, we need to remove all the entities
        that are owned by it, i.e. Users, Groups & Projects. To do this we
        call the respective delete functions for these entities, which are
        themselves responsible for deleting any credentials and role grants
        associated with them as well as revoking any relevant tokens.

        The order we delete entities is also important since some types
        of backend may need to maintain referential integrity
        throughout, and many of the entities have relationship with each
        other. The following deletion order is therefore used:

        Projects: Reference user and groups for grants
        Groups: Reference users for membership and domains for grants
        Users: Reference domains for grants

        """
        user_refs = self.identity_api.list_users()
        proj_refs = self.list_projects()
        group_refs = self.identity_api.list_groups()
        # First delete the projects themselves
        for project in proj_refs:
            if project['domain_id'] == domain_id:
                try:
                    self.delete_project(project['id'])
                except exception.ProjectNotFound:
                    # Tolerate concurrent deletion; just log and carry on.
                    LOG.debug(_('Project %(projectid)s not found when '
                                'deleting domain contents for %(domainid)s, '
                                'continuing with cleanup.'),
                              {'projectid': project['id'],
                               'domainid': domain_id})
        for group in group_refs:
            # Cleanup any existing groups.
            if group['domain_id'] == domain_id:
                try:
                    self.identity_api.delete_group(group['id'],
                                                   domain_scope=domain_id)
                except exception.GroupNotFound:
                    LOG.debug(_('Group %(groupid)s not found when deleting '
                                'domain contents for %(domainid)s, continuing '
                                'with cleanup.'),
                              {'groupid': group['id'], 'domainid': domain_id})
        # And finally, delete the users themselves
        for user in user_refs:
            if user['domain_id'] == domain_id:
                try:
                    self.identity_api.delete_user(user['id'],
                                                  domain_scope=domain_id)
                except exception.UserNotFound:
                    LOG.debug(_('User %(userid)s not found when '
                                'deleting domain contents for %(domainid)s, '
                                'continuing with cleanup.'),
                              {'userid': user['id'],
                               'domainid': domain_id})
def list_projects(self, hints=None):
return self.driver.list_projects(hints or driver_hints.Hints())
    # NOTE(henry-nash): list_projects_in_domain is actually an internal method
    # and not exposed via the API.  Therefore there is no need to support
    # driver hints for it.
    def list_projects_in_domain(self, domain_id):
        """Return all project refs owned by *domain_id* (internal use)."""
        return self.driver.list_projects_in_domain(domain_id)
def list_user_projects(self, user_id, hints=None):
return self.driver.list_user_projects(
user_id, hints or driver_hints.Hints())
    @cache.on_arguments(should_cache_fn=SHOULD_CACHE,
                        expiration_time=EXPIRATION_TIME)
    def get_project(self, project_id):
        """Return the project ref for *project_id* (result is cached)."""
        return self.driver.get_project(project_id)
    @cache.on_arguments(should_cache_fn=SHOULD_CACHE,
                        expiration_time=EXPIRATION_TIME)
    def get_project_by_name(self, tenant_name, domain_id):
        """Return the project ref by name within a domain (cached)."""
        return self.driver.get_project_by_name(tenant_name, domain_id)
    @cache.on_arguments(should_cache_fn=SHOULD_CACHE,
                        expiration_time=EXPIRATION_TIME)
    def get_role(self, role_id):
        """Return the role ref for *role_id* (result is cached)."""
        return self.driver.get_role(role_id)
    @notifications.created('role')
    def create_role(self, role_id, role):
        """Create a role and prime the get_role cache with the result."""
        ret = self.driver.create_role(role_id, role)
        if SHOULD_CACHE(ret):
            self.get_role.set(ret, self, role_id)
        return ret
def list_roles(self, hints=None):
return self.driver.list_roles(hints or driver_hints.Hints())
    @notifications.updated('role')
    def update_role(self, role_id, role):
        """Update a role and drop its stale cache entry."""
        ret = self.driver.update_role(role_id, role)
        self.get_role.invalidate(self, role_id)
        return ret
    @notifications.deleted('role')
    def delete_role(self, role_id):
        """Delete a role, best-effort revoking the tokens that carried it."""
        try:
            self._delete_tokens_for_role(role_id)
        except exception.NotImplemented:
            # FIXME(morganfainberg): Not all backends (ldap) implement
            # `list_role_assignments_for_role` which would have previously
            # caused a NotImplmented error to be raised when called through
            # the controller. Now error or proper action will always come from
            # the `delete_role` method logic. Work needs to be done to make
            # the behavior between drivers consistent (capable of revoking
            # tokens for the same circumstances). This is related to the bug
            # https://bugs.launchpad.net/keystone/+bug/1221805
            pass
        self.driver.delete_role(role_id)
        self.get_role.invalidate(self, role_id)
def list_role_assignments_for_role(self, role_id=None):
# NOTE(henry-nash): Currently the efficiency of the key driver
# implementation (SQL) of list_role_assignments is severely hampered by
# the existence of the multiple grant tables - hence there is little
# advantage in pushing the logic of this method down into the driver.
# Once the single assignment table is implemented, then this situation
# will be different, and this method should have its own driver
# implementation.
return [r for r in self.driver.list_role_assignments()
if r['role_id'] == role_id]
def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
self.driver.remove_role_from_user_and_project(user_id, tenant_id,
role_id)
self.token_api.delete_tokens_for_user(user_id)
    def delete_grant(self, role_id, user_id=None, group_id=None,
                     domain_id=None, project_id=None,
                     inherited_to_projects=False):
        """Delete a role grant and invalidate the tokens it affected."""
        user_ids = []
        if group_id is not None:
            # NOTE(morganfainberg): The user ids are the important part for
            # invalidating tokens below, so extract them here.
            try:
                for user in self.identity_api.list_users_in_group(group_id,
                                                                  domain_id):
                    # Avoid double-counting a directly-granted user.
                    if user['id'] != user_id:
                        user_ids.append(user['id'])
            except exception.GroupNotFound:
                LOG.debug(_('Group %s not found, no tokens to invalidate.'),
                          group_id)
        self.driver.delete_grant(role_id, user_id, group_id, domain_id,
                                 project_id, inherited_to_projects)
        if user_id is not None:
            user_ids.append(user_id)
        self.token_api.delete_tokens_for_users(user_ids)
    def _delete_tokens_for_role(self, role_id):
        """Invalidate tokens for every user holding an assignment of
        ``role_id``, whether assigned directly or via group membership.
        Project-scoped assignments delete only that user+project's tokens;
        domain-scoped ones delete all of the user's tokens.
        """
        assignments = self.list_role_assignments_for_role(role_id=role_id)
        # Iterate over the assignments for this role and build the list of
        # user or user+project IDs for the tokens we need to delete
        user_ids = set()
        user_and_project_ids = list()
        for assignment in assignments:
            # If we have a project assignment, then record both the user and
            # project IDs so we can target the right token to delete. If it is
            # a domain assignment, we might as well kill all the tokens for
            # the user, since in the vast majority of cases all the tokens
            # for a user will be within one domain anyway, so not worth
            # trying to delete tokens for each project in the domain.
            if 'user_id' in assignment:
                if 'project_id' in assignment:
                    user_and_project_ids.append(
                        (assignment['user_id'], assignment['project_id']))
                elif 'domain_id' in assignment:
                    user_ids.add(assignment['user_id'])
            elif 'group_id' in assignment:
                # Add in any users for this group, being tolerant of any
                # cross-driver database integrity errors.
                try:
                    users = self.identity_api.list_users_in_group(
                        assignment['group_id'])
                except exception.GroupNotFound:
                    # Ignore it, but log a debug message
                    if 'project_id' in assignment:
                        target = _('Project (%s)') % assignment['project_id']
                    elif 'domain_id' in assignment:
                        target = _('Domain (%s)') % assignment['domain_id']
                    else:
                        target = _('Unknown Target')
                    msg = _('Group (%(group)s), referenced in assignment '
                            'for %(target)s, not found - ignoring.')
                    LOG.debug(msg, {'group': assignment['group_id'],
                                    'target': target})
                    continue
                if 'project_id' in assignment:
                    for user in users:
                        user_and_project_ids.append(
                            (user['id'], assignment['project_id']))
                elif 'domain_id' in assignment:
                    for user in users:
                        user_ids.add(user['id'])
        # Now process the built up lists. Before issuing calls to delete any
        # tokens, let's try and minimize the number of calls by pruning out
        # any user+project deletions where a general token deletion for that
        # same user is also planned.
        user_and_project_ids_to_action = []
        for user_and_project_id in user_and_project_ids:
            if user_and_project_id[0] not in user_ids:
                user_and_project_ids_to_action.append(user_and_project_id)
        self.token_api.delete_tokens_for_users(user_ids)
        for user_id, project_id in user_and_project_ids_to_action:
            self.token_api.delete_tokens_for_user(user_id, project_id)
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
    """Interface that assignment backends (e.g. SQL, LDAP) must implement."""
    def _role_to_dict(self, role_id, inherited):
        # Serialize a role reference into the dict form backends store;
        # inherited assignments are tagged with 'inherited_to': 'projects'.
        role_dict = {'id': role_id}
        if inherited:
            role_dict['inherited_to'] = 'projects'
        return role_dict
    def _roles_from_role_dicts(self, dict_list, inherited):
        # Return the IDs of the roles in dict_list whose inheritance mode
        # matches the requested `inherited` flag.
        role_list = []
        for d in dict_list:
            if ((not d.get('inherited_to') and not inherited) or
               (d.get('inherited_to') == 'projects' and inherited)):
                role_list.append(d['id'])
        return role_list
    def _add_role_to_role_dicts(self, role_id, inherited, dict_list,
                                allow_existing=True):
        # There is a difference in error semantics when trying to
        # assign a role that already exists between the coded v2 and v3
        # API calls. v2 will error if the assignment already exists,
        # while v3 is silent. Setting the 'allow_existing' parameter
        # appropriately lets this call be used for both.
        role_set = set([frozenset(r.items()) for r in dict_list])
        key = frozenset(self._role_to_dict(role_id, inherited).items())
        if not allow_existing and key in role_set:
            raise KeyError
        role_set.add(key)
        return [dict(r) for r in role_set]
    def _remove_role_from_role_dicts(self, role_id, inherited, dict_list):
        # NOTE: set.remove raises KeyError if the role dict is not present.
        role_set = set([frozenset(r.items()) for r in dict_list])
        role_set.remove(frozenset(self._role_to_dict(role_id,
                                                     inherited).items()))
        return [dict(r) for r in role_set]
    @abc.abstractmethod
    def get_project_by_name(self, tenant_name, domain_id):
        """Get a tenant by name.

        :returns: tenant_ref
        :raises: keystone.exception.ProjectNotFound
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def list_user_ids_for_project(self, tenant_id):
        """Lists all user IDs with a role assignment in the specified project.

        :returns: a list of user_ids or an empty set.
        :raises: keystone.exception.ProjectNotFound
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
        """Add a role to a user within given tenant.

        :raises: keystone.exception.UserNotFound,
                 keystone.exception.ProjectNotFound,
                 keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
        """Remove a role from a user within given tenant.

        :raises: keystone.exception.UserNotFound,
                 keystone.exception.ProjectNotFound,
                 keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()
    # assignment/grant crud
    @abc.abstractmethod
    def create_grant(self, role_id, user_id=None, group_id=None,
                     domain_id=None, project_id=None,
                     inherited_to_projects=False):
        """Creates a new assignment/grant.

        If the assignment is to a domain, then optionally it may be
        specified as inherited to owned projects (this requires
        the OS-INHERIT extension to be enabled).

        :raises: keystone.exception.DomainNotFound,
                 keystone.exception.ProjectNotFound,
                 keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def list_grants(self, user_id=None, group_id=None,
                    domain_id=None, project_id=None,
                    inherited_to_projects=False):
        """Lists assignments/grants.

        :raises: keystone.exception.UserNotFound,
                 keystone.exception.GroupNotFound,
                 keystone.exception.ProjectNotFound,
                 keystone.exception.DomainNotFound,
                 keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def get_grant(self, role_id, user_id=None, group_id=None,
                  domain_id=None, project_id=None,
                  inherited_to_projects=False):
        """Gets a single assignment/grant matching the given criteria.

        :raises: keystone.exception.UserNotFound,
                 keystone.exception.GroupNotFound,
                 keystone.exception.ProjectNotFound,
                 keystone.exception.DomainNotFound,
                 keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def delete_grant(self, role_id, user_id=None, group_id=None,
                     domain_id=None, project_id=None,
                     inherited_to_projects=False):
        """Deletes assignments/grants.

        :raises: keystone.exception.ProjectNotFound,
                 keystone.exception.DomainNotFound,
                 keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def list_role_assignments(self):
        """Lists all role assignments known to the backend."""
        raise exception.NotImplemented()
    # domain crud
    @abc.abstractmethod
    def create_domain(self, domain_id, domain):
        """Creates a new domain.

        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def list_domains(self, hints):
        """List domains in the system.

        :param hints: filter hints which the driver should
                      implement if at all possible.
        :returns: a list of domain_refs or an empty list.
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def get_domain(self, domain_id):
        """Get a domain by ID.

        :returns: domain_ref
        :raises: keystone.exception.DomainNotFound
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def get_domain_by_name(self, domain_name):
        """Get a domain by name.

        :returns: domain_ref
        :raises: keystone.exception.DomainNotFound
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def update_domain(self, domain_id, domain):
        """Updates an existing domain.

        :raises: keystone.exception.DomainNotFound,
                 keystone.exception.Conflict
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def delete_domain(self, domain_id):
        """Deletes an existing domain.

        :raises: keystone.exception.DomainNotFound
        """
        raise exception.NotImplemented()
    # project crud
    @abc.abstractmethod
    def create_project(self, project_id, project):
        """Creates a new project.

        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def list_projects(self, hints):
        """List projects in the system.

        :param hints: filter hints which the driver should
                      implement if at all possible.
        :returns: a list of project_refs or an empty list.
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def list_projects_in_domain(self, domain_id):
        """List projects in the domain.

        :param domain_id: the driver MUST only return projects
                          within this domain.
        :returns: a list of project_refs or an empty list.
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def list_projects_for_user(self, user_id, group_ids, hints):
        """List all projects associated with a given user.

        :param user_id: the user in question
        :param group_ids: the groups this user is a member of. This list is
                          built in the Manager, so that the driver itself
                          does not have to call across to identity.
        :param hints: filter hints which the driver should
                      implement if at all possible.
        :returns: a list of project_refs or an empty list.
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def get_project(self, project_id):
        """Get a project by ID.

        :returns: project_ref
        :raises: keystone.exception.ProjectNotFound
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def update_project(self, project_id, project):
        """Updates an existing project.

        :raises: keystone.exception.ProjectNotFound,
                 keystone.exception.Conflict
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def delete_project(self, project_id):
        """Deletes an existing project.

        :raises: keystone.exception.ProjectNotFound
        """
        raise exception.NotImplemented()
    # role crud
    @abc.abstractmethod
    def create_role(self, role_id, role):
        """Creates a new role.

        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def list_roles(self, hints):
        """List roles in the system.

        :param hints: filter hints which the driver should
                      implement if at all possible.
        :returns: a list of role_refs or an empty list.
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def get_role(self, role_id):
        """Get a role by ID.

        :returns: role_ref
        :raises: keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def update_role(self, role_id, role):
        """Updates an existing role.

        :raises: keystone.exception.RoleNotFound,
                 keystone.exception.Conflict
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def delete_role(self, role_id):
        """Deletes an existing role.

        :raises: keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()
    #TODO(ayoung): determine what else these two functions raise
    @abc.abstractmethod
    def delete_user(self, user_id):
        """Deletes all assignments for a user.

        :raises: keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()
    @abc.abstractmethod
    def delete_group(self, group_id):
        """Deletes all assignments for a group.

        :raises: keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()
    #domain management functions for backends that only allow a single domain.
    #currently, this is only LDAP, but might be used by PAM or other backends
    #as well. This is used by both identity and assignment drivers.
    def _set_default_domain(self, ref):
        """If the domain ID has not been set, set it to the default."""
        if isinstance(ref, dict):
            if 'domain_id' not in ref:
                # Copy before mutating so the caller's dict is untouched.
                ref = ref.copy()
                ref['domain_id'] = CONF.identity.default_domain_id
            return ref
        elif isinstance(ref, list):
            return [self._set_default_domain(x) for x in ref]
        else:
            raise ValueError(_('Expected dict or list: %s') % type(ref))
    def _validate_default_domain(self, ref):
        """Validate that either the default domain or nothing is specified.

        Also removes the domain from the ref so that LDAP doesn't have to
        persist the attribute.
        """
        ref = ref.copy()
        domain_id = ref.pop('domain_id', CONF.identity.default_domain_id)
        self._validate_default_domain_id(domain_id)
        return ref
    def _validate_default_domain_id(self, domain_id):
        """Validate that the domain ID specified belongs to the default domain.

        :raises: keystone.exception.DomainNotFound
        """
        if domain_id != CONF.identity.default_domain_id:
            raise exception.DomainNotFound(domain_id=domain_id)
|
|
import datetime
from django.db import models
class Place(models.Model):
    # Concrete base model; the subclasses below exercise multi-table
    # inheritance (each concrete subclass links back to a Place row).
    name = models.CharField(max_length=50)
    address = models.CharField(max_length=80)
    class Meta:
        ordering = ('name',)
    def __str__(self):
        return "%s the place" % self.name
class Restaurant(Place):
    # Multi-table inheritance with the implicit parent link.
    serves_hot_dogs = models.BooleanField(default=False)
    serves_pizza = models.BooleanField(default=False)
    def __str__(self):
        return "%s the restaurant" % self.name
class ItalianRestaurant(Restaurant):
    # Second level of multi-table inheritance (Place -> Restaurant -> here).
    serves_gnocchi = models.BooleanField(default=False)
    def __str__(self):
        return "%s the italian restaurant" % self.name
class ParkingLot(Place):
    # An explicit link to the parent (we can control the attribute name).
    parent = models.OneToOneField(Place, models.CASCADE, primary_key=True, parent_link=True)
    capacity = models.IntegerField()
    def __str__(self):
        return "%s the parking lot" % self.name
class ParkingLot3(Place):
    # The parent_link connector need not be the pk on the model.
    primary_key = models.AutoField(primary_key=True)
    parent = models.OneToOneField(Place, models.CASCADE, parent_link=True)
class ParkingLot4(models.Model):
    # Test parent_link connector can be discovered in abstract classes.
    parent = models.OneToOneField(Place, models.CASCADE, parent_link=True)
    class Meta:
        abstract = True
class ParkingLot4A(ParkingLot4, Place):
    # Abstract base listed first in the bases.
    pass
class ParkingLot4B(Place, ParkingLot4):
    # Same as ParkingLot4A but with the concrete base listed first.
    pass
class Supplier(models.Model):
    name = models.CharField(max_length=50)
    restaurant = models.ForeignKey(Restaurant, models.CASCADE)
    def __str__(self):
        return self.name
class Wholesaler(Supplier):
    # Child model with an extra FK pointing back at its own concrete parent.
    retailer = models.ForeignKey(Supplier, models.CASCADE, related_name='wholesale_supplier')
class Parent(models.Model):
    # Default is the callable itself (datetime.datetime.now), not a value
    # fixed at class-definition time.
    created = models.DateTimeField(default=datetime.datetime.now)
class Child(Parent):
    name = models.CharField(max_length=10)
class SelfRefParent(models.Model):
    parent_data = models.IntegerField()
    # Self-referential FK, nulled when the target row is deleted.
    self_data = models.ForeignKey('self', models.SET_NULL, null=True)
class SelfRefChild(SelfRefParent):
    child_data = models.IntegerField()
class Article(models.Model):
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()
    class Meta:
        ordering = ('-pub_date', 'headline')
    def __str__(self):
        return self.headline
class ArticleWithAuthor(Article):
    author = models.CharField(max_length=100)
class M2MBase(models.Model):
    # Concrete base declaring a many-to-many; M2MChild inherits it.
    articles = models.ManyToManyField(Article)
class M2MChild(M2MBase):
    name = models.CharField(max_length=50)
class Evaluation(Article):
    # Abstract child of a concrete model.
    quality = models.IntegerField()
    class Meta:
        abstract = True
class QualityControl(Evaluation):
    # Concrete again: concrete -> abstract -> concrete chain.
    assignee = models.CharField(max_length=50)
class BaseM(models.Model):
    base_name = models.CharField(max_length=100)
    def __str__(self):
        return self.base_name
class DerivedM(BaseM):
    # Child declares its own explicit primary key.
    customPK = models.IntegerField(primary_key=True)
    derived_name = models.CharField(max_length=100)
    def __str__(self):
        return "PK = %d, base_name = %s, derived_name = %s" % (
            self.customPK, self.base_name, self.derived_name)
class AuditBase(models.Model):
    planned_date = models.DateField()
    class Meta:
        abstract = True
        verbose_name_plural = 'Audits'
class CertificationAudit(AuditBase):
    # Inherits the parent's Meta and re-declares abstract, so this layer
    # also produces no table.
    class Meta(AuditBase.Meta):
        abstract = True
class InternalCertificationAudit(CertificationAudit):
    auditing_dept = models.CharField(max_length=20)
# Abstract classes don't get m2m tables autocreated.
class Person(models.Model):
    name = models.CharField(max_length=100)
    class Meta:
        ordering = ('name',)
    def __str__(self):
        return self.name
class AbstractEvent(models.Model):
    name = models.CharField(max_length=100)
    # "%(class)s" in related_name is substituted per concrete subclass,
    # giving each one its own reverse accessor on Person.
    attendees = models.ManyToManyField(Person, related_name="%(class)s_set")
    class Meta:
        abstract = True
        ordering = ('name',)
    def __str__(self):
        return self.name
class BirthdayParty(AbstractEvent):
    pass
class BachelorParty(AbstractEvent):
    pass
class MessyBachelorParty(BachelorParty):
    # Concrete subclass of a concrete subclass of the abstract base.
    pass
# Check concrete -> abstract -> concrete inheritance
class SearchableLocation(models.Model):
    keywords = models.CharField(max_length=256)
class Station(SearchableLocation):
    # Abstract middle layer between two concrete models.
    name = models.CharField(max_length=128)
    class Meta:
        abstract = True
class BusStation(Station):
    inbound = models.BooleanField(default=False)
class TrainStation(Station):
    zone = models.IntegerField()
class User(models.Model):
    username = models.CharField(max_length=30, unique=True)
class Profile(User):
    # Child with its own AutoField pk alongside the implicit parent link.
    profile_id = models.AutoField(primary_key=True)
    extra = models.CharField(max_length=30, blank=True)
# Check concrete + concrete -> concrete -> concrete
class Politician(models.Model):
    politician_id = models.AutoField(primary_key=True)
    title = models.CharField(max_length=50)
class Congressman(Person, Politician):
    # Diamond-free multiple inheritance from two concrete bases.
    state = models.CharField(max_length=2)
class Senator(Congressman):
    pass
|
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import typing as T
import subprocess, os
from .. import coredata
from .compilers import (
clike_debug_args,
Compiler,
)
from .mixins.clike import CLikeCompiler
from .mixins.gnu import (
GnuCompiler, gnulike_buildtype_args, gnu_optimization_args,
)
from .mixins.intel import IntelGnuLikeCompiler, IntelVisualStudioLikeCompiler
from .mixins.clang import ClangCompiler
from .mixins.elbrus import ElbrusCompiler
from .mixins.pgi import PGICompiler
from .. import mlog
from mesonbuild.mesonlib import (
version_compare, EnvironmentException, MesonException, MachineChoice, LibType
)
if T.TYPE_CHECKING:
from ..envconfig import MachineInfo
class FortranCompiler(CLikeCompiler, Compiler):

    """Base class shared by all Fortran compiler drivers."""

    language = 'fortran'

    def __init__(self, exelist, version, for_machine: MachineChoice,
                 is_cross, info: 'MachineInfo', exe_wrapper=None, **kwargs):
        Compiler.__init__(self, exelist, version, for_machine, info, **kwargs)
        CLikeCompiler.__init__(self, is_cross, exe_wrapper)
        self.id = 'unknown'

    def has_function(self, funcname, prefix, env, *, extra_args=None, dependencies=None):
        """Fortran has no meaningful has_function check; point users at links()."""
        raise MesonException('Fortran does not have "has_function" capability.\n'
                             'It is better to test if a Fortran capability is working like:\n\n'
                             "meson.get_compiler('fortran').links('block; end block; end program')\n\n"
                             'that example is to see if the compiler has Fortran 2008 Block element.')

    def sanity_check(self, work_dir: Path, environment):
        """
        Check to be sure a minimal program can compile and execute
        with this compiler & platform.

        :raises EnvironmentException: if compilation fails or the produced
            executable does not run.
        """
        work_dir = Path(work_dir)
        source_name = work_dir / 'sanitycheckf.f90'
        binary_name = work_dir / 'sanitycheckf'
        if binary_name.is_file():
            binary_name.unlink()
        source_name.write_text('print *, "Fortran compilation is working."; end')
        extra_flags = []
        extra_flags += environment.coredata.get_external_args(self.for_machine, self.language)
        extra_flags += environment.coredata.get_external_link_args(self.for_machine, self.language)
        extra_flags += self.get_always_args()
        # %% build the test executable "sanitycheckf"
        # cwd=work_dir is necessary on Windows especially for Intel compilers to avoid error: cannot write on sanitycheckf.obj
        # this is a defect with how Windows handles files and ifort's object file-writing behavior vis concurrent ProcessPoolExecutor.
        # This simple workaround solves the issue.
        # FIXME: cwd=str(work_dir) is for Python 3.5 on Windows, when 3.5 is deprecated, this can become cwd=work_dir
        returncode = subprocess.run(self.exelist + extra_flags + [str(source_name), '-o', str(binary_name)],
                                    cwd=str(work_dir)).returncode
        if returncode != 0:
            raise EnvironmentException('Compiler %s can not compile programs.' % self.name_string())
        if self.is_cross:
            if self.exe_wrapper is None:
                # Can't check if the binaries run so we have to assume they do
                return
            cmdlist = self.exe_wrapper + [str(binary_name)]
        else:
            cmdlist = [str(binary_name)]
        # %% Run the test executable
        try:
            returncode = subprocess.run(cmdlist, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode
            if returncode != 0:
                raise EnvironmentException('Executables created by Fortran compiler %s are not runnable.' % self.name_string())
        except OSError:
            raise EnvironmentException('Executables created by Fortran compiler %s are not runnable.' % self.name_string())

    def get_std_warn_args(self, level):
        # BUGFIX: 'std_warn_args' was never defined on this class (nor in this
        # file), so this method raised AttributeError when called unless a
        # base class happened to provide it. Fall back to an empty list while
        # still honouring any definition supplied elsewhere in the MRO.
        return getattr(FortranCompiler, 'std_warn_args', [])

    def get_buildtype_args(self, buildtype):
        return gnulike_buildtype_args[buildtype]

    def get_optimization_args(self, optimization_level):
        return gnu_optimization_args[optimization_level]

    def get_debug_args(self, is_debug):
        return clike_debug_args[is_debug]

    def get_dependency_gen_args(self, outtarget, outfile):
        # No dependency generation by default; subclasses override this.
        return []

    def get_preprocess_only_args(self):
        return ['-cpp'] + super().get_preprocess_only_args()

    def get_module_incdir_args(self):
        return ('-I', )

    def get_module_outdir_args(self, path):
        return ['-module', path]

    def compute_parameters_with_absolute_paths(self, parameter_list, build_dir):
        """Rewrite relative -I/-L paths in *parameter_list* to be absolute
        relative to *build_dir* (mutates and returns the list)."""
        for idx, i in enumerate(parameter_list):
            if i[:2] == '-I' or i[:2] == '-L':
                parameter_list[idx] = i[:2] + os.path.normpath(os.path.join(build_dir, i[2:]))
        return parameter_list

    def module_name_to_filename(self, module_name: str) -> str:
        """Map a Fortran module/submodule name to the module file name this
        compiler family produces (vendors disagree on the submodule
        separator and the file extension)."""
        if '_' in module_name:  # submodule
            s = module_name.lower()
            if self.id in ('gcc', 'intel', 'intel-cl'):
                filename = s.replace('_', '@') + '.smod'
            elif self.id in ('pgi', 'flang'):
                filename = s.replace('_', '-') + '.mod'
            else:
                filename = s + '.mod'
        else:  # module
            filename = module_name.lower() + '.mod'
        return filename

    def find_library(self, libname, env, extra_dirs, libtype: LibType = LibType.PREFER_SHARED):
        code = 'stop; end program'
        return self.find_library_impl(libname, env, extra_dirs, code, libtype)

    def has_multi_arguments(self, args: T.Sequence[str], env):
        """Check whether the compiler accepts all of *args* together.

        BUGFIX: operate on a private copy of *args*. The previous
        implementation appended the synthesized '-W...' flags directly to
        the caller's sequence (mutating it, and crashing on tuples).
        """
        args = list(args)
        for arg in args[:]:
            # some compilers, e.g. GCC, don't warn for unsupported warning-disable
            # flags, so when we are testing a flag like "-Wno-forgotten-towel", also
            # check the equivalent enable flag too "-Wforgotten-towel"
            # GCC does error for "-fno-foobar"
            if arg.startswith('-Wno-'):
                args.append('-W' + arg[5:])
            if arg.startswith('-Wl,'):
                mlog.warning('{} looks like a linker argument, '
                             'but has_argument and other similar methods only '
                             'support checking compiler arguments. Using them '
                             'to check linker arguments are never supported, '
                             'and results are likely to be wrong regardless of '
                             'the compiler you are using. has_link_argument or '
                             'other similar method can be used instead.'
                             .format(arg))
        code = 'stop; end program'
        return self.has_arguments(args, env, code, mode='compile')
class GnuFortranCompiler(GnuCompiler, FortranCompiler):

    """gfortran driver: GNU warning levels plus -std= selection gated on
    the compiler's release version."""

    def __init__(self, exelist, version, for_machine: MachineChoice,
                 is_cross, info: 'MachineInfo', exe_wrapper=None,
                 defines=None, **kwargs):
        FortranCompiler.__init__(self, exelist, version, for_machine,
                                 is_cross, info, exe_wrapper, **kwargs)
        GnuCompiler.__init__(self, defines)
        basic_warnings = ['-Wall']
        self.warn_args = {
            '0': [],
            '1': basic_warnings,
            '2': basic_warnings + ['-Wextra'],
            '3': basic_warnings + ['-Wextra', '-Wpedantic', '-fimplicit-none'],
        }

    def get_options(self):
        opts = FortranCompiler.get_options(self)
        # Newer language standards are only offered when this gfortran is
        # recent enough to understand them.
        fortran_stds = ['legacy', 'f95', 'f2003']
        for minimum_version, std in (('>=4.4.0', 'f2008'),
                                     ('>=8.0.0', 'f2018')):
            if version_compare(self.version, minimum_version):
                fortran_stds.append(std)
        opts.update({
            'std': coredata.UserComboOption(
                'Fortran language standard to use',
                ['none'] + fortran_stds,
                'none',
            ),
        })
        return opts

    def get_option_compile_args(self, options) -> T.List[str]:
        chosen = options['std']
        if chosen.value == 'none':
            return []
        return ['-std=' + chosen.value]

    def get_dependency_gen_args(self, outtarget, outfile) -> T.List[str]:
        # Disabled until this is fixed:
        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=62162
        # return ['-cpp', '-MD', '-MQ', outtarget]
        return []

    def get_module_outdir_args(self, path: str) -> T.List[str]:
        return ['-J' + path]

    def language_stdlib_only_link_flags(self) -> T.List[str]:
        return ['-lgfortran', '-lm']

    def has_header(self, hname, prefix, env, *, extra_args=None, dependencies=None, disable_cache=False):
        '''
        Derived from mixins/clike.py:has_header, but without C-style usage of
        __has_include which breaks with GCC-Fortran 10:
        https://github.com/mesonbuild/meson/issues/7017
        '''
        code = '{prefix}\n#include <{header}>'.format(prefix=prefix, header=hname)
        return self.compiles(code, env, extra_args=extra_args,
                             dependencies=dependencies, mode='preprocess',
                             disable_cache=disable_cache)
class ElbrusFortranCompiler(GnuFortranCompiler, ElbrusCompiler):
    """Elbrus Fortran driver: inherits the GNU Fortran behaviour and layers
    the Elbrus mixin on top."""
    def __init__(self, exelist, version, for_machine: MachineChoice,
                 is_cross, info: 'MachineInfo', exe_wrapper=None,
                 defines=None, **kwargs):
        GnuFortranCompiler.__init__(self, exelist, version, for_machine,
                                    is_cross, info, exe_wrapper, defines,
                                    **kwargs)
        ElbrusCompiler.__init__(self)
class G95FortranCompiler(FortranCompiler):
    """Driver for the G95 Fortran compiler."""
    LINKER_PREFIX = '-Wl,'
    def __init__(self, exelist, version, for_machine: MachineChoice,
                 is_cross, info: 'MachineInfo', exe_wrapper=None, **kwargs):
        FortranCompiler.__init__(self, exelist, version, for_machine,
                                 is_cross, info, exe_wrapper, **kwargs)
        self.id = 'g95'
        default_warn_args = ['-Wall']
        self.warn_args = {'0': [],
                          '1': default_warn_args,
                          '2': default_warn_args + ['-Wextra'],
                          '3': default_warn_args + ['-Wextra', '-pedantic']}
    def get_module_outdir_args(self, path: str) -> T.List[str]:
        # g95 uses -fmod= rather than the base class's -module.
        return ['-fmod=' + path]
    def get_no_warn_args(self):
        # FIXME: Confirm that there's no compiler option to disable all warnings
        return []
class SunFortranCompiler(FortranCompiler):
    """Driver for the Oracle/Sun Studio Fortran compiler."""
    LINKER_PREFIX = '-Wl,'
    def __init__(self, exelist, version, for_machine: MachineChoice,
                 is_cross, info: 'MachineInfo', exe_wrapper=None,
                 **kwargs):
        FortranCompiler.__init__(self, exelist, version, for_machine, is_cross, info, exe_wrapper, **kwargs)
        self.id = 'sun'
    def get_dependency_gen_args(self, outtarget, outfile) -> T.List[str]:
        return ['-fpp']
    def get_always_args(self):
        return []
    def get_warn_args(self, level):
        # No warning-level flags are configured for this compiler.
        return []
    def get_module_incdir_args(self):
        # Sun uses -M (not -I) to locate module files.
        return ('-M', )
    def get_module_outdir_args(self, path: str) -> T.List[str]:
        return ['-moddir=' + path]
    def openmp_flags(self) -> T.List[str]:
        return ['-xopenmp']
class IntelFortranCompiler(IntelGnuLikeCompiler, FortranCompiler):

    """Driver for Intel ifort with the POSIX-style command line."""

    def __init__(self, exelist, version, for_machine: MachineChoice,
                 is_cross, info: 'MachineInfo', exe_wrapper=None,
                 **kwargs):
        self.file_suffixes = ('f90', 'f', 'for', 'ftn', 'fpp')
        FortranCompiler.__init__(self, exelist, version, for_machine,
                                 is_cross, info, exe_wrapper, **kwargs)
        # FIXME: Add support for OS X and Windows in detect_fortran_compiler so
        # we are sent the type of compiler
        IntelGnuLikeCompiler.__init__(self)
        self.id = 'intel'
        default_warn_args = ['-warn', 'general', '-warn', 'truncated_source']
        self.warn_args = {'0': [],
                          '1': default_warn_args,
                          '2': default_warn_args + ['-warn', 'unused'],
                          '3': ['-warn', 'all']}

    def get_options(self):
        """Add the 'std' combo option; ifort accepts every listed standard."""
        opts = FortranCompiler.get_options(self)
        fortran_stds = ['legacy', 'f95', 'f2003', 'f2008', 'f2018']
        opts.update({
            'std': coredata.UserComboOption(
                'Fortran language standard to use',
                ['none'] + fortran_stds,
                'none',
            ),
        })
        return opts

    def get_option_compile_args(self, options) -> T.List[str]:
        args = []
        std = options['std']
        # Map Meson's standard names onto ifort's -stand dialect names.
        stds = {'legacy': 'none', 'f95': 'f95', 'f2003': 'f03', 'f2008': 'f08', 'f2018': 'f18'}
        if std.value != 'none':
            args.append('-stand=' + stds[std.value])
        return args

    def get_preprocess_only_args(self) -> T.List[str]:
        return ['-cpp', '-EP']

    def get_always_args(self):
        """Ifort doesn't have -pipe."""
        val = super().get_always_args()
        # ROBUSTNESS FIX: only remove '-pipe' when present; the unconditional
        # list.remove() raised ValueError whenever the parent's always-args
        # did not contain it.
        if '-pipe' in val:
            val.remove('-pipe')
        return val

    def language_stdlib_only_link_flags(self) -> T.List[str]:
        return ['-lifcore', '-limf']

    def get_dependency_gen_args(self, outtarget: str, outfile: str) -> T.List[str]:
        return ['-gen-dep=' + outtarget, '-gen-depformat=make']
class IntelClFortranCompiler(IntelVisualStudioLikeCompiler, FortranCompiler):
    """Driver for Intel ifort with the MSVC-style (/flag) command line."""
    file_suffixes = ['f90', 'f', 'for', 'ftn', 'fpp']
    always_args = ['/nologo']
    def __init__(self, exelist, version, for_machine: MachineChoice,
                 is_cross, target: str, info: 'MachineInfo', exe_wrapper=None,
                 **kwargs):
        FortranCompiler.__init__(self, exelist, version, for_machine,
                                 is_cross, info, exe_wrapper, **kwargs)
        IntelVisualStudioLikeCompiler.__init__(self, target)
        default_warn_args = ['/warn:general', '/warn:truncated_source']
        self.warn_args = {'0': [],
                          '1': default_warn_args,
                          '2': default_warn_args + ['/warn:unused'],
                          '3': ['/warn:all']}
    def get_options(self):
        opts = FortranCompiler.get_options(self)
        fortran_stds = ['legacy', 'f95', 'f2003', 'f2008', 'f2018']
        opts.update({
            'std': coredata.UserComboOption(
                'Fortran language standard to use',
                ['none'] + fortran_stds,
                'none',
            ),
        })
        return opts
    def get_option_compile_args(self, options) -> T.List[str]:
        args = []
        std = options['std']
        # Map Meson's standard names onto ifort's /stand dialect names.
        stds = {'legacy': 'none', 'f95': 'f95', 'f2003': 'f03', 'f2008': 'f08', 'f2018': 'f18'}
        if std.value != 'none':
            args.append('/stand:' + stds[std.value])
        return args
    def get_module_outdir_args(self, path) -> T.List[str]:
        return ['/module:' + path]
class PathScaleFortranCompiler(FortranCompiler):
    """Driver for the PathScale Fortran compiler."""
    def __init__(self, exelist, version, for_machine: MachineChoice,
                 is_cross, info: 'MachineInfo', exe_wrapper=None,
                 **kwargs):
        FortranCompiler.__init__(self, exelist, version, for_machine,
                                 is_cross, info, exe_wrapper, **kwargs)
        self.id = 'pathscale'
        # Levels 1-3 are identical: -fullwarn is the only warning knob used.
        default_warn_args = ['-fullwarn']
        self.warn_args = {'0': [],
                          '1': default_warn_args,
                          '2': default_warn_args,
                          '3': default_warn_args}
    def openmp_flags(self) -> T.List[str]:
        return ['-mp']
class PGIFortranCompiler(PGICompiler, FortranCompiler):
    """Driver for the PGI Fortran compiler."""
    def __init__(self, exelist, version, for_machine: MachineChoice,
                 is_cross, info: 'MachineInfo', exe_wrapper=None,
                 **kwargs):
        FortranCompiler.__init__(self, exelist, version, for_machine,
                                 is_cross, info, exe_wrapper, **kwargs)
        PGICompiler.__init__(self)
        default_warn_args = ['-Minform=inform']
        self.warn_args = {'0': [],
                          '1': default_warn_args,
                          '2': default_warn_args,
                          '3': default_warn_args + ['-Mdclchk']}
    def language_stdlib_only_link_flags(self) -> T.List[str]:
        # PGI runtime libraries needed when linking Fortran objects with a
        # non-Fortran linker driver.
        return ['-lpgf90rtl', '-lpgf90', '-lpgf90_rpm1', '-lpgf902',
                '-lpgf90rtl', '-lpgftnrtl', '-lrt']
class FlangFortranCompiler(ClangCompiler, FortranCompiler):
    """Driver for (classic) Flang, the LLVM-based Fortran compiler."""
    def __init__(self, exelist, version, for_machine: MachineChoice,
                 is_cross, info: 'MachineInfo', exe_wrapper=None,
                 **kwargs):
        FortranCompiler.__init__(self, exelist, version, for_machine,
                                 is_cross, info, exe_wrapper, **kwargs)
        ClangCompiler.__init__(self, [])
        self.id = 'flang'
        default_warn_args = ['-Minform=inform']
        self.warn_args = {'0': [],
                          '1': default_warn_args,
                          '2': default_warn_args,
                          '3': default_warn_args}
    def language_stdlib_only_link_flags(self) -> T.List[str]:
        return ['-lflang', '-lpgmath']
class Open64FortranCompiler(FortranCompiler):
    """Driver for the Open64 Fortran compiler."""
    def __init__(self, exelist, version, for_machine: MachineChoice,
                 is_cross, info: 'MachineInfo', exe_wrapper=None,
                 **kwargs):
        FortranCompiler.__init__(self, exelist, version, for_machine,
                                 is_cross, info, exe_wrapper, **kwargs)
        self.id = 'open64'
        # Levels 1-3 are identical: -fullwarn is the only warning knob used.
        default_warn_args = ['-fullwarn']
        self.warn_args = {'0': [],
                          '1': default_warn_args,
                          '2': default_warn_args,
                          '3': default_warn_args}
    def openmp_flags(self) -> T.List[str]:
        return ['-mp']
class NAGFortranCompiler(FortranCompiler):
    """Driver for the NAG (nagfor) Fortran compiler."""
    def __init__(self, exelist, version, for_machine: MachineChoice,
                 is_cross, info: 'MachineInfo', exe_wrapper=None,
                 **kwargs):
        FortranCompiler.__init__(self, exelist, version, for_machine,
                                 is_cross, info, exe_wrapper, **kwargs)
        self.id = 'nagfor'
    def get_warn_args(self, level):
        # No warning-level flags are configured for nagfor.
        return []
    def get_module_outdir_args(self, path) -> T.List[str]:
        return ['-mdir', path]
    def openmp_flags(self) -> T.List[str]:
        return ['-openmp']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.