repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
ryfeus/lambda-packs | Tensorflow/source/tensorflow/python/ops/nn_impl.py | 5 | 51951 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Implementation of Neural Net (NN) functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import candidate_sampling_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
def log_poisson_loss(targets, log_input, compute_full_loss=False, name=None):
  """Computes the log Poisson loss given `log_input`.

  Returns the negative log-likelihood of `targets` under a Poisson
  distribution whose log-rate is `log_input`.  Writing `c = log_input` and
  `z = targets`, the componentwise loss is

      exp(c) - z * c  [+ z * log(z) - z + 0.5 * log(2 * pi * z)]

  The bracketed term is Stirling's approximation of `log(z!)`.  It is
  constant with respect to `log_input`, so by default it is dropped; that
  has no effect on optimization but makes relative loss comparisons
  inexact.  Pass `compute_full_loss=True` to include it.  The Stirling
  term is zeroed wherever `0 <= targets <= 1`, where the approximation
  does not apply.

  Args:
    targets: A `Tensor` of the same type and shape as `log_input`.
    log_input: A `Tensor` of type `float32` or `float64`.
    compute_full_loss: whether to add the Stirling approximation of the
      log-factorial term.  If false, that constant term is dropped in
      favor of more efficient optimization.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `log_input` with the componentwise
    log Poisson losses.

  Raises:
    ValueError: If `log_input` and `targets` do not have the same shape.
  """
  with ops.name_scope(name, "log_poisson_loss", [log_input, targets]) as name:
    log_input = ops.convert_to_tensor(log_input, name="log_input")
    targets = ops.convert_to_tensor(targets, name="targets")
    try:
      targets.get_shape().merge_with(log_input.get_shape())
    except ValueError:
      raise ValueError(
          "log_input and targets must have the same shape (%s vs %s)" %
          (log_input.get_shape(), targets.get_shape()))

    loss = math_ops.exp(log_input) - log_input * targets
    if compute_full_loss:
      # Build the constants explicitly so their dtype matches the targets.
      half = constant_op.constant(0.5, dtype=targets.dtype)
      two_pi = constant_op.constant(2 * math.pi, dtype=targets.dtype)
      stirling = (targets * math_ops.log(targets) - targets +
                  half * math_ops.log(two_pi * targets))
      zeros = array_ops.zeros_like(targets, dtype=targets.dtype)
      ones = array_ops.ones_like(targets, dtype=targets.dtype)
      in_unit_interval = math_ops.logical_and(targets >= zeros,
                                              targets <= ones)
      loss += array_ops.where(in_unit_interval, zeros, stirling)
    return loss
def sigmoid_cross_entropy_with_logits(  # pylint: disable=invalid-name
    _sentinel=None,
    labels=None,
    logits=None,
    name=None):
  """Computes sigmoid cross entropy given `logits`.

  Measures the probability error in discrete classification tasks where
  each class is independent and not mutually exclusive — e.g. multilabel
  classification, where a picture can contain both an elephant and a dog.

  Writing `x = logits` and `z = labels`, the logistic loss simplifies to

      x - x * z + log(1 + exp(-x))

  which overflows in `exp(-x)` for very negative `x`.  The implementation
  therefore uses the numerically stable equivalent

      max(x, 0) - x * z + log(1 + exp(-abs(x)))

  `logits` and `labels` must have the same type and shape.

  Args:
    _sentinel: Used to prevent positional parameters. Internal, do not use.
    labels: A `Tensor` of the same type and shape as `logits`.
    logits: A `Tensor` of type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `logits` with the componentwise
    logistic losses.

  Raises:
    ValueError: If `logits` and `labels` do not have the same shape.
  """
  # pylint: disable=protected-access
  nn_ops._ensure_xent_args("sigmoid_cross_entropy_with_logits", _sentinel,
                           labels, logits)
  # pylint: enable=protected-access
  with ops.name_scope(name, "logistic_loss", [logits, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    try:
      labels.get_shape().merge_with(logits.get_shape())
    except ValueError:
      raise ValueError("logits and labels must have the same shape (%s vs %s)" %
                       (logits.get_shape(), labels.get_shape()))

    # Stable form: max(x, 0) - x * z + log(1 + exp(-abs(x))).
    # max and abs are expressed via `where` so gradients stay defined at 0.
    zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
    is_positive = (logits >= zeros)
    relu_logits = array_ops.where(is_positive, logits, zeros)
    neg_abs_logits = array_ops.where(is_positive, -logits, logits)
    softplus = math_ops.log1p(math_ops.exp(neg_abs_logits))
    return math_ops.add(relu_logits - logits * labels, softplus, name=name)
def weighted_cross_entropy_with_logits(targets, logits, pos_weight, name=None):
  """Computes a weighted sigmoid cross entropy.

  Like `sigmoid_cross_entropy_with_logits()`, except `pos_weight` scales
  the cost of a positive error relative to a negative one, trading off
  recall against precision:

      targets * -log(sigmoid(logits)) * pos_weight +
          (1 - targets) * -log(1 - sigmoid(logits))

  Writing `x = logits`, `z = targets`, `q = pos_weight`, and letting
  `l = 1 + (q - 1) * z`, the loss reduces to

      (1 - z) * x + l * log(1 + exp(-x))

  which, to stay numerically stable for large `|x|`, is computed as

      (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))

  `logits` and `targets` must have the same type and shape.

  Args:
    targets: A `Tensor` of the same type and shape as `logits`.
    logits: A `Tensor` of type `float32` or `float64`.
    pos_weight: A coefficient to use on the positive examples.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `logits` with the componentwise
    weighted logistic losses.

  Raises:
    ValueError: If `logits` and `targets` do not have the same shape.
  """
  with ops.name_scope(name, "logistic_loss", [logits, targets]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    targets = ops.convert_to_tensor(targets, name="targets")
    try:
      targets.get_shape().merge_with(logits.get_shape())
    except ValueError:
      raise ValueError(
          "logits and targets must have the same shape (%s vs %s)" %
          (logits.get_shape(), targets.get_shape()))

    # l = 1 + (q - 1) * z; stable softplus via -abs(x) plus a relu(-x) term.
    loss_weight = 1 + (pos_weight - 1) * targets
    stable_softplus = (math_ops.log1p(math_ops.exp(-math_ops.abs(logits))) +
                       nn_ops.relu(-logits))
    return math_ops.add(
        (1 - targets) * logits,
        loss_weight * stable_softplus,
        name=name)
def relu_layer(x, weights, biases, name=None):
  """Computes Relu(x * weights + biases).

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "nn_relu_layer" is used.

  Returns:
    A 2-D Tensor computing relu(matmul(x, weights) + biases).
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "relu_layer", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    pre_activation = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
    return nn_ops.relu(pre_activation, name=name)
def l2_normalize(x, dim, epsilon=1e-12, name=None):
  """Normalizes along dimension `dim` using an L2 norm.

  For a 1-D tensor with `dim = 0`, computes

      output = x / sqrt(max(sum(x**2), epsilon))

  For higher-rank `x`, each 1-D slice along dimension `dim` is normalized
  independently.

  Args:
    x: A `Tensor`.
    dim: Dimension along which to normalize.  A scalar or a vector of
      integers.
    epsilon: A lower bound value for the norm.  Will use `sqrt(epsilon)` as
      the divisor if `norm < sqrt(epsilon)`.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same shape as `x`.
  """
  with ops.name_scope(name, "l2_normalize", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # Clamp the squared norm below by epsilon before the reciprocal sqrt.
    sum_sq = math_ops.reduce_sum(math_ops.square(x), dim, keep_dims=True)
    inv_norm = math_ops.rsqrt(math_ops.maximum(sum_sq, epsilon))
    return math_ops.multiply(x, inv_norm, name=name)
def zero_fraction(value, name=None):
  """Returns the fraction of zeros in `value`.

  If `value` is empty, the result is `nan`.

  Useful in summaries to measure and report sparsity, e.g.

  ```python
  z = tf.nn.relu(...)
  summ = tf.summary.scalar('sparsity', tf.nn.zero_fraction(z))
  ```

  Args:
    value: A tensor of numeric type.
    name: A name for the operation (optional).

  Returns:
    The fraction of zeros in `value`, with type `float32`.
  """
  with ops.name_scope(name, "zero_fraction", [value]):
    value = ops.convert_to_tensor(value, name="value")
    zero = constant_op.constant(0, dtype=value.dtype, name="zero")
    is_zero = math_ops.cast(math_ops.equal(value, zero), dtypes.float32)
    return math_ops.reduce_mean(is_zero)
# pylint: disable=redefined-builtin
def depthwise_conv2d(input,
                     filter,
                     strides,
                     padding,
                     rate=None,
                     name=None,
                     data_format=None):
  """Depthwise 2-D convolution.

  Given a 4D input tensor ('NHWC' or 'NCHW' data formats) and a filter
  tensor of shape
  `[filter_height, filter_width, in_channels, channel_multiplier]`
  containing `in_channels` convolutional filters of depth 1, applies a
  different filter to each input channel (expanding 1 channel to
  `channel_multiplier` channels each) and concatenates the results, so the
  output has `in_channels * channel_multiplier` channels.

  In detail,

      output[b, i, j, k * channel_multiplier + q] = sum_{di, dj}
           filter[di, dj, k, q] * input[b, strides[1] * i + rate[0] * di,
                                           strides[2] * j + rate[1] * dj, k]

  Must have `strides[0] = strides[3] = 1`.  For the most common case of the
  same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
  If any value in `rate` is greater than 1, atrous depthwise convolution is
  performed, in which case all values of `strides` must be 1.

  Args:
    input: 4-D with shape according to `data_format`.
    filter: 4-D with shape
      `[filter_height, filter_width, in_channels, channel_multiplier]`.
    strides: 1-D of size 4.  The stride of the sliding window for each
      dimension of `input`.
    padding: A string, either `'VALID'` or `'SAME'`.  The padding algorithm.
      See the @{tf.nn.convolution$comment here}
    rate: 1-D of size 2.  The dilation rate in which we sample input values
      across the `height` and `width` dimensions in atrous convolution.  If
      it is greater than 1, then all values of strides must be 1.
    name: A name for this operation (optional).
    data_format: The data format for input. Either "NHWC" (default) or
      "NCHW".

  Returns:
    A 4-D `Tensor` with shape according to `data_format`.  E.g., for
    "NHWC" format, shape is
    `[batch, out_height, out_width, in_channels * channel_multiplier].`
  """
  with ops.name_scope(name, "depthwise", [input, filter]) as name:
    input = ops.convert_to_tensor(input, name="tensor_in")
    filter = ops.convert_to_tensor(filter, name="filter_in")
    if rate is None:
      rate = [1, 1]

    def _op(converted_input, _, op_padding):
      # Invoked by with_space_to_batch once dilation has been folded into
      # space-to-batch transforms; runs the native depthwise kernel.
      return nn_ops.depthwise_conv2d_native(
          input=converted_input,
          filter=filter,
          strides=strides,
          padding=op_padding,
          data_format=data_format,
          name=name)

    return nn_ops.with_space_to_batch(
        input=input,
        filter_shape=array_ops.shape(filter),
        dilation_rate=rate,
        padding=padding,
        data_format=data_format,
        op=_op)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin,line-too-long
def separable_conv2d(input,
                     depthwise_filter,
                     pointwise_filter,
                     strides,
                     padding,
                     rate=None,
                     name=None,
                     data_format=None):
  """2-D convolution with separable filters.

  Performs a depthwise convolution acting separately on channels, followed
  by a pointwise convolution mixing channels.  Note this is separability
  between dimensions `[1, 2]` and `3`, not spatial separability between
  dimensions `1` and `2`.

  In detail,

      output[b, i, j, k] = sum_{di, dj, q, r}
          input[b, strides[1] * i + di, strides[2] * j + dj, q] *
          depthwise_filter[di, dj, q, r] *
          pointwise_filter[0, 0, q * channel_multiplier + r, k]

  `strides` controls the strides for the depthwise convolution only; the
  pointwise convolution has implicit strides of `[1, 1, 1, 1]`.  Must have
  `strides[0] = strides[3] = 1`.  For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
  If any value in `rate` is greater than 1, atrous depthwise convolution is
  performed, in which case all values of `strides` must be 1.

  Args:
    input: 4-D `Tensor` with shape according to `data_format`.
    depthwise_filter: 4-D `Tensor` with shape
      `[filter_height, filter_width, in_channels, channel_multiplier]`.
      Contains `in_channels` convolutional filters of depth 1.
    pointwise_filter: 4-D `Tensor` with shape
      `[1, 1, channel_multiplier * in_channels, out_channels]`.  Pointwise
      filter to mix channels after `depthwise_filter` has convolved
      spatially.
    strides: 1-D of size 4.  The strides for the depthwise convolution for
      each dimension of `input`.
    padding: A string, either `'VALID'` or `'SAME'`.  The padding algorithm.
      See the @{tf.nn.convolution$comment here}
    rate: 1-D of size 2.  The dilation rate in which we sample input values
      across the `height` and `width` dimensions in atrous convolution.  If
      it is greater than 1, then all values of strides must be 1.
    name: A name for this operation (optional).
    data_format: The data format for input. Either "NHWC" (default) or
      "NCHW".

  Returns:
    A 4-D `Tensor` with shape according to 'data_format'.  For example, with
    data_format="NHWC", shape is [batch, out_height, out_width,
    out_channels].
  """
  with ops.name_scope(name, "separable_conv2d",
                      [input, depthwise_filter, pointwise_filter]) as name:
    input = ops.convert_to_tensor(input, name="tensor_in")
    depthwise_filter = ops.convert_to_tensor(
        depthwise_filter, name="depthwise_filter")
    pointwise_filter = ops.convert_to_tensor(
        pointwise_filter, name="pointwise_filter")

    # The pointwise filter must be 1x1 spatially.
    pointwise_filter_shape = pointwise_filter.get_shape().with_rank(4)
    pointwise_filter_shape[0].assert_is_compatible_with(1)
    pointwise_filter_shape[1].assert_is_compatible_with(1)

    if rate is None:
      rate = [1, 1]

    # Expected op layout in the graph:
    #   depthwise_conv2d  // Conv2D op corresponding to native deptwise conv.
    #   separable_conv2d  // Conv2D op corresponding to the pointwise conv.
    def _op(converted_input, _, op_padding):
      # Invoked by with_space_to_batch with dilation already handled.
      return nn_ops.depthwise_conv2d_native(
          input=converted_input,
          filter=depthwise_filter,
          strides=strides,
          padding=op_padding,
          data_format=data_format,
          name="depthwise")

    depthwise = nn_ops.with_space_to_batch(
        input=input,
        filter_shape=array_ops.shape(depthwise_filter),
        dilation_rate=rate,
        padding=padding,
        data_format=data_format,
        op=_op)

    # Mix channels with the 1x1 pointwise convolution.
    return nn_ops.conv2d(
        depthwise,
        pointwise_filter, [1, 1, 1, 1],
        padding="VALID",
        data_format=data_format,
        name=name)
# pylint: enable=redefined-builtin,line-too-long
def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one pass algorithm on
  an input that's optionally shifted.  See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  Args:
    x: A `Tensor`.
    axes: Array of ints.  Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed.  A
      shift close to the true mean provides the most numerically stable
      results.
    keep_dims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient
      stats.

  Returns:
    Four `Tensor` objects of the same type as `x`:

    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected or None if `shift` is
      None.
  """
  axes = list(set(axes))
  with ops.name_scope(name, "sufficient_statistics", [x, shift]):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if any(x_shape[d].value is None for d in axes):
      # Some reduced dimension is unknown statically; count at runtime.
      x_dims = array_ops.gather(
          math_ops.cast(array_ops.shape(x), x.dtype), axes)
      counts = math_ops.reduce_prod(x_dims, name="count")
    else:
      # All reduced dimensions known statically; fold into a constant.
      total = 1
      for d in axes:
        total *= x_shape[d].value
      counts = constant_op.constant(total, dtype=x.dtype)
    if shift is None:
      m_ss = x
      v_ss = math_ops.square(x)
    else:
      shift = ops.convert_to_tensor(shift, name="shift")
      m_ss = math_ops.subtract(x, shift)
      v_ss = math_ops.squared_difference(x, shift)
    m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss")
    v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss")
    return counts, m_ss, v_ss, shift
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Calculate the mean and variance from the sufficient statistics.

  Args:
    counts: A `Tensor` containing the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the
      (possibly shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics:
      the (possibly shifted) squared sum of the data to compute the
      variance over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "normalize", [counts, mean_ss, variance_ss, shift]):
    divisor = math_ops.reciprocal(counts, name="divisor")
    if shift is None:
      # The mean in the shifted space is already the true mean.
      mean = math_ops.multiply(mean_ss, divisor, name="mean")
      shifted_mean = mean
    else:
      # Undo the shift to recover the true mean.
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
      mean = math_ops.add(shifted_mean, shift, name="mean")
    # Var(x) = E[(x - s)^2] - (E[x - s])^2, invariant under shift s.
    variance = math_ops.subtract(
        math_ops.multiply(variance_ss, divisor),
        math_ops.square(shifted_mean),
        name="variance")
  return (mean, variance)
def moments(x, axes,
            shift=None,  # pylint: disable=unused-argument
            name=None, keep_dims=False):
  """Calculate the mean and variance of `x`.

  The mean and variance are calculated by aggregating the contents of `x`
  across `axes`.  If `x` is 1-D and `axes = [0]` this is just the mean and
  variance of a vector.

  Note: shift is currently not used; the true mean is computed and used.

  When using these moments for batch normalization (see
  `tf.nn.batch_normalization`):

    * for so-called "global normalization", used with convolutional filters
      with shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
    * for simple batch normalization pass `axes=[0]` (batch only).

  Args:
    x: A `Tensor`.
    axes: Array of ints.  Axes along which to compute mean and variance.
    shift: Not used in the current implementation
    name: Name used to scope the operations that compute the moments.
    keep_dims: produce moments with the same dimensionality as the input.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "moments", [x, axes]):
    # fp16 lacks the dynamic range needed to accumulate these statistics,
    # so do the reductions in float32 and cast the results back at the end.
    needs_cast = x.dtype == dtypes.float16
    y = math_ops.cast(x, dtypes.float32) if needs_cast else x
    # True mean, with dims kept so it broadcasts against y below.
    mean = math_ops.reduce_mean(y, axes, keep_dims=True, name="mean")
    # Biased sample variance (not the unbiased estimator).
    variance = math_ops.reduce_mean(
        math_ops.squared_difference(y, array_ops.stop_gradient(mean)),
        axes,
        keep_dims=True,
        name="variance")
    if not keep_dims:
      mean = array_ops.squeeze(mean, axes)
      variance = array_ops.squeeze(variance, axes)
    if needs_cast:
      return (math_ops.cast(mean, dtypes.float16),
              math_ops.cast(variance, dtypes.float16))
    return (mean, variance)
def weighted_moments(x, axes, frequency_weights, name=None, keep_dims=False):
  """Returns the frequency-weighted mean and variance of `x`.

  Args:
    x: A tensor.
    axes: 1-d tensor of int32 values; these are the axes along which
      to compute mean and variance.
    frequency_weights: A tensor of positive weights which can be
      broadcast with x.
    name: Name used to scope the operation.
    keep_dims: Produce moments with the same dimensionality as the input.

  Returns:
    Two tensors: `weighted_mean` and `weighted_variance`.
  """
  with ops.name_scope(name, "weighted_moments", [x, frequency_weights, axes]):
    x = ops.convert_to_tensor(x, name="x")
    frequency_weights = ops.convert_to_tensor(
        frequency_weights, name="frequency_weights")

    # Unlike moments(), this just uses a simpler two-pass method.
    # See the comment in moments() about fp16 precision; it applies here.
    needs_cast = x.dtype == dtypes.float16
    if needs_cast:
      x = math_ops.cast(x, dtypes.float32)
    if frequency_weights.dtype != x.dtype:
      frequency_weights = math_ops.cast(frequency_weights, x.dtype)

    # All reductions below use keep_dims=True regardless of the argument so
    # the intermediates stay broadcast-compatible with the inputs; the
    # requested squeeze is applied at the end.
    weighted_input_sum = math_ops.reduce_sum(
        frequency_weights * x, axes, name="weighted_input_sum", keep_dims=True)

    # The weights need only be broadcast-compatible with x, so expand them
    # to a per-item weight (the shape of frequency_weights * x) rather than
    # reasoning through the broadcast logic for a correct sum_of_weights.
    broadcasted_weights = frequency_weights + array_ops.zeros_like(x)
    sum_of_weights = math_ops.reduce_sum(
        broadcasted_weights, axes, name="sum_of_weights", keep_dims=True)
    divisor = math_ops.reciprocal(sum_of_weights, name="inv_weight_sum")

    weighted_mean = math_ops.multiply(weighted_input_sum, divisor)

    # Second pass: weighted sum of squared deviations from the mean.
    weighted_distsq = math_ops.reduce_sum(
        frequency_weights * math_ops.squared_difference(x, weighted_mean),
        axes,
        name="weighted_distsq",
        keep_dims=True)
    weighted_variance = math_ops.multiply(weighted_distsq, divisor)

    if not keep_dims:
      weighted_mean = array_ops.squeeze(weighted_mean, squeeze_dims=axes)
      weighted_variance = array_ops.squeeze(
          weighted_variance, squeeze_dims=axes)

    if needs_cast:
      weighted_mean = math_ops.cast(weighted_mean, dtypes.float16)
      weighted_variance = math_ops.cast(weighted_variance, dtypes.float16)

    return weighted_mean, weighted_variance
def batch_normalization(x,
                        mean,
                        variance,
                        offset,
                        scale,
                        variance_epsilon,
                        name=None):
  r"""Batch normalization.

  As described in http://arxiv.org/abs/1502.03167.
  Normalizes a tensor by `mean` and `variance`, and applies (optionally) a
  `scale` \\(\gamma\\) to it, as well as an `offset` \\(\beta\\):

  \\(\frac{\gamma(x-\mu)}{\sigma}+\beta\\)

  `mean`, `variance`, `offset` and `scale` are all expected to be of one of
  two shapes:

    * In all generality, they can have the same number of dimensions as the
      input `x`, with identical sizes as `x` for the dimensions that are
      not normalized over (the 'depth' dimension(s)), and dimension 1 for
      the others which are being normalized over.
      `mean` and `variance` in this case would typically be the outputs of
      `tf.nn.moments(..., keep_dims=True)` during training, or running
      averages thereof during inference.
    * In the common case where the 'depth' dimension is the last dimension
      in the input tensor `x`, they may be one dimensional tensors of the
      same size as the 'depth' dimension.
      This is the case for example for the common `[batch, depth]` layout
      of fully-connected layers, and `[batch, height, width, depth]` for
      convolutions.
      `mean` and `variance` in this case would typically be the outputs of
      `tf.nn.moments(..., keep_dims=False)` during training, or running
      averages thereof during inference.

  Args:
    x: Input `Tensor` of arbitrary dimensionality.
    mean: A mean `Tensor`.
    variance: A variance `Tensor`.
    offset: An offset `Tensor`, often denoted \\(\beta\\) in equations, or
      None.  If present, will be added to the normalized tensor.
    scale: A scale `Tensor`, often denoted \\(\gamma\\) in equations, or
      `None`.  If present, the scale is applied to the normalized tensor.
    variance_epsilon: A small float number to avoid dividing by 0.
    name: A name for this operation (optional).

  Returns:
    the normalized, scaled, offset tensor.
  """
  with ops.name_scope(name, "batchnorm", [x, mean, variance, scale, offset]):
    # Fold scale into the reciprocal standard deviation so the whole
    # transform is a single multiply-add: x * inv + (offset - mean * inv).
    inv = math_ops.rsqrt(variance + variance_epsilon)
    if scale is not None:
      inv *= scale
    if offset is not None:
      shift = offset - mean * inv
    else:
      shift = -mean * inv
    return x * inv + shift
def fused_batch_norm(
    x,
    scale,
    offset,  # pylint: disable=invalid-name
    mean=None,
    variance=None,
    epsilon=0.001,
    data_format="NHWC",
    is_training=True,
    name=None):
  r"""Batch normalization.

  As described in http://arxiv.org/abs/1502.03167.

  Args:
    x: Input `Tensor` of 4 dimensions.
    scale: A `Tensor` of 1 dimension for scaling.
    offset: A `Tensor` of 1 dimension for bias.
    mean: A `Tensor` of 1 dimension for population mean used for inference.
    variance: A `Tensor` of 1 dimension for population variance used for
      inference.
    epsilon: A small float number added to the variance of x.
    data_format: The data format for x. Either "NHWC" (default) or "NCHW".
    is_training: A bool value to specify if the operation is used for
      training or inference.
    name: A name for this operation (optional).

  Returns:
    y: A 4D Tensor for the normalized, scaled, offsetted x.
    batch_mean: A 1D Tensor for the mean of x.
    batch_var: A 1D Tensor for the variance of x.

  Raises:
    ValueError: If mean or variance is not None when is_training is True.
  """
  x = ops.convert_to_tensor(x, name="input")
  scale = ops.convert_to_tensor(scale, name="scale")
  offset = ops.convert_to_tensor(offset, name="offset")
  if is_training and (mean is not None or variance is not None):
    raise ValueError("Both 'mean' and 'variance' must be None "
                     "if is_training is True.")
  # The kernel expects tensors; empty constants stand in for "not given".
  if mean is None:
    mean = constant_op.constant([])
  if variance is None:
    variance = constant_op.constant([])

  # CUDNN requires epsilon to be at least 1.001e-5 to avoid an exception
  # (see cudnn.h), so clamp it from below.
  min_epsilon = 1.001e-5
  epsilon = epsilon if epsilon > min_epsilon else min_epsilon

  # TODO(reedwm): In a few weeks, switch to using the V2 version
  # exclusively.  We currently only use the V2 version for float16 inputs,
  # which is not supported by the V1 version.
  # pylint: disable=protected-access
  if x.dtype == dtypes.float16:
    fused_op = gen_nn_ops._fused_batch_norm_v2
  else:
    fused_op = gen_nn_ops._fused_batch_norm
  # pylint: enable=protected-access

  # The last two outputs (saved mean/inv-variance) are dropped here.
  y, batch_mean, batch_var, _, _ = fused_op(
      x,
      scale,
      offset,
      mean,
      variance,
      epsilon=epsilon,
      data_format=data_format,
      is_training=is_training,
      name=name)
  return y, batch_mean, batch_var
def batch_norm_with_global_normalization(t,
                                         m,
                                         v,
                                         beta,
                                         gamma,
                                         variance_epsilon,
                                         scale_after_normalization,
                                         name=None):
  """Batch normalization.

  This op is deprecated.  See `tf.nn.batch_normalization`.

  Args:
    t: A 4D input Tensor.
    m: A 1D mean Tensor with size matching the last dimension of t.
      This is the first output from tf.nn.moments, or a saved moving
      average thereof.
    v: A 1D variance Tensor with size matching the last dimension of t.
      This is the second output from tf.nn.moments, or a saved moving
      average thereof.
    beta: A 1D beta Tensor with size matching the last dimension of t.
      An offset to be added to the normalized tensor.
    gamma: A 1D gamma Tensor with size matching the last dimension of t.
      If "scale_after_normalization" is true, this tensor will be
      multiplied with the normalized tensor.
    variance_epsilon: A small float number to avoid dividing by 0.
    scale_after_normalization: A bool indicating whether the resulted tensor
      needs to be multiplied with gamma.
    name: A name for this operation (optional).

  Returns:
    A batch-normalized `t`.
  """
  # Delegate to batch_normalization; gamma is only passed through when
  # scaling was requested.
  scale = gamma if scale_after_normalization else None
  return batch_normalization(t, m, v, beta, scale, variance_epsilon, name)
def _sum_rows(x):
  """Returns a vector summing up each row of the matrix x."""
  # _sum_rows(x) is equivalent to math_ops.reduce_sum(x, 1) when x is a
  # matrix, but its gradient is more efficient than reduce_sum(x, 1)'s in
  # today's implementation.  Since this helper feeds the nce_loss()
  # computation, which is mostly used for training, the matmul form is
  # preferred.
  num_cols = array_ops.shape(x)[1]
  ones = array_ops.ones(array_ops.stack([num_cols, 1]), x.dtype)
  return array_ops.reshape(math_ops.matmul(x, ones), [-1])
def _compute_sampled_logits(weights,
                            biases,
                            labels,
                            inputs,
                            num_sampled,
                            num_classes,
                            num_true=1,
                            sampled_values=None,
                            subtract_log_q=True,
                            remove_accidental_hits=False,
                            partition_strategy="mod",
                            name=None):
  """Helper function for nce_loss and sampled_softmax_loss functions.

  Computes sampled output training logits and labels suitable for implementing
  e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see
  sampled_softmax_loss).

  Note: In the case where num_true > 1, we assign to each target class
  the target probability 1 / num_true so that the target probabilities
  sum to 1 per-example.

  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
        objects whose concatenation along dimension 0 has shape
        `[num_classes, dim]`. The (possibly-partitioned) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`. The (possibly-partitioned)
        class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size,
        num_true]`. The target classes. Note that this format differs from
        the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
    inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
        activations of the input network.
    num_sampled: An `int`. The number of classes to randomly sample per batch.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`. The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
        `sampled_expected_count`) returned by a `*_candidate_sampler` function.
        (if None, we default to `log_uniform_candidate_sampler`)
    subtract_log_q: A `bool`.  whether to subtract the log expected count of
        the labels in the sample to get the logits of the true labels.
        Default is True.  Turn off for Negative Sampling.
    remove_accidental_hits:  A `bool`.  whether to remove "accidental hits"
        where a sampled class equals one of the target classes.  Default is
        False.
    partition_strategy: A string specifying the partitioning strategy, relevant
        if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
        Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: A name for the operation (optional).

  Returns:
    out_logits, out_labels: `Tensor` objects each with shape
        `[batch_size, num_true + num_sampled]`, for passing to either
        `nn.sigmoid_cross_entropy_with_logits` (NCE) or
        `nn.softmax_cross_entropy_with_logits` (sampled softmax).
  """
  # Normalize `weights` to a plain list of shards so that a single tensor,
  # a list of shards and a PartitionedVariable are all handled uniformly.
  if isinstance(weights, variables.PartitionedVariable):
    weights = list(weights)
  if not isinstance(weights, list):
    weights = [weights]

  with ops.name_scope(name, "compute_sampled_logits",
                      weights + [biases, inputs, labels]):
    if labels.dtype != dtypes.int64:
      labels = math_ops.cast(labels, dtypes.int64)
    labels_flat = array_ops.reshape(labels, [-1])

    # Sample the negative labels.
    #   sampled shape: [num_sampled] tensor
    #   true_expected_count shape = [batch_size, 1] tensor
    #   sampled_expected_count shape = [num_sampled] tensor
    if sampled_values is None:
      sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler(
          true_classes=labels,
          num_true=num_true,
          num_sampled=num_sampled,
          unique=True,
          range_max=num_classes)
    # NOTE: pylint cannot tell that 'sampled_values' is a sequence
    # pylint: disable=unpacking-non-sequence
    # stop_gradient: the sampler outputs are treated as constants so no
    # gradient flows back into the sampling procedure.
    sampled, true_expected_count, sampled_expected_count = (
        array_ops.stop_gradient(s) for s in sampled_values)
    # pylint: enable=unpacking-non-sequence
    sampled = math_ops.cast(sampled, dtypes.int64)

    # labels_flat is a [batch_size * num_true] tensor
    # sampled is a [num_sampled] int tensor
    # Look up embeddings for true and sampled ids in one combined lookup.
    all_ids = array_ops.concat([labels_flat, sampled], 0)

    # Retrieve the true weights and the logits of the sampled weights.
    # weights shape is [num_classes, dim]
    all_w = embedding_ops.embedding_lookup(
        weights, all_ids, partition_strategy=partition_strategy)
    # true_w shape is [batch_size * num_true, dim]
    true_w = array_ops.slice(all_w, [0, 0],
                             array_ops.stack(
                                 [array_ops.shape(labels_flat)[0], -1]))

    sampled_w = array_ops.slice(
        all_w, array_ops.stack([array_ops.shape(labels_flat)[0], 0]), [-1, -1])
    # inputs has shape [batch_size, dim]
    # sampled_w has shape [num_sampled, dim]
    # Apply X*W', which yields [batch_size, num_sampled]
    sampled_logits = math_ops.matmul(inputs, sampled_w, transpose_b=True)

    # Retrieve the true and sampled biases, compute the true logits, and
    # add the biases to the true and sampled logits.
    all_b = embedding_ops.embedding_lookup(
        biases, all_ids, partition_strategy=partition_strategy)
    # true_b is a [batch_size * num_true] tensor
    # sampled_b is a [num_sampled] float tensor
    true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat))
    sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), [-1])

    # inputs shape is [batch_size, dim]
    # true_w shape is [batch_size * num_true, dim]
    # row_wise_dots is [batch_size, num_true, dim]
    dim = array_ops.shape(true_w)[1:2]
    new_true_w_shape = array_ops.concat([[-1, num_true], dim], 0)
    row_wise_dots = math_ops.multiply(
        array_ops.expand_dims(inputs, 1),
        array_ops.reshape(true_w, new_true_w_shape))
    # We want the row-wise dot plus biases which yields a
    # [batch_size, num_true] tensor of true_logits.
    dots_as_matrix = array_ops.reshape(row_wise_dots,
                                       array_ops.concat([[-1], dim], 0))
    true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])
    true_b = array_ops.reshape(true_b, [-1, num_true])
    true_logits += true_b
    sampled_logits += sampled_b

    if remove_accidental_hits:
      # Where a sampled id collides with a true label, add the sampler's
      # per-hit weight to that logit so the collision does not act as a
      # spurious negative example.
      acc_hits = candidate_sampling_ops.compute_accidental_hits(
          labels, sampled, num_true=num_true)
      acc_indices, acc_ids, acc_weights = acc_hits

      # This is how SparseToDense expects the indices.
      acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1])
      acc_ids_2d_int32 = array_ops.reshape(
          math_ops.cast(acc_ids, dtypes.int32), [-1, 1])
      sparse_indices = array_ops.concat([acc_indices_2d, acc_ids_2d_int32], 1,
                                        "sparse_indices")
      # Create sampled_logits_shape = [batch_size, num_sampled]
      sampled_logits_shape = array_ops.concat(
          [array_ops.shape(labels)[:1],
           array_ops.expand_dims(num_sampled, 0)], 0)
      if sampled_logits.dtype != acc_weights.dtype:
        acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype)
      sampled_logits += sparse_ops.sparse_to_dense(
          sparse_indices,
          sampled_logits_shape,
          acc_weights,
          default_value=0.0,
          validate_indices=False)

    if subtract_log_q:
      # Subtract log of Q(l), prior probability that l appears in sampled.
      true_logits -= math_ops.log(true_expected_count)
      sampled_logits -= math_ops.log(sampled_expected_count)

    # Construct output logits and labels. The true labels/logits start at col 0.
    out_logits = array_ops.concat([true_logits, sampled_logits], 1)
    # true_logits is a float tensor, ones_like(true_logits) is a float tensor
    # of ones. We then divide by num_true to ensure the per-example labels sum
    # to 1.0, i.e. form a proper probability distribution.
    out_labels = array_ops.concat([
        array_ops.ones_like(true_logits) / num_true,
        array_ops.zeros_like(sampled_logits)
    ], 1)

  return out_logits, out_labels
def nce_loss(weights,
             biases,
             labels,
             inputs,
             num_sampled,
             num_classes,
             num_true=1,
             sampled_values=None,
             remove_accidental_hits=False,
             partition_strategy="mod",
             name="nce_loss"):
  """Computes and returns the noise-contrastive estimation training loss.

  See [Noise-contrastive estimation: A new estimation principle for
  unnormalized statistical
  models](http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf)
  and our [Candidate Sampling Algorithms
  Reference](https://www.tensorflow.org/extras/candidate_sampling.pdf).

  A common pattern is to train with this loss but evaluate with the full
  sigmoid loss.  For the two to be consistent you must use
  `partition_strategy="div"`:

  ```python
  if mode == "train":
    loss = tf.nn.nce_loss(
        weights=weights,
        biases=biases,
        labels=labels,
        inputs=inputs,
        ...,
        partition_strategy="div")
  elif mode == "eval":
    logits = tf.matmul(inputs, tf.transpose(weights))
    logits = tf.nn.bias_add(logits, biases)
    labels_one_hot = tf.one_hot(labels, n_classes)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=labels_one_hot,
        logits=logits)
    loss = tf.reduce_sum(loss, axis=1)
  ```

  Notes:
    * The default sampler is log-uniform (Zipfian), so labels should be
      sorted in order of decreasing frequency for good results.  See
      @{tf.nn.log_uniform_candidate_sampler}.
    * When `num_true` > 1, each target class is assigned probability
      1 / `num_true` so the per-example target probabilities sum to 1.
    * A variable number of target classes per example is not supported;
      pad to a constant number by repeating classes or using an otherwise
      unused class.

  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
        objects whose concatenation along dimension 0 has shape
        [num_classes, dim].  The (possibly-partitioned) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`.  The class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size,
        num_true]`.  The target classes.
    inputs: A `Tensor` of shape `[batch_size, dim]`.  The forward
        activations of the input network.
    num_sampled: An `int`.  The number of classes to randomly sample per batch.
    num_classes: An `int`.  The number of possible classes.
    num_true: An `int`.  The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
        `sampled_expected_count`) returned by a `*_candidate_sampler` function.
        (if None, we default to `log_uniform_candidate_sampler`)
    remove_accidental_hits: A `bool`.  Whether to remove "accidental hits"
        where a sampled class equals one of the target classes.  If set to
        `True`, this is a "Sampled Logistic" loss instead of NCE, and we are
        learning to generate log-odds instead of log probabilities.  See the
        [Candidate Sampling Algorithms Reference]
        (https://www.tensorflow.org/extras/candidate_sampling.pdf).
        Default is False.
    partition_strategy: A string specifying the partitioning strategy, relevant
        if `len(weights) > 1`.  Currently `"div"` and `"mod"` are supported.
        Default is `"mod"`.  See `tf.nn.embedding_lookup` for more details.
    name: A name for the operation (optional).

  Returns:
    A `batch_size` 1-D tensor of per-example NCE losses.
  """
  # Build [batch_size, num_true + num_sampled] logits/labels; the log of the
  # sampler's expected counts is subtracted as NCE requires.
  dense_logits, dense_labels = _compute_sampled_logits(
      weights=weights,
      biases=biases,
      labels=labels,
      inputs=inputs,
      num_sampled=num_sampled,
      num_classes=num_classes,
      num_true=num_true,
      sampled_values=sampled_values,
      subtract_log_q=True,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy=partition_strategy,
      name=name)
  # Per-(example, class) sigmoid cross-entropy over true and sampled columns.
  per_class_losses = sigmoid_cross_entropy_with_logits(
      labels=dense_labels, logits=dense_logits, name="sampled_losses")
  # Sum the true-class and sampled-class losses for each example.
  return _sum_rows(per_class_losses)
def sampled_softmax_loss(weights,
                         biases,
                         labels,
                         inputs,
                         num_sampled,
                         num_classes,
                         num_true=1,
                         sampled_values=None,
                         remove_accidental_hits=True,
                         partition_strategy="mod",
                         name="sampled_softmax_loss"):
  """Computes and returns the sampled softmax training loss.

  This is a faster way to train a softmax classifier over a huge number of
  classes.  It is intended for training only and is generally an
  underestimate of the full softmax loss.

  A common pattern is to train with this loss but evaluate with the full
  softmax loss.  For the two to be consistent you must use
  `partition_strategy="div"`:

  ```python
  if mode == "train":
    loss = tf.nn.sampled_softmax_loss(
        weights=weights,
        biases=biases,
        labels=labels,
        inputs=inputs,
        ...,
        partition_strategy="div")
  elif mode == "eval":
    logits = tf.matmul(inputs, tf.transpose(weights))
    logits = tf.nn.bias_add(logits, biases)
    labels_one_hot = tf.one_hot(labels, n_classes)
    loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=labels_one_hot,
        logits=logits)
  ```

  See our [Candidate Sampling Algorithms Reference]
  (https://www.tensorflow.org/extras/candidate_sampling.pdf) and Section 3 of
  [Jean et al., 2014](http://arxiv.org/abs/1412.2007)
  ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.

  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
        objects whose concatenation along dimension 0 has shape
        [num_classes, dim].  The (possibly-sharded) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`.  The class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size,
        num_true]`.  The target classes.  Note that this format differs from
        the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
    inputs: A `Tensor` of shape `[batch_size, dim]`.  The forward
        activations of the input network.
    num_sampled: An `int`.  The number of classes to randomly sample per batch.
    num_classes: An `int`.  The number of possible classes.
    num_true: An `int`.  The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
        `sampled_expected_count`) returned by a `*_candidate_sampler` function.
        (if None, we default to `log_uniform_candidate_sampler`)
    remove_accidental_hits: A `bool`.  Whether to remove "accidental hits"
        where a sampled class equals one of the target classes.  Default is
        True.
    partition_strategy: A string specifying the partitioning strategy, relevant
        if `len(weights) > 1`.  Currently `"div"` and `"mod"` are supported.
        Default is `"mod"`.  See `tf.nn.embedding_lookup` for more details.
    name: A name for the operation (optional).

  Returns:
    A `batch_size` 1-D tensor of per-example sampled softmax losses.
  """
  # Build [batch_size, num_true + num_sampled] logits and a matching label
  # distribution (1/num_true mass per true class, 0 on sampled classes).
  dense_logits, dense_labels = _compute_sampled_logits(
      weights=weights,
      biases=biases,
      labels=labels,
      inputs=inputs,
      num_sampled=num_sampled,
      num_classes=num_classes,
      num_true=num_true,
      sampled_values=sampled_values,
      subtract_log_q=True,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy=partition_strategy,
      name=name)
  # softmax_cross_entropy_with_logits already reduces over classes, so the
  # result is the desired [batch_size] per-example loss.
  return nn_ops.softmax_cross_entropy_with_logits(
      labels=dense_labels, logits=dense_logits)
| mit |
niklasf/python-prompt-toolkit | prompt_toolkit/layout/utils.py | 1 | 2590 | from __future__ import unicode_literals
from prompt_toolkit.utils import get_cwidth
__all__ = (
'token_list_len',
'token_list_width',
'token_list_to_text',
'explode_tokens',
'find_window_for_buffer_name',
)
def token_list_len(tokenlist):
    """
    Return the total number of characters contained in this token list.

    :param tokenlist: List of (token, text) or (token, text, mouse_handler)
        tuples.
    """
    total = 0
    for item in tokenlist:
        total += len(item[1])
    return total
def token_list_width(tokenlist):
    """
    Return the character width of this token list.
    (Takes double width characters into account.)

    :param tokenlist: List of (token, text) or (token, text, mouse_handler)
        tuples.
    """
    width = 0
    for item in tokenlist:
        for char in item[1]:
            width += get_cwidth(char)
    return width
def token_list_to_text(tokenlist):
    """
    Concatenate the text parts of all items back into one string.
    """
    parts = [item[1] for item in tokenlist]
    return ''.join(parts)
def iter_token_lines(tokenlist):
    """
    Iterator that yields one (token, char) list per line.

    Each yielded line still contains its terminating ``'\\n'`` character,
    and a final (possibly empty) line is always yielded.
    """
    current_line = []
    for token, char in explode_tokens(tokenlist):
        current_line.append((token, char))
        if char == '\n':
            yield current_line
            current_line = []
    yield current_line
def split_lines(tokenlist):
    """
    Take a single list of (Token, text) tuples and yield one such list for
    each line.

    Empty fragments produced by the split are dropped, except for the
    fragment after the last newline of the final tuple, which is always
    kept (so input ending in ``'\\n'`` yields a trailing line containing an
    empty-string tuple).
    """
    current = []
    for token, text in tokenlist:
        pieces = text.split('\n')
        # Every piece except the last one terminates a line.
        for piece in pieces[:-1]:
            if piece:
                current.append((token, piece))
            yield current
            current = []
        current.append((token, pieces[-1]))
    if current:
        yield current
def explode_tokens(tokenlist):
    """
    Turn a list of (token, text) tuples into another list where each string
    is exactly one character.

    :param tokenlist: List of (token, text) tuples.
    """
    return [(token, char) for token, string in tokenlist for char in string]
def find_window_for_buffer_name(layout, buffer_name):
    """
    Look for a :class:`~prompt_toolkit.layout.containers.Window` in the Layout
    that contains the :class:`~prompt_toolkit.layout.controls.BufferControl`
    for the given buffer and return it. If no such Window is found, return None.
    """
    # Imported here to avoid a circular import at module load time.
    from .containers import Window
    from .controls import BufferControl

    for container in layout.walk():
        if isinstance(container, Window) and \
                isinstance(container.content, BufferControl) and \
                container.content.buffer_name == buffer_name:
            return container
| bsd-3-clause |
Daniex/horizon | horizon/utils/urlresolvers.py | 67 | 1162 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from django.core.urlresolvers import reverse as django_reverse
from django.utils.http import urlquote # noqa
from django import VERSION # noqa
def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None,
            current_app=None):
    """Wrapper around Django's ``reverse`` that URL-quotes args and kwargs.

    Django versions before 1.6 did not quote the positional and keyword
    arguments themselves, so this applies ``urlquote`` first on those
    versions before delegating to ``django.core.urlresolvers.reverse``.
    """
    if VERSION < (1, 6):
        if args:
            args = [urlquote(arg) for arg in args]
        if kwargs:
            kwargs = dict((key, urlquote(value))
                          for key, value in six.iteritems(kwargs))
    return django_reverse(viewname, urlconf, args, kwargs, prefix, current_app)
| apache-2.0 |
bltb/mitmproxy | test/test_proxy.py | 4 | 4679 | import argparse
from libmproxy import cmdline
from libmproxy.proxy import ProxyConfig, process_proxy_options
from libmproxy.proxy.connection import ServerConnection
from libmproxy.proxy.primitives import ProxyError
from libmproxy.proxy.server import DummyServer, ProxyServer, ConnectionHandler
import tutils
from libpathod import test
from netlib import http, tcp
import mock
def test_proxy_error():
    # A ProxyError must render a non-empty string representation.
    err = ProxyError(111, "msg")
    assert str(err)
class TestServerConnection:
    # Exercises ServerConnection against a live local pathod daemon.

    def setUp(self):
        # Spin up a local pathod test daemon to act as the upstream server.
        self.d = test.Daemon()

    def tearDown(self):
        self.d.shutdown()

    def test_simple(self):
        # Connect, request a canned 200 response ("/p/200:da") and check that
        # both the client side (parsed response) and the daemon log saw it.
        sc = ServerConnection((self.d.IFACE, self.d.port))
        sc.connect()
        f = tutils.tflow()
        f.server_conn = sc
        f.request.path = "/p/200:da"
        sc.send(f.request.assemble())
        assert http.read_response(sc.rfile, f.request.method, 1000)
        assert self.d.last_log()
        sc.finish()

    def test_terminate_error(self):
        # finish() must not raise even when flushing the connection fails
        # with a disconnect; the network objects are mocked to force that.
        sc = ServerConnection((self.d.IFACE, self.d.port))
        sc.connect()
        sc.connection = mock.Mock()
        sc.connection.recv = mock.Mock(return_value=False)
        sc.connection.flush = mock.Mock(side_effect=tcp.NetLibDisconnect)
        sc.finish()

    def test_repr(self):
        # repr() should reflect the address, SSL state and SNI as they change.
        sc = tutils.tserver_conn()
        assert "address:22" in repr(sc)
        assert "ssl" not in repr(sc)
        sc.ssl_established = True
        assert "ssl" in repr(sc)
        sc.sni = "foo"
        assert "foo" in repr(sc)
class TestProcessProxyOptions:
    # Validates command-line option parsing and proxy-config construction.

    def p(self, *args):
        # Parse the given argv fragment through the real option definitions
        # and return (parser, processed_config).
        parser = tutils.MockParser()
        cmdline.common_options(parser)
        opts = parser.parse_args(args=args)
        return parser, process_proxy_options(parser, opts)

    def assert_err(self, err, *args):
        # The given argv must make option processing fail with message `err`.
        tutils.raises(err, self.p, *args)

    def assert_noerr(self, *args):
        # The given argv must parse cleanly; returns the resulting config.
        m, p = self.p(*args)
        assert p
        return p

    def test_simple(self):
        assert self.p()

    def test_cadir(self):
        with tutils.tmpdir() as cadir:
            self.assert_noerr("--cadir", cadir)

    @mock.patch("libmproxy.platform.resolver", None)
    def test_no_transparent(self):
        # Without a platform resolver, transparent mode (-T) must be rejected.
        self.assert_err("transparent mode not supported", "-T")

    @mock.patch("libmproxy.platform.resolver")
    def test_modes(self, _):
        # Reverse (-R), transparent (-T) and upstream (-U) mode arguments.
        self.assert_noerr("-R", "http://localhost")
        self.assert_err("expected one argument", "-R")
        self.assert_err("Invalid server specification", "-R", "reverse")
        self.assert_noerr("-T")
        self.assert_noerr("-U", "http://localhost")
        self.assert_err("expected one argument", "-U")
        self.assert_err("Invalid server specification", "-U", "upstream")
        # Reverse and transparent mode cannot be combined.
        self.assert_err("mutually exclusive", "-R", "http://localhost", "-T")

    def test_client_certs(self):
        with tutils.tmpdir() as cadir:
            self.assert_noerr("--client-certs", cadir)
        self.assert_err("directory does not exist", "--client-certs", "nonexistent")

    def test_certs(self):
        with tutils.tmpdir() as cadir:
            self.assert_noerr("--cert", tutils.test_data.path("data/testkey.pem"))
        self.assert_err("does not exist", "--cert", "nonexistent")

    def test_auth(self):
        # The three authentication mechanisms each yield an authenticator.
        p = self.assert_noerr("--nonanonymous")
        assert p.authenticator
        p = self.assert_noerr("--htpasswd", tutils.test_data.path("data/htpasswd"))
        assert p.authenticator
        self.assert_err("malformed htpasswd file", "--htpasswd", tutils.test_data.path("data/htpasswd.invalid"))
        p = self.assert_noerr("--singleuser", "test:test")
        assert p.authenticator
        self.assert_err("invalid single-user specification", "--singleuser", "test")
class TestProxyServer:
    # Starting the proxy server with bad bind parameters must raise.

    @tutils.SkipWindows  # binding to 0.0.0.0:1 works without special permissions on Windows
    def test_err(self):
        # Port 1 is privileged on unix, so startup should fail for a
        # non-root test run.
        conf = ProxyConfig(
            port=1
        )
        tutils.raises("error starting proxy server", ProxyServer, conf)

    def test_err_2(self):
        # An unresolvable bind host must also fail at startup.
        conf = ProxyConfig(
            host="invalidhost"
        )
        tutils.raises("error starting proxy server", ProxyServer, conf)
class TestDummyServer:
    def test_simple(self):
        # A DummyServer must go through its full lifecycle without error.
        server = DummyServer(None)
        server.start_slave()
        server.shutdown()
class TestConnectionHandler:
    def test_fatal_error(self):
        # If resolving the upstream server blows up, the handler must not
        # propagate the exception; it should report the crash on stderr.
        config = mock.Mock()
        config.mode.get_upstream_server.side_effect = RuntimeError
        c = ConnectionHandler(config, mock.MagicMock(), ("127.0.0.1", 8080), None, mock.MagicMock())
        with tutils.capture_stderr(c.handle) as output:
            assert "mitmproxy has crashed" in output
| mit |
UdK-VPT/Open_eQuarter | mole/stat_corr/building_owner_distribution_by_population_density_correlation.py | 1 | 2864 | # OeQ autogenerated correlation for 'Building Owner Distribution in Correlation to the population density'
import math
import numpy as np
import oeqCorrelation as oeq
def get(*xin):
    """Return the building-owner distribution for the given input.

    All coefficients below are OeQ-autogenerated polynomial/log correlations
    of ownership shares against population density.  ``*xin`` is forwarded
    unchanged to ``oeq.correlation.lookup`` -- presumably the population
    density value(s); TODO confirm units against the oeqCorrelation module.

    Returns a dict mapping ownership-category keys (BLD_OWNER_*) to the
    looked-up correlation values.
    """
    # OeQ autogenerated correlation for 'Buildings with housing owned by assiciations'
    BLD_OWNER_ASSOC= oeq.correlation(
        const= 0.0312402617763,
        a= 0.0118744160584,
        b= -0.00729746079842,
        c= 0.00173208156049,
        d= -9.53526866706e-05,
        mode= "log")
    # OeQ autogenerated correlation for 'Buildings with housing owned by private persons'
    BLD_OWNER_PRIV= oeq.correlation(
        const= 0.870142013652,
        a= 0.0125902143919,
        b= 0.00423575284842,
        c= -0.00101572305978,
        mode= "log")
    # OeQ autogenerated correlation for '' (generator emitted no label;
    # by the returned key this is the building-society share)
    BLD_OWNER_BUILDSOC= oeq.correlation(
        const= 7.05505520927e-05,
        a= 2.41405868896e-05,
        b= -8.36861380787e-09,
        c= 1.19731589642e-12,
        d= -3.91117785944e-17,
        mode= "lin")
    # OeQ autogenerated correlation for 'Buildings with housing owned by muicipalities and municipal housing companies'
    BLD_OWNER_MUNDWELLCOMP= oeq.correlation(
        const= 0.0119495453475,
        a= 0.00711820124277,
        b= -0.00303579094598,
        c= 0.000296825110286,
        mode= "log")
    # OeQ autogenerated correlation for 'Buildings with housing owned by private housing companies'
    BLD_OWNER_PRIVDWELLCOMP= oeq.correlation(
        const= -0.00352036390092,
        a= 0.00501006910544,
        b= -0.000436116039481,
        c= -0.000303172760381,
        d= 4.66056854617e-05,
        mode= "log")
    # OeQ autogenerated correlation for 'Buildings with housing owned by other private companies'
    BLD_OWNER_OTHERPRIVCOMP= oeq.correlation(
        const= 5.2083798971e-05,
        a= 0.00594480102356,
        b= -0.00189094906311,
        c= 0.000167059857312,
        mode= "log")
    # OeQ autogenerated correlation for 'Buildings with housing owned by government'
    BLD_OWNER_GOV= oeq.correlation(
        const= 0.00125717232549,
        a= 3.53179848584e-06,
        b= -2.32282383087e-09,
        c= 4.87405593612e-13,
        mode= "lin")
    # OeQ autogenerated correlation for 'Buildings with housing owned by nongovernment organisations'
    BLD_OWNER_ORG= oeq.correlation(
        const= -0.00178904938759,
        a= 0.00356828920262,
        b= -0.000892835212844,
        c= 7.13734965882e-05,
        mode= "log")
    return dict(BLD_OWNER_ASSOC=BLD_OWNER_ASSOC.lookup(*xin),
        BLD_OWNER_PRIV=BLD_OWNER_PRIV.lookup(*xin),
        BLD_OWNER_BUILDSOC=BLD_OWNER_BUILDSOC.lookup(*xin),
        BLD_OWNER_MUNDWELLCOMP=BLD_OWNER_MUNDWELLCOMP.lookup(*xin),
        BLD_OWNER_PRIVDWELLCOMP=BLD_OWNER_PRIVDWELLCOMP.lookup(*xin),
        BLD_OWNER_OTHERPRIVCOMP=BLD_OWNER_OTHERPRIVCOMP.lookup(*xin),
        BLD_OWNER_GOV=BLD_OWNER_GOV.lookup(*xin),
        BLD_OWNER_ORG=BLD_OWNER_ORG.lookup(*xin))
| gpl-2.0 |
devbitstudio/portfolio | settings.py | 1 | 5950 | # Django settings for devbitstudio project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DOMAIN = 'devbitstudio.com'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
#~ DEFAULT_FROM_EMAIL = 'contact@devbitstudio.com'
SERVER_EMAIL = 'contact@devbitstudio.com'
EMAIL_SUBJECT_PREFIX = 'DevBitStudio - '
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__).decode('utf-8'))
PROJECT_DIR = os.path.dirname(__file__)
RESULTS_PER_PAGE = 12
ADMINS = (
('William Ibarra Rodriguez', 'wibarra@ucp.ho.rimed.cu'),
('Miguel Pelfort Paz', 'miguel.pelfort@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'devbitstudio', # Or path to database file if using sqlite3.
'USER': 'root', # Not used with sqlite3.
'PASSWORD': 'root', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'uploads/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''#os.path.join(PROJECT_DIR, 'static/')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'static/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'iea9ivk!*ms-#$i%ix0i0b3p=u&30v+h*)&c5!%byv^i6^15%3'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'marketing.urlcanon.URLCanonicalizationMiddleware',
)
ROOT_URLCONF = 'devbitstudio.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__),'templates').replace('\\', '/'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
'main',
'django.contrib.sitemaps',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# for use with URL Canonicalization Middleware:
# this is the canonical hostname to be used by your app (required)
CANON_URL_HOST = 'devbitstudio.com'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| mit |
lesina/Hack70 | env/lib/python3.5/site-packages/django/utils/translation/trans_null.py | 75 | 1468 | # These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
from django.conf import settings
from django.utils.encoding import force_text
def ngettext(singular, plural, number):
    """Return *singular* when *number* is exactly 1, otherwise *plural*.

    No actual translation happens; this is the USE_I18N=False stub.
    """
    return singular if number == 1 else plural

ngettext_lazy = ngettext
def ungettext(singular, plural, number):
    """Like :func:`ngettext`, but coerced to the text type."""
    chosen = ngettext(singular, plural, number)
    return force_text(chosen)
def pgettext(context, message):
    """Ignore *context* and fall back to plain :func:`ugettext`."""
    return ugettext(message)
def npgettext(context, singular, plural, number):
    """Ignore *context* and fall back to plain :func:`ungettext`."""
    return ungettext(singular, plural, number)
def activate(x):
    """No-op stand-in for trans_real.activate (i18n is disabled)."""
    return None
def deactivate():
    """No-op stand-in for trans_real.deactivate (i18n is disabled)."""
    return None

deactivate_all = deactivate
def get_language():
    """Always report the project's LANGUAGE_CODE as the active language."""
    return settings.LANGUAGE_CODE
def get_language_bidi():
    """True when LANGUAGE_CODE is one of the configured bidi languages."""
    return settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
def check_for_language(x):
    """Report every language as available (translations are disabled)."""
    return True
def gettext(message):
    """Identity "translation": hand back *message* untouched."""
    return message
def ugettext(message):
    """Like :func:`gettext`, but coerced to the text type."""
    translated = gettext(message)
    return force_text(translated)

gettext_noop = gettext_lazy = _ = gettext
def to_locale(language):
    """Turn a language name (e.g. 'en-us') into a locale name ('en_US').

    Only the first '-' is significant; without one, the whole name is
    simply lower-cased.
    """
    head, sep, tail = language.partition('-')
    if sep:
        return head.lower() + '_' + tail.upper()
    return language.lower()
def get_language_from_request(request, check_path=False):
    """The active language is fixed; *request* and *check_path* are ignored."""
    return settings.LANGUAGE_CODE
def get_language_from_path(request):
    """There is no language URL prefix handling when i18n is disabled."""
    return None
| gpl-3.0 |
michelangelo13/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/CVS.py | 61 | 2912 | """SCons.Tool.CVS.py
Tool-specific initialization for CVS.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/CVS.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
    """Add a Builder factory function and construction variables for
    CVS to an Environment."""

    def CVSFactory(repos, module='', env=env):
        """Return a deprecated Builder that checks sources out of CVS."""
        import SCons.Warnings as W
        W.warn(W.DeprecatedSourceCodeWarning, """The CVS() factory is deprecated and there is no replacement.""")
        # fail if repos is not an absolute path name?
        if module != '':
            # Don't use os.path.join() because the name we fetch might
            # be across a network and must use POSIX slashes as separators.
            module = module + '/'
            # With a module, check out into the target's directory (-d) and
            # prefix the module path onto the target name.
            env['CVSCOM'] = '$CVS $CVSFLAGS co $CVSCOFLAGS -d ${TARGET.dir} $CVSMODULE${TARGET.posix}'
        act = SCons.Action.Action('$CVSCOM', '$CVSCOMSTR')
        return SCons.Builder.Builder(action = act,
                                     env = env,
                                     CVSREPOSITORY = repos,
                                     CVSMODULE = module)

    #setattr(env, 'CVS', CVSFactory)
    env.CVS = CVSFactory

    # Default construction variables used to build the cvs command line.
    env['CVS'] = 'cvs'
    env['CVSFLAGS'] = SCons.Util.CLVar('-d $CVSREPOSITORY')
    env['CVSCOFLAGS'] = SCons.Util.CLVar('')
    env['CVSCOM'] = '$CVS $CVSFLAGS co $CVSCOFLAGS ${TARGET.posix}'
def exists(env):
    """Return a true value iff a 'cvs' executable is detectable by env."""
    return env.Detect('cvs')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
SaschaMester/delicium | third_party/cython/src/Tools/site_scons/site_tools/cython.py | 125 | 1721 | """
Tool to run Cython files (.pyx) into .c and .cpp.
TODO:
- Add support for dynamically selecting in-process Cython
through CYTHONINPROCESS variable.
- Have a CYTHONCPP option which turns on C++ in flags and
changes output extension at the same time
VARIABLES:
- CYTHON - The path to the "cython" command line tool.
- CYTHONFLAGS - Flags to pass to the "cython" command line tool.
AUTHORS:
- David Cournapeau
- Dag Sverre Seljebotn
"""
import SCons
from SCons.Builder import Builder
from SCons.Action import Action
#def cython_action(target, source, env):
#    print target, source, env
#    from Cython.Compiler.Main import compile as cython_compile
#    res = cython_compile(str(source[0]))
# Shared Action invoking the external cython command line ($CYTHONCOM);
# the in-process variant above was never enabled.
cythonAction = Action("$CYTHONCOM")
def create_builder(env):
    """Return the environment's 'Cython' builder, registering a new one
    first if none exists yet."""
    builders = env['BUILDERS']
    if 'Cython' not in builders:
        builders['Cython'] = SCons.Builder.Builder(
            action = cythonAction,
            emitter = {},
            suffix = cython_suffix_emitter,
            single_source = 1)
    return builders['Cython']
def cython_suffix_emitter(env, source):
    """Return the substitution variable that expands to the generated-file
    suffix (set to '.c' by generate())."""
    return "$CYTHONCFILESUFFIX"
def generate(env):
    """Add Builders and construction variables for Cython to an Environment."""
    env["CYTHON"] = "cython"
    env["CYTHONCOM"] = "$CYTHON $CYTHONFLAGS -o $TARGET $SOURCE"
    env["CYTHONCFILESUFFIX"] = ".c"
    c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
    # Route both .pyx and plain .py sources through the cython command.
    c_file.suffix['.pyx'] = cython_suffix_emitter
    c_file.add_action('.pyx', cythonAction)
    c_file.suffix['.py'] = cython_suffix_emitter
    c_file.add_action('.py', cythonAction)
    create_builder(env)
def exists(env):
    """Return True if the Cython compiler package can be imported.

    Bug fix: the ``import Cython`` probe had been commented out, which
    made the ``except ImportError`` branch unreachable — the function
    always returned True even when Cython was not installed.  The probe
    is restored so tool detection reflects reality.
    """
    try:
        import Cython  # noqa: F401 -- availability probe only
        return True
    except ImportError:
        return False
| bsd-3-clause |
jumpstarter-io/neutron | neutron/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py | 8 | 3044 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""cisco plugin cleanup
Revision ID: 2a6d0b51f4bb
Revises: 1d76643bcec4
Create Date: 2013-01-17 22:24:37.730466
"""
# revision identifiers, used by Alembic.
revision = '2a6d0b51f4bb'
down_revision = '1d76643bcec4'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.cisco.network_plugin.PluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
    """Forward migration: drop the legacy Cisco plugin tables.

    No-op unless the Cisco network plugin is among the active plugins.
    """
    if not migration.should_run(active_plugins, migration_for_plugins):
        return
    op.drop_table(u'portprofile_bindings')
    op.drop_table(u'portprofiles')
    op.drop_table(u'port_bindings')
def downgrade(active_plugins=None, options=None):
    """Reverse migration: recreate the legacy Cisco plugin tables.

    Tables are recreated empty — data dropped by upgrade() is not
    restored.  No-op unless the Cisco plugin is among active_plugins.
    """
    if not migration.should_run(active_plugins, migration_for_plugins):
        return
    op.create_table(
        u'port_bindings',
        sa.Column(u'id', sa.Integer(), autoincrement=True,
                  nullable=False),
        sa.Column(u'port_id', sa.String(255), nullable=False),
        sa.Column(u'blade_intf_dn', sa.String(255), nullable=False),
        sa.Column(u'portprofile_name', sa.String(255),
                  nullable=True),
        sa.Column(u'vlan_name', sa.String(255), nullable=True),
        sa.Column(u'vlan_id', sa.Integer(), nullable=True),
        sa.Column(u'qos', sa.String(255), nullable=True),
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'vif_id', sa.String(255), nullable=True),
        sa.PrimaryKeyConstraint(u'id')
    )
    # portprofiles must exist before portprofile_bindings (FK target).
    op.create_table(
        u'portprofiles',
        sa.Column(u'uuid', sa.String(255), nullable=False),
        sa.Column(u'name', sa.String(255), nullable=True),
        sa.Column(u'vlan_id', sa.Integer(), nullable=True),
        sa.Column(u'qos', sa.String(255), nullable=True),
        sa.PrimaryKeyConstraint(u'uuid')
    )
    op.create_table(
        u'portprofile_bindings',
        sa.Column(u'id', sa.String(255), nullable=False),
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'port_id', sa.String(255), nullable=True),
        sa.Column(u'portprofile_id', sa.String(255), nullable=True),
        sa.Column(u'default', sa.Boolean(), nullable=False),
        sa.ForeignKeyConstraint(['portprofile_id'], ['portprofiles.uuid'], ),
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ),
        sa.PrimaryKeyConstraint(u'id')
    )
| apache-2.0 |
eliangidoni/rethinkdb | test/scenarios/restart.py | 12 | 1600 | #!/usr/bin/env python
# Copyright 2010-2016 RethinkDB, all rights reserved.
import os, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import rdb_unittest, scenario_common, utils, vcoptparse, workload_runner
# Command-line interface: the standard scenario mode flags plus the two
# workload commands (run before and after the restart) and a timeout.
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
op["workload1"] = vcoptparse.StringFlag("--workload-before", None)
op["workload2"] = vcoptparse.StringFlag("--workload-after", None)
op["timeout"] = vcoptparse.IntFlag("--timeout", 600)
opts = op.parse(sys.argv)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
class ChangePrimary(rdb_unittest.RdbTestCase):
    '''Restart a server to make sure it keeps its data on restart'''
    # NOTE(review): the class name says "ChangePrimary" but the scenario
    # only restarts a server — consider renaming upstream.
    # Single-server cluster; prefix/options come from the CLI flags
    # parsed at module level.
    servers = 1
    server_command_prefix = command_prefix
    server_extra_options = serve_options
    def test_workload(self):
        """Run workload1, restart the lone server, wait for replicas,
        then run workload2 to verify the data survived."""
        server = self.cluster[0]
        utils.print_with_time("Running first workload")
        workload_runner.run(opts["workload1"], server, opts["timeout"], db_name=self.dbName, table_name=self.tableName)
        utils.print_with_time("Restarting server")
        server.check_and_stop()
        server.start()
        self.cluster.check()
        # Block until the table is fully ready before the second workload.
        self.r.db(self.dbName).wait(wait_for="all_replicas_ready").run(self.conn)
        utils.print_with_time("Running second workload")
        workload_runner.run(opts["workload2"], server, opts["timeout"], db_name=self.dbName, table_name=self.tableName)
# ===== main
if __name__ == '__main__':
    # Entry point: run this scenario through the project's unittest wrapper.
    rdb_unittest.main()
| agpl-3.0 |
antamb/google-personal-assistant | tests/test_speak_time.py | 5 | 1998 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Test the time-to-string conversion.'''
import datetime
import unittest
import action
class TestTimeToStr(unittest.TestCase):
    """Exercise action.SpeakTime's conversion of datetime.time values to
    spoken English sentences (nearest-five-minute phrasing)."""
    def assertTimeToStr(self, time, expected):
        # Helper: SpeakTime takes one constructor argument (unused here);
        # only the to_str() output is checked.
        self.assertEqual(action.SpeakTime(None).to_str(time), expected)
    def test_midnight(self):
        self.assertTimeToStr(datetime.time(0, 0), 'It is midnight.')
    def test_just_after_midnight(self):
        # 00:02 rounds to the nearest five minutes -> still midnight.
        self.assertTimeToStr(datetime.time(0, 2), 'It is midnight.')
    def test_five_past_midnight(self):
        self.assertTimeToStr(datetime.time(0, 5), 'It is five past midnight.')
    def test_five_to_midnight(self):
        self.assertTimeToStr(datetime.time(23, 55), 'It is five to midnight.')
    def test_quarter_to_one(self):
        self.assertTimeToStr(datetime.time(0, 45), 'It is quarter to one.')
    def test_twenty_past_four(self):
        self.assertTimeToStr(datetime.time(4, 20), 'It is twenty past four.')
    def test_before_midday(self):
        self.assertTimeToStr(datetime.time(11, 50), 'It is ten to twelve.')
    def test_midday(self):
        # 11:59 rounds up to twelve o'clock.
        self.assertTimeToStr(datetime.time(11, 59), "It is twelve o'clock.")
    def test_after_midday(self):
        self.assertTimeToStr(datetime.time(12, 32), 'It is half past twelve.')
    def test_twenty_past_four_pm(self):
        # Hours are spoken on a 12-hour clock: 16:20 -> "four".
        self.assertTimeToStr(datetime.time(16, 20), 'It is twenty past four.')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| apache-2.0 |
akopytov/sysbench | third_party/cram/cram/_process.py | 9 | 1805 | """Utilities for running subprocesses"""
import os
import signal
import subprocess
import sys
from cram._encoding import fsdecode
__all__ = ['PIPE', 'STDOUT', 'execute']
# Re-exported subprocess constants so callers need not import subprocess.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
def _makeresetsigpipe():
"""Make a function to reset SIGPIPE to SIG_DFL (for use in subprocesses).
Doing subprocess.Popen(..., preexec_fn=makeresetsigpipe()) will prevent
Python's SIGPIPE handler (SIG_IGN) from being inherited by the
child process.
"""
if (sys.platform == 'win32' or
getattr(signal, 'SIGPIPE', None) is None): # pragma: nocover
return None
return lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(args, stdin=None, stdout=None, stderr=None, cwd=None, env=None):
    """Run a process and return a 2-tuple of (output, returncode).

    stdin may either be None or a string to send to the process.
    stdout may either be None or PIPE; with PIPE, the process's output
    is returned as a string.  stderr may either be None or STDOUT; if
    stdout is PIPE and stderr is STDOUT, the stderr output is
    interleaved with stdout in the returned string.
    cwd sets the process's current working directory; env, if not None,
    replaces the process's environment variables.
    """
    if sys.platform == 'win32':  # pragma: nocover
        # Windows needs decoded (unicode) argument strings.
        args = [fsdecode(arg) for arg in args]
    proc = subprocess.Popen(args,
                            stdin=PIPE,
                            stdout=stdout,
                            stderr=stderr,
                            cwd=cwd,
                            env=env,
                            bufsize=-1,
                            preexec_fn=_makeresetsigpipe(),
                            close_fds=(os.name == 'posix'))
    out, _ = proc.communicate(stdin)
    return out, proc.returncode
| gpl-2.0 |
ChromeDevTools/devtools-frontend | scripts/deps/roll_deps.py | 2 | 2410 | #!/usr/bin/env vpython
#
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Update manually maintained dependencies from Chromium.
"""
import argparse
import os
import shutil
import subprocess
import sys
# Files whose location within devtools-frontend matches the upstream location.
FILES = [
    'v8/include/js_protocol.pdl',
    'third_party/blink/renderer/core/css/css_properties.json5',
    'third_party/blink/renderer/core/html/aria_properties.json5',
    'third_party/blink/public/devtools_protocol/browser_protocol.pdl',
]
# Files whose location within devtools-frontend differs from the upstream location.
FILE_MAPPINGS = {
    # chromium_path => devtools_frontend_path
    'components/variations/proto/devtools/client_variations.js':
    'front_end/third_party/chromium/client-variations/ClientVariations.js',
    'third_party/axe-core/axe.d.ts': 'front_end/third_party/axe-core/axe.d.ts',
    'third_party/axe-core/axe.js': 'front_end/third_party/axe-core/axe.js',
    'third_party/axe-core/axe.min.js':
    'front_end/third_party/axe-core/axe.min.js',
    'third_party/axe-core/LICENSE': 'front_end/third_party/axe-core/LICENSE',
}
# Same-location files map to themselves in the combined mapping used by
# copy_files().
for f in FILES:
    FILE_MAPPINGS[f] = f
def parse_options(cli_args):
    """Parse the positional chromium/devtools directory arguments.

    Returns an argparse.Namespace with attributes ``chromium_dir`` and
    ``devtools_dir``.
    """
    arg_parser = argparse.ArgumentParser(
        description='Roll dependencies from Chromium.')
    arg_parser.add_argument('chromium_dir',
                            help='path to chromium/src directory')
    arg_parser.add_argument(
        'devtools_dir', help='path to devtools/devtools-frontend directory')
    return arg_parser.parse_args(cli_args)
def update(options):
    """Sync the Chromium checkout: fetch, check out origin/main, gclient sync.

    Raises subprocess.CalledProcessError if any of the commands fail.
    """
    subprocess.check_call(['git', 'fetch', 'origin'], cwd=options.chromium_dir)
    subprocess.check_call(['git', 'checkout', 'origin/main'],
                          cwd=options.chromium_dir)
    subprocess.check_call(['gclient', 'sync'], cwd=options.chromium_dir)
def copy_files(options):
    """Copy every FILE_MAPPINGS entry from the Chromium tree into
    devtools-frontend, logging each source => destination pair."""
    for from_path, to_path in FILE_MAPPINGS.items():
        from_path = os.path.normpath(from_path)
        to_path = os.path.normpath(to_path)
        print('%s => %s' % (from_path, to_path))
        shutil.copy(os.path.join(options.chromium_dir, from_path),
                    os.path.join(options.devtools_dir, to_path))
if __name__ == '__main__':
    # Script entry point: sync the Chromium checkout, then copy the files.
    OPTIONS = parse_options(sys.argv[1:])
    update(OPTIONS)
    copy_files(OPTIONS)
| bsd-3-clause |
valkjsaaa/sl4a | python/src/Lib/test/test_builtin.py | 51 | 54825 | # Python test set -- built-in functions
import test.test_support, unittest
from test.test_support import fcmp, have_unicode, TESTFN, unlink, \
run_unittest, run_with_locale
from operator import neg
import sys, warnings, cStringIO, random, fractions, UserDict
warnings.filterwarnings("ignore", "hex../oct.. of negative int",
FutureWarning, __name__)
warnings.filterwarnings("ignore", "integer argument expected",
DeprecationWarning, "unittest")
# count the number of test runs.
# used to skip running test_execfile() multiple times
numruns = 0
class Squares:
    """Lazily computed sequence of the first `max` perfect squares.

    Values are cached in ``sofar`` as they are first requested;
    ``__len__`` reports only how many have been computed so far.
    """
    def __init__(self, max):
        self.max = max
        self.sofar = []
    def __len__(self):
        return len(self.sofar)
    def __getitem__(self, i):
        if not 0 <= i < self.max:
            raise IndexError
        # Fill the cache up through index i on demand.
        first_missing = len(self.sofar)
        self.sofar.extend(k * k for k in range(first_missing, i + 1))
        return self.sofar[i]
class StrSquares:
    """Lazily computed sequence of the first `max` squares, as strings.

    Mirrors Squares but yields str(n*n); ``__len__`` reports only the
    number of values computed so far.
    """
    def __init__(self, max):
        self.max = max
        self.sofar = []
    def __len__(self):
        return len(self.sofar)
    def __getitem__(self, i):
        if not 0 <= i < self.max:
            raise IndexError
        # Fill the cache up through index i on demand.
        first_missing = len(self.sofar)
        self.sofar.extend(str(k * k) for k in range(first_missing, i + 1))
        return self.sofar[i]
class BitBucket:
    """File-like sink whose write() silently discards everything."""
    def write(self, line):
        # Intentionally a no-op: used to swallow output in tests.
        pass
class TestFailingBool:
    """Object whose truth-value evaluation raises RuntimeError
    (Python 2 ``__nonzero__`` protocol)."""
    def __nonzero__(self):
        raise RuntimeError
class TestFailingIter:
    """Object whose attempted iteration raises RuntimeError."""
    def __iter__(self):
        raise RuntimeError
class BuiltinTest(unittest.TestCase):
def test_import(self):
__import__('sys')
__import__('time')
__import__('string')
__import__(name='sys')
__import__(name='time', level=0)
self.assertRaises(ImportError, __import__, 'spamspam')
self.assertRaises(TypeError, __import__, 1, 2, 3, 4)
self.assertRaises(ValueError, __import__, '')
self.assertRaises(TypeError, __import__, 'sys', name='sys')
def test_abs(self):
# int
self.assertEqual(abs(0), 0)
self.assertEqual(abs(1234), 1234)
self.assertEqual(abs(-1234), 1234)
self.assertTrue(abs(-sys.maxint-1) > 0)
# float
self.assertEqual(abs(0.0), 0.0)
self.assertEqual(abs(3.14), 3.14)
self.assertEqual(abs(-3.14), 3.14)
# long
self.assertEqual(abs(0L), 0L)
self.assertEqual(abs(1234L), 1234L)
self.assertEqual(abs(-1234L), 1234L)
# str
self.assertRaises(TypeError, abs, 'a')
def test_all(self):
self.assertEqual(all([2, 4, 6]), True)
self.assertEqual(all([2, None, 6]), False)
self.assertRaises(RuntimeError, all, [2, TestFailingBool(), 6])
self.assertRaises(RuntimeError, all, TestFailingIter())
self.assertRaises(TypeError, all, 10) # Non-iterable
self.assertRaises(TypeError, all) # No args
self.assertRaises(TypeError, all, [2, 4, 6], []) # Too many args
self.assertEqual(all([]), True) # Empty iterator
S = [50, 60]
self.assertEqual(all(x > 42 for x in S), True)
S = [50, 40, 60]
self.assertEqual(all(x > 42 for x in S), False)
def test_any(self):
self.assertEqual(any([None, None, None]), False)
self.assertEqual(any([None, 4, None]), True)
self.assertRaises(RuntimeError, any, [None, TestFailingBool(), 6])
self.assertRaises(RuntimeError, all, TestFailingIter())
self.assertRaises(TypeError, any, 10) # Non-iterable
self.assertRaises(TypeError, any) # No args
self.assertRaises(TypeError, any, [2, 4, 6], []) # Too many args
self.assertEqual(any([]), False) # Empty iterator
S = [40, 60, 30]
self.assertEqual(any(x > 42 for x in S), True)
S = [10, 20, 30]
self.assertEqual(any(x > 42 for x in S), False)
def test_neg(self):
x = -sys.maxint-1
self.assert_(isinstance(x, int))
self.assertEqual(-x, sys.maxint+1)
def test_apply(self):
def f0(*args):
self.assertEqual(args, ())
def f1(a1):
self.assertEqual(a1, 1)
def f2(a1, a2):
self.assertEqual(a1, 1)
self.assertEqual(a2, 2)
def f3(a1, a2, a3):
self.assertEqual(a1, 1)
self.assertEqual(a2, 2)
self.assertEqual(a3, 3)
apply(f0, ())
apply(f1, (1,))
apply(f2, (1, 2))
apply(f3, (1, 2, 3))
# A PyCFunction that takes only positional parameters should allow an
# empty keyword dictionary to pass without a complaint, but raise a
# TypeError if the dictionary is non-empty.
apply(id, (1,), {})
self.assertRaises(TypeError, apply, id, (1,), {"foo": 1})
self.assertRaises(TypeError, apply)
self.assertRaises(TypeError, apply, id, 42)
self.assertRaises(TypeError, apply, id, (42,), 42)
def test_callable(self):
self.assert_(callable(len))
def f(): pass
self.assert_(callable(f))
class C:
def meth(self): pass
self.assert_(callable(C))
x = C()
self.assert_(callable(x.meth))
self.assert_(not callable(x))
class D(C):
def __call__(self): pass
y = D()
self.assert_(callable(y))
y()
def test_chr(self):
self.assertEqual(chr(32), ' ')
self.assertEqual(chr(65), 'A')
self.assertEqual(chr(97), 'a')
self.assertEqual(chr(0xff), '\xff')
self.assertRaises(ValueError, chr, 256)
self.assertRaises(TypeError, chr)
def test_cmp(self):
self.assertEqual(cmp(-1, 1), -1)
self.assertEqual(cmp(1, -1), 1)
self.assertEqual(cmp(1, 1), 0)
# verify that circular objects are not handled
a = []; a.append(a)
b = []; b.append(b)
from UserList import UserList
c = UserList(); c.append(c)
self.assertRaises(RuntimeError, cmp, a, b)
self.assertRaises(RuntimeError, cmp, b, c)
self.assertRaises(RuntimeError, cmp, c, a)
self.assertRaises(RuntimeError, cmp, a, c)
# okay, now break the cycles
a.pop(); b.pop(); c.pop()
self.assertRaises(TypeError, cmp)
def test_coerce(self):
self.assert_(not fcmp(coerce(1, 1.1), (1.0, 1.1)))
self.assertEqual(coerce(1, 1L), (1L, 1L))
self.assert_(not fcmp(coerce(1L, 1.1), (1.0, 1.1)))
self.assertRaises(TypeError, coerce)
class BadNumber:
def __coerce__(self, other):
raise ValueError
self.assertRaises(ValueError, coerce, 42, BadNumber())
self.assertRaises(OverflowError, coerce, 0.5, int("12345" * 1000))
def test_compile(self):
compile('print 1\n', '', 'exec')
bom = '\xef\xbb\xbf'
compile(bom + 'print 1\n', '', 'exec')
compile(source='pass', filename='?', mode='exec')
compile(dont_inherit=0, filename='tmp', source='0', mode='eval')
compile('pass', '?', dont_inherit=1, mode='exec')
self.assertRaises(TypeError, compile)
self.assertRaises(ValueError, compile, 'print 42\n', '<string>', 'badmode')
self.assertRaises(ValueError, compile, 'print 42\n', '<string>', 'single', 0xff)
self.assertRaises(TypeError, compile, chr(0), 'f', 'exec')
self.assertRaises(TypeError, compile, 'pass', '?', 'exec',
mode='eval', source='0', filename='tmp')
if have_unicode:
compile(unicode('print u"\xc3\xa5"\n', 'utf8'), '', 'exec')
self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec')
self.assertRaises(ValueError, compile, unicode('a = 1'), 'f', 'bad')
def test_delattr(self):
import sys
sys.spam = 1
delattr(sys, 'spam')
self.assertRaises(TypeError, delattr)
def test_dir(self):
# dir(wrong number of arguments)
self.assertRaises(TypeError, dir, 42, 42)
# dir() - local scope
local_var = 1
self.assert_('local_var' in dir())
# dir(module)
import sys
self.assert_('exit' in dir(sys))
# dir(module_with_invalid__dict__)
import types
class Foo(types.ModuleType):
__dict__ = 8
f = Foo("foo")
self.assertRaises(TypeError, dir, f)
# dir(type)
self.assert_("strip" in dir(str))
self.assert_("__mro__" not in dir(str))
# dir(obj)
class Foo(object):
def __init__(self):
self.x = 7
self.y = 8
self.z = 9
f = Foo()
self.assert_("y" in dir(f))
# dir(obj_no__dict__)
class Foo(object):
__slots__ = []
f = Foo()
self.assert_("__repr__" in dir(f))
# dir(obj_no__class__with__dict__)
# (an ugly trick to cause getattr(f, "__class__") to fail)
class Foo(object):
__slots__ = ["__class__", "__dict__"]
def __init__(self):
self.bar = "wow"
f = Foo()
self.assert_("__repr__" not in dir(f))
self.assert_("bar" in dir(f))
# dir(obj_using __dir__)
class Foo(object):
def __dir__(self):
return ["kan", "ga", "roo"]
f = Foo()
self.assert_(dir(f) == ["ga", "kan", "roo"])
# dir(obj__dir__not_list)
class Foo(object):
def __dir__(self):
return 7
f = Foo()
self.assertRaises(TypeError, dir, f)
def test_divmod(self):
self.assertEqual(divmod(12, 7), (1, 5))
self.assertEqual(divmod(-12, 7), (-2, 2))
self.assertEqual(divmod(12, -7), (-2, -2))
self.assertEqual(divmod(-12, -7), (1, -5))
self.assertEqual(divmod(12L, 7L), (1L, 5L))
self.assertEqual(divmod(-12L, 7L), (-2L, 2L))
self.assertEqual(divmod(12L, -7L), (-2L, -2L))
self.assertEqual(divmod(-12L, -7L), (1L, -5L))
self.assertEqual(divmod(12, 7L), (1, 5L))
self.assertEqual(divmod(-12, 7L), (-2, 2L))
self.assertEqual(divmod(12L, -7), (-2L, -2))
self.assertEqual(divmod(-12L, -7), (1L, -5))
self.assertEqual(divmod(-sys.maxint-1, -1),
(sys.maxint+1, 0))
self.assert_(not fcmp(divmod(3.25, 1.0), (3.0, 0.25)))
self.assert_(not fcmp(divmod(-3.25, 1.0), (-4.0, 0.75)))
self.assert_(not fcmp(divmod(3.25, -1.0), (-4.0, -0.75)))
self.assert_(not fcmp(divmod(-3.25, -1.0), (3.0, -0.25)))
self.assertRaises(TypeError, divmod)
def test_eval(self):
self.assertEqual(eval('1+1'), 2)
self.assertEqual(eval(' 1+1\n'), 2)
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
self.assertEqual(eval('a', globals) , 1)
self.assertEqual(eval('a', globals, locals), 1)
self.assertEqual(eval('b', globals, locals), 200)
self.assertEqual(eval('c', globals, locals), 300)
if have_unicode:
self.assertEqual(eval(unicode('1+1')), 2)
self.assertEqual(eval(unicode(' 1+1\n')), 2)
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
if have_unicode:
self.assertEqual(eval(unicode('a'), globals), 1)
self.assertEqual(eval(unicode('a'), globals, locals), 1)
self.assertEqual(eval(unicode('b'), globals, locals), 200)
self.assertEqual(eval(unicode('c'), globals, locals), 300)
bom = '\xef\xbb\xbf'
self.assertEqual(eval(bom + 'a', globals, locals), 1)
self.assertEqual(eval(unicode('u"\xc3\xa5"', 'utf8'), globals),
unicode('\xc3\xa5', 'utf8'))
self.assertRaises(TypeError, eval)
self.assertRaises(TypeError, eval, ())
def test_general_eval(self):
# Tests that general mappings can be used for the locals argument
class M:
"Test mapping interface versus possible calls from eval()."
def __getitem__(self, key):
if key == 'a':
return 12
raise KeyError
def keys(self):
return list('xyz')
m = M()
g = globals()
self.assertEqual(eval('a', g, m), 12)
self.assertRaises(NameError, eval, 'b', g, m)
self.assertEqual(eval('dir()', g, m), list('xyz'))
self.assertEqual(eval('globals()', g, m), g)
self.assertEqual(eval('locals()', g, m), m)
self.assertRaises(TypeError, eval, 'a', m)
class A:
"Non-mapping"
pass
m = A()
self.assertRaises(TypeError, eval, 'a', g, m)
# Verify that dict subclasses work as well
class D(dict):
def __getitem__(self, key):
if key == 'a':
return 12
return dict.__getitem__(self, key)
def keys(self):
return list('xyz')
d = D()
self.assertEqual(eval('a', g, d), 12)
self.assertRaises(NameError, eval, 'b', g, d)
self.assertEqual(eval('dir()', g, d), list('xyz'))
self.assertEqual(eval('globals()', g, d), g)
self.assertEqual(eval('locals()', g, d), d)
# Verify locals stores (used by list comps)
eval('[locals() for i in (2,3)]', g, d)
eval('[locals() for i in (2,3)]', g, UserDict.UserDict())
class SpreadSheet:
"Sample application showing nested, calculated lookups."
_cells = {}
def __setitem__(self, key, formula):
self._cells[key] = formula
def __getitem__(self, key):
return eval(self._cells[key], globals(), self)
ss = SpreadSheet()
ss['a1'] = '5'
ss['a2'] = 'a1*6'
ss['a3'] = 'a2*7'
self.assertEqual(ss['a3'], 210)
# Verify that dir() catches a non-list returned by eval
# SF bug #1004669
class C:
def __getitem__(self, item):
raise KeyError(item)
def keys(self):
return 'a'
self.assertRaises(TypeError, eval, 'dir()', globals(), C())
# Done outside of the method test_z to get the correct scope
z = 0
f = open(TESTFN, 'w')
f.write('z = z+1\n')
f.write('z = z*2\n')
f.close()
execfile(TESTFN)
def test_execfile(self):
global numruns
if numruns:
return
numruns += 1
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
self.assertEqual(self.__class__.z, 2)
globals['z'] = 0
execfile(TESTFN, globals)
self.assertEqual(globals['z'], 2)
locals['z'] = 0
execfile(TESTFN, globals, locals)
self.assertEqual(locals['z'], 2)
class M:
"Test mapping interface versus possible calls from execfile()."
def __init__(self):
self.z = 10
def __getitem__(self, key):
if key == 'z':
return self.z
raise KeyError
def __setitem__(self, key, value):
if key == 'z':
self.z = value
return
raise KeyError
locals = M()
locals['z'] = 0
execfile(TESTFN, globals, locals)
self.assertEqual(locals['z'], 2)
unlink(TESTFN)
self.assertRaises(TypeError, execfile)
self.assertRaises(TypeError, execfile, TESTFN, {}, ())
import os
self.assertRaises(IOError, execfile, os.curdir)
self.assertRaises(IOError, execfile, "I_dont_exist")
def test_filter(self):
self.assertEqual(filter(lambda c: 'a' <= c <= 'z', 'Hello World'), 'elloorld')
self.assertEqual(filter(None, [1, 'hello', [], [3], '', None, 9, 0]), [1, 'hello', [3], 9])
self.assertEqual(filter(lambda x: x > 0, [1, -3, 9, 0, 2]), [1, 9, 2])
self.assertEqual(filter(None, Squares(10)), [1, 4, 9, 16, 25, 36, 49, 64, 81])
self.assertEqual(filter(lambda x: x%2, Squares(10)), [1, 9, 25, 49, 81])
def identity(item):
return 1
filter(identity, Squares(5))
self.assertRaises(TypeError, filter)
class BadSeq(object):
def __getitem__(self, index):
if index<4:
return 42
raise ValueError
self.assertRaises(ValueError, filter, lambda x: x, BadSeq())
def badfunc():
pass
self.assertRaises(TypeError, filter, badfunc, range(5))
# test bltinmodule.c::filtertuple()
self.assertEqual(filter(None, (1, 2)), (1, 2))
self.assertEqual(filter(lambda x: x>=3, (1, 2, 3, 4)), (3, 4))
self.assertRaises(TypeError, filter, 42, (1, 2))
# test bltinmodule.c::filterstring()
self.assertEqual(filter(None, "12"), "12")
self.assertEqual(filter(lambda x: x>="3", "1234"), "34")
self.assertRaises(TypeError, filter, 42, "12")
class badstr(str):
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, filter, lambda x: x >="3", badstr("1234"))
class badstr2(str):
def __getitem__(self, index):
return 42
self.assertRaises(TypeError, filter, lambda x: x >=42, badstr2("1234"))
class weirdstr(str):
def __getitem__(self, index):
return weirdstr(2*str.__getitem__(self, index))
self.assertEqual(filter(lambda x: x>="33", weirdstr("1234")), "3344")
class shiftstr(str):
def __getitem__(self, index):
return chr(ord(str.__getitem__(self, index))+1)
self.assertEqual(filter(lambda x: x>="3", shiftstr("1234")), "345")
if have_unicode:
# test bltinmodule.c::filterunicode()
self.assertEqual(filter(None, unicode("12")), unicode("12"))
self.assertEqual(filter(lambda x: x>="3", unicode("1234")), unicode("34"))
self.assertRaises(TypeError, filter, 42, unicode("12"))
self.assertRaises(ValueError, filter, lambda x: x >="3", badstr(unicode("1234")))
class badunicode(unicode):
def __getitem__(self, index):
return 42
self.assertRaises(TypeError, filter, lambda x: x >=42, badunicode("1234"))
class weirdunicode(unicode):
def __getitem__(self, index):
return weirdunicode(2*unicode.__getitem__(self, index))
self.assertEqual(
filter(lambda x: x>=unicode("33"), weirdunicode("1234")), unicode("3344"))
class shiftunicode(unicode):
def __getitem__(self, index):
return unichr(ord(unicode.__getitem__(self, index))+1)
self.assertEqual(
filter(lambda x: x>=unicode("3"), shiftunicode("1234")),
unicode("345")
)
def test_filter_subclasses(self):
# test that filter() never returns tuple, str or unicode subclasses
# and that the result always goes through __getitem__
funcs = (None, bool, lambda x: True)
class tuple2(tuple):
def __getitem__(self, index):
return 2*tuple.__getitem__(self, index)
class str2(str):
def __getitem__(self, index):
return 2*str.__getitem__(self, index)
inputs = {
tuple2: {(): (), (1, 2, 3): (2, 4, 6)},
str2: {"": "", "123": "112233"}
}
if have_unicode:
class unicode2(unicode):
def __getitem__(self, index):
return 2*unicode.__getitem__(self, index)
inputs[unicode2] = {
unicode(): unicode(),
unicode("123"): unicode("112233")
}
for (cls, inps) in inputs.iteritems():
for (inp, exp) in inps.iteritems():
# make sure the output goes through __getitem__
# even if func is None
self.assertEqual(
filter(funcs[0], cls(inp)),
filter(funcs[1], cls(inp))
)
for func in funcs:
outp = filter(func, cls(inp))
self.assertEqual(outp, exp)
self.assert_(not isinstance(outp, cls))
def test_getattr(self):
import sys
self.assert_(getattr(sys, 'stdout') is sys.stdout)
self.assertRaises(TypeError, getattr, sys, 1)
self.assertRaises(TypeError, getattr, sys, 1, "foo")
self.assertRaises(TypeError, getattr)
if have_unicode:
self.assertRaises(UnicodeError, getattr, sys, unichr(sys.maxunicode))
def test_hasattr(self):
import sys
self.assert_(hasattr(sys, 'stdout'))
self.assertRaises(TypeError, hasattr, sys, 1)
self.assertRaises(TypeError, hasattr)
if have_unicode:
self.assertRaises(UnicodeError, hasattr, sys, unichr(sys.maxunicode))
# Check that hasattr allows SystemExit and KeyboardInterrupts by
class A:
def __getattr__(self, what):
raise KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, hasattr, A(), "b")
class B:
def __getattr__(self, what):
raise SystemExit
self.assertRaises(SystemExit, hasattr, B(), "b")
def test_hash(self):
hash(None)
self.assertEqual(hash(1), hash(1L))
self.assertEqual(hash(1), hash(1.0))
hash('spam')
if have_unicode:
self.assertEqual(hash('spam'), hash(unicode('spam')))
hash((0,1,2,3))
def f(): pass
self.assertRaises(TypeError, hash, [])
self.assertRaises(TypeError, hash, {})
# Bug 1536021: Allow hash to return long objects
class X:
def __hash__(self):
return 2**100
self.assertEquals(type(hash(X())), int)
class Y(object):
def __hash__(self):
return 2**100
self.assertEquals(type(hash(Y())), int)
class Z(long):
def __hash__(self):
return self
self.assertEquals(hash(Z(42)), hash(42L))
def test_hex(self):
self.assertEqual(hex(16), '0x10')
self.assertEqual(hex(16L), '0x10L')
self.assertEqual(hex(-16), '-0x10')
self.assertEqual(hex(-16L), '-0x10L')
self.assertRaises(TypeError, hex, {})
def test_id(self):
id(None)
id(1)
id(1L)
id(1.0)
id('spam')
id((0,1,2,3))
id([0,1,2,3])
id({'spam': 1, 'eggs': 2, 'ham': 3})
# Test input() later, together with raw_input
def test_intern(self):
self.assertRaises(TypeError, intern)
s = "never interned before"
self.assert_(intern(s) is s)
s2 = s.swapcase().swapcase()
self.assert_(intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, intern, S("abc"))
# It's still safe to pass these strings to routines that
# call intern internally, e.g. PyObject_SetAttr().
s = S("abc")
setattr(s, s, s)
self.assertEqual(getattr(s, s), s)
def test_iter(self):
self.assertRaises(TypeError, iter)
self.assertRaises(TypeError, iter, 42, 42)
lists = [("1", "2"), ["1", "2"], "12"]
if have_unicode:
lists.append(unicode("12"))
for l in lists:
i = iter(l)
self.assertEqual(i.next(), '1')
self.assertEqual(i.next(), '2')
self.assertRaises(StopIteration, i.next)
def test_isinstance(self):
class C:
pass
class D(C):
pass
class E:
pass
c = C()
d = D()
e = E()
self.assert_(isinstance(c, C))
self.assert_(isinstance(d, C))
self.assert_(not isinstance(e, C))
self.assert_(not isinstance(c, D))
self.assert_(not isinstance('foo', E))
self.assertRaises(TypeError, isinstance, E, 'foo')
self.assertRaises(TypeError, isinstance)
    def test_issubclass(self):
        """issubclass() with classic classes and non-class arguments."""
        class C:
            pass
        class D(C):
            pass
        class E:
            pass
        c = C()
        d = D()
        e = E()
        self.assert_(issubclass(D, C))
        self.assert_(issubclass(C, C))
        self.assert_(not issubclass(C, D))
        self.assertRaises(TypeError, issubclass, 'foo', E)
        self.assertRaises(TypeError, issubclass, E, 'foo')
        self.assertRaises(TypeError, issubclass)
    def test_len(self):
        """len() on builtin containers; __len__ exceptions propagate."""
        self.assertEqual(len('123'), 3)
        self.assertEqual(len(()), 0)
        self.assertEqual(len((1, 2, 3, 4)), 4)
        self.assertEqual(len([1, 2, 3, 4]), 4)
        self.assertEqual(len({}), 0)
        self.assertEqual(len({'a':1, 'b': 2}), 2)
        class BadSeq:
            def __len__(self):
                raise ValueError
        self.assertRaises(ValueError, len, BadSeq())
    def test_map(self):
        """map() with None (identity/zip-with-padding), functions of one or
        more arguments, iterables of unequal length, and error propagation."""
        self.assertEqual(
            map(None, 'hello world'),
            ['h','e','l','l','o',' ','w','o','r','l','d']
        )
        self.assertEqual(
            map(None, 'abcd', 'efg'),
            [('a', 'e'), ('b', 'f'), ('c', 'g'), ('d', None)]
        )
        self.assertEqual(
            map(None, range(10)),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        )
        self.assertEqual(
            map(lambda x: x*x, range(1,4)),
            [1, 4, 9]
        )
        try:
            from math import sqrt
        except ImportError:
            def sqrt(x):
                return pow(x, 0.5)
        self.assertEqual(
            map(lambda x: map(sqrt,x), [[16, 4], [81, 9]]),
            [[4.0, 2.0], [9.0, 3.0]]
        )
        self.assertEqual(
            map(lambda x, y: x+y, [1,3,2], [9,1,4]),
            [10, 4, 6]
        )
        def plus(*v):
            accu = 0
            for i in v: accu = accu + i
            return accu
        self.assertEqual(
            map(plus, [1, 3, 7]),
            [1, 3, 7]
        )
        self.assertEqual(
            map(plus, [1, 3, 7], [4, 9, 2]),
            [1+4, 3+9, 7+2]
        )
        self.assertEqual(
            map(plus, [1, 3, 7], [4, 9, 2], [1, 1, 0]),
            [1+4+1, 3+9+1, 7+2+0]
        )
        self.assertEqual(
            map(None, Squares(10)),
            [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
        )
        self.assertEqual(
            map(int, Squares(10)),
            [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
        )
        self.assertEqual(
            map(None, Squares(3), Squares(2)),
            [(0,0), (1,1), (4,None)]
        )
        self.assertEqual(
            map(max, Squares(3), Squares(2)),
            [0, 1, 4]
        )
        self.assertRaises(TypeError, map)
        self.assertRaises(TypeError, map, lambda x: x, 42)
        self.assertEqual(map(None, [42]), [42])
        class BadSeq:
            def __getitem__(self, index):
                raise ValueError
        self.assertRaises(ValueError, map, lambda x: x, BadSeq())
        def badfunc(x):
            raise RuntimeError
        self.assertRaises(RuntimeError, map, badfunc, range(5))
    def test_max(self):
        """max() over strings, mixed numeric types, and the key= keyword."""
        self.assertEqual(max('123123'), '3')
        self.assertEqual(max(1, 2, 3), 3)
        self.assertEqual(max((1, 2, 3, 1, 2, 3)), 3)
        self.assertEqual(max([1, 2, 3, 1, 2, 3]), 3)
        self.assertEqual(max(1, 2L, 3.0), 3.0)
        self.assertEqual(max(1L, 2.0, 3), 3)
        self.assertEqual(max(1.0, 2, 3L), 3L)
        for stmt in (
            "max(key=int)", # no args
            "max(1, key=int)", # single arg not iterable
            "max(1, 2, keystone=int)", # wrong keyword
            "max(1, 2, key=int, abc=int)", # too many keywords
            "max(1, 2, key=1)", # keyfunc is not callable
            ):
            try:
                exec(stmt) in globals()
            except TypeError:
                pass
            else:
                self.fail(stmt)
        self.assertEqual(max((1,), key=neg), 1) # one elem iterable
        self.assertEqual(max((1,2), key=neg), 1) # two elem iterable
        self.assertEqual(max(1, 2, key=neg), 1) # two elems
        data = [random.randrange(200) for i in range(100)]
        keys = dict((elem, random.randrange(50)) for elem in data)
        f = keys.__getitem__
        self.assertEqual(max(data, key=f),
                         sorted(reversed(data), key=f)[-1])
    def test_min(self):
        """min() over strings, mixed numeric types, and the key= keyword."""
        self.assertEqual(min('123123'), '1')
        self.assertEqual(min(1, 2, 3), 1)
        self.assertEqual(min((1, 2, 3, 1, 2, 3)), 1)
        self.assertEqual(min([1, 2, 3, 1, 2, 3]), 1)
        self.assertEqual(min(1, 2L, 3.0), 1)
        self.assertEqual(min(1L, 2.0, 3), 1L)
        self.assertEqual(min(1.0, 2, 3L), 1.0)
        self.assertRaises(TypeError, min)
        self.assertRaises(TypeError, min, 42)
        self.assertRaises(ValueError, min, ())
        class BadSeq:
            def __getitem__(self, index):
                raise ValueError
        self.assertRaises(ValueError, min, BadSeq())
        class BadNumber:
            def __cmp__(self, other):
                raise ValueError
        self.assertRaises(ValueError, min, (42, BadNumber()))
        for stmt in (
            "min(key=int)", # no args
            "min(1, key=int)", # single arg not iterable
            "min(1, 2, keystone=int)", # wrong keyword
            "min(1, 2, key=int, abc=int)", # too many keywords
            "min(1, 2, key=1)", # keyfunc is not callable
            ):
            try:
                exec(stmt) in globals()
            except TypeError:
                pass
            else:
                self.fail(stmt)
        self.assertEqual(min((1,), key=neg), 1) # one elem iterable
        self.assertEqual(min((1,2), key=neg), 2) # two elem iterable
        self.assertEqual(min(1, 2, key=neg), 2) # two elems
        data = [random.randrange(200) for i in range(100)]
        keys = dict((elem, random.randrange(50)) for elem in data)
        f = keys.__getitem__
        self.assertEqual(min(data, key=f),
                         sorted(data, key=f)[0])
    def test_next(self):
        """next() with and without a default, on iterators and generators."""
        it = iter(range(2))
        self.assertEqual(next(it), 0)
        self.assertEqual(next(it), 1)
        self.assertRaises(StopIteration, next, it)
        self.assertRaises(StopIteration, next, it)
        self.assertEquals(next(it, 42), 42)
        class Iter(object):
            def __iter__(self):
                return self
            def next(self):
                raise StopIteration
        it = iter(Iter())
        self.assertEquals(next(it, 42), 42)
        self.assertRaises(StopIteration, next, it)
        def gen():
            yield 1
            return
        it = gen()
        self.assertEquals(next(it), 1)
        self.assertRaises(StopIteration, next, it)
        self.assertEquals(next(it, 42), 42)
    def test_oct(self):
        """oct() formats ints and longs, with an 'L' suffix for longs."""
        self.assertEqual(oct(100), '0144')
        self.assertEqual(oct(100L), '0144L')
        self.assertEqual(oct(-100), '-0144')
        self.assertEqual(oct(-100L), '-0144L')
        self.assertRaises(TypeError, oct, ())
    def write_testfile(self):
        """Writes the fixture file shared by test_open and the input tests."""
        # NB the first 4 lines are also used to test input and raw_input, below
        fp = open(TESTFN, 'w')
        try:
            fp.write('1+1\n')
            fp.write('1+1\n')
            fp.write('The quick brown fox jumps over the lazy dog')
            fp.write('.\n')
            fp.write('Dear John\n')
            fp.write('XXX'*100)
            fp.write('YYY'*100)
        finally:
            fp.close()
    def test_open(self):
        """open()/readline()/read() against the fixture written above."""
        self.write_testfile()
        fp = open(TESTFN, 'r')
        try:
            self.assertEqual(fp.readline(4), '1+1\n')
            self.assertEqual(fp.readline(4), '1+1\n')
            self.assertEqual(fp.readline(), 'The quick brown fox jumps over the lazy dog.\n')
            self.assertEqual(fp.readline(4), 'Dear')
            self.assertEqual(fp.readline(100), ' John\n')
            self.assertEqual(fp.read(300), 'XXX'*100)
            self.assertEqual(fp.read(1000), 'YYY'*100)
        finally:
            fp.close()
        unlink(TESTFN)
    def test_ord(self):
        """ord() on single characters; multi-char strings raise TypeError."""
        self.assertEqual(ord(' '), 32)
        self.assertEqual(ord('A'), 65)
        self.assertEqual(ord('a'), 97)
        if have_unicode:
            self.assertEqual(ord(unichr(sys.maxunicode)), sys.maxunicode)
        self.assertRaises(TypeError, ord, 42)
        if have_unicode:
            self.assertRaises(TypeError, ord, unicode("12"))
    def test_pow(self):
        """pow() for ints, longs, floats, and the three-argument form."""
        self.assertEqual(pow(0,0), 1)
        self.assertEqual(pow(0,1), 0)
        self.assertEqual(pow(1,0), 1)
        self.assertEqual(pow(1,1), 1)
        self.assertEqual(pow(2,0), 1)
        self.assertEqual(pow(2,10), 1024)
        self.assertEqual(pow(2,20), 1024*1024)
        self.assertEqual(pow(2,30), 1024*1024*1024)
        self.assertEqual(pow(-2,0), 1)
        self.assertEqual(pow(-2,1), -2)
        self.assertEqual(pow(-2,2), 4)
        self.assertEqual(pow(-2,3), -8)
        self.assertEqual(pow(0L,0), 1)
        self.assertEqual(pow(0L,1), 0)
        self.assertEqual(pow(1L,0), 1)
        self.assertEqual(pow(1L,1), 1)
        self.assertEqual(pow(2L,0), 1)
        self.assertEqual(pow(2L,10), 1024)
        self.assertEqual(pow(2L,20), 1024*1024)
        self.assertEqual(pow(2L,30), 1024*1024*1024)
        self.assertEqual(pow(-2L,0), 1)
        self.assertEqual(pow(-2L,1), -2)
        self.assertEqual(pow(-2L,2), 4)
        self.assertEqual(pow(-2L,3), -8)
        self.assertAlmostEqual(pow(0.,0), 1.)
        self.assertAlmostEqual(pow(0.,1), 0.)
        self.assertAlmostEqual(pow(1.,0), 1.)
        self.assertAlmostEqual(pow(1.,1), 1.)
        self.assertAlmostEqual(pow(2.,0), 1.)
        self.assertAlmostEqual(pow(2.,10), 1024.)
        self.assertAlmostEqual(pow(2.,20), 1024.*1024.)
        self.assertAlmostEqual(pow(2.,30), 1024.*1024.*1024.)
        self.assertAlmostEqual(pow(-2.,0), 1.)
        self.assertAlmostEqual(pow(-2.,1), -2.)
        self.assertAlmostEqual(pow(-2.,2), 4.)
        self.assertAlmostEqual(pow(-2.,3), -8.)
        # Three-argument pow() requires all-integer arguments.
        for x in 2, 2L, 2.0:
            for y in 10, 10L, 10.0:
                for z in 1000, 1000L, 1000.0:
                    if isinstance(x, float) or \
                       isinstance(y, float) or \
                       isinstance(z, float):
                        self.assertRaises(TypeError, pow, x, y, z)
                    else:
                        self.assertAlmostEqual(pow(x, y, z), 24.0)
        self.assertRaises(TypeError, pow, -1, -2, 3)
        self.assertRaises(ValueError, pow, 1, 2, 0)
        self.assertRaises(TypeError, pow, -1L, -2L, 3L)
        self.assertRaises(ValueError, pow, 1L, 2L, 0L)
        # Will return complex in 3.0:
        self.assertRaises(ValueError, pow, -342.43, 0.234)
        self.assertRaises(TypeError, pow)
    def test_range(self):
        """range() basics, long arguments, and argument validation."""
        self.assertEqual(range(3), [0, 1, 2])
        self.assertEqual(range(1, 5), [1, 2, 3, 4])
        self.assertEqual(range(0), [])
        self.assertEqual(range(-3), [])
        self.assertEqual(range(1, 10, 3), [1, 4, 7])
        self.assertEqual(range(5, -5, -3), [5, 2, -1, -4])
        # Now test range() with longs
        self.assertEqual(range(-2**100), [])
        self.assertEqual(range(0, -2**100), [])
        self.assertEqual(range(0, 2**100, -1), [])
        self.assertEqual(range(0, 2**100, -1), [])
        a = long(10 * sys.maxint)
        b = long(100 * sys.maxint)
        c = long(50 * sys.maxint)
        self.assertEqual(range(a, a+2), [a, a+1])
        self.assertEqual(range(a+2, a, -1L), [a+2, a+1])
        self.assertEqual(range(a+4, a, -2), [a+4, a+2])
        seq = range(a, b, c)
        self.assert_(a in seq)
        self.assert_(b not in seq)
        self.assertEqual(len(seq), 2)
        seq = range(b, a, -c)
        self.assert_(b in seq)
        self.assert_(a not in seq)
        self.assertEqual(len(seq), 2)
        seq = range(-a, -b, -c)
        self.assert_(-a in seq)
        self.assert_(-b not in seq)
        self.assertEqual(len(seq), 2)
        self.assertRaises(TypeError, range)
        self.assertRaises(TypeError, range, 1, 2, 3, 4)
        self.assertRaises(ValueError, range, 1, 2, 0)
        self.assertRaises(ValueError, range, a, a + 1, long(0))
        class badzero(int):
            def __cmp__(self, other):
                raise RuntimeError
            __hash__ = None # Invalid cmp makes this unhashable
        self.assertRaises(RuntimeError, range, a, a + 1, badzero(1))
        # Reject floats when it would require PyLongs to represent.
        # (smaller floats still accepted, but deprecated)
        self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
        self.assertRaises(TypeError, range, 0, "spam")
        self.assertRaises(TypeError, range, 0, 42, "spam")
        self.assertRaises(OverflowError, range, -sys.maxint, sys.maxint)
        self.assertRaises(OverflowError, range, 0, 2*sys.maxint)
    def test_input_and_raw_input(self):
        """input()/raw_input() reading from a redirected sys.stdin."""
        self.write_testfile()
        fp = open(TESTFN, 'r')
        savestdin = sys.stdin
        savestdout = sys.stdout # Eats the echo
        try:
            sys.stdin = fp
            sys.stdout = BitBucket()
            self.assertEqual(input(), 2)
            self.assertEqual(input('testing\n'), 2)
            self.assertEqual(raw_input(), 'The quick brown fox jumps over the lazy dog.')
            self.assertEqual(raw_input('testing\n'), 'Dear John')
            # SF 1535165: don't segfault on closed stdin
            # sys.stdout must be a regular file for triggering
            sys.stdout = savestdout
            sys.stdin.close()
            self.assertRaises(ValueError, input)
            sys.stdout = BitBucket()
            sys.stdin = cStringIO.StringIO("NULL\0")
            self.assertRaises(TypeError, input, 42, 42)
            sys.stdin = cStringIO.StringIO(" 'whitespace'")
            self.assertEqual(input(), 'whitespace')
            sys.stdin = cStringIO.StringIO()
            self.assertRaises(EOFError, input)
            # SF 876178: make sure input() respects future options.
            sys.stdin = cStringIO.StringIO('1/2')
            sys.stdout = cStringIO.StringIO()
            exec compile('print input()', 'test_builtin_tmp', 'exec')
            sys.stdin.seek(0, 0)
            exec compile('from __future__ import division;print input()',
                         'test_builtin_tmp', 'exec')
            sys.stdin.seek(0, 0)
            exec compile('print input()', 'test_builtin_tmp', 'exec')
            # The result we expect depends on whether new division semantics
            # are already in effect.
            if 1/2 == 0:
                # This test was compiled with old semantics.
                expected = ['0', '0.5', '0']
            else:
                # This test was compiled with new semantics (e.g., -Qnew
                # was given on the command line).
                expected = ['0.5', '0.5', '0.5']
            self.assertEqual(sys.stdout.getvalue().splitlines(), expected)
            del sys.stdout
            self.assertRaises(RuntimeError, input, 'prompt')
            del sys.stdin
            self.assertRaises(RuntimeError, input, 'prompt')
        finally:
            sys.stdin = savestdin
            sys.stdout = savestdout
            fp.close()
            unlink(TESTFN)
    def test_reduce(self):
        """reduce() with and without initializers, and error cases."""
        self.assertEqual(reduce(lambda x, y: x+y, ['a', 'b', 'c'], ''), 'abc')
        self.assertEqual(
            reduce(lambda x, y: x+y, [['a', 'c'], [], ['d', 'w']], []),
            ['a','c','d','w']
        )
        self.assertEqual(reduce(lambda x, y: x*y, range(2,8), 1), 5040)
        self.assertEqual(
            reduce(lambda x, y: x*y, range(2,21), 1L),
            2432902008176640000L
        )
        self.assertEqual(reduce(lambda x, y: x+y, Squares(10)), 285)
        self.assertEqual(reduce(lambda x, y: x+y, Squares(10), 0), 285)
        self.assertEqual(reduce(lambda x, y: x+y, Squares(0), 0), 0)
        self.assertRaises(TypeError, reduce)
        self.assertRaises(TypeError, reduce, 42, 42)
        self.assertRaises(TypeError, reduce, 42, 42, 42)
        self.assertEqual(reduce(42, "1"), "1") # func is never called with one item
        self.assertEqual(reduce(42, "", "1"), "1") # func is never called with one item
        self.assertRaises(TypeError, reduce, 42, (42, 42))
        class BadSeq:
            def __getitem__(self, index):
                raise ValueError
        self.assertRaises(ValueError, reduce, 42, BadSeq())
    def test_reload(self):
        """reload() succeeds on already-imported stdlib modules."""
        import marshal
        reload(marshal)
        import string
        reload(string)
        ## import sys
        ## self.assertRaises(ImportError, reload, sys)
    def test_repr(self):
        """repr() of basic objects, including self-referential containers."""
        self.assertEqual(repr(''), '\'\'')
        self.assertEqual(repr(0), '0')
        self.assertEqual(repr(0L), '0L')
        self.assertEqual(repr(()), '()')
        self.assertEqual(repr([]), '[]')
        self.assertEqual(repr({}), '{}')
        a = []
        a.append(a)
        self.assertEqual(repr(a), '[[...]]')
        a = {}
        a[0] = a
        self.assertEqual(repr(a), '{0: {...}}')
    def test_round(self):
        """round() halfway cases, ndigits, result types, __float__ delegation."""
        self.assertEqual(round(0.0), 0.0)
        self.assertEqual(type(round(0.0)), float) # Will be int in 3.0.
        self.assertEqual(round(1.0), 1.0)
        self.assertEqual(round(10.0), 10.0)
        self.assertEqual(round(1000000000.0), 1000000000.0)
        self.assertEqual(round(1e20), 1e20)
        self.assertEqual(round(-1.0), -1.0)
        self.assertEqual(round(-10.0), -10.0)
        self.assertEqual(round(-1000000000.0), -1000000000.0)
        self.assertEqual(round(-1e20), -1e20)
        self.assertEqual(round(0.1), 0.0)
        self.assertEqual(round(1.1), 1.0)
        self.assertEqual(round(10.1), 10.0)
        self.assertEqual(round(1000000000.1), 1000000000.0)
        self.assertEqual(round(-1.1), -1.0)
        self.assertEqual(round(-10.1), -10.0)
        self.assertEqual(round(-1000000000.1), -1000000000.0)
        self.assertEqual(round(0.9), 1.0)
        self.assertEqual(round(9.9), 10.0)
        self.assertEqual(round(999999999.9), 1000000000.0)
        self.assertEqual(round(-0.9), -1.0)
        self.assertEqual(round(-9.9), -10.0)
        self.assertEqual(round(-999999999.9), -1000000000.0)
        self.assertEqual(round(-8.0, -1), -10.0)
        self.assertEqual(type(round(-8.0, -1)), float)
        self.assertEqual(type(round(-8.0, 0)), float)
        self.assertEqual(type(round(-8.0, 1)), float)
        # Check half rounding behaviour.
        self.assertEqual(round(5.5), 6)
        self.assertEqual(round(6.5), 7)
        self.assertEqual(round(-5.5), -6)
        self.assertEqual(round(-6.5), -7)
        # Check behavior on ints
        self.assertEqual(round(0), 0)
        self.assertEqual(round(8), 8)
        self.assertEqual(round(-8), -8)
        self.assertEqual(type(round(0)), float) # Will be int in 3.0.
        self.assertEqual(type(round(-8, -1)), float)
        self.assertEqual(type(round(-8, 0)), float)
        self.assertEqual(type(round(-8, 1)), float)
        # test new kwargs
        self.assertEqual(round(number=-8.0, ndigits=-1), -10.0)
        self.assertRaises(TypeError, round)
        # test generic rounding delegation for reals
        class TestRound(object):
            def __float__(self):
                return 23.0
        class TestNoRound(object):
            pass
        self.assertEqual(round(TestRound()), 23)
        self.assertRaises(TypeError, round, 1, 2, 3)
        self.assertRaises(TypeError, round, TestNoRound())
        t = TestNoRound()
        # An instance attribute is not picked up as a special method.
        t.__float__ = lambda *args: args
        self.assertRaises(TypeError, round, t)
        self.assertRaises(TypeError, round, t, 0)
    def test_setattr(self):
        """setattr() on a module and with invalid arguments."""
        setattr(sys, 'spam', 1)
        self.assertEqual(sys.spam, 1)
        self.assertRaises(TypeError, setattr, sys, 1, 'spam')
        self.assertRaises(TypeError, setattr)
    def test_sum(self):
        """sum() over sequences and iterators; strings are rejected."""
        self.assertEqual(sum([]), 0)
        self.assertEqual(sum(range(2,8)), 27)
        self.assertEqual(sum(iter(range(2,8))), 27)
        self.assertEqual(sum(Squares(10)), 285)
        self.assertEqual(sum(iter(Squares(10))), 285)
        self.assertEqual(sum([[1], [2], [3]], []), [1, 2, 3])
        self.assertRaises(TypeError, sum)
        self.assertRaises(TypeError, sum, 42)
        self.assertRaises(TypeError, sum, ['a', 'b', 'c'])
        self.assertRaises(TypeError, sum, ['a', 'b', 'c'], '')
        self.assertRaises(TypeError, sum, [[1], [2], [3]])
        self.assertRaises(TypeError, sum, [{2:3}])
        self.assertRaises(TypeError, sum, [{2:3}]*2, {2:3})
        class BadSeq:
            def __getitem__(self, index):
                raise ValueError
        self.assertRaises(ValueError, sum, BadSeq())
    def test_type(self):
        """type() returns the same object for instances of the same class."""
        self.assertEqual(type(''), type('123'))
        self.assertNotEqual(type(''), type(()))
    def test_unichr(self):
        """unichr() endpoints and out-of-range/invalid arguments."""
        if have_unicode:
            self.assertEqual(unichr(32), unicode(' '))
            self.assertEqual(unichr(65), unicode('A'))
            self.assertEqual(unichr(97), unicode('a'))
            self.assertEqual(
                unichr(sys.maxunicode),
                unicode('\\U%08x' % (sys.maxunicode), 'unicode-escape')
            )
            self.assertRaises(ValueError, unichr, sys.maxunicode+1)
            self.assertRaises(TypeError, unichr)
            self.assertRaises((OverflowError, ValueError), unichr, 2**32)
    # We don't want self in vars(), so these are static methods
    @staticmethod
    def get_vars_f0():
        """Returns vars() from a frame with no local variables."""
        return vars()
    @staticmethod
    def get_vars_f2():
        """Returns vars() from a frame with exactly two locals, a and b."""
        BuiltinTest.get_vars_f0()
        a = 1
        b = 2
        return vars()
    def test_vars(self):
        """vars() with no argument, with a module, and invalid arguments."""
        self.assertEqual(set(vars()), set(dir()))
        import sys
        self.assertEqual(set(vars(sys)), set(dir(sys)))
        self.assertEqual(self.get_vars_f0(), {})
        self.assertEqual(self.get_vars_f2(), {'a': 1, 'b': 2})
        self.assertRaises(TypeError, vars, 42, 42)
        self.assertRaises(TypeError, vars, 42)
    def test_zip(self):
        """zip() truncates to the shortest input and validates arguments."""
        a = (1, 2, 3)
        b = (4, 5, 6)
        t = [(1, 4), (2, 5), (3, 6)]
        self.assertEqual(zip(a, b), t)
        b = [4, 5, 6]
        self.assertEqual(zip(a, b), t)
        b = (4, 5, 6, 7)
        self.assertEqual(zip(a, b), t)
        class I:
            def __getitem__(self, i):
                if i < 0 or i > 2: raise IndexError
                return i + 4
        self.assertEqual(zip(a, I()), t)
        self.assertEqual(zip(), [])
        self.assertEqual(zip(*[]), [])
        self.assertRaises(TypeError, zip, None)
        class G:
            pass
        self.assertRaises(TypeError, zip, a, G())
        # Make sure zip doesn't try to allocate a billion elements for the
        # result list when one of its arguments doesn't say how long it is.
        # A MemoryError is the most likely failure mode.
        class SequenceWithoutALength:
            def __getitem__(self, i):
                if i == 5:
                    raise IndexError
                else:
                    return i
        self.assertEqual(
            zip(SequenceWithoutALength(), xrange(2**30)),
            list(enumerate(range(5)))
        )
        class BadSeq:
            def __getitem__(self, i):
                if i == 5:
                    raise ValueError
                else:
                    return i
        self.assertRaises(ValueError, zip, BadSeq(), BadSeq())
    def test_format(self):
        """Dispatch machinery of the format() builtin and object.__format__."""
        # Test the basic machinery of the format() builtin. Don't test
        # the specifics of the various formatters
        self.assertEqual(format(3, ''), '3')
        # Returns some classes to use for various tests. There's
        # an old-style version, and a new-style version
        def classes_new():
            class A(object):
                def __init__(self, x):
                    self.x = x
                def __format__(self, format_spec):
                    return str(self.x) + format_spec
            class DerivedFromA(A):
                pass
            class Simple(object): pass
            class DerivedFromSimple(Simple):
                def __init__(self, x):
                    self.x = x
                def __format__(self, format_spec):
                    return str(self.x) + format_spec
            class DerivedFromSimple2(DerivedFromSimple): pass
            return A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2
        # In 3.0, classes_classic has the same meaning as classes_new
        def classes_classic():
            class A:
                def __init__(self, x):
                    self.x = x
                def __format__(self, format_spec):
                    return str(self.x) + format_spec
            class DerivedFromA(A):
                pass
            class Simple: pass
            class DerivedFromSimple(Simple):
                def __init__(self, x):
                    self.x = x
                def __format__(self, format_spec):
                    return str(self.x) + format_spec
            class DerivedFromSimple2(DerivedFromSimple): pass
            return A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2
        # __format__ must be found on the type (or a base), not the instance.
        def class_test(A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2):
            self.assertEqual(format(A(3), 'spec'), '3spec')
            self.assertEqual(format(DerivedFromA(4), 'spec'), '4spec')
            self.assertEqual(format(DerivedFromSimple(5), 'abc'), '5abc')
            self.assertEqual(format(DerivedFromSimple2(10), 'abcdef'),
                             '10abcdef')
        class_test(*classes_new())
        class_test(*classes_classic())
        def empty_format_spec(value):
            # test that:
            # format(x, '') == str(x)
            # format(x) == str(x)
            self.assertEqual(format(value, ""), str(value))
            self.assertEqual(format(value), str(value))
        # for builtin types, format(x, "") == str(x)
        empty_format_spec(17**13)
        empty_format_spec(1.0)
        empty_format_spec(3.1415e104)
        empty_format_spec(-3.1415e104)
        empty_format_spec(3.1415e-104)
        empty_format_spec(-3.1415e-104)
        empty_format_spec(object)
        empty_format_spec(None)
        # TypeError because self.__format__ returns the wrong type
        class BadFormatResult:
            def __format__(self, format_spec):
                return 1.0
        self.assertRaises(TypeError, format, BadFormatResult(), "")
        # TypeError because format_spec is not unicode or str
        self.assertRaises(TypeError, format, object(), 4)
        self.assertRaises(TypeError, format, object(), object())
        # tests for object.__format__ really belong elsewhere, but
        # there's no good place to put them
        x = object().__format__('')
        self.assert_(x.startswith('<object object at'))
        # first argument to object.__format__ must be string
        self.assertRaises(TypeError, object().__format__, 3)
        self.assertRaises(TypeError, object().__format__, object())
        self.assertRaises(TypeError, object().__format__, None)
        # make sure we can take a subclass of str as a format spec
        class DerivedFromStr(str): pass
        self.assertEqual(format(0, DerivedFromStr('10')), ' 0')
    def test_bin(self):
        """bin() on small and very large positive/negative integers."""
        self.assertEqual(bin(0), '0b0')
        self.assertEqual(bin(1), '0b1')
        self.assertEqual(bin(-1), '-0b1')
        self.assertEqual(bin(2**65), '0b1' + '0' * 65)
        self.assertEqual(bin(2**65-1), '0b' + '1' * 65)
        self.assertEqual(bin(-(2**65)), '-0b1' + '0' * 65)
        self.assertEqual(bin(-(2**65-1)), '-0b' + '1' * 65)
class TestSorted(unittest.TestCase):
    """Tests for the sorted() builtin."""
    def test_basic(self):
        """sorted() with cmp=, key=, and reverse= leaves the input intact."""
        data = range(100)
        copy = data[:]
        random.shuffle(copy)
        self.assertEqual(data, sorted(copy))
        self.assertNotEqual(data, copy)
        data.reverse()
        random.shuffle(copy)
        self.assertEqual(data, sorted(copy, cmp=lambda x, y: cmp(y,x)))
        self.assertNotEqual(data, copy)
        random.shuffle(copy)
        self.assertEqual(data, sorted(copy, key=lambda x: -x))
        self.assertNotEqual(data, copy)
        random.shuffle(copy)
        self.assertEqual(data, sorted(copy, reverse=1))
        self.assertNotEqual(data, copy)
    def test_inputtypes(self):
        """sorted() accepts any iterable input type."""
        s = 'abracadabra'
        types = [list, tuple]
        if have_unicode:
            types.insert(0, unicode)
        for T in types:
            self.assertEqual(sorted(s), sorted(T(s)))
        s = ''.join(dict.fromkeys(s).keys()) # unique letters only
        types = [set, frozenset, list, tuple, dict.fromkeys]
        if have_unicode:
            types.insert(0, unicode)
        for T in types:
            self.assertEqual(sorted(s), sorted(T(s)))
    def test_baddecorator(self):
        """A key function of the wrong arity raises TypeError."""
        data = 'The quick Brown fox Jumped over The lazy Dog'.split()
        self.assertRaises(TypeError, sorted, data, None, lambda x,y: 0)
def test_main(verbose=None):
    """Runs the test classes; on debug builds, also checks for refcount leaks."""
    test_classes = (BuiltinTest, TestSorted)
    run_unittest(*test_classes)
    # verify reference counting
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in xrange(len(counts)):
            run_unittest(*test_classes)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print counts
# Run the full suite (with verbose refcount checking) when executed directly.
if __name__ == "__main__":
    test_main(verbose=True)
| apache-2.0 |
google/makani | avionics/motor/motor_client.py | 1 | 50178 | #!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line client for controlling motors."""
import collections
import os
import re
import socket
import subprocess
import tempfile
import threading
import time
import makani
from makani.avionics.common import actuator_types
from makani.avionics.common import aio
from makani.avionics.common import cmd_client
from makani.avionics.common import pack_avionics_messages
from makani.avionics.common import safety_codes
from makani.avionics.firmware.params import client as param_client
from makani.avionics.motor.firmware import config_params
from makani.avionics.motor.firmware import flags
from makani.avionics.network import aio_labels
from makani.avionics.network import aio_node
from makani.avionics.network import message_type
from makani.lib.python import c_helpers
import numpy as np
from scipy import interpolate
# TODO: implement NetworkConfig() to replace all these EnumHelper's.
# Helpers translating between C enum names and values: AIO node ids, motor
# labels, and motor error/warning flag bitmasks.
aio_node_helper = c_helpers.EnumHelper('AioNode', aio_node)
motor_label_helper = c_helpers.EnumHelper('MotorLabel', aio_labels,
                                          prefix='kMotor')
motor_error_helper = c_helpers.EnumHelper('MotorError', flags)
motor_warning_helper = c_helpers.EnumHelper('MotorWarning', flags)
def BuildMotorParamDict():
  """Builds a dict mapping motor param names to their indices."""
  # Parse the firmware source file that declares the mutable-parameter table.
  io_c_path = os.path.join(makani.HOME, 'avionics/motor/firmware/io.c')
  with open(io_c_path) as io_file:
    source_text = io_file.read()
  # Locate the body of the g_mutable_param_addrs array definition...
  table_match = re.search(
      r'static float \*g_mutable_param_addrs\[\] = {\s*^([\s\S]*)^};',
      source_text, re.MULTILINE)
  # ...then extract each referenced parameter name from the array entries.
  param_names = re.findall(r'^ *&[\w\[\]]+.([\w\.\[\]]+)',
                           table_match.group(0), re.MULTILINE)
  # Index order follows the order of appearance in the firmware table.
  return dict((name, index) for index, name in enumerate(param_names))
# Constants.
MOTORS = [mot.upper() for mot in motor_label_helper.ShortNames()]
CONTROLLER = 'kAioNodeControllerA'
OPERATOR = 'kAioNodeOperator'
MOTOR_PARAMS = BuildMotorParamDict()
MOTOR_ERROR_NAMES = collections.OrderedDict(
(error_bitmask, motor_error_helper.Name(error_bitmask))
for error_bitmask in motor_error_helper.Values()
if motor_error_helper.Name(error_bitmask) != 'kMotorErrorAll')
MOTOR_WARNING_NAMES = collections.OrderedDict(
(warning_bitmask, motor_warning_helper.Name(warning_bitmask))
for warning_bitmask in motor_warning_helper.Values()
if motor_warning_helper.Name(warning_bitmask) != 'kMotorWarningAll')
MOTOR_STATUS_NAMES = {val: key for key, val in flags.__dict__.items()
if key.startswith('kMotorStatus')}
GEN_TABLE_PATH = os.path.join(makani.HOME,
'avionics/motor/gen_lookup_table.py')
OMEGA_MIN_LIMIT = -260.0
OMEGA_MAX_LIMIT = 260.0
TORQUE_MIN_LIMIT = -600.0
TORQUE_MAX_LIMIT = 600.0
EPS32 = np.finfo(np.float32).eps
class MotorClientError(cmd_client.WingClientError):
  """Exception raised for invalid commands or failed motor operations."""
  pass
def MotorsAsBits(motor_list):
  """Returns a bitmask describing the motors in `motor_list`."""
  bits = 0
  for nickname in motor_list:
    # Each motor's bit position is its MotorLabel enum value.
    bits += 1 << motor_label_helper.Value(nickname.capitalize())
  return bits
def AioNodeNameFromMotorNickname(motor):
  """Returns AIO node name for the specified motor."""
  # e.g. 'sbo' -> 'kAioNodeMotorSbo'.
  return 'kAioNodeMotor%s' % motor.capitalize()
def AioNodeNameFromDynoNickname(motor):
  """Returns AIO node name for the specified dyno motor."""
  # e.g. 'sbo' -> 'kAioNodeDynoMotorSbo'.
  return 'kAioNodeDynoMotor%s' % motor.capitalize()
def GetMotorErrorNames(error_bitmask):
  """Returns a list of error names corresponding to the specified bitmask."""
  # Default key 0 selects the zero-valued entry (presumably the "no error"
  # name) when no bits are set -- TODO confirm against the flags enum.
  return GetFlagNames(error_bitmask, MOTOR_ERROR_NAMES, 0)
def GetMotorWarningNames(warning_bitmask):
  """Returns a list of warning names corresponding to the specified bitmask."""
  # Default key 0 selects the zero-valued entry (presumably the "no warning"
  # name) when no bits are set -- TODO confirm against the flags enum.
  return GetFlagNames(warning_bitmask, MOTOR_WARNING_NAMES, 0)
def GetFlagNames(bitmask, bitmask_dict, default_key=None):
  """Returns a list based on bitmask_dict corresponding to set bits in bitmask.

  Args:
    bitmask: Integer containing a bitmask of desired fields.
    bitmask_dict: Dictionary with power-of-two integer keys and values
        containing names of the corresponding bits.
    default_key: Key to use if bitmask == 0. Set to None to return [].

  Returns:
    A list with the values of bitmask_dict specified by bitmask.
  """
  if bitmask:
    # Use items() rather than the Python-2-only iteritems(): the behavior is
    # identical in Python 2 and this keeps the helper working under Python 3.
    return [name for bit, name in bitmask_dict.items() if bit & bitmask]
  if default_key is None:
    return []
  return [bitmask_dict[default_key]]
def GenerateCommandData(args):
  """Generates the data to use for a given speed or torque command.

  Args:
    args: List containing command input file & optional loop parameter.

  Returns:
    data: Numpy array of time, torque and speed limits.
    loop: Boolean of optional loop parameter.

  Raises:
    MotorClientError: An invalid filename or file format was specified.
  """
  cmd_file = args[0]
  if not os.path.isfile(cmd_file):
    raise MotorClientError('Invalid filename: %s' % cmd_file)
  # Handle 1st arg i.e. the command file.
  if cmd_file.endswith(('.py', '.pycmd')): # Treat as a Python file.
    # Run gen_lookup_table.py to convert the Python description into a binary
    # numpy table written to a temporary file.
    with tempfile.NamedTemporaryFile() as table_file:
      popen = subprocess.Popen([GEN_TABLE_PATH, '--input_file', cmd_file,
                                '--binary'],
                               stdout=table_file, stderr=subprocess.PIPE)
      _, stderr = popen.communicate()
      if popen.returncode != 0:
        raise MotorClientError('Generation of lookup table from %s failed. '
                               'stderr:\n%s' % (cmd_file, stderr))
      data = np.load(table_file.name)
      print 'Using %s to generate command profile.' % cmd_file
  else: # Treat as a text file for interpolation.
    try:
      data = np.loadtxt(cmd_file)
    except (IOError, ValueError):
      raise MotorClientError(
          'Invalid input text file: %s. Should contain a table of time, torques'
          'and speed limits with rows of the form:\n\n'
          'time torque1 torque2 ... torque8 omega_lower1 omega_lower2 ...'
          'omega_lower8 omega_upper1 omega_upper2 ... omega_upper8' % cmd_file)
    print 'Using interpolated values from %s for command profile.' % cmd_file
  # Each row must be: time, 8 torques, 8 lower limits, 8 upper limits.
  if data.shape[1] != 25:
    raise MotorClientError(
        'Invalid number of columns in command table. Expected 25, got %d. '
        'Revise input file to generate rows of the form:\n'
        'time torque1 torque2 ... torque8 omega_lower1 omega_lower2 ...'
        'omega_lower8 omega_upper1 omega_upper2 ... omega_upper8'
        % data.shape[1])
  # Handle 2nd arg i.e. the optional parameter to repeat.
  if len(args) == 1:
    loop = False
    print 'Defaulting to \"noloop\".'
  else:
    if args[1] == 'loop':
      loop = True
    elif args[1] == 'noloop':
      loop = False
    else:
      raise MotorClientError('Invalid option: %s. Expecting \"loop\" or '
                             '[default] \"noloop\".' % args[1])
  return data, loop
def CheckCommandLimits(
    cmd_min, cmd_max, cmd_min_limit, cmd_max_limit, cmd_type):
  """Raises MotorClientError if [cmd_min, cmd_max] exceeds limits or is empty."""
  out_of_bounds = cmd_min < cmd_min_limit or cmd_max > cmd_max_limit
  if out_of_bounds:
    raise MotorClientError('Extreme %s outside of limits [%f, %f] '
                           'detected. Command not set.' %
                           (cmd_type, cmd_min_limit, cmd_max_limit))
  if cmd_min > cmd_max:
    raise MotorClientError('Invalid %s i.e. min value - %f, is greater '
                           'than max value - %f' % (cmd_type, cmd_min, cmd_max))
class CommandProfile(object):
  """Maintains a lookup table of motor commands while running motors."""

  def __init__(
      self, t, motor_cmd, cmd_min_limit, cmd_max_limit, cmd_type,
      loop_back=False):
    """Builds an interpolating profile over `t` and validates its extremes."""
    self._loop_back = loop_back
    self._t = t
    # Interpolate along time (axis 0); each row holds one command vector.
    self._motor_cmd_func = interpolate.interp1d(self._t, motor_cmd, axis=0)
    cmd_min = np.min(motor_cmd)
    cmd_max = np.max(motor_cmd)
    print ('\nWith {t_start:.2f}s < t < {t_end:.2f}s:'
           '\n min({type}) = {min:f}\n max({type}) = {max:f}\n'.format(
               t_start=t[0], t_end=t[-1], type=cmd_type,
               min=cmd_min, max=cmd_max))
    CheckCommandLimits(cmd_min, cmd_max, cmd_min_limit, cmd_max_limit, cmd_type)

  def __call__(self, t):
    """Returns the command list at time t; None once a non-looping profile
    has passed its final time."""
    t_end = self._t[-1]
    if self._loop_back:
      # Looping profiles wrap time around the table's duration.
      t = np.mod(t, t_end)
    elif t > t_end:
      return None
    return list(self._motor_cmd_func(t))
class MotorCommandClient(cmd_client.WingCommandClient):
"""Command line client for running M600 motors."""
prompt = '(motor_client) '
_NUM_RETRIES = 10
_MOTORS = 'motors'
_DYNOS = 'dynos'
  def __init__(self, *args, **kwargs):
    cmd_client.WingCommandClient.__init__(self, *args, **kwargs)
    # Currently selected target nodes and their spin directions.
    self._motors_selected = set()
    self._dynos_selected = set()
    self._spin_dir = {}
    # Runners issue commands to the selected nodes; listeners are created on
    # demand and start as None.
    self._motor_runner = Runner(self._motors_selected, self._spin_dir)
    self._dyno_runner = Runner(self._dynos_selected, self._spin_dir,
                               dyno_mode=True)
    self._motor_listener = None
    self._dyno_listener = None
    # Most recent torque and speed-limit setpoints.
    self._torque = 0.0
    self._omega_lower_limit = 0.0
    self._omega_upper_limit = 0.0
    # AIO clients for arming (set-state), parameter set/get, and parameter
    # acknowledgments.
    self._arm_aio_client = aio.AioClient(
        ['kMessageTypeMotorSetState', 'kMessageTypeDynoMotorSetState'],
        timeout=0.1)
    self._set_param_aio_client = aio.AioClient(
        ['kMessageTypeMotorSetParam', 'kMessageTypeDynoMotorSetParam'],
        timeout=0.1)
    # The long range radio requires at least 2x160 ms for a complete command-
    # response cycle.
    self._ack_param_aio_client = aio.AioClient(
        ['kMessageTypeMotorAckParam'], timeout=0.35)
    self._get_param_aio_client = aio.AioClient(
        ['kMessageTypeMotorGetParam', 'kMessageTypeDynoMotorGetParam'],
        timeout=0.1)
    self._param_client = param_client.Client(timeout=0.1)
def TryStopThreads(self):
self._motor_runner.TryStop()
self._dyno_runner.TryStop()
if self._motor_listener:
self._motor_listener.TryStop()
if self._dyno_listener:
self._dyno_listener.TryStop()
def _GetListenerAndRunner(self, node_type):
if node_type == self._MOTORS:
return self._motor_listener, self._motor_runner
elif node_type == self._DYNOS:
return self._dyno_listener, self._dyno_runner
else:
raise MotorClientError('Unknown node type.')
def _CheckStatus(self, valid_statuses, node_type):
listener, _ = self._GetListenerAndRunner(node_type)
if not listener:
status = flags.kMotorStatusInit
else:
status = listener.GetMostRestrictiveMotorStatus()
if status not in valid_statuses:
raise MotorClientError(
'Invalid %s status. %s' % (
node_type.capitalize(), MOTOR_STATUS_NAMES[status]))
return True
def _CheckMotorStatus(self, valid_statuses):
self._CheckStatus(valid_statuses, self._MOTORS)
def _CheckDynoStatus(self, valid_statuses):
self._CheckStatus(valid_statuses, self._DYNOS)
def _CheckTargetsSelected(self):
if self._motors_selected or self._dynos_selected:
return True
else:
raise MotorClientError('Invalid set of targets. Use either: '
'"set_targets" or "set_targets_dyno".')
def _SetTargets(self, line, node_type):
"""Sets motor or dyno targets.
Args:
line: User supplied arguments specifying target motors.
node_type: String specifying type of targets i.e. 'motors' or 'dynos'.
Raises:
MotorClientError: An invalid set of targets was specified.
"""
targets_selected, _ = cmd_client.SelectArgs(
line.split(), MOTORS, require_some=True, require_all=True,
select_all=True, require_one=False)
if node_type == self._MOTORS:
self._motors_selected = targets_selected
motor_params = self._QueryConfig(self._motors_selected, self._MOTORS)
self._spin_dir = self._GetSpinDir(motor_params)
elif node_type == self._DYNOS:
self._dynos_selected = targets_selected
self._QueryConfig(self._dynos_selected, self._DYNOS)
self.TryStopThreads()
if self._motors_selected:
print 'Motors selected: %s.' % ', '.join(self._motors_selected)
self._motor_runner = Runner(self._motors_selected, self._spin_dir)
self._motor_listener = Listener(self._motor_runner.StopRun,
self._motors_selected)
if self._dynos_selected:
print 'Dynos selected: %s.' % ', '.join(self._dynos_selected)
self._dyno_runner = Runner(self._dynos_selected, self._spin_dir,
dyno_mode=True)
self._dyno_listener = Listener(self._dyno_runner.StopRun,
self._dynos_selected, dyno_mode=True)
@cmd_client.Command()
def do_set_targets(self, line): # pylint: disable=invalid-name
"""Sets motor targets e.g. "set_targets SBO SBI"."""
self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._SetTargets(line, self._MOTORS)
@cmd_client.Command()
def do_set_targets_dyno(self, line): # pylint: disable=invalid-name
"""Sets dyno targets e.g. "set_targets_dyno SBO SBI"."""
self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._SetTargets(line, self._DYNOS)
@cmd_client.Command()
def do_get_targets(self, line): # pylint: disable=invalid-name
"""Displays selected motor & dyno targets."""
print 'Current targets.\nMotors: %s.\nDynos: %s.' % (
', '.join(self._motors_selected), ', '.join(self._dynos_selected))
@cmd_client.Command()
def do_clear_targets(self, line): # pylint: disable=invalid-name
"""Clears selected motor & dyno targets."""
old_motors = self._motors_selected.copy()
old_dynos = self._dynos_selected.copy()
self.TryStopThreads()
self._motors_selected = set()
self._dynos_selected = set()
self._spin_dir = {}
self._motor_runner = Runner(self._motors_selected, self._spin_dir)
self._dyno_runner = Runner(self._dynos_selected, self._spin_dir,
dyno_mode=True)
self._motor_listener = None
self._dyno_listener = None
print 'Cleared old targets.\nOld Motors: %s.\nOld Dynos: %s.' % (
', '.join(old_motors), ', '.join(old_dynos))
def complete_set_targets(self, text, *unused_args): # pylint: disable=invalid-name
return self._CompleteArg(text, sorted(MOTORS) + ['All'])
complete_set_targets_dyno = complete_set_targets
def _GetSpinDir(self, params):
"""Determine the nominal spin direction based off of the motor load type."""
# List of props that need to spin in the positive x direction / in the
# negative omega sense.
# Additional loads are to be added in future commits.
reversed_loads = [config_params.MotorLoadType.PROP_REV2_POSITIVE_X]
return {key: -1 if param and param.load_type in reversed_loads else 1
for key, param in params.iteritems()}
def _QueryConfig(self, targets, target_type):
"""Test if targets are on the network and query their configurations."""
params = {}
for target in targets:
if target_type == self._DYNOS:
node = aio_node_helper.Value(AioNodeNameFromDynoNickname(target))
elif target_type == self._MOTORS:
node = aio_node_helper.Value(AioNodeNameFromMotorNickname(target))
section = param_client.SECTION_CONFIG
try:
params[target] = self._param_client.GetSection(node, section)
except socket.timeout:
params[target] = None
self._PrintConfig(targets, params)
return params
def _PrintConfig(self, motors, params):
"""Print portions of the selected motor config params."""
load_types = [load_type.CName()[len('kMotorLoadType'):]
for load_type in config_params.MotorLoadType.Names()]
motor_types = [motor_type.CName()[len('kMotorType'):]
for motor_type in config_params.MotorType.Names()]
load_type_max_str_len = max([len(name) for name in load_types])
motor_type_max_str_len = max([len(name) for name in motor_types])
for motor in sorted(motors):
if params[motor] is None:
print '%s: unknown' % motor
else:
print '{name}: motor_type: {motor_type} load_type: {load_type}'.format(
name=motor,
motor_type=(motor_types[params[motor].motor_type]
.ljust(motor_type_max_str_len)),
load_type=(load_types[params[motor].load_type]
.ljust(load_type_max_str_len)))
print ''
@cmd_client.Command()
def do_query_config(self, line): # pylint: disable=invalid-name
targets_selected, _ = cmd_client.SelectArgs(
line.split(), MOTORS, require_some=True, require_all=True,
select_all=True, require_one=False)
self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._QueryConfig(targets_selected, self._MOTORS)
@cmd_client.Command()
def do_query_config_dyno(self, line): # pylint: disable=invalid-name
targets_selected, _ = cmd_client.SelectArgs(
line.split(), MOTORS, require_some=True, require_all=True,
select_all=True, require_one=False)
self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._QueryConfig(targets_selected, self._DYNOS)
def _TryArm(self, arm_msg, arm_msg_type, node_type):
listener, _ = self._GetListenerAndRunner(node_type)
for _ in xrange(self._NUM_RETRIES):
self._arm_aio_client.Send(arm_msg, arm_msg_type, OPERATOR)
time.sleep(0.1)
if listener.AllMotorsArmed():
print 'Successfully armed %s.' % node_type
return
else:
raise MotorClientError('Failed to arm %s.' % node_type)
@cmd_client.Command(num_args=0)
def do_arm(self, unused_line): # pylint: disable=invalid-name
"""Arms the selected motors and/or dynos."""
if self._motors_selected:
self._CheckMotorStatus([flags.kMotorStatusInit])
if self._dynos_selected:
self._CheckDynoStatus([flags.kMotorStatusInit])
self._CheckTargetsSelected()
if self._motors_selected:
motor_arm_msg = pack_avionics_messages.MotorSetStateMessage()
motor_arm_msg.command = actuator_types.kActuatorStateCommandArm
motor_arm_msg.command_data = safety_codes.MOTOR_ARMING_SIGNAL
print 'Arming motors.'
motor_arm_msg.selected_motors = MotorsAsBits(
self._motor_listener.GetUnarmedMotors())
self._TryArm(
motor_arm_msg, 'kMessageTypeMotorSetState', self._MOTORS)
if self._dynos_selected:
dyno_arm_msg = pack_avionics_messages.DynoMotorSetStateMessage()
dyno_arm_msg.command = actuator_types.kActuatorStateCommandArm
dyno_arm_msg.command_data = safety_codes.MOTOR_ARMING_SIGNAL
print 'Arming dynos.'
dyno_arm_msg.selected_motors = MotorsAsBits(
self._dyno_listener.GetUnarmedMotors())
self._TryArm(
dyno_arm_msg, 'kMessageTypeDynoMotorSetState', self._DYNOS)
def _SetParam(self, line, message, node_type): # pylint: disable=invalid-name
"""Sets a param for a specified motor or dyno."""
targets, args = cmd_client.SelectArgs(
line.split(), MOTORS, require_some=True, select_all=True)
param, args = cmd_client.SelectArgs(
args, MOTOR_PARAMS.keys(), require_one=True, select_all=False)
if node_type == self._DYNOS:
targets = ['DYNO_%s' % t.upper() for t in targets]
try:
value = float(args[0])
except ValueError:
raise MotorClientError('Invalid value: "%s".' % args[0])
message.id = MOTOR_PARAMS[param]
message.value = value
failed_targets = []
for target in targets:
print 'Setting %s to %g on %s.' % (param, value, target)
if target.startswith('DYNO_'):
message.selected_motors = MotorsAsBits([target[len('DYNO_'):]])
aio_target = AioNodeNameFromDynoNickname(target[len('DYNO_'):])
success = self._TrySetParam(
message, 'kMessageTypeDynoMotorSetParam', param, target, aio_target)
else:
message.selected_motors = MotorsAsBits([target])
aio_target = AioNodeNameFromMotorNickname(target)
success = self._TrySetParam(
message, 'kMessageTypeMotorSetParam', param, target, aio_target)
if not success:
failed_targets.append(target)
if failed_targets:
raise MotorClientError('Failed to verify %s from %s.'
% (param, failed_targets))
def _TrySetParam(self, message, msg_type, param, target, aio_target):
for _ in xrange(self._NUM_RETRIES):
self._set_param_aio_client.Send(message, msg_type, OPERATOR)
for _ in xrange(self._NUM_RETRIES):
try:
_, header, ack = self._ack_param_aio_client.Recv()
if (header.source == aio_node_helper.Value(aio_target)
and header.type == message_type.kMessageTypeMotorAckParam
and ack.id == message.id and ack.value == message.value):
print '%s %s: %g' % (target, param, ack.value)
return True
except socket.timeout:
return False
return False
@cmd_client.Command(num_args=3)
def do_set_param(self, line): # pylint: disable=invalid-name
"""Sets param for a specified motor, e.g. "set_motor_param SBO Ld 3.14"."""
self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
message = pack_avionics_messages.MotorSetParamMessage()
self._SetParam(line, message, self._MOTORS)
@cmd_client.Command(num_args=3)
def do_set_param_dyno(self, line): # pylint: disable=invalid-name
"""Sets param for a specified dyno, e.g. "set_dyno_param SBO Ld 3.14"."""
self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
message = pack_avionics_messages.DynoMotorSetParamMessage()
self._SetParam(line, message, self._DYNOS)
def complete_set_param(self, text, line, *unused_args): # pylint: disable=invalid-name
arg_number = len(line.split())
if not text:
arg_number += 1
if arg_number == 2:
return self._CompleteArg(text, sorted(MOTORS) + ['All'])
elif arg_number == 3:
return self._CompleteArg(text, sorted(MOTOR_PARAMS.keys()))
else:
return []
complete_set_param_dyno = complete_set_param
def _GetParam(self, line, message, node_type):
targets, args = cmd_client.SelectArgs(
line.split(), MOTORS, require_some=True, select_all=True)
param, _ = cmd_client.SelectArgs(
args, MOTOR_PARAMS.keys(), require_one=True, select_all=False)
if node_type == self._DYNOS:
targets = ['DYNO_%s' % t.upper() for t in targets]
message.id = MOTOR_PARAMS[param]
failed_targets = []
for target in targets:
print 'Getting %s from %s...' % (param, target)
success = True
if target.startswith('DYNO_'):
message.selected_motors = MotorsAsBits([target[len('DYNO_'):]])
aio_target = AioNodeNameFromDynoNickname(target[len('DYNO_'):])
success = self._TryGetParam(
message, 'kMessageTypeDynoMotorGetParam', param, target, aio_target)
else:
message.selected_motors = MotorsAsBits([target])
aio_target = AioNodeNameFromMotorNickname(target)
success = self._TryGetParam(
message, 'kMessageTypeMotorGetParam', param, target, aio_target)
if not success:
failed_targets.append(target)
if failed_targets:
raise MotorClientError('Failed to get %s from %s.'
% (param, failed_targets))
def _TryGetParam(self, message, msg_type, param, target, aio_target):
for _ in xrange(self._NUM_RETRIES):
self._get_param_aio_client.Send(message, msg_type, OPERATOR)
for _ in xrange(self._NUM_RETRIES):
try:
_, header, ack = self._ack_param_aio_client.Recv()
if (header.source == aio_node_helper.Value(aio_target)
and header.type == message_type.kMessageTypeMotorAckParam
and ack.id == message.id):
print '%s %s: %g' % (target, param, ack.value)
return True
except socket.timeout:
return False
return False
@cmd_client.Command()
def do_get_param(self, line): # pylint: disable=invalid-name
self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
message = pack_avionics_messages.MotorGetParamMessage()
self._GetParam(line, message, self._MOTORS)
@cmd_client.Command()
def do_get_param_dyno(self, line): # pylint: disable=invalid-name
self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
message = pack_avionics_messages.DynoMotorGetParamMessage()
self._GetParam(line, message, self._DYNOS)
complete_get_param = complete_set_param
complete_get_param_dyno = complete_get_param
@cmd_client.Command()
def do_run(self, line): # pylint: disable=invalid-name
"""Runs the selected motors and/or dynos.
Specify a duration in "s" or "ms". E.g. "run 10s" or "run 300ms".
Args:
line: Command to this function.
Raises:
MotorClientError: An invalid duration was specified.
"""
if self._motors_selected:
self._CheckMotorStatus([flags.kMotorStatusArmed])
if self._dynos_selected:
self._CheckDynoStatus([flags.kMotorStatusArmed])
self._CheckTargetsSelected()
if line.endswith('ms'):
line = line[:-2]
multiplier = 1e-3
elif line.endswith('s'):
line = line[:-1]
multiplier = 1.0
else:
raise MotorClientError('Usage: run {$N {s|ms}}')
try:
duration = float(line) * multiplier
except ValueError:
raise MotorClientError('Invalid run time: \'%s\'' % line)
if self._motor_runner.IsRunning() or self._dyno_runner.IsRunning():
raise MotorClientError('Already running.')
if self._motors_selected:
if not self._motor_listener.AllMotorsArmed():
raise MotorClientError('Motors not armed.')
self._motor_runner.StartRun(duration)
if self._dynos_selected:
if not self._dyno_listener.AllMotorsArmed():
raise MotorClientError('Dynos not armed.')
self._dyno_runner.StartRun(duration)
print 'Running...'
@cmd_client.Command(num_args=0)
def do_stop(self, unused_line): # pylint: disable=invalid-name
"""Stops the motors and/or dynos."""
if self._motor_runner.IsRunning() or self._dyno_runner.IsRunning():
self._motor_runner.StopRun()
self._dyno_runner.StopRun()
else:
raise MotorClientError('Not running.')
print 'Run stopped.'
def _GetCommandFunction(self, line):
"""Returns a complete command function for each selected motor and/or dyno.
Args:
line: Command to this function.
Raises:
MotorClientError: Motors and/or dynos are running.
Returns:
torque_func: A function that returns torque commands.
omega_lower_func: A function that returns omega_lower commands.
omega_upper_func: A function that returns omega_upper commands.
freeze_command: Specifies if last command should persist on stop.
"""
if self._motor_runner.IsRunning() or self._dyno_runner.IsRunning():
raise MotorClientError('Motors and/or dynos are running.')
args = line.split()
data, loop = GenerateCommandData(args)
t = data[:, 0]
torque_cmd = data[:, 1:9]
omega_lower_cmd = data[:, 9:17]
omega_upper_cmd = data[:, 17:25]
torque_func = CommandProfile(t, torque_cmd, TORQUE_MIN_LIMIT,
TORQUE_MAX_LIMIT, 'torque', loop)
omega_lower_func = CommandProfile(t, omega_lower_cmd, OMEGA_MIN_LIMIT,
OMEGA_MAX_LIMIT, 'omega', loop)
omega_upper_func = CommandProfile(t, omega_upper_cmd, OMEGA_MIN_LIMIT,
OMEGA_MAX_LIMIT, 'omega', loop)
freeze_command = False
return (torque_func, omega_lower_func, omega_upper_func, freeze_command)
@cmd_client.Command(num_args=[1, 2])
def do_set_command_function(self, line): # pylint: disable=invalid-name, g-doc-args
# pylint: disable=g-doc-args
"""Sets a command function for motor(s).
Specify a filename which may be:
- A Python file (must have .py suffix) corresponding to an input to
gen_lookup_table.py
- A text file whose output is a lookup table formatted per the output of
gen_lookup_table.py.
"""
self._CheckMotorStatus(
[flags.kMotorStatusInit, flags.kMotorStatusArmed,
flags.kMotorStatusError])
cmd_args = self._GetCommandFunction(line)
self._motor_runner.SetCommandFunction(*cmd_args)
@cmd_client.Command(num_args=[1, 2])
def do_set_command_function_dyno(self, line): # pylint: disable=invalid-name
# pylint: disable=g-doc-args
"""Sets a command function for dyno(s).
Specify a filename which may be:
- A Python file (must have .py suffix) corresponding to an input to
gen_lookup_table.py
- A text file whose output is a lookup table formatted per the output of
gen_lookup_table.py.
"""
self._CheckDynoStatus(
[flags.kMotorStatusInit, flags.kMotorStatusArmed,
flags.kMotorStatusError])
cmd_args = self._GetCommandFunction(line)
self._dyno_runner.SetCommandFunction(*cmd_args)
def complete_set_motor_command_function(self, _, line, *unused_args): # pylint: disable=invalid-name
"""Completes arguments for the "set_command_function" command."""
args = line.split(None, 2)
if len(args) > 2 or (len(args) == 2 and line.endswith(' ')):
suggestions = ['noloop', 'loop']
if len(args) == 3:
if args[2] in suggestions:
return []
suggestions = [x for x in suggestions if x.startswith(args[2])]
else:
path = args[1] if len(args) == 2 else ''
suggestions = cmd_client.CompleteFile(path)
suggestions = [x for x in suggestions
if (x.endswith(('/', '.py', '.pycmd', '.txt', '.dat'))
or x.find('.') < 0)]
return suggestions
complete_set_dyno_command_function = complete_set_motor_command_function
@cmd_client.Command(num_args=2)
def do_set_speed_limits(self, line): # pylint: disable=invalid-name
"""Sets the speed limits for torque-mode e.g. set_speed_limits 100 200."""
if not self._dynos_selected:
raise MotorClientError('No dynos selected. Use "set_targets_dyno".')
args = line.split()
try:
omega_lower = float(args[0])
omega_upper = float(args[1])
except ValueError:
raise MotorClientError('Invalid argument(s): \'{:s}\''.format(line))
CheckCommandLimits(
omega_lower, omega_upper, OMEGA_MIN_LIMIT, OMEGA_MAX_LIMIT, 'omega')
self._omega_lower_limit = omega_lower
self._omega_upper_limit = omega_upper
print 'Omega limits set to: %.2f rad/s, %.2f rad/s.' % (
self._omega_lower_limit, self._omega_upper_limit)
torque_func = lambda _: self._torque
omega_lower_func = lambda _: self._omega_lower_limit
omega_upper_func = lambda _: self._omega_upper_limit
freeze_command = True
self._dyno_runner.SetCommandFunction(torque_func, omega_lower_func,
omega_upper_func, freeze_command)
@cmd_client.Command(num_args=1)
def do_set_torque(self, line): # pylint: disable=invalid-name
"""Sets motor torque."""
if not self._dynos_selected:
raise MotorClientError('No dynos selected. Use "set_targets_dyno".')
try:
torque = float(line)
except ValueError:
raise MotorClientError('Invalid argument(s): \'{:s}\''.format(line))
if self._omega_lower_limit == 0 and self._omega_upper_limit == 0:
raise MotorClientError('Omega limits not set. Use "set_speed_limits".')
CheckCommandLimits(
torque, torque, TORQUE_MIN_LIMIT, TORQUE_MAX_LIMIT, 'torque')
self._torque = torque
print 'Torque desired: %.2f Nm. Speed limits: %.2f rad/s, %.2f rad/s.' % (
torque, self._omega_lower_limit, self._omega_upper_limit)
torque_func = lambda _: self._torque
omega_lower_func = lambda _: self._omega_lower_limit
omega_upper_func = lambda _: self._omega_upper_limit
freeze_command = True
self._dyno_runner.SetCommandFunction(torque_func, omega_lower_func,
omega_upper_func, freeze_command)
@cmd_client.Command(num_args=1)
def do_set_omega(self, line): # pylint: disable=invalid-name
"""Sets motor speed."""
if not self._motors_selected:
raise MotorClientError('No motors selected. Use "set_targets".')
try:
omega = float(line)
except ValueError:
raise MotorClientError('Invalid omega: \'{:s}\''.format(line))
CheckCommandLimits(omega, omega, OMEGA_MIN_LIMIT, OMEGA_MAX_LIMIT, 'omega')
print 'Omega desired: %s rad/s' % omega
torque_func = lambda _: 0.0
omega_lower_func = lambda _: omega
omega_upper_func = lambda _: omega
freeze_command = True
self._motor_runner.SetCommandFunction(torque_func, omega_lower_func,
omega_upper_func, freeze_command)
def _RampCommand(self, line, cmd_type, runner):
"""Sets a motor speed or torque ramp.
Args:
line: Command to this function.
cmd_type: Torque or Omega command to ramp.
runner: Runner instance to use for setting command.
Raises:
MotorClientError: An invalid parameter was specified.
"""
args = line.split(None, 2)
try:
cmd = float(args[0])
except ValueError:
raise MotorClientError('Invalid %s: \'{:s}\''.format(args[0]) % cmd_type)
if len(args) == 2:
try:
dt = self._dt = float(args[1])
except ValueError:
raise MotorClientError('Invalid time: \'{:s}\''.format(args[1]))
else:
dt = 1.0
if runner.IsRunning():
t0 = runner.GetTime()
motor_cmd = runner.GetCommand()
cmd0 = motor_cmd[cmd_type]
else:
t0 = 0.0
cmd0 = 0.0
dcmd_dt = (cmd - cmd0) / dt if abs(dt) > 10.0 * EPS32 else 0.0
def Ramp(t):
if t > t0 + dt:
return cmd
elif t > t0:
return dcmd_dt * (t - t0) + cmd0
else:
return cmd0
if cmd_type == 'omega_upper':
torque_func = lambda _: 0.0
omega_lower_func = Ramp
omega_upper_func = Ramp
elif cmd_type == 'torque':
torque_func = Ramp
omega_lower_func = lambda _: self._omega_lower_limit
omega_upper_func = lambda _: self._omega_upper_limit
else:
raise MotorClientError('Invalid command type: %s' % cmd_type)
freeze_command = True
runner.SetCommandFunction(
torque_func, omega_lower_func, omega_upper_func, freeze_command)
display_cmd = cmd_type.split('_')[0].capitalize()
print (' Ramping over dt = %4.2f:\n'
' %s(t0) = %4.1f\n'
' %s(t0 + dt) = %4.1f' % (dt, display_cmd, cmd0, display_cmd, cmd))
@cmd_client.Command(num_args=[1, 2])
def do_ramp_omega(self, line): # pylint: disable=invalid-name
# pylint: disable=g-doc-args
"""Sets a motor speed ramp.
Specify a linear angular rate ramp from the present speed omega0 to a final
speed omega1 over some time dt (in seconds) with the command:
ramp_omega [omega1] [dt]
The second argument is optional. If not specified dt = 1s is assumed.
"""
self._RampCommand(line, 'omega_upper', self._motor_runner)
@cmd_client.Command(num_args=[1, 2])
def do_ramp_torque(self, line): # pylint: disable=invalid-name
# pylint: disable=g-doc-args
"""Sets a dyno torque ramp.
Specify a linear torque ramp from the present torque T0 to a final
torque T1 over some time dt (in seconds) with the command:
ramp_torque [T1] [dt]
The second argument is optional. If not specified dt = 1s is assumed.
"""
self._RampCommand(line, 'torque', self._dyno_runner)
@cmd_client.Command(num_args=0)
def do_clear_errors(self, unused_line): # pylint: disable=invalid-name
self._CheckTargetsSelected()
if self._motors_selected:
self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._motor_listener.ClearErrors()
self._motor_runner.ClearErrors()
if self._dynos_selected:
self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._dyno_listener.ClearErrors()
self._dyno_runner.ClearErrors()
print 'Errors cleared.'
def _TryDisarm(self, node_type):
listener, runner = self._GetListenerAndRunner(node_type)
for _ in xrange(self._NUM_RETRIES):
runner.Disarm()
time.sleep(0.1)
if listener.AllMotorsDisarmed():
print 'Successfully disarmed %s.' % node_type
return
raise MotorClientError('Failed to disarm %s.' % node_type)
@cmd_client.Command(num_args=0)
def do_disarm(self, unused_line): # pylint: disable=invalid-name
"""Disarms the motors."""
self._CheckTargetsSelected()
print 'Disarming.'
if self._motors_selected:
self._TryDisarm(self._MOTORS)
if self._dynos_selected:
self._TryDisarm(self._DYNOS)
@cmd_client.Command()
def do_get_errors(self, unused_line): # pylint: disable=invalid-name
self._CheckTargetsSelected()
if self._motors_selected:
self._motor_listener.PrintErrors()
if self._dynos_selected:
self._dyno_listener.PrintErrors()
@cmd_client.Command()
def do_request_control_log(self, unused_line): # pylint: disable=invalid-name
self._CheckTargetsSelected()
if self._motors_selected:
self._motor_runner.RequestControlLog()
if self._dynos_selected:
self._dyno_runner.RequestControlLog()
@cmd_client.Command()
def do_request_adc_log(self, unused_line): # pylint: disable=invalid-name
self._CheckTargetsSelected()
if self._motors_selected:
self._motor_runner.RequestAdcLog()
if self._dynos_selected:
self._dyno_runner.RequestAdcLog()
class Listener(cmd_client.AioThread):
  """Continuously listens to MotorStatusMessages."""
  def __init__(self, error_callback, motors, dyno_mode=False):
    self._motors = motors.copy()
    t_now = time.time()
    # Accumulated error/warning bitmasks per motor, guarded by _error_lock.
    self._errors = {m: flags.kMotorErrorNone for m in MOTORS}
    self._warnings = {m: flags.kMotorWarningNone for m in MOTORS}
    self._error_lock = threading.Lock()
    # Until this time, incoming error bits are ignored (see ClearErrors).
    self._clear_errors_stop_time = t_now
    # Latest reported status per selected motor, guarded by its own lock.
    self._motor_status = {m: flags.kMotorStatusInit
                          for m in self._motors}
    self._motor_status_lock = threading.Lock()
    # Arrival time of the last status message per motor, for staleness checks.
    self._t_message = {m: t_now for m in self._motors}
    self._t_message_lock = threading.Lock()
    self._dyno_mode = dyno_mode
    if dyno_mode:
      sources = {AioNodeNameFromDynoNickname(m): m for m in self._motors}
    else:
      sources = {AioNodeNameFromMotorNickname(m): m for m in self._motors}
    # Maps AIO source node value -> motor nickname for incoming messages.
    self._motor_sources = {aio.aio_node_helper.Value(k): sources[k]
                           for k in sources.keys()}
    self._error_callback = error_callback
    super(Listener, self).__init__(['kMessageTypeMotorStatus'],
                                   allowed_sources=sources.keys(), timeout=0.1)
    self.start()
  def ClearErrors(self):
    """Clears stored errors/warnings and briefly suppresses incoming ones."""
    with self._error_lock:
      for motor in self._errors.keys():
        self._errors[motor] = flags.kMotorErrorNone
        self._warnings[motor] = flags.kMotorWarningNone
      # Ignore error bits for a short window (5*10e-3 == 50 ms) so stale
      # in-flight status messages do not immediately repopulate the errors.
      self._clear_errors_stop_time = time.time() + 5*10e-3
  def GetMostRestrictiveMotorStatus(self):
    """Returns the most restrictive status across all motors."""
    with self._motor_status_lock:
      motor_statuses = self._motor_status.values()
    # Priority order: Running > Armed > Error > Init.
    if flags.kMotorStatusRunning in motor_statuses:
      return flags.kMotorStatusRunning
    elif flags.kMotorStatusArmed in motor_statuses:
      return flags.kMotorStatusArmed
    elif flags.kMotorStatusError in motor_statuses:
      return flags.kMotorStatusError
    return flags.kMotorStatusInit
  def AllMotorsArmed(self):
    """Returns True if every selected motor reports Armed."""
    with self._motor_status_lock:
      motor_statuses = self._motor_status.values()
    return all(x == flags.kMotorStatusArmed for x in motor_statuses)
  def AnyMotorsArmed(self):
    """Returns True if at least one selected motor reports Armed."""
    with self._motor_status_lock:
      motor_statuses = self._motor_status.values()
    return any(x == flags.kMotorStatusArmed for x in motor_statuses)
  def AllMotorsDisarmed(self):
    """Returns True if no selected motor is Armed or Running."""
    with self._motor_status_lock:
      motor_statuses = self._motor_status.values()
    return all(x != flags.kMotorStatusArmed
               and x != flags.kMotorStatusRunning
               for x in motor_statuses)
  def GetUnarmedMotors(self):
    """Returns the nicknames of motors still in the Init state."""
    with self._motor_status_lock:
      return [motor for motor, status in self._motor_status.iteritems()
              if status == flags.kMotorStatusInit]
  def PrintErrors(self):
    """Prints accumulated errors and warnings, or a no-errors message."""
    with self._error_lock:
      if (any([e != flags.kMotorErrorNone for e in self._errors.itervalues()])
          or any([w != flags.kMotorWarningNone
                  for w in self._warnings.itervalues()])):
        print 'Errors:'
        for motor in MOTORS:
          error = self._errors[motor]
          warning = self._warnings[motor]
          if error != flags.kMotorErrorNone:
            print '%s: %s' % (motor, ' | '.join(GetMotorErrorNames(error)))
            motor = (' ') * len(motor)  # Do not print out the motor name again.
          if warning != flags.kMotorWarningNone:
            print '%s: %s' % (motor, ' | '.join(GetMotorWarningNames(warning)))
      else:
        print 'No errors or warnings.'
  def _RunOnce(self):
    """Receives one status message and updates errors, warnings, and status."""
    try:
      _, header, msg = self._client.Recv()
      motor = self._motor_sources[header.source]
      t_now = time.time()
      with self._t_message_lock:
        self._t_message[motor] = t_now
        # A motor is stale if no status message arrived within 50 ms.
        stale = {m: t_now - self._t_message[m] > 0.05 for m in self._motors}
      new_status = False
      execute_callback = False
      with self._error_lock, self._motor_status_lock:
        # New errors.
        if t_now > self._clear_errors_stop_time:
          newline = '\n'
          error_diff = self._errors[motor] ^ msg.motor_error
          if msg.motor_error and error_diff:
            self._errors[motor] |= msg.motor_error
            print ('%sNew motor error(s) %s: %s' %
                   (newline, motor, ' | '.join(GetMotorErrorNames(error_diff))))
            newline = ''  # Group errors and warning from the same motor.
          warning_diff = self._warnings[motor] ^ msg.motor_warning
          if warning_diff:
            self._warnings[motor] = msg.motor_warning
            if msg.motor_warning & warning_diff:
              print ('%sNew motor warning(s) %s: %s' %
                     (newline, motor,
                      ' | '.join(GetMotorWarningNames(warning_diff
                                                      & msg.motor_warning))))
            else:
              print ('%sCleared motor warning(s) %s: %s' %
                     (newline, motor,
                      ' | '.join(GetMotorWarningNames(warning_diff
                                                      & ~msg.motor_warning))))
        # Change in status.
        if self._motor_status[motor] != msg.motor_status:
          new_status = True
          self._motor_status[motor] = msg.motor_status
      # Invoke error callback after giving up self._error_lock and
      # self._motor_status_lock just in case.
      if (new_status and
          any([e for e in self._errors.values()]) and
          all([self._motor_status[motor] &
               ~(flags.kMotorStatusRunning | flags.kMotorStatusWindDown) or
               stale[motor] for motor in self._motors])):
        execute_callback = True
      if execute_callback:
        self._error_callback()
    except socket.timeout:
      pass
class Runner(cmd_client.AioThread):
"""Continuously sends ControllerCommandMessages."""
def __init__(self, motors, spin_dir, dyno_mode=False):
self._motors = motors.copy()
self._spin_dir = [spin_dir.get(motor, 1) for motor in MOTORS]
self._clear_error_retries = 0
self._disarm_retries = 0
self._request_control_log = False
self._request_adc_log = False
self._dyno_mode = dyno_mode
if dyno_mode:
self._command = pack_avionics_messages.DynoCommandMessage()
else:
self._command = pack_avionics_messages.ControllerCommandMessage()
self._command.motor_command = flags.kMotorCommandNone
self._command_lock = threading.Lock()
self._command_function_lock = threading.Lock()
self._torque_func = lambda _: 0.0
self._omega_lower_func = lambda _: 0.0
self._omega_upper_func = lambda _: 0.0
self._freeze_command = False # Replace command with a constant on stop.
self._WriteMotorCommand()
super(Runner, self).__init__(['kMessageTypeControllerCommand',
'kMessageTypeDynoCommand'])
self.start()
def SetCommand(self, command_mask):
with self._command_lock:
self._command.motor_command |= command_mask
def _ClearCommand(self, command_mask):
with self._command_lock:
self._command.motor_command &= ~command_mask
def IsRunning(self):
return self._command.motor_command & flags.kMotorCommandRun
def StartRun(self, duration):
self._start_time = time.time()
self._stop_time = self._start_time + duration
self.SetCommand(flags.kMotorCommandRun)
def StopRun(self):
if self._freeze_command:
motor_cmd = self.GetCommand()
with self._command_function_lock:
self._torque_func = lambda _: motor_cmd['torque']
self._omega_lower_func = lambda _: motor_cmd['omega_lower']
self._omega_upper_func = lambda _: motor_cmd['omega_upper']
self._ClearCommand(flags.kMotorCommandRun)
def GetCommand(self):
"""Generates motor commands at the current time.
Returns:
motor_cmd: Command to send to motors or dynos at the current time.
"""
if self.IsRunning():
curr_time = time.time() - self._start_time
else:
curr_time = 0.0
with self._command_function_lock:
motor_cmd = {'torque': self._torque_func(curr_time),
'omega_lower': self._omega_lower_func(curr_time),
'omega_upper': self._omega_upper_func(curr_time)}
return motor_cmd
def _CheckCommand(self, cmd_dict):
for _, val in cmd_dict.iteritems():
assert isinstance(val, list)
assert len(val) == len(MOTORS)
def _WriteMotorCommand(self):
motor_cmd = self.GetCommand()
for cmd, val in motor_cmd.iteritems():
if isinstance(val, int) or isinstance(val, float):
motor_cmd[cmd] = [val for _ in MOTORS]
self._CheckCommand(motor_cmd)
torque = motor_cmd['torque']
omega_lower = motor_cmd['omega_lower']
omega_upper = motor_cmd['omega_upper']
with self._command_lock:
for i, motor in enumerate(MOTORS):
spin = self._spin_dir[i]
if motor in self._motors:
self._command.motor_torque[i] = torque[i] * spin
self._command.motor_speed_lower_limit[i] = omega_lower[i] * spin
self._command.motor_speed_upper_limit[i] = omega_upper[i] * spin
else:
self._command.motor_torque[i] = 0.0
self._command.motor_speed_lower_limit[i] = 0.0
self._command.motor_speed_upper_limit[i] = 0.0
def SetCommandFunction(self, torque_func, omega_lower_func,
                       omega_upper_func, freeze_command):
    # Install new time-parameterized command functions atomically, then
    # immediately evaluate them once so malformed functions fail fast.
    with self._command_function_lock:
        self._torque_func = torque_func
        self._omega_lower_func = omega_lower_func
        self._omega_upper_func = omega_upper_func
        self._freeze_command = freeze_command
    self._WriteMotorCommand()
def GetTime(self):
    # Seconds elapsed since StartRun() while running; 0.0 otherwise.
    return time.time() - self._start_time if self.IsRunning() else 0.0
def ClearErrors(self):
    # Assert the clear-error bit; _RunOnce keeps it set for a few send
    # cycles (the retry count) before clearing it again.
    self.SetCommand(flags.kMotorCommandClearError)
    self._clear_error_retries = 3
def Disarm(self):
    # Assert the disarm bit; _RunOnce keeps it set for a few send cycles
    # (the retry count) before clearing it again.
    self.SetCommand(flags.kMotorCommandDisarm)
    self._disarm_retries = 3
def RequestControlLog(self):
    # Flag consumed (and reset) by _RunOnce on its next cycle.
    self._request_control_log = True
def RequestAdcLog(self):
    # Flag consumed (and reset) by _RunOnce on its next cycle.
    self._request_adc_log = True
def _RunOnce(self):
    """Modifies and sends the ControllerCommandMessage.

    One iteration of the send loop: refreshes the motor command while a
    run is active, manages one-shot command bits (clear-error, disarm,
    log requests), then transmits the message.
    """
    if self.IsRunning():
        if time.time() > self._stop_time:
            self.StopRun()
            print '\nFinished run.'
        else:
            try:
                self._WriteMotorCommand()
            except AssertionError:
                # Raised by _CheckCommand when a command function returned
                # something other than a scalar or per-motor list.
                print ('Warning: Command(t) did not return a scalar or list with '
                       'elements for all motors.')
                self.StopRun()
    # Clear-error/disarm bits are held for `retries` cycles so the command
    # is reliably received, then dropped.
    if self._clear_error_retries <= 0:
        self._ClearCommand(flags.kMotorCommandClearError)
    else:
        self._clear_error_retries -= 1
    if self._disarm_retries <= 0:
        self._ClearCommand(flags.kMotorCommandDisarm)
    else:
        self._disarm_retries -= 1
    # Log-request flags are one-shot: set the bit for a single send.
    if self._request_control_log:
        self.SetCommand(flags.kMotorCommandSendControlLog)
        self._request_control_log = False
    else:
        self._ClearCommand(flags.kMotorCommandSendControlLog)
    if self._request_adc_log:
        self.SetCommand(flags.kMotorCommandSendAdcLog)
        self._request_adc_log = False
    else:
        self._ClearCommand(flags.kMotorCommandSendAdcLog)
    with self._command_lock:
        if self._dyno_mode:
            self._client.Send(self._command, 'kMessageTypeDynoCommand', OPERATOR)
        else:
            self._client.Send(self._command, 'kMessageTypeControllerCommand',
                              CONTROLLER)
    # NOTE(review): ~10ms pacing between sends; presumably matches the
    # controller's expected command rate — confirm against the receiver.
    time.sleep(0.0095)
if __name__ == '__main__':
    client = MotorCommandClient()
    try:
        client.cmdloop()
    except BaseException:
        # Stop worker threads before propagating, including on
        # KeyboardInterrupt/SystemExit (hence BaseException).
        client.TryStopThreads()
        raise
| apache-2.0 |
yun3195/android_kernel_ZTE_Z5S | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Global state shared by the tracepoint handlers below.
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed

# options (parsed from sys.argv in trace_begin)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;

# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    """Return the interval from *src* to *dst*, converted from nsec to msec."""
    delta_nsec = dst - src
    return delta_nsec / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
    # Skip hunks for other devices when the dev= option is in effect.
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    # Columns: device, length, enqueue time, queue->xmit delay, xmit->free delay.
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
         nsecs_secs(hunk['queue_t']),
         nsecs_nsecs(hunk['queue_t'])/1000,
         diff_msec(hunk['queue_t'], hunk['xmit_t']),
         diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing.  The JOINT strings draw the
# tree-style timeline connectors in the output.
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
    # Render one receive hunk: the hardirqs, the NET_RX softirq entry, and
    # every per-packet event that happened inside it.
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed (dev= option filters by irq name)
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    # All timestamps below are printed relative to the first irq entry.
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
             irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                     irq_event['skbaddr'])
                print PF_JOINT
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            # Close the tree after the last event.
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                 event['len'])
            # 'comm' marks delivery to a process; 'handle' marks how the
            # skb was freed (kfree_skb vs consume_skb).
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                     event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                                   event['comm_t']),
                         event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                                  event['comm_t'])
            print PF_JOINT
def trace_begin():
    """Parse script options from sys.argv before any event is handled."""
    global show_tx
    global show_rx
    global dev
    global debug

    # Skip argv[0], the script name itself.
    for arg in sys.argv[1:]:
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.startswith('dev='):
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    # Default to showing both charts when neither was requested.
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    # order all events in time (Python 2 cmp-style sort)
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                                        b[EINFO_IDX_TIME]))
    # process all events: dispatch each buffered tracepoint record to its
    # stateful handler, reconstructing rx/tx packet lifecycles.
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
              " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    # buffer accounting, for the 'debug' option
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    # Buffer NET_RX softirq entries only; other vectors are irrelevant here.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    # NOTE(review): reuses the entry event's symbol table for vec names —
    # presumably identical across softirq events; confirm.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    # NOTE(review): reuses the entry event's symbol table for vec names —
    # presumably identical across softirq events; confirm.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    # Buffer every hardirq entry; filtering happens in trace_end handlers.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  irq, irq_name)
    all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    # Buffer hardirq exits, paired with their entries by handle_irq_handler_exit.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    # Buffer NAPI poll events for the rx timeline.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  napi, dev_name)
    all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    # Buffer skb delivery into the network stack (rx path).
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    # Buffer netif_rx events (legacy, non-NAPI rx path).
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    # Buffer qdisc enqueue events (start of the tx timeline).
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    # Buffer driver transmit events; rc is the NETDEV_TX_* return code.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, rc ,dev_name)
    all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    # Buffer skb drops/frees; location identifies the freeing call site.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, protocol, location)
    all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    # Buffer normal (non-drop) skb frees.
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr)
    all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    # Buffer copy-to-userspace events (packet delivered to a process).
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen)
    all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    # Push a new in-flight hardirq record onto this CPU's irq stack.
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
    irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    # Mismatched irq number means the trace is inconsistent; drop the record.
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    # A NET_RX raise inside a hardirq marks that irq as network-related.
    if cpu not in irq_dic.keys() \
       or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    # Open a fresh NET_RX context for this CPU; events accumulate until exit.
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    # Collect the irqs and NET_RX events accumulated on this CPU.
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    # Need both an irq chain and softirq events to form a hunk.
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    # Only meaningful inside an open NET_RX context.
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name':'napi_poll',
                    'dev':dev_name, 'event_t':time}
        event_list.append(rec_data)
def handle_netif_rx(event_info):
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    # netif_rx runs in hardirq context: attach it to the current irq record.
    if cpu not in irq_dic.keys() \
       or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
                           'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
                    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        # Also queue it for later matching with skb_copy_datagram_iovec /
        # free events; oldest entries are dropped past the budget.
        rx_skb_list.insert(0, rec_data)
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    global of_count_tx_queue_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    # Start of the tx lifecycle: packet entered the qdisc.
    skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
    tx_queue_list.insert(0, skb)
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    global of_count_tx_xmit_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        # Move the packet from the qdisc list to the in-flight xmit list.
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return
def handle_kfree_skb(event_info):
    (name, context, cpu, time, pid, comm,
     skbaddr, protocol, location) = event_info
    # Freed while still queued in the qdisc: dropped before transmit.
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    # Freed after driver transmit: tx lifecycle complete.
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    # Received packet dropped before delivery to a process.
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                             'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
def handle_consume_skb(event_info):
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    # Normal free after transmit: close the tx lifecycle.
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
def handle_skb_copy_datagram_iovec(event_info):
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    # Packet copied to userspace: record the receiving process and close
    # the rx lifecycle for this skb.
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle':"skb_copy_datagram_iovec",
                             'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
| gpl-2.0 |
KhronosGroup/COLLADA-CTS | StandardDataSets/1_5/collada/library_animations/animation/source/1animation/1animation.py | 2 | 8115 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verification
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from Core.Common.DOMParser import *
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = [['library_animations', 'animation', 'animation', 'sampler', 'input'], ['library_animations', 'animation', 'sampler', 'input']]
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
    """Judging object for the animation source-data preservation test.

    JudgeBaseline: verifies that the standard steps did not crash.
    JudgeSuperior: also verifies rendering and source-data preservation.
    JudgeExemplary: same as the superior badge.
    """

    def __init__(self, _tagLst, _attrName, _attrVal, _data):
        self.tagList = _tagLst
        self.attrName = _attrName
        self.attrVal = _attrVal
        self.dataToCheck = _data
        # Cached verdict of each badge level.
        self.status_baseline = False
        self.status_superior = False
        self.status_exemplary = False
        # Input/output COLLADA document paths, resolved per test run.
        self.inputFilleName = ''
        self.outputFilleNameLst = []
        self.__assistant = JudgeAssistant.JudgeAssistant()

    # Compares animation source data between input and output files
    # Edit: only interested in time input source
    def SourcePreserved(self, context):
        # Get the input file
        self.inputFilleName = context.GetAbsInputFilename(context.GetCurrentTestId())

        # Get the output file
        outputFilenames = context.GetStepOutputFilenames("Export")
        if len(outputFilenames) == 0:
            context.Log("FAILED: There are no export steps.")
            return False
        else:
            del self.outputFilleNameLst[:]
            self.outputFilleNameLst.extend(outputFilenames)

        testIO = DOMParserIO(self.inputFilleName, self.outputFilleNameLst)
        # load files and generate root
        testIO.Init()

        # get input list
        inputInputLst = FindElement(testIO.GetRoot(self.inputFilleName), self.tagList[0])
        if len(inputInputLst) == 0:
            context.Log('FAILED: Animation source data of input is not found')
            return False

        # The output may use either tag nesting; take the first that matches.
        inputOutputLst = []
        for eachTagList in self.tagList:
            inputOutputLst = FindElement(testIO.GetRoot(self.outputFilleNameLst[0]), eachTagList)
            if (len(inputOutputLst) > 0):
                break
        if len(inputOutputLst) == 0:
            context.Log('FAILED: Animation source data of output is not found')
            return False

        # Fix: initialize both data elements so a document without an INPUT
        # semantic falls through to the failure branch below instead of
        # raising a NameError at the comparison.
        inputDataElement = None
        outputDataElement = None

        # Locate the time (INPUT semantic) source in the input document.
        for eachInput in inputInputLst:
            inputSemantic = GetAttriByEle(eachInput, 'semantic')
            if (inputSemantic == "INPUT"):
                dataType = 'float_array'
                inputDataElement = GetDataFromInput(testIO.GetRoot(self.inputFilleName), eachInput, dataType)
                if inputDataElement == None:
                    context.Log('FAILED: Source of ' + inputSemantic + ' in input is not found')
                    return False
                else:
                    break

        # Locate the matching source in the exported document.
        for eachOuput in inputOutputLst:
            if (GetAttriByEle(eachOuput, 'semantic') == inputSemantic):
                outputDataElement = GetDataFromInput(testIO.GetRoot(self.outputFilleNameLst[0]), eachOuput, dataType)
                if outputDataElement == None:
                    context.Log('FAILED: Source of ' + inputSemantic + ' in output is not found')
                    return False
                else:
                    break

        if (inputDataElement != None and outputDataElement != None):
            inputDataList = inputDataElement.childNodes[0].nodeValue.split()
            outputDataList = outputDataElement.childNodes[0].nodeValue.split()

            if (len(inputDataList) != len(outputDataList)):
                context.Log('FAILED: ' + inputSemantic + ' semantic is not preserved')
                return False

            # Compare element-wise; floats are compared with tolerance.
            for i in range(len(inputDataList)):
                if (dataType == "float_array"):
                    if (not IsValueEqual(float(outputDataList[i]), float(inputDataList[i]), 'float')):
                        context.Log('FAILED: ' + inputSemantic + ' semantic is not preserved')
                        return False
                else:
                    if (not IsValueEqual(outputDataList[i], inputDataList[i], 'string')):
                        context.Log('FAILED: ' + inputSemantic + ' semantic is not preserved')
                        return False
        else:
            context.Log('FAILED: Animation source data is not preserved')
            return False

        context.Log('PASSED: Animation source data is preserved')
        return True

    def JudgeBaseline(self, context):
        """Baseline badge: no step crashed; import/export/validate passed."""
        # No step should crash
        self.__assistant.CheckCrashes(context)

        # Import/export/validate must exist and pass, while Render must only exist.
        self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])

        self.status_baseline = self.__assistant.GetResults()
        return self.status_baseline

    # To pass superior you need to pass baseline, this object could also include additional
    # tests that were specific to the superior badge.
    def JudgeSuperior(self, context):
        # if baseline fails, no point in further checking
        if (self.status_baseline == False):
            self.status_superior = self.status_baseline
            return self.status_superior

        # Checks that images show animation
        # Then compare the import and export rendered images
        # Finally, check that the source data is preserved
        if (self.__assistant.HasAnimatedImages(context)):
            if (self.__assistant.CompareRenderedImages(context)):
                self.status_superior = self.SourcePreserved(context)
                return self.status_superior

        self.status_superior = self.__assistant.DeferJudgement(context)
        return self.status_superior

    # To pass exemplary you need to pass superior, this object could also include additional
    # tests that were specific to the exemplary badge
    def JudgeExemplary(self, context):
        self.status_exemplary = self.status_superior
        return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
| mit |
rklabs/scrapy | tests/test_middleware.py | 129 | 2743 | from twisted.trial import unittest
from scrapy.settings import Settings
from scrapy.exceptions import NotConfigured
from scrapy.middleware import MiddlewareManager
import six
class M1(object):
    """Middleware stub with both spider-lifecycle hooks and a process hook."""

    def open_spider(self, spider):
        """No-op open hook."""

    def close_spider(self, spider):
        """No-op close hook."""

    def process(self, response, request, spider):
        """No-op processing hook."""
class M2(object):
    """Middleware stub exposing only the spider open/close hooks."""

    def open_spider(self, spider):
        """No-op open hook."""

    def close_spider(self, spider):
        """No-op close hook."""
class M3(object):
    """Middleware stub exposing only a process hook."""

    def process(self, response, request, spider):
        """No-op processing hook."""
class MOff(object):
    # Middleware that disables itself: raising NotConfigured from __init__
    # tells MiddlewareManager to skip it entirely.
    def open_spider(self, spider):
        pass
    def close_spider(self, spider):
        pass
    def __init__(self):
        raise NotConfigured
class TestMiddlewareManager(MiddlewareManager):
    # Manager wired to a fixed middleware list for these tests; MOff raises
    # NotConfigured in __init__ and is therefore skipped at load time.
    @classmethod
    def _get_mwlist_from_settings(cls, settings):
        return ['tests.test_middleware.%s' % x for x in ['M1', 'MOff', 'M3']]
    def _add_middleware(self, mw):
        super(TestMiddlewareManager, self)._add_middleware(mw)
        # Also register the optional 'process' hook, unknown to the base class.
        if hasattr(mw, 'process'):
            self.methods['process'].append(mw.process)
class MiddlewareManagerTest(unittest.TestCase):
    # Exercises MiddlewareManager hook registration and ordering.

    def test_init(self):
        m1, m2, m3 = M1(), M2(), M3()
        mwman = TestMiddlewareManager(m1, m2, m3)
        # open_spider hooks run in declaration order, close_spider in reverse.
        self.assertEqual(mwman.methods['open_spider'], [m1.open_spider, m2.open_spider])
        self.assertEqual(mwman.methods['close_spider'], [m2.close_spider, m1.close_spider])
        self.assertEqual(mwman.methods['process'], [m1.process, m3.process])

    def test_methods(self):
        mwman = TestMiddlewareManager(M1(), M2(), M3())
        if six.PY2:
            # Python 2 bound methods expose their class via im_class.
            self.assertEqual([x.im_class for x in mwman.methods['open_spider']],
                             [M1, M2])
            self.assertEqual([x.im_class for x in mwman.methods['close_spider']],
                             [M2, M1])
            self.assertEqual([x.im_class for x in mwman.methods['process']],
                             [M1, M3])
        else:
            # Python 3 recovers the class through the bound __self__.
            self.assertEqual([x.__self__.__class__ for x in mwman.methods['open_spider']],
                             [M1, M2])
            self.assertEqual([x.__self__.__class__ for x in mwman.methods['close_spider']],
                             [M2, M1])
            self.assertEqual([x.__self__.__class__ for x in mwman.methods['process']],
                             [M1, M3])

    def test_enabled(self):
        m1, m2, m3 = M1(), M2(), M3()
        mwman = MiddlewareManager(m1, m2, m3)
        self.assertEqual(mwman.middlewares, (m1, m2, m3))

    def test_enabled_from_settings(self):
        settings = Settings()
        mwman = TestMiddlewareManager.from_settings(settings)
        classes = [x.__class__ for x in mwman.middlewares]
        # MOff raised NotConfigured, so only M1 and M3 survive.
        self.assertEqual(classes, [M1, M3])
| bsd-3-clause |
SurfasJones/djcmsrc3 | venv/lib/python2.7/site-packages/cms/migrations/0019_public_table_renames.py | 525 | 20033 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
# Custom-user-model compatibility: Django >= 1.5 resolves the active user
# model via get_user_model(); older versions only have auth.User.
try:
    from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
    from django.contrib.auth.models import User
else:
    User = get_user_model()

# Labels used by South to reference the user model in the frozen ORM.
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
    # Dummy migration: placeholder kept so the migration history stays
    # linear; no schema change is applied.
    pass
def backwards(self, orm):
# Dummy migration
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['%s']" % user_orm_label, 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
| mit |
mbr0wn/gnuradio | gr-blocks/python/blocks/qa_ctrlport_probes.py | 5 | 8079 | #!/usr/bin/env python
#
# Copyright 2013,2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import sys
import time
import random
import numpy
from gnuradio import gr, gr_unittest, blocks
import os
import struct
import re
from gnuradio.ctrlport.GNURadioControlPortClient import GNURadioControlPortClient
class test_ctrlport_probes(gr_unittest.TestCase):
    """Exercise the ctrlport_probe2_* blocks for each sample type.

    Each test builds a tiny flowgraph (vector source -> probe), starts it,
    then reads the probe's exported ``samples`` knob back through a
    ControlPort (thrift) client and checks the probed data.

    The five original tests were near-identical copies; the shared body now
    lives in ``_run_probe_test`` and the ControlPort plumbing in
    ``_fetch_knobs``.
    """

    def setUp(self):
        # ControlPort is only started when this env var is set.
        os.environ['GR_CONF_CONTROLPORT_ON'] = 'True'
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def _fetch_knobs(self, probe_name):
        """Connect to the first ControlPort endpoint and return the knobs
        dict for ``<probe_name>::samples``."""
        # Make sure we have time for the flowgraph to run.
        time.sleep(0.1)
        # Get an available endpoint and parse host/port out of it.
        ep = gr.rpcmanager_get().endpoints()[0]
        hostname = re.search(r"-h (\S+|\d+\.\d+\.\d+\.\d+)", ep).group(1)
        portnum = re.search(r"-p (\d+)", ep).group(1)
        # Initialize a simple ControlPort client from the endpoint.
        radiosys = GNURadioControlPortClient(
            hostname, portnum, rpcmethod='thrift')
        radio = radiosys.client
        # Get all exported knobs for this probe.
        return radio.getKnobs([probe_name + "::samples"])

    def _run_probe_test(self, make_source, make_probe, units, anchor,
                        decode=None, compare=None):
        """Shared test body.

        Args:
            make_source: blocks.vector_source_* constructor for this type.
            make_probe:  blocks.ctrlport_probe2_* constructor for this type.
            units:       display-units string passed to the probe.
            anchor:      value of the first sample, used to find the start
                         of the (possibly offset) probe ring buffer.
            decode:      optional callable turning the raw knob value into
                         a list (used for packed byte probes).
            compare:     optional assertion taking (expected, actual);
                         defaults to assertEqual.
        """
        data = list(range(1, 9))
        self.src = make_source(data, True)
        self.probe = make_probe("samples", units, len(data), gr.DISPNULL)
        probe_name = self.probe.alias()

        self.tb.connect(self.src, self.probe)
        self.tb.start()

        expected_result = [1, 2, 3, 4, 5, 6, 7, 8]
        ret = self._fetch_knobs(probe_name)
        for name in list(ret.keys()):
            # Data in the probe might be offset; find the beginning
            # (the anchor sample) and unwrap.
            result = ret[name].value
            if decode is not None:
                result = decode(result)
            i = result.index(anchor)
            result = result[i:] + result[0:i]
            if compare is not None:
                compare(expected_result, result)
            else:
                self.assertEqual(expected_result, result)

        self.tb.stop()
        self.tb.wait()

    def test_001(self):
        # Probes return complex values; imaginary parts of this set are 0.
        self._run_probe_test(
            blocks.vector_source_c, blocks.ctrlport_probe2_c, "Complex",
            anchor=complex(1.0, 0.0),
            compare=lambda exp, res:
                self.assertComplexTuplesAlmostEqual(exp, res, 4))

    def test_002(self):
        self._run_probe_test(blocks.vector_source_f, blocks.ctrlport_probe2_f,
                             "Floats", anchor=1.0)

    def test_003(self):
        self._run_probe_test(blocks.vector_source_i, blocks.ctrlport_probe2_i,
                             "Integers", anchor=1.0)

    def test_004(self):
        self._run_probe_test(blocks.vector_source_s, blocks.ctrlport_probe2_s,
                             "Shorts", anchor=1.0)

    def test_005(self):
        # Byte probes return packed bytes; unpack into a list of ints first.
        self._run_probe_test(
            blocks.vector_source_b, blocks.ctrlport_probe2_b, "Bytes",
            anchor=1,
            decode=lambda raw: list(struct.unpack(len(raw) * 'b', raw)))
if __name__ == '__main__':
    # Run this file's tests under GNU Radio's unittest wrapper.
    gr_unittest.run(test_ctrlport_probes)
| gpl-3.0 |
pubnative/redash | tests/test_utils.py | 12 | 2552 | from redash.utils import build_url, collect_query_parameters, collect_parameters_from_request
from collections import namedtuple
from unittest import TestCase
DummyRequest = namedtuple('DummyRequest', ['host', 'scheme'])
class TestBuildUrl(TestCase):
    """build_url must honour the current request's scheme and keep the port
    only when it is non-default for that scheme."""

    def _assert_url(self, host, scheme, expected):
        request = DummyRequest(host, scheme)
        self.assertEqual(expected, build_url(request, "example.com", "/test"))

    def test_simple_case(self):
        self._assert_url("", "http", "http://example.com/test")

    def test_uses_current_request_port(self):
        self._assert_url("example.com:5000", "http",
                         "http://example.com:5000/test")

    def test_uses_current_request_schema(self):
        self._assert_url("example.com", "https", "https://example.com/test")

    def test_skips_port_for_default_ports(self):
        self._assert_url("example.com:443", "https", "https://example.com/test")
        self._assert_url("example.com:80", "http", "http://example.com/test")
        # Mismatched scheme/port combinations keep the explicit port.
        self._assert_url("example.com:80", "https", "https://example.com:80/test")
        self._assert_url("example.com:443", "http", "http://example.com:443/test")
class TestCollectParametersFromQuery(TestCase):
    """collect_query_parameters extracts {{...}} placeholder names."""

    def test_returns_empty_list_for_regular_query(self):
        self.assertEqual(collect_query_parameters(u"SELECT 1"), [])

    def test_finds_all_params(self):
        params = collect_query_parameters(u"SELECT {{param}} FROM {{table}}")
        self.assertEqual(params, ['param', 'table'])

    def test_deduplicates_params(self):
        params = collect_query_parameters(
            u"SELECT {{param}}, {{param}} FROM {{table}}")
        self.assertEqual(params, ['param', 'table'])

    def test_handles_nested_params(self):
        params = collect_query_parameters(
            u"SELECT {{param}}, {{param}} FROM {{table}} "
            u"-- {{#test}} {{nested_param}} {{/test}}")
        self.assertEqual(params, ['param', 'table', 'test', 'nested_param'])
class TestCollectParametersFromRequest(TestCase):
    """collect_parameters_from_request keeps only 'p_'-prefixed arguments,
    with the prefix stripped."""

    def test_ignores_non_prefixed_values(self):
        self.assertEqual(collect_parameters_from_request({'test': 1}), {})

    def test_takes_prefixed_values(self):
        result = collect_parameters_from_request(
            {'p_test': 1, 'p_something_else': 'test'})
        self.assertDictEqual(result, {'test': 1, 'something_else': 'test'})
| bsd-2-clause |
JarbasAI/JarbasAI | jarbas_models/tf_tacotron/models/modules.py | 1 | 3455 | import tensorflow as tf
from tensorflow.contrib.rnn import GRUCell
def prenet(inputs, is_training, layer_sizes=(256, 128), scope=None):
    """Tacotron pre-net: a stack of dense(ReLU) + dropout layers.

    Args:
        inputs: input tensor.
        is_training: Python bool; dropout (rate 0.5) is applied only when
            training (rate 0 makes it a no-op at inference).
        layer_sizes: sequence of hidden-layer widths, one dense+dropout pair
            per entry.  A tuple default replaces the original mutable-list
            default argument (same values; avoids the shared-mutable pitfall).
        scope: optional variable scope name; defaults to 'prenet'.

    Returns:
        Output tensor of the last dropout layer.
    """
    x = inputs
    drop_rate = 0.5 if is_training else 0.0
    with tf.variable_scope(scope or 'prenet'):
        for i, size in enumerate(layer_sizes):
            dense = tf.layers.dense(x, units=size, activation=tf.nn.relu,
                                    name='dense_%d' % (i + 1))
            x = tf.layers.dropout(dense, rate=drop_rate,
                                  name='dropout_%d' % (i + 1))
    return x
def encoder_cbhg(inputs, input_lengths, is_training):
    """CBHG stack with the encoder configuration (K=16 banks, 128/128
    projections).  See ``cbhg`` for details."""
    return cbhg(
        inputs,
        input_lengths,
        is_training,
        scope='encoder_cbhg',
        K=16,
        projections=[128, 128])
def post_cbhg(inputs, input_dim, is_training):
    """CBHG stack with the post-processing configuration (K=8 banks,
    256/input_dim projections, no sequence lengths).  See ``cbhg``."""
    return cbhg(
        inputs,
        None,
        is_training,
        scope='post_cbhg',
        K=8,
        projections=[256, input_dim])
def cbhg(inputs, input_lengths, is_training, scope, K, projections):
    """CBHG module: Conv1D bank + max-pool + projections + highway + BiGRU.

    Args:
        inputs: input tensor; assumed [batch, time, depth] -- TODO confirm
            against callers.
        input_lengths: per-example sequence lengths for the bidirectional
            RNN, or None.
        is_training: Python bool controlling batch-norm behaviour.
        scope: variable scope name.
        K: number of conv-bank kernel widths (1..K).
        projections: output channel counts of the two projection layers;
            projections[1] must match the input depth for the residual add.

    Returns:
        [batch, time, 256] tensor (forward and backward 128-unit GRU
        outputs concatenated).
    """
    with tf.variable_scope(scope):
        with tf.variable_scope('conv_bank'):
            # Convolution bank: concatenate on the last axis to stack channels from all convolutions
            conv_outputs = tf.concat(
                [conv1d(inputs, k, 128, tf.nn.relu, is_training,
                        'conv1d_%d' % k) for k in range(1, K + 1)],
                axis=-1
            )
        # Maxpooling (stride 1, 'same' padding preserves the time dimension):
        maxpool_output = tf.layers.max_pooling1d(
            conv_outputs,
            pool_size=2,
            strides=1,
            padding='same')
        # Two projection layers:
        proj1_output = conv1d(maxpool_output, 3, projections[0], tf.nn.relu,
                              is_training, 'proj_1')
        proj2_output = conv1d(proj1_output, 3, projections[1], None,
                              is_training, 'proj_2')
        # Residual connection:
        highway_input = proj2_output + inputs
        # Handle dimensionality mismatch (highway layers are fixed at 128 units):
        if highway_input.shape[2] != 128:
            highway_input = tf.layers.dense(highway_input, 128)
        # 4-layer HighwayNet:
        for i in range(4):
            highway_input = highwaynet(highway_input, 'highway_%d' % (i + 1))
        rnn_input = highway_input
        # Bidirectional RNN
        outputs, states = tf.nn.bidirectional_dynamic_rnn(
            GRUCell(128),
            GRUCell(128),
            rnn_input,
            sequence_length=input_lengths,
            dtype=tf.float32)
        return tf.concat(outputs, axis=2)  # Concat forward and backward
def highwaynet(inputs, scope):
    """Single 128-unit highway layer: H(x)*T(x) + x*(1 - T(x)).

    The transform gate T is a sigmoid whose bias is initialized to -1.0,
    biasing the layer toward carrying the input through at the start of
    training.  ``inputs`` must already have depth 128 for the gating
    arithmetic to broadcast correctly.
    """
    with tf.variable_scope(scope):
        # H: candidate transformation.
        H = tf.layers.dense(
            inputs,
            units=128,
            activation=tf.nn.relu,
            name='H')
        # T: transform gate in (0, 1).
        T = tf.layers.dense(
            inputs,
            units=128,
            activation=tf.nn.sigmoid,
            name='T',
            bias_initializer=tf.constant_initializer(-1.0))
        return H * T + inputs * (1.0 - T)
def conv1d(inputs, kernel_size, channels, activation, is_training, scope):
    """1-D convolution ('same' padding) followed by batch normalization.

    NOTE(review): tf.layers.batch_normalization registers its moving-average
    update ops under tf.GraphKeys.UPDATE_OPS; the training op must run those
    for inference statistics to be correct -- confirm the trainer does.
    """
    with tf.variable_scope(scope):
        conv1d_output = tf.layers.conv1d(
            inputs,
            filters=channels,
            kernel_size=kernel_size,
            activation=activation,
            padding='same')
        return tf.layers.batch_normalization(conv1d_output,
                                             training=is_training)
| gpl-3.0 |
ssh1/stbgui | lib/python/Components/Input.py | 32 | 6944 | from HTMLComponent import HTMLComponent
from GUIComponent import GUIComponent
from VariableText import VariableText
from enigma import eLabel
from Tools.NumericalTextInput import NumericalTextInput
class Input(VariableText, HTMLComponent, GUIComponent, NumericalTextInput):
	"""Single-line text input field with a movable cursor.

	Three modes: TEXT (plain multi-tap entry), PIN (characters rendered
	as '*') and NUMBER (digits entered directly).  The logical text is
	kept as unicode in self.Text; self.text (from VariableText) holds the
	rendered, UTF-8 encoded form produced by update().
	"""
	# Input/display modes
	TEXT = 0
	PIN = 1
	NUMBER = 2

	def __init__(self, text="", maxSize=False, visible_width=False, type=TEXT, currPos=0, allMarked=True):
		# maxSize=True keeps the text length fixed (starts in overwrite
		# mode); visible_width enables horizontal scrolling via self.offset.
		NumericalTextInput.__init__(self, self.right)
		GUIComponent.__init__(self)
		VariableText.__init__(self)
		self.type = type
		# Non-empty, non-PIN fields may start fully marked (select-all).
		self.allmarked = allMarked and (text != "") and (type != self.PIN)
		self.maxSize = maxSize
		self.currPos = currPos
		self.visible_width = visible_width
		self.offset = 0
		self.overwrite = maxSize
		self.setText(text)

	def __len__(self):
		return len(self.text)
	def update(self):
		"""Re-render self.text from self.Text, cursor position and offset."""
		if self.visible_width:
			# Scroll so the cursor stays inside the visible window.
			if self.currPos < self.offset:
				self.offset = self.currPos
			if self.currPos >= self.offset + self.visible_width:
				if self.currPos == len(self.Text):
					self.offset = self.currPos - self.visible_width
				else:
					self.offset = self.currPos - self.visible_width + 1
			# Don't scroll past the end of the text.
			if self.offset > 0 and self.offset + self.visible_width > len(self.Text):
				self.offset = max(0, len(self.Text) - self.visible_width)
		if self.allmarked:
			# -2 marks the whole text in the widget.
			self.setMarkedPos(-2)
		else:
			self.setMarkedPos(self.currPos-self.offset)
		if self.visible_width:
			if self.type == self.PIN:
				self.text = ""
				for x in self.Text[self.offset:self.offset+self.visible_width]:
					# Mask every character except spaces.
					self.text += (x==" " and " " or "*")
			else:
				self.text = self.Text[self.offset:self.offset+self.visible_width].encode("utf-8") + " "
		else:
			if self.type == self.PIN:
				self.text = ""
				for x in self.Text:
					self.text += (x==" " and " " or "*")
			else:
				# Trailing space leaves room for the cursor at end-of-text.
				self.text = self.Text.encode("utf-8") + " "
	def setText(self, text):
		"""Replace the stored text; accepts str (decoded as UTF-8) or unicode."""
		if not len(text):
			self.currPos = 0
			self.Text = u""
		else:
			if isinstance(text, str):
				# Undecodable bytes are silently dropped ('ignore').
				self.Text = text.decode("utf-8", "ignore")
			else:
				self.Text = text
		self.update()

	def getText(self):
		"""Return the current text as a UTF-8 encoded str."""
		return self.Text.encode('utf-8')
def createWidget(self, parent):
if self.allmarked:
return eLabel(parent, -2)
else:
return eLabel(parent, self.currPos-self.offset)
def getSize(self):
s = self.instance.calculateSize()
return (s.width(), s.height())
def markAll(self):
self.allmarked = True
self.update()
def innerright(self):
if self.allmarked:
self.currPos = 0
self.allmarked = False
elif self.maxSize:
if self.currPos < len(self.Text)-1:
self.currPos += 1
else:
if self.currPos < len(self.Text):
self.currPos += 1
def right(self):
if self.type == self.TEXT:
self.timeout()
self.innerright()
self.update()
def left(self):
if self.type == self.TEXT:
self.timeout()
if self.allmarked:
if self.maxSize:
self.currPos = len(self.Text) - 1
else:
self.currPos = len(self.Text)
self.allmarked = False
elif self.currPos > 0:
self.currPos -= 1
self.update()
	def up(self):
		"""Cycle the character under the cursor upwards as a digit.

		At end-of-text, on '9' or on a space the digit wraps to '0'.
		"""
		self.allmarked = False
		if self.type == self.TEXT:
			self.timeout()
		if self.currPos == len(self.Text) or self.Text[self.currPos] == "9" or self.Text[self.currPos] == " ":
			newNumber = "0"
		else:
			newNumber = str(int(self.Text[self.currPos]) + 1)
		self.Text = self.Text[0:self.currPos] + newNumber + self.Text[self.currPos + 1:]
		self.update()

	def down(self):
		"""Cycle the character under the cursor downwards as a digit.

		At end-of-text, on '0' or on a space the digit wraps to '9'.
		"""
		self.allmarked = False
		if self.type == self.TEXT:
			self.timeout()
		if self.currPos == len(self.Text) or self.Text[self.currPos] == "0" or self.Text[self.currPos] == " ":
			newNumber = "9"
		else:
			newNumber = str(int(self.Text[self.currPos]) - 1)
		self.Text = self.Text[0:self.currPos] + newNumber + self.Text[self.currPos + 1:]
		self.update()

	def home(self):
		"""Move the cursor to the start of the text."""
		self.allmarked = False
		if self.type == self.TEXT:
			self.timeout()
		self.currPos = 0
		self.update()

	def end(self):
		"""Move the cursor to the end (last character in fixed-size mode)."""
		self.allmarked = False
		if self.type == self.TEXT:
			self.timeout()
		if self.maxSize:
			self.currPos = len(self.Text) - 1
		else:
			self.currPos = len(self.Text)
		self.update()
	def insertChar(self, ch, pos=False, owr=False, ins=False):
		"""Insert character ch at pos (defaults to the cursor position).

		owr forces overwrite, ins forces insert; otherwise self.overwrite
		decides.  With maxSize the total length is kept constant.
		NOTE(review): ``not pos`` also triggers for an explicit pos=0;
		current callers always pass the cursor, so this is harmless here.
		"""
		if isinstance(ch, str):
			ch = ch.decode("utf-8","ignore")
		if not pos:
			pos = self.currPos
		if ins and not self.maxSize:
			# Forced insert: grow the string.
			self.Text = self.Text[0:pos] + ch + self.Text[pos:]
		elif owr or self.overwrite:
			# Overwrite the character at pos.
			self.Text = self.Text[0:pos] + ch + self.Text[pos + 1:]
		elif self.maxSize:
			# Fixed size: insert and drop the last character.
			self.Text = self.Text[0:pos] + ch + self.Text[pos:-1]
		else:
			self.Text = self.Text[0:pos] + ch + self.Text[pos:]

	def deleteChar(self, pos):
		"""Remove the character at pos; in fixed-size mode keep the length."""
		if not self.maxSize:
			self.Text = self.Text[0:pos] + self.Text[pos + 1:]
		elif self.overwrite:
			# Fixed size + overwrite: blank the character in place.
			self.Text = self.Text[0:pos] + u" " + self.Text[pos + 1:]
		else:
			# Fixed size + insert: shift left, pad with a space at the end.
			self.Text = self.Text[0:pos] + self.Text[pos + 1:] + u" "

	def deleteAllChars(self):
		"""Clear the text (blank it in fixed-size mode) and reset the cursor."""
		if self.maxSize:
			self.Text = u" " * len(self.Text)
		else:
			self.Text = u""
		self.currPos = 0
def tab(self):
if self.type == self.TEXT:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
else:
self.insertChar(u" ", self.currPos, False, True);
self.innerright()
self.update()
def delete(self):
if self.type == self.TEXT:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
else:
self.deleteChar(self.currPos);
if self.maxSize and self.overwrite:
self.innerright()
self.update()
def deleteBackward(self):
if self.type == self.TEXT:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
else:
if self.currPos > 0:
self.deleteChar(self.currPos-1);
if not self.maxSize and self.offset > 0:
self.offset -= 1
self.currPos -= 1
self.update()
def deleteForward(self):
if self.type == self.TEXT:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
else:
self.deleteChar(self.currPos);
self.update()
def toggleOverwrite(self):
if self.type == self.TEXT:
self.timeout()
self.overwrite = not self.overwrite
self.update()
def handleAscii(self, code):
if self.type == self.TEXT:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(unichr(code), self.currPos, False, False);
self.innerright()
self.update()
def number(self, number):
if self.type == self.TEXT:
owr = self.lastKey == number
newChar = self.getKey(number)
elif self.type == self.PIN or self.type == self.NUMBER:
owr = False
newChar = str(number)
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.currPos, owr, False);
if self.type == self.PIN or self.type == self.NUMBER:
self.innerright()
self.update()
	def char(self, char):
		# Insert an already-decoded character at the cursor and advance.
		if self.allmarked:
			self.deleteAllChars()
			self.allmarked = False
		# assumes insertChar's remaining parameters have suitable
		# defaults (position/overwrite flags) -- TODO confirm
		self.insertChar(char)
		self.innerright()
		self.update()
| gpl-2.0 |
zhangg/trove | trove/guestagent/datastore/mysql/service.py | 1 | 3685 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from trove.common.i18n import _
from trove.guestagent.datastore.mysql_common import service
LOG = logging.getLogger(__name__)
CONF = service.CONF
class KeepAliveConnection(service.BaseKeepAliveConnection):
    """MySQL keep-alive DB connection; all behavior is inherited."""
    pass
class MySqlAppStatus(service.BaseMySqlAppStatus):
    """MySQL service-status tracker; all behavior is inherited."""
    pass
class LocalSqlClient(service.BaseLocalSqlClient):
    """Context-managed SQL client for the local MySQL instance."""
    pass
class MySqlApp(service.BaseMySqlApp):
    """MySQL-specific guestagent application manager.

    Wires the common base implementation up with the local SQL client
    and keep-alive connection classes defined in this module, and adds
    GTID-based replication helpers.
    """

    def __init__(self, status):
        super(MySqlApp, self).__init__(status, LocalSqlClient,
                                       KeepAliveConnection)

    # DEPRECATED: Mantain for API Compatibility
    def get_txn_count(self):
        """Return the number of transactions in @@global.gtid_executed."""
        # Fixed log message: previously said "Retrieving latest txn id.",
        # copy-pasted from get_latest_txn_id(), which was misleading.
        LOG.info(_("Retrieving latest txn count."))
        txn_count = 0
        with self.local_sql_client(self.get_engine()) as client:
            result = client.execute('SELECT @@global.gtid_executed').first()
            # A GTID set looks like '<uuid>:1-5:7,<uuid>:1-3'; sum the
            # spans of every interval after each source UUID.
            for uuid_set in result[0].split(','):
                for interval in uuid_set.split(':')[1:]:
                    if '-' in interval:
                        iparts = interval.split('-')
                        # NOTE(review): an inclusive range x-y holds
                        # y - x + 1 transactions but this computes y - x.
                        # Preserved as-is: the method is deprecated and
                        # callers may depend on the existing value.
                        txn_count += int(iparts[1]) - int(iparts[0])
                    else:
                        txn_count += 1
        return txn_count

    def _get_slave_status(self):
        """Return the first SHOW SLAVE STATUS row, or None if not a slave."""
        with self.local_sql_client(self.get_engine()) as client:
            return client.execute('SHOW SLAVE STATUS').first()

    def _get_master_UUID(self):
        """Return the replication master's server UUID, or None."""
        slave_status = self._get_slave_status()
        return slave_status and slave_status['Master_UUID'] or None

    def _get_gtid_executed(self):
        """Return the @@global.gtid_executed GTID set as a string."""
        with self.local_sql_client(self.get_engine()) as client:
            return client.execute('SELECT @@global.gtid_executed').first()[0]

    def get_last_txn(self):
        """Return (master UUID, id of the newest txn applied from it)."""
        master_UUID = self._get_master_UUID()
        last_txn_id = '0'
        gtid_executed = self._get_gtid_executed()
        for gtid_set in gtid_executed.split(','):
            uuid_set = gtid_set.split(':')
            if uuid_set[0] == master_UUID:
                # The upper bound of the final interval is the newest id.
                last_txn_id = uuid_set[-1].split('-')[-1]
                break
        return master_UUID, int(last_txn_id)

    def get_latest_txn_id(self):
        """Return the current gtid_executed set (used as a txn marker)."""
        LOG.info(_("Retrieving latest txn id."))
        return self._get_gtid_executed()

    def wait_for_txn(self, txn):
        """Block until the SQL thread has applied GTID set `txn`."""
        LOG.info(_("Waiting on txn '%s'."), txn)
        with self.local_sql_client(self.get_engine()) as client:
            client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')"
                           % txn)
class MySqlRootAccess(service.BaseMySqlRootAccess):
    """Root-access management bound to this module's client and app."""
    def __init__(self):
        super(MySqlRootAccess, self).__init__(LocalSqlClient,
                                              MySqlApp(MySqlAppStatus.get()))
class MySqlAdmin(service.BaseMySqlAdmin):
    """Administrative operations (users, databases) for local MySQL."""
    def __init__(self):
        super(MySqlAdmin, self).__init__(LocalSqlClient, MySqlRootAccess(),
                                         MySqlApp)
# Module-level alias kept for callers that fetch the engine directly.
get_engine = MySqlApp.get_engine
| apache-2.0 |
tmerrick1/spack | var/spack/repos/builtin/packages/r-mlr/package.py | 4 | 2806 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RMlr(RPackage):
    """Interface to a large number of classification and regression techniques,
    including machine-readable parameter descriptions. There is also an
    experimental extension for survival analysis, clustering and general,
    example-specific cost-sensitive learning. Generic resampling,
    including cross-validation, bootstrapping and subsampling.
    Hyperparameter tuning with modern optimization techniques,
    for single- and multi-objective problems. Filter and wrapper methods for
    feature selection. Extension of basic learners with additional
    operations common in machine learning, also allowing for easy nested
    resampling. Most operations can be parallelized."""
    # NOTE: the docstring above is user-facing (`spack info` description);
    # keep it in sync with upstream's CRAN description.
    homepage = "https://github.com/mlr-org/mlr/"
    url = "https://cran.r-project.org/src/contrib/mlr_2.12.1.tar.gz"
    list_url = "https://cran.r-project.org/src/contrib/Archive/mlr"
    # Known releases with their md5 checksums (must match CRAN tarballs).
    version('2.12.1', 'abddfc9dfe95f290a233ecd97969a4ec')
    version('2.12', '94ee7495aeafb432c8af5a8bdd26c25f')
    # R package dependencies, required at both build and run time.
    depends_on('r-paramhelpers@1.10:', type=('build', 'run'))
    depends_on('r-bbmisc@1.11:', type=('build', 'run'))
    depends_on('r-backports@1.1.0:', type=('build', 'run'))
    depends_on('r-ggplot2', type=('build', 'run'))
    depends_on('r-stringi', type=('build', 'run'))
    depends_on('r-checkmate@1.8.2:', type=('build', 'run'))
    depends_on('r-data-table', type=('build', 'run'))
    depends_on('r-parallelmap@1.3:', type=('build', 'run'))
    depends_on('r-xml', type=('build', 'run'))
| lgpl-2.1 |
LethusTI/supportcenter | vendor/django/tests/regressiontests/admin_util/models.py | 40 | 1082 | from django.db import models
class Article(models.Model):
    """
    A simple Article model for testing
    """
    # Reverse accessor from Site is `admin_articles`.
    site = models.ForeignKey('sites.Site', related_name="admin_articles")
    title = models.CharField(max_length=100)
    # Same shape as `title` but with an explicit human-readable label.
    title2 = models.CharField(max_length=100, verbose_name="another name")
    created = models.DateTimeField()
    def test_from_model(self):
        """Model callable referenced by name from admin list_display."""
        return "nothing"
    def test_from_model_with_override(self):
        """Like test_from_model, but with a custom admin column header."""
        return "nothing"
    # Custom column header the admin-util tests assert on.
    test_from_model_with_override.short_description = "not What you Expect"
class Count(models.Model):
    # Self-referential model used to exercise related-field handling.
    num = models.PositiveSmallIntegerField()
    parent = models.ForeignKey('self', null=True)
    def __unicode__(self):
        # Python 2 string representation: just the number.
        return unicode(self.num)
class Event(models.Model):
    # Timestamp is set automatically on creation.
    date = models.DateTimeField(auto_now_add=True)
class Location(models.Model):
    # One-to-one with an explicit verbose_name on the relation.
    event = models.OneToOneField(Event, verbose_name='awesome event')
class Guest(models.Model):
    # One-to-one relation whose verbose_name comes from Meta instead.
    event = models.OneToOneField(Event)
    name = models.CharField(max_length=255)
    class Meta:
        verbose_name = "awesome guest"
| gpl-3.0 |
gnieboer/tensorflow | tensorflow/contrib/layers/python/layers/initializers_test.py | 111 | 7640 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InitializerTest(test.TestCase):
  """Tests for the Xavier (Glorot) initializer."""

  def test_xavier_wrong_dtype(self):
    # Integer dtypes are rejected outright.
    with self.assertRaisesRegexp(
        TypeError, 'Cannot create initializer for non-floating point type.'):
      initializers.xavier_initializer(dtype=dtypes.int32)
    self.assertIsNone(regularizers.l1_regularizer(0.)(None))

  def _test_xavier(self, initializer, shape, variance, uniform):
    # Sample a variable with the given initializer and check that the
    # empirical variance matches the analytic Xavier variance.
    with session.Session() as sess:
      weights = variable_scope.get_variable(
          name='test',
          shape=shape,
          dtype=dtypes.float32,
          initializer=initializer(uniform=uniform, seed=1))
      sess.run(variables.global_variables_initializer())
      sampled = weights.eval()
    self.assertAllClose(np.var(sampled), variance, 1e-3, 1e-3)

  def test_xavier_uniform(self):
    self._test_xavier(initializers.xavier_initializer, [100, 40],
                      2. / (100. + 40.), uniform=True)

  def test_xavier_normal(self):
    self._test_xavier(initializers.xavier_initializer, [100, 40],
                      2. / (100. + 40.), uniform=False)

  def test_xavier_scalar(self):
    # A scalar variable has zero variance by definition.
    self._test_xavier(initializers.xavier_initializer, [], 0.0, uniform=True)

  def test_xavier_conv2d_uniform(self):
    self._test_xavier(layers.xavier_initializer_conv2d, [100, 40, 5, 7],
                      2. / (100. * 40 * (5 + 7)), uniform=True)

  def test_xavier_conv2d_normal(self):
    self._test_xavier(layers.xavier_initializer_conv2d, [100, 40, 5, 7],
                      2. / (100. * 40 * (5 + 7)), uniform=False)
class VarianceScalingInitializerTest(test.TestCase):
  """Tests for variance_scaling_initializer across modes and shapes."""

  def test_wrong_dtype(self):
    # Both the factory and the returned initializer reject integer dtypes.
    with self.assertRaisesRegexp(
        TypeError, 'Cannot create initializer for non-floating point type.'):
      initializers.variance_scaling_initializer(dtype=dtypes.int32)
    initializer = initializers.variance_scaling_initializer()
    with self.assertRaisesRegexp(
        TypeError, 'Cannot create initializer for non-floating point type.'):
      initializer([], dtype=dtypes.int32)

  def _test_variance(self, initializer, shape, variance, factor, mode, uniform):
    """Check that sampled values have the expected analytic variance."""
    with ops.Graph().as_default() as g:
      with self.test_session(graph=g) as sess:
        var = variable_scope.get_variable(
            name='test',
            shape=shape,
            dtype=dtypes.float32,
            initializer=initializer(
                factor=factor, mode=mode, uniform=uniform, seed=1))
        sess.run(variables.global_variables_initializer())
        values = var.eval()
        self.assertAllClose(np.var(values), variance, 1e-3, 1e-3)

  def test_fan_in(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100, 40],
          variance=2. / 100.,
          factor=2.0,
          mode='FAN_IN',
          uniform=uniform)

  def test_fan_out(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100, 40],
          variance=2. / 40.,
          factor=2.0,
          mode='FAN_OUT',
          uniform=uniform)

  def test_fan_avg(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100, 40],
          variance=4. / (100. + 40.),
          factor=2.0,
          mode='FAN_AVG',
          uniform=uniform)

  def test_conv2d_fan_in(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100, 40, 5, 7],
          variance=2. / (100. * 40. * 5.),
          factor=2.0,
          mode='FAN_IN',
          uniform=uniform)

  def test_conv2d_fan_out(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100, 40, 5, 7],
          variance=2. / (100. * 40. * 7.),
          factor=2.0,
          mode='FAN_OUT',
          uniform=uniform)

  def test_conv2d_fan_avg(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100, 40, 5, 7],
          variance=2. / (100. * 40. * (5. + 7.)),
          factor=2.0,
          mode='FAN_AVG',
          uniform=uniform)

  def test_xavier_uniform(self):
    self._test_variance(
        initializers.variance_scaling_initializer,
        shape=[100, 40],
        variance=2. / (100. + 40.),
        factor=1.0,
        mode='FAN_AVG',
        uniform=True)

  def test_xavier_normal(self):
    self._test_variance(
        initializers.variance_scaling_initializer,
        shape=[100, 40],
        variance=2. / (100. + 40.),
        factor=1.0,
        mode='FAN_AVG',
        uniform=False)

  def test_xavier_scalar(self):
    self._test_variance(
        initializers.variance_scaling_initializer,
        shape=[],
        variance=0.0,
        factor=1.0,
        mode='FAN_AVG',
        uniform=False)

  def test_xavier_conv2d_uniform(self):
    self._test_variance(
        initializers.variance_scaling_initializer,
        shape=[100, 40, 5, 7],
        variance=2. / (100. * 40. * (5. + 7.)),
        factor=1.0,
        mode='FAN_AVG',
        uniform=True)

  def test_xavier_conv2d_normal(self):
    # Fixed: this previously passed uniform=True, making it a duplicate of
    # test_xavier_conv2d_uniform and leaving the normal (truncated-normal)
    # conv2d path untested; cf. test_xavier_normal which uses uniform=False.
    self._test_variance(
        initializers.variance_scaling_initializer,
        shape=[100, 40, 5, 7],
        variance=2. / (100. * 40. * (5. + 7.)),
        factor=1.0,
        mode='FAN_AVG',
        uniform=False)

  def test_1d_shape_fan_in(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100],
          variance=2. / 100.,
          factor=2.0,
          mode='FAN_IN',
          uniform=uniform)

  def test_1d_shape_fan_out(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100],
          variance=2. / 100.,
          factor=2.0,
          mode='FAN_OUT',
          uniform=uniform)

  def test_1d_shape_fan_avg(self):
    for uniform in [False, True]:
      self._test_variance(
          initializers.variance_scaling_initializer,
          shape=[100],
          variance=4. / (100. + 100.),
          factor=2.0,
          mode='FAN_AVG',
          uniform=uniform)
if __name__ == '__main__':
  # Run the test suite when this file is executed as a script.
  test.main()
| apache-2.0 |
abdulbaqi/quranf | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/sanitizer.py | 805 | 16428 | from __future__ import absolute_import, division, unicode_literals
import re
from xml.sax.saxutils import escape, unescape
from .tokenizer import HTMLTokenizer
from .constants import tokenTypes
class HTMLSanitizerMixin(object):
    """ sanitization of XHTML+MathML+SVG and of inline style attributes."""

    # ------------------------------------------------------------------
    # Whitelists.  Anything not listed here is escaped (elements) or
    # dropped (attributes, CSS properties, URI schemes).
    # ------------------------------------------------------------------

    # Elements allowed in (X)HTML output.
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
                           'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
                           'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
                           'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
                           'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
                           'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
                           'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
                           'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
                           'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
                           'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
                           'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
                           'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
                           'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']

    # MathML elements allowed in embedded math markup.
    mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
                       'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
                       'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
                       'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
                       'munderover', 'none']

    # SVG elements allowed in embedded vector graphics.
    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
                    'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
                    'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
                    'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
                    'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
                    'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']

    # Attributes allowed on (X)HTML elements.
    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
                             'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
                             'background', 'balance', 'bgcolor', 'bgproperties', 'border',
                             'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
                             'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
                             'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
                             'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
                             'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
                             'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
                             'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
                             'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
                             'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
                             'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
                             'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
                             'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
                             'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
                             'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
                             'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
                             'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
                             'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
                             'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
                             'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
                             'width', 'wrap', 'xml:lang']

    # Attributes allowed on MathML elements.
    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
                         'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
                         'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
                         'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
                         'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
                         'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
                         'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
                         'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
                         'xlink:type', 'xmlns', 'xmlns:xlink']

    # Attributes allowed on SVG elements.
    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
                      'arabic-form', 'ascent', 'attributeName', 'attributeType',
                      'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
                      'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
                      'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
                      'fill-opacity', 'fill-rule', 'font-family', 'font-size',
                      'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
                      'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
                      'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
                      'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
                      'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
                      'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
                      'opacity', 'orient', 'origin', 'overline-position',
                      'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
                      'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
                      'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
                      'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
                      'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
                      'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
                      'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
                      'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
                      'transform', 'type', 'u1', 'u2', 'underline-position',
                      'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
                      'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
                      'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
                      'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
                      'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
                      'y1', 'y2', 'zoomAndPan']

    # Attributes whose values are URIs and must pass the scheme whitelist.
    attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster',
                       'xlink:href', 'xml:base']

    # SVG attributes that may contain url(...) references, which are
    # stripped in allowed_token() below.
    svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
                               'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
                               'mask', 'stroke']

    # SVG elements on which a local (fragment) xlink:href is disallowed.
    svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
                            'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
                            'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
                            'set', 'use']

    # CSS properties allowed in style="" attributes.
    acceptable_css_properties = ['azimuth', 'background-color',
                                 'border-bottom-color', 'border-collapse', 'border-color',
                                 'border-left-color', 'border-right-color', 'border-top-color', 'clear',
                                 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
                                 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
                                 'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
                                 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
                                 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
                                 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
                                 'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
                                 'white-space', 'width']

    # Keywords allowed as values for shorthand properties (background,
    # border, margin, padding) in addition to numeric/color literals.
    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
                               'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
                               'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
                               'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
                               'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
                               'transparent', 'underline', 'white', 'yellow']

    # CSS properties allowed when styling SVG content.
    acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
                                 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
                                 'stroke-opacity']

    # URI schemes allowed in URI-valued attributes.
    acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
                            'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
                            'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
                            'ssh', 'sftp', 'rtsp', 'afs']

    # subclasses may define their own versions of these constants
    allowed_elements = acceptable_elements + mathml_elements + svg_elements
    allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
    allowed_css_properties = acceptable_css_properties
    allowed_css_keywords = acceptable_css_keywords
    allowed_svg_properties = acceptable_svg_properties
    allowed_protocols = acceptable_protocols

    # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
    # stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
    # attributes are parsed, and a restricted set, # specified by
    # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
    # attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
    # in ALLOWED_PROTOCOLS are allowed.
    #
    #   sanitize_html('<script> do_nasty_stuff() </script>')
    #    => &lt;script> do_nasty_stuff() &lt;/script>
    #   sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
    #    => <a>Click here for $100</a>
    def sanitize_token(self, token):
        """Sanitize one tokenizer token.

        Returns the (possibly rewritten) token, or None to drop it
        (comments are dropped entirely).
        """
        # accommodate filters which use token_type differently
        token_type = token["type"]
        if token_type in list(tokenTypes.keys()):
            token_type = tokenTypes[token_type]
        if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
                          tokenTypes["EmptyTag"]):
            if token["name"] in self.allowed_elements:
                return self.allowed_token(token, token_type)
            else:
                # Unknown element: re-emit its markup as escaped text.
                return self.disallowed_token(token, token_type)
        elif token_type == tokenTypes["Comment"]:
            # Comments are removed (returns None implicitly).
            pass
        else:
            return token

    def allowed_token(self, token, token_type):
        """Filter the attributes of a whitelisted element token."""
        if "data" in token:
            # Build the attr dict from the reversed list so that, for
            # duplicate attributes, the FIRST occurrence wins.
            attrs = dict([(name, val) for name, val in
                          token["data"][::-1]
                          if name in self.allowed_attributes])
            for attr in self.attr_val_is_uri:
                if attr not in attrs:
                    continue
                # Strip control/space chars that browsers ignore, so
                # obfuscated schemes like 'java\0script:' are caught.
                val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
                                       unescape(attrs[attr])).lower()
                # remove replacement characters from unescaped characters
                val_unescaped = val_unescaped.replace("\ufffd", "")
                if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and
                    (val_unescaped.split(':')[0] not in
                     self.allowed_protocols)):
                    del attrs[attr]
            for attr in self.svg_attr_val_allows_ref:
                if attr in attrs:
                    # Drop url(...) references from SVG paint attributes.
                    attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
                                         ' ',
                                         unescape(attrs[attr]))
            if (token["name"] in self.svg_allow_local_href and
                'xlink:href' in attrs and re.search('^\s*[^#\s].*',
                                                    attrs['xlink:href'])):
                # Only fragment-local xlink:href values are allowed here.
                del attrs['xlink:href']
            if 'style' in attrs:
                attrs['style'] = self.sanitize_css(attrs['style'])
            token["data"] = [[name, val] for name, val in list(attrs.items())]
        return token

    def disallowed_token(self, token, token_type):
        """Turn a non-whitelisted tag token into an escaped text token."""
        if token_type == tokenTypes["EndTag"]:
            token["data"] = "</%s>" % token["name"]
        elif token["data"]:
            # NOTE(review): attribute *values* are escaped but element and
            # attribute names are interpolated as-is -- presumably safe
            # because the tokenizer constrains them; confirm upstream.
            attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
            token["data"] = "<%s%s>" % (token["name"], attrs)
        else:
            token["data"] = "<%s>" % token["name"]
        if token.get("selfClosing"):
            token["data"] = token["data"][:-1] + "/>"
        if token["type"] in list(tokenTypes.keys()):
            token["type"] = "Characters"
        else:
            token["type"] = tokenTypes["Characters"]
        del token["name"]
        return token

    def sanitize_css(self, style):
        """Return `style` reduced to whitelisted CSS, or '' if suspect."""
        # disallow urls
        style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
        # gauntlet: reject the whole style attribute outright if it
        # contains characters outside a conservative safe set, or does
        # not parse as a simple 'prop: value;' sequence.
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
            return ''
        if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
            return ''
        clean = []
        for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
            if not value:
                continue
            if prop.lower() in self.allowed_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
                                                'padding']:
                # Shorthand properties: every token must be a whitelisted
                # keyword or a simple color/length literal.
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                            not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif prop.lower() in self.allowed_svg_properties:
                clean.append(prop + ': ' + value + ';')
        return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
    """Tokenizer that sanitizes tokens as they are produced."""
    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=False, lowercaseAttrName=False, parser=None):
        # Change case matching defaults as we only output lowercase html anyway
        # This solution doesn't seem ideal...
        HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
                               lowercaseElementName, lowercaseAttrName, parser=parser)

    def __iter__(self):
        # Yield only tokens that survive sanitization (comments are dropped).
        for token in HTMLTokenizer.__iter__(self):
            token = self.sanitize_token(token)
            if token:
                yield token
| mit |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/networkx/algorithms/approximation/matching.py | 10 | 1159 | # -*- coding: utf-8 -*-
"""
**************
Graph Matching
**************
Given a graph G = (V,E), a matching M in G is a set of pairwise non-adjacent
edges; that is, no two edges share a common vertex.
http://en.wikipedia.org/wiki/Matching_(graph_theory)
"""
# Copyright (C) 2011-2012 by
# Nicholas Mancuso <nick.mancuso@gmail.com>
# All rights reserved.
# BSD license.
import networkx as nx
__all__ = ["min_maximal_matching"]
__author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)"""
def min_maximal_matching(graph):
    """Returns a set of edges such that no two edges share a common endpoint
    and every edge not in the set shares some common endpoint in the set.

    Parameters
    ----------
    graph : NetworkX graph
      Undirected graph

    Returns
    -------
    min_maximal_matching : set
      A maximal matching of the graph.  Its cardinality is at most twice
      that of a minimum maximal matching (2*OPT in the worst case).

    References
    ----------
    .. [1] Vazirani, Vijay Approximation Algorithms (2001)
    """
    # Any greedily-built maximal matching is a 2-approximation for the
    # minimum maximal matching problem, so delegate to NetworkX's core
    # implementation.
    return nx.maximal_matching(graph)
| agpl-3.0 |
mdomke/signaling | tests/test_signals.py | 1 | 3866 | import mock
import pytest
from signaling.exceptions import InvalidEmit
from signaling.exceptions import InvalidSlot
from signaling import Signal
class Receiver(object):
    """Test helper exposing a bound-method slot that records calls."""

    def __init__(self):
        self.m = mock.Mock()

    def slot(self):
        # Forward to the mock so tests can assert on invocation counts.
        self.m()
class TestSignalSlot(object):
    """Behavioral tests for Signal connect/disconnect/emit semantics."""
    def setup_method(self, method):
        # Fresh signal and call-recording sentinels before every test.
        self.signal = Signal(name='emitter')
        self.sentinel_a = mock.Mock()
        self.sentinel_b = mock.Mock()
    def slot_a(self):
        self.sentinel_a()
    def slot_b(self):
        self.sentinel_b()
    def test_connect(self):
        # Connecting registers slots without removing earlier ones.
        self.signal.connect(self.slot_a)
        assert self.slot_a in self.signal.slots
        self.signal.connect(self.slot_b)
        assert self.slot_a in self.signal.slots
        assert self.slot_b in self.signal.slots
    def test_connect_with_incompatible_slot_arg_count(self):
        # Slot signature must match the signal's declared args exactly.
        def slot_a():
            pass
        with pytest.raises(InvalidSlot):
            Signal(args=['foo']).connect(slot_a)
        def slot_b(foo):
            pass
        with pytest.raises(InvalidSlot):
            Signal().connect(slot_b)
    def test_connect_with_incompatible_slot_arg_name(self):
        # Argument names (not just count) must match.
        def slot(foo):
            pass
        with pytest.raises(InvalidSlot):
            Signal(args=['bar']).connect(slot)
    def test_disconnect(self):
        # Disconnecting removes only the given slot.
        self.test_connect()
        self.signal.disconnect(self.slot_a)
        assert self.slot_a not in self.signal.slots
        assert self.slot_b in self.signal.slots
        self.signal.disconnect(self.slot_b)
        assert self.slot_a not in self.signal.slots
        assert self.slot_b not in self.signal.slots
    def test_emit_with_one_slot(self):
        self.signal.connect(self.slot_a)
        self.signal.emit()
        self.sentinel_a.assert_called_once_with()
        assert self.sentinel_b.call_count == 0
    def test_emit_with_two_slots(self):
        # Every connected slot is invoked exactly once per emit.
        self.signal.connect(self.slot_a)
        self.signal.connect(self.slot_b)
        self.signal.emit()
        self.sentinel_a.assert_called_once_with()
        self.sentinel_b.assert_called_once_with()
    def test_emit_with_args(self):
        # Declared args are forwarded to slots as keywords.
        def slot(foo, bar):
            self.sentinel_a(foo=foo, bar=bar)
        signal = Signal(args=['foo', 'bar'])
        signal.connect(slot)
        signal.emit(foo=1, bar=2)
        self.sentinel_a.assert_called_once_with(foo=1, bar=2)
    def test_emit_with_missing_args(self):
        # Emitting with fewer args than declared is rejected.
        def slot(foo, bar):
            self.sentinel_a(foo, bar)
        signal = Signal(args=['foo', 'bar'])
        signal.connect(slot)
        with pytest.raises(InvalidEmit):
            signal.emit(foo=1)
        self.sentinel_a.assert_not_called()
    def test_emit_with_superfluous_args(self):
        # Emitting with extra args is rejected.
        def slot(foo):
            self.sentinel_a(foo)
        signal = Signal(args=['foo'])
        signal.connect(slot)
        with pytest.raises(InvalidEmit):
            signal.emit(foo=1, bar=2)
        self.sentinel_a.assert_not_called()
    def test_emit_with_superfluous_args_none_expected(self):
        # Extra args are rejected even when no args were declared.
        def slot():
            self.sentinel_a()
        signal = Signal()
        signal.connect(slot)
        with pytest.raises(InvalidEmit):
            signal.emit(foo=1)
        self.sentinel_a.assert_not_called()
    def test_emit_with_method_slot(self):
        # Bound methods work as slots too.
        signal = Signal()
        receiver = Receiver()
        signal.connect(receiver.slot)
        signal.emit()
        receiver.m.assert_called_with()
    def test_repr(self):
        # repr reflects the signal name and current slot count.
        signal = Signal()
        assert repr(signal) == u"<Signal: 'anonymous'. Slots=0>"
        signal.connect(self.slot_a)
        assert repr(signal) == u"<Signal: 'anonymous'. Slots=1>"
    def test_equality(self):
        # Signals compare equal iff their slot sets match.
        other = Signal()
        assert self.signal == other
        self.signal.connect(self.slot_a)
        assert self.signal != other
| mit |
Akrog/cinder | cinder/api/extensions.py | 4 | 12939 | # Copyright 2011 OpenStack Foundation
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from oslo_utils import importutils
import webob.dec
import webob.exc
import cinder.api.openstack
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
import cinder.policy
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ExtensionDescriptor(object):
    """Base class that defines the contract for extensions.

    Note that you don't have to derive from this class to have a valid
    extension; it is purely a convenience.
    """

    # The name of the extension, e.g., 'Fox In Socks'
    name = None

    # The alias for the extension, e.g., 'FOXNSOX'
    alias = None

    # Description comes from the docstring for the class

    # The XML namespace for the extension, e.g.,
    # 'http://www.fox.in.socks/api/ext/pie/v1.0'
    namespace = None

    # The timestamp when the extension was last updated, e.g.,
    # '2011-01-22T13:25:27-06:00'
    updated = None

    def __init__(self, ext_mgr):
        """Register extension with the extension manager."""
        ext_mgr.register(self)
        self.ext_mgr = ext_mgr

    def get_resources(self):
        """List of extensions.ResourceExtension extension objects.

        Resources define new nouns, and are accessible through URLs.
        Subclasses override this to expose their resources.
        """
        return []

    def get_controller_extensions(self):
        """List of extensions.ControllerExtension extension objects.

        Controller extensions are used to extend existing controllers.
        Subclasses override this to expose their controller extensions.
        """
        return []

    @classmethod
    def nsmap(cls):
        """Synthesize a namespace map from extension."""
        # Start from the shared base map and overlay this extension's
        # alias -> namespace entry without mutating the shared dict.
        mapping = dict(ext_nsmap)
        mapping[cls.alias] = cls.namespace
        return mapping

    @classmethod
    def xmlname(cls, name):
        """Synthesize element and attribute names."""
        return '{%s}%s' % (cls.namespace, name)
def make_ext(elem):
    """Attach the standard extension attributes/subelements to *elem*.

    Used by both the single-extension and extension-list XML templates so
    they serialize identically.
    """
    # NOTE(review): xmlutil.TemplateElement.set() with a single argument
    # presumably maps the XML attribute to the same-named key of the
    # selected extension dict — confirm against cinder.api.xmlutil.
    elem.set('name')
    elem.set('namespace')
    elem.set('alias')
    elem.set('updated')

    desc = xmlutil.SubTemplateElement(elem, 'description')
    desc.text = 'description'

    xmlutil.make_links(elem, 'links')
ext_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class ExtensionTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single extension (show)."""

    def construct(self):
        root = xmlutil.TemplateElement('extension', selector='extension')
        make_ext(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
class ExtensionsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for the extension list (index)."""

    def construct(self):
        root = xmlutil.TemplateElement('extensions')
        elem = xmlutil.SubTemplateElement(root, 'extension',
                                          selector='extensions')
        make_ext(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
class ExtensionsResource(wsgi.Resource):
    """REST resource that lists and shows the loaded API extensions."""

    def __init__(self, extension_manager):
        self.extension_manager = extension_manager
        super(ExtensionsResource, self).__init__(None)

    def _translate(self, ext):
        """Return the serializable dict view of one extension object."""
        ext_data = {}
        ext_data['name'] = ext.name
        ext_data['alias'] = ext.alias
        ext_data['description'] = ext.__doc__
        ext_data['namespace'] = ext.namespace
        ext_data['updated'] = ext.updated
        ext_data['links'] = []  # TODO(dprince): implement extension links
        return ext_data

    @wsgi.serializers(xml=ExtensionsTemplate)
    def index(self, req):
        """List all loaded extensions."""
        extensions = []
        # NOTE: use items() instead of the Python 2-only iteritems() so this
        # also runs under Python 3; the extra copy on Python 2 is negligible
        # for the small extensions dict.
        for _alias, ext in self.extension_manager.extensions.items():
            extensions.append(self._translate(ext))
        return dict(extensions=extensions)

    @wsgi.serializers(xml=ExtensionTemplate)
    def show(self, req, id):
        """Show one extension, looked up by alias."""
        try:
            # NOTE(dprince): the extensions alias is used as the 'id' for show
            ext = self.extension_manager.extensions[id]
        except KeyError:
            raise webob.exc.HTTPNotFound()

        return dict(extension=self._translate(ext))

    def delete(self, req, id):
        """Extensions cannot be deleted through the API."""
        raise webob.exc.HTTPNotFound()

    def create(self, req):
        """Extensions cannot be created through the API."""
        raise webob.exc.HTTPNotFound()
class ExtensionManager(object):
    """Load extensions from the configured extension path.

    See cinder/tests/api/extensions/foxinsocks/extension.py for an
    example extension implementation.
    """

    def __init__(self):
        LOG.info(_LI('Initializing extension manager.'))
        self.cls_list = CONF.osapi_volume_extension
        self.extensions = {}
        self._load_extensions()

    def is_loaded(self, alias):
        """Return True if an extension with this alias is registered."""
        return alias in self.extensions

    def register(self, ext):
        """Register one extension object; reject invalid or duplicate ones."""
        # Do nothing if the extension doesn't check out
        if not self._check_extension(ext):
            return

        alias = ext.alias
        LOG.info(_LI('Loaded extension: %s'), alias)
        if alias in self.extensions:
            raise exception.Error("Found duplicate extension: %s" % alias)
        self.extensions[alias] = ext

    def get_resources(self):
        """Returns a list of ResourceExtension objects."""
        resources = []
        # The extension-listing resource itself is always available.
        resources.append(ResourceExtension('extensions',
                                           ExtensionsResource(self)))

        for ext in self.extensions.values():
            try:
                resources.extend(ext.get_resources())
            except AttributeError:
                # NOTE(dprince): Extension aren't required to have resource
                # extensions
                pass
        return resources

    def get_controller_extensions(self):
        """Returns a list of ControllerExtension objects."""
        controller_exts = []
        for ext in self.extensions.values():
            try:
                get_ext_method = ext.get_controller_extensions
            except AttributeError:
                # NOTE(Vek): Extensions aren't required to have
                # controller extensions
                continue
            controller_exts.extend(get_ext_method())
        return controller_exts

    def _check_extension(self, extension):
        """Checks for required methods in extension objects."""
        try:
            LOG.debug('Ext name: %s', extension.name)
            LOG.debug('Ext alias: %s', extension.alias)
            LOG.debug('Ext description: %s',
                      ' '.join(extension.__doc__.strip().split()))
            LOG.debug('Ext namespace: %s', extension.namespace)
            LOG.debug('Ext updated: %s', extension.updated)
        except AttributeError as ex:
            # NOTE: pass the exception object directly instead of the
            # Python 2-only unicode() builtin; %s formatting renders the
            # message correctly on both Python 2 and Python 3.
            LOG.exception(_LE("Exception loading extension: %s"), ex)
            return False

        return True

    def load_extension(self, ext_factory):
        """Execute an extension factory.

        Loads an extension. The 'ext_factory' is the name of a
        callable that will be imported and called with one
        argument--the extension manager. The factory callable is
        expected to call the register() method at least once.
        """
        LOG.debug("Loading extension %s", ext_factory)

        # Load the factory
        factory = importutils.import_class(ext_factory)

        # Call it
        LOG.debug("Calling extension factory %s", ext_factory)
        factory(self)

    def _load_extensions(self):
        """Load extensions specified on the command line."""
        extensions = list(self.cls_list)

        for ext_factory in extensions:
            try:
                self.load_extension(ext_factory)
            except Exception as exc:
                # Best-effort loading: one broken extension must not stop
                # the rest of the API from coming up.
                LOG.warn(_LW('Failed to load extension %(ext_factory)s: '
                             '%(exc)s'),
                         {'ext_factory': ext_factory, 'exc': exc})
class ControllerExtension(object):
    """Extend core controllers of cinder OpenStack API.

    Provide a way to extend existing cinder OpenStack API core
    controllers.
    """

    def __init__(self, extension, collection, controller):
        # Simple value object: remember which extension contributes this
        # controller and which resource collection it extends.
        self.extension, self.collection, self.controller = (
            extension, collection, controller)
class ResourceExtension(object):
    """Add top level resources to the OpenStack API in cinder."""

    def __init__(self, collection, controller, parent=None,
                 collection_actions=None, member_actions=None,
                 custom_routes_fn=None):
        self.collection = collection
        self.controller = controller
        self.parent = parent
        # Falsy action maps (None or an empty dict) are normalized to a
        # fresh empty dict, exactly as the explicit `if not ...` checks did.
        self.collection_actions = collection_actions or {}
        self.member_actions = member_actions or {}
        self.custom_routes_fn = custom_routes_fn
def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
    """Registers all standard API extensions.

    :param ext_mgr: ExtensionManager used to load each extension
    :param logger: logger for skip/failure messages
    :param path: package ``__path__`` list; ``path[0]`` is walked recursively
    :param package: dotted package name used to build import paths
    :param ext_list: optional whitelist of extension class names; when given,
                     classes not in the list are skipped
    """
    # Walk through all the modules in our directory...
    our_dir = path[0]
    for dirpath, dirnames, filenames in os.walk(our_dir):
        # Compute the relative package name from the dirpath
        relpath = os.path.relpath(dirpath, our_dir)
        if relpath == '.':
            relpkg = ''
        else:
            relpkg = '.%s' % '.'.join(relpath.split(os.sep))

        # Now, consider each file in turn, only considering .py files
        for fname in filenames:
            root, ext = os.path.splitext(fname)

            # Skip __init__ and anything that's not .py
            if ext != '.py' or root == '__init__':
                continue

            # Try loading it: module foo.py is expected to define class Foo
            # (first letter capitalized).
            classname = "%s%s" % (root[0].upper(), root[1:])
            classpath = ("%s%s.%s.%s" %
                         (package, relpkg, root, classname))

            if ext_list is not None and classname not in ext_list:
                logger.debug("Skipping extension: %s" % classpath)
                continue

            try:
                ext_mgr.load_extension(classpath)
            except Exception as exc:
                logger.warn(_('Failed to load extension %(classpath)s: '
                              '%(exc)s'),
                            {'classpath': classpath, 'exc': exc})

        # Now, let's consider any subdirectories we may have...
        subdirs = []
        for dname in dirnames:
            # Skip it if it does not have __init__.py
            if not os.path.exists(os.path.join(dirpath, dname,
                                               '__init__.py')):
                continue

            # If it has extension(), delegate...
            ext_name = ("%s%s.%s.extension" %
                        (package, relpkg, dname))
            try:
                ext = importutils.import_class(ext_name)
            except ImportError:
                # extension() doesn't exist on it, so we'll explore
                # the directory for ourselves
                subdirs.append(dname)
            else:
                try:
                    ext(ext_mgr)
                except Exception as exc:
                    logger.warn(_('Failed to load extension %(ext_name)s: '
                                  '%(exc)s'),
                                {'ext_name': ext_name, 'exc': exc})

        # Update the list of directories we'll explore...
        # NOTE: mutating dirnames in place is the documented os.walk way to
        # prune descent — only packages without an extension() callable are
        # walked further.
        dirnames[:] = subdirs
def extension_authorizer(api_name, extension_name):
    """Build a policy-enforcing authorizer for one API extension.

    The returned callable raises (via cinder.policy.enforce) when the
    context is not allowed to perform the extension action.
    """
    # The policy rule prefix is fixed per factory call, so build it once.
    base_act = '%s_extension:%s' % (api_name, extension_name)

    def authorize(context, target=None, action=None):
        if target is None:
            target = {'project_id': context.project_id,
                      'user_id': context.user_id}
        act = base_act if action is None else '%s:%s' % (base_act, action)
        cinder.policy.enforce(context, act, target)

    return authorize
def soft_extension_authorizer(api_name, extension_name):
    """Like extension_authorizer(), but returns True/False instead of raising."""
    hard_authorize = extension_authorizer(api_name, extension_name)

    def authorize(context):
        try:
            hard_authorize(context)
        except exception.NotAuthorized:
            return False
        return True

    return authorize
| apache-2.0 |
mikemintz/neutron | modules/iq.py | 1 | 4752 | # -*- coding: koi8-r -*-
## OJAB iq module
## Copyright (C) Boris Kotov <admin@avoozl.ru>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
# Modified by me :) Gh0st AKA Bohdan Turkynewych
import os, xmpp, time
messages=None
global version
global vername
ver_queue={}
time_queue={}
iq_id=1
def versioncmd(conn, msg, args, replyto):
    """Handle a version command: send a jabber:iq:version query.

    With no argument the query targets the sender's own JID; otherwise it
    targets the MUC occupant <replyto>/<args>.
    """
    if args=="":
        target=msg.getFrom()
    else:
        target=("%s/%s"%(replyto, args))
    req=xmpp.protocol.Iq('get', xmpp.NS_VERSION, {}, target)
    req.setID(iq_id)
    # Remember who asked so the async reply can be routed back.  The False
    # flag marks this as a plain version request (pingcmd stores a
    # timestamp in the same slot).
    ver_queue[str(iq_id)]=[replyto, msg.getFrom().getResource(), False]
    conn.send(req)
    # Bump the module-level counter; globals() is used instead of a
    # `global iq_id` declaration, with the same effect.
    globals()['iq_id']+=1
def pingcmd(conn, msg, args, replyto):
    """Handle a ping command: measure round-trip time via a version query.

    Deliberately reuses jabber:iq:version (not XEP-0199 ping); the reply
    handler computes the latency from the timestamp stored below.
    """
    if args=="":
        target=msg.getFrom()
    else:
        target=("%s/%s"%(replyto, args))
    req=xmpp.protocol.Iq('get', xmpp.NS_VERSION, {}, target)
    req.setID(iq_id)
    # time.time() in the third slot marks this entry as a ping request.
    ver_queue[str(iq_id)]=[replyto, msg.getFrom().getResource(), time.time()]
    conn.send(req)
    globals()['iq_id']+=1
def timecmd(conn, msg, args, replyto):
    """Handle a time command: send a jabber:iq:time query."""
    if args=="":
        target=msg.getFrom()
    else:
        target=("%s/%s"%(replyto, args))
    req=xmpp.protocol.Iq('get', xmpp.NS_TIME, {}, target)
    req.setID(iq_id)
    # Queue the requester so timeresultCB can route the reply.
    time_queue[str(iq_id)]=[replyto, msg.getFrom().getResource()]
    conn.send(req)
    globals()['iq_id']+=1
def versionCB(conn, iq_obj):
    """Answer an incoming jabber:iq:version query with our own info."""
    # Build the OS string from `uname -sr` plus the Python version
    # (`python -V` historically printed to stderr, hence the 2>&1).
    uname=os.popen("uname -sr", 'r')
    osver=uname.read().strip()
    uname.close()
    pipe = os.popen('sh -c ' + '"' + 'python -V 2>&1' + '"')
    python_ver = pipe.read(1024).strip()
    osver = osver + ' ' + python_ver
    iq_obj=iq_obj.buildReply('result')
    qp=iq_obj.getTag('query')
    # vername/version are module-level globals set elsewhere by the bot.
    qp.setTagData('name', vername)
    qp.setTagData('version', version)
    qp.setTagData('os', osver)
    conn.send(iq_obj)
    # Tell the xmpp dispatcher this stanza has been fully handled.
    raise xmpp.NodeProcessed
def versionresultCB(conn, iq_obj):
    """Handle a jabber:iq:version result: report version or ping time.

    Matches the reply to the pending request in ver_queue by iq id; the
    third queue slot distinguishes ping (timestamp) from version (False).
    """
    qp=iq_obj.getTag('query')
    rname=qp.getTagData('name')
    rversion=qp.getTagData('version')
    ros=qp.getTagData('os')
    rid=iq_obj.getID()
    if ver_queue.has_key(rid):  # NOTE: dict.has_key() — Python 2 only
        if ver_queue[rid][2]:
            # Ping request: report round-trip latency in seconds.
            if ver_queue[rid][1]==iq_obj.getFrom().getResource():
                # Requester pinged themselves.
                conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['yourping']%(ver_queue[rid][1], str(round(time.time()-ver_queue[rid][2],3))), 'groupchat'))
            else:
                conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['ping']%(ver_queue[rid][1], iq_obj.getFrom().getResource(), str(round(time.time()-ver_queue[rid][2],3))), 'groupchat'))
        else:
            # Version request: report client name/version/OS.
            if ver_queue[rid][1]==iq_obj.getFrom().getResource():
                conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['yourversion']%(ver_queue[rid][1], rname, rversion, ros), 'groupchat'))
            else:
                conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['version']%(ver_queue[rid][1], iq_obj.getFrom().getResource(), rname, rversion, ros), 'groupchat'))
def versionerrorCB(conn, iq_obj):
    """Report an error reply to a pending version or ping request."""
    rid=iq_obj.getID()
    if ver_queue.has_key(rid):  # NOTE: dict.has_key() — Python 2 only
        if ver_queue[rid][2]:
            # Timestamp present -> it was a ping request.
            conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['ping_error']%(ver_queue[rid][1], iq_obj.getFrom().getResource()), 'groupchat'))
        else:
            conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['version_error']%(ver_queue[rid][1], iq_obj.getFrom().getResource()), 'groupchat'))
def timeCB(conn, iq_obj):
    """Answer an incoming jabber:iq:time query using the system `date`."""
    # UTC timestamp in the compact ISO form the protocol expects.
    timep=os.popen("date -u '+%Y%m%dT%T'", 'r'); futc=timep.read(17); timep.close()
    # Local time as "TZ|display|"; split on '|' below.
    timep=os.popen("date '+%Z|%d/%m/%Y %T|'", 'r'); ftime=timep.read(); timep.close()
    iq_obj = iq_obj.buildReply('result')
    qp = iq_obj.getTag('query')
    qp.setTagData('utc', futc)
    qp.setTagData('tz', ftime.split("|")[0])
    qp.setTagData('display', ftime.split("|")[1])
    conn.send(iq_obj)
    # Tell the xmpp dispatcher this stanza has been fully handled.
    raise xmpp.NodeProcessed
def timeresultCB(conn, iq_obj):
    """Handle a jabber:iq:time result and report it to the MUC."""
    qp=iq_obj.getTag('query')
    rdisplay=qp.getTagData('display')
    rid=iq_obj.getID()
    if time_queue.has_key(rid):  # NOTE: dict.has_key() — Python 2 only
        if time_queue[rid][1]==iq_obj.getFrom().getResource():
            # Requester asked for their own time.
            conn.send(xmpp.protocol.Message(time_queue[rid][0], messages['yourtime']%(time_queue[rid][1], rdisplay), 'groupchat'))
        else:
            conn.send(xmpp.protocol.Message(time_queue[rid][0], messages['time']%(time_queue[rid][1], iq_obj.getFrom().getResource(), rdisplay), 'groupchat'))
def timeerrorCB(conn, iq_obj):
    """Report an error reply to a pending time request."""
    rid=iq_obj.getID()
    if time_queue.has_key(rid):  # NOTE: dict.has_key() — Python 2 only
        conn.send(xmpp.protocol.Message(time_queue[rid][0], messages['time_error']%(time_queue[rid][1], iq_obj.getFrom().getResource()), 'groupchat'))
| gpl-2.0 |
xfournet/intellij-community | plugins/hg4idea/testData/bin/mercurial/lock.py | 92 | 4921 | # lock.py - simple advisory locking scheme for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import util, error
import errno, os, socket, time
import warnings
class lock(object):
    '''An advisory lock held by one process to control access to a set
    of files. Non-cooperating processes or incorrectly written scripts
    can ignore Mercurial's locking scheme and stomp all over the
    repository, so don't do that.

    Typically used via localrepository.lock() to lock the repository
    store (.hg/store/) or localrepository.wlock() to lock everything
    else under .hg/.'''

    # lock is symlink on platforms that support it, file on others.

    # symlink is used because create of directory entry and contents
    # are atomic even over nfs.

    # old-style lock: symlink to pid
    # new-style lock: symlink to hostname:pid

    # Cached hostname; filled in lazily on first trylock().
    _host = None

    def __init__(self, file, timeout=-1, releasefn=None, desc=None):
        # file: path of the lock file; timeout: seconds to wait (-1 = forever)
        # releasefn: optional callback run on final release; desc: human text.
        self.f = file
        self.held = 0
        self.timeout = timeout
        self.releasefn = releasefn
        self.desc = desc
        self.postrelease = []
        self.pid = os.getpid()
        # Acquire immediately; constructing a lock object takes the lock.
        self.lock()

    def __del__(self):
        if self.held:
            warnings.warn("use lock.release instead of del lock",
                          category=DeprecationWarning,
                          stacklevel=2)

            # ensure the lock will be removed
            # even if recursive locking did occur
            self.held = 1
            self.release()

    def lock(self):
        """Acquire the lock, retrying once per second until timeout expires."""
        timeout = self.timeout
        while True:
            try:
                self.trylock()
                return 1
            except error.LockHeld, inst:
                # timeout < 0 means wait forever; 0 means fail immediately.
                if timeout != 0:
                    time.sleep(1)
                    if timeout > 0:
                        timeout -= 1
                    continue
                raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
                                     inst.locker)

    def trylock(self):
        """Attempt a single acquisition; raise LockHeld if someone has it."""
        if self.held:
            # Re-entrant within one process: just bump the hold count.
            self.held += 1
            return
        if lock._host is None:
            lock._host = socket.gethostname()
        lockname = '%s:%s' % (lock._host, self.pid)
        while not self.held:
            try:
                # Atomic create (symlink where supported, see class comment).
                util.makelock(lockname, self.f)
                self.held = 1
            except (OSError, IOError), why:
                if why.errno == errno.EEXIST:
                    # Lock file exists: see whether its owner is still alive;
                    # testlock() removes stale locks, so loop and retry.
                    locker = self.testlock()
                    if locker is not None:
                        raise error.LockHeld(errno.EAGAIN, self.f, self.desc,
                                             locker)
                else:
                    raise error.LockUnavailable(why.errno, why.strerror,
                                                why.filename, self.desc)

    def testlock(self):
        """return id of locker if lock is valid, else None.

        If old-style lock, we cannot tell what machine locker is on.
        with new-style lock, if locker is on this machine, we can
        see if locker is alive. If locker is on this machine but
        not alive, we can safely break lock.

        The lock file is only deleted when None is returned.

        """
        try:
            locker = util.readlock(self.f)
        except OSError, why:
            if why.errno == errno.ENOENT:
                return None
            raise
        try:
            host, pid = locker.split(":", 1)
        except ValueError:
            # Old-style lock (bare pid): cannot validate, assume held.
            return locker
        if host != lock._host:
            # Held by another machine: cannot check liveness, assume held.
            return locker
        try:
            pid = int(pid)
        except ValueError:
            return locker
        if util.testpid(pid):
            return locker
        # if locker dead, break lock. must do this with another lock
        # held, or can race and break valid lock.
        try:
            l = lock(self.f + '.break', timeout=0)
            util.unlink(self.f)
            l.release()
        except error.LockError:
            return locker

    def release(self):
        """release the lock and execute callback function if any

        If the lock has been acquired multiple times, the actual release is
        delayed to the last release call."""
        if self.held > 1:
            self.held -= 1
        elif self.held == 1:
            self.held = 0
            if os.getpid() != self.pid:
                # we forked, and are not the parent
                return
            if self.releasefn:
                self.releasefn()
            try:
                util.unlink(self.f)
            except OSError:
                pass
            # Run post-release hooks only after the lock file is gone.
            for callback in self.postrelease:
                callback()
def release(*locks):
    """Release each lock in *locks*, skipping entries that are None."""
    for lk in locks:
        if lk is not None:
            lk.release()
| apache-2.0 |
samatdav/zulip | zerver/webhooks/mention/view.py | 26 | 1389 | # Webhooks for external integrations.
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.lib.validator import check_dict, check_string
from zerver.models import Client, UserProfile
from django.http import HttpRequest, HttpResponse
from typing import Dict, Any, Iterable, Optional, Text
@api_key_only_webhook_view('Mention')
@has_request_variables
def api_mention_webhook(request, user_profile, client,
                        payload=REQ(argument_type='body'),
                        stream=REQ(default='mention'),
                        topic=REQ(default='news')):
    # type: (HttpRequest, UserProfile, Client, Dict[str, Iterable[Dict[str, Any]]], Text, Optional[Text]) -> HttpResponse
    """Handle an incoming Mention webhook and post the alert to a stream.

    The decorators authenticate the bot's API key and parse the JSON body
    plus the optional `stream`/`topic` query parameters.
    """
    try:
        title = payload["title"]
        source_url = payload["url"]
        description = payload["description"]
    except KeyError as e:
        # Reject payloads missing any of the three required keys.
        return json_error(_("Missing key {} in JSON").format(str(e)))

    # construct the body of the message
    body = '**[%s](%s)**:\n%s' % (title, source_url, description)

    # send the message
    check_send_message(user_profile, client, 'stream', [stream], topic, body)

    return json_success()
smurfix/pybble | pybble/cache/__init__.py | 1 | 1978 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This is part of Pybble, a WMS (Whatever Management System) based on
## Jinja2/Haml, Werkzeug, Flask, and Optimism.
##
## Pybble is Copyright © 2009-2014 by Matthias Urlichs <matthias@urlichs.de>,
## it is licensed under the GPLv3. See the file `README.md` for details,
## including an optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
regions = None
from dogpile.cache.api import NO_VALUE
def keystr(args):
    """Build the canonical cache-key string for *args*.

    Each component is stringified and the pieces are joined with '|'.
    """
    return '|'.join(map(str, args))
## TODO: add keyword-only region param
def delete(*args):
    """Delete a cache value (or a bunch of them).

    Returns the number of keys deleted, or None when caching is disabled.
    A "*" component turns the joined key into a wildcard pattern.
    """
    global regions
    if regions is None:
        # Lazy import to avoid a circular import at module load time;
        # `global regions` makes the import bind the module-level name.
        from .config import regions
    if not regions:
        return
    # TODO: this only works with redis
    # NOTE(review): reaches into the dogpile backend's raw redis client;
    # other backends do not expose a .client attribute — confirm the
    # configured backend before changing regions.
    r = regions['default'].backend.client
    n = 0
    if "*" in args:
        # keystr() passes the '*' through, so the joined key doubles as a
        # redis glob pattern matching many keys.
        for k in r.keys(keystr(args)):
            r.delete(k)
            n += 1
    else:
        r.delete(keystr(args))
        n = 1
    return n
def get(*args):
    """Get a cache value, or NO_VALUE if not set."""
    global regions
    if regions is None:
        # Lazy import avoids a circular import; binds the global name.
        from .config import regions
    if not regions:
        # Caching disabled entirely.
        return NO_VALUE
    return regions['default'].get(keystr(args))
def set(val, *args):
    """Set a cache value. You really should use cached() instead."""
    global regions
    if regions is None:
        # Lazy import avoids a circular import; binds the global name.
        from .config import regions
    if not regions:
        # Caching disabled entirely; silently drop the value.
        return
    regions['default'].set(keystr(args), val)
def cached(func, *args):
    """Cache this function's result. Runs the function exactly once."""
    global regions
    if regions is None:
        # Lazy import avoids a circular import; binds the global name.
        from .config import regions
    if not regions:
        # No cache configured: just call through.
        return func()
    return regions['default'].get_or_create(keystr(args), func)
| gpl-3.0 |
joker946/nova | nova/servicegroup/drivers/db.py | 8 | 3798 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from nova.i18n import _, _LE
from nova.servicegroup import api
from nova.servicegroup.drivers import base
CONF = cfg.CONF
CONF.import_opt('service_down_time', 'nova.service')
LOG = logging.getLogger(__name__)
class DbDriver(base.Driver):
    """ServiceGroup driver that tracks service liveness via DB heartbeats.

    A service is considered up while its last DB heartbeat is newer than
    CONF.service_down_time seconds.
    """

    def __init__(self, *args, **kwargs):
        # Snapshot the threshold once; extra args are accepted for driver
        # interface compatibility and ignored.
        self.service_down_time = CONF.service_down_time

    def join(self, member, group, service=None):
        """Add a new member to a service group.

        :param member: the joined member ID/name
        :param group: the group ID/name, of the joined member
        :param service: a `nova.service.Service` object
        """
        LOG.debug('DB_Driver: join new ServiceGroup member %(member)s to '
                  'the %(group)s group, service = %(service)s',
                  {'member': member, 'group': group,
                   'service': service})
        if service is None:
            raise RuntimeError(_('service is a mandatory argument for DB based'
                                 ' ServiceGroup driver'))
        report_interval = service.report_interval
        if report_interval:
            # Periodically persist a heartbeat for this service.
            service.tg.add_timer(report_interval, self._report_state,
                                 api.INITIAL_REPORTING_DELAY, service)

    def is_up(self, service_ref):
        """Moved from nova.utils
        Check whether a service is up based on last heartbeat.
        """
        last_heartbeat = service_ref['updated_at'] or service_ref['created_at']
        if isinstance(last_heartbeat, six.string_types):
            # NOTE(russellb) If this service_ref came in over rpc via
            # conductor, then the timestamp will be a string and needs to be
            # converted back to a datetime.
            last_heartbeat = timeutils.parse_strtime(last_heartbeat)
        else:
            # Objects have proper UTC timezones, but the timeutils comparison
            # below does not (and will fail)
            last_heartbeat = last_heartbeat.replace(tzinfo=None)
        # Timestamps in DB are UTC.
        elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
        is_up = abs(elapsed) <= self.service_down_time
        if not is_up:
            LOG.debug('Seems service is down. Last heartbeat was %(lhb)s. '
                      'Elapsed time is %(el)s',
                      {'lhb': str(last_heartbeat), 'el': str(elapsed)})
        return is_up

    def _report_state(self, service):
        """Update the state of this service in the datastore."""
        try:
            service.service_ref.report_count += 1
            service.service_ref.save()

            # TODO(termie): make this pattern be more elegant.
            if getattr(service, 'model_disconnected', False):
                service.model_disconnected = False
                LOG.error(_LE('Recovered model server connection!'))

        # TODO(vish): this should probably only catch connection errors
        except Exception:
            # Log the loss once; keep the timer running so the heartbeat
            # resumes automatically when the DB comes back.
            if not getattr(service, 'model_disconnected', False):
                service.model_disconnected = True
                LOG.exception(_LE('model server went away'))
MrLoick/python-for-android | python3-alpha/python3-src/Lib/unittest/test/test_runner.py | 785 | 10718 | import io
import os
import sys
import pickle
import subprocess
import unittest
from .support import LoggingResult, ResultWithNoStartTestRunStopTestRun
class TestCleanUp(unittest.TestCase):
    """Tests for TestCase.addCleanup / doCleanups behaviour."""

    def testCleanUp(self):
        class TestableTest(unittest.TestCase):
            def testNothing(self):
                pass

        test = TestableTest('testNothing')
        self.assertEqual(test._cleanups, [])

        cleanups = []

        def cleanup1(*args, **kwargs):
            cleanups.append((1, args, kwargs))

        def cleanup2(*args, **kwargs):
            cleanups.append((2, args, kwargs))

        test.addCleanup(cleanup1, 1, 2, 3, four='hello', five='goodbye')
        test.addCleanup(cleanup2)

        self.assertEqual(test._cleanups,
                         [(cleanup1, (1, 2, 3), dict(four='hello', five='goodbye')),
                          (cleanup2, (), {})])

        self.assertTrue(test.doCleanups())
        # Cleanups run in LIFO order.
        self.assertEqual(cleanups, [(2, (), {}), (1, (1, 2, 3), dict(four='hello', five='goodbye'))])

    def testCleanUpWithErrors(self):
        class TestableTest(unittest.TestCase):
            def testNothing(self):
                pass

        class MockOutcome(object):
            success = True
            errors = []

        test = TestableTest('testNothing')
        test._outcomeForDoCleanups = MockOutcome

        exc1 = Exception('foo')
        exc2 = Exception('bar')

        def cleanup1():
            raise exc1

        def cleanup2():
            raise exc2

        test.addCleanup(cleanup1)
        test.addCleanup(cleanup2)

        # A failing cleanup makes doCleanups() return False and record the
        # exceptions on the outcome object.
        self.assertFalse(test.doCleanups())
        self.assertFalse(MockOutcome.success)

        (Type1, instance1, _), (Type2, instance2, _) = reversed(MockOutcome.errors)
        self.assertEqual((Type1, instance1), (Exception, exc1))
        self.assertEqual((Type2, instance2), (Exception, exc2))

    def testCleanupInRun(self):
        blowUp = False
        ordering = []

        class TestableTest(unittest.TestCase):
            def setUp(self):
                ordering.append('setUp')
                if blowUp:
                    raise Exception('foo')

            def testNothing(self):
                ordering.append('test')

            def tearDown(self):
                ordering.append('tearDown')

        test = TestableTest('testNothing')

        def cleanup1():
            ordering.append('cleanup1')

        def cleanup2():
            ordering.append('cleanup2')

        test.addCleanup(cleanup1)
        test.addCleanup(cleanup2)

        def success(some_test):
            self.assertEqual(some_test, test)
            ordering.append('success')

        result = unittest.TestResult()
        result.addSuccess = success

        test.run(result)
        self.assertEqual(ordering, ['setUp', 'test', 'tearDown',
                                    'cleanup2', 'cleanup1', 'success'])

        blowUp = True
        ordering = []
        test = TestableTest('testNothing')
        test.addCleanup(cleanup1)
        test.run(result)
        # When setUp raises, the test body and tearDown are skipped but
        # registered cleanups still run.
        self.assertEqual(ordering, ['setUp', 'cleanup1'])

    def testTestCaseDebugExecutesCleanups(self):
        ordering = []

        class TestableTest(unittest.TestCase):
            def setUp(self):
                ordering.append('setUp')
                self.addCleanup(cleanup1)

            def testNothing(self):
                ordering.append('test')

            def tearDown(self):
                ordering.append('tearDown')

        test = TestableTest('testNothing')

        def cleanup1():
            ordering.append('cleanup1')
            # Cleanups added while cleanups are running must also execute.
            test.addCleanup(cleanup2)

        def cleanup2():
            ordering.append('cleanup2')

        test.debug()
        self.assertEqual(ordering, ['setUp', 'test', 'tearDown', 'cleanup1', 'cleanup2'])
class Test_TextTestRunner(unittest.TestCase):
    """Tests for TextTestRunner."""

    def test_init(self):
        # Default-constructed runner has documented default settings.
        runner = unittest.TextTestRunner()
        self.assertFalse(runner.failfast)
        self.assertFalse(runner.buffer)
        self.assertEqual(runner.verbosity, 1)
        self.assertEqual(runner.warnings, None)
        self.assertTrue(runner.descriptions)
        self.assertEqual(runner.resultclass, unittest.TextTestResult)

    def testBufferAndFailfast(self):
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        result = unittest.TestResult()
        runner = unittest.TextTestRunner(stream=io.StringIO(), failfast=True,
                                         buffer=True)
        # Use our result object
        runner._makeResult = lambda: result
        runner.run(Test('testFoo'))

        # failfast/buffer flags must be propagated onto the result.
        self.assertTrue(result.failfast)
        self.assertTrue(result.buffer)

    def testRunnerRegistersResult(self):
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        originalRegisterResult = unittest.runner.registerResult

        def cleanup():
            unittest.runner.registerResult = originalRegisterResult
        self.addCleanup(cleanup)

        result = unittest.TestResult()
        runner = unittest.TextTestRunner(stream=io.StringIO())
        # Use our result object
        runner._makeResult = lambda: result

        self.wasRegistered = 0

        def fakeRegisterResult(thisResult):
            self.wasRegistered += 1
            self.assertEqual(thisResult, result)

        unittest.runner.registerResult = fakeRegisterResult

        runner.run(unittest.TestSuite())
        self.assertEqual(self.wasRegistered, 1)

    def test_works_with_result_without_startTestRun_stopTestRun(self):
        # Old-style result objects (pre-2.7) lacked the run hooks; the
        # runner must still accept them.
        class OldTextResult(ResultWithNoStartTestRunStopTestRun):
            separator2 = ''

            def printErrors(self):
                pass

        class Runner(unittest.TextTestRunner):
            def __init__(self):
                super(Runner, self).__init__(io.StringIO())

            def _makeResult(self):
                return OldTextResult()

        runner = Runner()
        runner.run(unittest.TestSuite())

    def test_startTestRun_stopTestRun_called(self):
        class LoggingTextResult(LoggingResult):
            separator2 = ''

            def printErrors(self):
                pass

        class LoggingRunner(unittest.TextTestRunner):
            def __init__(self, events):
                super(LoggingRunner, self).__init__(io.StringIO())
                self._events = events

            def _makeResult(self):
                return LoggingTextResult(self._events)

        events = []
        runner = LoggingRunner(events)
        runner.run(unittest.TestSuite())
        expected = ['startTestRun', 'stopTestRun']
        self.assertEqual(events, expected)

    def test_pickle_unpickle(self):
        # Issue #7197: a TextTestRunner should be (un)pickleable. This is
        # required by test_multiprocessing under Windows (in verbose mode).
        stream = io.StringIO("foo")
        runner = unittest.TextTestRunner(stream)
        for protocol in range(2, pickle.HIGHEST_PROTOCOL + 1):
            s = pickle.dumps(runner, protocol)
            obj = pickle.loads(s)
            # StringIO objects never compare equal, a cheap test instead.
            self.assertEqual(obj.stream.getvalue(), stream.getvalue())

    def test_resultclass(self):
        def MockResultClass(*args):
            return args
        STREAM = object()
        DESCRIPTIONS = object()
        VERBOSITY = object()
        runner = unittest.TextTestRunner(STREAM, DESCRIPTIONS, VERBOSITY,
                                         resultclass=MockResultClass)
        self.assertEqual(runner.resultclass, MockResultClass)

        # _makeResult() must forward (stream, descriptions, verbosity).
        expectedresult = (runner.stream, DESCRIPTIONS, VERBOSITY)
        self.assertEqual(runner._makeResult(), expectedresult)

    def test_warnings(self):
        """
        Check that warnings argument of TextTestRunner correctly affects the
        behavior of the warnings.
        """
        # see #10535 and the _test_warnings file for more information

        def get_parse_out_err(p):
            return [b.splitlines() for b in p.communicate()]

        opts = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                    cwd=os.path.dirname(__file__))
        ae_msg = b'Please use assertEqual instead.'
        at_msg = b'Please use assertTrue instead.'

        # no args -> all the warnings are printed, unittest warnings only once
        p = subprocess.Popen([sys.executable, '_test_warnings.py'], **opts)
        out, err = get_parse_out_err(p)
        self.assertIn(b'OK', err)
        # check that the total number of warnings in the output is correct
        self.assertEqual(len(out), 12)

        # check that the numbers of the different kind of warnings is correct
        for msg in [b'dw', b'iw', b'uw']:
            self.assertEqual(out.count(msg), 3)
        for msg in [ae_msg, at_msg, b'rw']:
            self.assertEqual(out.count(msg), 1)

        args_list = (
            # passing 'ignore' as warnings arg -> no warnings
            [sys.executable, '_test_warnings.py', 'ignore'],
            # -W doesn't affect the result if the arg is passed
            [sys.executable, '-Wa', '_test_warnings.py', 'ignore'],
            # -W affects the result if the arg is not passed
            [sys.executable, '-Wi', '_test_warnings.py']
        )
        # in all these cases no warnings are printed
        for args in args_list:
            p = subprocess.Popen(args, **opts)
            out, err = get_parse_out_err(p)
            self.assertIn(b'OK', err)
            self.assertEqual(len(out), 0)

        # passing 'always' as warnings arg -> all the warnings printed,
        # unittest warnings only once
        p = subprocess.Popen([sys.executable, '_test_warnings.py', 'always'],
                             **opts)
        out, err = get_parse_out_err(p)
        self.assertIn(b'OK', err)
        self.assertEqual(len(out), 14)
        for msg in [b'dw', b'iw', b'uw', b'rw']:
            self.assertEqual(out.count(msg), 3)
        for msg in [ae_msg, at_msg]:
            self.assertEqual(out.count(msg), 1)

    def testStdErrLookedUpAtInstantiationTime(self):
        # see issue 10786
        old_stderr = sys.stderr
        f = io.StringIO()
        sys.stderr = f
        try:
            # With no explicit stream the runner must capture the *current*
            # sys.stderr at construction time.
            runner = unittest.TextTestRunner()
            self.assertTrue(runner.stream.stream is f)
        finally:
            sys.stderr = old_stderr

    def testSpecifiedStreamUsed(self):
        # see issue 10786
        f = io.StringIO()
        runner = unittest.TextTestRunner(f)
        self.assertTrue(runner.stream.stream is f)
wuxianghou/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/platforminfo.py | 122 | 6974 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import sys
class PlatformInfo(object):
    """This class provides a consistent (and mockable) interpretation of
    system-specific values (like sys.platform and platform.mac_ver())
    to be used by the rest of the webkitpy code base.

    Public (static) properties:
    -- os_name
    -- os_version

    Note that 'future' is returned for os_version if the operating system is
    newer than one known to the code.
    """

    def __init__(self, sys_module, platform_module, executive):
        # sys/platform modules are injected so tests can pass mocks.
        self._executive = executive
        self._platform_module = platform_module
        self.os_name = self._determine_os_name(sys_module.platform)
        if self.os_name == 'linux':
            self.os_version = self._determine_linux_version()
        if self.os_name == 'freebsd':
            self.os_version = platform_module.release()
        if self.os_name.startswith('mac'):
            self.os_version = self._determine_mac_version(platform_module.mac_ver()[0])
        if self.os_name.startswith('win'):
            self.os_version = self._determine_win_version(self._win_version_tuple(sys_module))
        # Cygwin reports os_name 'win' but needs separate detection.
        self._is_cygwin = sys_module.platform == 'cygwin'

    def is_mac(self):
        return self.os_name == 'mac'

    def is_win(self):
        return self.os_name == 'win'

    def is_cygwin(self):
        return self._is_cygwin

    def is_linux(self):
        return self.os_name == 'linux'

    def is_freebsd(self):
        return self.os_name == 'freebsd'

    def display_name(self):
        """Return a human-readable platform string for logs/reports."""
        # platform.platform() returns Darwin information for Mac, which is just confusing.
        if self.is_mac():
            return "Mac OS X %s" % self._platform_module.mac_ver()[0]

        # Returns strings like:
        # Linux-2.6.18-194.3.1.el5-i686-with-redhat-5.5-Final
        # Windows-2008ServerR2-6.1.7600
        return self._platform_module.platform()

    def total_bytes_memory(self):
        """Return physical memory in bytes on Mac, None elsewhere.

        (This file targets Python 2, hence the `long` constructor.)
        """
        if self.is_mac():
            return long(self._executive.run_command(["sysctl", "-n", "hw.memsize"]))
        return None

    def terminal_width(self):
        """Returns sys.maxint if the width cannot be determined."""
        try:
            if self.is_win():
                # From http://code.activestate.com/recipes/440694-determine-size-of-console-window-on-windows/
                from ctypes import windll, create_string_buffer
                handle = windll.kernel32.GetStdHandle(-12)  # -12 == stderr
                console_screen_buffer_info = create_string_buffer(22)  # 22 == sizeof(console_screen_buffer_info)
                if windll.kernel32.GetConsoleScreenBufferInfo(handle, console_screen_buffer_info):
                    import struct
                    _, _, _, _, _, left, _, right, _, _, _ = struct.unpack("hhhhHhhhhhh", console_screen_buffer_info.raw)
                    # Note that we return 1 less than the width since writing into the rightmost column
                    # automatically performs a line feed.
                    return right - left
                return sys.maxint
            else:
                import fcntl
                import struct
                import termios
                packed = fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ, '\0' * 8)
                _, columns, _, _ = struct.unpack('HHHH', packed)
                return columns
        except:
            # Any failure (no tty, missing modules) falls back to "unbounded".
            return sys.maxint

    def _determine_os_name(self, sys_platform):
        if sys_platform == 'darwin':
            return 'mac'
        if sys_platform.startswith('linux'):
            return 'linux'
        if sys_platform in ('win32', 'cygwin'):
            return 'win'
        if sys_platform.startswith('freebsd'):
            return 'freebsd'
        raise AssertionError('unrecognized platform string "%s"' % sys_platform)

    def _determine_mac_version(self, mac_version_string):
        """Map a mac_ver() string like '10.7.5' to a codename, or 'future'."""
        release_version = mac_version_string.split('.')[1]
        version_strings = {
            '5': 'leopard',
            '6': 'snowleopard',
            '7': 'lion',
            '8': 'mountainlion',
        }
        # Compare numerically.  The previous string comparison broke for
        # two-digit releases: '10' >= '5' is False lexicographically, so
        # OS X 10.9+ tripped the assert instead of returning 'future'.
        assert int(release_version) >= min(int(key) for key in version_strings)
        return version_strings.get(release_version, 'future')

    def _determine_linux_version(self):
        # FIXME: we ignore whatever the real version is and pretend it's lucid for now.
        return 'lucid'

    def _determine_win_version(self, win_version_tuple):
        if win_version_tuple[:3] == (6, 1, 7600):
            return '7sp0'
        if win_version_tuple[:2] == (6, 0):
            return 'vista'
        if win_version_tuple[:2] == (5, 1):
            return 'xp'
        assert win_version_tuple[0] > 6 or win_version_tuple[1] >= 1, 'Unrecognized Windows version tuple: "%s"' % (win_version_tuple,)
        return 'future'

    def _win_version_tuple(self, sys_module):
        if hasattr(sys_module, 'getwindowsversion'):
            return sys_module.getwindowsversion()
        # Cygwin's sys module lacks getwindowsversion; shell out instead.
        return self._win_version_tuple_from_cmd()

    def _win_version_tuple_from_cmd(self):
        # Note that this should only ever be called on windows, so this should always work.
        ver_output = self._executive.run_command(['cmd', '/c', 'ver'], decode_output=False)
        match_object = re.search(r'(?P<major>\d)\.(?P<minor>\d)\.(?P<build>\d+)', ver_output)
        assert match_object, 'cmd returned an unexpected version string: ' + ver_output
        return tuple(map(int, match_object.groups()))
| bsd-3-clause |
rgeleta/odoo | addons/crm/wizard/crm_phonecall_to_phonecall.py | 337 | 4535 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import time
class crm_phonecall2phonecall(osv.osv_memory):
    """Wizard launched from a phonecall to either schedule a follow-up
    call or log a call that already took place."""
    _name = 'crm.phonecall2phonecall'
    _description = 'Phonecall To Phonecall'
    _columns = {
        # Summary used as the name of the newly created phonecall.
        'name' : fields.char('Call summary', required=True, select=1),
        'user_id' : fields.many2one('res.users',"Assign To"),
        'contact_name':fields.char('Contact'),
        'phone':fields.char('Phone'),
        # Category limited to phonecall categories of the selected sales
        # team (or team-less categories).
        'categ_id': fields.many2one('crm.case.categ', 'Category', \
            domain="['|',('section_id','=',False),('section_id','=',section_id),\
            ('object_id.model', '=', 'crm.phonecall')]"),
        'date': fields.datetime('Date'),
        'section_id':fields.many2one('crm.case.section','Sales Team'),
        'action': fields.selection([('schedule','Schedule a call'), ('log','Log a call')], 'Action', required=True),
        'partner_id' : fields.many2one('res.partner', "Partner"),
        'note':fields.text('Note')
    }

    def action_cancel(self, cr, uid, ids, context=None):
        """
        Closes Phonecall to Phonecall form
        """
        return {'type':'ir.actions.act_window_close'}

    def action_schedule(self, cr, uid, ids, context=None):
        """Create the follow-up (or logged) call(s) for the active
        phonecalls and open the new phonecall's form view."""
        value = {}
        if context is None:
            context = {}
        phonecall = self.pool.get('crm.phonecall')
        # The wizard operates on the phonecalls selected in the client.
        phonecall_ids = context and context.get('active_ids') or []
        for this in self.browse(cr, uid, ids, context=context):
            phocall_ids = phonecall.schedule_another_phonecall(cr, uid, phonecall_ids, this.date, this.name, \
                    this.user_id and this.user_id.id or False, \
                    this.section_id and this.section_id.id or False, \
                    this.categ_id and this.categ_id.id or False, \
                    action=this.action, context=context)
        # NOTE(review): phocall_ids from the last iteration is assumed to map
        # source phonecall id -> new phonecall id; confirm against
        # schedule_another_phonecall's return value.
        return phonecall.redirect_phonecall_view(cr, uid, phocall_ids[phonecall_ids[0]], context=context)

    def default_get(self, cr, uid, fields, context=None):
        """
        This function gets default values
        """
        res = super(crm_phonecall2phonecall, self).default_get(cr, uid, fields, context=context)
        record_id = context and context.get('active_id', False) or False
        res.update({'action': 'schedule', 'date': time.strftime('%Y-%m-%d %H:%M:%S')})
        if record_id:
            phonecall = self.pool.get('crm.phonecall').browse(cr, uid, record_id, context=context)

            categ_id = False
            data_obj = self.pool.get('ir.model.data')
            try:
                # Default category: the standard crm 'categ_phone2' record,
                # if it still exists in ir.model.data.
                res_id = data_obj._get_id(cr, uid, 'crm', 'categ_phone2')
                categ_id = data_obj.browse(cr, uid, res_id, context=context).res_id
            except ValueError:
                pass

            if 'name' in fields:
                res.update({'name': phonecall.name})
            if 'user_id' in fields:
                res.update({'user_id': phonecall.user_id and phonecall.user_id.id or False})
            if 'date' in fields:
                # Cleared on purpose so the user picks the new call time.
                res.update({'date': False})
            if 'section_id' in fields:
                res.update({'section_id': phonecall.section_id and phonecall.section_id.id or False})
            if 'categ_id' in fields:
                res.update({'categ_id': categ_id})
            if 'partner_id' in fields:
                res.update({'partner_id': phonecall.partner_id and phonecall.partner_id.id or False})
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
alextruberg/custom_django | tests/commands_sql/tests.py | 58 | 2445 | from __future__ import unicode_literals
from django.core.management.color import no_style
from django.core.management.sql import (sql_create, sql_delete, sql_indexes,
sql_destroy_indexes, sql_all)
from django.db import connections, DEFAULT_DB_ALIAS, models
from django.test import TestCase
from django.utils import six
# See also initial_sql_regress for 'custom_sql_for_model' tests
class SQLCommandsTestCase(TestCase):
    """Tests for several functions in django/core/management/sql.py"""

    def count_ddl(self, output, cmd):
        # Number of generated statements starting with the given DDL keyword.
        return len([o for o in output if o.startswith(cmd)])

    def test_sql_create(self):
        app = models.get_app('commands_sql')
        output = sql_create(app, no_style(), connections[DEFAULT_DB_ALIAS])
        create_tables = [o for o in output if o.startswith('CREATE TABLE')]
        # The test app defines three models, hence three tables.
        self.assertEqual(len(create_tables), 3)
        # Lower so that Oracle's upper case tbl names wont break
        sql = create_tables[-1].lower()
        six.assertRegex(self, sql, r'^create table .commands_sql_book.*')

    def test_sql_delete(self):
        app = models.get_app('commands_sql')
        output = sql_delete(app, no_style(), connections[DEFAULT_DB_ALIAS])
        drop_tables = [o for o in output if o.startswith('DROP TABLE')]
        self.assertEqual(len(drop_tables), 3)
        # Lower so that Oracle's upper case tbl names wont break
        sql = drop_tables[-1].lower()
        six.assertRegex(self, sql, r'^drop table .commands_sql_comment.*')

    def test_sql_indexes(self):
        app = models.get_app('commands_sql')
        output = sql_indexes(app, no_style(), connections[DEFAULT_DB_ALIAS])
        # PostgreSQL creates one additional index for CharField
        self.assertIn(self.count_ddl(output, 'CREATE INDEX'), [3, 4])

    def test_sql_destroy_indexes(self):
        app = models.get_app('commands_sql')
        output = sql_destroy_indexes(app, no_style(), connections[DEFAULT_DB_ALIAS])
        # PostgreSQL creates one additional index for CharField
        self.assertIn(self.count_ddl(output, 'DROP INDEX'), [3, 4])

    def test_sql_all(self):
        app = models.get_app('commands_sql')
        output = sql_all(app, no_style(), connections[DEFAULT_DB_ALIAS])
        self.assertEqual(self.count_ddl(output, 'CREATE TABLE'), 3)
        # PostgreSQL creates one additional index for CharField
        self.assertIn(self.count_ddl(output, 'CREATE INDEX'), [3, 4])
| bsd-3-clause |
mancoast/CPythonPyc_test | cpython/272_loosing_mro_ref.py | 151 | 1079 | """
There is a way to put keys of any type in a type's dictionary.
I think this allows various kinds of crashes, but so far I have only
found a convoluted attack of _PyType_Lookup(), which uses the mro of the
type without holding a strong reference to it. Probably works with
super.__getattribute__() too, which uses the same kind of code.
"""
class MyKey(object):
    # Hashes like the string 'mykey' so dict lookup for X.mykey lands in the
    # same bucket and falls back to __cmp__ for the comparison.
    def __hash__(self):
        return hash('mykey')

    def __cmp__(self, other):
        # the following line decrefs the previous X.__mro__
        X.__bases__ = (Base2,)

        # trash all tuples of length 3, to make sure that the items of
        # the previous X.__mro__ are really garbage
        z = []
        for i in range(1000):
            z.append((i, None, None))

        # Always report "not equal" so the lookup keeps walking the (now
        # freed) MRO tuple.
        return -1
class Base(object):
    mykey = 'from Base'

class Base2(object):
    mykey = 'from Base2'

# you can't add a non-string key to X.__dict__, but it can be
# there from the beginning :-)
X = type('X', (Base,), {MyKey(): 5})

# This attribute lookup triggers MyKey.__cmp__, which swaps X.__bases__
# and frees the old MRO while _PyType_Lookup() is still walking it.
print X.mykey
# I get a segfault, or a slightly wrong assertion error in a debug build.
khagler/boto | boto/ec2/autoscale/limits.py | 152 | 1958 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class AccountLimits(object):
    """Holds the Auto Scaling account limits returned by
    DescribeAccountLimits.  Populated by boto's SAX response parser
    through the startElement/endElement callbacks.
    """

    def __init__(self, connection=None):
        self.connection = connection
        # Initialize request_id here so the attribute always exists.
        # Previously it was only created inside endElement, so reading it
        # before a RequestId element was parsed raised AttributeError.
        self.request_id = None
        self.max_autoscaling_groups = None
        self.max_launch_configurations = None

    def __repr__(self):
        return 'AccountLimits: [%s, %s]' % (self.max_autoscaling_groups,
                                            self.max_launch_configurations)

    def startElement(self, name, attrs, connection):
        # No nested elements to handle in this response.
        return None

    def endElement(self, name, value, connection):
        """SAX callback: store the text value of a closing element."""
        if name == 'RequestId':
            self.request_id = value
        elif name == 'MaxNumberOfAutoScalingGroups':
            self.max_autoscaling_groups = int(value)
        elif name == 'MaxNumberOfLaunchConfigurations':
            self.max_launch_configurations = int(value)
        else:
            # Keep unknown elements verbatim so new response fields are
            # still accessible.
            setattr(self, name, value)
| mit |
sasmita/upm | examples/python/scam.py | 7 | 2202 | #!/usr/bin/python
#
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import sys
from upm import pyupm_scam as upmscam
def main():
    """Exercise the serial camera: initialize it, capture one frame and
    store it as image.jpg in the current directory.

    Exits with status 1 on tty setup failure, 0 otherwise (individual
    capture steps only print their outcome and continue).
    """
    # Instantiate a Serial Camera on UART 0
    camera = upmscam.SCAM(0)

    # make sure port is initialized properly. 115200 baud is the default.
    if (not camera.setupTty()):
        print("Failed to setup tty port parameters")
        sys.exit(1)

    if (camera.init()):
        print("Initialized...")
    else:
        print("init() failed")

    if (camera.preCapture()):
        print("preCapture succeeded...")
    else:
        print("preCapture failed.")

    if (camera.doCapture()):
        print("doCapture succeeded...")
    else:
        print("doCapture failed.")

    print("Image size is", camera.getImageSize(), "bytes")

    # Only attempt to store if the capture actually produced data.
    if (camera.getImageSize() > 0):
        print("Storing image.jpg...")
        if (camera.storeImage("image.jpg")):
            print("storeImage succeeded...")
        else:
            print("storeImage failed.")

    print("Exiting.")
    sys.exit(0)

if __name__ == '__main__':
    main()
| mit |
natj/bender | paper/figs/fig9.py | 1 | 4141 | import numpy as np
import math
from pylab import *
from palettable.wesanderson import Zissou_5 as wsZ
import matplotlib.ticker as mtick
from scipy.interpolate import interp1d
from scipy.interpolate import griddata
from scipy.signal import savgol_filter
def smooth(xx, yy):
    """Savitzky-Golay smooth *yy* (window 7, order 2), clamp the result to
    [0, 1000] and force both endpoints to zero.  Returns (xx, smoothed)."""
    filtered = savgol_filter(yy, 7, 2)
    np.clip(filtered, 0.0, 1000.0, out=filtered)
    filtered[0] = 0.0
    filtered[-1] = 0.0
    return xx, filtered
#Read JN files
def read_lineprof(fname):
    """Load a two-column CSV line profile and return (energy, flux) with the
    flux normalized to unit area (assumes a uniform energy grid; the bin
    width is taken from the third grid spacing)."""
    data = np.genfromtxt(fname, delimiter=",")
    bin_width = np.diff(data[:, 0])[2]
    area = np.sum(bin_width * data[:, 1])
    return data[:, 0], data[:, 1] / area
#Read JN files
def read_csv(fname):
    """Load a two-column CSV and return (x, y) unnormalized.

    The previous version computed a grid spacing and a normalization
    constant and then discarded both (the return value was never
    normalized); the dead computation also forced the file to have at
    least four rows.  Removing it keeps the behavior identical while
    accepting any CSV with at least one row.
    """
    da = np.genfromtxt(fname, delimiter=",")
    return da[:, 0], da[:, 1]
## Plot
fig = figure(figsize=(5,3), dpi=80)
rc('font', family='serif')
rc('xtick', labelsize='xx-small')
rc('ytick', labelsize='xx-small')

gs = GridSpec(1, 1)
#gs.update(wspace = 0.34)
#gs.update(hspace = 0.4)

lsize = 10.0

# Energy-axis plot range.
xmin = 0.69
xmax = 0.82

#error window limits
eymin = -0.5
eymax = 0.5

#path to files
#path_JN = "../../out3/lines/"
path_JN = "../../out/lines2/"

#labels size
tsize = 10.0

nu = '700'

#fig.text(0.5, 0.92, '$\\theta_s = 18^{\\circ}$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.72, '$\\theta_s = 45^{\\circ}$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.52, '$\\theta_s = 90^{\\circ}$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.32, 'Hopf $\\theta_s = 45^{\circ}$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.12, 'Phase',ha='center', va='center', size=lsize)

ax1 = subplot(gs[0,0])
ax1.minorticks_on()
ax1.set_xlim(xmin, xmax)
ax1.set_ylim(0.0, 30)
ax1.set_ylabel('Normalized flux',size=lsize)
ax1.set_xlabel('Energy $E/E\'$',size=lsize)

#xx1, yy1 = read_lineprof(path_JN+'lineprof_f700pbbr10m1.4i20.csv')
#ax1.plot(xx1, yy1, "k--")

#xx2, yy2 = read_lineprof(path_JN+'lineprof_obl_HTq0_f700pbbr10m1.4i20.csv')
#ax1.plot(xx2, yy2, "k-")

#lineprof_obl_HTq3_f700pbbr10m1.4i20.csv
#lineprof_obl_HTq5_f700pbbr10m1.4i20.csv
#lineprof_obl_HTq2_f700pbbr10m1.4i20.csv

files_JN = [
"lineprof_f700pbbr10m1.4i20.csv",
"lineprof_obl_f700pbbr10m1.4i20.csv",
#"lineprof_sph2_HTqfix_f700pbbr10m1.4i20.csv"]
#"lineprof_obl_HTq0_f700pbbr10m1.4i20.csv",
"lineprof_obl_HTq1_f700pbbr10m1.4i20.csv"]
#"lineprof_obl_HTq4_f700pbbr10m1.4i20.csv"]

# NOTE(review): this reassignment immediately overwrites the files_JN list
# defined just above (kept from an earlier revision of the figure).
files_JN = ['sch/lineprofile_f700_bb_r10_m1.4_i20.csv',
            'obl/lineprofile_f700_bb_r10_m1.4_i20.csv',
            'q/lineprofile_f700_bb_r10_m1.4_i20.csv']

cols = ["black",
        "blue",
        "red",
        "magenta"]

i = 0
for file_name in files_JN:
    xx, yy = read_lineprof(path_JN+file_name)
    xx, yy = smooth(xx, yy)
    ax1.plot(xx, yy, color=cols[i], linestyle="solid")
    i += 1

#path_JN = "../../out3/lines/"
# Extra quadrupole (q x 4) profile plotted dashed for comparison.
xx, yy = read_lineprof("../../out3/lines/lineprof_obl_HTq4_f700pbbr10m1.4i20.csv")
ax1.plot(xx, yy, color="red", linestyle="dashed")

#files_Bau = [
#"sch+dopp.csv",
#"sch+dopp+obl.csv",
#"HT.csv",
#"HT_obl.csv"]

files_Bau = ['sch.csv', 'obl.csv', 'ht.csv']

# Reference curves are read but their plotting is currently disabled below.
i = 0
for file_name in files_Bau:
    xx, yy = read_csv(path_JN+file_name)

    #rescale xx for correct scaling
    #xx = (xx-0.72)/(0.89-0.72)*(0.8-0.72) + 0.72
    #ax1.plot(xx, yy, color=cols[i], linestyle="dashed")
    i += 1

############ q's
#xx3, yy3 = read_lineprof(path_JN+'lineprof_obl_HTq1_f700pbbr10m1.4i20.csv')
#ax1.plot(xx3, yy3, "k-", label="$q = -0.268$")
#
#xx4, yy4 = read_lineprof(path_JN+'lineprof_obl_HTq2_f700pbbr10m1.4i20.csv')
#ax1.plot(xx4, yy4, "r-", label="$q \\times 2$")
#
#xx5, yy5 = read_lineprof(path_JN+'lineprof_obl_HTq3_f700pbbr10m1.4i20.csv')
#ax1.plot(xx5, yy5, "g-", label="$q \\times 3$")
#
#xx6, yy6 = read_lineprof(path_JN+'lineprof_obl_HTq4_f700pbbr10m1.4i20.csv')
#ax1.plot(xx6, yy6, "b-", label="$q \\times 4$")
#
#xx7, yy7 = read_lineprof(path_JN+'lineprof_obl_HTq5_f700pbbr10m1.4i20.csv')
#ax1.plot(xx7, yy7, "m-", label="$q \\times 5$")
#
#legend = ax1.legend(loc='upper left', shadow=False, labelspacing=0.1)
#for label in legend.get_texts():
#    label.set_fontsize('x-small')

savefig('fig9_testi.pdf', bbox_inches='tight')
| mit |
johankaito/fufuka | microblog/flask/venv/lib/python2.7/site-packages/flask/ctx.py | 776 | 14266 | # -*- coding: utf-8 -*-
"""
flask.ctx
~~~~~~~~~
Implements the objects required to keep the context.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import sys
from functools import update_wrapper
from werkzeug.exceptions import HTTPException
from .globals import _request_ctx_stack, _app_ctx_stack
from .module import blueprint_is_module
from .signals import appcontext_pushed, appcontext_popped
class _AppCtxGlobals(object):
    """A plain attribute namespace (the object behind ``flask.g``)."""

    def get(self, name, default=None):
        try:
            return self.__dict__[name]
        except KeyError:
            return default

    def __contains__(self, item):
        return item in vars(self)

    def __iter__(self):
        return iter(vars(self))

    def __repr__(self):
        ctx = _app_ctx_stack.top
        if ctx is None:
            return object.__repr__(self)
        return '<flask.g of %r>' % ctx.app.name
def after_this_request(f):
    """Register *f* to run against the response of the current request.

    The registered function receives the response object and must return
    the same or a replacement response.  This is most useful when code
    other than the view function (for instance a decorator) wants to
    modify the response, e.g. to add headers.

    Example::

        @app.route('/')
        def index():
            @after_this_request
            def add_header(response):
                response.headers['X-Foo'] = 'Parachute'
                return response
            return 'Hello World!'

    .. versionadded:: 0.9
    """
    ctx = _request_ctx_stack.top
    ctx._after_request_functions.append(f)
    return f
def copy_current_request_context(f):
    """Decorate *f* so it retains a copy of the request context active at
    decoration time.  The copied context is pushed whenever the decorated
    function runs, so ``flask.request`` and friends keep working inside
    it.  This is primarily useful when working with greenlets::

        import gevent
        from flask import copy_current_request_context

        @app.route('/')
        def index():
            @copy_current_request_context
            def do_some_work():
                # can access flask.request here as usual
                ...
            gevent.spawn(do_some_work)
            return 'Regular response'

    .. versionadded:: 0.10
    """
    ctx = _request_ctx_stack.top
    if ctx is None:
        raise RuntimeError('This decorator can only be used at local scopes '
            'when a request context is on the stack. For instance within '
            'view functions.')
    snapshot = ctx.copy()

    def wrapper(*args, **kwargs):
        with snapshot:
            return f(*args, **kwargs)
    return update_wrapper(wrapper, f)
def has_request_context():
    """Return ``True`` when a request context is currently pushed.

    This lets code take advantage of request information when it is
    available while degrading gracefully outside of a request, e.g.::

        class User(db.Model):

            def __init__(self, username, remote_addr=None):
                self.username = username
                if remote_addr is None and has_request_context():
                    remote_addr = request.remote_addr
                self.remote_addr = remote_addr

    A truthiness check on :class:`request` (or :class:`g`) is equivalent.

    .. versionadded:: 0.7
    """
    top = _request_ctx_stack.top
    return top is not None
def has_app_context():
    """Return ``True`` when an application context is currently pushed.

    Works like :func:`has_request_context` but for the application
    context; a boolean check on :data:`current_app` is equivalent.

    .. versionadded:: 0.9
    """
    top = _app_ctx_stack.top
    return top is not None
class AppContext(object):
    """The application context binds an application object implicitly
    to the current thread or greenlet, similar to how the
    :class:`RequestContext` binds request information. The application
    context is also implicitly created if a request context is created
    but the application is not on top of the individual application
    context.
    """

    def __init__(self, app):
        self.app = app
        self.url_adapter = app.create_url_adapter(None)
        self.g = app.app_ctx_globals_class()

        # Like request context, app contexts can be pushed multiple times
        # but there a basic "refcount" is enough to track them.
        self._refcnt = 0

    def push(self):
        """Binds the app context to the current context."""
        self._refcnt += 1
        _app_ctx_stack.push(self)
        # Notify subscribers (e.g. test helpers) that this context is live.
        appcontext_pushed.send(self.app)

    def pop(self, exc=None):
        """Pops the app context."""
        self._refcnt -= 1
        if self._refcnt <= 0:
            # Teardown callbacks only run when the outermost push is popped.
            if exc is None:
                exc = sys.exc_info()[1]
            self.app.do_teardown_appcontext(exc)
        rv = _app_ctx_stack.pop()
        assert rv is self, 'Popped wrong app context. (%r instead of %r)' \
            % (rv, self)
        appcontext_popped.send(self.app)

    def __enter__(self):
        self.push()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.pop(exc_value)
class RequestContext(object):
    """The request context contains all request relevant information. It is
    created at the beginning of the request and pushed to the
    `_request_ctx_stack` and removed at the end of it. It will create the
    URL adapter and request object for the WSGI environment provided.

    Do not attempt to use this class directly, instead use
    :meth:`~flask.Flask.test_request_context` and
    :meth:`~flask.Flask.request_context` to create this object.

    When the request context is popped, it will evaluate all the
    functions registered on the application for teardown execution
    (:meth:`~flask.Flask.teardown_request`).

    The request context is automatically popped at the end of the request
    for you. In debug mode the request context is kept around if
    exceptions happen so that interactive debuggers have a chance to
    introspect the data. With 0.4 this can also be forced for requests
    that did not fail and outside of `DEBUG` mode. By setting
    ``'flask._preserve_context'`` to `True` on the WSGI environment the
    context will not pop itself at the end of the request. This is used by
    the :meth:`~flask.Flask.test_client` for example to implement the
    deferred cleanup functionality.

    You might find this helpful for unittests where you need the
    information from the context local around for a little longer. Make
    sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
    that situation, otherwise your unittests will leak memory.
    """

    def __init__(self, app, environ, request=None):
        self.app = app
        if request is None:
            request = app.request_class(environ)
        self.request = request
        self.url_adapter = app.create_url_adapter(self.request)
        self.flashes = None
        self.session = None

        # Request contexts can be pushed multiple times and interleaved with
        # other request contexts. Now only if the last level is popped we
        # get rid of them. Additionally if an application context is missing
        # one is created implicitly so for each level we add this information
        self._implicit_app_ctx_stack = []

        # indicator if the context was preserved. Next time another context
        # is pushed the preserved context is popped.
        self.preserved = False

        # remembers the exception for pop if there is one in case the context
        # preservation kicks in.
        self._preserved_exc = None

        # Functions that should be executed after the request on the response
        # object. These will be called before the regular "after_request"
        # functions.
        self._after_request_functions = []

        self.match_request()

        # XXX: Support for deprecated functionality. This is going away with
        # Flask 1.0
        blueprint = self.request.blueprint
        if blueprint is not None:
            # better safe than sorry, we don't want to break code that
            # already worked
            bp = app.blueprints.get(blueprint)
            if bp is not None and blueprint_is_module(bp):
                self.request._is_old_module = True

    # ``g`` proxies the application context's globals so request code can
    # keep using ``ctx.g`` even though storage lives on the app context.
    def _get_g(self):
        return _app_ctx_stack.top.g
    def _set_g(self, value):
        _app_ctx_stack.top.g = value
    g = property(_get_g, _set_g)
    del _get_g, _set_g

    def copy(self):
        """Creates a copy of this request context with the same request object.
        This can be used to move a request context to a different greenlet.
        Because the actual request object is the same this cannot be used to
        move a request context to a different thread unless access to the
        request object is locked.

        .. versionadded:: 0.10
        """
        return self.__class__(self.app,
            environ=self.request.environ,
            request=self.request
        )

    def match_request(self):
        """Can be overridden by a subclass to hook into the matching
        of the request.
        """
        try:
            url_rule, self.request.view_args = \
                self.url_adapter.match(return_rule=True)
            self.request.url_rule = url_rule
        except HTTPException as e:
            # Routing failures are stored and re-raised during dispatch.
            self.request.routing_exception = e

    def push(self):
        """Binds the request context to the current context."""
        # If an exception occurs in debug mode or if context preservation is
        # activated under exception situations exactly one context stays
        # on the stack. The rationale is that you want to access that
        # information under debug situations. However if someone forgets to
        # pop that context again we want to make sure that on the next push
        # it's invalidated, otherwise we run at risk that something leaks
        # memory. This is usually only a problem in testsuite since this
        # functionality is not active in production environments.
        top = _request_ctx_stack.top
        if top is not None and top.preserved:
            top.pop(top._preserved_exc)

        # Before we push the request context we have to ensure that there
        # is an application context.
        app_ctx = _app_ctx_stack.top
        if app_ctx is None or app_ctx.app != self.app:
            app_ctx = self.app.app_context()
            app_ctx.push()
            self._implicit_app_ctx_stack.append(app_ctx)
        else:
            self._implicit_app_ctx_stack.append(None)

        _request_ctx_stack.push(self)

        # Open the session at the moment that the request context is
        # available. This allows a custom open_session method to use the
        # request context (e.g. code that access database information
        # stored on `g` instead of the appcontext).
        self.session = self.app.open_session(self.request)
        if self.session is None:
            self.session = self.app.make_null_session()

    def pop(self, exc=None):
        """Pops the request context and unbinds it by doing that. This will
        also trigger the execution of functions registered by the
        :meth:`~flask.Flask.teardown_request` decorator.

        .. versionchanged:: 0.9
           Added the `exc` argument.
        """
        app_ctx = self._implicit_app_ctx_stack.pop()

        clear_request = False
        # Teardown only happens when the outermost push of this context
        # (no more implicit app-context levels) is being popped.
        if not self._implicit_app_ctx_stack:
            self.preserved = False
            self._preserved_exc = None
            if exc is None:
                exc = sys.exc_info()[1]
            self.app.do_teardown_request(exc)

            # If this interpreter supports clearing the exception information
            # we do that now. This will only go into effect on Python 2.x,
            # on 3.x it disappears automatically at the end of the exception
            # stack.
            if hasattr(sys, 'exc_clear'):
                sys.exc_clear()

            request_close = getattr(self.request, 'close', None)
            if request_close is not None:
                request_close()
            clear_request = True

        rv = _request_ctx_stack.pop()
        assert rv is self, 'Popped wrong request context. (%r instead of %r)' \
            % (rv, self)

        # get rid of circular dependencies at the end of the request
        # so that we don't require the GC to be active.
        if clear_request:
            rv.request.environ['werkzeug.request'] = None

        # Get rid of the app as well if necessary.
        if app_ctx is not None:
            app_ctx.pop(exc)

    def auto_pop(self, exc):
        # Either keep the context alive for debugging/test introspection or
        # pop it immediately.
        if self.request.environ.get('flask._preserve_context') or \
           (exc is not None and self.app.preserve_context_on_exception):
            self.preserved = True
            self._preserved_exc = exc
        else:
            self.pop(exc)

    def __enter__(self):
        self.push()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # do not pop the request stack if we are in debug mode and an
        # exception happened. This will allow the debugger to still
        # access the request object in the interactive shell. Furthermore
        # the context can be force kept alive for the test client.
        # See flask.testing for how this works.
        self.auto_pop(exc_value)

    def __repr__(self):
        return '<%s \'%s\' [%s] of %s>' % (
            self.__class__.__name__,
            self.request.url,
            self.request.method,
            self.app.name,
        )
| apache-2.0 |
jmontleon/fusor_ovirt | bin/ovirt_get_ip_of_vm.py | 4 | 2742 | #! /usr/bin/env python
import logging
import sys
import time
from optparse import OptionParser
try:
from ovirtsdk.api import API
except:
print "Please re-run after you have installed 'ovirt-engine-sdk-python'"
print "Example: easy_install ovirt-engine-sdk-python"
sys.exit()
DEFAULT_API_USER = "admin@internal"


def parse_args():
    """Parse command-line options and validate that required ones are set.

    Returns the optparse options object.  Prints usage information and
    exits with status 1 if any of the required options is missing.
    """
    parser = OptionParser(description='Get the IP of a running VM')
    parser.add_option('--debug', action='store_true',
                      default=False, help='debug mode')
    parser.add_option('--api_host', default=None,
                      help='oVirt API IP Address/Hostname')
    parser.add_option(
        '--api_user', default=DEFAULT_API_USER,
        help='oVirt API Username, defaults to "%s"' % (DEFAULT_API_USER))
    parser.add_option('--api_pass', default=None, help='oVirt API Password')
    parser.add_option('--vm_id', default=None,
                      help='ID of an existing VM to add a disk to')
    (opts, args) = parser.parse_args()

    # All of these options are mandatory; bail out with usage info if
    # any one of them was not supplied.
    for optname in ["api_host", "api_pass", "api_user", "vm_id"]:
        if not getattr(opts, optname):
            parser.print_help()
            parser.print_usage()
            # single-argument print() works identically on Python 2 and 3
            print("Please re-run with an option specified for: '%s'" % optname)
            sys.exit(1)
    return opts
def setup_logging(debug=False):
    """Configure root logging: DEBUG level when requested, INFO otherwise."""
    level = logging.DEBUG if debug else logging.INFO
    logging.basicConfig(level=level,
                        format='%(asctime)s %(levelname)s %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p')
def get_ip(api, vm_id):
    """Poll the oVirt API until the guest IP of ``vm_id`` is reported.

    Retries up to ``max_tries`` times, sleeping ``wait_secs`` seconds
    between attempts.  Returns the first reported IP address string,
    or None if no IP became available in time.
    """
    def __get_ip():
        vm = api.vms.get(id=vm_id)
        info = vm.get_guest_info()
        try:
            # Guest info (and its IP list) is only populated once the
            # guest agent reports in, so any step of this chain may be
            # missing until then.
            return info.get_ips().get_ip()[0].get_address()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not swallowed while polling.
            return None

    # Wait till IP is available
    max_tries = 60
    wait_secs = 5
    for count in range(max_tries):
        ip = __get_ip()
        if ip:
            return ip
        logging.debug("Waiting %s seconds for IP to become available of VM ID '%s' (%s/%s)" %
                      (wait_secs, vm_id, count, max_tries))
        time.sleep(wait_secs)
    return None
if __name__ == "__main__":
    # Command-line entry point: connect to the oVirt API and print the
    # IP address of the requested VM.  Exits non-zero on any failure so
    # callers can script against this tool.
    opts = parse_args()
    debug = opts.debug
    setup_logging(debug)
    api_host = opts.api_host
    api_user = opts.api_user
    api_pass = opts.api_pass
    vm_id = opts.vm_id
    # Connect over HTTPS; certificate verification is disabled
    # (insecure=True), so this trusts the network path to the engine.
    url = "https://%s" % (api_host)
    api = API(url=url, username=api_user, password=api_pass, insecure=True)
    if not api:
        print "Failed to connect to '%s'" % (url)
        sys.exit(1)
    ip = get_ip(api, vm_id)
    if not ip:
        # No IP became available within get_ip()'s retry window.
        sys.exit(1)
    print ip
    sys.exit(0)
| gpl-2.0 |
Nexenta/s3-tests | virtualenv/lib/python2.7/site-packages/boto/kinesis/layer1.py | 18 | 33202 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import base64
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.kinesis import exceptions
from boto.compat import json
class KinesisConnection(AWSQueryConnection):
"""
Amazon Kinesis Service API Reference
Amazon Kinesis is a managed service that scales elastically for
real time processing of streaming big data.
"""
APIVersion = "2013-12-02"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "kinesis.us-east-1.amazonaws.com"
ServiceName = "Kinesis"
TargetPrefix = "Kinesis_20131202"
ResponseError = JSONResponseError
_faults = {
"ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
"LimitExceededException": exceptions.LimitExceededException,
"ExpiredIteratorException": exceptions.ExpiredIteratorException,
"ResourceInUseException": exceptions.ResourceInUseException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InvalidArgumentException": exceptions.InvalidArgumentException,
"SubscriptionRequiredException": exceptions.SubscriptionRequiredException
}
def __init__(self, **kwargs):
    """Create a Kinesis connection, defaulting region and endpoint.

    Falls back to ``DefaultRegionName``/``DefaultRegionEndpoint`` when
    no ``region`` keyword is supplied.
    """
    region = kwargs.pop('region', None)
    if not region:
        region = RegionInfo(self, self.DefaultRegionName,
                            self.DefaultRegionEndpoint)
    kwargs.setdefault('host', region.endpoint)
    super(KinesisConnection, self).__init__(**kwargs)
    self.region = region
def _required_auth_capability(self):
    """Requests to this service are signed with SigV4 (HMAC)."""
    return ['hmac-v4']
def create_stream(self, stream_name, shard_count):
    """Add a new Amazon Kinesis stream to the AWS account.

    Creation is asynchronous: the stream starts in the CREATING state
    and becomes ACTIVE once provisioning finishes (poll the status
    with ``describe_stream``).  Amazon Kinesis raises
    `LimitExceededException` when too many streams are being created
    at once or when more shards are requested than the account allows.

    :type stream_name: string
    :param stream_name: A name to identify the stream; scoped to the
        AWS account and region.

    :type shard_count: integer
    :param shard_count: The number of shards the stream will use.
        Throughput is a function of the shard count; more shards are
        required for greater provisioned throughput.
    """
    body = {'StreamName': stream_name, 'ShardCount': shard_count}
    return self.make_request(action='CreateStream',
                             body=json.dumps(body))
def delete_stream(self, stream_name):
    """Delete a stream together with all of its shards and data.

    The stream enters the DELETING state until Amazon Kinesis
    completes the deletion; applications operating on a deleted
    stream receive `ResourceNotFoundException`.

    :type stream_name: string
    :param stream_name: The name of the stream to delete.
    """
    body = {'StreamName': stream_name}
    return self.make_request(action='DeleteStream',
                             body=json.dumps(body))
def describe_stream(self, stream_name, limit=None,
                    exclusive_start_shard_id=None):
    """Describe a stream: its status, ARN and shard objects.

    This is a paginated operation; when the response's
    ``HasMoreShards`` flag is true, pass the last returned shard ID as
    ``exclusive_start_shard_id`` in a subsequent call.

    :type stream_name: string
    :param stream_name: The name of the stream to describe.

    :type limit: integer
    :param limit: The maximum number of shards to return.

    :type exclusive_start_shard_id: string
    :param exclusive_start_shard_id: The shard ID of the shard to
        start the stream description with.
    """
    body = {'StreamName': stream_name}
    if limit is not None:
        body['Limit'] = limit
    if exclusive_start_shard_id is not None:
        body['ExclusiveStartShardId'] = exclusive_start_shard_id
    return self.make_request(action='DescribeStream',
                             body=json.dumps(body))
def get_records(self, shard_iterator, limit=None, b64_decode=True):
    """Read one or more data records from a shard.

    The shard iterator gives the position to start reading from; each
    response contains a ``NextShardIterator`` to use for the following
    call.  Raises `ProvisionedThroughputExceededException` when the
    shard's provisioned throughput is exceeded.

    :type shard_iterator: string
    :param shard_iterator: The position in the shard from which to
        start sequentially reading data records.

    :type limit: integer
    :param limit: The maximum number of records to return (a value of
        up to 10,000).

    :type b64_decode: boolean
    :param b64_decode: Decode the Base64-encoded ``Data`` field of records.
    """
    body = {'ShardIterator': shard_iterator}
    if limit is not None:
        body['Limit'] = limit
    result = self.make_request(action='GetRecords',
                               body=json.dumps(body))
    if b64_decode:
        # The service transports record payloads Base64-encoded;
        # decode each one in place for the caller's convenience.
        for record in result.get('Records', []):
            record['Data'] = base64.b64decode(
                record['Data'].encode('utf-8')).decode('utf-8')
    return result
def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type,
                       starting_sequence_number=None):
    """Return a shard iterator for sequentially reading a shard.

    Valid iterator types are AT_SEQUENCE_NUMBER,
    AFTER_SEQUENCE_NUMBER, TRIM_HORIZON (oldest untrimmed record) and
    LATEST (just after the most recent record).  Iterators expire five
    minutes after being returned; a `null` iterator indicates the
    shard has been closed.

    :type stream_name: string
    :param stream_name: The name of the stream.

    :type shard_id: string
    :param shard_id: The shard ID of the shard to get the iterator for.

    :type shard_iterator_type: string
    :param shard_iterator_type: Determines how the iterator positions
        itself for reading data records from the shard.

    :type starting_sequence_number: string
    :param starting_sequence_number: The sequence number of the data
        record to start reading from (for the AT_/AFTER_SEQUENCE_NUMBER
        iterator types).
    """
    body = {
        'StreamName': stream_name,
        'ShardId': shard_id,
        'ShardIteratorType': shard_iterator_type,
    }
    if starting_sequence_number is not None:
        body['StartingSequenceNumber'] = starting_sequence_number
    return self.make_request(action='GetShardIterator',
                             body=json.dumps(body))
def list_streams(self, limit=None, exclusive_start_stream_name=None):
    """List the stream names associated with the AWS account.

    Paginated: when the response's ``HasMoreStreams`` flag is true,
    pass the last returned name as ``exclusive_start_stream_name`` in
    a subsequent call to continue the listing.

    :type limit: integer
    :param limit: The maximum number of streams to list.

    :type exclusive_start_stream_name: string
    :param exclusive_start_stream_name: The name of the stream to
        start the list with.
    """
    body = {}
    if limit is not None:
        body['Limit'] = limit
    if exclusive_start_stream_name is not None:
        body['ExclusiveStartStreamName'] = exclusive_start_stream_name
    return self.make_request(action='ListStreams',
                             body=json.dumps(body))
def merge_shards(self, stream_name, shard_to_merge,
                 adjacent_shard_to_merge):
    """Merge two adjacent shards into one, reducing stream capacity.

    Two shards are adjacent when the union of their hash key ranges is
    contiguous.  The operation is asynchronous: the stream status
    becomes UPDATING and returns to ACTIVE once the merge completes.
    Raises `ResourceInUseException` unless the stream is ACTIVE.

    :type stream_name: string
    :param stream_name: The name of the stream for the merge.

    :type shard_to_merge: string
    :param shard_to_merge: The shard ID of the shard to combine with
        the adjacent shard for the merge.

    :type adjacent_shard_to_merge: string
    :param adjacent_shard_to_merge: The shard ID of the adjacent shard
        for the merge.
    """
    body = {
        'StreamName': stream_name,
        'ShardToMerge': shard_to_merge,
        'AdjacentShardToMerge': adjacent_shard_to_merge,
    }
    return self.make_request(action='MergeShards',
                             body=json.dumps(body))
def put_record(self, stream_name, data, partition_key,
               explicit_hash_key=None,
               sequence_number_for_ordering=None,
               exclusive_minimum_sequence_number=None,
               b64_encode=True):
    """Put a single data record into an Amazon Kinesis stream.

    The partition key selects the destination shard via an MD5 hash of
    the key, unless ``explicit_hash_key`` overrides it.  The response
    carries the shard ID and the sequence number assigned to the
    record.  Raises `ProvisionedThroughputExceededException` when the
    shard's provisioned throughput is exceeded.

    :type stream_name: string
    :param stream_name: The name of the stream to put the data record into.

    :type data: blob
    :param data: The payload to put into the record; Base64-encoded
        automatically unless ``b64_encode`` is False.  The maximum
        size of the decoded payload is 50 KB.

    :type partition_key: string
    :param partition_key: Unicode string (maximum length 256 bytes)
        hashed to determine which shard the record is assigned to.

    :type explicit_hash_key: string
    :param explicit_hash_key: The hash value used to explicitly
        determine the shard, overriding the partition key hash.

    :type sequence_number_for_ordering: string
    :param sequence_number_for_ordering: Set to the sequence number of
        the previous record (same client, same partition key) to
        guarantee strictly increasing sequence numbers; otherwise
        records are coarsely ordered by arrival time.

    :type exclusive_minimum_sequence_number: string
    :param exclusive_minimum_sequence_number: Accepted but not sent to
        the service; kept for backwards compatibility of the signature.

    :type b64_encode: boolean
    :param b64_encode: Whether to Base64 encode ``data``.  Set to
        ``False`` if ``data`` is already encoded to prevent double
        encoding.
    """
    body = {
        'StreamName': stream_name,
        'Data': data,
        'PartitionKey': partition_key,
    }
    if explicit_hash_key is not None:
        body['ExplicitHashKey'] = explicit_hash_key
    if sequence_number_for_ordering is not None:
        body['SequenceNumberForOrdering'] = sequence_number_for_ordering
    if b64_encode:
        body['Data'] = base64.b64encode(
            body['Data'].encode('utf-8')).decode('utf-8')
    return self.make_request(action='PutRecord',
                             body=json.dumps(body))
def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
    """Split a shard into two new shards, increasing stream capacity.

    The new hash key is the position where the shard's hash key range
    is divided; all hash keys at or above it go to one child shard,
    the rest to the other.  The operation is asynchronous: the stream
    status becomes UPDATING and returns to ACTIVE when the split
    completes.  Raises `ResourceInUseException` unless the stream is
    ACTIVE, and `LimitExceededException` when the account's shard
    limit would be exceeded.

    :type stream_name: string
    :param stream_name: The name of the stream for the shard split.

    :type shard_to_split: string
    :param shard_to_split: The shard ID of the shard to split.

    :type new_starting_hash_key: string
    :param new_starting_hash_key: A hash key value for the starting
        hash key of one of the child shards; must lie within the hash
        key range being mapped into the shard.
    """
    body = {
        'StreamName': stream_name,
        'ShardToSplit': shard_to_split,
        'NewStartingHashKey': new_starting_hash_key,
    }
    return self.make_request(action='SplitShard',
                             body=json.dumps(body))
def make_request(self, action, body):
    """POST a JSON request to the Kinesis endpoint and decode the reply.

    :type action: string
    :param action: API operation name, e.g. ``CreateStream``; combined
        with ``TargetPrefix`` into the ``X-Amz-Target`` header.

    :type body: string
    :param body: JSON-serialized request parameters.

    Returns the decoded JSON response body (or None when the service
    sent an empty 200 response).  On a non-200 status, raises the boto
    exception mapped from the service fault type in ``_faults``,
    falling back to ``ResponseError``.
    """
    headers = {
        'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
        'Host': self.region.endpoint,
        'Content-Type': 'application/x-amz-json-1.1',
        'Content-Length': str(len(body)),
    }
    http_request = self.build_base_http_request(
        method='POST', path='/', auth_path='/', params={},
        headers=headers, data=body)
    response = self._mexe(http_request, sender=None,
                          override_num_retries=10)
    response_body = response.read().decode('utf-8')
    boto.log.debug(response.getheaders())
    boto.log.debug(response_body)
    if response.status == 200:
        if response_body:
            return json.loads(response_body)
        return None
    # Error path: map the service fault type to a boto exception.
    # Guard against an empty error body, which would otherwise make
    # json.loads raise an unrelated ValueError and mask the real fault.
    json_body = json.loads(response_body) if response_body else {}
    fault_name = json_body.get('__type', None)
    exception_class = self._faults.get(fault_name, self.ResponseError)
    raise exception_class(response.status, response.reason,
                          body=json_body)
| mit |
syhost/android_kernel_xiaomi_armani | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    """Record the moment a thread starts blocking on a futex."""
    if (op & FUTEX_CMD_MASK) != FUTEX_WAIT:
        # Only FUTEX_WAIT callers block; ignore wakers and other ops.
        return
    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
                             nr, ret):
    """Accumulate the blocked time when a futex wait returns."""
    # `in` instead of the Python-2-only dict.has_key() (same behavior
    # on Python 2, and valid on Python 3).
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]
def trace_begin():
    """Called once when tracing starts; tell the user how to finish."""
    # Function-call form of print: identical output on Python 2 for a
    # single argument, and valid syntax on Python 3.
    print("Press control+C to stop and show the summary")
def trace_end():
    """Print per-(thread, lock) futex contention statistics."""
    # Renamed locals: the originals shadowed the min/max builtins.
    for (tid, lock) in lock_waits:
        lo, hi, avg, count = lock_waits[tid, lock]
        # Function-call form of print with a single argument is
        # identical on Python 2 and valid on Python 3.
        print("%s[%d] lock %x contended %d times, %d avg ns" %
              (process_names[tid], tid, lock, count, avg))
| gpl-2.0 |
bzbarsky/servo | tests/wpt/css-tests/tools/py/py/_code/_py2traceback.py | 275 | 2765 | # copied from python-2.7.3's traceback.py
# CHANGES:
# - some_str is replaced, trying to create unicode strings
#
import types
def format_exception_only(etype, value):
    """Format the exception part of a traceback.

    The arguments are the exception type and value such as given by
    sys.last_type and sys.last_value. The return value is a list of
    strings, each ending in a newline.

    Normally, the list contains a single string; however, for
    SyntaxError exceptions, it contains several lines that (when
    printed) display detailed information about where the syntax
    error occurred.

    The message indicating which exception occurred is always the last
    string in the list.
    """
    # An instance should not have a meaningful value parameter, but
    # sometimes does, particularly for string exceptions, such as
    # >>> raise string1, string2  # deprecated
    #
    # Clear these out first because issubtype(string1, SyntaxError)
    # would throw another exception and mask the original problem.
    # NOTE: types.InstanceType (old-style classes) and string exceptions
    # exist only on Python 2; this branch is Python-2 specific.
    if (isinstance(etype, BaseException) or
        isinstance(etype, types.InstanceType) or
        etype is None or type(etype) is str):
        return [_format_final_exc_line(etype, value)]

    stype = etype.__name__

    if not issubclass(etype, SyntaxError):
        # Any non-syntax error: a single "Type: message" line
        return [_format_final_exc_line(stype, value)]

    # It was a syntax error; show exactly where the problem was found.
    lines = []
    try:
        msg, (filename, lineno, offset, badline) = value.args
    except Exception:
        # value.args did not have the expected SyntaxError shape;
        # fall through and format it like a plain exception.
        pass
    else:
        filename = filename or "<string>"
        lines.append('  File "%s", line %d\n' % (filename, lineno))
        if badline is not None:
            lines.append('    %s\n' % badline.strip())
            if offset is not None:
                caretspace = badline.rstrip('\n')[:offset].lstrip()
                # non-space whitespace (likes tabs) must be kept for alignment
                caretspace = ((c.isspace() and c or ' ') for c in caretspace)
                # only three spaces to account for offset1 == pos 0
                lines.append('   %s^\n' % ''.join(caretspace))
        value = msg
    lines.append(_format_final_exc_line(stype, value))
    return lines
def _format_final_exc_line(etype, value):
    """Build the last traceback line, e.g. "ValueError: bad input\\n".

    Note: despite what the original comment said, this returns a single
    string, not a list; the caller wraps it in a list itself.
    """
    text = _some_str(value)
    if value is None or not text:
        return "%s\n" % etype
    return "%s: %s\n" % (etype, text)
def _some_str(value):
try:
return unicode(value)
except Exception:
try:
return str(value)
except Exception:
pass
return '<unprintable %s object>' % type(value).__name__
| mpl-2.0 |
practicing01/Urho3D-Blender | io_mesh_urho/decompose.py | 2 | 90333 |
#
# This script is licensed as public domain.
# Based on "Export Inter-Quake Model (.iqm/.iqe)" by Lee Salzman
#
# http://www.blender.org/documentation/blender_python_api_2_63_2/info_best_practice.html
# http://www.blender.org/documentation/blender_python_api_2_63_2/info_gotcha.html
# Blender types:
# http://www.blender.org/documentation/blender_python_api_2_63_7/bpy.types.Mesh.html
# http://www.blender.org/documentation/blender_python_api_2_63_7/bpy.types.MeshTessFace.html
# http://www.blender.org/documentation/blender_python_api_2_63_7/bpy.types.Material.html
# UV:
# http://www.blender.org/documentation/blender_python_api_2_63_2/bpy.types.MeshTextureFaceLayer.html
# http://www.blender.org/documentation/blender_python_api_2_63_2/bpy.types.MeshTextureFace.html
# Skeleton:
# http://www.blender.org/documentation/blender_python_api_2_66_4/bpy.types.Armature.html
# http://www.blender.org/documentation/blender_python_api_2_66_4/bpy.types.Bone.html
# http://www.blender.org/documentation/blender_python_api_2_66_4/bpy.types.Pose.html
# http://www.blender.org/documentation/blender_python_api_2_66_4/bpy.types.PoseBone.html
# Animations:
# http://www.blender.org/documentation/blender_python_api_2_66_4/bpy.types.Action.html
# http://www.blender.org/documentation/blender_python_api_2_66_4/bpy.types.AnimData.html
# Vertex color:
# http://www.blender.org/documentation/blender_python_api_2_66_4/bpy.types.MeshColor.html
# Morphs (Shape keys):
# http://www.blender.org/documentation/blender_python_api_2_66_4/bpy.types.Key.html
# http://www.blender.org/documentation/blender_python_api_2_66_4/bpy.types.ShapeKey.html
# Inverse transpose for normals
# http://www.arcsynthesis.org/gltut/Illumination/Tut09%20Normal%20Transformation.html
# Python binary writing:
# http://docs.python.org/2/library/struct.html
# Debug switch: non-zero enables the timing printouts in OptimizeIndices
DEBUG = 0

import bpy
import bmesh
import math
import time as ostime
from mathutils import Vector, Matrix, Quaternion, Color
from collections import OrderedDict
import os
import operator
import heapq
import logging
import re

# Logger shared with the rest of the exporter add-on
log = logging.getLogger("ExportLogger")
#------------------
# Geometry classes
#------------------
# Vertex class
class TVertex:
    """A single vertex to export.

    Every attribute may be None when the corresponding channel is not
    present/exported.
    """

    def __init__(self):
        # Index of the vertex in the Blender buffer
        self.blenderIndex = None
        # Position of the vertex: Vector((0.0, 0.0, 0.0))
        self.pos = None
        # Normal of the vertex: Vector((0.0, 0.0, 0.0))
        self.normal = None
        # Color of the vertex: (0, 0, 0, 0)...(255, 255, 255, 255)
        self.color = None
        # UV coordinates of the vertex: Vector((0.0, 0.0))..Vector((1.0, 1.0))
        self.uv = None
        # UV2 coordinates of the vertex: Vector((0.0, 0.0))..Vector((1.0, 1.0))
        self.uv2 = None
        # Tangent of the vertex: Vector((0.0, 0.0, 0.0, 0.0))
        self.tangent = None
        # Bitangent of the vertex: Vector((0.0, 0.0, 0.0))
        self.bitangent = None
        # Bones weights: list of tuple(boneIndex, weight)
        self.weights = None

    def isMorphed(self, other):
        """True when this vertex is a changed morph of vertex 'other'."""
        # TODO: compare floats with a epsilon margin?
        if other.pos is None:
            return True
        # Tangent is deliberately excluded: it is not calculated yet here.
        changed = ((self.pos and self.pos != other.pos) or
                   (self.normal and self.normal != other.normal) or
                   (self.uv and self.uv != other.uv))
        return bool(changed)

    def __eq__(self, other):
        # Identity used for de-duplication: position, normal and UV only
        # (color and weights are deliberately left out).
        # TODO: compare floats with a epsilon margin?
        return (self.pos == other.pos and
                self.normal == other.normal and
                self.uv == other.uv)

    def isEqual(self, other):
        # TODO: compare floats with a epsilon margin?
        return self == other

    def __hash__(self):
        # Consistent with __eq__: mix position, normal and UV components
        h = 0
        if self.pos:
            h ^= hash(self.pos.x) ^ hash(self.pos.y) ^ hash(self.pos.z)
        if self.normal:
            h ^= hash(self.normal.x) ^ hash(self.normal.y) ^ hash(self.normal.z)
        if self.uv:
            h ^= hash(self.uv.x) ^ hash(self.uv.y)
        return h

    def __str__(self):
        parts = [" coords: {: .3f} {: .3f} {: .3f}".format(self.pos.x, self.pos.y, self.pos.z),
                 "\n normals: {: .3f} {: .3f} {: .3f}".format(self.normal.x, self.normal.y, self.normal.z)]
        if self.color:
            parts.append("\n color: {:3d} {:3d} {:3d} {:3d}".format(self.color[0], self.color[1], self.color[2], self.color[3]))
        if self.uv:
            parts.append("\n uv: {: .3f} {: .3f}".format(self.uv[0], self.uv[1]))
        if self.uv2:
            parts.append("\n uv2: {: .3f} {: .3f}".format(self.uv2[0], self.uv2[1]))
        if self.tangent:
            parts.append("\n tangent: {: .3f} {: .3f} {: .3f}".format(self.tangent.x, self.tangent.y, self.tangent.z))
        if self.weights:
            parts.append("\n weights: ")
            parts.extend("{:d} {:.3f} ".format(w[0], w[1]) for w in self.weights)
        return "".join(parts)
# Geometry LOD level class
class TLodLevel:
    """One LOD of a geometry: switch distance, vertex index set, triangle list."""

    def __init__(self):
        self.distance = 0.0     # switch distance of this LOD
        self.indexSet = set()   # set of all vertex indices used by this LOD
        self.triangleList = []  # triangles as triples of vertex indices

    def __str__(self):
        s = " distance: {:.3f}\n".format(self.distance)
        s += " triangles: "
        for n, tri in enumerate(self.triangleList):
            # wrap the listing every 5 triangles
            if n and (n % 5) == 0:
                s += "\n "
            s += "{:3d} {:3d} {:3d} |".format(tri[0], tri[1], tri[2])
        return s
# Geometry class
class TGeometry:
    """One exported geometry: its LOD levels and the associated material name."""

    def __init__(self):
        self.lodLevels = []       # list of TLodLevel
        self.materialName = None  # name of the associated Blender material

    def __str__(self):
        return "".join(" {:d}\n".format(i) + str(lod)
                       for i, lod in enumerate(self.lodLevels))
#------------------
# Morph classes
#------------------
class TMorph:
    """A morph (shape key): the subset of vertices it changes plus its triangles."""

    def __init__(self, name):
        # Morph name
        self.name = name
        # Set of all vertex indices used by this morph
        self.indexSet = set()
        # List of triangles of the morph (triples of vertex indices)
        self.triangleList = []
        # Maps vertex index to morphed TVertex
        self.vertexMap = {}

    def __str__(self):
        s = " name: {:s}\n".format(self.name)
        s += " Vertices: "
        # FIX: this loop used 'self.vertices', an attribute that does not
        # exist (the map is stored in 'vertexMap'), so printing a TMorph
        # always raised AttributeError.
        for k, v in sorted(self.vertexMap.items()):
            s += "\n index: {:d}".format(k)
            s += "\n" + str(v)
        return s
#-------------------
# Materials classes
#-------------------
# NOTE: in Blender images names are unique
class TMaterial:
    """Exported material: colors, intensities, flags and texture file names.

    All channels default to None (= not set). NOTE: in Blender image names
    are unique, so textures are referenced by file name only (no path).
    """

    def __init__(self, name):
        # Material name
        self.name = name
        # Color channels (tuples) and their scalar intensities/hardness
        self.diffuseColor = None
        self.diffuseIntensity = None
        self.specularColor = None
        self.specularIntensity = None
        self.specularHardness = None
        self.emitColor = None
        self.emitIntensity = None
        # Opacity (1.0 = fully opaque)
        self.opacity = None
        # Render flags
        self.alphaMask = False
        self.twoSided = False
        # Texture file names (no path)
        self.diffuseTexName = None
        self.normalTexName = None
        self.specularTexName = None
        self.emitTexName = None
        self.lightmapTexName = None
        self.ambientLightTexName = None

    def __eq__(self, other):
        # Compare by name; accept either another TMaterial or a bare name
        return self.name == getattr(other, 'name', other)

    def __str__(self):
        return (" name: {:s}\n"
                " image: \"{:s}\""
                .format(self.name, self.diffuseTexName) )
#--------------------
# Animations classes
#--------------------
class TBone:
    """Skeleton bone: bind pose relative to the parent plus the object-space transform."""

    def __init__(self, index, parentName, position, rotation, scale, transform):
        # Position of the bone in the OrderedDict
        self.index = index
        # Name of the parent bone
        self.parentName = parentName
        # Bone position in the parent bone tail space (applied first)
        self.bindPosition = position
        # Bone rotation in the parent bone tail space (applied second)
        self.bindRotation = rotation
        # Bone scale
        self.bindScale = scale
        # Bone transformation in object space
        self.worldTransform = transform

    def __str__(self):
        parts = [" bind pos " + str(self.bindPosition),
                 " bind rot " + str(self.bindRotation),
                 str(self.worldTransform)]
        return "\n".join(parts)
class TFrame:
    """One animation keyframe: a time plus position/rotation/scale values."""

    def __init__(self, time, position, rotation, scale):
        self.time = time
        self.position = position
        self.rotation = rotation
        self.scale = scale

    def hasMoved(self, other):
        # True when any transform component differs (time is ignored)
        same = (self.position == other.position and
                self.rotation == other.rotation and
                self.scale == other.scale)
        return not same
class TTrack:
    """Named animation track holding a list of TFrame keyframes."""

    def __init__(self, name):
        self.name = name    # track name
        self.frames = []    # TFrame list
class TTrigger:
    """Animation trigger: a named event fired at a given time."""

    def __init__(self, name):
        # Trigger name
        self.name = name
        # Time in seconds
        self.time = None
        # Event data (variant, see typeNames[] in Variant.cpp)
        self.data = None
class TAnimation:
    """Named animation: a set of tracks plus optional triggers."""

    def __init__(self, name):
        self.name = name
        self.tracks = []    # TTrack list
        self.triggers = []  # TTrigger list
#---------------------
# Export data classes
#---------------------
class TData:
    """Container for everything decomposed from a Blender object, ready to export."""

    def __init__(self):
        self.objectName = None
        self.blenderObjectName = None
        # All the TVertex of all the geometries
        self.verticesList = []
        # TGeometry list; each geometry holds triangles as vertex indices
        self.geometriesList = []
        # TMorph list: subsets of the vertices with modified positions
        self.morphsList = []
        # TMaterial list
        self.materialsList = []
        # Material name -> geometry index
        self.materialGeometryMap = {}
        # Bone name -> TBone, in insertion order
        self.bonesMap = OrderedDict()
        # TAnimation list
        self.animationsList = []
class TOptions:
    """All user-selectable export options, with their default values."""
    def __init__(self):
        # --- LOD ---
        self.lodUpdatedGeometryIndices = set()
        self.lodDistance = None
        # --- General / object handling ---
        self.doForceElements = False
        self.mergeObjects = False
        self.mergeNotMaterials = False
        self.useLods = False
        self.onlySelected = False
        # Global orientation/scale applied to exported data
        self.orientation = Quaternion()
        self.scale = 1.0
        self.rbmass = 0.0
        self.usegravity = False
        self.globalOrigin = True
        self.bonesGlobalOrigin = False #useless
        self.actionsGlobalOrigin = False
        self.applyModifiers = False
        self.applySettings = 'PREVIEW'
        # --- Skeleton / bones ---
        self.doBones = True
        self.doOnlyKeyedBones = False
        self.doOnlyDeformBones = False
        self.doOnlyVisibleBones = False
        self.skinBoneParent = False
        self.derigifyArmature = False
        # --- Animations: which actions/strips/tracks to export ---
        self.doAnimations = True
        self.doAllActions = True
        self.doUsedActions = False
        self.doSelectedStrips = False
        self.doSelectedTracks = False
        self.doStrips = False
        self.doTracks = False
        self.doTimeline = False
        self.doTriggers = False
        # Which animation channels to keep
        self.doAnimationPos = True
        self.doAnimationRot = True
        self.doAnimationSca = True
        # --- Geometry: which vertex channels to export ---
        self.doGeometries = True
        self.doGeometryPos = True
        self.doGeometryNor = True
        self.doGeometryCol = True
        self.doGeometryColAlpha = False
        self.doGeometryUV = True
        self.doGeometryUV2 = False
        self.doGeometryTan = True
        self.doGeometryWei = True
        # --- Morphs (shape keys) ---
        self.doMorphs = True
        self.doMorphNor = True
        self.doMorphTan = True
        self.doMorphUV = True
        # --- Misc ---
        self.doOptimizeIndices = True
        self.doMaterials = True
#--------------------
# “Computing Tangent Space Basis Vectors for an Arbitrary Mesh” by Lengyel, Eric.
# Terathon Software 3D Graphics Library, 2001.
# http://www.terathon.com/code/tangent.html
#--------------------
def GenerateTangents(tLodLevels, tVertexList, errorsMem):
    """Compute per-vertex tangents (xyz + handedness in w) and bitangents.

    Implements Lengyel's method (see the header comment above): accumulate a
    per-triangle tangent/bitangent on each of its vertices, then Gram-Schmidt
    orthogonalize against the normal. Empty LODs are removed from
    'tLodLevels'; a vertex with missing position/normal/UV aborts the whole
    generation. Offending Blender vertex indices are collected into the
    'errorsMem' sets ("null UV area", "incomplete UV") when provided.
    """
    if not tVertexList:
        log.warning("No vertices, tangent generation cancelled.")
        return

    nullUvIndices = None
    incompleteUvIndices = None
    if errorsMem:
        nullUvIndices = errorsMem.Get("null UV area", set() )
        incompleteUvIndices = errorsMem.Get("incomplete UV", set() )

    # Init the values
    tangentOverwritten = 0
    # reversed() so removing an empty LOD does not skip the next element
    for tLodLevel in reversed(tLodLevels):
        if not tLodLevel.indexSet or not tLodLevel.triangleList:
            log.warning("Empty LOD, tangent generation skipped.")
            tLodLevels.remove(tLodLevel)
            continue

        for vertexIndex in tLodLevel.indexSet:
            vertex = tVertexList[vertexIndex]
            # Check if the tangent was already calculated (4 components) for this vertex and we're overwriting it
            if vertex.tangent and len(vertex.tangent) == 4:
                tangentOverwritten += 1
            # Check if we have all the needed data to do the calculations
            if vertex.pos is None:
                if incompleteUvIndices is not None:
                    incompleteUvIndices.add(vertex.blenderIndex)
                log.warning("Missing position on vertex {:d}, tangent generation cancelled.".format(vertex.blenderIndex))
                return
            if vertex.normal is None:
                if incompleteUvIndices is not None:
                    incompleteUvIndices.add(vertex.blenderIndex)
                log.warning("Missing normal on vertex {:d}, tangent generation cancelled.".format(vertex.blenderIndex))
                return
            if vertex.uv is None:
                if incompleteUvIndices is not None:
                    incompleteUvIndices.add(vertex.blenderIndex)
                log.warning("Missing UV on vertex {:d}, tangent generation cancelled.".format(vertex.blenderIndex))
                return
            # Init tangent (3 components) and bitangent vectors
            vertex.tangent = Vector((0.0, 0.0, 0.0))
            vertex.bitangent = Vector((0.0, 0.0, 0.0))

    if tangentOverwritten:
        log.warning("Overwriting {:d} tangents".format(tangentOverwritten))

    # Calculate tangent and bitangent
    invalidUV = False
    for tLodLevel in tLodLevels:
        for i, triangle in enumerate(tLodLevel.triangleList):
            # For each triangle, we have 3 vertices vertex1, vertex2, vertex3, each of the have their UV coordinates, we want to
            # find two unit orthogonal vectors (tangent and bitangent) such as we can express each vertex position as a function
            # of the vertex UV:
            # VertexPosition = Tangent * f'(VertexUV) + BiTangent * f"(VertexUV)
            # Actually we are going to express them relatively to a vertex chosen as origin (vertex1):
            # vertex - vertex1 = Tangent * (vertex.u - vertex1.u) + BiTangent * (vertex.v - vertex1.v)
            # We have two equations, one for vertex2-vertex1 and one for vertex3-vertex1, if we put them in a system and solve it
            # we can obtain Tangent and BiTangent:
            # [T; B] = [u1, v1; u2, v2]^-1 * [V2-V1; V3-V1]
            vertex1 = tVertexList[triangle[0]]
            vertex2 = tVertexList[triangle[1]]
            vertex3 = tVertexList[triangle[2]]
            # First equation: [x1, y1, z1] = Tangent * u1 + BiTangent * v1
            x1 = vertex2.pos.x - vertex1.pos.x
            y1 = vertex2.pos.y - vertex1.pos.y
            z1 = vertex2.pos.z - vertex1.pos.z
            u1 = vertex2.uv.x - vertex1.uv.x
            v1 = vertex2.uv.y - vertex1.uv.y
            # Second equation: [x2, y2, z2] = Tangent * u2 + BiTangent * v2
            x2 = vertex3.pos.x - vertex1.pos.x
            y2 = vertex3.pos.y - vertex1.pos.y
            z2 = vertex3.pos.z - vertex1.pos.z
            u2 = vertex3.uv.x - vertex1.uv.x
            v2 = vertex3.uv.y - vertex1.uv.y
            # Determinant of the matrix [u1 v1; u2 v2]
            d = u1 * v2 - u2 * v1
            # If the determinant is zero then the points (0,0), (u1,v1), (u2,v2) are in line, this means
            # the area on the UV map of this triangle is null. This is an error, we must skip this triangle.
            if d == 0:
                if nullUvIndices is not None:
                    nullUvIndices.add(vertex1.blenderIndex)
                    nullUvIndices.add(vertex2.blenderIndex)
                    nullUvIndices.add(vertex3.blenderIndex)
                invalidUV = True
                continue
            t = Vector( ((v2 * x1 - v1 * x2) / d, (v2 * y1 - v1 * y2) / d, (v2 * z1 - v1 * z2) / d) )
            b = Vector( ((u1 * x2 - u2 * x1) / d, (u1 * y2 - u2 * y1) / d, (u1 * z2 - u2 * z1) / d) )
            # Accumulate on all three vertices; shared vertices average over
            # every triangle that uses them
            vertex1.tangent += t;
            vertex2.tangent += t;
            vertex3.tangent += t;
            vertex1.bitangent += b;
            vertex2.bitangent += b;
            vertex3.bitangent += b;

    if invalidUV:
        log.error("Invalid UV, the area in the UV map is too small.")

    # Gram-Schmidt orthogonalize normal, tangent and bitangent
    for tLodLevel in tLodLevels:
        for vertexIndex in tLodLevel.indexSet:
            vertex = tVertexList[vertexIndex]
            # Skip already calculated vertices
            if len(vertex.tangent) == 4:
                continue
            # Unit vector perpendicular to normal and in the same plane of normal and tangent
            tOrtho = ( vertex.tangent - vertex.normal * vertex.normal.dot(vertex.tangent) ).normalized()
            # Unit vector perpendicular to the plane of normal and tangent
            bOrtho = vertex.normal.cross(vertex.tangent).normalized()
            # Calculate handedness: if bOrtho and bitangent have the different directions, save the verse
            # in tangent.w, so we can reconstruct bitangent by: tangent.w * normal.cross(tangent)
            w = 1.0 if bOrtho.dot(vertex.bitangent) >= 0.0 else -1.0
            vertex.bitangent = bOrtho
            vertex.tangent = Vector((tOrtho.x, tOrtho.y, tOrtho.z, w))
#--------------------
# Linear-Speed Vertex Cache Optimisation algorithm by Tom Forsyth
# https://home.comcast.net/~tom_forsyth/papers/fast_vert_cache_opt.html
#--------------------
# This is an optimized version, but it is still slow.
# (on an average pc, 5 minutes for 30K smooth vertices)
# We try to sort triangles in the index buffer so that we gain an optimal use
# of the hardware vertices cache.
# We assign a score to each triangle, we find the best and save it in a new
# ordered list.
# The score of each triangle is the sum of the score of its vertices, and the
# score of a vertex is higher if it is:
# - used recently (it is still in the cache) but we also try to avoid the last
# triangle added (n this way we get better result),
# - lonely isolated vertices (otherwise the will be keep for last and drawing
# them will require an higher cost)
# The order of vertices in the triangle does not matter.
# We'll apply this optimization to each lod of each geometry.
# These are the constants used in the algorithm:
# Tunable constants for the vertex-cache scoring function
VERTEX_CACHE_SIZE = 32
CACHE_DECAY_POWER = 1.5
LAST_TRI_SCORE = 0.75
VALENCE_BOOST_SCALE = 2.0
VALENCE_BOOST_POWER = 0.5

def CalculateScore(rank):
    """Recompute rank.score in place from its cache position and use count."""
    if rank.useCount == 0:
        # Vertex is no longer referenced by any remaining triangle
        rank.score = -1.0
        return
    if rank.cachePosition < 0:
        # Vertex is not in the FIFO cache: no cache bonus
        cacheScore = 0.0
    elif rank.cachePosition < 3:
        # One of the three vertices of the most recent triangle: fixed
        # score, so the vertex order inside the triangle does not matter
        cacheScore = LAST_TRI_SCORE
    else:
        # The higher in the cache, the higher the score (power falloff)
        cacheScore = pow(1.0 - float(rank.cachePosition - 3) / (VERTEX_CACHE_SIZE - 3),
                         CACHE_DECAY_POWER)
    # Bonus for vertices with few remaining uses, so lone verts go early
    rank.score = cacheScore + VALENCE_BOOST_SCALE * pow(rank.useCount, -VALENCE_BOOST_POWER)
# Triangles score list sizes
TRIANGLERANK_SIZE = 500
TRIANGLERANK_MAX_SIZE = 505

def OptimizeIndices(lodLevel):
    """Reorder lodLevel.triangleList in place for better vertex-cache reuse.

    Implements Forsyth's linear-speed vertex cache optimisation (see the
    comment block above): each vertex gets a score from its position in a
    simulated FIFO cache and its remaining use count; triangles are emitted
    best-score-first, and after each emission only the scores actually
    affected are recomputed.
    """
    # Ranks are used to store data for each vertex
    class Rank:
        def __init__(self):
            self.score = 0.0
            self.useCount = 1
            self.cachePosition = -1
    # Create a map: vertex index to its corresponding Rank
    ranking = {}
    # This list contains the original triangles (not in optimal order), we'll move them
    # one by one in a new list following the optimal order
    oldTriangles = lodLevel.triangleList
    # For each vertex index of each triangle increment the use counter
    # (we can find the same vertex index more than once)
    for triangle in oldTriangles:
        for index in triangle:
            try:
                ranking[index].useCount += 1
            except KeyError:
                ranking[index] = Rank()
    # Calculate the first round of scores
    # (Rank is mutable, so CalculateScore will be able to modify it)
    for rank in ranking.values():
        CalculateScore(rank)
    # Ths list will contain the triangles sorted in optimal order
    newTriangles = []
    # Cache of vertex indices
    vertexCache = []
    # The original algorithm was:
    # - scan all the old triangles and find the one with the best score;
    # - move it to the new triangles;
    # - move its vertices in the cache;
    # - recalculate the score on all the vertices on the cache.
    # The slowest part is the first step, scanning all the old triangles,
    # but in the last step we update only a little subset of these triangles,
    # and it is a waste to recalculate the triangle score of each old triangle.
    # So we do this:
    # - create a map 'trianglesMap': vertex index to triangles;
    # - keep a list 'trianglesRanking' of the best triangles;
    # - at first this list is empty, we start adding triangles; we add tuples like
    #   (score, triangle) and we keep track of the min score, we don't add triangles
    #   with score lower than the min; for now we add triangles without bothering
    #   about order; if the triangle is already present in the list we only update
    #   its score (even if it is lower);
    # - when the list is a little too big (TRIANGLERANK_MAX_SIZE), we sort the list
    #   by score and we only keep the best TRIANGLERANK_SIZE triangles, we update
    #   the min score;
    # - after scanning all the old triangles, we take out from the list the best
    #   triangle;
    # - move it to the new triangles and remove it from the map;
    # - move its vertices in the cache;
    # - recalculate the score on all the vertices in the cache, if the score of one
    #   vertex is changed, we use the map to find what triangles are affected and
    #   we add them to the list (unordered and only if their score is > min);
    # - now when we repeat we have the list already populated, so we don't need to
    #   recalculate all old triangles scores, we only need to sort the list and take
    #   out the best triangle.
    # Vertex index to triangle indices list map
    trianglesMap = {}
    # Populate the map
    for triangle in oldTriangles:
        for vertexIndex in triangle:
            try:
                triangleList = trianglesMap[vertexIndex]
            except KeyError:
                triangleList = []
                trianglesMap[vertexIndex] = triangleList
            triangleList.append(triangle)

    # Bounded "best candidates" list of (score, triangle) tuples
    class TrianglesRanking:
        def __init__(self):
            self.ranklist = []
            self.min = None
            self.isSorted = True
        def update(self, triangle):
            # Sum the score of all its vertex.
            # >> This is the slowest part of the algorithm <<
            triangleScore = ranking[triangle[0]].score + ranking[triangle[1]].score + ranking[triangle[2]].score
            # If needed, add it to the list
            if not self.ranklist:
                self.ranklist.append( (triangleScore, triangle) )
                self.min = triangleScore
            else:
                # We add only triangles with score > min
                if triangleScore > self.min:
                    found = False
                    # Search of the triangle is already present in the list
                    for i, rank in enumerate(self.ranklist):
                        if triangle == rank[1]:
                            if triangleScore != rank[0]:
                                self.ranklist[i] = (triangleScore, triangle)
                                self.isSorted = False
                            found = True
                            break
                    # It is a new triangle
                    if not found:
                        self.ranklist.append( (triangleScore, triangle) )
                        self.isSorted = False
        def sort(self):
            if self.isSorted:
                return
            #self.ranklist = sorted(self.ranklist, key=operator.itemgetter(0), reverse=True)[:TRIANGLERANK_SIZE]
            self.ranklist = heapq.nlargest(TRIANGLERANK_SIZE, self.ranklist, key = operator.itemgetter(0))
            self.min = self.ranklist[-1][0]
            self.isSorted = True
        def popBest(self):
            bestTriangle = self.ranklist[0][1]
            del self.ranklist[0]
            return bestTriangle

    trianglesRanking = TrianglesRanking()
    # Progress counter
    progressCur = 0
    progressTot = 0.01 * len(oldTriangles)
    if DEBUG: ttt = ostime.time() #!TIME
    # While there still are unsorted triangles
    while oldTriangles:
        # Print progress
        if (progressCur & 0x7F) == 0:
            print("{:.3f}%\r".format(progressCur / progressTot), end='' )
        progressCur += 1
        # When the list is empty, we need to scan all the old triangles
        if not trianglesRanking.ranklist:
            for triangle in oldTriangles:
                # We add the triangle but we don't search for the best one
                trianglesRanking.update(triangle)
                # If the list is too big, sort and truncate it
                if len(trianglesRanking.ranklist) > TRIANGLERANK_MAX_SIZE:
                    trianglesRanking.sort()
        # NOTE(review): a bare object is always truthy, so the 'else' branch
        # below is unreachable; this was probably meant to test
        # trianglesRanking.ranklist (harmless here: the list is non-empty
        # after the refill loop above).
        if trianglesRanking:
            # Only if needed, we sort and truncate
            trianglesRanking.sort()
            # We take the best triangles out of the list
            bestTriangle = trianglesRanking.popBest()
        else:
            log.error("Could not find next triangle")
            return
        # Move the best triangle to the output list
        oldTriangles.remove(bestTriangle)
        newTriangles.append(bestTriangle)
        # Model the LRU cache behaviour
        # Recreate the cache removing the vertices of the best triangle
        vertexCache = [i for i in vertexCache if i not in bestTriangle]
        for vertexIndex in bestTriangle:
            # Then push them to the front
            vertexCache.insert(0, vertexIndex)
            # Decrement the use counter of its vertices
            ranking[vertexIndex].useCount -= 1
            # Remove best triangle from the map
            triangleList = trianglesMap[vertexIndex]
            triangleList.remove(bestTriangle)
        # Update positions & scores of all vertices in the cache
        # Give position -1 if vertex is going to be erased
        for i, vertexIndex in enumerate(vertexCache):
            rank = ranking[vertexIndex]
            if (i > VERTEX_CACHE_SIZE):
                rank.cachePosition = -1
            else:
                rank.cachePosition = i
            # Calculate the new score
            oldScore = rank.score
            CalculateScore(rank)
            # If the score is changed
            if oldScore != rank.score:
                # Add to the list all the triangles affected
                triangleList = trianglesMap[vertexIndex]
                for triangle in triangleList:
                    trianglesRanking.update(triangle)
        # Finally erase the extra vertices
        vertexCache[:] = vertexCache[:VERTEX_CACHE_SIZE]
    if DEBUG: print("[TIME2] {:.4f}".format(ostime.time() - ttt) ) #!TIME
    # Rewrite the index data now
    lodLevel.triangleList = newTriangles
#--------------------
# Decompose armatures
#--------------------
def SetRestPosePosition(context, armatureObj):
    """Force the armature into rest pose, un-hide it and toggle edit/object
    mode so Blender re-evaluates the mesh data.

    Returns [previous pose_position, previous hide] to be handed back to
    RestorePosePosition, or None when no armature is given.
    """
    if not armatureObj:
        return None
    # Force the armature in the rest position (warning: https://developer.blender.org/T24674)
    # This should reset bones matrices ok, but for sure it is not resetting the mesh tessfaces
    # positions
    savedPosePositionAndVisibility = [armatureObj.data.pose_position, armatureObj.hide]
    armatureObj.data.pose_position = 'REST'
    armatureObj.hide = False
    # This should help to recalculate all the mesh vertices, it is needed by decomposeMesh
    # and maybe it helps decomposeArmature (but no problem was seen there)
    # TODO: find the correct way, for sure it is not this
    objects = context.scene.objects
    savedObjectActive = objects.active
    objects.active = armatureObj
    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='EDIT', toggle=False)
    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
    objects.active = savedObjectActive
    return savedPosePositionAndVisibility
def RestorePosePosition(armatureObj, savedValue):
    """Undo SetRestPosePosition: restore [pose_position, hide] on the armature."""
    if armatureObj:
        armatureObj.data.pose_position = savedValue[0]
        armatureObj.hide = savedValue[1]
# -- Rigify --
# In a Rigify system there are ORG bones and DEF bones. The DEF bones causes the mesh deformation
# and from their corresponding vertex groups we'll get all the vertices weights. The ORG bones
# are used to reconstruct the DEF bones hierarchy (they have deform off).
# Normally you can find the DEF bones in the third to last armature layer (29) and the ORG bones
# in last layer (31).
# The association between ORG and DEF bones is done by the names with a pair "ORG-<name>" and
# "DEF-<name>" (e.g. ORG-SPINE and DEF-spine). Sometimes a ORG bone covers multiple DEF bones,
# in this case the DEF bones have name "DEF-<name>.<number>" (e.g. ORG-thigh.L, DEF-thigh.L.01,
# DEF-thigh.L.02) the lowest number is the root of the group and it is linked to the ORG bone.
# -- Derigify --
# We need a list of deform bones ('defbones') and their hierarchy ('defparent', 'defchildren').
# We start by searching all the DEF and ORG bones. Then we try to associate the DEF bones to
# their corresponding ORG bones, we need it in both ways: DEF to ORG and ORG to DEF.
# An ORG bone can be associated with multiple DEF bones, in this case the DEF bones have a number
# in their name. We order this list of DEF bones by name, so we can get their correct structure,
# the lowest number (01) is the root then follows their children.
# Now we can reconstruct the hierarchy. For each DEF bone X we get the list of DEF bones of the
# associate ORG bone Y. X will be in the list, if it is the first (it name can be "DEF-<name>"
# or "DEF-<name>.01") we get the parent P of the ORG bone Y, we get the list of DEF bones of P,
# we set the last bone in this list (lower child) as the parent of X. If X is not the first we
# set the previous DEF bone in the list as the parent of X ('defparent').
# Now for each bone X with a parent Y, we set X as a child of Y ('defchildren').
# Finally for each DEF bone with no parent (root DEF bone) we go down to all its hierarchy and
# for each bone we traverse we store the tuple 'bone, parent bone' in the list 'boneList'.
# -- Note --
# To make it worse from some version of Rigify (<=0.4) the name convention was changed from
# DEF/ORG-<name>.L/R.<number> to DEF/ORG-<name>.<number>.L/R
def DerigifyArmature(armature, tOptions):
    """Extract the DEF bones hierarchy from a Rigify armature.

    Associates each DEF bone to its ORG bone (stripping the "DEF-"/"ORG-"
    prefixes), sorts multi-segment DEF chains by their numeric suffix, then
    rebuilds the parent/child relations among DEF bones only.

    Args:
        armature: Blender armature datablock (bpy.types.Armature).
        tOptions: export options (doOnlyVisibleBones, doOnlyDeformBones...).

    Returns:
        A list of (bone, parent) tuples in traversal order, parent is None
        for root bones.
    """
    # Map {ORG bone name: Blender ORG bone}
    orgbones = {}
    # Map {DEF bone name: Blender DEF bone}
    defbones = {}
    # Map {ORG bone name: list of DEF bones names}
    org2defs = {}
    # Map {DEF bone name: ORG bone name}
    def2org = {}
    # Map {DEF bone name: list of children DEF bones}
    defchildren = {}
    # Map {DEF bone name: its parent DEF bone name}
    defparent = {}
    # List of names of bad DEF bones
    badbones = []
    # Experimental extended search for the Sintel model (does not work)
    extended = False
    # Flag for bad Rigify rig
    badrig = False
    # Scan the armature and collect ORG bones and DEF bones in the maps by their names,
    # we remove ORG- or DEF- from names
    for bone in armature.bones.values():
        if bone.name.startswith('ORG-'):
            orgbones[bone.name[4:]] = bone
            org2defs[bone.name[4:]] = []
        elif bone.name.startswith('DEF-'):
            if tOptions.doOnlyVisibleBones and not any(al and bl for al,bl in zip(armature.layers, bone.layers)):
                continue
            if tOptions.doOnlyDeformBones and not bone.use_deform:
                continue
            defbones[bone.name[4:]] = bone
            defchildren[bone.name[4:]] = []
    # Populate the org2defs with all the DEF bones corresponding to a ORG bone and def2org
    # with the unique ORG bone corresponding to a DEF bone.
    # For each DEF bone in the map get its name and Blender bone
    for name, bone in defbones.items():
        orgname = name
        # Search if exist an ORG bone with the same name of this DEF bone (None if not found)
        orgbone = orgbones.get(orgname)
        # If this ORG bone does not exist, then the DEF bone name could be DEF-<name>.<number>,
        # so we remove .<number> and search for the ORG bone again
        if not orgbone:
            # Search with new format: <name>.<number>.L/R (e.g. "DEF-thigh.02.L")
            # (raw strings avoid invalid "\." "\d" escapes; [LR] instead of the
            # original "[L,R]" which also matched a literal comma)
            prog = re.compile(r"^(.+)\.\d+(\.[LR])$")
            mo = prog.match(name)
            # Search with old format: <name>.L/R.<number> (e.g. "DEF-thigh.L.02")
            if not mo:
                prog = re.compile(r"^(.+)(\.[LR])\.\d+$")
                mo = prog.match(name)
            # If the format is correct try to find the ORG bone
            if mo:
                orgname = mo.group(1) + mo.group(2)
                orgbone = orgbones.get(orgname)
        # Still no ORG bone, is it a chain of DEF bones till a ORG bone ?
        # (short-circuit: 'mo' is only evaluated when the direct lookup failed)
        if not orgbone and mo and extended:
            pbone = bone.parent
            while pbone:
                # If the parent is a ORG bone, we have found it
                if pbone.name.startswith('ORG-'):
                    orgname = pbone.name[4:]
                    orgbone = orgbones.get(orgname)
                    break
                # Fail if the parent is not a DEF bone
                if not pbone.name.startswith('DEF-'):
                    break
                # Fail if the parent has a different format
                pmo = prog.match(pbone.name[4:])
                if not pmo or pmo.groups() != mo.groups():
                    break
                pbone = pbone.parent
        # If we cannot find a ORG bone for the DEF bone, this is a bad rig
        if not orgbone:
            badbones.append(name)
            badrig = True
            continue
        # Map the ORG name (can be None) to the DEF name (one to many)
        org2defs[orgname].append(name)
        # Map the DEF name to the ORG name (can be None) (one to one)
        def2org[name] = orgname
    # Delete bad DEF bones
    if badbones:
        log.warning("Bad DEF bones with no ORG skipped: {:s}".format( ", ".join(badbones) ))
        for name in badbones:
            del defbones[name]
        badbones.clear()
    # Sort DEF bones names in the ORG to DEF map, so we get: <name>.0, <name>.1, <name>.2 ...
    for defs in org2defs.values():
        defs.sort()
    # Populate the parent map of each DEF bone
    for name, bone in defbones.items():
        # Get the relative ORG bone name (can be None)
        orgname = def2org[name]
        # Get the ORG bone
        orgbone = orgbones.get(orgname)
        # Get the list (sorted by number) of DEF bones associated to the ORG bone
        defs = org2defs[orgname]
        if orgbone:
            # Get the index of the DEF bone in the list
            i = defs.index(name)
            # If it is the first (it has the lowest number, e.g. <name>.0)
            if i == 0:
                orgparent = orgbone.parent
                # If the ORG parent bone exists and it is an ORG bone
                while orgparent and orgparent.name.startswith('ORG-'):
                    orgpname = orgparent.name[4:]
                    # Map this DEF bone to the last DEF bone of the ORG parent bone
                    pdefs = org2defs[orgpname]
                    if pdefs:
                        defparent[name] = pdefs[-1]
                        break
                    # If the ORG has no DEF bones we can try with its parent
                    if not extended:
                        # NOTE(review): this records the immediate parent's name even
                        # after walking up the chain; harmless while 'extended' is
                        # False (we break on the first iteration) — confirm if
                        # 'extended' is ever enabled.
                        badbones.append(orgbone.parent.name)
                        badrig = True
                        break
                    orgparent = orgparent.parent
            else:
                # Map this DEF bone to the previous DEF bone in the list (it has a lower number)
                defparent[name] = defs[i-1]
    # Populate the children list of each DEF bone
    for name in defbones.keys():
        # If this DEF bone has a parent, append it as a child of the parent
        if name in defparent:
            defchildren[defparent[name]].append(name)
    # List bad ORG bones
    if badbones:
        log.warning("Bad ORG bones with no DEF children: {:s}".format( ", ".join(badbones) ))
        badbones.clear()
    bonesList = []
    # Warning for not standard rig
    if badrig:
        log.warning("Incompatible Rigify rig")
        ##return bonesList
    # Recursively add children
    def Traverse(boneName):
        # Get the Blender bone
        bone = defbones[boneName]
        parent = None
        # If it has a parent, get its Blender bone
        if boneName in defparent:
            parentName = defparent[boneName]
            parent = defbones[parentName]
        bonesList.append( (bone, parent) )
        # Proceed with children bones
        for childName in defchildren[boneName]:
            Traverse(childName)
    log.info("Derigify found {:d} DEF bones".format(len(defbones)) )
    # Start from bones with no parent (root bones)
    for boneName in defbones:
        if boneName not in defparent:
            Traverse(boneName)
    return bonesList
# How to read a skeleton:
# start from the root bone, move it by bindPosition in the armature space
# then rotate the armature space with bindRotation, this will be the parent
# space used by its children. For each child bone move it by bindPosition in
# the parent space then rotate the parent space with bindRotation, and so on.
# We need each bone position and rotation in parent bone space:
# upAxis = Matrix.Rotation(pi/2, 4, 'X')
# poseMatrix = bone.matrix
# if parent:
# poseMatrix = parentBone.matrix.inverted() * poseMatrix
# else:
# poseMatrix = upAxis.matrix.inverted() * origin.matrix * poseMatrix
def DecomposeArmature(scene, armatureObj, meshObj, tData, tOptions):
    """Decompose the armature bones of armatureObj into TBone objects.

    For every exportable bone it stores in tData.bonesMap a TBone built
    from: position/rotation/scale relative to the parent bone converted
    to left-handed coordinates, plus the bone matrix relative to the
    armature ('matrix_local') converted the same way.
    Logs warnings for unapplied scale/rotation and mismatched origins;
    returns early (None) when the armature has no bones to export.
    """
    bonesMap = tData.bonesMap
    # 'armature.pose.bones' contains bones data for the current frame
    # 'armature.data.bones' contains bones data for the rest position (not true?)
    armature = armatureObj.data
    # Check that armature and children objects have scale, rotation applied and the same origin
    if armatureObj.scale != Vector((1.0, 1.0, 1.0)):
        log.warning('You should apply scale to armature {:s}'.format(armatureObj.name))
    if armatureObj.rotation_quaternion != Quaternion((1.0, 0.0, 0.0, 0.0)):
        log.warning('You should apply rotation to armature {:s}'.format(armatureObj.name))
    if meshObj.scale != Vector((1.0, 1.0, 1.0)):
        log.warning('You should apply scale to object {:s}'.format(meshObj.name))
    if meshObj.rotation_quaternion != Quaternion((1.0, 0.0, 0.0, 0.0)):
        log.warning('You should apply rotation to object {:s}'.format(meshObj.name))
    if not tOptions.globalOrigin and meshObj.location != armatureObj.location:
        log.warning('Object {:s} should have the same origin as its armature {:s}'
                    .format(meshObj.name, armatureObj.name))
    if not armature.bones:
        log.warning('Armature {:s} has no bones'.format(armatureObj.name))
        return
    log.info("Decomposing armature: {:s} ({:d} bones)".format(armatureObj.name, len(armature.bones)) )
    originMatrix = Matrix.Identity(4)
    if tOptions.bonesGlobalOrigin:
        originMatrix = armatureObj.matrix_world
    # Get a list of bones
    if tOptions.derigifyArmature:
        # from a Rigify armature
        bonesList = DerigifyArmature(armature, tOptions)
    else:
        # from a standard armature
        bonesList = []
        # Recursively add children; returns True if this bone (or any
        # descendant) was kept, so that hidden/non-deform leaf bones are
        # skipped but kept when they have exported children
        def Traverse(bone, parent):
            childAdded = False
            for child in bone.children:
                childAdded = Traverse(child, bone) or childAdded
            if not childAdded:
                if tOptions.doOnlyVisibleBones and not any(al and bl for al,bl in zip(armature.layers, bone.layers)):
                    return False
                if tOptions.doOnlyDeformBones and not bone.use_deform:
                    return False
            bonesList.append( (bone, parent) )
            return True
        # Start from bones with no parent (root bones)
        for bone in armature.bones.values():
            if bone.parent is None:
                Traverse(bone, None)
    if not bonesList:
        log.warning('Armature {:s} has no bone to export'.format(armatureObj.name))
        return
    for bone, parent in bonesList:
        # 'bone.matrix_local' is referred to the armature, we need
        # the transformation between the current bone and its parent.
        boneMatrix = bone.matrix_local.copy()
        # Here 'bone.matrix_local' is in object(armature) space, so we have to
        # calculate the bone transformation in parent bone space
        if parent:
            boneMatrix = parent.matrix_local.inverted() * boneMatrix
        else:
            boneMatrix = originMatrix * boneMatrix
            if tOptions.orientation:
                boneMatrix = tOptions.orientation.to_matrix().to_4x4() * boneMatrix
            # Normally we don't have to worry that Blender is Z up and we want
            # Y up because we use relative transformations between bones. However
            # the parent bone is relative to the armature so we need to convert
            # Z up to Y up by rotating its matrix by -90° on X
            boneMatrix = Matrix.Rotation(math.radians(-90.0), 4, 'X' ) * boneMatrix
        if tOptions.scale != 1.0:
            boneMatrix.translation *= tOptions.scale
        # Extract position and rotation relative to parent in parent space
        t = boneMatrix.to_translation()
        q = boneMatrix.to_quaternion()
        s = boneMatrix.to_scale()
        # Convert position and rotation to left hand:
        tl = Vector((t.x, t.y, -t.z))
        ql = Quaternion((q.w, -q.x, -q.y, q.z))
        sl = Vector((s.x, s.y, s.z))
        # Now we need the bone matrix relative to the armature. 'matrix_local' is
        # what we are looking for, but it needs to be converted:
        # 1) rotate of -90° on X axis:
        #    - swap column 1 with column 2
        #    - negate column 1
        # 2) convert bone transformation in object space to left hand:
        #    - swap row 1 with row 2
        #    - swap column 1 with column 2
        # So putting them together:
        #    - swap row 1 with row 2
        #    - negate column 2
        ml = bone.matrix_local.copy()
        if tOptions.orientation:
            ml = tOptions.orientation.to_matrix().to_4x4() * ml
        if tOptions.scale != 1.0:
            ml.translation *= tOptions.scale
        # Swap rows 1 and 2, then negate column 2 (see derivation above)
        (ml[1][:], ml[2][:]) = (ml[2][:], ml[1][:])
        ml[0][2] = -ml[0][2]
        ml[1][2] = -ml[1][2]
        ml[2][2] = -ml[2][2]
        # Create a new bone
        parentName = parent and parent.name
        tBone = TBone(len(bonesMap), parentName, tl, ql, sl, ml)
        # If new, add the bone to the map with its name
        if bone.name not in bonesMap:
            bonesMap[bone.name] = tBone
        else:
            log.critical("Bone {:s} already present in the map.".format(bone.name))
#--------------------
# Decompose animations
#--------------------
def DecomposeActions(scene, armatureObj, tData, tOptions):
    """Decompose the animations of armatureObj into TAnimation objects.

    Depending on tOptions, the exported animation objects can be: NLA
    Tracks, NLA Strips, Actions (used or all) and the Timeline. For each
    of them the pose of every (optionally only keyed) bone is sampled
    frame by frame, converted to left-handed coordinates relative to the
    parent bone, and appended to tData.animationsList. Timeline markers
    can be exported as triggers. The armature's current action, NLA state
    and the scene frame are restored before returning.
    """
    # Class for storing a NlaStrip, its previous strip and its parent track
    class NlaStripLink:
        def __init__(self, strip, previous, track):
            self.name = strip.name
            self.strip = strip
            self.previous = previous
            self.track = track
    bonesMap = tData.bonesMap
    animationsList = tData.animationsList
    if not armatureObj.animation_data:
        log.warning('Armature {:s} has no animation data'.format(armatureObj.name))
        return
    originMatrix = Matrix.Identity(4)
    if tOptions.actionsGlobalOrigin:
        originMatrix = armatureObj.matrix_world
        if tOptions.globalOrigin and originMatrix != Matrix.Identity(4):
            # Blender moves/rotates the armature together with the mesh, so if you set a global origin
            # for Mesh and Actions you'll have twice the transformations. Set only one global origin.
            log.warning("Use local origin for the object otherwise transformations are applied twice")
    # Save current action and frame, we'll restore them later
    savedAction = armatureObj.animation_data.action
    savedFrame = scene.frame_current
    savedUseNla = armatureObj.animation_data.use_nla
    # Here we collect every animation objects we want to export
    animationObjects = []
    # Scan all the Tracks not muted of the armature
    for track in armatureObj.animation_data.nla_tracks:
        track.is_solo = False
        if track.mute:
            continue
        # Add Track
        if tOptions.doTracks or (tOptions.doSelectedTracks and track.select):
            animationObjects.append(track)
        # Scan all the Strips of the Track
        previous = None
        for strip in track.strips:
            # Add Strip (every Strip is unique, no need to check for duplicates)
            if tOptions.doStrips or (tOptions.doSelectedStrips and strip.select):
                stripLink = NlaStripLink(strip, previous, track)
                animationObjects.append(stripLink)
            # Add an used Action
            action = strip.action
            if tOptions.doUsedActions and action and action not in animationObjects:
                animationObjects.append(action)
            previous = strip
    # Add all the Actions (even if unused or deleted)
    if tOptions.doAllActions:
        animationObjects.extend(bpy.data.actions)
    # Add Timeline (as the armature object)
    if tOptions.doTimeline:
        animationObjects.append(armatureObj)
    if not animationObjects:
        log.warning('Armature {:s} has no animation to export'.format(armatureObj.name))
        return
    for object in animationObjects:
        tAnimation = TAnimation(object.name)
        # Frame when the animation starts
        frameOffset = 0
        # Objects to save old values
        oldTrackValue = None
        oldStripValue = None
        if isinstance(object, bpy.types.Action):
            # Actions have their frame range
            (startframe, endframe) = object.frame_range
            startframe = int(startframe)
            endframe = int(endframe + 1)
        elif isinstance(object, NlaStripLink): # bpy.types.NlaStrip
            # Strips also have their frame range
            startframe = int(object.strip.frame_start)
            endframe = int(object.strip.frame_end + 1)
            # Strip can start anywhere
            frameOffset = startframe
        else:
            # For Tracks and Timeline we use the scene playback range
            startframe = int(scene.frame_start)
            endframe = int(scene.frame_end + 1)
        # Here we collect every action used by this animation, so we can filter the only used bones
        actionSet = set()
        # Clear current action on the armature
        try:
            armatureObj.animation_data.action = None
        except AttributeError:
            log.error("You need to exit action edit mode")
            return
        # If it is an Action, set the current Action; also disable NLA to disable influences from others NLA tracks
        if isinstance(object, bpy.types.Action):
            log.info("Decomposing action: {:s} (frames {:.1f} {:.1f})".format(object.name, startframe, endframe-1))
            # Set Action on the armature
            armatureObj.animation_data.use_nla = False
            armatureObj.animation_data.action = object
            # Get the Actions
            actionSet.add(object)
        # If it is a Track (not muted), set it as solo
        if isinstance(object, bpy.types.NlaTrack):
            log.info("Decomposing track: {:s} (frames {:.1f} {:.1f})".format(object.name, startframe, endframe-1))
            # Set the NLA Track as solo
            oldTrackValue = object.is_solo
            object.is_solo = True
            armatureObj.animation_data.use_nla = True
            # Get the Actions
            for strip in object.strips:
                if strip.action:
                    actionSet.add(strip.action)
        # If it is a Strip in a Track, set it as solo
        if isinstance(object, NlaStripLink):
            log.info("Decomposing strip: {:s} (frames {:.1f} {:.1f})".format(object.name, startframe, endframe-1))
            # Set the parent NLA Track as solo (strange behavior)
            armatureObj.animation_data.use_nla = True
            oldTrackValue = object.track.is_solo
            object.track.is_solo = True
            # We mute the previous strip because it mess with the first frame
            if object.previous:
                oldStripValue = object.previous.mute
                object.previous.mute = True
            # Get the Action
            actionSet.add(object.strip.action)
        # If it is the Timeline, merge all the Tracks (not muted)
        if isinstance(object, bpy.types.Object):
            log.info("Decomposing animation: {:s} (frames {:.1f} {:.1f})".format(object.name, startframe, endframe-1))
            armatureObj.animation_data.use_nla = True
            # If there are no Tracks use the saved action (NLA is empty so we can keep it on)
            if not object.animation_data.nla_tracks and savedAction:
                armatureObj.animation_data.action = savedAction
                actionSet.add(savedAction)
            # Get the Actions
            for track in object.animation_data.nla_tracks:
                for strip in track.strips:
                    if strip.action:
                        actionSet.add(strip.action)
        # Fix: the original tested 'animationObjects' here, which cannot be
        # empty at this point (we returned above when it was), so the warning
        # was unreachable; the intended check is on the collected actions
        if not actionSet:
            log.warning("No actions for animation {:s}".format(object.name))
        # Get the bones names
        bones = []
        if tOptions.doOnlyKeyedBones:
            # Get all the names of the bones used by the actions
            boneSet = set()
            for action in actionSet:
                for group in action.groups:
                    boneSet.add(group.name)
            # Add the bones name respecting the order of bonesMap
            for bone in bonesMap.keys():
                if bone in boneSet:
                    bones.append(bone)
                    boneSet.remove(bone)
            # Check if any bones used by actions is missing in the map
            for bone in boneSet:
                log.warning("Action group or bone '{:s}' not present in the skeleton".format(bone))
        else:
            # Get all the names of the bones in the map
            bones = bonesMap.keys()
        if not bones:
            log.warning("No bones for animation {:s}".format(object.name))
            continue
        # Reset position/rotation/scale of each bone
        for poseBone in armatureObj.pose.bones:
            poseBone.matrix_basis = Matrix.Identity(4)
        # Progress counter
        progressCur = 0
        progressTot = 0.01 * len(bones) * (endframe-startframe)/scene.frame_step
        for boneName in bones:
            if not boneName in bonesMap:
                log.warning("Skeleton does not contain bone {:s}".format(boneName))
                continue
            if not boneName in armatureObj.pose.bones:
                log.warning("Pose does not contain bone {:s}".format(boneName))
                continue
            tTrack = TTrack(boneName)
            # Get the Blender pose bone (bpy.types.PoseBone)
            poseBone = armatureObj.pose.bones[boneName]
            parent = poseBone.parent
            # For each frame
            for time in range( startframe, endframe, scene.frame_step):
                if (progressCur % 40) == 0:
                    print("{:.3f}%\r".format(progressCur / progressTot), end='' )
                progressCur += 1
                # Set frame (TODO: this is very slow, try to advance only the armature)
                # (rna_Scene_frame_set, BKE_scene_update_for_newframe, BKE_animsys_evaluate_animdata)
                scene.frame_set(time)
                # This matrix is referred to the armature (object space)
                poseMatrix = poseBone.matrix.copy()
                if parent:
                    # Bone matrix relative to its parent bone
                    poseMatrix = parent.matrix.inverted() * poseMatrix
                else:
                    # Root bone matrix relative to the armature
                    if tOptions.orientation:
                        poseMatrix = tOptions.orientation.to_matrix().to_4x4() * poseMatrix
                    poseMatrix = Matrix.Rotation(math.radians(-90.0), 4, 'X' ) * originMatrix * poseMatrix
                if tOptions.scale != 1.0:
                    poseMatrix.translation *= tOptions.scale
                # Extract position and rotation relative to parent in parent space
                t = poseMatrix.to_translation()
                q = poseMatrix.to_quaternion()
                s = poseMatrix.to_scale()
                # Convert position and rotation to left hand:
                tl = Vector((t.x, t.y, -t.z))
                ql = Quaternion((q.w, -q.x, -q.y, q.z))
                sl = Vector((s.x, s.y, s.z))
                if not tOptions.doAnimationPos:
                    tl = None
                if not tOptions.doAnimationRot:
                    ql = None
                if not tOptions.doAnimationSca:
                    sl = None
                tFrame = TFrame((time - frameOffset) / scene.render.fps, tl, ql, sl)
                # Store the frame only when something actually changed
                if not tTrack.frames or tTrack.frames[-1].hasMoved(tFrame):
                    tTrack.frames.append(tFrame)
            if tTrack.frames:
                tAnimation.tracks.append(tTrack)
        # Use timeline marker as Urho triggers
        if tOptions.doTriggers:
            log.info("Decomposing {:d} markers for animation {:s}"
                     .format(len(scene.timeline_markers), tAnimation.name))
            for marker in scene.timeline_markers:
                tTrigger = TTrigger(marker.name)
                tTrigger.time = (marker.frame - frameOffset) / scene.render.fps
                tTrigger.data = marker.name
                tAnimation.triggers.append(tTrigger)
        if tAnimation.tracks:
            animationsList.append(tAnimation)
        # Restore the NLA solo/mute flags we changed for this object
        if isinstance(object, bpy.types.NlaTrack):
            object.is_solo = oldTrackValue
        if isinstance(object, NlaStripLink):
            object.track.is_solo = oldTrackValue
            if object.previous:
                object.previous.mute = oldStripValue
    # Restore initial action and frame
    armatureObj.animation_data.action = savedAction
    armatureObj.animation_data.use_nla = savedUseNla
    scene.frame_set(savedFrame)
#---------------------------------
# Decompose geometries and morphs
#---------------------------------
def DecomposeMesh(scene, meshObj, tData, tOptions, errorsMem):
verticesList = tData.verticesList
geometriesList = tData.geometriesList
materialsList = tData.materialsList
materialGeometryMap = tData.materialGeometryMap
morphsList = tData.morphsList
bonesMap = tData.bonesMap
meshIndex = errorsMem.SecondIndex(meshObj.name)
verticesMap = {}
# Create a Mesh datablock with modifiers applied
# (note: do not apply if not needed, it loses precision)
mesh = meshObj.to_mesh(scene, tOptions.applyModifiers, tOptions.applySettings)
log.info("Decomposing mesh: {:s} ({:d} vertices)".format(meshObj.name, len(mesh.vertices)) )
# If we use the object local origin (orange dot) we don't need transformations
posMatrix = Matrix.Identity(4)
normalMatrix = Matrix.Identity(4)
if tOptions.globalOrigin:
posMatrix = meshObj.matrix_world
# Use the inverse transpose to rotate normals without scaling (math trick)
normalMatrix = meshObj.matrix_world.inverted().transposed()
# Apply custom rotation
if tOptions.orientation:
posMatrix = tOptions.orientation.to_matrix().to_4x4() * posMatrix
normalMatrix = tOptions.orientation.to_matrix().to_4x4() * normalMatrix
# Apply custom scaling last
if tOptions.scale != 1.0:
posMatrix = Matrix.Scale(tOptions.scale, 4) * posMatrix
# Vertices map: vertex Blender index to TVertex index
faceVertexMap = {}
# Here we store geometriesList indices of geometries with new vertices in its last LOD
# We use this to create a new LOD only once per geometry and to filter where we have
# to optimize and recalculate tangents
updatedGeometryIndices = set()
# Mesh vertex groups
meshVertexGroups = meshObj.vertex_groups
# Errors helpers
notBonesGroups = set()
missingGroups = set()
overrideBones = set()
missingBones = set()
# Python trick: C = A and B, if A is False (None, empty list) then C=A, if A is
# True (object, populated list) then C=B
# Check if the mesh has UV data
uvs = None
uvs2 = None
# In every texture of every material search if the name ends in "_UV1" or "_UV2",
# search also in names of the UV maps
for material in mesh.materials:
if not material:
continue
for texture in material.texture_slots:
if not texture or texture.texture_coords != "UV":
continue
tex = texture.name
uvMap = texture.uv_layer
if not tex or not uvMap or not (uvMap in mesh.uv_textures.keys()):
continue
if tex.endswith("_UV") or uvMap.endswith("_UV") or \
tex.endswith("_UV1") or uvMap.endswith("_UV1"):
uvs = mesh.tessface_uv_textures[uvMap].data
elif tex.endswith("_UV2") or uvMap.endswith("_UV2"):
uvs2 = mesh.tessface_uv_textures[uvMap].data
# If still we don't have UV1, try the current UV map selected
if not uvs and mesh.tessface_uv_textures.active:
uvs = mesh.tessface_uv_textures.active.data
# If still we don't have UV1, try the first UV map in Blender
if not uvs and mesh.tessface_uv_textures:
uvs = mesh.tessface_uv_textures[0].data
if tOptions.doGeometryUV and not uvs:
log.warning("Object {:s} has no UV data".format(meshObj.name))
if tOptions.doGeometryUV2 and not uvs2:
log.warning("Object {:s} has no texture with UV2 data. Append _UV2 to the texture slot name".format(meshObj.name))
# Check if the mesh has vertex color data
colorsRgb = None
colorsAlpha = None
# In vertex colors layer search if the name ends in "_RGB" or "_ALPHA"
for vertexColors in mesh.tessface_vertex_colors:
if not colorsRgb and vertexColors.name.endswith("_RGB"):
colorsRgb = vertexColors.data
if not colorsAlpha and vertexColors.name.endswith("_ALPHA"):
colorsAlpha = vertexColors.data
# If still we don't have RGB, try the current vertex color layer selected
if not colorsRgb and mesh.tessface_vertex_colors.active:
colorsRgb = mesh.tessface_vertex_colors.active.data
# If still we don't have RGB, try the first vertex color layer in Blender
if not colorsRgb and mesh.tessface_vertex_colors:
colorsRgb = mesh.tessface_vertex_colors[0].data
if tOptions.doGeometryCol and not colorsRgb:
log.warning("Object {:s} has no rgb color data".format(meshObj.name))
if tOptions.doGeometryColAlpha and not colorsAlpha:
log.warning("Object {:s} has no alpha color data. Append _ALPHA to the color layer name".format(meshObj.name))
if tOptions.doMaterials:
if scene.render.engine == 'CYCLES':
log.warning("Cycles render engine not supported")
if not mesh.materials:
log.warning("Object {:s} has no materials data".format(meshObj.name))
# Progress counter
progressCur = 0
progressTot = 0.01 * len(mesh.tessfaces)
for face in mesh.tessfaces:
if (progressCur % 10) == 0:
print("{:.3f}%\r".format(progressCur / progressTot), end='' )
progressCur += 1
# Skip if this face has less than 3 unique vertices
# (a frozenset is an immutable set of unique elements)
if len(frozenset(face.vertices)) < 3:
face.hide = True
continue
if face.hide:
continue
# Get face vertices UV, type: MeshTextureFace(bpy_struct)
faceUv = uvs and uvs[face.index]
faceUv2 = uvs2 and uvs2[face.index]
# Get face 4 vertices colors
fcol = colorsRgb and colorsRgb[face.index]
faceRgbColor = fcol and (fcol.color1, fcol.color2, fcol.color3, fcol.color4)
fcol = colorsAlpha and colorsAlpha[face.index]
faceAlphaColor = fcol and (fcol.color1, fcol.color2, fcol.color3, fcol.color4)
# Get the face material
# If no material is associated then face.material_index is 0 but mesh.materials
# is not None
material = None
if mesh.materials and len(mesh.materials):
material = mesh.materials[face.material_index]
# Add the material if it is new
materialName = material and material.name
if tOptions.doMaterials and materialName and (not materialName in materialsList):
tMaterial = TMaterial(materialName)
materialsList.append(tMaterial)
tMaterial.diffuseColor = material.diffuse_color
tMaterial.diffuseIntensity = material.diffuse_intensity
tMaterial.specularColor = material.specular_color
tMaterial.specularIntensity = material.specular_intensity
tMaterial.specularHardness = material.specular_hardness
tMaterial.twoSided = mesh.show_double_sided
if material.use_transparency:
tMaterial.opacity = material.alpha
if material.transparency_method == 'MASK':
tMaterial.alphaMask = True
# In reverse order so the first slots have precedence
for texture in reversed(material.texture_slots):
if texture is None or texture.texture_coords != 'UV':
continue
textureData = bpy.data.textures[texture.name]
if textureData.type != 'IMAGE':
continue
if textureData.image is None:
continue
imageName = textureData.image.name
if texture.use_map_color_diffuse:
tMaterial.diffuseTexName = imageName
if texture.use_map_normal:
tMaterial.normalTexName = imageName
if texture.use_map_color_spec:
tMaterial.specularTexName = imageName
if texture.use_map_emit:
tMaterial.emitTexName = imageName
tMaterial.emitColor = Color((1.0, 1.0, 1.0))
tMaterial.emitIntensity = texture.emit_factor
if "_LIGHTMAP" in texture.name:
tMaterial.lightmapTexName = imageName
if "_AMBIENTLIGHT" in texture.name:
tMaterial.ambientLightTexName = imageName
##tMaterial.imagePath = bpy.path.abspath(faceUv.image.filepath)
# If we are merging and want to have separate materials, add the object name
mapMaterialName = materialName
if tOptions.mergeObjects and tOptions.mergeNotMaterials:
mapMaterialName = str(materialName) + "---" + meshObj.name
# From the material name search for the geometry index, or add it to the map if missing
try:
geometryIndex = materialGeometryMap[mapMaterialName]
except KeyError:
geometryIndex = len(geometriesList)
newGeometry = TGeometry()
newGeometry.materialName = materialName
geometriesList.append(newGeometry)
materialGeometryMap[mapMaterialName] = geometryIndex
log.info("New Geometry{:d} created for material {!s}".format(geometryIndex, materialName))
# Get the geometry associated to the material
geometry = geometriesList[geometryIndex]
# Get the last LOD level, or add a new one if requested in the options
lodLevelIndex = len(geometry.lodLevels)
if not geometry.lodLevels or geometryIndex not in tOptions.lodUpdatedGeometryIndices:
tLodLevel = TLodLevel()
tLodLevel.distance = tOptions.lodDistance
geometry.lodLevels.append(tLodLevel)
tOptions.lodUpdatedGeometryIndices.add(geometryIndex)
log.info("New LOD{:d} created for material {!s}".format(lodLevelIndex, materialName))
else:
tLodLevel = geometry.lodLevels[-1]
# Add the index of the geometry we are going to update
updatedGeometryIndices.add(geometryIndex)
indexSet = tLodLevel.indexSet
triangleList = tLodLevel.triangleList
# Here we store all the indices of the face, then we decompose it into triangles
tempList = []
for i, vertexIndex in enumerate(face.vertices):
# i: vertex index in the face (0..2 tris, 0..3 quad)
# vertexIndex: vertex index in Blender buffer
# Blender vertex
vertex = mesh.vertices[vertexIndex]
position = posMatrix * vertex.co
# if face is smooth use vertex normal else use face normal
if face.use_smooth:
normal = vertex.normal
else:
normal = face.normal
normal = normalMatrix * normal
# Create a new vertex
tVertex = TVertex()
# Set Blender index
tVertex.blenderIndex = (meshIndex, vertexIndex)
# Set Vertex position
if tOptions.doGeometryPos:
tVertex.pos = Vector((position.x, position.z, position.y))
# Set Vertex normal
if tOptions.doGeometryNor:
tVertex.normal = Vector((normal.x, normal.z, normal.y))
# Set Vertex UV coordinates
if tOptions.doGeometryUV:
if faceUv:
uv = faceUv.uv[i]
tVertex.uv = Vector((uv[0], 1.0 - uv[1]))
elif tOptions.doForceElements:
tVertex.uv = Vector((0.0, 0.0))
if tOptions.doGeometryUV2:
if faceUv2:
uv2 = faceUv2.uv[i]
tVertex.uv2 = Vector((uv2[0], 1.0 - uv2[1]))
elif tOptions.doForceElements:
tVertex.uv2 = Vector((0.0, 0.0))
# Set Vertex color
if tOptions.doGeometryCol or tOptions.doGeometryColAlpha:
color = [0, 0, 0, 255]
if faceRgbColor or faceAlphaColor:
if faceRgbColor:
# This is an array of 3 floats from 0.0 to 1.0
rgb = faceRgbColor[i]
# Approx 255*float to the closest int
color[:3] = ( int(round(rgb.r * 255.0)),
int(round(rgb.g * 255.0)),
int(round(rgb.b * 255.0)) )
if faceAlphaColor:
# For Alpha use Value of HSV
alpha = faceAlphaColor[i]
color[3] = int(round(alpha.v * 255.0))
tVertex.color = tuple(color)
elif tOptions.doForceElements:
tVertex.color = tuple(color)
# Set Vertex bones weights
if tOptions.doGeometryWei:
weights = []
# Scan all the vertex groups associated to the vertex, type: VertexGroupElement(bpy_struct)
for g in vertex.groups:
# The group name should be the bone name, but it can also be an user made vertex group
try:
boneName = meshVertexGroups[g.group].name
try:
boneIndex = bonesMap[boneName].index
if g.weight > 0.0 or not weights:
weights.append( (boneIndex, g.weight) )
except KeyError:
notBonesGroups.add(boneName)
except IndexError:
missingGroups.add(str(g.group))
# If the mesh has a bone for parent use it for a 100% weight skinning
if tOptions.skinBoneParent and meshObj.parent_type == 'BONE' and meshObj.parent_bone:
boneName = meshObj.parent_bone
# We shouldn't have any skinning on the vertex
if weights:
overrideBones.add(boneName)
try:
boneIndex = bonesMap[boneName].index
weights.append( (boneIndex, 1.0) )
except KeyError:
missingBones.add(boneName)
# If we found no bone weight (not even one with weight zero) leave the list equal to None
if weights:
tVertex.weights = weights
elif tOptions.doForceElements:
tVertex.weights = [(0, 0.0)]
# All this code do is "tVertexIndex = verticesMapList.index(tVertex)", but we use
# a map to speed up.
# Get an hash of the vertex (different vertices with the same hash are ok)
vertexHash = hash(tVertex)
try:
# Get a list of vertex indices with the same hash
verticesMapList = verticesMap[vertexHash]
except KeyError:
# If the hash is not mapped, create a new list (we should use sets but lists are faster)
verticesMapList = []
verticesMap[vertexHash] = verticesMapList
# For each index in the list, test if it is the same as the current tVertex.
# If Position, Normal and UV must be the same get its index.
## tVertexIndex = next((j for j in verticesMapList if verticesList[j] == tVertex), None)
tVertexIndex = None
for j in verticesMapList:
if verticesList[j].isEqual(tVertex):
tVertexIndex = j
break
# If we cannot find it, the vertex is new, add it to the list, and its index to the map list
if tVertexIndex is None:
tVertexIndex = len(verticesList)
verticesList.append(tVertex)
verticesMapList.append(tVertexIndex)
# Add the vertex index to the temp list to create triangles later
tempList.append(tVertexIndex)
# Map Blender face index and Blender vertex index to our TVertex index (this is used later by Morphs)
faceVertexMap[(face.index, vertexIndex)] = tVertexIndex
# Save every unique vertex this LOD is using
indexSet.add(tVertexIndex)
# Create triangles
if i == 2:
triangle = (tempList[0], tempList[2], tempList[1])
triangleList.append(triangle)
if i == 3:
triangle = (tempList[0], tempList[3], tempList[2])
triangleList.append(triangle)
# end loop vertices
# end loop faces
if notBonesGroups:
log.info("These groups are not used for bone deforms: {:s}".format( ", ".join(notBonesGroups) ))
if missingGroups:
log.warning("These group indices are missing: {:s}".format( ", ".join(missingGroups) ))
if overrideBones:
log.warning("These parent bones will override the deforms: {:s}".format( ", ".join(overrideBones) ))
if missingBones:
log.warning("These parent bones are missing in the armature: {:s}".format( ", ".join(missingBones) ))
# Generate tangents for the last LOD of every geometry with new vertices
if tOptions.doGeometryTan:
lodLevels = []
for geometryIndex in updatedGeometryIndices:
geometry = geometriesList[geometryIndex]
# Only the last LOD was modified (even if it wasn't a new LOD)
lodLevel = geometry.lodLevels[-1]
log.info("Generating tangents on {:d} indices for {:s} Geometry{:d}"
.format(len(lodLevel.indexSet), meshObj.name, geometryIndex) )
lodLevels.append(lodLevel)
GenerateTangents(lodLevels, verticesList, errorsMem)
# Optimize vertex index buffer for the last LOD of every geometry with new vertices
if tOptions.doOptimizeIndices:
for geometryIndex in updatedGeometryIndices:
geometry = geometriesList[geometryIndex]
# Only the last LOD was modified (even if it wasn't a new LOD)
lodLevel = geometry.lodLevels[-1]
log.info("Optimizing {:d} indices for {:s} Geometry{:d}"
.format(len(lodLevel.indexSet), meshObj.name, geometryIndex) )
OptimizeIndices(lodLevel)
# Check if we need and can work on shape keys (morphs)
shapeKeys = meshObj.data.shape_keys
keyBlocks = []
if tOptions.doMorphs:
if not shapeKeys or len(shapeKeys.key_blocks) < 1:
log.warning("Object {:s} has no shape keys".format(meshObj.name))
else:
keyBlocks = shapeKeys.key_blocks
# Decompose shape keys (morphs)
for j, block in enumerate(keyBlocks):
# Skip 'Basis' shape key
if j == 0:
continue
# Skip muted shape keys
if block.mute:
continue
tMorph = TMorph(block.name)
log.info("Decomposing shape: {:s} ({:d} vertices)".format(block.name, len(block.data)) )
# Make a temporary copy of the mesh
shapeMesh = mesh.copy()
if len(shapeMesh.vertices) != len(block.data):
log.error("Vertex count mismatch on shape {:s}.".format(block.name))
continue
# Apply the shape
for i, data in enumerate(block.data):
shapeMesh.vertices[i].co = data.co
# Recalculate normals
shapeMesh.update(calc_edges = True, calc_tessface = True)
##shapeMesh.calc_tessface()
##shapeMesh.calc_normals()
# TODO: if set use 'vertex group' of the shape to filter affected vertices
# TODO: can we use mesh tessfaces and not shapeMesh tessfaces ?
for face in shapeMesh.tessfaces:
if face.hide:
continue
# TODO: add only affected triangles not faces, use morphed as a mask
morphed = False
# In this list we store vertex index and morphed vertex of each face, we'll add them
# to the morph only if at least one vertex on the face is affected by the morph
tempList = []
# For each Blender vertex index in the face
for vertexIndex in face.vertices:
# Get the Blender morphed vertex
vertex = shapeMesh.vertices[vertexIndex]
position = posMatrix * vertex.co
# If face is smooth use vertex normal else use face normal
if face.use_smooth:
normal = vertex.normal
else:
normal = face.normal
normal = normalMatrix * normal
# Try to find the TVertex index corresponding to this Blender vertex index
try:
tVertexIndex = faceVertexMap[(face.index, vertexIndex)]
except KeyError:
log.error("Cannot find vertex {:d} of face {:d} of shape {:s}."
.format(vertexIndex, face.index, block.name) )
continue
# Get the original not morphed TVertex
tVertex = verticesList[tVertexIndex]
# Create a new morphed vertex
# (note: this vertex stores absolute values, not relative to original values)
tMorphVertex = TVertex()
# Set Blender index
tMorphVertex.blenderIndex = (meshIndex, vertexIndex)
# Set Vertex position
tMorphVertex.pos = Vector((position.x, position.z, position.y))
# Set Vertex normal
if tOptions.doMorphNor:
tMorphVertex.normal = Vector((normal.x, normal.z, normal.y))
# If we have UV, copy them to the TVertex, we only need them to calculate tangents
if tOptions.doMorphUV:
if tVertex.uv:
tMorphVertex.uv = tVertex.uv
elif tOptions.doForceElements:
tVertex.uv = Vector(0.0, 0.0)
# Save vertex index and morphed vertex, to be added later if at least one
# vertex in the face was morphed
tempList.append((tVertexIndex, tMorphVertex))
# Check if the morph has effect
if tMorphVertex.isMorphed(tVertex):
morphed = True
# If at least one vertex in the face was morphed
if morphed:
# Add vertices to the morph
for i, (tVertexIndex, tMorphVertex) in enumerate(tempList):
try:
# Check if already present
oldTMorphVertex = tMorph.vertexMap[tVertexIndex]
if tMorphVertex != oldTMorphVertex:
log.error('Different vertex {:d} of face {:d} of shape {:s}.'
.format(vertexIndex, face.index, block.name) )
continue
except KeyError:
# Add a new morph vertex
tMorph.vertexMap[tVertexIndex] = tMorphVertex
# Save how many unique vertex this LOD is using (for tangents calculation)
tMorph.indexSet.add(tVertexIndex)
# Create triangles (for tangents calculation)
if i == 2:
triangle = (tempList[0][0], tempList[2][0], tempList[1][0])
tMorph.triangleList.append(triangle)
if i == 3:
triangle = (tempList[0][0], tempList[3][0], tempList[2][0])
tMorph.triangleList.append(triangle)
if tOptions.doMorphTan:
log.info("Generating morph tangents {:s}".format(block.name) )
GenerateTangents((tMorph,), tMorph.vertexMap, None)
# If valid add the morph to the model list
if tMorph.vertexMap:
morphsList.append(tMorph)
else:
log.warning('Empty shape {:s}.'.format(block.name))
# Delete the temporary copy
bpy.data.meshes.remove(shapeMesh)
bpy.data.meshes.remove(mesh)
return
#--------------------
# Scan objects
#--------------------
# Scan and decompose objects
def Scan(context, tDataList, errorsMem, tOptions):
    """Scan the Blender scene and decompose mesh objects into TData containers.

    Gathers the mesh objects to export (optionally only the selected ones),
    groups and sorts them according to the LOD naming convention
    "<name>_LOD<distance>", then decomposes armature, animations and
    geometries of each object.

    Args:
        context: Blender context (gives access to the scene and selection).
        tDataList: output list; one TData is appended per exported mesh
            (or LOD groups share one when merging).
        errorsMem: error memory forwarded to DecomposeMesh.
        tOptions: TOptions with the export settings; its LOD bookkeeping
            fields (lodDistance, lodUpdatedGeometryIndices, lodIndex) are
            updated as a side effect.
    """
    scene = context.scene

    # Get all objects in the scene or only the selected in visible layers
    if tOptions.onlySelected:
        objs = context.selected_objects
    else:
        objs = scene.objects

    noLod = True
    noWork = True

    # Gather objects
    meshes = []
    for obj in objs:
        # Only meshes
        if obj.type != 'MESH':
            continue

        # Only not hidden
        if obj.hide:
            continue

        if tOptions.useLods:
            # Search in the object's name for this match: <name>_LOD<distance>
            # if not found, consider it as a LOD with distance 0.
            # (Raw string so "\d" is a regex class, not a string escape.)
            mo = re.match(r"(.*)_LOD(\d+\.\d+|\d+)", obj.name)
            if mo:
                noLod = False
                lodName = mo.group(1)
                lodDistance = float(mo.group(2))
            else:
                lodName = obj.name
                lodDistance = 0.0
        else:
            # Normal objects
            lodName = obj.name
            lodDistance = 0.0

        noWork = False
        assert(lodName)
        meshes.append( (obj, lodName, lodDistance) )

    if tOptions.useLods and noLod:
        log.warning("No LODs found")

    if noWork:
        log.warning("No objects to work on")

    # Sort objects
    if tOptions.useLods:
        if tOptions.mergeObjects:
            # Sort by distance then by LOD name
            meshes.sort(key=lambda x: (x[2],x[1]))
        else:
            # Sort by LOD name then by distance
            meshes.sort(key=lambda x: (x[1],x[2]))
    else:
        # Sort by object name = LOD name
        meshes.sort(key=lambda x: x[1])

    # Decompose objects
    tData = None
    lodCurrentName = None
    for obj, lodName, lodDistance in meshes:

        log.info("---- Decomposing {:s} ----".format(obj.name))

        # Are we creating a new container (TData) for a new mesh?
        # When merging is always False, when using LODs is True when changing
        # object but False when adding a LOD.
        createNew = True

        if tOptions.mergeObjects:
            createNew = False
            # If we are merging objects, use the current selected object name
            # (only if it is a mesh)
            if context.selected_objects:
                selectedObject = scene.objects.active
                if selectedObject.type == 'MESH' and selectedObject.name:
                    lodName = selectedObject.name

        if tOptions.useLods:
            if tOptions.mergeObjects:
                # Merging objects: never create a new mesh, add a new LOD when
                # distance changes
                if tOptions.lodDistance is None:
                    # This is the first LOD of the merge
                    if lodDistance != 0.0:
                        log.warning("First LOD should have 0.0 distance (found {:.3f})".format(lodDistance))
                elif tOptions.lodDistance != lodDistance:
                    # This is a lower LOD of the merge
                    tOptions.lodUpdatedGeometryIndices.clear() # request new LOD
                    assert(lodDistance >= tOptions.lodDistance)
                tOptions.lodDistance = lodDistance
                log.info("Merging as {:s} LOD with distance {:.3f}".format(lodName, lodDistance))
            else:
                # Multiple objects: create a new mesh (and new LOD) when name
                # changes, add a new LOD when distance changes
                if lodCurrentName is None or lodCurrentName != lodName:
                    # This is the first LOD of a new object
                    tOptions.lodIndex = 0
                    lodCurrentName = lodName
                    if lodDistance != 0.0:
                        log.warning("First LOD should have 0.0 distance (found {:.3f})".format(lodDistance))
                else:
                    # This is a lower LOD of the same object
                    createNew = False
                    tOptions.lodUpdatedGeometryIndices.clear() # request new LOD
                    if lodDistance <= tOptions.lodDistance:
                        # Bugfix: distances are floats, the old "{:d}" format
                        # raised ValueError whenever this warning fired.
                        log.warning("Wrong LOD sequence: {:.3f} then {:.3f}".format(tOptions.lodDistance, lodDistance) )
                tOptions.lodDistance = lodDistance
                log.info("Added as {:s} LOD with distance {:.3f}".format(lodName, lodDistance))

        # Create a new container where to save decomposed data
        if not tData or createNew:
            tData = TData()
            tData.objectName = lodName
            if not tOptions.mergeObjects:
                tData.blenderObjectName = obj.name
            tDataList.append(tData)
            tOptions.lodUpdatedGeometryIndices.clear() # request new LOD
            tOptions.lodDistance = 0.0

        # First we need to populate the skeleton, then animations and then
        # geometries
        armatureObj = None
        if tOptions.doBones:
            # Check if obj has an armature parent, if it is attached to a bone
            # (like hair to head bone) we'll skin it to the bone with 100%
            # weight (but it shouldn't have bone vertex groups)
            if obj.parent and obj.parent.type == 'ARMATURE':
                armatureObj = obj.parent
            else:
                # Check if there is an Armature modifier
                for modifier in obj.modifiers:
                    if modifier.type == 'ARMATURE' and modifier.object and modifier.object.type == 'ARMATURE':
                        armatureObj = modifier.object
                        break

            # Decompose armature and animations
            if armatureObj:
                savedValue = None
                if not tData.bonesMap or not tOptions.mergeObjects:
                    savedValue = SetRestPosePosition(context, armatureObj)
                    DecomposeArmature(scene, armatureObj, obj, tData, tOptions)
                if tOptions.doAnimations and (not tData.animationsList or not tOptions.mergeObjects):
                    armatureObj.data.pose_position = 'POSE'
                    DecomposeActions(scene, armatureObj, tData, tOptions)
                if savedValue:
                    RestorePosePosition(armatureObj, savedValue)
            else:
                log.warning("Object {:s} has no armature".format(obj.name) )

        # Decompose geometries
        if tOptions.doGeometries:
            # NOTE(review): armatureObj may be None here (doBones off or no
            # armature found); assumes SetRestPosePosition/RestorePosePosition
            # tolerate None -- TODO confirm.
            savedValue = SetRestPosePosition(context, armatureObj)
            DecomposeMesh(scene, obj, tData, tOptions, errorsMem)
            RestorePosePosition(armatureObj, savedValue)
#-----------------------------------------------------------------------------

# Manual test entry point: decompose the current Blender scene and print the
# first decomposed container, timing the whole run.
if __name__ == "__main__":

    print("------------------------------------------------------")

    startTime = ostime.time()

    tDataList = []
    tOptions = TOptions()

    # NOTE(review): Scan() is defined as Scan(context, tDataList, errorsMem,
    # tOptions) but is called here with only three arguments, so as written
    # this raises TypeError. An errorsMem value (presumably an errors-memory
    # object defined elsewhere in this file) should be created and passed as
    # the third argument -- TODO confirm against the full file.
    Scan(bpy.context, tDataList, tOptions)
    if tDataList:
        PrintAll(tDataList[0])

    print("Executed in {:.4f} sec".format(ostime.time() - startTime) )

    print("------------------------------------------------------")
| unlicense |
mydongistiny/external_chromium_org | tools/chrome_proxy/integration_tests/network_metrics_unittest.py | 42 | 6433 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import unittest
from integration_tests import network_metrics
from metrics import test_page_test_results
from telemetry.timeline import event
# Small fake HTML document used as a response body in the tests below.
HTML_BODY = """<!DOCTYPE HTML>
<html>
<head> </head>
<body>
<div id="test"> TEST HTML</div>
</body>
</html>"""

# Fake payload standing in for JPEG image data.
IMAGE_BODY = """fake image data"""

# Length of HTML_BODY after gzip compression. (The "Gizpped" typo in the
# helper name comes from the network_metrics module itself.)
GZIPPED_HTML_LEN = network_metrics.HTTPResponse.GetGizppedBodyLength(HTML_BODY)

# Make up original content length for the image.
IMAGE_OCL = 3 * len(IMAGE_BODY)
class NetworkMetricTest(unittest.TestCase):
  """Tests for network_metrics.HTTPResponse and network_metrics.NetworkMetric."""

  @staticmethod
  def MakeNetworkTimelineEvent(
      url, response_headers, body=None, base64_encoded_body=False,
      served_from_cache=False, request_headers=None, status=200):
    """Builds a fake telemetry TimelineEvent describing one HTTP response.

    Args:
      url: the response URL.
      response_headers: dict of response header name -> value.
      body: optional response body string.
      base64_encoded_body: whether `body` is base64-encoded.
      served_from_cache: whether the response was served from the cache.
      request_headers: optional dict of request headers (defaults to {}).
      status: HTTP status code of the response.

    Returns:
      An event.TimelineEvent with the args layout network_metrics expects.
    """
    if not request_headers:
      request_headers = {}
    e = event.TimelineEvent('network', 'HTTPResponse', 0, 0)
    e.args = {}
    e.args['requestId'] = 0
    e.args['response'] = {
        'status': status,
        'url': url,
        'headers': response_headers,
        'requestHeaders': request_headers,
        }
    e.args['body'] = body
    e.args['base64_encoded_body'] = base64_encoded_body
    e.args['served_from_cache'] = served_from_cache
    return e

  def testHTTPResponse(self):
    """Checks content length, encoding and data-saving-rate bookkeeping."""
    url = 'http://test.url'
    self.assertLess(GZIPPED_HTML_LEN, len(HTML_BODY))

    # A plain text HTML response
    resp = network_metrics.HTTPResponse(self.MakeNetworkTimelineEvent(
        url=url,
        response_headers={
            'Content-Type': 'text/html',
            'Content-Length': str(len(HTML_BODY)),
            },
        body=HTML_BODY))
    self.assertEqual(url, resp.response.url)
    body, base64_encoded = resp.response.GetBody()
    self.assertEqual(HTML_BODY, body)
    self.assertFalse(base64_encoded)
    self.assertEqual('text/html', resp.response.GetHeader('Content-Type'))

    # Uncompressed response: no original content length, so no saving.
    self.assertEqual(len(HTML_BODY), resp.content_length)
    self.assertEqual(None, resp.response.GetHeader('Content-Encoding'))
    self.assertFalse(resp.has_original_content_length)
    self.assertEqual(0.0, resp.data_saving_rate)

    # A gzipped HTML response
    resp = network_metrics.HTTPResponse(self.MakeNetworkTimelineEvent(
        url=url,
        response_headers={
            'Content-Type': 'text/html',
            'Content-Encoding': 'gzip',
            'X-Original-Content-Length': str(len(HTML_BODY)),
            },
        body=HTML_BODY))
    body, base64_encoded = resp.response.GetBody()
    self.assertFalse(base64_encoded)
    self.assertEqual(GZIPPED_HTML_LEN, resp.content_length)
    self.assertEqual('gzip', resp.response.GetHeader('Content-Encoding'))
    self.assertTrue(resp.has_original_content_length)
    self.assertEqual(len(HTML_BODY), resp.original_content_length)
    self.assertEqual(
        float(len(HTML_BODY) - GZIPPED_HTML_LEN) / len(HTML_BODY),
        resp.data_saving_rate)

    # A JPEG image response.
    resp = network_metrics.HTTPResponse(self.MakeNetworkTimelineEvent(
        url='http://test.image',
        response_headers={
            'Content-Type': 'image/jpeg',
            'Content-Encoding': 'gzip',
            'X-Original-Content-Length': str(IMAGE_OCL),
            },
        body=base64.b64encode(IMAGE_BODY),
        base64_encoded_body=True))
    body, base64_encoded = resp.response.GetBody()
    self.assertTrue(base64_encoded)
    self.assertEqual(IMAGE_BODY, base64.b64decode(body))
    self.assertEqual(len(IMAGE_BODY), resp.content_length)
    self.assertTrue(resp.has_original_content_length)
    self.assertEqual(IMAGE_OCL, resp.original_content_length)
    self.assertFalse(resp.response.served_from_cache)
    self.assertEqual(float(IMAGE_OCL - len(IMAGE_BODY)) / IMAGE_OCL,
                     resp.data_saving_rate)

    # A JPEG image response from cache.
    resp = network_metrics.HTTPResponse(self.MakeNetworkTimelineEvent(
        url='http://test.image',
        response_headers={
            'Content-Type': 'image/jpeg',
            'Content-Encoding': 'gzip',
            'X-Original-Content-Length': str(IMAGE_OCL),
            },
        body=base64.b64encode(IMAGE_BODY),
        base64_encoded_body=True,
        served_from_cache=True))
    self.assertEqual(len(IMAGE_BODY), resp.content_length)
    self.assertTrue(resp.has_original_content_length)
    self.assertEqual(IMAGE_OCL, resp.original_content_length)

    # Cached resource has zero saving.
    self.assertTrue(resp.response.served_from_cache)
    self.assertEqual(0.0, resp.data_saving_rate)

  def testNetworkMetricResults(self):
    """Checks aggregated content-length and data-saving results."""
    events = [
        # A plain text HTML.
        self.MakeNetworkTimelineEvent(
            url='http://test.html1',
            response_headers={
                'Content-Type': 'text/html',
                'Content-Length': str(len(HTML_BODY)),
                },
            body=HTML_BODY),
        # A compressed HTML.
        self.MakeNetworkTimelineEvent(
            url='http://test.html2',
            response_headers={
                'Content-Type': 'text/html',
                'Content-Encoding': 'gzip',
                'X-Original-Content-Length': str(len(HTML_BODY)),
                },
            body=HTML_BODY),
        # A base64 encoded image.
        self.MakeNetworkTimelineEvent(
            url='http://test.image',
            response_headers={
                'Content-Type': 'image/jpeg',
                'Content-Encoding': 'gzip',
                'X-Original-Content-Length': str(IMAGE_OCL),
                },
            body=base64.b64encode(IMAGE_BODY),
            base64_encoded_body=True),
        ]
    metric = network_metrics.NetworkMetric()
    metric._events = events
    metric.compute_data_saving = True

    # Bugfix: this was assertTrue(a, b), which treats b as the failure
    # message and always passes for non-empty events; assertEqual is the
    # intended assertion.
    self.assertEqual(len(events), len(list(metric.IterResponses(None))))
    results = test_page_test_results.TestPageTestResults(self)
    metric.AddResults(None, results)

    cl = len(HTML_BODY) + GZIPPED_HTML_LEN + len(IMAGE_BODY)
    results.AssertHasPageSpecificScalarValue('content_length', 'bytes', cl)

    ocl = len(HTML_BODY) + len(HTML_BODY) + IMAGE_OCL
    results.AssertHasPageSpecificScalarValue(
        'original_content_length', 'bytes', ocl)

    saving_percent = float(ocl - cl) * 100/ ocl
    results.AssertHasPageSpecificScalarValue(
        'data_saving', 'percent', saving_percent)
| bsd-3-clause |
flavour/tldrmp | private/templates/RGIMS/controllers.py | 3 | 7814 | # -*- coding: utf-8 -*-
from os import path
from gluon import current
from gluon.html import *
from s3.s3utils import s3_register_validation
# =============================================================================
class index():
    """ Custom Home Page for the RGIMS template.

    Renders the menu boxes (Request / Send / Receive), the map shortcut,
    an optional "Manage Your Facilities" selector for authenticated users
    with org permissions, and the login/registration forms for anonymous
    visitors.
    """

    def __call__(self):
        """Build and return the template context dict for the home page."""

        T = current.T
        auth = current.auth
        db = current.db
        # Bugfix: s3db was referenced below (org_SiteRepresent) but never
        # defined in this file, raising NameError for authenticated users
        # with facility permissions.
        s3db = current.s3db
        request = current.request
        appname = request.application
        response = current.response
        s3 = response.s3
        settings = current.deployment_settings

        view = path.join(request.folder, "private", "templates",
                         "RGIMS", "views", "index.html")
        try:
            # Pass view as file not str to work in compiled mode
            response.view = open(view, "rb")
        except IOError:
            from gluon.http import HTTP
            raise HTTP(404, "Unable to open Custom View: %s" % view)

        title = settings.get_system_name()
        response.title = title

        # Menu Boxes
        menu_btns = [#div, label, app, function
                     ["sit", T("Request"), "req", "req"],
                     ["dec", T("Send"), "inv", "send"],
                     ["res", T("Receive"), "inv", "recv"]
                     ]

        menu_divs = {"facility": DIV( H3("Map"),
                                      _id = "facility_box", _class = "menu_box"),
                     "sit": DIV(
                            _id = "menu_div_sit", _class = "menu_div"),
                     "dec": DIV(
                            _id = "menu_div_dec", _class = "menu_div"),
                     "res": DIV(
                            _id = "menu_div_res", _class = "menu_div"),
                     }

        for div, label, app, function in menu_btns:
            if settings.has_module(app):
                # @ToDo: Also check permissions (e.g. for anonymous users)
                menu_divs[div].append(A(DIV(label,
                                            _class = "menu-btn-r"),
                                        _class = "menu-btn-l",
                                        _href = URL(app,function)
                                        )
                                      )

        div_arrow = DIV(IMG(_src = "/%s/static/img/arrow_blue_right.png" % \
                            appname),
                        _class = "div_arrow")
        sit_dec_res_box = DIV(menu_divs["sit"],
                              div_arrow,
                              menu_divs["dec"],
                              div_arrow,
                              menu_divs["res"],
                              _id = "sit_dec_res_box",
                              _class = "menu_box fleft swidth"
                              #div_additional,
                              )
        facility_box = menu_divs["facility"]
        facility_box.append(A(IMG(_src = "/%s/static/img/map_icon_128.png" % \
                                  appname),
                              _href = URL(c="gis", f="index"),
                              _title = T("Map")
                              )
                            )

        # Check logged in AND permissions
        _s3 = current.session.s3
        AUTHENTICATED = _s3.system_roles.AUTHENTICATED
        roles = _s3.roles
        if AUTHENTICATED in roles and \
           auth.s3_has_permission("read", db.org_organisation):
            auth.permission.controller = "org"
            auth.permission.function = "site"
            permitted_facilities = auth.permitted_facilities(redirect_on_error=False)
            if permitted_facilities:
                facilities = s3db.org_SiteRepresent().bulk(permitted_facilities)
                facility_list = [(fac, facilities[fac]) for fac in facilities]
                facility_list = sorted(facility_list, key=lambda fac: fac[1])
                facility_opts = [OPTION(fac[1], _value=fac[0])
                                 for fac in facility_list]
                manage_facility_box = DIV(H3(T("Manage Your Facilities")),
                                          SELECT(_id = "manage_facility_select",
                                                 _style = "max-width:400px;",
                                                 *facility_opts
                                                 ),
                                          A(T("Go"),
                                            _href = URL(c="default", f="site",
                                                        args=[facility_list[0][0]]),
                                            #_disabled = "disabled",
                                            _id = "manage_facility_btn",
                                            _class = "action-btn"
                                            ),
                                          _id = "manage_facility_box",
                                          _class = "menu_box fleft"
                                          )
                # Keep the "Go" button's href in sync with the selector
                s3.jquery_ready.append(
'''$('#manage_facility_select').change(function(){
$('#manage_facility_btn').attr('href',S3.Ap.concat('/default/site/',$('#manage_facility_select').val()))
})''')
            else:
                manage_facility_box = ""
        else:
            manage_facility_box = ""

        # Login/Registration forms
        self_registration = settings.get_security_self_registration()
        registered = False
        login_form = None
        login_div = None
        register_form = None
        register_div = None
        if AUTHENTICATED not in roles:
            # This user isn't yet logged-in
            # (modernized from dict.has_key(), which is Python-2-only)
            if "registered" in request.cookies:
                # This browser has logged-in before
                registered = True

            if self_registration:
                # Provide a Registration box on front page
                register_form = auth.s3_registration_form()
                register_div = DIV(H3(T("Register")),
                                   P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
                                         dict(sign_up_now=B(T("sign-up now"))))))

                if request.env.request_method == "POST":
                    post_script = \
'''$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')'''
                else:
                    post_script = ""
                register_script = \
'''$('#register-btn').attr('href','#register')
$('#login-btn').attr('href','#login')
%s
$('#register-btn').click(function(){
$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')
})
$('#login-btn').click(function(){
$('#register_form').addClass('hide')
$('#login_form').removeClass('hide')
})''' % post_script
                s3.jquery_ready.append(register_script)

            # Provide a login box on front page
            request.args = ["login"]
            auth.messages.submit_button = T("Login")
            login_form = auth()
            login_div = DIV(H3(T("Login")),
                            P(XML(T("Registered users can %(login)s to access the system") % \
                                  dict(login=B(T("login"))))))

        return dict(title = title,
                    sit_dec_res_box = sit_dec_res_box,
                    facility_box = facility_box,
                    manage_facility_box = manage_facility_box,
                    self_registration=self_registration,
                    registered=registered,
                    login_form=login_form,
                    login_div=login_div,
                    register_form=register_form,
                    register_div=register_div
                    )
# END =========================================================================
| mit |
jerrybai2009/WhereHows-1 | metadata-etl/src/main/resources/jython/requests/__init__.py | 149 | 2215 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
Requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2016 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
# Package metadata, kept as plain module attributes for introspection.
__title__ = 'requests'
__version__ = '2.10.0'
__build__ = 0x021000
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2016 Kenneth Reitz'

# Attempt to enable urllib3's SNI support, if possible.
# pyOpenSSL is an optional dependency; without it older Pythons lack SNI
# for HTTPS, so the ImportError is deliberately swallowed.
try:
    from .packages.urllib3.contrib import pyopenssl
    pyopenssl.inject_into_urllib3()
except ImportError:
    pass

import warnings

# urllib3's DependencyWarnings should be silenced.
from .packages.urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)

# Public API re-exports: requests.get/post/..., Session, codes, exceptions.
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
    RequestException, Timeout, URLRequired,
    TooManyRedirects, HTTPError, ConnectionError,
    FileModeWarning, ConnectTimeout, ReadTimeout
)

# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Fallback for Python < 2.7, which has no logging.NullHandler.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

logging.getLogger(__name__).addHandler(NullHandler())

# NOTE(review): 'warnings' was already imported above; this re-import is
# redundant but harmless.
import warnings

# FileModeWarnings go off per the default.
warnings.simplefilter('default', FileModeWarning, append=True)
| apache-2.0 |
gaborvecsei/Color-Tracker | examples/tracking.py | 1 | 2306 | import argparse
from functools import partial
import cv2
import color_tracker
# You can determine these values with the HSVColorRangeDetector()
# Default HSV range (OpenCV scale: H in [0, 179], S and V in [0, 255]);
# this range roughly selects red/pink hues.
HSV_LOWER_VALUE = [155, 103, 82]
HSV_UPPER_VALUE = [178, 255, 255]
def get_args(argv=None):
    """Parse command line arguments for the tracking demo.

    Args:
        argv: optional list of argument strings to parse instead of
            ``sys.argv[1:]`` (useful for testing). ``None`` keeps the
            original behavior of reading the process arguments.

    Returns:
        argparse.Namespace with ``low``, ``high``, ``contour_area`` and
        ``verbose`` attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-low", "--low", nargs=3, type=int, default=HSV_LOWER_VALUE,
                        help="Lower value for the HSV range. Default = 155, 103, 82")
    parser.add_argument("-high", "--high", nargs=3, type=int, default=HSV_UPPER_VALUE,
                        help="Higher value for the HSV range. Default = 178, 255, 255")
    parser.add_argument("-c", "--contour-area", type=float, default=2500,
                        help="Minimum object contour area. This controls how small objects should be detected. Default = 2500")
    parser.add_argument("-v", "--verbose", action="store_true")
    # parse_args(None) falls back to sys.argv[1:], so existing callers
    # (main()) are unaffected.
    args = parser.parse_args(argv)
    return args
def tracking_callback(tracker: color_tracker.ColorTracker, verbose: bool = True):
    """Per-frame callback: show the camera and debug frames, allow quitting
    with ESC, and optionally print every tracked object's last position."""
    # Show the raw frame next to the tracker's annotated debug frame
    for window_name, image in (("original frame", tracker.frame),
                               ("debug frame", tracker.debug_frame)):
        cv2.imshow(window_name, image)

    # ESC (key code 27) stops the tracking loop
    if cv2.waitKey(1) == 27:
        tracker.stop_tracking()

    if not verbose:
        return
    for tracked in tracker.tracked_objects:
        print("Object {0} center {1}".format(tracked.id, tracked.last_point))
def main():
    """Run the color-tracking demo on the default webcam (device 0)."""
    args = get_args()

    # Elliptical structuring element used by the tracker's morphology steps
    morph_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))

    # Tracker with debug output enabled so tracking_callback can display it
    tracker = color_tracker.ColorTracker(max_nb_of_objects=5, max_nb_of_points=20, debug=True)

    # Bind the verbosity flag and register the per-frame callback
    tracker.set_tracking_callback(
        tracking_callback=partial(tracking_callback, verbose=args.verbose))

    # The context manager opens and releases the camera
    with color_tracker.WebCamera(video_src=0) as webcam:
        # Blocks until tracking is stopped (ESC in the callback)
        tracker.track(webcam,
                      hsv_lower_value=args.low,
                      hsv_upper_value=args.high,
                      min_contour_area=args.contour_area,
                      kernel=morph_kernel)
# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| mit |
wakatime/wakatime | wakatime/packages/py26/pygments/scanner.py | 31 | 3123 | # -*- coding: utf-8 -*-
"""
pygments.scanner
~~~~~~~~~~~~~~~~
This library implements a regex based scanner. Some languages
like Pascal are easy to parse but have some keywords that
depend on the context. Because of this it's impossible to lex
that just by using a regular expression lexer like the
`RegexLexer`.
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
class EndOfText(RuntimeError):
    """
    Raised if the end of the input text has been reached and the user
    tried to call a match function (`check`, `scan`, ...).
    """
class Scanner(object):
    """
    Simple regex-based scanner.

    All method patterns are regular expression strings (not
    compiled expressions!); they are compiled lazily and cached
    per instance.
    """

    def __init__(self, text, flags=0):
        """
        :param text: The text which should be scanned
        :param flags: default regular expression flags
        """
        self.data = text
        self.data_length = len(text)
        self.start_pos = 0
        self.pos = 0
        self.flags = flags
        self.last = None
        self.match = None
        self._re_cache = {}

    @property
    def eos(self):
        """`True` if the scanner reached the end of text."""
        # Bugfix: the original used ``eos = property(eos, eos.__doc__)``,
        # which passes the docstring as the property *setter*
        # (property(fget, fset, fdel, doc)). The decorator form sets the
        # docstring correctly and keeps the property read-only.
        return self.pos >= self.data_length

    def check(self, pattern):
        """
        Apply `pattern` on the current position and return
        the match object. (Doesn't touch pos). Use this for
        lookahead.
        """
        if self.eos:
            raise EndOfText()
        if pattern not in self._re_cache:
            self._re_cache[pattern] = re.compile(pattern, self.flags)
        return self._re_cache[pattern].match(self.data, self.pos)

    def test(self, pattern):
        """Apply a pattern on the current position and check
        if it matches. Doesn't touch pos.
        """
        return self.check(pattern) is not None

    def scan(self, pattern):
        """
        Scan the text for the given pattern and update pos/match
        and related fields. The return value is a boolean that
        indicates if the pattern matched. The matched value is
        stored on the instance as ``match``, the last value is
        stored as ``last``. ``start_pos`` is the position of the
        pointer before the pattern was matched, ``pos`` is the
        end position.
        """
        if self.eos:
            raise EndOfText()
        if pattern not in self._re_cache:
            self._re_cache[pattern] = re.compile(pattern, self.flags)
        self.last = self.match
        m = self._re_cache[pattern].match(self.data, self.pos)
        if m is None:
            return False
        self.start_pos = m.start()
        self.pos = m.end()
        self.match = m.group()
        return True

    def get_char(self):
        """Scan exactly one char.

        NOTE(review): uses the pattern ``'.'``, which does not match a
        newline, so at a newline this silently fails to advance -- kept
        as-is to preserve the original behavior.
        """
        self.scan('.')

    def __repr__(self):
        return '<%s %d/%d>' % (
            self.__class__.__name__,
            self.pos,
            self.data_length
        )
| bsd-3-clause |
dimtruck/magnum | magnum/tests/functional/api/v1/clients/baymodel_client.py | 2 | 3764 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from magnum.tests.functional.api.v1.models import baymodel_model
from magnum.tests.functional.common import client
class BayModelClient(client.ClientMixin):
"""Encapsulates REST calls and maps JSON to/from models"""
@classmethod
def baymodels_uri(cls, filters=None):
"""Construct baymodels uri with optional filters
:param filters: Optional k:v dict that's converted to url query
:returns: url string
"""
url = "/baymodels"
if filters:
url = cls.add_filters(url, filters)
return url
@classmethod
def baymodel_uri(cls, baymodel_id):
"""Construct baymodel uri
:param baymodel_id: baymodel uuid or name
:returns: url string
"""
return "{0}/{1}".format(cls.baymodels_uri(), baymodel_id)
def list_baymodels(self, filters=None, **kwargs):
"""Makes GET /baymodels request and returns BayModelCollection
Abstracts REST call to return all baymodels
:param filters: Optional k:v dict that's converted to url query
:returns: response object and BayModelCollection object
"""
resp, body = self.client.get(self.baymodels_uri(filters), **kwargs)
return self.deserialize(resp, body, baymodel_model.BayModelCollection)
def get_baymodel(self, baymodel_id, **kwargs):
"""Makes GET /baymodel request and returns BayModelEntity
Abstracts REST call to return a single baymodel based on uuid or name
:param baymodel_id: baymodel uuid or name
:returns: response object and BayModelCollection object
"""
resp, body = self.client.get(self.baymodel_uri(baymodel_id))
return self.deserialize(resp, body, baymodel_model.BayModelEntity)
def post_baymodel(self, model, **kwargs):
"""Makes POST /baymodel request and returns BayModelEntity
Abstracts REST call to create new baymodel
:param model: BayModelEntity
:returns: response object and BayModelEntity object
"""
resp, body = self.client.post(
self.baymodels_uri(),
body=model.to_json(), **kwargs)
return self.deserialize(resp, body, baymodel_model.BayModelEntity)
def patch_baymodel(self, baymodel_id, baymodelpatch_listmodel, **kwargs):
"""Makes PATCH /baymodel request and returns BayModelEntity
Abstracts REST call to update baymodel attributes
:param baymodel_id: UUID of baymodel
:param baymodelpatch_listmodel: BayModelPatchCollection
:returns: response object and BayModelEntity object
"""
resp, body = self.client.patch(
self.baymodel_uri(baymodel_id),
body=baymodelpatch_listmodel.to_json(), **kwargs)
return self.deserialize(resp, body, baymodel_model.BayModelEntity)
def delete_baymodel(self, baymodel_id, **kwargs):
    """DELETE a baymodel by uuid or name.

    :param baymodel_id: UUID or name of baymodel
    :returns: response object
    """
    uri = self.baymodel_uri(baymodel_id)
    return self.client.delete(uri, **kwargs)
| apache-2.0 |
yufengg/tensorflow | tensorflow/contrib/slim/python/slim/summaries.py | 63 | 7497 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains helper functions for creating summaries.
This module contains various helper functions for quickly and easily adding
tensorflow summaries. These allow users to print summary values
automatically as they are computed and add prefixes to collections of summaries.
Example usage:
import tensorflow as tf
slim = tf.contrib.slim
slim.summaries.add_histogram_summaries(slim.variables.get_model_variables())
slim.summaries.add_scalar_summary(total_loss, 'Total Loss')
slim.summaries.add_scalar_summary(learning_rate, 'Learning Rate')
slim.summaries.add_histogram_summaries(my_tensors)
slim.summaries.add_zero_fraction_summaries(my_tensors)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn_impl as nn
from tensorflow.python.summary import summary
def _get_summary_name(tensor, name=None, prefix=None, postfix=None):
"""Produces the summary name given.
Args:
tensor: A variable or op `Tensor`.
name: The optional name for the summary.
prefix: An optional prefix for the summary name.
postfix: An optional postfix for the summary name.
Returns:
a summary name.
"""
if not name:
name = tensor.op.name
if prefix:
name = prefix + '/' + name
if postfix:
name = name + '/' + postfix
return name
def add_histogram_summary(tensor, name=None, prefix=None):
  """Adds a histogram summary for the given tensor.

  Args:
    tensor: A variable or op tensor.
    name: The optional name for the summary.
    prefix: An optional prefix for the summary names.

  Returns:
    A scalar `Tensor` of type `string` whose contents are the serialized
    `Summary` protocol buffer.
  """
  summary_name = _get_summary_name(tensor, name, prefix)
  return summary.histogram(summary_name, tensor)
def add_image_summary(tensor, name=None, prefix=None, print_summary=False):
  """Adds an image summary for the given tensor.

  Args:
    tensor: a variable or op tensor with shape [batch,height,width,channels]
    name: the optional name for the summary.
    prefix: An optional prefix for the summary names.
    print_summary: If `True`, the summary is printed to stdout when the summary
      is computed.

  Returns:
    An image `Tensor` of type `string` whose contents are the serialized
    `Summary` protocol buffer.
  """
  summary_name = _get_summary_name(tensor, name, prefix)
  # When printing, keep the raw summary op out of the SUMMARIES collection;
  # the printing wrapper below is registered in its place.
  op = summary.image(
      name=summary_name, tensor=tensor,
      collections=[] if print_summary else None)
  if print_summary:
    op = logging_ops.Print(op, [tensor], summary_name)
    ops.add_to_collection(ops.GraphKeys.SUMMARIES, op)
  return op
def add_scalar_summary(tensor, name=None, prefix=None, print_summary=False):
  """Adds a scalar summary for the given tensor.

  Args:
    tensor: a variable or op tensor.
    name: the optional name for the summary.
    prefix: An optional prefix for the summary names.
    print_summary: If `True`, the summary is printed to stdout when the summary
      is computed.

  Returns:
    A scalar `Tensor` of type `string` whose contents are the serialized
    `Summary` protocol buffer.
  """
  summary_name = _get_summary_name(tensor, name, prefix)
  # When printing, keep the raw summary op out of the SUMMARIES collection;
  # the printing wrapper below is registered in its place.
  op = summary.scalar(
      name=summary_name, tensor=tensor,
      collections=[] if print_summary else None)
  if print_summary:
    op = logging_ops.Print(op, [tensor], summary_name)
    ops.add_to_collection(ops.GraphKeys.SUMMARIES, op)
  return op
def add_zero_fraction_summary(tensor, name=None, prefix=None,
                              print_summary=False):
  """Adds a summary for the percentage of zero values in the given tensor.

  Args:
    tensor: a variable or op tensor.
    name: the optional name for the summary.
    prefix: An optional prefix for the summary names.
    print_summary: If `True`, the summary is printed to stdout when the summary
      is computed.

  Returns:
    A scalar `Tensor` of type `string` whose contents are the serialized
    `Summary` protocol buffer.
  """
  summary_name = _get_summary_name(
      tensor, name, prefix, 'Fraction of Zero Values')
  zero_frac = nn.zero_fraction(tensor)
  return add_scalar_summary(zero_frac, summary_name,
                            print_summary=print_summary)
def add_histogram_summaries(tensors, prefix=None):
  """Adds a histogram summary for each of the given tensors.

  Args:
    tensors: A list of variable or op tensors.
    prefix: An optional prefix for the summary names.

  Returns:
    A list of scalar `Tensors` of type `string` whose contents are the
    serialized `Summary` protocol buffer.
  """
  return [add_histogram_summary(t, prefix=prefix) for t in tensors]
def add_image_summaries(tensors, prefix=None):
  """Adds an image summary for each of the given tensors.

  Args:
    tensors: A list of variable or op tensors.
    prefix: An optional prefix for the summary names.

  Returns:
    A list of scalar `Tensors` of type `string` whose contents are the
    serialized `Summary` protocol buffer.
  """
  return [add_image_summary(t, prefix=prefix) for t in tensors]
def add_scalar_summaries(tensors, prefix=None, print_summary=False):
  """Adds a scalar summary for each of the given tensors.

  Args:
    tensors: a list of variable or op tensors.
    prefix: An optional prefix for the summary names.
    print_summary: If `True`, the summary is printed to stdout when the summary
      is computed.

  Returns:
    A list of scalar `Tensors` of type `string` whose contents are the
    serialized `Summary` protocol buffer.
  """
  return [add_scalar_summary(t, prefix=prefix, print_summary=print_summary)
          for t in tensors]
def add_zero_fraction_summaries(tensors, prefix=None):
  """Adds a scalar zero-fraction summary for each of the given tensors.

  Args:
    tensors: a list of variable or op tensors.
    prefix: An optional prefix for the summary names.

  Returns:
    A list of scalar `Tensors` of type `string` whose contents are the
    serialized `Summary` protocol buffer.
  """
  return [add_zero_fraction_summary(t, prefix=prefix) for t in tensors]
| apache-2.0 |
dosiecki/NewsBlur | apps/rss_feeds/management/shells.py | 20 | 2149 |
class ObjectImportError(Exception):
    """Signals a failure while auto-importing objects into the shell namespace."""
    pass
def import_objects(options, style):
    """Auto-import every installed app's models for an interactive shell.

    options: management-command options dict; reads 'dont_load' (list of
        "app" or "app.Model" names to skip) and 'quiet_load' (suppress the
        per-app status printout).
    style: console color helper used for status/error output.

    Returns the namespace dict (always including 'settings') mapping each
    model name (or its configured alias) to the imported class.
    """
    # XXX: (Temporary) workaround for ticket #1796: force early loading of all
    # models from installed apps. (this is fixed by now, but leaving it here
    # for people using 0.96 or older trunk (pre [5919]) versions.
    from django.db.models.loading import get_models, get_apps
    loaded_models = get_models()  # NOQA

    from django.conf import settings
    imported_objects = {'settings': settings}

    dont_load_cli = options.get('dont_load')  # optparse will set this to [] if it doesn't exist
    dont_load_conf = getattr(settings, 'SHELL_PLUS_DONT_LOAD', [])
    # CLI exclusions and settings-file exclusions are combined.
    dont_load = dont_load_cli + dont_load_conf
    quiet_load = options.get('quiet_load')

    # Optional {app_name: {model_name: alias}} renames from settings.
    model_aliases = getattr(settings, 'SHELL_PLUS_MODEL_ALIASES', {})

    for app_mod in get_apps():
        app_models = get_models(app_mod)
        if not app_models:
            continue

        # app module path is "<...>.<app_name>.models" -> take [-2].
        app_name = app_mod.__name__.split('.')[-2]
        if app_name in dont_load:
            continue

        app_aliases = model_aliases.get(app_name, {})
        model_labels = []

        for model in app_models:
            try:
                imported_object = getattr(__import__(app_mod.__name__, {}, {}, model.__name__), model.__name__)
                model_name = model.__name__

                # Entries may also be excluded individually as "app.Model".
                if "%s.%s" % (app_name, model_name) in dont_load:
                    continue

                alias = app_aliases.get(model_name, model_name)
                imported_objects[alias] = imported_object
                if model_name == alias:
                    model_labels.append(model_name)
                else:
                    model_labels.append("%s (as %s)" % (model_name, alias))

            except AttributeError as e:
                # Model class not importable from the app module; report and
                # keep going with the remaining models.
                if not quiet_load:
                    print(style.ERROR("Failed to import '%s' from '%s' reason: %s" % (model.__name__, app_name, str(e))))
                continue

        if not quiet_load:
            print(style.SQL_COLTYPE("From '%s' autoload: %s" % (app_mod.__name__.split('.')[-2], ", ".join(model_labels))))

    return imported_objects
| mit |
rayNymous/nupic | src/nupic/encoders/multi.py | 18 | 7681 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.encoders.base import Encoder
from nupic.encoders.scalar import ScalarEncoder
from nupic.encoders.adaptivescalar import AdaptiveScalarEncoder
from nupic.encoders.date import DateEncoder
from nupic.encoders.logenc import LogEncoder
from nupic.encoders.category import CategoryEncoder
from nupic.encoders.sdrcategory import SDRCategoryEncoder
from nupic.encoders.delta import DeltaEncoder
from nupic.encoders.scalarspace import ScalarSpaceEncoder
from nupic.encoders.pass_through_encoder import PassThroughEncoder
from nupic.encoders.sparse_pass_through_encoder import SparsePassThroughEncoder
from nupic.encoders.coordinate import CoordinateEncoder
from nupic.encoders.geospatial_coordinate import GeospatialCoordinateEncoder
# multiencoder must be imported last because it imports * from this module!
from nupic.encoders.utils import bitsToString
from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder
# Map encoder class -> the attribute name of the Cap'n Proto schema union
# member used to (de)serialize that encoder type.
# NOTE(review): GeospatialCoordinateEncoder is imported above but has no
# entry here -- confirm it is intentionally excluded from serialization.
_CLASS_ATTR_MAP = {
  ScalarEncoder: "scalarEncoder",
  AdaptiveScalarEncoder: "adaptivescalar",
  DateEncoder: "dateEncoder",
  LogEncoder: "logEncoder",
  CategoryEncoder: "categoryEncoder",
  CoordinateEncoder: "coordinateEncoder",
  SDRCategoryEncoder: "sdrCategoryEncoder",
  DeltaEncoder: "deltaEncoder",
  PassThroughEncoder: "passThroughEncoder",
  SparsePassThroughEncoder: "sparsePassThroughEncoder",
  RandomDistributedScalarEncoder: "randomDistributedScalarEncoder"
}

# Inverted map (union attribute name -> class) for fast lookup in
# MultiEncoder.read()
_ATTR_CLASS_MAP = {value: key for key, value in _CLASS_ATTR_MAP.items()}
class MultiEncoder(Encoder):
  """A MultiEncoder encodes a dictionary or object with multiple components.

  A MultiEncoder contains a number of sub-encoders, each of which encodes a
  separate component. The sub-encodings are laid out side by side, in the
  order the encoders were added, to form the combined output vector.
  """

  def __init__(self, encoderDescriptions=None):
    self.width = 0        # total output width in bits
    self.encoders = []    # list of (fieldName, encoder, bitOffset) tuples
    self.description = []
    self.name = ''
    if encoderDescriptions is not None:
      self.addMultipleEncoders(encoderDescriptions)

  def setFieldStats(self, fieldName, fieldStatistics):
    """Forward the field statistics to every sub-encoder."""
    for (name, encoder, offset) in self.encoders:
      encoder.setFieldStats(name, fieldStatistics)

  def addEncoder(self, name, encoder):
    """Append `encoder` for field `name`; its bits start at the current width."""
    self.encoders.append((name, encoder, self.width))
    for d in encoder.getDescription():
      self.description.append((d[0], d[1] + self.width))
    self.width += encoder.getWidth()

    # Invalidate the flattened caches maintained by the Encoder base class.
    self._flattenedEncoderList = None
    self._flattenedFieldTypeList = None

  def encodeIntoArray(self, obj, output):
    """Encode each field of `obj` into its own slice of `output`."""
    for name, encoder, offset in self.encoders:
      encoder.encodeIntoArray(self._getInputValue(obj, name), output[offset:])

  def getDescription(self):
    return self.description

  def getWidth(self):
    """Represents the sum of the widths of each fields encoding."""
    return self.width

  def setLearning(self, learningEnabled):
    """Turn learning on/off for every sub-encoder."""
    encoders = self.getEncoderList()
    for encoder in encoders:
      encoder.setLearning(learningEnabled)
    return

  def encodeField(self, fieldName, value):
    """Encode `value` with the encoder registered for `fieldName`.

    Returns None when no encoder is registered under that name.
    """
    for name, encoder, offset in self.encoders:
      if name == fieldName:
        return encoder.encode(value)

  def encodeEachField(self, inputRecord):
    """Return a list with each field of `inputRecord` encoded separately."""
    encodings = []
    for name, encoder, offset in self.encoders:
      encodings.append(encoder.encode(getattr(inputRecord, name)))
    return encodings

  def addMultipleEncoders(self, fieldEncodings):
    """
    fieldEncodings -- a dict of dicts, mapping field names to the field params
    dict.

    Each field params dict has the following keys:
    1) data fieldname that matches the key ('fieldname')
    2) an encoder type ('type')
    3) and the encoder params (all other keys)

    For example,
    fieldEncodings={
        'dateTime': dict(fieldname='dateTime', type='DateEncoder',
                         timeOfDay=(5,5)),
        'attendeeCount': dict(fieldname='attendeeCount', type='ScalarEncoder',
                              name='attendeeCount', minval=0, maxval=250,
                              clipInput=True, w=5, resolution=10),
        'consumption': dict(fieldname='consumption', type='ScalarEncoder',
                            name='consumption', minval=0, maxval=110,
                            clipInput=True, w=5, resolution=5),
    }

    would yield a vector with a part encoded by the DateEncoder, and two
    parts separately taken care of by the ScalarEncoder with the specified
    parameters. The separate encodings are merged together into the final
    vector in such a way that they are always at the same location within
    the vector.
    """
    # Sort the encoders so that they end up in a controlled order.
    encoderList = sorted(fieldEncodings.items())
    for key, fieldParams in encoderList:
      if ':' not in key and fieldParams is not None:
        fieldParams = fieldParams.copy()
        fieldName = fieldParams.pop('fieldname')
        encoderName = fieldParams.pop('type')
        try:
          # SECURITY NOTE: eval() executes the 'type' string as Python code;
          # only pass trusted field descriptions to this method.
          self.addEncoder(fieldName, eval(encoderName)(**fieldParams))
        except TypeError as e:
          # `as e` replaces the Python-2-only `except TypeError, e` syntax;
          # behavior is unchanged on Python 2.6+.
          print ("#### Error in constructing %s encoder. Possibly missing "
                 "some required constructor parameters. Parameters "
                 "that were provided are: %s" % (encoderName, fieldParams))
          raise

  @classmethod
  def read(cls, proto):
    """Deserialize a MultiEncoder from a Cap'n Proto reader object."""
    encoder = object.__new__(cls)
    encoder.encoders = [None] * len(proto.encoders)
    encoder.width = 0

    for index, encoderProto in enumerate(proto.encoders):
      # Identify which attr is set in union
      encoderType = encoderProto.which()
      encoderDetails = getattr(encoderProto, encoderType)
      encoder.encoders[index] = (
        encoderProto.name,
        # Call class.read() where class is determined by _ATTR_CLASS_MAP
        _ATTR_CLASS_MAP.get(encoderType).read(encoderDetails),
        encoderProto.offset
      )
      encoder.width += encoder.encoders[index][1].getWidth()

    # Derive description from encoder list
    encoder.description = [(enc[1].name, enc[2]) for enc in encoder.encoders]
    encoder.name = proto.name
    return encoder

  def write(self, proto):
    """Serialize this MultiEncoder into a Cap'n Proto builder object."""
    proto.init("encoders", len(self.encoders))
    for index, (name, encoder, offset) in enumerate(self.encoders):
      encoderProto = proto.encoders[index]
      # Select the schema union member matching the encoder's class.
      encoderType = _CLASS_ATTR_MAP.get(encoder.__class__)
      encoderProto.init(encoderType)
      encoderDetails = getattr(encoderProto, encoderType)
      encoder.write(encoderDetails)
      encoderProto.name = name
      encoderProto.offset = offset
    proto.name = self.name
| agpl-3.0 |
stuarth/pixie | pixie/vm/test/test_compile.py | 9 | 8085 | from pixie.vm.reader import read_inner, StringReader, eof
from pixie.vm.object import Type
from pixie.vm.cons import Cons
from pixie.vm.numbers import Integer
from pixie.vm.symbol import Symbol
from pixie.vm.compiler import compile, with_ns
from pixie.vm.interpreter import interpret
from pixie.vm.code import Code, Var
from pixie.vm.primitives import nil, true, false
def read_code(s):
    # Parse one form from `s` inside the "user" namespace.
    # NOTE(review): `read` is not defined in this module -- the import at the
    # top brings in `read_inner`. Presumably `read` should be `read_inner`
    # (or is re-exported elsewhere); confirm before relying on this helper.
    with with_ns(u"user"):
        return read(StringReader(unicode(s)), False)
def test_add_compilation():
    # Compiling a simple arithmetic call yields a Code object.
    with with_ns(u"user"):
        code = compile(read_code(u"(platform+ 1 2)"))
        assert isinstance(code, Code)
        #interpret(code)
def eval_string(s):
    # Read, compile and invoke every form in `s` within the "user" namespace,
    # returning the value of the last form (nil for an empty string).
    # NOTE(review): `read` is not defined here; the import above provides
    # `read_inner` -- confirm which symbol is intended.
    with with_ns(u"user", True):
        rdr = StringReader(unicode(s))
        result = nil
        while True:
            form = read(rdr, False)
            if form is eof:
                return result
            result = compile(form).invoke([])
def test_fn():
    # An immediately-invoked fn* compiles and evaluates to 1 + 2 = 3.
    with with_ns(u"user", True):
        code = compile(read_code("((fn* [x y] (-add x y)) 1 2)"))
        assert isinstance(code, Code)
        retval = interpret(code)
        assert isinstance(retval, Integer) and retval.int_val() == 3
def test_multiarity_fn():
    # Zero-arg arity returns the closed-over binding v.
    retval = eval_string("""(let* [v 1
                                   f (fn* ([] v)
                                          ([x] (+ v x)))]
                              (f))
                         """)
    assert isinstance(retval, Integer) and retval.int_val() == 1

    # One-arg arity adds its argument to v.
    retval = eval_string("""(let* [v 1
                                   f (fn* ([] v)
                                          ([x] (+ v x)))]
                              (f 2))
                         """)
    assert isinstance(retval, Integer) and retval.int_val() == 3
def test_if():
    # Truthy test selects the consequent.
    code = compile(read_code("(if 1 2 3)"))
    assert isinstance(code, Code)
    retval = interpret(code)
    assert isinstance(retval, Integer) and retval.int_val() == 2

    # `false` selects the alternative.
    code = compile(read_code("(if false 2 3)"))
    assert isinstance(code, Code)
    retval = interpret(code)
    assert isinstance(retval, Integer) and retval.int_val() == 3
def test_eq():
    # platform= returns the VM's canonical true/false singletons
    # (identity-comparable with `is`).
    assert eval_string(u"(platform= 1 2)") is false
    assert eval_string(u"(platform= 1 1)") is true
def test_if_eq():
    # An equality test used as an `if` condition follows the false branch.
    assert eval_string("(if (platform= 1 2) true false)") is false
def test_return_self():
    # A named fn* can return itself; the value is a compiled Code object.
    assert isinstance(eval_string("((fn* r [] r))"), Code)
def test_recursive():
    # `recur` inside a named fn* loops until x reaches 10.
    retval = eval_string("""((fn* rf [x]
                               (if (platform= x 10)
                                 x
                                 (recur (+ x 1))))
                             0)""")
    assert isinstance(retval, Integer)
    assert retval.int_val() == 10
def test_loop():
    # NOTE(review): this function is shadowed by a second `def test_loop`
    # further down in this file, so it is never collected or run.
    # Single-binding loop counts up to 10 via recur.
    retval = eval_string("""
    (loop [x 0]
      (if (platform= x 10)
        x
        (recur (+ x 1))))
    """)
    assert isinstance(retval, Integer)
    assert retval.int_val() == 10

    # Two bindings: recur must supply both.
    retval = eval_string("""
    (loop [x 0
           max 10]
      (if (platform= x max)
        x
        (recur (+ x 1) max)))
    """)
    assert isinstance(retval, Integer)
    assert retval.int_val() == 10
def test_loop():
    # NOTE(review): this redefinition shadows the earlier `test_loop` above;
    # only this version runs.
    retval = eval_string("""
    (loop [x 0]
      (if (platform= x 10)
        x
        (recur (+ x 1))))
    """)
    assert isinstance(retval, Integer)
    assert retval.int_val() == 10

    # NOTE(review): the inner (platform= x max) duplicates the outer check,
    # so the `false` branch is unreachable as written -- likely intentional
    # to exercise nested ifs, but worth confirming.
    retval = eval_string("""
    (loop [x 0
           max 10]
      (if (platform= x max)
        x
        (if (platform= x max)
          false
          (recur (+ x 1) max))))
    """)
    assert isinstance(retval, Integer)
    assert retval.int_val() == 10
def test_closures():
    # The inner zero-arg fn* captures `x` from the enclosing fn*'s scope.
    retval = eval_string("""((fn* [x] ((fn* [] x))) 42)""")
    assert isinstance(retval, Integer)
    assert retval.int_val() == 42
def test_def():
    # `def` itself evaluates to the Var it creates.
    retval = eval_string("""(def x 42)""")
    assert isinstance(retval, Var)

    # Dereferencing the var inside a `do` yields the bound value.
    retval = eval_string("""(do (def x 42) x)""")
    assert isinstance(retval, Integer)

    # Multiple top-level forms: eval_string returns the last form's value.
    retval = eval_string("""(def y 42) y""")
    assert isinstance(retval, Integer)
    assert retval.int_val() == 42
def test_native():
    # `type` is implemented natively and returns a Type object.
    retval = eval_string("""(type 42)""")
    assert isinstance(retval, Type)
def test_build_list():
    # Cons 0..9 onto nil; the resulting list has 10 elements.
    retval = eval_string("""((fn* [i lst]
                              (if (platform= i 10)
                                (count lst)
                                (recur (+ i 1) (cons i lst)))) 0 nil)
                         """)
    assert isinstance(retval, Integer) and retval.int_val() == 10
def test_build_vector():
    # Conj 0..9 onto an empty vector; the result has 10 elements.
    retval = eval_string("""((fn* [i lst]
                              (if (platform= i 10)
                                (count lst)
                                (recur (+ i 1) (conj lst i)))) 0 [])
                         """)
    assert isinstance(retval, Integer) and retval.int_val() == 10
#def test_stacklets():
# retval = eval_string("""
# (do (def foo (fn [h v] (h 42)))
# ((create-stacklet foo) 0))
# """)
#
# assert isinstance(retval, Integer) and retval.int_val() == 42
def test_let():
    # Single binding: the body sees x.
    retval = eval_string(""" (let* [x 42] x) """)
    assert isinstance(retval, Integer) and retval.int_val() == 42

    # Sequential bindings: both are visible in the body.
    retval = eval_string(""" (let* [x 42 y 1] (+ x y)) """)
    assert isinstance(retval, Integer) and retval.int_val() == 43
def test_variadic_fn():
    from pixie.vm.array import Array
    # `& rest` collects the extra arguments into an Array.
    retval = eval_string(""" ((fn* [& rest] rest) 1 2 3 4) """)
    print retval  # Python 2 print statement: this module targets RPython/py2
    assert isinstance(retval, Array) and len(retval._list) == 4
#
# def test_handlers():
# retval = eval_string("""(def x 42)
# (platform_install_handler 42 (fn () 1))""")
# assert isinstance(retval, Integer) and retval.int_val() == 1
# retval = eval_string("""(def pass (fn (x k) (k true)))
# (set-effect! pass true)
# (def handler 42)
# (platform_install_handler handler (fn () (pass handler)))""")
# assert retval is true
#
# def test_mult_call_handlers():
# retval = eval_string("""(def pass (fn pass (x k) (+ (k 1) (k 2))))
# (set-effect! pass true)
# (def handler 42)
# (platform_install_handler handler (fn hfn () (pass handler) 42))""")
#
# assert isinstance(retval, Integer) and retval.int_val() == 84
#
def test_quoted():
    # Quoting prevents evaluation: a quoted list stays a Cons ...
    retval = eval_string("""'(1 2)""")
    assert isinstance(retval, Cons)

    # ... and a quoted symbol stays a Symbol.
    retval = eval_string("""'type""")
    assert isinstance(retval, Symbol)
#def test_custom_type():
# retval = eval_string("""(def my-type (make-type 'my-type '(:a :b)))
# (new my-type 1 2)""")
# assert isinstance(retval, CustomTypeInstance)
# retval = eval_string("""(def my-type (make-type 'my-type '(:a :b)))
# (get-field (new my-type 1 2) :a)""")
# assert isinstance(retval, Integer) and retval.int_val() == 1
#
# def test_keyword():
# retval = eval_string(""":foo""")
# assert isinstance(retval, Keyword)
#
#
# def test_real_effects():
# retval = eval_string("""
#
# (do (def tp (make-type 'Foo '(:x)))
# (def pass (fn (x) (get-field (new tp x) :x)))
#
# ((fn r (x)
# (if (platform= x 10000)
# x
# (r (+ 1 (pass x)))))
#
# 0))
# """)
#
# assert isinstance(retval, Integer) and retval.int_val() == 1000
#
#
# def test_real_effects():
# retval = eval_string("""
#
#
# (do (def tp (make-type 'Foo '(:x)))
# (def pass (fn (x) (get-field (new tp x) :x)))
# (def add (fn (h i k) (k (+ i 1))))
# (set-effect! add true)
# (def handler 0)
#
# ((fn r (x)
# (if (platform= x 10000)
# x
# (r (platform_install_handler handler (fn () (add handler x))))))
#
# 0))
# """)
#
# assert isinstance(retval, Integer) and retval.int_val() == 1000
#
#
#
| gpl-3.0 |
jefftc/changlab | Betsy/Betsy/modules/convert_simplevariantfile_to_matrix.py | 1 | 8224 | from Module import AbstractModule
class Module(AbstractModule):
    """Convert a flat "simple variant" file into a SimpleVariantMatrix file."""

    def __init__(self):
        AbstractModule.__init__(self)

    def run(
        self, network, in_data, out_attributes, user_options, num_cores,
        out_filename):
        """Read variants from in_data.identifier and write a matrix file.

        Returns a (currently empty) metadata dict.  Each input row is
        expected to carry at least the attributes Chrom, Pos, Ref, Alt,
        Sample, Caller, Source, Num_Ref, Num_Alt and VAF.
        """
        from genomicode import filelib
        from genomicode import SimpleVariantMatrix
        from genomicode import AnnotationMatrix

        simple_file = in_data.identifier
        metadata = {}

        # Read all in memory. Hopefully, not too big.
        ds = []
        for d in filelib.read_row(simple_file, header=-1):
            ds.append(d)
            #if len(ds) > 50000:  # DEBUG
            #    break

        # MuSE sometimes has alternates.
        #   Alt      A,C
        #   Num_Alt  13,0
        #   VAF      0.19,0.0
        # Detect this and fix it.  Take the alternate with the highest VAF.
        for d in ds:
            if d.Num_Alt.find(",") < 0:
                continue
            x1 = d.Num_Alt.split(",")
            x2 = d.VAF.split(",")
            assert len(x1) == len(x2)
            # Python 2: map() returns a list, which the indexing below needs.
            x1 = map(int, x1)
            x2 = map(float, x2)
            max_vaf = max_i = None
            for i in range(len(x2)):
                if max_vaf is None or x2[i] > max_vaf:
                    max_vaf = x2[i]
                    max_i = i
            assert max_i is not None
            d.Num_Alt = str(x1[max_i])
            d.VAF = str(x2[max_i])

        # Make a list of all the positions.
        positions = {}  # (Chrom, Pos) -> 1
        for d in ds:
            positions[(d.Chrom, int(d.Pos))] = 1
        positions = sorted(positions)

        # Make a list of all the callers.
        callers = {}
        for d in ds:
            callers[d.Caller] = 1
        callers = sorted(callers)

        # Make a list of all the samples.
        samples = {}
        for d in ds:
            samples[d.Sample] = 1
        samples = sorted(samples)

        # Make a list of the coordinates.
        coord_data = {}
        for d in ds:
            x = d.Chrom, int(d.Pos), d.Ref, d.Alt
            coord_data[x] = 1
        coord_data = sorted(coord_data)

        # Make a list of all DNA calls (RNA rows are skipped).
        call_data = []
        for d in ds:
            assert d.Source in ["DNA", "RNA"]
            if d.Source != "DNA":
                continue
            num_ref = num_alt = vaf = None
            if d.Num_Ref:
                num_ref = int(d.Num_Ref)
            if d.Num_Alt:
                num_alt = int(d.Num_Alt)
            if d.VAF:
                vaf = float(d.VAF)
            # Rows with no usable numbers at all are dropped.
            if num_ref is None and num_alt is None and vaf is None:
                continue
            call = SimpleVariantMatrix.Call(num_ref, num_alt, vaf)
            x = d.Chrom, int(d.Pos), d.Ref, d.Alt, d.Sample, d.Caller, call
            call_data.append(x)

        # sample -> caller -> chrom, pos, ref, alt -> call
        samp2caller2coord2call = {}
        for x in call_data:
            chrom, pos, ref, alt, sample, caller, call = x
            coord = chrom, pos, ref, alt
            if sample not in samp2caller2coord2call:
                samp2caller2coord2call[sample] = {}
            caller2coord2call = samp2caller2coord2call[sample]
            if caller not in caller2coord2call:
                caller2coord2call[caller] = {}
            coord2call = caller2coord2call[caller]
            # A (sample, caller, coord) may have multiple calls.  For
            # example, for germline samples that are called with each
            # tumor sample.  If this is the case, then take the call
            # with the highest coverage.
            if coord in coord2call:
                old_call = coord2call[coord]
                cov = old_cov = None
                if call.num_ref is not None and call.num_alt is not None:
                    cov = call.num_ref + call.num_alt
                if old_call.num_ref is not None and \
                   old_call.num_alt is not None:
                    old_cov = old_call.num_ref + old_call.num_alt
                if cov is None and old_cov is not None:
                    call = old_call
                elif cov is not None and old_cov is not None and cov < old_cov:
                    call = old_call
            coord2call[coord] = call

        # Count the number of callers that called a variant at each
        # position for each sample.
        samp2coord2caller = {}  # sample -> chrom, pos, ref, alt -> caller -> 1
        # Need to do this first, to make sure each caller is counted
        # at most once.  This is to account for germline samples that
        # is called by each caller multiple times.
        for x in call_data:
            chrom, pos, ref, alt, sample, caller, call = x
            coord = chrom, pos, ref, alt
            if sample not in samp2coord2caller:
                samp2coord2caller[sample] = {}
            if coord not in samp2coord2caller[sample]:
                samp2coord2caller[sample][coord] = {}
            samp2coord2caller[sample][coord][caller] = 1

        samp2coord2nc = {}  # sample -> chrom, pos, ref, alt -> num_callers
        for sample in samp2coord2caller:
            samp2coord2nc[sample] = {}
            for coord in samp2coord2caller[sample]:
                samp2coord2nc[sample][coord] = len(
                    samp2coord2caller[sample][coord])
        # Earlier implementation, kept for reference; it could count the
        # same caller more than once.
        #for x in call_data:
        #    chrom, pos, ref, alt, sample, caller, call = x
        #    coord = chrom, pos, ref, alt
        #    if sample not in samp2coord2nc:
        #        samp2coord2nc[sample] = {}
        #    nc = samp2coord2nc[sample].get(coord, 0) + 1
        #    samp2coord2nc[sample][coord] = nc

        # Format everything into an annotation matrix.
        headers0 = []
        headers1 = []
        headers2 = []
        all_annots = []

        # Add the positions.
        headers0 += ["", "", "", ""]
        headers1 += ["", "", "", ""]
        headers2 += ["Chrom", "Pos", "Ref", "Alt"]
        for i in range(4):
            # NOTE: `x` is reused as both the comprehension variable and the
            # accumulated column; the reassignment keeps this well-defined.
            x = [x[i] for x in coord_data]
            x = [str(x) for x in x]
            all_annots.append(x)

        # Add the number of callers information.
        headers0 += ["Num Callers"] * len(samples)
        headers1 += [""] * len(samples)
        headers2 += samples
        for sample in samples:
            annots = []
            for coord in coord_data:
                nc = samp2coord2nc.get(sample, {}).get(coord, "")
                annots.append(nc)
            all_annots.append(annots)

        # Add information about calls.
        for sample in samples:
            caller2coord2call = samp2caller2coord2call.get(sample, {})
            for i, caller in enumerate(callers):
                # Only the first caller column of each sample carries the
                # sample name in the top header row.
                h0 = ""
                if not i:
                    h0 = sample
                h1 = caller
                h2 = "Ref/Alt/VAF"
                headers0.append(h0)
                headers1.append(h1)
                headers2.append(h2)

                coord2call = caller2coord2call.get(caller, {})
                annots = []
                for coord in coord_data:
                    x = ""
                    call = coord2call.get(coord)
                    if call:
                        x = SimpleVariantMatrix._format_call(call)
                    annots.append(x)
                all_annots.append(annots)

        # Set the headers.  The three header rows are joined with "___"
        # into a single header line per column.
        assert len(headers0) == len(headers1)
        assert len(headers0) == len(headers2)
        assert len(headers0) == len(all_annots)
        headers = [None] * len(headers0)
        for i, x in enumerate(zip(headers0, headers1, headers2)):
            x = "___".join(x)
            headers[i] = x
        matrix = AnnotationMatrix.create_from_annotations(headers, all_annots)
        SimpleVariantMatrix.write_from_am(out_filename, matrix)

        #annot_header = ["Chrom", "Pos", "Ref", "Alt"]
        #matrix = SimpleVariantMatrix.make_matrix(
        #    samples, callers, annot_header, coord_data, named_data,
        #    call_data)
        #SimpleVariantMatrix.write(out_filename, matrix)

        return metadata

    def name_outfile(self, antecedents, user_options):
        """Name of the output file produced by run()."""
        return "calls.txt"
| mit |
hyperized/ansible | test/units/modules/network/fortios/test_fortios_firewall_ipmacbinding_table.py | 21 | 8901 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_firewall_ipmacbinding_table
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    # Patch the Connection class used by the module under test so that no
    # real FortiOS device is contacted; autouse applies it to every test.
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_firewall_ipmacbinding_table.Connection')
    return connection_class_mock
# Module-level handler shared by all tests, built on the mocked connection.
# NOTE(review): `connection_mock` here is the fixture *function* object, not
# the mock it returns at test time -- sufficient for these tests since the
# handler's network methods are patched per-test, but worth confirming.
fos_instance = FortiOSHandler(connection_mock)
def test_firewall_ipmacbinding_table_creation(mocker):
    # Successful create: `set` receives the payload with underscore keys
    # renamed to dashes, the schema helper is never consulted, and the module
    # reports (is_error=False, changed=True).
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'firewall_ipmacbinding_table': {
            'ip': 'test_value_3',
            'mac': 'test_value_4',
            'name': 'default_name_5',
            'seq_num': '6',
            'status': 'enable'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_firewall_ipmacbinding_table.fortios_firewall_ipmacbinding(input_data, fos_instance)

    # Note: 'seq_num' is expected to be sent to the device as 'seq-num'.
    expected_data = {
        'ip': 'test_value_3',
        'mac': 'test_value_4',
        'name': 'default_name_5',
        'seq-num': '6',
        'status': 'enable'
    }

    set_method_mock.assert_called_with('firewall.ipmacbinding', 'table', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_firewall_ipmacbinding_table_creation_fails(mocker):
    # Device-side failure (HTTP 500): the module must report
    # (is_error=True, changed=False) and pass the error response through.
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'firewall_ipmacbinding_table': {
            'ip': 'test_value_3',
            'mac': 'test_value_4',
            'name': 'default_name_5',
            'seq_num': '6',
            'status': 'enable'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_firewall_ipmacbinding_table.fortios_firewall_ipmacbinding(input_data, fos_instance)

    # Note: 'seq_num' is expected to be sent to the device as 'seq-num'.
    expected_data = {
        'ip': 'test_value_3',
        'mac': 'test_value_4',
        'name': 'default_name_5',
        'seq-num': '6',
        'status': 'enable'
    }

    set_method_mock.assert_called_with('firewall.ipmacbinding', 'table', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_firewall_ipmacbinding_table_removal(mocker):
    """With state=absent a successful delete reports changed and no error."""
    schema_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    ok_reply = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value=ok_reply)

    playbook_args = {
        'username': 'admin',
        'state': 'absent',
        'firewall_ipmacbinding_table': {
            'ip': 'test_value_3',
            'mac': 'test_value_4',
            'name': 'default_name_5',
            'seq_num': '6',
            'status': 'enable'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_firewall_ipmacbinding_table.fortios_firewall_ipmacbinding(
        playbook_args, fos_instance)

    # the mkey value is derived internally, so only its presence is checked
    delete_mock.assert_called_with(
        'firewall.ipmacbinding', 'table', mkey=ANY, vdom='root')
    schema_mock.assert_not_called()

    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_firewall_ipmacbinding_table_deletion_fails(mocker):
    """A delete rejected by the API must surface an error and no change."""
    schema_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    failure_reply = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value=failure_reply)

    playbook_args = {
        'username': 'admin',
        'state': 'absent',
        'firewall_ipmacbinding_table': {
            'ip': 'test_value_3',
            'mac': 'test_value_4',
            'name': 'default_name_5',
            'seq_num': '6',
            'status': 'enable'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_firewall_ipmacbinding_table.fortios_firewall_ipmacbinding(
        playbook_args, fos_instance)

    # the mkey value is derived internally, so only its presence is checked
    delete_mock.assert_called_with(
        'firewall.ipmacbinding', 'table', mkey=ANY, vdom='root')
    schema_mock.assert_not_called()

    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_firewall_ipmacbinding_table_idempotent(mocker):
    """An HTTP 404 from set() means "already in desired state".

    The module must treat it as neither an error nor a change
    (idempotent run), while the raw error response is still returned.
    """
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    # 404 with method DELETE is the firmware's "nothing to do" answer
    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'firewall_ipmacbinding_table': {
            'ip': 'test_value_3',
            'mac': 'test_value_4',
            'name': 'default_name_5',
            'seq_num': '6',
            'status': 'enable'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_firewall_ipmacbinding_table.fortios_firewall_ipmacbinding(input_data, fos_instance)
    # underscored module argument names become dashed API field names
    expected_data = {
        'ip': 'test_value_3',
        'mac': 'test_value_4',
        'name': 'default_name_5',
        'seq-num': '6',
        'status': 'enable'
    }
    set_method_mock.assert_called_with('firewall.ipmacbinding', 'table', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_firewall_ipmacbinding_table_filter_foreign_attributes(mocker):
    """Attributes unknown to the schema are dropped from the API payload."""
    schema_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    ok_reply = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value=ok_reply)

    playbook_args = {
        'username': 'admin',
        'state': 'present',
        'firewall_ipmacbinding_table': {
            'random_attribute_not_valid': 'tag',
            'ip': 'test_value_3',
            'mac': 'test_value_4',
            'name': 'default_name_5',
            'seq_num': '6',
            'status': 'enable'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_firewall_ipmacbinding_table.fortios_firewall_ipmacbinding(
        playbook_args, fos_instance)

    # the foreign key must not appear in the payload sent to the device
    expected_payload = {
        'ip': 'test_value_3',
        'mac': 'test_value_4',
        'name': 'default_name_5',
        'seq-num': '6',
        'status': 'enable'
    }
    set_mock.assert_called_with(
        'firewall.ipmacbinding', 'table', data=expected_payload, vdom='root')
    schema_mock.assert_not_called()

    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
oVirt/vdsm | tests/virt/devicexml_test.py | 2 | 50377 | #
# Copyright 2017-2020 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from vdsm.virt.domain_descriptor import DomainDescriptor
from vdsm.virt.vmdevices import lookup
from vdsm.virt import metadata
from vdsm.virt import vmdevices
from vdsm.virt import vmxml
from vdsm.common import hostdev
from vdsm.common import xmlutils
from monkeypatch import MonkeyPatchScope
from testlib import permutations, expandPermutations
from testlib import read_data
from testlib import XMLTestCase
import vmfakecon as fake
import hostdevlib
import pytest
@expandPermutations
class DeviceToXMLTests(XMLTestCase):
    """Verify that vmdevices objects render the expected libvirt XML."""

    # canonical PCI guest address, as XML attribute text ...
    PCI_ADDR = \
        'bus="0x00" domain="0x0000" function="0x0" slot="0x03" type="pci"'
    # ... and as the equivalent dict accepted by device constructors
    PCI_ADDR_DICT = {'slot': '0x03', 'bus': '0x00', 'domain': '0x0000',
                     'function': '0x0', 'type': 'pci'}

    def setUp(self):
        # minimal VM configuration shared by the tests in this class
        self.log = logging.getLogger('test.virt')
        self.conf = {
            'vmName': 'testVm',
            'vmId': '9ffe28b6-6134-4b1e-8804-1185f49c436f',
            'smp': '8',
            'maxVCpus': '160',
            'memSize': '1024',
            'memGuaranteedSize': '512',
        }

    def test_memory_device(self):
        """memory_xml() builds a DIMM <memory> element from plain params."""
        memoryXML = """<memory model='dimm'>
            <target>
                <size unit='KiB'>1048576</size>
                <node>0</node>
            </target>
        </memory>
        """
        params = {'device': 'memory', 'type': 'memory',
                  'size': 1024, 'node': 0}
        self.assertXMLEqual(vmdevices.core.memory_xml(params), memoryXML)

    def test_interface(self):
        """A bridge NIC with filter, bandwidth and custom driver options."""
        interfaceXML = """
            <interface type="bridge"> <address %s/>
                <mac address="52:54:00:59:F5:3F"/>
                <model type="virtio"/>
                <source bridge="ovirtmgmt"/>
                <filterref filter="no-mac-spoofing"/>
                <link state="up"/>
                <boot order="1"/>
                <driver name="vhost" queues="7"/>
                <tune>
                    <sndbuf>0</sndbuf>
                </tune>
                <bandwidth>
                    <inbound average="1000" burst="1024" peak="5000"/>
                    <outbound average="128" burst="256"/>
                </bandwidth>
            </interface>""" % self.PCI_ADDR
        dev = {'nicModel': 'virtio', 'macAddr': '52:54:00:59:F5:3F',
               'network': 'ovirtmgmt', 'address': self.PCI_ADDR_DICT,
               'device': 'bridge', 'type': 'interface',
               'bootOrder': '1', 'filter': 'no-mac-spoofing',
               'specParams': {'inbound': {'average': 1000, 'peak': 5000,
                                          'burst': 1024},
                              'outbound': {'average': 128, 'burst': 256}},
               'custom': {'queues': '7'},
               'vm_custom': {'vhost': 'ovirtmgmt:true', 'sndbuf': '0'},
               }
        iface = vmdevices.network.Interface(self.log, **dev)
        self.assertXMLEqual(xmlutils.tostring(iface.getXML()), interfaceXML)

    def test_interface_filter_parameters(self):
        """filterParameters are rendered as <parameter> children."""
        interfaceXML = """
            <interface type="bridge"> <address %s/>
                <mac address="52:54:00:59:F5:3F"/>
                <model type="virtio"/>
                <source bridge="ovirtmgmt"/>
                <filterref filter="clean-traffic">
                    <parameter name='IP' value='10.0.0.1'/>
                    <parameter name='IP' value='10.0.0.2'/>
                </filterref>
                <link state="up"/>
                <boot order="1"/>
                <driver name="vhost"/>
                <tune>
                    <sndbuf>0</sndbuf>
                </tune>
            </interface>""" % self.PCI_ADDR
        dev = {
            'nicModel': 'virtio', 'macAddr': '52:54:00:59:F5:3F',
            'network': 'ovirtmgmt', 'address': self.PCI_ADDR_DICT,
            'device': 'bridge', 'type': 'interface',
            'bootOrder': '1', 'filter': 'clean-traffic',
            'filterParameters': [
                {'name': 'IP', 'value': '10.0.0.1'},
                {'name': 'IP', 'value': '10.0.0.2'},
            ],
            'vm_custom': {'vhost': 'ovirtmgmt:true', 'sndbuf': '0'},
        }
        iface = vmdevices.network.Interface(self.log, **dev)
        self.assertXMLEqual(xmlutils.tostring(iface.getXML()), interfaceXML)

    @permutations([
        # base_spec_params:
        [{}],
        [{'inbound': {'average': 512}, 'outbound': {'average': 512}}],
    ])
    def test_update_bandwidth_xml(self, base_spec_params):
        """update_bandwidth_xml replaces any pre-existing <bandwidth>."""
        specParams = {
            'inbound': {
                'average': 1000,
                'peak': 5000,
                'floor': 200,
                'burst': 1024,
            },
            'outbound': {
                'average': 128,
                'peak': 256,
                'burst': 256,
            },
        }
        conf = {
            'device': 'network',
            'macAddr': 'fake',
            'network': 'default',
            'specParams': base_spec_params,
        }
        XML = u"""
        <interface type='network'>
          <mac address="fake" />
          <source bridge='default'/>
          <link state="up"/>
          <bandwidth>
            <inbound average='1000' peak='5000' floor='200' burst='1024'/>
            <outbound average='128' peak='256' burst='256'/>
          </bandwidth>
        </interface>
        """
        dev = vmdevices.network.Interface(self.log, **conf)
        vnic_xml = dev.getXML()
        vmdevices.network.update_bandwidth_xml(dev, vnic_xml, specParams)
        self.assertXMLEqual(xmlutils.tostring(vnic_xml), XML)
@expandPermutations
class ParsingHelperTests(XMLTestCase):
    """Exercise the low-level vmdevices.core XML parsing helpers."""

    # reference guest address used by the address/alias tests
    ADDR = {
        'domain': '0x0000',
        'bus': '0x05',
        'slot': '0x11',
        'function': '0x3',
    }
    ALIAS = 'test0'

    def test_address_alias(self):
        """Both address and alias present: both must be found."""
        params = {'alias': self.ALIAS}
        params.update(self.ADDR)
        XML = u"""<device type='fake'>
                 <address domain='{domain}' bus='{bus}'
                    slot='{slot}' function='{function}'/>
                 <alias name='{alias}'/>
               </device>""".format(**params)
        dev = xmlutils.fromstring(XML)
        found_addr = vmdevices.core.find_device_guest_address(dev)
        found_alias = vmdevices.core.find_device_alias(dev)
        assert found_addr == self.ADDR
        assert found_alias == self.ALIAS

    def test_missing_address(self):
        """No <address> element: address is None, alias still found."""
        XML = u"""<device type='fake'>
                 <alias name='{alias}'/>
               </device>""".format(alias=self.ALIAS)
        dev = xmlutils.fromstring(XML)
        found_addr = vmdevices.core.find_device_guest_address(dev)
        found_alias = vmdevices.core.find_device_alias(dev)
        assert found_addr is None
        assert found_alias == self.ALIAS

    def test_missing_alias(self):
        """No <alias> element: alias degrades to the empty string."""
        params = self.ADDR.copy()
        XML = u"""<device type='fake'>
                 <address domain='{domain}' bus='{bus}'
                    slot='{slot}' function='{function}'/>
               </device>""".format(**params)
        dev = xmlutils.fromstring(XML)
        found_addr = vmdevices.core.find_device_guest_address(dev)
        found_alias = vmdevices.core.find_device_alias(dev)
        assert found_addr == self.ADDR
        assert found_alias == ''

    def test_missing_address_alias(self):
        """Bare device element: no address, empty alias."""
        XML = u"<device type='fake' />"
        dev = xmlutils.fromstring(XML)
        found_addr = vmdevices.core.find_device_guest_address(dev)
        found_alias = vmdevices.core.find_device_alias(dev)
        assert found_addr is None
        assert found_alias == ''

    def test_attrs(self):
        """parse_device_attrs extracts the requested attributes."""
        XML = u"<device type='fake' />"
        attrs = vmdevices.core.parse_device_attrs(
            xmlutils.fromstring(XML), ('type',)
        )
        assert attrs == {'type': 'fake'}

    def test_attrs_missing(self):
        """Attributes absent from the element are silently skipped."""
        XML = u"<device type='fake' />"
        attrs = vmdevices.core.parse_device_attrs(
            xmlutils.fromstring(XML), ('type', 'foo')
        )
        assert attrs == {'type': 'fake'}

    def test_attrs_partial(self):
        """Only the requested subset of attributes is returned."""
        XML = u"<device foo='bar' ans='42' fizz='buzz' />"
        attrs = vmdevices.core.parse_device_attrs(
            xmlutils.fromstring(XML), ('foo', 'fizz')
        )
        assert attrs == {'foo': 'bar', 'fizz': 'buzz'}

    @permutations([
        # xml_data, dev_type
        [u'''<interface type='network' />''', 'network'],
        [u'''<console type="pty" />''', 'pty'],
        [u'''<controller type='usb' index='0' />''', 'usb'],
        [u'''<sound model="ac97"/>''', 'sound'],
        [u'''<tpm model='tpm-tis'/>''', 'tpm'],
    ])
    def test_find_device_type(self, xml_data, dev_type):
        # the type may come from the 'type' or the 'model' attribute,
        # depending on the device kind
        assert dev_type == \
            vmdevices.core.find_device_type(xmlutils.fromstring(xml_data))

    @permutations([
        # xml_data, alias
        # well formed XMLs
        [u'''<interface><alias name="net0" /></interface>''', 'net0'],
        [u'''<console type="pty" />''', ''],
        # malformed XMLs
        [u'''<controller><alias>foobar</alias></controller>''', ''],
    ])
    def test_find_device_alias(self, xml_data, alias):
        assert alias == \
            vmdevices.core.find_device_alias(xmlutils.fromstring(xml_data))

    @permutations([
        # xml_data, address
        [u'''<interface>
               <source>
                 <address type='pci' domain='0x0000' bus='0x00'
                   slot='0x04' function='0x0'/>
               </source>
             </interface>''',
         None],
        [u'''<interface>
               <address type='pci' domain='0x0000' bus='0x00'
                 slot='0x04' function='0x0'/>
             </interface>''',
         {'bus': '0x00', 'domain': '0x0000',
          'function': '0x0', 'slot': '0x04', 'type': 'pci'}],
        [u'''<interface>
               <address type='pci' domain='0x0000' bus='0x00'
                 slot='0x04' function='0x0'/>
               <source>
                 <address type='pci' domain='0x0000' bus='0x02'
                   slot='0x02' function='0x5'/>
               </source>
             </interface>''',
         {'bus': '0x00', 'domain': '0x0000',
          'function': '0x0', 'slot': '0x04', 'type': 'pci'}],
    ])
    def test_find_device_guest_address(self, xml_data, address):
        # only the top-level (guest) address counts; the host address
        # nested under <source> must never be picked up
        assert address == \
            vmdevices.core.find_device_guest_address(
                xmlutils.fromstring(xml_data)
            )
# the alias is not rendered by getXML, so having it would make
# the test fail
_CONTROLLERS_XML = [
[u"<controller type='virtio-serial' index='0' ports='16'>"
u"<address type='pci' domain='0x0000' bus='0x00'"
u" slot='0x07' function='0x0'/>"
u"</controller>"],
[u"<controller type='usb' index='0'>"
u"<address type='pci' domain='0x0000' bus='0x00'"
u" slot='0x01' function='0x2'/>"
u"</controller>"],
[u"<controller type='pci' index='0' model='pci-root' />"],
[u"<controller type='ccid' index='0' />"],
[u"<controller type='ide' index='0'/>"],
[u"<controller type='scsi' index='0' model='virtio-scsi'>"
u"<address type='pci' domain='0x0000' bus='0x00' slot='0x0b'"
u" function='0x0'/>"
u"<driver iothread='4'/>"
u"</controller>"],
]
_TRANSIENT_STORAGE_TEST_DATA = [
[u'''<disk device="disk" snapshot="no" type="block">
<source dev="/var/lib/vdsm/transient">
<seclabel model="dac" relabel="no" type="none" />
</source>
<target bus="scsi" dev="sda"/>
<serial>54-a672-23e5b495a9ea</serial>
<driver cache="writethrough" error_policy="stop"
io="native" name="qemu" type="qcow2"/>
</disk>''',
{'shared': 'transient'}],
[u'''<disk device="disk" snapshot="no" type="file">
<source file="/var/lib/vdsm/transient"/>
<target bus="scsi" dev="sda"/>
<serial>54-a672-23e5b495a9ea</serial>
<driver cache="writethrough" error_policy="stop"
io="threads" name="qemu" type="qcow2"/>
</disk>''',
{'shared': 'transient'}]
]
_STORAGE_TEST_DATA = [
[u'''<disk device="disk" snapshot="no" type="block">
<source dev="/path/to/volume">
<seclabel model="dac" relabel="no" type="none" />
</source>
<target bus="virtio" dev="vda"/>
<serial>54-a672-23e5b495a9ea</serial>
<driver cache="none" discard="unmap" error_policy="stop"
io="native" name="qemu" type="raw"/>
</disk>''',
{}],
[u'''<disk device="disk" snapshot="no" type="block">
<source dev="/path/to/volume">
<seclabel model="dac" relabel="no" type="none" />
</source>
<target bus="virtio" dev="vda"/>
<serial>54-a672-23e5b495a9ea</serial>
<driver cache="none" discard="unmap" error_policy="enospace"
io="native" name="qemu" type="raw"/>
</disk>''',
{}],
[u'''<disk device="disk" snapshot="no" type="block">
<source dev="/path/to/volume">
<seclabel model="dac" relabel="no" type="none" />
</source>
<target bus="virtio" dev="vda"/>
<serial>54-a672-23e5b495a9ea</serial>
<driver cache="none" error_policy="stop"
io="native" name="qemu" type="raw"/>
</disk>''',
{}],
[u'''<disk device="disk" snapshot="no" type="file">
<source file="/path/to/volume">
<seclabel model="dac" relabel="no" type="none" />
</source>
<target bus="virtio" dev="vda"/>
<serial>54-a672-23e5b495a9ea</serial>
<driver cache="none" error_policy="stop"
io="threads" name="qemu" type="raw"/>
</disk>''',
{}],
[u'''<disk device="lun" sgio="unfiltered" snapshot="no" type="block">
<source dev="/dev/mapper/lun1">
<seclabel model="dac" relabel="no" type="none" />
</source>
<target bus="scsi" dev="sda"/>
<driver cache="none" error_policy="stop"
io="native" name="qemu" type="raw"/>
</disk>''',
{}],
[u'''<disk device="disk" snapshot="no" type="network">
<source name="poolname/volumename" protocol="rbd">
<host name="1.2.3.41" port="6789" transport="tcp"/>
<host name="1.2.3.42" port="6789" transport="tcp"/>
</source>
<target bus="virtio" dev="vda"/>
<driver cache="none" error_policy="stop"
io="threads" name="qemu" type="raw"/>
</disk>''',
{}],
[u'''<disk device="disk" snapshot="no" type="network">
<source name="poolname/volumename" protocol="rbd">
<host name="1.2.3.41" port="6789" transport="tcp"/>
<host name="1.2.3.42" port="6789" transport="tcp"/>
</source>
<auth username="cinder">
<secret type="ceph" uuid="abcdef"/>
</auth>
<target bus="virtio" dev="vda"/>
<serial>54-a672-23e5b495a9ea</serial>
<driver cache="none" error_policy="stop"
io="threads" name="qemu" type="raw"/>
</disk>''',
{}],
[u'''<disk device="lun" sgio="unfiltered" snapshot="no" type="block">
<address bus="0" controller="0" target="0" type="drive" unit="0" />
<source dev="/dev/mapper/36001405b3b7829f14c1400d925eefebb">
<seclabel model="dac" relabel="no" type="none" />
</source>
<target bus="scsi" dev="sda" />
<driver cache="none" error_policy="stop" io="native"
name="qemu" type="raw" />
</disk>''',
{}],
[u'''<disk device="cdrom" snapshot="no" type="file">
<source file="/run/vdsm/payload/{guid}.{hashsum}.img"
startupPolicy="optional">
<seclabel model="dac" relabel="no" type="none" />
</source>
<target bus="ide" dev="hdd" />
<readonly />
<driver error_policy="report" name="qemu" type="raw" />
</disk>'''.format(guid='8a1dc504-9d00-48f3-abdc-c70404e6f7e2',
hashsum='4137dc5fb55e021fbfd2653621d9d194'),
{}],
# cdrom from Engine 4.2.0, using error_policy="report"
[u'''<disk type="file" device="cdrom" snapshot="no">
<address bus="1" controller="0" unit="0" type="drive" target="0"/>
<source file="" startupPolicy="optional">
<seclabel model="dac" relabel="no" type="none" />
</source>
<target dev="hdc" bus="ide"/>
<readonly/>
<driver name="qemu" type="raw" error_policy="report"/>
</disk>''',
{}],
[u'''<disk device="disk" snapshot="no" type="block">
<source dev="/path/to/volume">
<seclabel model="dac" relabel="no" type="none" />
</source>
<target bus="virtio" dev="vda"/>
<serial>54-a672-23e5b495a9ea</serial>
<driver cache="none" discard="unmap" error_policy="stop"
io="native" name="qemu" type="raw"/>
<iotune>
<read_iops_sec>400000</read_iops_sec>
<total_bytes_sec>10000000</total_bytes_sec>
<write_iops_sec>100000</write_iops_sec>
</iotune>
</disk>''',
{}],
# disk from Engine 4.2.0
[u'''<disk snapshot="no" type="block" device="disk">
<address bus="0" controller="0" unit="0" type="drive" target="0"/>
<source dev="/rhev/data-center/mnt/blockSD/a/images/b/c">
<seclabel model="dac" relabel="no" type="none" />
</source>
<target dev="sda" bus="scsi"/>
<serial>d591482b-eb24-47bd-be07-082c115d11f4</serial>
<boot order="1"/>
<driver name="qemu" io="native" type="qcow2"
error_policy="stop" cache="none"/>
<alias name="ua-58ca6050-0134-00d6-0053-000000000388"/>
</disk>''',
{}],
# cache attribute taken from XML for non-transient disks
[u'''<disk device="disk" snapshot="no" type="file">
<source file="/path/to/volume">
<seclabel model="dac" relabel="no" type="none" />
</source>
<target bus="sata" dev="sda"/>
<serial>54-a672-23e5b495a9ea</serial>
<driver cache="writethrough" error_policy="enospace"
io="threads" name="qemu" type="raw"/>
</disk>''',
{}],
]
_HOSTDEV_XML = [
[u'''<hostdev mode='subsystem' type='pci' managed='no'>
<source>
<address domain='0x0000' bus='0x00' slot='0x19' function='0x0'/>
</source>
<boot order='1'/>
</hostdev>'''],
[u'''<hostdev managed="no" mode="subsystem" type="usb">
<source>
<address bus="1" device="1"/>
</source>
</hostdev>'''],
[u'''<hostdev managed="no" mode="subsystem" rawio="yes" type="scsi">
<source>
<adapter name="scsi_host0"/>
<address bus="0" target="0" unit="0"/>
</source>
</hostdev>'''],
[u'''<hostdev mode='subsystem' type='pci' managed='no'>
<source>
<address domain='0x0000' bus='0x00' slot='0x19' function='0x0'/>
</source>
<address type='pci' domain='0x0000' bus='0x00'
slot='0x03' function='0x0'/>
</hostdev>'''],
]
_MDEV_XML = u'''<hostdev mode="subsystem" model="vfio-pci" type="mdev">
<source>
<address uuid="8cb81aac-0c99-3e14-8f48-17c7c7d1c538"/>
</source>
</hostdev>
'''
@expandPermutations
class DeviceXMLRoundTripTests(XMLTestCase):
    """Parse device XML into vmdevices objects and render it back,
    checking the round trip is lossless (or normalized as expected)."""

    def test_base_not_implemented(self):
        """Base.from_xml_tree must refuse to parse and name itself."""
        # simplified version of channel XML, only for test purposes.
        # this should never be seen in the wild
        generic_xml = '<channel type="spicevmc" />'
        try:
            vmdevices.core.Base.from_xml_tree(
                self.log,
                xmlutils.fromstring(generic_xml),
                meta={'vmid': 'VMID'}
            )
        except NotImplementedError as exc:
            assert vmdevices.core.Base.__name__ == \
                str(exc)
        except Exception as ex:
            # BUGFIX: the original passed two arguments to AssertionError
            # ('...%s', ex), so the %s placeholder was never interpolated
            # and the failure message was a tuple; format explicitly.
            raise AssertionError('from_xml_tree raised unexpected %s' % ex)
        else:
            raise AssertionError('from_xml_tree implemented')

    def test_lease(self):
        """Lease devices round-trip unchanged."""
        lease_xml = u'''<lease>
            <key>12523e3d-ad22-410c-8977-d2a7bf458a65</key>
            <lockspace>c2a6d7c8-8d81-4e01-9ed4-7eb670713448</lockspace>
            <target offset="1048576"
                    path="/dev/c2a6d7c8-8d81-4e01-9ed4-7eb670713448/leases"/>
        </lease>'''
        self._check_roundtrip(vmdevices.lease.Device, lease_xml)

    def test_interface(self):
        """A fully decorated bridge NIC round-trips unchanged."""
        interface_xml = u'''
            <interface type="bridge">
                <address bus="0x00" domain="0x0000"
                    function="0x0" slot="0x03" type="pci"/>
                <mac address="52:54:00:59:F5:3F"/>
                <model type="virtio"/>
                <source bridge="ovirtmgmt"/>
                <filterref filter="clean-traffic">
                    <parameter name='IP' value='10.0.0.1'/>
                    <parameter name='IP' value='10.0.0.2'/>
                </filterref>
                <link state="up"/>
                <boot order="1"/>
                <driver name="vhost" queues="7"/>
                <tune>
                    <sndbuf>0</sndbuf>
                </tune>
                <bandwidth>
                    <inbound average="1000" burst="1024" peak="5000"/>
                    <outbound average="128" burst="256"/>
                </bandwidth>
            </interface>'''
        meta = {'vmid': 'VMID'}
        self._check_roundtrip(
            vmdevices.network.Interface, interface_xml, meta=meta)

    def test_interface_mtu(self):
        """The <mtu> element is preserved."""
        interface_xml = u'''
            <interface type="bridge">
                <address bus="0x00" domain="0x0000"
                    function="0x0" slot="0x03" type="pci"/>
                <mac address="52:54:00:59:F5:3F"/>
                <model type="virtio"/>
                <source bridge="ovirtmgmt"/>
                <mtu size="1492"/>
                <link state="up"/>
                <boot order="1"/>
            </interface>'''
        meta = {'vmid': 'VMID'}
        self._check_roundtrip(
            vmdevices.network.Interface, interface_xml, meta=meta)

    def test_interface_isolated(self):
        """The <port isolated="yes"/> element is preserved."""
        interface_xml = u'''
            <interface type="bridge">
                <address bus="0x00" domain="0x0000"
                    function="0x0" slot="0x03" type="pci"/>
                <mac address="52:54:00:59:F5:3F"/>
                <model type="virtio"/>
                <source bridge="ovirtmgmt"/>
                <port isolated="yes"/>
                <link state="up"/>
            </interface>'''
        meta = {'vmid': 'VMID'}
        self._check_roundtrip(
            vmdevices.network.Interface, interface_xml, meta=meta)

    @permutations([
        # link state
        ('up',),
        ('down',),
    ])
    def test_interface_link_state(self, link_state):
        """Both link states round-trip unchanged."""
        interface_xml = u'''
            <interface type="bridge">
                <address bus="0x00" domain="0x0000"
                    function="0x0" slot="0x03" type="pci"/>
                <mac address="52:54:00:59:F5:3F"/>
                <model type="virtio"/>
                <source bridge="ovirtmgmt"/>
                <link state="{link_state}"/>
                <boot order="1"/>
            </interface>'''.format(link_state=link_state)
        meta = {'vmid': 'VMID'}
        self._check_roundtrip(
            vmdevices.network.Interface, interface_xml, meta=meta)

    def test_interface_empty_bridge(self):
        """An empty bridge name is normalized to the dummy bridge."""
        interface_xml = u'''
            <interface type="bridge">
                <address bus="0x00" domain="0x0000"
                    function="0x0" slot="0x03" type="pci"/>
                <mac address="52:54:00:59:F5:3F"/>
                <model type="virtio"/>
                <source bridge=""/>
                <link state="down"/>
                <boot order="1"/>
            </interface>'''
        meta = {'vmid': 'VMID'}
        expected_xml = u'''
            <interface type="bridge">
                <address bus="0x00" domain="0x0000"
                    function="0x0" slot="0x03" type="pci" />
                <mac address="52:54:00:59:F5:3F" />
                <model type="virtio" />
                <source bridge=";vdsmdummy;" />
                <link state="down"/>
                <boot order="1" />
            </interface>'''
        self._check_roundtrip(
            vmdevices.network.Interface,
            interface_xml,
            meta=meta,
            expected_xml=expected_xml
        )

    def test_interface_vmfex(self):
        """VM-FEX NICs: we only keep what the hook does not rewrite."""
        interface_xml = u'''
            <interface type='network'>
              <mac address="52:54:00:59:F5:3F"/>
              <model type="virtio"/>
              <source network='direct-pool'/>
              <virtualport type='802.1Qbh'>
                <parameters profileid='OvirtProfileID'/>
              </virtualport>
            </interface>'''
        # the real work is done by the hook, so we check that
        # we correctly initialized what we could
        meta = {'vmid': 'VMID', 'network': 'ovirttest'}
        expected_xml = u'''
            <interface type="network">
              <mac address="52:54:00:59:F5:3F" />
              <model type="virtio" />
              <source bridge="ovirttest" />
              <link state="up" />
            </interface>'''
        self._check_roundtrip(
            vmdevices.network.Interface,
            interface_xml,
            meta=meta,
            expected_xml=expected_xml
        )

    def test_interface_sriov_only_host_address(self):
        """
        This is what we expect on the very first run. The device has not
        one guest address (managed and assigned by libvirt), just the
        host address to identify the host device.
        """
        interface_xml = u'''
            <interface managed="no" type="hostdev">
                <mac address="ff:ff:ff:ff:ff:ff"/>
                <source>
                    <address bus="0x05" domain="0x0000"
                        function="0x7" slot="0x10" type="pci"/>
                </source>
                <vlan>
                    <tag id="3"/>
                </vlan>
                <link state="up"/>
                <boot order="9"/>
                <driver name="vfio"/>
            </interface>'''
        meta = {'vmid': 'VMID'}
        with MonkeyPatchScope([
            (hostdev, 'libvirtconnection', FakeLibvirtConnection())
        ]):
            self._check_roundtrip(
                vmdevices.network.Interface, interface_xml, meta=meta)

    def test_interface_sriov_with_host_and_guest_address(self):
        """
        This is what we could get from the second run, and following.
        Engine may or may not pass the guest address, both ways are legal.
        Any way, we should never confuse them.
        """
        interface_xml = u'''
            <interface managed="no" type="hostdev">
                <address bus="0x01" domain="0x0000" function="0x0"
                    slot="0x02" type="pci"/>
                <mac address="ff:ff:ff:ff:ff:ff"/>
                <source>
                    <address bus="0x05" domain="0x0000"
                        function="0x7" slot="0x10" type="pci"/>
                </source>
                <vlan>
                    <tag id="3"/>
                </vlan>
                <link state="up"/>
                <boot order="9"/>
                <driver name="vfio"/>
            </interface>'''
        meta = {'vmid': 'VMID'}
        with MonkeyPatchScope([
            (hostdev, 'libvirtconnection', FakeLibvirtConnection())
        ]):
            self._check_roundtrip(
                vmdevices.network.Interface, interface_xml, meta=meta)

    def test_interface_hostdev(self):
        """Hostdev NICs round-trip with detach/reattach stubbed out."""
        interface_xml = u'''
            <interface type='hostdev' managed='no'>
              <address type='pci' domain='0x0000' bus='0x00'
                slot='0x04' function='0x0'/>
              <mac address='00:1a:4a:16:91:df'/>
              <source>
                <address type='pci' domain='0x0000' bus='0x05'
                  slot='0x00' function='0x1'/>
              </source>
              <link state="up"/>
              <driver name='vfio'/>
            </interface>'''
        meta = {'vmid': 'VMID'}
        with MonkeyPatchScope([
            (hostdev.libvirtconnection, 'get', hostdevlib.Connection),
            (vmdevices.hostdevice, 'detach_detachable',
                lambda *args, **kwargs: None),
            (vmdevices.hostdevice, 'reattach_detachable',
                lambda *args, **kwargs: None),
        ]):
            self._check_roundtrip(
                vmdevices.network.Interface, interface_xml, meta=meta)

    def test_storage(self):
        """Drive.from_xml_tree is intentionally not implemented."""
        with pytest.raises(NotImplementedError):
            vmdevices.storage.Drive.from_xml_tree(
                self.log, None, {}
            )

    @permutations(_STORAGE_TEST_DATA)
    def test_storage_from_xml(self, storage_xml, meta):
        """Drives are built through storagexml.parse instead."""
        dev = vmdevices.storage.Drive(
            self.log, **vmdevices.storagexml.parse(
                xmlutils.fromstring(storage_xml),
                {} if meta is None else meta
            )
        )
        self._check_device_xml(dev, storage_xml)

    @permutations(_TRANSIENT_STORAGE_TEST_DATA)
    def test_transient_storage_from_xml(self, storage_xml, meta):
        """Metadata shared='transient' is reflected on the Drive."""
        dev = vmdevices.storage.Drive(
            self.log, **vmdevices.storagexml.parse(
                xmlutils.fromstring(storage_xml),
                {} if meta is None else meta
            )
        )
        assert dev.shared == vmdevices.storage.DRIVE_SHARED_TYPE.TRANSIENT

    def test_storage_from_incomplete_xml(self):
        """A <source> without a file attribute gains file="" on output."""
        storage_xml = '''<disk device="disk" snapshot="no" type="file">
            <source>
                <seclabel model="dac" relabel="no" type="none" />
            </source>
            <target bus="virtio" dev="vda"/>
            <serial>54-a672-23e5b495a9ea</serial>
            <driver cache="none" error_policy="stop"
                    io="threads" name="qemu" type="raw"/>
        </disk>'''
        expected_xml = '''<disk device="disk" snapshot="no" type="file">
            <source file="">
                <seclabel model="dac" relabel="no" type="none" />
            </source>
            <target bus="virtio" dev="vda"/>
            <serial>54-a672-23e5b495a9ea</serial>
            <driver cache="none" error_policy="stop"
                    io="threads" name="qemu" type="raw"/>
        </disk>'''
        dev = vmdevices.storage.Drive(
            self.log, **vmdevices.storagexml.parse(
                xmlutils.fromstring(storage_xml),
                {}
            )
        )
        self._check_device_xml(dev, expected_xml)

    def test_cdrom_from_xml_without_driver_element(self):
        # test that we add the 'driver' element with the
        # defaults in in the XML-based initialization flow
        # this is the common XML template.
        cdrom_xml = u'''
            <disk type="file" device="cdrom" snapshot="no">
                <address bus="1" controller="0" unit="0"
                    type="drive" target="0"/>
                <source file="" startupPolicy="optional">
                    <seclabel model="dac" relabel="no" type="none" />
                </source>
                <target dev="hdc" bus="ide"/>
                <readonly/>
                {driver_xml}
            </disk>'''
        # simulate we receive a XML snippet without the driver
        # element. This is unlikely with Engine >= 4.2, but still
        # supported.
        source_xml = cdrom_xml.format(driver_xml='')
        # The output XML must include a "driver" element, built
        # using the defaults. Everything else should be the same
        # (see below for more details).
        expected_xml = cdrom_xml.format(
            driver_xml=u'''<driver name="qemu" type="raw"
            error_policy="stop"/>''')
        dev = vmdevices.storage.Drive(
            self.log, **vmdevices.storagexml.parse(
                xmlutils.fromstring(source_xml),
                {}
            )
        )
        # everything which is not related to the driver element should be
        # derived from the source XML, thus the source and the expected
        # XML snippets should be equal - bar the driver element.
        self._check_device_xml(dev, expected_xml)

    def test_cdrom_from_xml_without_source_element(self):
        """A missing <source> is synthesized with the default seclabel."""
        cdrom_xml = u'''
            <disk type="file" device="cdrom">
              <address type='drive' controller='0' bus='1' target='0' unit='0'/>
              <target dev='hdc' bus='ide' tray='open'/>
              <readonly/>
              <driver name='qemu' type='raw' error_policy='report'/>
            </disk>'''
        expected_xml = u'''
            <disk type="file" device="cdrom" snapshot="no">
              <address type='drive' controller='0' bus='1' target='0' unit='0'/>
              <source file="" startupPolicy="optional">
                <seclabel model="dac" relabel="no" type="none"/>
              </source>
              <target dev='hdc' bus='ide'/>
              <readonly/>
              <driver name='qemu' type='raw' error_policy='report'/>
            </disk>'''
        dom = xmlutils.fromstring(cdrom_xml)
        dev = vmdevices.storage.Drive(
            self.log, **vmdevices.storagexml.parse(dom, {})
        )
        self._check_device_xml(dev, expected_xml)

    def _check_roundtrip(self, klass, dev_xml, meta=None, expected_xml=None):
        # build the device from XML, then verify attributes and re-rendering
        dev = klass.from_xml_tree(
            self.log,
            xmlutils.fromstring(dev_xml),
            {} if meta is None else meta
        )
        self._check_device_attrs(dev)
        self._check_device_xml(dev, dev_xml, expected_xml)

    def _check_device_attrs(self, dev):
        # attributes every reconstructed device must expose
        assert hasattr(dev, 'specParams')
        if (isinstance(dev, vmdevices.network.Interface) or
                isinstance(dev, vmdevices.storage.Drive)):
            assert hasattr(dev, 'vm_custom')

    def _check_device_xml(self, dev, dev_xml, expected_xml=None):
        # setup/teardown may allocate host resources; always pair them
        dev.setup()
        try:
            rebuilt_xml = xmlutils.tostring(dev.getXML(), pretty=True)
            # make troubleshooting easier
            print(rebuilt_xml)
            result_xml = dev_xml if expected_xml is None else expected_xml
            self.assertXMLEqual(rebuilt_xml, result_xml)
        finally:
            dev.teardown()
_DRIVE_PAYLOAD_XML = u"""<domain type='kvm' id='2'>
<uuid>dd493ddc-1ef2-4445-a248-4a7bc266a671</uuid>
<metadata
xmlns:ovirt-tune='http://ovirt.org/vm/tune/1.0'
xmlns:ovirt-vm='http://ovirt.org/vm/1.0'>
<ovirt-tune:qos/>
<ovirt-vm:vm>
<ovirt-vm:device devtype="disk" name="hdd">
<ovirt-vm:readonly type='bool'>true</ovirt-vm:readonly>
<ovirt-vm:payload>
<ovirt-vm:volId>config-1</ovirt-vm:volId>
<ovirt-vm:file path='openstack/content/0000'>AAA</ovirt-vm:file>
<ovirt-vm:file path='openstack/latest/meta_data.json'>BBB</ovirt-vm:file>
<ovirt-vm:file path='openstack/latest/user_data'>CCC</ovirt-vm:file>
</ovirt-vm:payload>
</ovirt-vm:device>
</ovirt-vm:vm>
</metadata>
<devices>
<emulator>/usr/libexec/qemu-kvm</emulator>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source startupPolicy='optional'/>
<target dev='hdd' bus='ide'/>
<readonly/>
<driver error_policy="report" name="qemu" type="raw" />
</disk>
</devices>
</domain>"""
_INVALID_DEVICE_XML = u"""<domain type='kvm' id='2'>
<uuid>1234</uuid>
<devices>
<graphics/>
</devices>
</domain>
"""
class DeviceFromXMLTests(XMLTestCase):
    """Build device objects out of domain XML plus ovirt-vm metadata."""
    def test_payload_from_metadata(self):
        # the vmPayload specParam must be reconstructed from the
        # <ovirt-vm:payload> section of the domain metadata
        vmPayload = {
            'volId': 'config-1',
            'file': {
                'openstack/content/0000': 'AAA',
                'openstack/latest/meta_data.json': 'BBB',
                'openstack/latest/user_data': 'CCC',
            }
        }
        md_desc = metadata.Descriptor.from_xml(_DRIVE_PAYLOAD_XML)
        root = xmlutils.fromstring(_DRIVE_PAYLOAD_XML)
        dev_xml = root.find('./devices/disk')
        with md_desc.device(devtype='disk', name='hdd') as meta:
            dev_obj = vmdevices.storage.Drive(
                self.log, **vmdevices.storagexml.parse(dev_xml, meta)
            )
            assert dev_obj.specParams['vmPayload'] == vmPayload
    def test_payload_from_metadata_dump(self):
        # dumping the descriptor back to XML must preserve the payload intact
        expected_xml = u'''<ovirt-vm:vm xmlns:ovirt-vm='http://ovirt.org/vm/1.0'>
    <ovirt-vm:device devtype="disk" name="hdd">
        <ovirt-vm:readonly type='bool'>True</ovirt-vm:readonly>
        <ovirt-vm:payload>
            <ovirt-vm:volId>config-1</ovirt-vm:volId>
            <ovirt-vm:file path='openstack/content/0000'>AAA</ovirt-vm:file>
            <ovirt-vm:file path='openstack/latest/meta_data.json'>BBB</ovirt-vm:file>
            <ovirt-vm:file path='openstack/latest/user_data'>CCC</ovirt-vm:file>
        </ovirt-vm:payload>
    </ovirt-vm:device>
</ovirt-vm:vm>'''
        md_desc = metadata.Descriptor.from_xml(_DRIVE_PAYLOAD_XML)
        self.assertXMLEqual(md_desc.to_xml(), expected_xml)
    def test_device_core_attributes_present_and_never_none(self):
        # every device parsed from the hosted-engine domain must expose
        # non-None `type` and `device` attributes
        he_xml = read_data('hostedengine.xml')
        dom_desc = DomainDescriptor(he_xml)
        md_desc = metadata.Descriptor.from_xml(he_xml)
        dev_objs = vmdevices.common.dev_map_from_domain_xml(
            'HE', dom_desc, md_desc, self.log
        )
        for devices in dev_objs.values():
            for dev in devices:
                print(dev) # debug aid
                assert dev.type is not None
                assert dev.device is not None
    def test_erroneous_device_init(self):
        # a <graphics/> element without a display network must raise NotFound
        dom_desc = DomainDescriptor(_INVALID_DEVICE_XML)
        for dom in dom_desc.get_device_elements('graphics'):
            dev = vmdevices.graphics.Graphics(dom, '1234')
        with pytest.raises(vmxml.NotFound):
            dev._display_network()
# Deliberately incomplete (schema-invalid) domain XML containing only the
# sections these matching tests need; the UUID is random and has no meaning.
_DOMAIN_MD_MATCH_XML = u"""<domain type='kvm' id='2'>
<uuid>dd493ddc-1ef2-4445-a248-4a7bc266a671</uuid>
<metadata
xmlns:ovirt-tune='http://ovirt.org/vm/tune/1.0'
xmlns:ovirt-vm='http://ovirt.org/vm/1.0'>
<ovirt-tune:qos/>
<ovirt-vm:vm>
<ovirt-vm:device devtype="disk" name="sda">
<ovirt-vm:RBD>/dev/rbd/pool/volume-uuid</ovirt-vm:RBD>
</ovirt-vm:device>
<ovirt-vm:device devtype="disk" name="sdb">
<ovirt-vm:GUID>3600a098038304479363f4c4870455167</ovirt-vm:GUID>
<ovirt-vm:imageID>3600a098038304479363f4c4870455167</ovirt-vm:imageID>
<ovirt-vm:managed type="bool">True</ovirt-vm:managed>
</ovirt-vm:device>
<ovirt-vm:device devtype="disk" name="sdc">
<ovirt-vm:GUID>3600a098038304479363f4c4870455162</ovirt-vm:GUID>
<ovirt-vm:imageID>3600a098038304479363f4c4870455162</ovirt-vm:imageID>
</ovirt-vm:device>
<ovirt-vm:device devtype="disk" name="sdd">
<ovirt-vm:GUID>3600a098038304479363f4c4870455163</ovirt-vm:GUID>
<ovirt-vm:imageID>3600a098038304479363f4c4870455163</ovirt-vm:imageID>
<ovirt-vm:managed type="bool">False</ovirt-vm:managed>
</ovirt-vm:device>
<ovirt-vm:device mac_address="00:1a:4a:16:01:00">
<ovirt-vm:portMirroring>
<ovirt-vm:network>network1</ovirt-vm:network>
<ovirt-vm:network>network2</ovirt-vm:network>
</ovirt-vm:portMirroring>
</ovirt-vm:device>
<ovirt-vm:device alias='net0'>
<ovirt-vm:network>ovirtmgmt0</ovirt-vm:network>
</ovirt-vm:device>
<ovirt-vm:device alias='net1' mac_address='00:1a:3b:16:10:16'>
<ovirt-vm:network>ovirtmgmt1</ovirt-vm:network>
</ovirt-vm:device>
<ovirt-vm:device mac_address='00:1a:55:ff:20:26'>
<ovirt-vm:network>ovirtmgmt2</ovirt-vm:network>
</ovirt-vm:device>
<ovirt-vm:device mac_address='00:1a:55:ff:30:36'>
<ovirt-vm:network>ovirtmgmt2</ovirt-vm:network>
</ovirt-vm:device>
</ovirt-vm:vm>
</metadata>
<devices>
<emulator>/usr/libexec/qemu-kvm</emulator>
<disk type='block' device='disk' snapshot='no'>
<driver name='qemu' type='raw' cache='none'/>
<source dev='/dev/rbd/pool/volume-uuid'>
<seclabel model='dac' relabel='no'/>
</source>
<backingStore/>
<target dev='sda' bus='scsi'/>
<serial>44ab108a-62e6-480e-b44c-aac301227f94</serial>
<boot order='1'/>
<alias name='ua-44ab108a-62e6-480e-b44c-aac301227f94'/>
<address type='drive' controller='0' bus='0' target='0' unit='0'/>
</disk>
<disk type='block' device='disk' snapshot='no'>
<driver name='qemu' type='raw' cache='none'/>
<source dev='/dev/mapper/3600a098038304479363f4c4870455167' index='2'>
<seclabel model='dac' relabel='no'/>
</source>
<backingStore/>
<target dev='sdb' bus='scsi'/>
<serial>ead4a539-6b93-4f6e-8a92-3eea28e91d4e</serial>
<alias name='ua-ead4a539-6b93-4f6e-8a92-3eea28e91d4e'/>
<address type='drive' controller='0' bus='0' target='0' unit='1'/>
</disk>
<disk type='block' device='disk' snapshot='no'>
<driver name='qemu' type='raw' cache='none'/>
<source dev='/dev/mapper/3600a098038304479363f4c4870455162' index='2'>
<seclabel model='dac' relabel='no'/>
</source>
<backingStore/>
<target dev='sdc' bus='scsi'/>
<serial>ead4a529-6b93-4f6e-8a92-3eea28e91d4e</serial>
<alias name='ua-ead4a529-6b93-4f6e-8a92-3eea28e91d4e'/>
<address type='drive' controller='0' bus='0' target='0' unit='1'/>
</disk>
<disk type='block' device='disk' snapshot='no'>
<driver name='qemu' type='raw' cache='none'/>
<source dev='/dev/mapper/3600a098038304479363f4c4870455163' index='2'>
<seclabel model='dac' relabel='no'/>
</source>
<backingStore/>
<target dev='sdd' bus='scsi'/>
<serial>ead4a529-6b93-4f6e-8a92-3eea28e91d5e</serial>
<alias name='ua-ead4a529-6b93-4f6e-8a92-3eea28e91d5e'/>
<address type='drive' controller='0' bus='0' target='0' unit='1'/>
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source startupPolicy='optional'/>
<backingStore/>
<target dev='hdc' bus='ide'/>
<readonly/>
<driver error_policy="report" name="qemu" type="raw" />
</disk>
<controller type='virtio-serial' index='0' ports='16'>
<alias name='virtio-serial0'/>
</controller>
<controller type='usb' index='0'>
<alias name='usb'/>
</controller>
<controller type='pci' index='0' model='pci-root' />
<interface type='bridge'>
<mac address='00:1a:4a:16:01:51'/>
<source bridge='INVALID0'/>
<target dev='vnet0'/>
<model type='virtio'/>
<filterref filter='vdsm-no-mac-spoofing'/>
<link state='up'/>
<boot order='2'/>
<alias name='net0'/>
</interface>
<interface type='bridge'>
<mac address='00:1a:3b:16:10:16'/>
<source bridge='INVALID1'/>
<target dev='vnet0'/>
<model type='virtio'/>
<filterref filter='vdsm-no-mac-spoofing'/>
<link state='up'/>
<boot order='2'/>
<alias name='net1'/>
</interface>
<interface type='bridge'>
<mac address='00:1a:55:ff:20:26'/>
<source bridge='INVALID1'/>
<target dev='vnet0'/>
<model type='virtio'/>
<filterref filter='vdsm-no-mac-spoofing'/>
<link state='up'/>
<boot order='2'/>
</interface>
<interface type='bridge'>
<mac address='00:1a:4a:16:01:00'/>
<source bridge='network1'/>
<target dev='vnet1'/>
<model type='virtio'/>
<filterref filter='vdsm-no-mac-spoofing'/>
<link state='up'/>
</interface>
<interface type='bridge'>
<mac address='00:1a:55:ff:30:36'/>
<source bridge='network4'/>
<target dev='vnet4'/>
<model type='virtio'/>
<link state='up'/>
</interface>
</devices>
</domain>"""
class DeviceMetadataMatchTests(XMLTestCase):
    """Check how devices parsed from domain XML are matched with their
    ovirt-vm metadata entries (by drive name, NIC alias, MAC address)."""
    def setUp(self):
        # both descriptors are built from the same canned domain XML
        self.dom_desc = DomainDescriptor(_DOMAIN_MD_MATCH_XML)
        self.md_desc = metadata.Descriptor.from_xml(_DOMAIN_MD_MATCH_XML)
    def test_match_interface_by_alias_only_fails(self):
        # fails because we
        # assert set(matching_attrs) in set(device_metadata_attrs)
        # while the reverse can be false with no consequences.
        dev_objs = vmdevices.common.dev_map_from_domain_xml(
            'TESTING', self.dom_desc, self.md_desc, self.log
        )
        nic = self._find_nic_by_mac(dev_objs, '00:1a:4a:16:01:51')
        # metadata not applied: network stays as parsed from the domain XML
        assert nic.network == 'INVALID0'
    def test_match_interface_by_mac_only_succeeds(self):
        # MAC address alone is a sufficient matching key
        dev_objs = vmdevices.common.dev_map_from_domain_xml(
            'TESTING', self.dom_desc, self.md_desc, self.log
        )
        nic = self._find_nic_by_mac(dev_objs, '00:1a:3b:16:10:16')
        assert nic.network == 'ovirtmgmt1'
    def test_match_interface_by_mac_and_alias_succeeds(self):
        # mac is enough, but we match extra arguments if given
        dev_objs = vmdevices.common.dev_map_from_domain_xml(
            'TESTING', self.dom_desc, self.md_desc, self.log
        )
        nic = self._find_nic_by_mac(dev_objs, '00:1a:55:ff:20:26')
        assert nic.network == 'ovirtmgmt2'
    def test_port_mirroring(self):
        # portMirroring must come from metadata, defaulting to empty
        dev_objs = vmdevices.common.dev_map_from_domain_xml(
            'TESTING', self.dom_desc, self.md_desc, self.log
        )
        # random MAC, any nic with portMirroring configured is fine
        nic1 = self._find_nic_by_mac(dev_objs, '00:1a:55:ff:20:26')
        assert nic1.portMirroring == []
        nic2 = self._find_nic_by_mac(dev_objs, '00:1a:4a:16:01:00')
        assert nic2.portMirroring == ['network1', 'network2']
    def test_attributes_present(self):
        # NIC attributes must exist (with empty defaults) even when the
        # metadata provides no values for them
        dev_objs = vmdevices.common.dev_map_from_domain_xml(
            'TESTING', self.dom_desc, self.md_desc, self.log
        )
        nic = self._find_nic_by_mac(dev_objs, '00:1a:55:ff:30:36')
        assert nic.filterParameters == []
        assert nic.portMirroring == []
        assert nic.vm_custom == {}
    def _find_nic_by_mac(self, dev_objs, mac_addr):
        # helper: locate a NIC device object by its MAC address
        for nic in dev_objs[vmdevices.hwclass.NIC]:
            if nic.macAddr == mac_addr:
                return nic
        raise AssertionError('no nic with mac=%s found' % mac_addr)
    def test_correct_rbd_disk_metadata(self):
        # the RBD metadata attribute must be carried over onto the drive
        drives = vmdevices.common.storage_device_params_from_domain_xml(
            'TESTING', self.dom_desc, self.md_desc, self.log
        )
        disk_objs = [
            vmdevices.storage.Drive(self.log, **params)
            for params in drives
        ]
        rbd_drive = lookup.drive_by_name(disk_objs, 'sda')
        assert getattr(rbd_drive, 'RBD') == '/dev/rbd/pool/volume-uuid'
    def test_managed_device_parameter_present(self):
        # metadata managed=True -> drive.managed is truthy
        drives = vmdevices.common.storage_device_params_from_domain_xml(
            'TESTING', self.dom_desc, self.md_desc, self.log
        )
        disk_objs = [
            vmdevices.storage.Drive(self.log, **params)
            for params in drives
        ]
        drive = lookup.drive_by_name(disk_objs, 'sdb')
        assert drive.managed
    def test_no_managed_device_parameter(self):
        # no managed flag in metadata -> drive defaults to unmanaged
        drives = vmdevices.common.storage_device_params_from_domain_xml(
            'TESTING', self.dom_desc, self.md_desc, self.log
        )
        disk_objs = [
            vmdevices.storage.Drive(self.log, **params)
            for params in drives
        ]
        drive = lookup.drive_by_name(disk_objs, 'sdc')
        assert not drive.managed
    def test_not_managed_device_parameter(self):
        # explicit managed=False in metadata -> drive is unmanaged
        drives = vmdevices.common.storage_device_params_from_domain_xml(
            'TESTING', self.dom_desc, self.md_desc, self.log
        )
        disk_objs = [
            vmdevices.storage.Drive(self.log, **params)
            for params in drives
        ]
        drive = lookup.drive_by_name(disk_objs, 'sdd')
        assert not drive.managed
_VM_MDEV_XML = """<?xml version='1.0' encoding='utf-8'?>
<domain xmlns:ns0="http://ovirt.org/vm/tune/1.0"
xmlns:ovirt-vm="http://ovirt.org/vm/1.0" type="kvm">
<name>vm</name>
<uuid>6a28e9f6-6627-49b8-8c24-741ab810ecc0</uuid>
<devices>
<hostdev mode="subsystem" model="vfio-pci" type="mdev">
<source>
<address uuid="c1f343ae-99a5-4d82-9d5c-203cd4b7dac0" />
</source>
</hostdev>
</devices>
<metadata>
<ovirt-vm:vm>
<clusterVersion>4.2</clusterVersion>
<ovirt-vm:device devtype="hostdev"
uuid="c1f343ae-99a5-4d82-9d5c-203cd4b7dac0">
<ovirt-vm:mdevType>graphics-card-1%(placement)s</ovirt-vm:mdevType>
</ovirt-vm:device>
</ovirt-vm:vm>
</metadata>
</domain>
"""
class FakeLibvirtConnection(object):
    """Test double whose get() always hands out a fake libvirt connection."""

    def get(self, *args, **kwargs):
        # arguments are accepted for API compatibility, then ignored
        return fake.Connection()
| gpl-2.0 |
tlatzko/spmcluster | .tox/clean/lib/python2.7/site-packages/pip/_vendor/requests/utils.py | 618 | 21334 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
# Keep a reference to the re-exported RequestsCookieJar so pyflakes does not
# flag the import above as unused.
_hush_pyflakes = (RequestsCookieJar,)
# Candidate netrc file names, in lookup order ('_netrc' is the Windows name).
NETRC_FILES = ('.netrc', '_netrc')
# Path to the CA bundle shipped with the bundled certs module.
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
    """Returns an internal sequence dictionary update.

    Mapping-like objects are turned into their (key, value) item sequence;
    anything else is passed through unchanged.
    """
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort size of *o*.

    Tries, in order: ``__len__``, a ``len`` attribute, an ``fstat`` of
    ``fileno()``, and finally ``len(getvalue())`` for in-memory streams.
    Returns None when none of these apply.
    """
    if hasattr(o, '__len__'):
        return len(o)

    if hasattr(o, 'len'):
        return o.len

    if hasattr(o, 'fileno'):
        try:
            fd = o.fileno()
        except io.UnsupportedOperation:
            # not backed by a real file descriptor; keep trying
            pass
        else:
            return os.fstat(fd).st_size

    if hasattr(o, 'getvalue'):
        # e.g. BytesIO, cStringIO.StringIO
        return len(o.getvalue())
def get_netrc_auth(url):
    """Returns the Requests tuple auth for a given url from netrc.

    Looks for a ~/.netrc (or ~/_netrc) entry matching the URL's host and
    returns (login, password); returns None when no usable entry exists or
    the netrc machinery is unavailable.
    """
    try:
        from netrc import netrc, NetrcParseError
        netrc_path = None
        for f in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{0}'.format(f))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See http://bugs.python.org/issue20164 &
                # https://github.com/kennethreitz/requests/issues/1846
                return
            if os.path.exists(loc):
                netrc_path = loc
                break
        # Abort early if there isn't one.
        if netrc_path is None:
            return
        ri = urlparse(url)
        # Strip port numbers from netloc
        host = ri.netloc.split(':')[0]
        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password
                # (the account field is used as login when login is empty)
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth
            pass
    # AppEngine hackiness.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Tries to guess the filename of the given object.

    Returns the basename of ``obj.name`` when it looks like a real path;
    pseudo-files such as ``<stdin>`` yield None.
    """
    name = getattr(obj, 'name', None)
    if not name or not isinstance(name, basestring):
        return None
    if name.startswith('<') or name.endswith('>'):
        return None
    return os.path.basename(name)
def from_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. Unless it can not be represented as such, return an
    OrderedDict, e.g.,

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: need more than 1 value to unpack
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])
    """
    if value is None:
        return None

    # scalars cannot be interpreted as sequences of 2-tuples
    scalar_types = (str, bytes, bool, int)
    if isinstance(value, scalar_types):
        raise ValueError('cannot encode objects that are not 2-tuples')

    return OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples.

    :param value: mapping or iterable of 2-tuples (or None).
    :raises ValueError: if *value* is a scalar type.
    """
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    # BUG FIX: the bare `collections.Mapping` alias was deprecated since
    # Python 3.3 and removed in 3.10; import the abc location, keeping a
    # fallback for Python 2.
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping
    if isinstance(value, Mapping):
        value = value.items()

    return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings. A quoted-string could
    contain a comma. A non-quoted string could have quotes in the
    middle. Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    """
    items = []
    for element in _parse_list_header(value):
        if element[:1] == element[-1:] == '"':
            # strip the quotes and undo backslash escaping
            element = unquote_header_value(element[1:-1])
        items.append(element)
    return items
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict:

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    :param value: a string with a dict header.
    :return: :class:`dict`
    """
    parsed = {}
    for item in _parse_list_header(value):
        if '=' not in item:
            # bare token: map it to None
            parsed[item] = None
            continue
        name, _, raw = item.partition('=')
        if raw[:1] == raw[-1:] == '"':
            raw = unquote_header_value(raw[1:-1])
        parsed[name] = raw
    return parsed
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    :param value: the header value to unquote.
    """
    if not (value and value[0] == value[-1] == '"'):
        # not a quoted string: hand it back untouched
        return value

    # this is not the real unquoting, but fixing this so that the
    # RFC is met will result in bugs with internet explorer and
    # probably some other browsers as well. IE for example is
    # uploading files with "C:\foo\bar.txt" as filename
    inner = value[1:-1]

    # if this is a filename and the starting characters look like
    # a UNC path, keep the escapes intact: collapsing the leading
    # double backslash would corrupt the path. See #458.
    if is_filename and inner[:2] == '\\\\':
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    Later cookies with the same name overwrite earlier ones, exactly as a
    plain loop over the jar would.

    :param cj: CookieJar object to extract cookies from.
    """
    return dict((cookie.name, cookie.value) for cookie in cj)
def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    """
    new_jar = cookiejar_from_dict(cookie_dict)
    cj.update(new_jar)
    return cj
def get_encodings_from_content(content):
    """Returns encodings from given content string.

    Scans for <meta charset=...>, http-equiv pragma charsets and the XML
    declaration, in that order.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn((
        'In requests 3.0, get_encodings_from_content will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    find_charset = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I).findall
    find_pragma = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I).findall
    find_xml = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]').findall

    return find_charset(content) + find_pragma(content) + find_xml(content)
def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    :return: the ``charset`` parameter of the content-type header,
        ``'ISO-8859-1'`` for text types that declare no charset (per
        RFC 2616), or None when no content-type header is present.
    """
    content_type = headers.get('content-type')

    if not content_type:
        return None

    # Inline replacement for cgi.parse_header(): the cgi module is
    # deprecated since Python 3.11 and removed in Python 3.13.
    tokens = content_type.split(';')
    content_type, params = tokens[0].strip(), tokens[1:]
    for param in params:
        key, _, value = param.partition('=')
        if key.strip().lower() == 'charset':
            return value.strip().strip("'\"")

    if 'text' in content_type:
        return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
    """Stream decodes a iterator.

    Yields the chunks of *iterator* decoded with ``r.encoding``; when the
    response declares no encoding the raw chunks are passed through.
    """
    encoding = r.encoding
    if encoding is None:
        for chunk in iterator:
            yield chunk
        return

    decoder = codecs.getincrementaldecoder(encoding)(errors='replace')
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # flush any multi-byte sequence still buffered in the decoder
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
def iter_slices(string, slice_length):
    """Iterate over slices of a string."""
    start = 0
    total = len(string)
    while start < total:
        yield string[start:start + slice_length]
        start += slice_length
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:

    1. charset from content-type
    2. fall back and replace all unicode characters

    """
    warnings.warn((
        'In requests 3.0, get_unicode_from_response will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)

    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            # declared charset failed to decode; fall through to replacement
            pass

    # Fall back:
    try:
        return str(r.content, encoding, errors='replace')
    except TypeError:
        # encoding is None -> nothing to decode with, hand back raw bytes
        return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    + "0123456789-._~")


def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
    """
    parts = uri.split('%')
    decoded = [parts[0]]
    for part in parts[1:]:
        hex_pair = part[0:2]
        if len(hex_pair) == 2 and hex_pair.isalnum():
            try:
                char = chr(int(hex_pair, 16))
            except ValueError:
                raise InvalidURL("Invalid percent-escape sequence: '%s'" % hex_pair)
            if char in UNRESERVED_SET:
                # unreserved character: safe to decode in place
                decoded.append(char + part[2:])
            else:
                decoded.append('%' + part)
        else:
            # truncated or non-hex escape: keep the raw '%'
            decoded.append('%' + part)
    return ''.join(decoded)
def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.
    """
    reserved_and_percent = "!#$%&'()*+,/:;=?@[]~"
    reserved_only = "!#$&'()*+,/:;=?@[]~"
    try:
        # Unquote only the unreserved characters, then quote only illegal
        # characters (leaving reserved, unreserved and '%' untouched).
        return quote(unquote_unreserved(uri), safe=reserved_and_percent)
    except InvalidURL:
        # We couldn't unquote the given URI, so there may be bare '%'s in
        # it; quote as-is, escaping percent signs so they cause no issues
        # elsewhere.
        return quote(uri, safe=reserved_only)
def address_in_network(ip, net):
    """
    This function allows you to check if on IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
    """
    host = struct.unpack('=L', socket.inet_aton(ip))[0]
    net_addr, prefix = net.split('/')
    mask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(prefix))))[0]
    network = struct.unpack('=L', socket.inet_aton(net_addr))[0] & mask
    return (host & mask) == (network & mask)


def dotted_netmask(mask):
    """
    Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0
    """
    # all-ones with the low (32 - mask) host bits cleared
    host_bits = 32 - mask
    bits = 0xffffffff & ~((1 << host_bits) - 1)
    return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
    """Return True if *string_ip* parses as an IPv4 address."""
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """Very simple check of the cidr format in no_proxy variable"""
    if string_network.count('/') != 1:
        return False

    address, _, prefix = string_network.partition('/')
    try:
        mask = int(prefix)
    except ValueError:
        return False
    if not 1 <= mask <= 32:
        return False
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
def should_bypass_proxies(url):
    """
    Returns whether we should bypass proxies or not.

    Honours the no_proxy/NO_PROXY environment variable (hostname suffixes
    and IPv4 CIDR blocks) and the platform proxy-bypass settings.
    """
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy = get_proxy('no_proxy')
    netloc = urlparse(url).netloc

    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the netloc, both with and without the port.
        no_proxy = no_proxy.replace(' ', '').split(',')

        ip = netloc.split(':')[0]
        if is_ipv4_address(ip):
            # literal IP: match against any CIDR entries in no_proxy
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(ip, proxy_ip):
                        return True
        else:
            # hostname: match as a suffix, with and without the port
            for host in no_proxy:
                if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True

    # If the system proxy settings indicate that this URL should be bypassed,
    # don't proxy.
    # The proxy_bypass function is incredibly buggy on OS X in early versions
    # of Python 2.6, so allow this call to fail. Only catch the specific
    # exceptions we've seen, though: this call failing in other ways can reveal
    # legitimate problems.
    try:
        bypass = proxy_bypass(netloc)
    except (TypeError, socket.gaierror):
        bypass = False

    if bypass:
        return True

    return False
def get_environ_proxies(url):
    """Return a dict of environment proxies.

    Empty when the URL is covered by a no_proxy / bypass rule.
    """
    return {} if should_bypass_proxies(url) else getproxies()
def default_user_agent(name="python-requests"):
    """Return a string representing the default user agent.

    Format: ``<name>/<version> <implementation>/<impl-version>
    <system>/<release>``.
    """
    impl = platform.python_implementation()

    if impl == 'CPython':
        impl_version = platform.python_version()
    elif impl == 'PyPy':
        info = sys.pypy_version_info
        impl_version = '%s.%s.%s' % (info.major, info.minor, info.micro)
        if info.releaselevel != 'final':
            # e.g. '2.7.3beta'
            impl_version += info.releaselevel
    elif impl in ('Jython', 'IronPython'):
        impl_version = platform.python_version()  # Complete Guess
    else:
        impl_version = 'Unknown'

    try:
        p_system = platform.system()
        p_release = platform.release()
    except IOError:
        p_system = 'Unknown'
        p_release = 'Unknown'

    return " ".join(['%s/%s' % (name, __version__),
                     '%s/%s' % (impl, impl_version),
                     '%s/%s' % (p_system, p_release)])
def default_headers():
    """Build the case-insensitive set of headers sent with every request."""
    headers = CaseInsensitiveDict()
    headers['User-Agent'] = default_user_agent()
    headers['Accept-Encoding'] = 'gzip, deflate'
    headers['Accept'] = '*/*'
    headers['Connection'] = 'keep-alive'
    return headers
def parse_header_links(value):
    """Return a dict of parsed link headers proxies.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"

    """
    links = []
    strip_chars = " '\""

    for segment in re.split(", *<", value):
        url, _, params = segment.partition(";")

        link = {"url": url.strip("<> '\"")}

        for param in params.split(";"):
            pieces = param.split("=")
            if len(pieces) != 2:
                # malformed parameter: stop parsing this link's params
                break
            key, val = pieces
            link[key.strip(strip_chars)] = val.strip(strip_chars)

        links.append(link)

    return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """Guess the UTF encoding of *data*, a byte string holding JSON.

    JSON always starts with two ASCII characters, so detection is as
    easy as counting the nulls and from their location and count
    determine the encoding. Also detect a BOM, if present.

    :param data: bytes to sniff.
    :return: codec name usable with :meth:`bytes.decode`, or None.
    """
    sample = data[:4]
    # BUG FIX: this previously compared against codecs.BOM32_BE, which is a
    # legacy alias for the *UTF-16*-BE BOM (2 bytes) and therefore could
    # never equal a 4-byte sample -- UTF-32-BE BOMs went undetected.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:   # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def prepend_scheme_if_needed(url, new_scheme):
    '''Given a URL that may or may not have a scheme, prepend the given scheme.
    Does not replace a present scheme with the one provided as an argument.'''
    parsed = urlparse(url, new_scheme)
    netloc, path = parsed.netloc, parsed.path

    # urlparse is a finicky beast, and sometimes decides that there isn't a
    # netloc present. Assume that it's being over-cautious, and switch netloc
    # and path if urlparse decided there was no netloc.
    if not netloc:
        netloc, path = path, netloc

    return urlunparse((parsed.scheme, netloc, path,
                       parsed.params, parsed.query, parsed.fragment))
def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password."""
    parsed = urlparse(url)
    try:
        # username/password are None when absent; unquote then raises
        return (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        return ('', '')
def to_native_string(string, encoding='ascii'):
    """
    Given a string object, regardless of type, returns a representation of that
    string in the native string type, encoding and decoding where necessary.
    This assumes ASCII unless told otherwise.
    """
    if isinstance(string, builtin_str):
        return string
    # on Python 2 the native type is bytes; on Python 3 it is str
    if is_py2:
        return string.encode(encoding)
    return string.decode(encoding)
def urldefragauth(url):
    """
    Given a url remove the fragment and the authentication part
    """
    scheme, netloc, path, params, query, _ = urlparse(url)

    # see func:`prepend_scheme_if_needed`
    if not netloc:
        netloc, path = path, netloc

    # drop any user:password@ prefix from the network location
    netloc = netloc.rsplit('@', 1)[-1]

    return urlunparse((scheme, netloc, path, params, query, ''))
| bsd-2-clause |
mathemage/h2o-3 | h2o-py/tests/testdir_algos/glrm/pyunit_NOFEATURE_glrm_long_runtime_large.py | 5 | 3155 | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glrm import H2OGeneralizedLowRankEstimator
# This test loads a large dataset, runs GLRM on it, and measures the run
# time. It must not be run on Jenkins: it simply takes too long.
def glrm_long_run():
    """Train GLRM twice on the milsongs dataset and report wall time and
    iteration counts: once capped at 60 seconds of runtime, once with no
    time restriction."""
    run_time_ms = []
    iterations = []
    acs_orig = h2o.upload_file(path=pyunit_utils.locate("bigdata/laptop/milsongs/milsongs-cls-train.csv.gz"))
    # run GLRM with max_runtime_ms restriction.
    acs_model = H2OGeneralizedLowRankEstimator(k = 10,
                                               transform = 'STANDARDIZE',
                                               loss = 'Quadratic',
                                               multi_loss="Categorical",
                                               model_id="clients_core_glrm",
                                               regularization_x="L2",
                                               regularization_y="L1",
                                               gamma_x=0.2,
                                               gamma_y=0.5,
                                               init="SVD",
                                               seed=1234)
    acs_model.train(x = acs_orig.names, training_frame= acs_orig, max_runtime_secs=60)
    # start/end times are reported by the backend in milliseconds
    print("Run time in s with max_runtime_secs of 60 second: "
          "{0}".format((acs_model._model_json['output']['end_time']-
                        acs_model._model_json['output']['start_time'])/1000.0))
    print("number of iterations: {0}".format(acs_model._model_json['output']['iterations']))
    # let glrm run with restriction on iteration number.
    acs_model = H2OGeneralizedLowRankEstimator(k = 10,
                                               transform = 'STANDARDIZE',
                                               loss = 'Quadratic',
                                               multi_loss="Categorical",
                                               model_id="clients_core_glrm",
                                               regularization_x="L2",
                                               regularization_y="L1",
                                               gamma_x=0.2,
                                               gamma_y=0.5,
                                               init="SVD",
                                               seed=1234)
    acs_model.train(x = acs_orig.names, training_frame= acs_orig)
    run_time_ms.append(acs_model._model_json['output']['end_time'] - acs_model._model_json['output']['start_time'])
    iterations.append(acs_model._model_json['output']['iterations'])
    print("Run time in s with no max time restrication: "
          "{0}".format((acs_model._model_json['output']['end_time'] -
                        acs_model._model_json['output']['start_time'])/1000.0))
    print("number of iterations: {0}".format(acs_model._model_json['output']['iterations']))
    sys.stdout.flush()
if __name__ == "__main__":
    # run under the pyunit harness (connects to an H2O cluster first)
    pyunit_utils.standalone_test(glrm_long_run)
else:
    # imported by the test runner: execute directly
    glrm_long_run()
| apache-2.0 |
js0701/chromium-crosswalk | third_party/cython/src/Cython/Compiler/Tests/TestMemView.py | 127 | 2516 | from Cython.TestUtils import CythonTest
import Cython.Compiler.Errors as Errors
from Cython.Compiler.Nodes import *
from Cython.Compiler.ParseTreeTransforms import *
from Cython.Compiler.Buffer import *
class TestMemviewParsing(CythonTest):
    """Parser-level tests for Cython memoryview slice declarations."""
    def parse(self, s):
        # the fragment must compile cleanly; return the parse tree root
        return self.should_not_fail(lambda: self.fragment(s)).root
    def not_parseable(self, expected_error, s):
        # the fragment must fail with exactly the expected compile error
        e = self.should_fail(lambda: self.fragment(s), Errors.CompileError)
        self.assertEqual(expected_error, e.message_only)
    def test_default_1dim(self):
        self.parse(u"cdef int[:] x")
        self.parse(u"cdef short int[:] x")
    def test_default_ndim(self):
        self.parse(u"cdef int[:,:,:,:,:] x")
        self.parse(u"cdef unsigned long int[:,:,:,:,:] x")
        self.parse(u"cdef unsigned int[:,:,:,:,:] x")
    def test_zero_offset(self):
        self.parse(u"cdef long double[0:] x")
        self.parse(u"cdef int[0:] x")
    def test_zero_offset_ndim(self):
        self.parse(u"cdef int[0:,0:,0:,0:] x")
    def test_def_arg(self):
        self.parse(u"def foo(int[:,:] x): pass")
    def test_cdef_arg(self):
        self.parse(u"cdef foo(int[:,:] x): pass")
    def test_general_slice(self):
        # explicit access/packing specifiers on each axis
        self.parse(u'cdef float[::ptr, ::direct & contig, 0::full & strided] x')
    def test_non_slice_memview(self):
        self.not_parseable(u"An axis specification in memoryview declaration does not have a ':'.",
                u"cdef double[:foo, bar] x")
        self.not_parseable(u"An axis specification in memoryview declaration does not have a ':'.",
                u"cdef double[0:foo, bar] x")
    def test_basic(self):
        t = self.parse(u"cdef int[:] x")
        memv_node = t.stats[0].base_type
        self.assert_(isinstance(memv_node, MemoryViewSliceTypeNode))
    # we also test other similar declarations (buffers, anonymous C arrays)
    # since the parsing has to distinguish between them.
    def disable_test_no_buf_arg(self): # TODO
        self.not_parseable(u"Expected ']'",
                           u"cdef extern foo(object[int, ndim=2])")
    def disable_test_parse_sizeof(self): # TODO
        self.parse(u"sizeof(int[NN])")
        self.parse(u"sizeof(int[])")
        self.parse(u"sizeof(int[][NN])")
        self.not_parseable(u"Expected an identifier or literal",
                           u"sizeof(int[:NN])")
        self.not_parseable(u"Expected ']'",
                           u"sizeof(foo[dtype=bar]")
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    import unittest
    unittest.main()
| bsd-3-clause |
Eureka22/ASM_xf | PythonD/site_python/mx/DateTime/LazyModule.py | 8 | 2570 | """ Helper to enable simple lazy module import.
'Lazy' means the actual import is deferred until an attribute is
requested from the module's namespace. This has the advantage of
allowing all imports to be done at the top of a script (in a
prominent and visible place) without having a great impact
on startup time.
Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
See the documentation for further information on copyrights,
or contact the author. All Rights Reserved.
"""
class LazyModule:

    """Import wrapper that defers the real import until first use.

    An instance stands in for module ``name`` inside the given
    namespaces.  The first time a non-special attribute is requested,
    the real module is imported (using ``globals`` as the global
    namespace), registered in ``locals`` under ``name`` — usually
    replacing this wrapper — and its namespace is copied onto the
    wrapper for fast subsequent access.

    Example::

        ISO = LazyModule('ISO', locals(), globals())
        t = ISO.Week(1998, 1, 1)   # triggers the actual import

    """

    def __init__(self, name, locals, globals=None):
        """Create a wrapper for module ``name``.

        The module will later be registered in ``locals`` under the
        given name.  ``globals`` defaults to ``locals``.
        """
        self.__locals__ = locals
        self.__globals__ = globals if globals is not None else locals
        # Qualify the name with the importing package, if known.
        parent = self.__globals__.get('__name__', '')
        self.__importname__ = name
        self.__name__ = (parent + '.' + name) if parent else name

    def __getattr__(self, what):
        """Perform the deferred import and delegate the lookup to it."""
        modname = self.__importname__
        # print 'Loading module', modname
        module = __import__(modname, self.__locals__, self.__globals__, '*')
        self.__locals__[modname] = module
        # Copy the module's symbols onto this instance so later attribute
        # access bypasses __getattr__ entirely.
        self.__dict__.update(module.__dict__)
        # print 'lazy module', modname, 'import trigger:', what
        return getattr(module, what)

    def __repr__(self):
        return "<lazy module '%s'>" % self.__name__
| gpl-2.0 |
skirsdeda/django | django/contrib/gis/gdal/prototypes/generation.py | 100 | 3976 | """
This module contains functions that generate ctypes prototypes for the
GDAL routines.
"""
from ctypes import c_char_p, c_double, c_int, c_void_p
from django.contrib.gis.gdal.prototypes.errcheck import (
check_arg_errcode, check_errcode, check_geom, check_geom_offset,
check_pointer, check_srs, check_str_arg, check_string, check_const_string)
class gdal_char_p(c_char_p):
    # Marker subclass of c_char_p: lets the error-checking routines
    # recognize GDAL-allocated strings whose memory they must free
    # (see string_output with str_result=True).
    pass
def double_output(func, argtypes, errcheck=False, strarg=False):
    """Configure ``func`` as a ctypes routine returning a C double.

    ``errcheck`` installs the GDAL error-code checker; ``strarg``
    installs the string-argument checker (and takes precedence when
    both flags are given, matching historical behavior).
    """
    func.argtypes = argtypes
    func.restype = c_double
    checker = None
    if errcheck:
        checker = check_arg_errcode
    if strarg:
        checker = check_str_arg
    if checker is not None:
        func.errcheck = checker
    return func
def geom_output(func, argtypes, offset=None):
    """Configure ``func`` to produce an OGR geometry.

    Without ``offset`` the geometry pointer is the direct return value;
    with ``offset`` the routine returns an error code and the geometry
    comes back by reference at that offset into the argument list.
    """
    func.argtypes = argtypes
    if offset:
        # Error code returned; geometry retrieved by reference.
        func.restype = c_int

        def geomerrcheck(result, func, cargs):
            return check_geom_offset(result, func, cargs, offset)

        func.errcheck = geomerrcheck
    else:
        # Geometry pointer returned directly.
        func.restype = c_void_p
        func.errcheck = check_geom
    return func
def int_output(func, argtypes):
    """Configure ``func`` as a ctypes routine returning a plain C int."""
    func.restype = c_int
    func.argtypes = argtypes
    return func
def srs_output(func, argtypes):
    """Configure ``func`` to return a pointer to an OGR Spatial
    Reference System, validated through ``check_srs``.
    """
    func.restype = c_void_p
    func.argtypes = argtypes
    func.errcheck = check_srs
    return func
def const_string_output(func, argtypes, offset=None, decoding=None):
    """Configure ``func`` to return a constant string.

    With ``offset`` the routine returns an error code and the string is
    fetched by reference; otherwise the string is returned directly.
    ``decoding`` optionally decodes the bytes result to text.
    """
    func.argtypes = argtypes
    func.restype = c_int if offset else c_char_p

    def _check_const(result, func, cargs):
        checked = check_const_string(result, func, cargs, offset=offset)
        if checked and decoding:
            checked = checked.decode(decoding)
        return checked

    func.errcheck = _check_const
    return func
def string_output(func, argtypes, offset=-1, str_result=False, decoding=None):
    """Configure ``func`` to return a string from a GDAL pointer.

    When ``str_result`` is true the string itself is the return value
    (typed as ``gdal_char_p`` so the error checker can free the
    GDAL-allocated memory via VSIFree); otherwise an error code is
    returned and the string is retrieved by reference at ``offset``.
    ``decoding`` optionally decodes the bytes result to text.
    """
    func.argtypes = argtypes
    func.restype = gdal_char_p if str_result else c_int

    def _check_str(result, func, cargs):
        checked = check_string(result, func, cargs,
                               offset=offset, str_result=str_result)
        if checked and decoding:
            checked = checked.decode(decoding)
        return checked

    func.errcheck = _check_str
    return func
def void_output(func, argtypes, errcheck=True):
    """Configure ``func`` for routines with no meaningful return value.

    With ``errcheck`` (the default) the routine is assumed to return a
    status code that is validated via ``check_errcode``; otherwise the
    routine is treated as truly void.
    """
    if argtypes:
        func.argtypes = argtypes
    if errcheck:
        func.restype = c_int
        func.errcheck = check_errcode
    else:
        # Genuinely void: nothing to inspect.
        func.restype = None
    return func
def voidptr_output(func, argtypes):
    "For functions that return c_void_p."
    # check_pointer raises an exception for NULL results.
    func.argtypes = argtypes
    func.restype = c_void_p
    func.errcheck = check_pointer
    return func
| bsd-3-clause |
pianomania/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 16 | 50617 | # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..utils import check_array, check_random_state, check_X_y
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from ..utils import deprecated
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
# Integer codes for the learning-rate schedules, as expected by the
# compiled (Cython) SGD routines in sgd_fast.
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
                       "pa1": 4, "pa2": 5}

# Integer codes for the penalty (regularization) types, same convention.
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}

DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
    """Base class for SGD classification and regression."""

    def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=0.1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 warm_start=False, average=False):
        # Store all hyper-parameters verbatim; they are checked by
        # _validate_params() at the end of construction.
        self.loss = loss
        self.penalty = penalty
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.alpha = alpha
        self.C = C
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.n_iter = n_iter
        self.shuffle = shuffle
        self.random_state = random_state
        self.verbose = verbose
        self.eta0 = eta0
        self.power_t = power_t
        self.warm_start = warm_start
        self.average = average

        self._validate_params()

    def set_params(self, *args, **kwargs):
        # Re-run validation whenever parameters are changed after
        # construction.
        super(BaseSGD, self).set_params(*args, **kwargs)
        self._validate_params()
        return self

    @abstractmethod
    def fit(self, X, y):
        """Fit model."""

    def _validate_params(self):
        """Validate input params. """
        if not isinstance(self.shuffle, bool):
            raise ValueError("shuffle must be either True or False")
        if self.n_iter <= 0:
            raise ValueError("n_iter must be > zero")
        if not (0.0 <= self.l1_ratio <= 1.0):
            raise ValueError("l1_ratio must be in [0, 1]")
        if self.alpha < 0.0:
            raise ValueError("alpha must be >= 0")
        if self.learning_rate in ("constant", "invscaling"):
            if self.eta0 <= 0.0:
                raise ValueError("eta0 must be > 0")
        if self.learning_rate == "optimal" and self.alpha == 0:
            raise ValueError("alpha must be > 0 since "
                             "learning_rate is 'optimal'. alpha is used "
                             "to compute the optimal learning rate.")

        # raises ValueError if not registered
        self._get_penalty_type(self.penalty)
        self._get_learning_rate_type(self.learning_rate)

        if self.loss not in self.loss_functions:
            raise ValueError("The loss %s is not supported. " % self.loss)

    def _get_loss_function(self, loss):
        """Get concrete ``LossFunction`` object for str ``loss``. """
        try:
            loss_ = self.loss_functions[loss]
            loss_class, args = loss_[0], loss_[1:]
            # The epsilon-parameterized losses use the current value of
            # self.epsilon instead of the table's default argument.
            if loss in ('huber', 'epsilon_insensitive',
                        'squared_epsilon_insensitive'):
                args = (self.epsilon, )
            return loss_class(*args)
        except KeyError:
            raise ValueError("The loss %s is not supported. " % loss)

    def _get_learning_rate_type(self, learning_rate):
        # Translate a schedule name into the integer code used by the
        # compiled SGD routines.
        try:
            return LEARNING_RATE_TYPES[learning_rate]
        except KeyError:
            raise ValueError("learning rate %s "
                             "is not supported. " % learning_rate)

    def _get_penalty_type(self, penalty):
        # Translate a penalty name into the integer code used by the
        # compiled SGD routines; names are case-insensitive.
        penalty = str(penalty).lower()
        try:
            return PENALTY_TYPES[penalty]
        except KeyError:
            raise ValueError("Penalty %s is not supported. " % penalty)

    def _validate_sample_weight(self, sample_weight, n_samples):
        """Set the sample weight array."""
        if sample_weight is None:
            # uniform sample weights
            sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
        else:
            # user-provided array
            sample_weight = np.asarray(sample_weight, dtype=np.float64,
                                       order="C")
        if sample_weight.shape[0] != n_samples:
            raise ValueError("Shapes of X and sample_weight do not match.")
        return sample_weight

    def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
                                intercept_init=None):
        """Allocate mem for parameters; initialize if provided."""
        if n_classes > 2:
            # allocate coef_ for multi-class
            if coef_init is not None:
                coef_init = np.asarray(coef_init, order="C")
                if coef_init.shape != (n_classes, n_features):
                    raise ValueError("Provided ``coef_`` does not match "
                                     "dataset. ")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros((n_classes, n_features),
                                      dtype=np.float64, order="C")

            # allocate intercept_ for multi-class
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, order="C")
                if intercept_init.shape != (n_classes, ):
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init
            else:
                self.intercept_ = np.zeros(n_classes, dtype=np.float64,
                                           order="C")
        else:
            # allocate coef_ for binary problem
            if coef_init is not None:
                coef_init = np.asarray(coef_init, dtype=np.float64,
                                       order="C")
                coef_init = coef_init.ravel()
                if coef_init.shape != (n_features,):
                    raise ValueError("Provided coef_init does not "
                                     "match dataset.")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros(n_features,
                                      dtype=np.float64,
                                      order="C")

            # allocate intercept_ for binary problem
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, dtype=np.float64)
                if intercept_init.shape != (1,) and intercept_init.shape != ():
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init.reshape(1,)
            else:
                self.intercept_ = np.zeros(1, dtype=np.float64, order="C")

        # initialize average parameters
        if self.average > 0:
            # Averaged SGD keeps the per-step ("standard") parameters
            # separate from the running averages.
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = np.zeros(self.coef_.shape,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
                                               dtype=np.float64,
                                               order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
               pos_weight, neg_weight, sample_weight):
    """Fit a single binary classifier.

    The i'th class is considered the "positive" class.
    """
    # if average is not true, average_coef, and average_intercept will be
    # unused
    y_i, coef, intercept, average_coef, average_intercept = \
        _prepare_fit_binary(est, y, i)
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
    dataset, intercept_decay = make_dataset(X, y_i, sample_weight)

    # Translate string hyper-parameters into the integer codes expected
    # by the compiled SGD routines.
    penalty_type = est._get_penalty_type(est.penalty)
    learning_rate_type = est._get_learning_rate_type(learning_rate)

    # XXX should have random_state_!
    random_state = check_random_state(est.random_state)
    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    seed = random_state.randint(0, np.iinfo(np.int32).max)

    if not est.average:
        return plain_sgd(coef, intercept, est.loss_function_,
                         penalty_type, alpha, C, est.l1_ratio,
                         dataset, n_iter, int(est.fit_intercept),
                         int(est.verbose), int(est.shuffle), seed,
                         pos_weight, neg_weight,
                         learning_rate_type, est.eta0,
                         est.power_t, est.t_, intercept_decay)

    else:
        standard_coef, standard_intercept, average_coef, \
            average_intercept = average_sgd(coef, intercept, average_coef,
                                            average_intercept,
                                            est.loss_function_, penalty_type,
                                            alpha, C, est.l1_ratio, dataset,
                                            n_iter, int(est.fit_intercept),
                                            int(est.verbose), int(est.shuffle),
                                            seed, pos_weight, neg_weight,
                                            learning_rate_type, est.eta0,
                                            est.power_t, est.t_,
                                            intercept_decay,
                                            est.average)

        # NOTE: side effect — the averaged intercept is written back onto
        # the estimator here, not returned to the caller.
        if len(est.classes_) == 2:
            est.average_intercept_[0] = average_intercept
        else:
            est.average_intercept_[i] = average_intercept

        return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
                                           LinearClassifierMixin)):

    # Mapping: loss name -> (LossFunction class, default constructor args).
    loss_functions = {
        "hinge": (Hinge, 1.0),
        "squared_hinge": (SquaredHinge, 1.0),
        "perceptron": (Hinge, 0.0),
        "log": (Log, ),
        "modified_huber": (ModifiedHuber, ),
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }

    @abstractmethod
    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
                 epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 class_weight=None, warm_start=False, average=False):

        super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
                                                alpha=alpha, l1_ratio=l1_ratio,
                                                fit_intercept=fit_intercept,
                                                n_iter=n_iter, shuffle=shuffle,
                                                verbose=verbose,
                                                epsilon=epsilon,
                                                random_state=random_state,
                                                learning_rate=learning_rate,
                                                eta0=eta0, power_t=power_t,
                                                warm_start=warm_start,
                                                average=average)
        self.class_weight = class_weight
        self.n_jobs = int(n_jobs)

    @property
    @deprecated("Attribute loss_function was deprecated in version 0.19 and "
                "will be removed in 0.21. Use 'loss_function_' instead")
    def loss_function(self):
        # Backwards-compatible alias for the fitted loss function object.
        return self.loss_function_

    def _partial_fit(self, X, y, alpha, C,
                     loss, learning_rate, n_iter,
                     classes, sample_weight,
                     coef_init, intercept_init):
        """Shared core of ``fit`` and ``partial_fit``: validate input,
        (re)allocate parameters if needed, and dispatch to the binary or
        multiclass training routine."""
        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")

        n_samples, n_features = X.shape

        self._validate_params()
        _check_partial_fit_first_call(self, classes)

        n_classes = self.classes_.shape[0]

        # Allocate datastructures from input arguments
        self._expanded_class_weight = compute_class_weight(self.class_weight,
                                                           self.classes_, y)
        sample_weight = self._validate_sample_weight(sample_weight, n_samples)

        if getattr(self, "coef_", None) is None or coef_init is not None:
            self._allocate_parameter_mem(n_classes, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous "
                             "data %d." % (n_features, self.coef_.shape[-1]))

        self.loss_function_ = self._get_loss_function(loss)
        if not hasattr(self, "t_"):
            # t_ counts the total number of weight updates seen so far.
            self.t_ = 1.0

        # delegate to concrete training procedure
        if n_classes > 2:
            self._fit_multiclass(X, y, alpha=alpha, C=C,
                                 learning_rate=learning_rate,
                                 sample_weight=sample_weight, n_iter=n_iter)
        elif n_classes == 2:
            self._fit_binary(X, y, alpha=alpha, C=C,
                             learning_rate=learning_rate,
                             sample_weight=sample_weight, n_iter=n_iter)
        else:
            raise ValueError("The number of class labels must be "
                             "greater than one.")

        return self

    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        """Full fit: reset state (unless warm-starting) and run
        ``_partial_fit`` for ``self.n_iter`` passes."""
        if hasattr(self, "classes_"):
            self.classes_ = None

        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
        n_samples, n_features = X.shape

        # labels can be encoded as float, int, or string literals
        # np.unique sorts in asc order; largest class id is positive class
        classes = np.unique(y)

        if self.warm_start and hasattr(self, "coef_"):
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None

        if self.average > 0:
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = None
            self.average_intercept_ = None

        # Clear iteration count for multiple call to fit.
        self.t_ = 1.0

        self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
                          classes, sample_weight, coef_init, intercept_init)

        return self

    def _fit_binary(self, X, y, alpha, C, sample_weight,
                    learning_rate, n_iter):
        """Fit a binary classifier on X and y. """
        coef, intercept = fit_binary(self, 1, X, y, alpha, C,
                                     learning_rate, n_iter,
                                     self._expanded_class_weight[1],
                                     self._expanded_class_weight[0],
                                     sample_weight)

        self.t_ += n_iter * X.shape[0]

        # need to be 2d
        if self.average > 0:
            # Expose the averaged weights only once enough samples have
            # been seen (self.average is the start threshold).
            if self.average <= self.t_ - 1:
                self.coef_ = self.average_coef_.reshape(1, -1)
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_.reshape(1, -1)
                self.standard_intercept_ = np.atleast_1d(intercept)
                self.intercept_ = self.standard_intercept_
        else:
            self.coef_ = coef.reshape(1, -1)
            # intercept is a float, need to convert it to an array of length 1
            self.intercept_ = np.atleast_1d(intercept)

    def _fit_multiclass(self, X, y, alpha, C, learning_rate,
                        sample_weight, n_iter):
        """Fit a multi-class classifier by combining binary classifiers

        Each binary classifier predicts one class versus all others. This
        strategy is called OVA: One Versus All.
        """
        # Use joblib to fit OvA in parallel.
        result = Parallel(n_jobs=self.n_jobs, backend="threading",
                          verbose=self.verbose)(
            delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
                                n_iter, self._expanded_class_weight[i], 1.,
                                sample_weight)
            for i in range(len(self.classes_)))

        # fit_binary writes coefficients in place; only the intercepts
        # need to be collected from the parallel results.
        for i, (_, intercept) in enumerate(result):
            self.intercept_[i] = intercept

        self.t_ += n_iter * X.shape[0]

        if self.average > 0:
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.standard_intercept_ = np.atleast_1d(self.intercept_)
                self.intercept_ = self.standard_intercept_

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of the training data

        y : numpy array, shape (n_samples,)
            Subset of the target values

        classes : array, shape (n_classes,)
            Classes across all calls to partial_fit.
            Can be obtained by via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.

        Returns
        -------
        self : returns an instance of self.
        """
        if self.class_weight in ['balanced']:
            raise ValueError("class_weight '{0}' is not supported for "
                             "partial_fit. In order to use 'balanced' weights,"
                             " use compute_class_weight('{0}', classes, y). "
                             "In place of y you can us a large enough sample "
                             "of the full training set target to properly "
                             "estimate the class frequency distributions. "
                             "Pass the resulting weights as the class_weight "
                             "parameter.".format(self.class_weight))
        return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
                                 learning_rate=self.learning_rate, n_iter=1,
                                 classes=classes, sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)

    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data

        y : numpy array, shape (n_samples,)
            Target values

        coef_init : array, shape (n_classes, n_features)
            The initial coefficients to warm-start the optimization.

        intercept_init : array, shape (n_classes,)
            The initial intercept to warm-start the optimization.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed. These weights will
            be multiplied with class_weight (passed through the
            constructor) if class_weight is specified

        Returns
        -------
        self : returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init, intercept_init=intercept_init,
                         sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
loss_function_ : concrete ``LossFunction``
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
    """Compute probability estimates for ``X``; dispatches on ``self.loss``."""
    if self.loss == "log":
        # Logistic loss: use the shared logistic-regression probability path.
        return self._predict_proba_lr(X)

    elif self.loss == "modified_huber":
        binary = (len(self.classes_) == 2)
        scores = self.decision_function(X)

        if binary:
            prob2 = np.ones((scores.shape[0], 2))
            # NOTE: basic slicing, so ``prob`` is a *view* into prob2's
            # second column; all in-place ops below fill prob2[:, 1].
            prob = prob2[:, 1]
        else:
            prob = scores

        # np.clip's third positional argument is the output array, so this
        # writes the clipped scores into ``prob`` in place.
        np.clip(scores, -1, 1, prob)
        prob += 1.
        prob /= 2.

        if binary:
            # Column 0 is the complement probability: 1 - P(class 1).
            prob2[:, 0] -= prob
            prob = prob2
        else:
            # the above might assign zero to all classes, which doesn't
            # normalize neatly; work around this to produce uniform
            # probabilities
            prob_sum = prob.sum(axis=1)
            all_zero = (prob_sum == 0)
            if np.any(all_zero):
                prob[all_zero, :] = 1
                prob_sum[all_zero] = len(self.classes_)

            # normalize
            prob /= prob_sum.reshape((prob.shape[0], -1))

        return prob

    else:
        raise NotImplementedError("predict_(log_)proba only supported when"
                                  " loss='log' or loss='modified_huber' "
                                  "(%r given)" % self.loss)
@property
def predict_log_proba(self):
    """Log of probability estimates.
    This method is only available for log loss and modified Huber loss.
    When loss="modified_huber", probability estimates may be hard zeros
    and ones, so taking the logarithm is not possible.
    See ``predict_proba`` for details.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Returns
    -------
    T : array-like, shape (n_samples, n_classes)
        Returns the log-probability of the sample for each class in the
        model, where classes are ordered as they are in
        `self.classes_`.
    """
    # Same property trick as ``predict_proba``: validate on access,
    # return the bound implementation.
    self._check_proba()
    return self._predict_log_proba
def _predict_log_proba(self, X):
    """Elementwise natural log of the probability estimates for ``X``."""
    proba = self.predict_proba(X)
    return np.log(proba)
class BaseSGDRegressor(BaseSGD, RegressorMixin):
    """Base class for regressors trained with (optionally averaged) SGD."""

    # Map from loss-name string to (loss class, extra constructor args)
    # consumed by BaseSGD._get_loss_function.
    loss_functions = {
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }

    @abstractmethod
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False, average=False):
        # Forward all hyper-parameters unchanged to the shared initializer.
        super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                               alpha=alpha, l1_ratio=l1_ratio,
                                               fit_intercept=fit_intercept,
                                               n_iter=n_iter, shuffle=shuffle,
                                               verbose=verbose,
                                               epsilon=epsilon,
                                               random_state=random_state,
                                               learning_rate=learning_rate,
                                               eta0=eta0, power_t=power_t,
                                               warm_start=warm_start,
                                               average=average)

    def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
                     n_iter, sample_weight,
                     coef_init, intercept_init):
        """Validate inputs, (re)allocate parameters and run the SGD loop."""
        X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
        y = astype(y, np.float64, copy=False)

        n_samples, n_features = X.shape

        self._validate_params()

        # Allocate datastructures from input arguments
        sample_weight = self._validate_sample_weight(sample_weight, n_samples)

        if getattr(self, "coef_", None) is None:
            # First call: allocate coef_/intercept_ for a single output.
            self._allocate_parameter_mem(1, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous "
                             "data %d." % (n_features, self.coef_.shape[-1]))

        if self.average > 0 and getattr(self, "average_coef_", None) is None:
            # Lazily allocate the running-average buffers used by average_sgd.
            self.average_coef_ = np.zeros(n_features,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(1,
                                               dtype=np.float64,
                                               order="C")

        self._fit_regressor(X, y, alpha, C, loss, learning_rate,
                            sample_weight, n_iter)

        return self

    def partial_fit(self, X, y, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of training data
        y : numpy array of shape (n_samples,)
            Subset of target values
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.
        Returns
        -------
        self : returns an instance of self.
        """
        # A partial fit is a single epoch (n_iter=1) over this batch.
        return self._partial_fit(X, y, self.alpha, C=1.0,
                                 loss=self.loss,
                                 learning_rate=self.learning_rate, n_iter=1,
                                 sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)

    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        """Full fit: reset or warm-start parameters, then delegate."""
        if self.warm_start and getattr(self, "coef_", None) is not None:
            # Reuse previous solution as the starting point.
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None

        if self.average > 0:
            self.standard_intercept_ = self.intercept_
            self.standard_coef_ = self.coef_
            self.average_coef_ = None
            self.average_intercept_ = None

        # Clear iteration count for multiple call to fit.
        self.t_ = 1.0

        return self._partial_fit(X, y, alpha, C, loss, learning_rate,
                                 self.n_iter, sample_weight,
                                 coef_init, intercept_init)

    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data
        y : numpy array, shape (n_samples,)
            Target values
        coef_init : array, shape (n_features,)
            The initial coefficients to warm-start the optimization.
        intercept_init : array, shape (1,)
            The initial intercept to warm-start the optimization.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init,
                         intercept_init=intercept_init,
                         sample_weight=sample_weight)

    def _decision_function(self, X):
        """Predict using the linear model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)

        X = check_array(X, accept_sparse='csr')

        # Dense/sparse-safe dot product plus intercept, flattened to 1-D.
        scores = safe_sparse_dot(X, self.coef_.T,
                                 dense_output=True) + self.intercept_
        return scores.ravel()

    def predict(self, X):
        """Predict using the linear model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        return self._decision_function(X)

    def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
                       sample_weight, n_iter):
        """Run the compiled SGD loop (plain or averaged) and store results."""
        dataset, intercept_decay = make_dataset(X, y, sample_weight)

        loss_function = self._get_loss_function(loss)
        penalty_type = self._get_penalty_type(self.penalty)
        learning_rate_type = self._get_learning_rate_type(learning_rate)

        if not hasattr(self, "t_"):
            self.t_ = 1.0

        random_state = check_random_state(self.random_state)
        # numpy mtrand expects a C long which is a signed 32 bit integer under
        # Windows
        seed = random_state.randint(0, np.iinfo(np.int32).max)

        if self.average > 0:
            self.standard_coef_, self.standard_intercept_, \
                self.average_coef_, self.average_intercept_ =\
                average_sgd(self.standard_coef_,
                            self.standard_intercept_[0],
                            self.average_coef_,
                            self.average_intercept_[0],
                            loss_function,
                            penalty_type,
                            alpha, C,
                            self.l1_ratio,
                            dataset,
                            n_iter,
                            int(self.fit_intercept),
                            int(self.verbose),
                            int(self.shuffle),
                            seed,
                            1.0, 1.0,
                            learning_rate_type,
                            self.eta0, self.power_t, self.t_,
                            intercept_decay, self.average)

            self.average_intercept_ = np.atleast_1d(self.average_intercept_)
            self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
            self.t_ += n_iter * X.shape[0]

            # Expose averaged parameters once enough samples have been seen;
            # until then keep the plain (standard) SGD solution.
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.intercept_ = self.standard_intercept_

        else:
            self.coef_, self.intercept_ = \
                plain_sgd(self.coef_,
                          self.intercept_[0],
                          loss_function,
                          penalty_type,
                          alpha, C,
                          self.l1_ratio,
                          dataset,
                          n_iter,
                          int(self.fit_intercept),
                          int(self.verbose),
                          int(self.shuffle),
                          seed,
                          1.0, 1.0,
                          learning_rate_type,
                          self.eta0, self.power_t, self.t_,
                          intercept_decay)

            self.t_ += n_iter * X.shape[0]
            self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor):
    """Linear model fitted by minimizing a regularized empirical loss with SGD
    SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a decreasing strength schedule (aka learning rate).
    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.
    This implementation works with data represented as dense numpy arrays of
    floating point values for the features.
    Read more in the :ref:`User Guide <sgd>`.
    Parameters
    ----------
    loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
                or 'squared_epsilon_insensitive'
        The loss function to be used. Defaults to 'squared_loss' which refers
        to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
        focus less on getting outliers correct by switching from squared to
        linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
        errors less than epsilon and is linear past that; this is the loss
        function used in SVR. 'squared_epsilon_insensitive' is the same but
        becomes squared loss past a tolerance of epsilon.
    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.
    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001
        Also used to compute learning_rate when set to 'optimal'.
    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.
    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.
    n_iter : int, optional
        The number of passes over the training data (aka epochs). The number
        of iterations is set to 1 if using partial_fit.
        Defaults to 5.
    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.
    verbose : integer, optional
        The verbosity level.
    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.
    learning_rate : string, optional
        The learning rate schedule:
        - 'constant': eta = eta0
        - 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
        - 'invscaling': eta = eta0 / pow(t, power_t)
        where t0 is chosen by a heuristic proposed by Leon Bottou.
    eta0 : double, optional
        The initial learning rate [default 0.01].
    power_t : double, optional
        The exponent for inverse scaling learning rate [default 0.25].
    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
        samples.
    Attributes
    ----------
    coef_ : array, shape (n_features,)
        Weights assigned to the features.
    intercept_ : array, shape (1,)
        The intercept term.
    average_coef_ : array, shape (n_features,)
        Averaged weights assigned to the features.
    average_intercept_ : array, shape (1,)
        The averaged intercept term.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = linear_model.SGDRegressor()
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
           fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
           loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
           random_state=None, shuffle=True, verbose=0, warm_start=False)
    See also
    --------
    Ridge, ElasticNet, Lasso, SVR
    """
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False, average=False):
        # Pure delegation: all logic lives in BaseSGDRegressor.
        super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                           alpha=alpha, l1_ratio=l1_ratio,
                                           fit_intercept=fit_intercept,
                                           n_iter=n_iter, shuffle=shuffle,
                                           verbose=verbose,
                                           epsilon=epsilon,
                                           random_state=random_state,
                                           learning_rate=learning_rate,
                                           eta0=eta0, power_t=power_t,
                                           warm_start=warm_start,
                                           average=average)
| bsd-3-clause |
ojengwa/talk | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/trie/datrie.py | 1301 | 1178 | from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from pip._vendor.six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
    """Adapter exposing a ``datrie.Trie`` through the abstract Trie interface.

    The underlying datrie structure needs its alphabet up front, so the
    constructor first collects every character used by the keys.
    """

    def __init__(self, data):
        alphabet = set()
        for key in data.keys():
            if not isinstance(key, text_type):
                raise TypeError("All keys must be strings")
            alphabet.update(key)

        self._data = DATrie("".join(alphabet))
        for key, value in data.items():
            self._data[key] = value

    def __contains__(self, key):
        return key in self._data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        # Iteration over keys is intentionally unsupported by this adapter.
        raise NotImplementedError()

    def __getitem__(self, key):
        return self._data[key]

    def keys(self, prefix=None):
        return self._data.keys(prefix)

    def has_keys_with_prefix(self, prefix):
        return self._data.has_keys_with_prefix(prefix)

    def longest_prefix(self, prefix):
        return self._data.longest_prefix(prefix)

    def longest_prefix_item(self, prefix):
        return self._data.longest_prefix_item(prefix)
| mit |
anandbhoraskar/Diamond | src/collectors/xen_collector/xen_collector.py | 53 | 2217 | # coding=utf-8
"""
The XENCollector grabs usage/allocation metrics using libvirt
#### Dependencies
* python-libvirt
"""
from diamond.collector import Collector
import os
try:
import libvirt
except ImportError:
libvirt = None
class XENCollector(Collector):
    """Diamond collector publishing Xen memory/core allocation via libvirt."""

    def get_default_config_help(self):
        # No collector-specific options beyond the base class.
        config_help = super(XENCollector, self).get_default_config_help()
        config_help.update({
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(XENCollector, self).get_default_config()
        config.update({
            'path': 'xen'
        })
        return config

    def collect(self):
        """
        Collect libvirt data
        """
        if libvirt is None:
            # libvirt failed to import at module load time; nothing to do.
            self.log.error('Unable to import either libvirt')
            return {}

        # Open a restricted (non-root) connection to the hypervisor
        conn = libvirt.openReadOnly(None)
        # Get hardware info
        conninfo = conn.getInfo()
        # Initialize variables
        memallocated = 0
        coresallocated = 0
        totalcores = 0
        results = {}
        domIds = conn.listDomainsID()
        if 0 in domIds:
            # Total cores
            # NOTE(review): domain 0 (dom0) info()[3] is taken as the host's
            # total core count — confirm against libvirt semantics.
            domU = conn.lookupByID(0)
            totalcores = domU.info()[3]
        # Free Space
        s = os.statvfs('/')
        freeSpace = (s.f_bavail * s.f_frsize) / 1024
        # Calculate allocated memory and cores
        for i in domIds:
            # Ignore 0
            if i == 0:
                continue
            domU = conn.lookupByID(i)
            dominfo = domU.info()
            memallocated += dominfo[2]
            if i > 0:
                coresallocated += dominfo[3]
        results = {
            'InstalledMem': conninfo[1],
            'MemAllocated': memallocated / 1024,
            'MemFree': conninfo[1] - (memallocated / 1024),
            'AllocatedCores': coresallocated,
            'DiskFree': freeSpace,
            'TotalCores': totalcores,
            'FreeCores': (totalcores - coresallocated)
        }
        # Publish every computed metric (precision 0).
        for k in results.keys():
            self.publish(k, results[k], 0)
| mit |
mhue/scikit-learn | examples/text/document_classification_20newsgroups.py | 222 | 10500 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

# parse commandline arguments
op = OptionParser()
op.add_option("--report",
              action="store_true", dest="print_report",
              help="Print a detailed classification report.")
op.add_option("--chi2_select",
              action="store", type="int", dest="select_chi2",
              help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
              action="store_true", dest="print_cm",
              help="Print the confusion matrix.")
op.add_option("--top10",
              action="store_true", dest="print_top10",
              help="Print ten most discriminative terms per class"
                   " for every classifier.")
op.add_option("--all_categories",
              action="store_true", dest="all_categories",
              help="Whether to use all categories or not.")
op.add_option("--use_hashing",
              action="store_true",
              help="Use a hashing vectorizer.")
op.add_option("--n_features",
              action="store", type=int, default=2 ** 16,
              help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
              action="store_true",
              help="Remove newsgroup information that is easily overfit: "
                   "headers, signatures, and quoting.")

(opts, args) = op.parse_args()
if len(args) > 0:
    # op.error() prints the message and exits; sys.exit is a safety net.
    op.error("this script takes no arguments.")
    sys.exit(1)

print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
    categories = None
else:
    categories = [
        'alt.atheism',
        'talk.religion.misc',
        'comp.graphics',
        'sci.space',
    ]

if opts.filtered:
    # Strip metadata that makes classification artificially easy.
    remove = ('headers', 'footers', 'quotes')
else:
    remove = ()

print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")

data_train = fetch_20newsgroups(subset='train', categories=categories,
                                shuffle=True, random_state=42,
                                remove=remove)

data_test = fetch_20newsgroups(subset='test', categories=categories,
                               shuffle=True, random_state=42,
                               remove=remove)
print('data loaded')

categories = data_train.target_names    # for case categories == None
def size_mb(docs):
    """Total size of the UTF-8 encoded documents, in megabytes."""
    total_bytes = sum(len(doc.encode('utf-8')) for doc in docs)
    return total_bytes / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)

print("%d documents - %0.3fMB (training set)" % (
    len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
    len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()

# split a training set and a test set
y_train, y_test = data_train.target, data_test.target

print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
    vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
                                   n_features=opts.n_features)
    X_train = vectorizer.transform(data_train.data)
else:
    vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
                                 stop_words='english')
    X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()

print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()

# mapping from integer feature name to original token string
if opts.use_hashing:
    # hashing is one-way: feature indices cannot be mapped back to tokens
    feature_names = None
else:
    feature_names = vectorizer.get_feature_names()

if opts.select_chi2:
    print("Extracting %d best features by a chi-squared test" %
          opts.select_chi2)
    t0 = time()
    ch2 = SelectKBest(chi2, k=opts.select_chi2)
    X_train = ch2.fit_transform(X_train, y_train)
    X_test = ch2.transform(X_test)
    if feature_names:
        # keep selected feature names
        feature_names = [feature_names[i] for i
                         in ch2.get_support(indices=True)]
    print("done in %fs" % (time() - t0))
    print()

if feature_names:
    feature_names = np.asarray(feature_names)
def trim(s):
    """Trim string to fit on terminal (assuming 80-column display)"""
    if len(s) <= 80:
        return s
    return s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
    """Fit ``clf`` on the module-level train split and report timing/accuracy.

    Uses the globals X_train, y_train, X_test, y_test, opts, feature_names
    and categories defined above.  Returns (classifier name, accuracy,
    train time, test time).
    """
    print('_' * 80)
    print("Training: ")
    print(clf)
    t0 = time()
    clf.fit(X_train, y_train)
    train_time = time() - t0
    print("train time: %0.3fs" % train_time)

    t0 = time()
    pred = clf.predict(X_test)
    test_time = time() - t0
    print("test time: %0.3fs" % test_time)

    score = metrics.accuracy_score(y_test, pred)
    print("accuracy: %0.3f" % score)

    if hasattr(clf, 'coef_'):
        print("dimensionality: %d" % clf.coef_.shape[1])
        print("density: %f" % density(clf.coef_))

        if opts.print_top10 and feature_names is not None:
            print("top 10 keywords per class:")
            for i, category in enumerate(categories):
                # largest coefficients correspond to the most
                # discriminative terms for this class
                top10 = np.argsort(clf.coef_[i])[-10:]
                print(trim("%s: %s"
                           % (category, " ".join(feature_names[top10]))))
        print()

    if opts.print_report:
        print("classification report:")
        print(metrics.classification_report(y_test, pred,
                                            target_names=categories))

    if opts.print_cm:
        print("confusion matrix:")
        print(metrics.confusion_matrix(y_test, pred))

    print()
    # str(clf) starts with the class name followed by '('.
    clf_descr = str(clf).split('(')[0]
    return clf_descr, score, train_time, test_time
results = []
for clf, name in (
        (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
        (Perceptron(n_iter=50), "Perceptron"),
        (PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
        (KNeighborsClassifier(n_neighbors=10), "kNN"),
        (RandomForestClassifier(n_estimators=100), "Random forest")):
    print('=' * 80)
    print(name)
    results.append(benchmark(clf))

for penalty in ["l2", "l1"]:
    print('=' * 80)
    print("%s penalty" % penalty.upper())
    # Train Liblinear model
    results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
                                       dual=False, tol=1e-3)))

    # Train SGD model
    results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
                                           penalty=penalty)))

# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
                                       penalty="elasticnet")))

# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))

# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))

print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
    ('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
    ('classification', LinearSVC())
])))

# make some plots

indices = np.arange(len(results))

# transpose: list of 4-tuples -> 4 parallel lists
results = [[x[i] for x in results] for i in range(4)]

clf_names, score, training_time, test_time = results
# normalize timings to [0, 1] so they fit on the same axis as accuracy
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)

plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)

for i, c in zip(indices, clf_names):
    plt.text(-.3, i, c)

plt.show()
ttm/oscEmRede | venv/lib/python2.7/site-packages/networkx/algorithms/chordal/tests/test_chordal.py | 75 | 2377 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestMCS:
    """Nose-style tests for networkx chordal-graph utilities."""

    def setUp(self):
        # simple graph
        connected_chordal_G=nx.Graph()
        connected_chordal_G.add_edges_from([(1,2),(1,3),(2,3),(2,4),(3,4),
                                            (3,5),(3,6),(4,5),(4,6),(5,6)])
        self.connected_chordal_G=connected_chordal_G

        # same chordal structure plus a disconnected edge (7,8) and an
        # isolated node 9
        chordal_G = nx.Graph()
        chordal_G.add_edges_from([(1,2),(1,3),(2,3),(2,4),(3,4),
                                  (3,5),(3,6),(4,5),(4,6),(5,6),(7,8)])
        chordal_G.add_node(9)
        self.chordal_G=chordal_G

        # negative example used by the assertions below
        non_chordal_G = nx.Graph()
        non_chordal_G.add_edges_from([(1,2),(1,3),(2,4),(2,5),(3,4),(3,5)])
        self.non_chordal_G = non_chordal_G

    def test_is_chordal(self):
        assert_false(nx.is_chordal(self.non_chordal_G))
        assert_true(nx.is_chordal(self.chordal_G))
        assert_true(nx.is_chordal(self.connected_chordal_G))
        assert_true(nx.is_chordal(nx.complete_graph(3)))
        assert_true(nx.is_chordal(nx.cycle_graph(3)))
        assert_false(nx.is_chordal(nx.cycle_graph(5)))

    def test_induced_nodes(self):
        G = nx.generators.classic.path_graph(10)
        I = nx.find_induced_nodes(G,1,9,2)
        assert_equal(I,set([1,2,3,4,5,6,7,8,9]))
        # treewidth bound 1 is too small for this query
        assert_raises(nx.NetworkXTreewidthBoundExceeded,
                      nx.find_induced_nodes,G,1,9,1)
        I = nx.find_induced_nodes(self.chordal_G,1,6)
        assert_equal(I,set([1,2,4,6]))
        # non-chordal input must be rejected
        assert_raises(nx.NetworkXError,
                      nx.find_induced_nodes,self.non_chordal_G,1,5)

    def test_chordal_find_cliques(self):
        cliques = set([frozenset([9]),frozenset([7,8]),frozenset([1,2,3]),
                       frozenset([2,3,4]),frozenset([3,4,5,6])])
        assert_equal(nx.chordal_graph_cliques(self.chordal_G),cliques)

    def test_chordal_find_cliques_path(self):
        G = nx.path_graph(10)
        cliqueset = nx.chordal_graph_cliques(G)
        # every edge of a path graph is a maximal clique
        for (u,v) in G.edges_iter():
            assert_true(frozenset([u,v]) in cliqueset
                        or frozenset([v,u]) in cliqueset)

    def test_chordal_find_cliquesCC(self):
        cliques = set([frozenset([1,2,3]),frozenset([2,3,4]),
                       frozenset([3,4,5,6])])
        assert_equal(nx.chordal_graph_cliques(self.connected_chordal_G),cliques)
| gpl-3.0 |
jschneier/cmsplugin-filer | cmsplugin_filer_teaser/cms_plugins.py | 9 | 3734 | from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from django.template.loader import select_template
from . import models
from .conf import settings
class FilerTeaserPlugin(CMSPluginBase):
    """
    TODO: this plugin is becoming very similar to the image plugin... code
    should be re-used somehow.
    """
    module = 'Filer'
    model = models.FilerTeaser
    raw_id_fields = ('page_link',)
    name = _("Teaser")

    TEMPLATE_NAME = 'cmsplugin_filer_teaser/plugins/teaser/%s.html'
    render_template = TEMPLATE_NAME % 'default'

    fieldsets = (
        (None, {'fields': [
            'title',
            'image',
            'image_url',
            'description',
        ]}),
        (_('More'), {
            'classes': ('collapse',),
            'fields': [
                'use_autoscale',
                ('width', 'height'),
                'free_link',
                'page_link',
                'target_blank'
            ]
        })
    )
    # Expose the optional "style" selector only when choices are configured.
    if settings.CMSPLUGIN_FILER_TEASER_STYLE_CHOICES:
        fieldsets[0][1]['fields'].append('style')

    def _get_thumbnail_options(self, context, instance):
        """
        Return the size and options of the thumbnail that should be inserted
        """
        width, height = None, None
        subject_location = False
        placeholder_width = context.get('width', None)
        placeholder_height = context.get('height', None)
        if instance.use_autoscale and placeholder_width:
            # use the placeholder width as a hint for sizing
            width = int(placeholder_width)
            if instance.use_autoscale and placeholder_height:
                height = int(placeholder_height)
        elif instance.width:
            width = instance.width
            if instance.height:
                height = instance.height
        if instance.image:
            if instance.image.subject_location:
                subject_location = instance.image.subject_location
            if not height and width:
                # height was not externally defined: use ratio to scale it by the width
                height = int( float(width)*float(instance.image.height)/float(instance.image.width) )
            if not width and height:
                # width was not externally defined: use ratio to scale it by the height
                width = int( float(height)*float(instance.image.width)/float(instance.image.height) )
            if not width:
                # width is still not defined. fallback the actual image width
                width = instance.image.width
            if not height:
                # height is still not defined. fallback the actual image height
                height = instance.image.height
        return {'size': (width, height),
                'subject_location': subject_location}

    def get_thumbnail(self, context, instance):
        # Only teasers backed by a filer image can produce a thumbnail.
        if instance.image:
            return instance.image.image.file.get_thumbnail(self._get_thumbnail_options(context, instance))

    def render(self, context, instance, placeholder):
        # Pick the most specific template available for this teaser style.
        self.render_template = select_template((
            'cmsplugin_filer_teaser/plugins/teaser.html',  # backwards compatibility. deprecated!
            self.TEMPLATE_NAME % instance.style,
            self.TEMPLATE_NAME % 'default')
        )
        options = self._get_thumbnail_options(context, instance)
        context.update({
            'instance': instance,
            'link': instance.link,
            'opts': options,
            'size': options.get('size', None),
            'placeholder': placeholder
        })
        return context
plugin_pool.register_plugin(FilerTeaserPlugin)
| bsd-3-clause |
SOKP/external_chromium_org | third_party/jinja2/parser.py | 637 | 35186 | # -*- coding: utf-8 -*-
"""
jinja2.parser
~~~~~~~~~~~~~
Implements the template parser.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
from jinja2.lexer import describe_token, describe_token_expr
from jinja2._compat import next, imap
#: statements that call into the parser's statement handlers
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
                                 'macro', 'include', 'from', 'import',
                                 'set'])
# token types of the comparison operators
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
class Parser(object):
"""This is the central parsing class Jinja2 uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
def __init__(self, environment, source, name=None, filename=None,
             state=None):
    """Tokenize ``source`` and set up parser state for ``environment``."""
    self.environment = environment
    self.stream = environment._tokenize(source, name, filename, state)
    self.name = name
    self.filename = filename
    self.closed = False
    # tag name -> parse callback contributed by loaded extensions
    self.extensions = {}
    for extension in environment.iter_extensions():
        for tag in extension.tags:
            self.extensions[tag] = extension.parse
    self._last_identifier = 0
    self._tag_stack = []
    self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
"""
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(self, name, end_token_stack, lineno):
expected = []
for exprs in end_token_stack:
expected.extend(imap(describe_token_expr, exprs))
if end_token_stack:
currently_looking = ' or '.join(
"'%s'" % describe_token_expr(expr)
for expr in end_token_stack[-1])
else:
currently_looking = None
if name is None:
message = ['Unexpected end of template.']
else:
message = ['Encountered unknown tag \'%s\'.' % name]
if currently_looking:
if name is not None and name in expected:
message.append('You probably made a nesting mistake. Jinja '
'is expecting this tag, but currently looking '
'for %s.' % currently_looking)
else:
message.append('Jinja was looking for the following tags: '
'%s.' % currently_looking)
if self._tag_stack:
message.append('The innermost block that needs to be '
'closed is \'%s\'.' % self._tag_stack[-1])
self.fail(' '.join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
"""
return self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(self, end_tokens=None, lineno=None):
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
return self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
return rv
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
if token.type != 'name':
self.fail('tag name expected', token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
return getattr(self, 'parse_' + self.stream.current.value)()
if token.value == 'call':
return self.parse_call_block()
if token.value == 'filter':
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
return ext(self)
# did not work out, remove the token we pushed by accident
# from the stack so that the unknown tag fail function can
# produce a proper error message.
self._tag_stack.pop()
pop_tag = False
self.fail_unknown_tag(token.value, token.lineno)
finally:
if pop_tag:
self._tag_stack.pop()
def parse_statements(self, end_tokens, drop_needle=False):
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
current token is a colon and skips it if there is one. Then it checks
for the block end and parses until if one of the `end_tokens` is
reached. Per default the active token in the stream at the end of
the call is the matched end token. If this is not wanted `drop_needle`
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
self.stream.skip_if('colon')
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
self.stream.expect('block_end')
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
if self.stream.current.type == 'eof':
self.fail_eof(end_tokens)
if drop_needle:
next(self.stream)
return result
def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target()
self.stream.expect('assign')
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
lineno = self.stream.expect('name:for').lineno
target = self.parse_assign_target(extra_end_rules=('name:in',))
self.stream.expect('name:in')
iter = self.parse_tuple(with_condexpr=False,
extra_end_rules=('name:recursive',))
test = None
if self.stream.skip_if('name:if'):
test = self.parse_expression()
recursive = self.stream.skip_if('name:recursive')
body = self.parse_statements(('name:endfor', 'name:else'))
if next(self.stream).value == 'endfor':
else_ = []
else:
else_ = self.parse_statements(('name:endfor',), drop_needle=True)
return nodes.For(target, iter, body, else_, test,
recursive, lineno=lineno)
def parse_if(self):
"""Parse an if construct."""
node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(('name:elif', 'name:else',
'name:endif'))
token = next(self.stream)
if token.test('name:elif'):
new_node = nodes.If(lineno=self.stream.current.lineno)
node.else_ = [new_node]
node = new_node
continue
elif token.test('name:else'):
node.else_ = self.parse_statements(('name:endif',),
drop_needle=True)
else:
node.else_ = []
break
return result
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
node.name = self.stream.expect('name').value
node.scoped = self.stream.skip_if('name:scoped')
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
if self.stream.current.type == 'sub':
self.fail('Block names in Jinja have to be valid Python '
'identifiers and may not contain hyphens, use an '
'underscore instead.')
node.body = self.parse_statements(('name:endblock',), drop_needle=True)
self.stream.skip_if('name:' + node.name)
return node
def parse_extends(self):
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(self, node, default):
if self.stream.current.test_any('name:with', 'name:without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
else:
node.with_context = default
return node
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
if self.stream.current.test('name:ignore') and \
self.stream.look().test('name:missing'):
node.ignore_missing = True
self.stream.skip(2)
else:
node.ignore_missing = False
return self.parse_import_context(node, True)
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:as')
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:import')
node.names = []
def parse_context():
if self.stream.current.value in ('with', 'without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
return True
return False
while 1:
if node.names:
self.stream.expect('comma')
if self.stream.current.type == 'name':
if parse_context():
break
target = self.parse_assign_target(name_only=True)
if target.name.startswith('_'):
self.fail('names starting with an underline can not '
'be imported', target.lineno,
exc=TemplateAssertionError)
if self.stream.skip_if('name:as'):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
if parse_context() or self.stream.current.type != 'comma':
break
else:
break
if not hasattr(node, 'with_context'):
node.with_context = False
self.stream.skip_if('comma')
return node
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
self.stream.expect('lparen')
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
arg = self.parse_assign_target(name_only=True)
arg.set_ctx('param')
if self.stream.skip_if('assign'):
defaults.append(self.parse_expression())
args.append(arg)
self.stream.expect('rparen')
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
if self.stream.current.type == 'lparen':
self.parse_signature(node)
else:
node.args = []
node.defaults = []
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
self.fail('expected call', node.lineno)
node.body = self.parse_statements(('name:endcall',), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
node.body = self.parse_statements(('name:endfilter',),
drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(('name:endmacro',),
drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
def parse_assign_target(self, with_tuple=True, name_only=False,
extra_end_rules=None):
"""Parse an assignment target. As Jinja2 allows assignments to
tuples, this function can parse all allowed assignment targets. Per
default assignments to tuples are parsed, that can be disable however
by setting `with_tuple` to `False`. If only assignments to names are
wanted `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function.
"""
if name_only:
token = self.stream.expect('name')
target = nodes.Name(token.value, 'store', lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(simplified=True,
extra_end_rules=extra_end_rules)
else:
target = self.parse_primary()
target.set_ctx('store')
if not target.can_assign():
self.fail('can\'t assign to %r' % target.__class__.
__name__.lower(), target.lineno)
return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
while self.stream.skip_if('name:if'):
expr2 = self.parse_or()
if self.stream.skip_if('name:else'):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if('name:or'):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if('name:and'):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
def parse_compare(self):
lineno = self.stream.current.lineno
expr = self.parse_add()
ops = []
while 1:
token_type = self.stream.current.type
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_add()))
elif self.stream.skip_if('name:in'):
ops.append(nodes.Operand('in', self.parse_add()))
elif self.stream.current.test('name:not') and \
self.stream.look().test('name:in'):
self.stream.skip(2)
ops.append(nodes.Operand('notin', self.parse_add()))
else:
break
lineno = self.stream.current.lineno
if not ops:
return expr
return nodes.Compare(expr, ops, lineno=lineno)
def parse_add(self):
lineno = self.stream.current.lineno
left = self.parse_sub()
while self.stream.current.type == 'add':
next(self.stream)
right = self.parse_sub()
left = nodes.Add(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_sub(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type == 'sub':
next(self.stream)
right = self.parse_concat()
left = nodes.Sub(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_mul()]
while self.stream.current.type == 'tilde':
next(self.stream)
args.append(self.parse_mul())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
def parse_mul(self):
lineno = self.stream.current.lineno
left = self.parse_div()
while self.stream.current.type == 'mul':
next(self.stream)
right = self.parse_div()
left = nodes.Mul(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_div(self):
lineno = self.stream.current.lineno
left = self.parse_floordiv()
while self.stream.current.type == 'div':
next(self.stream)
right = self.parse_floordiv()
left = nodes.Div(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_floordiv(self):
lineno = self.stream.current.lineno
left = self.parse_mod()
while self.stream.current.type == 'floordiv':
next(self.stream)
right = self.parse_mod()
left = nodes.FloorDiv(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_mod(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type == 'mod':
next(self.stream)
right = self.parse_pow()
left = nodes.Mod(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == 'pow':
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_unary(self, with_filter=True):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
if token_type == 'sub':
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
elif token_type == 'add':
next(self.stream)
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
else:
node = self.parse_primary()
node = self.parse_postfix(node)
if with_filter:
node = self.parse_filter_expr(node)
return node
def parse_primary(self):
token = self.stream.current
if token.type == 'name':
if token.value in ('true', 'false', 'True', 'False'):
node = nodes.Const(token.value in ('true', 'True'),
lineno=token.lineno)
elif token.value in ('none', 'None'):
node = nodes.Const(None, lineno=token.lineno)
else:
node = nodes.Name(token.value, 'load', lineno=token.lineno)
next(self.stream)
elif token.type == 'string':
next(self.stream)
buf = [token.value]
lineno = token.lineno
while self.stream.current.type == 'string':
buf.append(self.stream.current.value)
next(self.stream)
node = nodes.Const(''.join(buf), lineno=lineno)
elif token.type in ('integer', 'float'):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
elif token.type == 'lparen':
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
self.stream.expect('rparen')
elif token.type == 'lbracket':
node = self.parse_list()
elif token.type == 'lbrace':
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
return node
def parse_tuple(self, simplified=False, with_condexpr=True,
extra_end_rules=None, explicit_parentheses=False):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
if no commas where found.
The default parsing mode is a full tuple. If `simplified` is `True`
only names and literals are parsed. The `no_condexpr` parameter is
forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = self.parse_primary
elif with_condexpr:
parse = self.parse_expression
else:
parse = lambda: self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
self.stream.expect('comma')
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
self.fail('Expected an expression, got \'%s\'' %
describe_token(self.stream.current))
return nodes.Tuple(args, 'load', lineno=lineno)
def parse_list(self):
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
self.stream.expect('rbrace')
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'dot' or token_type == 'lbracket':
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_filter_expr(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'pipe':
node = self.parse_filter(node)
elif token_type == 'name' and self.stream.current.value == 'is':
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
if attr_token.type == 'name':
return nodes.Getattr(node, attr_token.value, 'load',
lineno=token.lineno)
elif attr_token.type != 'integer':
self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
if token.type == 'lbracket':
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
self.fail('expected subscript expression', self.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
if self.stream.current.type == 'colon':
next(self.stream)
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != 'colon':
return node
next(self.stream)
args = [node]
if self.stream.current.type == 'colon':
args.append(None)
elif self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == 'colon':
next(self.stream)
if self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.Slice(lineno=lineno, *args)
def parse_call(self, node):
token = self.stream.expect('lparen')
args = []
kwargs = []
dyn_args = dyn_kwargs = None
require_comma = False
def ensure(expr):
if not expr:
self.fail('invalid syntax for function call expression',
token.lineno)
while self.stream.current.type != 'rparen':
if require_comma:
self.stream.expect('comma')
# support for trailing comma
if self.stream.current.type == 'rparen':
break
if self.stream.current.type == 'mul':
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
elif self.stream.current.type == 'pow':
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
ensure(dyn_args is None and dyn_kwargs is None)
if self.stream.current.type == 'name' and \
self.stream.look().type == 'assign':
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
kwargs.append(nodes.Keyword(key, value,
lineno=value.lineno))
else:
ensure(not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect('rparen')
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
lineno=token.lineno)
def parse_filter(self, node, start_inline=False):
while self.stream.current.type == 'pipe' or start_inline:
if not start_inline:
next(self.stream)
token = self.stream.expect('name')
name = token.value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
node = nodes.Filter(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
start_inline = False
return node
def parse_test(self, node):
token = next(self.stream)
if self.stream.current.test('name:not'):
next(self.stream)
negated = True
else:
negated = False
name = self.stream.expect('name').value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
dyn_args = dyn_kwargs = None
kwargs = []
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
elif self.stream.current.type in ('name', 'string', 'integer',
'float', 'lparen', 'lbracket',
'lbrace') and not \
self.stream.current.test_any('name:else', 'name:or',
'name:and'):
if self.stream.current.test('name:is'):
self.fail('You cannot chain multiple tests with is')
args = [self.parse_expression()]
else:
args = []
node = nodes.Test(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
def subparse(self, end_tokens=None):
body = []
data_buffer = []
add_data = data_buffer.append
if end_tokens is not None:
self._end_token_stack.append(end_tokens)
def flush_data():
if data_buffer:
lineno = data_buffer[0].lineno
body.append(nodes.Output(data_buffer[:], lineno=lineno))
del data_buffer[:]
try:
while self.stream:
token = self.stream.current
if token.type == 'data':
if token.value:
add_data(nodes.TemplateData(token.value,
lineno=token.lineno))
next(self.stream)
elif token.type == 'variable_begin':
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
self.stream.expect('variable_end')
elif token.type == 'block_begin':
flush_data()
next(self.stream)
if end_tokens is not None and \
self.stream.current.test_any(*end_tokens):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
self.stream.expect('block_end')
else:
raise AssertionError('internal parsing error')
flush_data()
finally:
if end_tokens is not None:
self._end_token_stack.pop()
return body
def parse(self):
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result
| bsd-3-clause |
CanalTP/navitia | source/jormungandr/jormungandr/transient_socket.py | 2 | 5712 | # coding=utf-8
# Copyright (c) 2001-2020, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
import zmq
import time
import gevent
from contextlib import contextmanager
import logging
import six
from typing import Dict
from gevent.lock import BoundedSemaphore
from collections import defaultdict, namedtuple
from sortedcontainers import SortedList
# Module-wide lock serializing access to the shared TransientSocket._sockets pool.
_semaphore = BoundedSemaphore(1)
class NoAliveSockets(Exception):
    """Raised internally when no pooled socket is fresh enough to be reused."""
class TransientSocket(object):
    """
    With this class, sockets will be shut down and reopened if the TTL runs out.
    This is useful especially for services that are hosted on AWS and accessed by Zmq.
    Why?
    Because jormungandr creates sockets via the AWS's autobalancer, when a new instance is popped by auto scaling,
    despite the auto balancer, sockets created previously will still stick to the old instance. We have to close the
    socket and reopen one so that traffic will be lead to new instances.
    """

    # TODO: use dataclass in python3.7
    # t: creation timestamp of the socket; socket: the zmq REQ socket itself.
    _Socket = namedtuple('_Socket', ['t', 'socket'])
    # _sockets is a map of TransientSocket vs a sorted list of tuple of created time and tcp sockets
    # the sorted list is arranged in a way that the first element is the most recent one and the last element is oldest
    # one.
    _sockets = defaultdict(lambda: SortedList([], key=lambda s: -s.t)) # type: Dict[TransientSocket, SortedList]
    # seconds between two runs of the reaper greenlet (overridden by init_socket_reaper)
    _reaper_interval = 10
    _logger = logging.getLogger(__name__)
    def __init__(self, name, zmq_context, socket_path, socket_ttl, *args, **kwargs):
        # name: human-readable service name used in log messages.
        # socket_path: zmq endpoint to connect to; socket_ttl: max socket age in seconds.
        super(TransientSocket, self).__init__(*args, **kwargs)
        self.name = name
        self._zmq_context = zmq_context
        self._socket_path = socket_path
        self.ttl = socket_ttl
    @contextmanager
    def socket(self):
        # Context manager yielding a zmq REQ socket: reuses a pooled one if it
        # is younger than the TTL, otherwise opens a fresh connection.
        # We don't want to waste time to close sockets in this function since the performance is critical
        # The cleaning job is done in another greenlet.
        try:
            with _semaphore:
                # self._sockets's first element is the most recent one
                t, socket = self._sockets[self][
                    0
                ] # If self._sockets is empty, a IndexError exception will be raised
                now = time.time()
                if now - t < self.ttl:
                    # we find an alive socket! we move the ownership to this greenlet and use it!
                    self._sockets[self].pop(0)
                else:
                    # even the newest pooled socket is too old; a fresh one is needed
                    raise NoAliveSockets
        except (IndexError, NoAliveSockets): # there is no socket available: let's create one
            self._logger.info("opening one socket for %s", self.name)
            socket = self._zmq_context.socket(zmq.REQ)
            socket.connect(self._socket_path)
            # a brand-new socket starts its TTL clock now
            t = time.time()
        try:
            yield socket
        except:
            # NOTE(review): this bare except swallows exceptions raised inside the
            # caller's `with` body after logging them — presumably intentional
            # best-effort behaviour; confirm before changing.
            self._logger.exception("")
        finally:
            if not socket.closed:
                if time.time() - t >= self.ttl:
                    # socket aged out while in use: do not return it to the pool
                    self.close_socket(socket, self.name)
                else:
                    # still fresh: hand it back for reuse by other greenlets
                    with _semaphore:
                        self._sockets[self].add(TransientSocket._Socket(t, socket))
    @classmethod
    def close_socket(cls, socket, name):
        # Close a socket immediately; LINGER=0 drops any unsent messages.
        cls._logger.info("closing one socket for %s", name)
        try:
            socket.setsockopt(zmq.LINGER, 0)
            socket.close()
        except:
            cls._logger.exception("")
    @classmethod
    def _reap(cls, o, sockets):
        # Remove and close every pooled socket of `o` older than its TTL.
        oldest_creation_time = time.time() - o.ttl
        with _semaphore:
            # list is sorted newest-first, so everything from index i on is expired
            i = sockets.bisect_left(TransientSocket._Socket(oldest_creation_time, None))
            sockets_to_be_closed = sockets[i:] # no worries, it's a copy
            del sockets[i:]
        # close outside the semaphore to keep the critical section short
        for _, socket in sockets_to_be_closed:
            cls.close_socket(socket, o.name)
    @classmethod
    def _reap_sockets(cls):
        # One reaper pass over every known TransientSocket's pool.
        cls._logger.info("reaping sockets")
        for o, sockets in six.iteritems(cls._sockets):
            TransientSocket._reap(o, sockets)
    @classmethod
    def gevent_reap_sockets(cls):
        # Greenlet loop: reap expired sockets every _reaper_interval seconds.
        while True:
            cls._reap_sockets()
            gevent.idle(-1)
            gevent.sleep(cls._reaper_interval)
    @classmethod
    def init_socket_reaper(cls, config):
        # Configure the reaper interval and spawn the background greenlet.
        cls._reaper_interval = config['ASGARD_ZMQ_SOCKET_REAPER_INTERVAL']
        # start a greenlet that handle connection closing when idle
        cls._logger.info("spawning a socket reaper with gevent")
        gevent.spawn(cls.gevent_reap_sockets)
| agpl-3.0 |
kytvi2p/Sigil | 3rdparty/python/Lib/tkinter/commondialog.py | 153 | 1412 | # base class for tk common dialogues
#
# this module provides a base class for accessing the common
# dialogues available in Tk 4.2 and newer. use filedialog,
# colorchooser, and messagebox to access the individual
# dialogs.
#
# written by Fredrik Lundh, May 1997
#
from tkinter import *
class Dialog:
    """Base class for the Tk common dialogs.

    Subclasses set `command` to the Tk command name and may override the
    `_fixoptions`/`_fixresult` hooks to adjust options and results.
    """

    # Tk command implementing the dialog; defined by subclasses.
    command = None

    def __init__(self, master=None, **options):
        # FIXME: should this be placed on the module level instead?
        if TkVersion < 4.2:
            raise TclError("this module requires Tk 4.2 or newer")
        self.master = master
        self.options = options
        # fall back to the 'parent' option when no explicit master was given
        if not master and options.get('parent'):
            self.master = options['parent']

    def _fixoptions(self):
        """Hook: subclasses may normalize self.options before showing."""
        pass

    def _fixresult(self, widget, result):
        """Hook: subclasses may post-process the Tk result."""
        return result

    def show(self, **options):
        """Display the dialog and return its (possibly fixed-up) result."""
        # merge per-call options into the stored ones
        self.options.update(options)
        self._fixoptions()
        # we need a dummy widget to properly process the options
        # (at least as long as we use Tkinter 1.63)
        helper = Frame(self.master)
        try:
            result = helper.tk.call(self.command, *helper._options(self.options))
            result = self._fixresult(helper, result)
        finally:
            # get rid of the widget; ignore errors if it is already gone
            try:
                helper.destroy()
            except:
                pass
        return result
| gpl-3.0 |
Lab603/PicEncyclopedias | jni-build/jni/include/tensorflow/python/kernel_tests/cwise_ops_test.py | 4 | 71457 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for coefficient-wise operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x ** y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
_NEG = lambda x: -x
_ABS = abs
_LT = lambda x, y: x < y
_LE = lambda x, y: x <= y
_GT = lambda x, y: x > y
_GE = lambda x, y: x >= y
_AND = lambda x, y: x & y
_OR = lambda x, y: x | y
_XOR = lambda x, y: x ^ y
_INV = lambda x: ~x
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
  """Zero out entries of `x` below `thresh` (mutates `x` in place) and
  return the result as a (tf.SparseTensor, dense-values ndarray) pair."""
  x[x < thresh] = 0
  nz = x.nonzero()
  idx = np.transpose(np.vstack(nz).astype(index_dtype))
  vals = x[nz]
  sp = tf.SparseTensor(indices=idx, values=vals, shape=x.shape)
  return sp, vals
class UnaryOpTest(tf.test.TestCase):
  """Tests elementwise unary TF ops against NumPy reference functions.

  Covers dense and sparse inputs, CPU and GPU execution, several dtypes,
  and (for floating/complex dtypes) gradient checks on CPU.
  """
  def _compareCpu(self, x, np_func, tf_func):
    """Compare tf_func(x) with np_func(x) on CPU, then gradient-check.

    For float32/float64 the op output is scaled by 1.1 so the test also
    exercises gradient flow through a downstream op.  fp16 theoretical
    gradients are compared against fp32 numerical gradients, since fp16
    numerical differentiation is too imprecise.
    """
    np_ans = np_func(x)
    with self.test_session(use_gpu=False):
      inx = tf.convert_to_tensor(x)
      if x.dtype in (np.float32, np.float64):
        y = 1.1 * tf_func(inx)
        np_ans *= 1.1
      else:
        y = tf_func(inx)
      tf_cpu = y.eval()
      self.assertShapeEqual(np_ans, y)
      if x.dtype == np.float16:
        # fp16 only carries ~3 decimal digits of precision.
        self.assertAllClose(np_ans, tf_cpu, rtol=1e-3, atol=1e-3)
      else:
        self.assertAllClose(np_ans, tf_cpu)
      # No gradient check for these complex-valued ops.
      if (x.dtype in (np.complex64, np.complex128) and
          tf_func in (tf.sign, tf.sqrt, tf.rsqrt, tf.log)):
        return # Return early
      if x.dtype == np.float16:
        s = list(np.shape(x))
        jacob_t, _ = tf.test.compute_gradient(inx,
                                              s,
                                              y,
                                              s,
                                              x_init_value=x)
        # Recompute the numerical jacobian in fp32 for precision.
        # NOTE(review): np.float was removed in NumPy >= 1.24; fine for
        # the NumPy version this TF release pins.
        xf = x.astype(np.float)
        inxf = tf.convert_to_tensor(xf)
        yf = tf_func(inxf)
        _, jacob_n = tf.test.compute_gradient(inxf,
                                              s,
                                              yf,
                                              s,
                                              x_init_value=xf)
        jacob_n = jacob_n.astype(np.float16)
        self.assertAllClose(jacob_t, jacob_n, rtol=5e-3, atol=5e-3)
      elif x.dtype in (np.float32, np.complex64):
        s = list(np.shape(x))
        jacob_t, jacob_n = tf.test.compute_gradient(inx,
                                                    s,
                                                    y,
                                                    s,
                                                    x_init_value=x)
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype in (np.float64, np.complex128):
        s = list(np.shape(x))
        jacob_t, jacob_n = tf.test.compute_gradient(inx,
                                                    s,
                                                    y,
                                                    s,
                                                    x_init_value=x)
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
  def _check(self, result_tensor, result_np, input_sp_t, tol):
    """Assert a sparse op result keeps indices/shape and matches values."""
    self.assertTrue(isinstance(result_tensor, tf.SparseTensor))
    self.assertTrue(isinstance(input_sp_t, tf.SparseTensor))
    self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
    self.assertAllEqual(input_sp_t.shape.eval(), result_tensor.shape.eval())
    if tol is None:
      self.assertAllClose(result_np, result_tensor.values.eval())
    else:
      self.assertAllClose(result_np, result_tensor.values.eval(), rtol=tol,
                          atol=tol)
  def _compareSparseCpu(self, x, np_func, tf_func, tol):
    """Sparse-input variant of _compareCpu (no gradient check)."""
    x_sp, x_sp_vals = _sparsify(x)
    res_np = np_func(x_sp_vals)
    with self.test_session(use_gpu=False):
      self._check(tf_func(x_sp), res_np, x_sp, tol)
  def _compareGpu(self, x, np_func, tf_func):
    """Compare tf_func(x) with np_func(x) on GPU (values only)."""
    np_ans = np_func(x)
    with self.test_session(use_gpu=True):
      result = tf_func(tf.convert_to_tensor(x))
      tf_gpu = result.eval()
    if x.dtype == np.float16:
      self.assertAllClose(np_ans, tf_gpu, rtol=1e-3, atol=1e-3)
    else:
      self.assertAllClose(np_ans, tf_gpu)
    # TODO(zhifengc/ke): make gradient checker work on GPU.
  def _compareSparseGpu(self, x, np_func, tf_func, tol):
    """Sparse-input variant of _compareGpu."""
    x_sp, x_sp_vals = _sparsify(x)
    res_np = np_func(x_sp_vals)
    with self.test_session(use_gpu=True):
      self._check(tf_func(x_sp), res_np, x_sp, tol)
  def _compareBoth(self, x, np_func, tf_func):
    """Dense comparison on both CPU and GPU."""
    self._compareCpu(x, np_func, tf_func)
    self._compareGpu(x, np_func, tf_func)
  def _compareBothSparse(self, x, np_func, tf_func, tol=None):
    """Sparse comparison on both CPU and GPU."""
    self._compareSparseCpu(x, np_func, tf_func, tol)
    self._compareSparseGpu(x, np_func, tf_func, tol)
  # NumPy reference implementations for ops without a direct NumPy twin.
  def _inv(self, x):
    return 1.0 / x
  def _rsqrt(self, x):
    return self._inv(np.sqrt(x))
  def _sigmoid(self, x):
    return 1.0 / (1.0 + np.exp(-x))
  def _replace_domain_error_with_inf(self, fn):
    """Wrap fn so math-domain ValueErrors become +inf (TF's convention)."""
    def func(x):
      try:
        return fn(x)
      except ValueError as e:
        if "domain error" in str(e):
          return np.inf * np.ones_like(x)
        else:
          raise e
    return func
  def testFloatBasic(self):
    x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
    y = (x + .5).astype(np.float32)     # no zero
    z = (x + 15.5).astype(np.float32)   # all positive
    k = np.arange(-0.90, 0.90, 0.25).astype(np.float32) # between -1 and 1
    self._compareBoth(x, np.abs, tf.abs)
    self._compareBoth(x, np.abs, _ABS)
    self._compareBoth(x, np.negative, tf.neg)
    self._compareBoth(x, np.negative, _NEG)
    self._compareBoth(y, self._inv, tf.inv)
    self._compareBoth(x, np.square, tf.square)
    self._compareBoth(z, np.sqrt, tf.sqrt)
    self._compareBoth(z, self._rsqrt, tf.rsqrt)
    self._compareBoth(x, np.exp, tf.exp)
    self._compareBoth(z, np.log, tf.log)
    self._compareBoth(x, np.tanh, tf.tanh)
    self._compareBoth(x, self._sigmoid, tf.sigmoid)
    self._compareBoth(y, np.sign, tf.sign)
    self._compareBoth(x, np.sin, tf.sin)
    self._compareBoth(x, np.cos, tf.cos)
    self._compareBoth(k, np.arcsin, tf.asin)
    self._compareBoth(k, np.arccos, tf.acos)
    self._compareBoth(x, np.arctan, tf.atan)
    self._compareBoth(x, np.tan, tf.tan)
    self._compareBoth(
        y,
        np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
        tf.lgamma)
    self._compareBoth(x, np.vectorize(math.erf), tf.erf)
    self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
    self._compareBothSparse(x, np.abs, tf.abs)
    self._compareBothSparse(x, np.negative, tf.neg)
    self._compareBothSparse(x, np.square, tf.square)
    self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
    self._compareBothSparse(x, np.tanh, tf.tanh)
    self._compareBothSparse(y, np.sign, tf.sign)
    self._compareBothSparse(x, np.vectorize(math.erf), tf.erf)
  def testFloatTanhEdge(self):
    # tanh saturates to +/-1 for large-magnitude inputs.
    x = np.arange(40, 40 + 6).reshape(6).astype(np.float32)
    self._compareBoth(x, np.tanh, tf.tanh)
    x = np.arange(-40, -40 + 6).reshape(6).astype(np.float32)
    self._compareBoth(x, np.tanh, tf.tanh)
  def testFloatEmpty(self):
    # Ops must accept tensors with a zero-sized dimension.
    x = np.empty((2, 0, 5), dtype=np.float32)
    self._compareBoth(x, np.abs, tf.abs)
    self._compareBoth(x, np.abs, _ABS)
    self._compareBoth(x, np.negative, tf.neg)
    self._compareBoth(x, np.negative, _NEG)
    self._compareBoth(x, self._inv, tf.inv)
    self._compareBoth(x, np.square, tf.square)
    self._compareBoth(x, np.sqrt, tf.sqrt)
    self._compareBoth(x, self._rsqrt, tf.rsqrt)
    self._compareBoth(x, np.exp, tf.exp)
    self._compareBoth(x, np.log, tf.log)
    self._compareBoth(x, np.tanh, tf.tanh)
    self._compareBoth(x, self._sigmoid, tf.sigmoid)
    self._compareBoth(x, np.sign, tf.sign)
    self._compareBoth(x, np.sin, tf.sin)
    self._compareBoth(x, np.cos, tf.cos)
    # Can't use vectorize below, so just use some arbitrary function
    self._compareBoth(x, np.sign, tf.lgamma)
    self._compareBoth(x, np.sign, tf.erf)
    self._compareBoth(x, np.sign, tf.erfc)
    self._compareBoth(x, np.tan, tf.tan)
    self._compareBoth(x, np.arcsin, tf.asin)
    self._compareBoth(x, np.arccos, tf.acos)
    self._compareBoth(x, np.arctan, tf.atan)
    self._compareBothSparse(x, np.abs, tf.abs)
    self._compareBothSparse(x, np.negative, tf.neg)
    self._compareBothSparse(x, np.square, tf.square)
    self._compareBothSparse(x, np.sqrt, tf.sqrt, tol=1e-3)
    self._compareBothSparse(x, np.tanh, tf.tanh)
    self._compareBothSparse(x, np.sign, tf.sign)
    self._compareBothSparse(x, np.sign, tf.erf)
  def testDoubleBasic(self):
    x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)
    y = (x + .5).astype(np.float64)     # no zero
    z = (x + 15.5).astype(np.float64)   # all positive
    k = np.arange(-0.90, 0.90, 0.35).reshape(1, 3, 2).astype(np.float64)  # between -1 and 1
    self._compareBoth(x, np.abs, tf.abs)
    self._compareBoth(x, np.abs, _ABS)
    self._compareBoth(x, np.negative, tf.neg)
    self._compareBoth(x, np.negative, _NEG)
    self._compareBoth(y, self._inv, tf.inv)
    self._compareBoth(x, np.square, tf.square)
    self._compareBoth(z, np.sqrt, tf.sqrt)
    self._compareBoth(z, self._rsqrt, tf.rsqrt)
    self._compareBoth(x, np.exp, tf.exp)
    self._compareBoth(z, np.log, tf.log)
    self._compareBoth(x, np.tanh, tf.tanh)
    self._compareBoth(x, self._sigmoid, tf.sigmoid)
    self._compareBoth(y, np.sign, tf.sign)
    self._compareBoth(x, np.sin, tf.sin)
    self._compareBoth(x, np.cos, tf.cos)
    self._compareBoth(
        y,
        np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
        tf.lgamma)
    self._compareBoth(x, np.vectorize(math.erf), tf.erf)
    self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
    self._compareBoth(x, np.arctan, tf.atan)
    self._compareBoth(k, np.arcsin, tf.asin)
    self._compareBoth(k, np.arccos, tf.acos)
    self._compareBoth(k, np.tan, tf.tan)
    self._compareBothSparse(x, np.abs, tf.abs)
    self._compareBothSparse(x, np.negative, tf.neg)
    self._compareBothSparse(x, np.square, tf.square)
    self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
    self._compareBothSparse(x, np.tanh, tf.tanh)
    self._compareBothSparse(y, np.sign, tf.sign)
    self._compareBothSparse(x, np.vectorize(math.erf), tf.erf)
  def testHalfBasic(self):
    x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float16)
    y = (x + .5).astype(np.float16)     # no zero
    z = (x + 15.5).astype(np.float16)   # all positive
    self._compareBoth(x, np.abs, tf.abs)
    self._compareBoth(x, np.abs, _ABS)
    self._compareBoth(x, np.negative, tf.neg)
    self._compareBoth(x, np.negative, _NEG)
    self._compareBoth(y, self._inv, tf.inv)
    self._compareBoth(x, np.square, tf.square)
    self._compareBoth(z, np.sqrt, tf.sqrt)
    self._compareBoth(z, self._rsqrt, tf.rsqrt)
    self._compareBoth(x, np.exp, tf.exp)
    self._compareBoth(z, np.log, tf.log)
    self._compareBoth(x, np.tanh, tf.tanh)
    self._compareBoth(x, self._sigmoid, tf.sigmoid)
    self._compareBoth(y, np.sign, tf.sign)
    self._compareBoth(x, np.sin, tf.sin)
    self._compareBoth(x, np.cos, tf.cos)
    self._compareBoth(
        y,
        np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
        tf.lgamma)
    self._compareBoth(x, np.vectorize(math.erf), tf.erf)
    self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
    self._compareBothSparse(x, np.abs, tf.abs)
    self._compareBothSparse(x, np.negative, tf.neg)
    self._compareBothSparse(x, np.square, tf.square)
    self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
    self._compareBothSparse(x, np.tanh, tf.tanh)
    self._compareBothSparse(y, np.sign, tf.sign)
    self._compareBothSparse(x, np.vectorize(math.erf), tf.erf, tol=1e-3)
  def testInt32Basic(self):
    x = np.arange(-6, 6, 2).reshape(1, 3, 2).astype(np.int32)
    self._compareCpu(x, np.abs, tf.abs)
    self._compareCpu(x, np.abs, _ABS)
    self._compareBoth(x, np.negative, tf.neg)
    self._compareBoth(x, np.negative, _NEG)
    self._compareBoth(x, np.square, tf.square)
    self._compareCpu(x, np.sign, tf.sign)
    self._compareBothSparse(x, np.abs, tf.abs)
    self._compareBothSparse(x, np.negative, tf.neg)
    self._compareBothSparse(x, np.square, tf.square)
    self._compareBothSparse(x, np.sign, tf.sign)
  def testInt64Basic(self):
    # Values beyond int32 range, to catch accidental narrowing.
    x = np.arange(
        -6 << 40, 6 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
    self._compareCpu(x, np.abs, tf.abs)
    self._compareCpu(x, np.abs, _ABS)
    self._compareCpu(x, np.negative, tf.neg)
    self._compareCpu(x, np.negative, _NEG)
    self._compareCpu(x, np.square, tf.square)
    self._compareCpu(x, np.sign, tf.sign)
    self._compareBothSparse(x, np.abs, tf.abs)
    self._compareBothSparse(x, np.negative, tf.neg)
    self._compareBothSparse(x, np.square, tf.square)
    self._compareBothSparse(x, np.sign, tf.sign)
  def testComplex64Basic(self):
    x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(
        np.complex64)
    y = x + 0.5  # no zeros
    self._compareCpu(x, np.abs, tf.complex_abs)
    self._compareCpu(x, np.abs, _ABS)
    self._compareCpu(x, np.negative, tf.neg)
    self._compareCpu(x, np.negative, _NEG)
    self._compareCpu(y, self._inv, tf.inv)
    self._compareCpu(x, np.square, tf.square)
    self._compareCpu(x, np.sqrt, tf.sqrt)
    self._compareCpu(y, self._rsqrt, tf.rsqrt)
    self._compareCpu(x, np.exp, tf.exp)
    self._compareCpu(y, np.log, tf.log)
    self._compareCpu(x, np.tanh, tf.tanh)
    self._compareCpu(x, self._sigmoid, tf.sigmoid)
    self._compareCpu(x, np.sin, tf.sin)
    self._compareCpu(x, np.cos, tf.cos)
    self._compareBothSparse(x, np.abs, tf.abs)
    self._compareBothSparse(x, np.negative, tf.neg)
    self._compareBothSparse(x, np.square, tf.square)
    self._compareBothSparse(x, np.sqrt, tf.sqrt, 1e-3)
    self._compareBothSparse(x, np.tanh, tf.tanh)
    # Numpy uses an incorrect definition of sign; use the right one instead.
    def complex_sign(x):
      return x / np.abs(x)
    self._compareCpu(y, complex_sign, tf.sign)
    self._compareBothSparse(y, complex_sign, tf.sign)
  def testComplex128Basic(self):
    x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(
        np.complex128)
    y = x + 0.5  # no zeros
    self._compareCpu(x, np.abs, tf.abs)
    self._compareCpu(x, np.abs, _ABS)
    self._compareCpu(x, np.negative, tf.neg)
    self._compareCpu(x, np.negative, _NEG)
    self._compareCpu(y, self._inv, tf.inv)
    self._compareCpu(x, np.square, tf.square)
    self._compareCpu(x, np.sqrt, tf.sqrt)
    self._compareCpu(y, self._rsqrt, tf.rsqrt)
    self._compareCpu(x, np.exp, tf.exp)
    self._compareCpu(y, np.log, tf.log)
    self._compareCpu(x, np.tanh, tf.tanh)
    self._compareCpu(x, self._sigmoid, tf.sigmoid)
    self._compareCpu(x, np.sin, tf.sin)
    self._compareCpu(x, np.cos, tf.cos)
    self._compareBothSparse(x, np.abs, tf.abs)
    self._compareBothSparse(x, np.negative, tf.neg)
    self._compareBothSparse(x, np.square, tf.square)
    self._compareBothSparse(x, np.sqrt, tf.sqrt, 1e-3)
    self._compareBothSparse(x, np.tanh, tf.tanh)
    # Numpy uses an incorrect definition of sign; use the right one instead.
    def complex_sign(x):
      return x / np.abs(x)
    self._compareCpu(y, complex_sign, tf.sign)
    self._compareBothSparse(y, complex_sign, tf.sign)
class BinaryOpTest(tf.test.TestCase):
  """Tests elementwise binary TF ops against NumPy reference functions.

  Covers CPU/GPU execution, operator overloading on Tensors and
  Variables, gradient checks for both arguments, broadcasting across many
  shape pairs, and string/object-dtype inputs.
  """
  def _compareCpu(self, x, y, np_func, tf_func, also_compare_variables=False):
    """Compare tf_func(x, y) with np_func(x, y) on CPU.

    Also checks mixed numpy/Tensor operands (operator precedence over
    numpy's) and, optionally, tf.Variable operands.
    """
    np_ans = np_func(x, y)
    with self.test_session(use_gpu=False):
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_cpu = out.eval()
      # Test that the op takes precedence over numpy operators.
      np_left = tf_func(x, iny).eval()
      np_right = tf_func(inx, y).eval()
      if also_compare_variables:
        var_x = tf.Variable(x)
        var_y = tf.Variable(y)
        tf.initialize_all_variables().run()
        print(type(x), type(y), type(var_x), type(var_y))
        print(type(tf_func(x, var_y)), type(tf_func(var_x, y)))
        np_var_left = tf_func(x, var_y).eval()
        np_var_right = tf_func(var_x, y).eval()
    if np_ans.dtype != np.object:
      self.assertAllClose(np_ans, tf_cpu)
      self.assertAllClose(np_ans, np_left)
      self.assertAllClose(np_ans, np_right)
      if also_compare_variables:
        self.assertAllClose(np_ans, np_var_left)
        self.assertAllClose(np_ans, np_var_right)
    self.assertShapeEqual(np_ans, out)
  def _compareGradientX(self, x, y, np_func, tf_func,
                        numeric_gradient_type=None):
    """Gradient-check tf_func w.r.t. its first argument.

    If numeric_gradient_type is given (used for fp16), the numerical
    jacobian is recomputed at that higher precision.
    """
    z = np_func(x, y)
    zs = list(z.shape)
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        out = 1.1 * tf_func(inx, iny)  # scale to exercise gradient chaining
      else:
        out = tf_func(inx, iny)
      xs = list(x.shape)
      jacob_t, jacob_n = tf.test.compute_gradient(inx,
                                                  xs,
                                                  out,
                                                  zs,
                                                  x_init_value=x)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = tf.convert_to_tensor(xf)
        inyf = tf.convert_to_tensor(yf)
        outf = tf_func(inxf, inyf)
        _, jacob_n = tf.test.compute_gradient(inxf,
                                              xs,
                                              outf,
                                              zs,
                                              x_init_value=xf,
                                              delta=1e-3)
        jacob_n = jacob_n.astype(x.dtype)
      if x.dtype == np.float16:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float32:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float64:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
  def _compareGradientY(self, x, y, np_func, tf_func,
                        numeric_gradient_type=None):
    """Gradient-check tf_func w.r.t. its second argument."""
    z = np_func(x, y)
    zs = list(z.shape)
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      ys = list(np.shape(y))
      jacob_t, jacob_n = tf.test.compute_gradient(iny,
                                                  ys,
                                                  out,
                                                  zs,
                                                  x_init_value=y)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = tf.convert_to_tensor(xf)
        inyf = tf.convert_to_tensor(yf)
        outf = tf_func(inxf, inyf)
        _, jacob_n = tf.test.compute_gradient(inyf,
                                              ys,
                                              outf,
                                              zs,
                                              x_init_value=yf)
        jacob_n = jacob_n.astype(x.dtype)
      if x.dtype == np.float16:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float32:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype == np.float64:
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
  def _compareGpu(self, x, y, np_func, tf_func):
    """Compare tf_func(x, y) with np_func(x, y) on GPU (values only)."""
    np_ans = np_func(x, y)
    with self.test_session(use_gpu=True):
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_gpu = out.eval()
    self.assertAllClose(np_ans, tf_gpu)
    self.assertShapeEqual(np_ans, out)
    # TODO(zhifengc/ke): make gradient checker work on GPU.
  def _compareBoth(self, x, y, np_func, tf_func, also_compare_variables=False):
    """CPU + GPU value check, plus gradient checks for float dtypes.

    igamma/igammac/zeta/polygamma only define gradients w.r.t. the
    second argument; floordiv has no gradient at all.
    """
    self._compareCpu(x, y, np_func, tf_func, also_compare_variables)
    if x.dtype in (np.float16, np.float32, np.float64):
      if tf_func not in (_FLOORDIV, tf.floordiv, tf.igamma, tf.igammac, tf.zeta, tf.polygamma):
        self._compareGradientX(x, y, np_func, tf_func)
        self._compareGradientY(x, y, np_func, tf_func)
      if tf_func in (tf.igamma, tf.igammac, tf.zeta, tf.polygamma):
        # These methods only support gradients in the second parameter
        self._compareGradientY(x, y, np_func, tf_func)
      self._compareGpu(x, y, np_func, tf_func)
  def testFloatBasic(self):
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
    self._compareBoth(x, y, np.add, tf.add, also_compare_variables=True)
    self._compareBoth(x, y, np.subtract, tf.sub)
    self._compareBoth(x, y, np.multiply, tf.mul)
    self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
    self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
    self._compareBoth(x, y, np.add, _ADD)
    self._compareBoth(x, y, np.subtract, _SUB)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
    # Special functions need scipy for a reference; skip if unavailable.
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
      x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
      self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)
      self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)
      # Need x > 1
      self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta, tf.zeta)
      n_small = np.arange(0, 15).reshape(1, 3, 5).astype(np.float32)
      self._compareBoth(n_small, x_pos_small, special.polygamma, tf.polygamma)
    except ImportError as e:
      tf.logging.warn("Cannot test special functions: %s" % str(e))
  def testFloatDifferentShapes(self):
    x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.float32)
    y = np.array([1, 2]).reshape(2, 1).astype(np.float32)
    with self.test_session() as sess:
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      s = tf.reduce_sum(inx * iny)
      gx, gy = sess.run(tf.gradients(s, [inx, iny]))
    # gx is simply the broadcasted y
    self.assertAllEqual(gx, np.array([1, 1, 2, 2])
                        .reshape(2, 2).astype(np.float32))
    # gy is x's column summed up
    self.assertAllEqual(gy, np.array([3, 7]).
                        reshape(2, 1).astype(np.float32))
  def testFloatVariableOverload(self):
    x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.int32)
    y = np.array([1, 2]).reshape(2, 1).astype(np.int32)
    var_x = tf.Variable(x)
    var_y = tf.Variable(y)
    with self.test_session() as sess:
      sess.run([var_x.initializer, var_y.initializer])
      left_result = (var_x * y).eval()
      right_result = (x * var_y).eval()
    np_result = x * y
    self.assertAllEqual(np_result, left_result)
    self.assertAllEqual(np_result, right_result)
  def testDoubleBasic(self):
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
    self._compareBoth(x, y, np.add, tf.add)
    self._compareBoth(x, y, np.subtract, tf.sub)
    self._compareBoth(x, y, np.multiply, tf.mul)
    self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
    self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
    self._compareBoth(x, y, np.add, _ADD)
    self._compareBoth(x, y, np.subtract, _SUB)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
      x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
      self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)
      self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)
    except ImportError as e:
      tf.logging.warn("Cannot test special functions: %s" % str(e))
  def testInt8Basic(self):
    x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int8)
    y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int8)
    self._compareBoth(x, y, np.multiply, tf.mul)
    self._compareBoth(x, y, np.multiply, _MUL)
  def testInt16Basic(self):
    x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int16)
    y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int16)
    self._compareBoth(x, y, np.multiply, tf.mul)
    self._compareBoth(x, y, np.multiply, _MUL)
  def testInt32Basic(self):
    x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
    y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
    self._compareBoth(x, y, np.add, tf.add)
    self._compareBoth(x, y, np.subtract, tf.sub)
    self._compareBoth(x, y, np.multiply, tf.mul)
    self._compareBoth(x, y, np.true_divide, tf.truediv)
    self._compareBoth(x, y, np.floor_divide, tf.floordiv)
    self._compareBoth(x, y, np.mod, tf.mod)
    self._compareBoth(x, y, np.add, _ADD)
    self._compareBoth(x, y, np.subtract, _SUB)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
    self._compareBoth(x, y, np.mod, _MOD)
    # _compareBoth tests on GPU only for floating point types, so test
    # _MOD for int32 on GPU by calling _compareGpu
    self._compareGpu(x, y, np.mod, _MOD)
  def testInt64Basic(self):
    # Values beyond int32 range, to catch accidental narrowing.
    x = np.arange(1 << 40, 13 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
    y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int64)
    self._compareBoth(x, y, np.subtract, tf.sub)
    self._compareBoth(x, y, np.multiply, tf.mul)
    self._compareBoth(x, y, np.true_divide, tf.truediv)
    self._compareBoth(x, y, np.floor_divide, tf.floordiv)
    self._compareBoth(x, y, np.mod, tf.mod)
    self._compareBoth(x, y, np.subtract, _SUB)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
    self._compareBoth(x, y, np.mod, _MOD)
  def testComplex64Basic(self):
    x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
        np.complex64)
    y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
        np.complex64)
    self._compareCpu(x, y, np.add, tf.add)
    self._compareCpu(x, y, np.subtract, tf.sub)
    self._compareCpu(x, y, np.multiply, tf.mul)
    self._compareCpu(x, y + 0.1, np.true_divide, tf.truediv)
    self._compareCpu(x, y, np.add, _ADD)
    self._compareCpu(x, y, np.subtract, _SUB)
    self._compareCpu(x, y, np.multiply, _MUL)
    self._compareCpu(x, y + 0.1, np.true_divide, _TRUEDIV)
  def testComplex128Basic(self):
    x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
        np.complex128)
    y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
        np.complex128)
    self._compareCpu(x, y, np.add, tf.add)
    self._compareCpu(x, y, np.subtract, tf.sub)
    self._compareCpu(x, y, np.multiply, tf.mul)
    self._compareCpu(x, y + 0.1, np.true_divide, tf.truediv)
    self._compareCpu(x, y, np.add, _ADD)
    self._compareCpu(x, y, np.subtract, _SUB)
    self._compareCpu(x, y, np.multiply, _MUL)
    self._compareCpu(x, y + 0.1, np.true_divide, _TRUEDIV)
  def testStringComparison(self):
    x = np.array([["abc", "bh"], ["c", ""]])
    y = np.array([["abc", "bh"], ["def", "hi"]])
    with self.test_session(use_gpu=False) as sess:
      cmp_eq = tf.equal(x, y)
      cmp_not_eq = tf.not_equal(x, y)
      values = sess.run([cmp_eq, cmp_not_eq])
      self.assertAllEqual([[True, True], [False, False]], values[0])
      self.assertAllEqual([[False, False], [True, True]], values[1])
  def testString(self):
    # Object-dtype (string) addition, including broadcast against a
    # row vector and a scalar.
    x = np.array([["x_0_0", "x_0_1", "x_0_2"],
                  ["x_1_0", "x_1_1", "x_1_2"],
                  ["x_2_0", "x_2_1", "x_2_2"]], dtype=np.object)
    y = np.array([["y_0_0", "y_0_1", "y_0_2"],
                  ["y_1_0", "y_1_1", "y_1_2"],
                  ["y_2_0", "y_2_1", "y_2_2"]], dtype=np.object)
    z = np.array([["z_0", "z_1", "z_2"]], dtype=np.object)
    w = np.array("w", dtype=np.object)
    self._compareCpu(x, y, _ADD, _ADD)
    self._compareCpu(x, z, _ADD, _ADD)
    self._compareCpu(x, w, _ADD, _ADD)
    self._compareCpu(z, w, _ADD, _ADD)
  def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
    """Broadcasting check for shapes xs vs ys (one argument order)."""
    x = (1 + np.linspace(0, 5, np.prod(xs))).astype(dtype).reshape(xs)
    y = (1 + np.linspace(0, 5, np.prod(ys))).astype(dtype).reshape(ys)
    self._compareCpu(x, y, np_func, tf_func)
    if x.dtype in (np.float16, np.float32, np.float64):
      if tf_func not in (_FLOORDIV, tf.floordiv):
        if x.dtype == np.float16:
          # Compare fp16 theoretical gradients to fp32 numerical gradients,
          # since fp16 numerical gradients are too imprecise unless great
          # care is taken with choosing the inputs and the delta. This is
          # a weaker check (in particular, it does not test the op itself,
          # only its gradient), but it's much better than nothing.
          self._compareGradientX(x, y, np_func, tf_func, np.float)
          self._compareGradientY(x, y, np_func, tf_func, np.float)
        else:
          self._compareGradientX(x, y, np_func, tf_func)
          self._compareGradientY(x, y, np_func, tf_func)
      self._compareGpu(x, y, np_func, tf_func)
  # TODO(josh11b,vrv): Refactor this to use parameterized tests.
  def _testBCastByFunc(self, funcs, xs, ys):
    """Run each (np, tf) func pair over all dtypes, both argument orders."""
    dtypes = [
        np.float16,
        np.float32,
        np.float64,
        np.int32,
        np.int64,
        np.complex64,
        np.complex128,
    ]
    for dtype in dtypes:
      for (np_func, tf_func) in funcs:
        if (dtype in (np.complex64, np.complex128) and
            tf_func in (_FLOORDIV, tf.floordiv)):
          continue  # floordiv makes no sense for complex numbers
        self._compareBCast(xs, ys, dtype, np_func, tf_func)
        self._compareBCast(ys, xs, dtype, np_func, tf_func)
  # Function groups A-D used by the testBCast_* cases below.
  def _testBCastA(self, xs, ys):
    funcs = [
        (np.add, tf.add),
        (np.add, _ADD),
    ]
    self._testBCastByFunc(funcs, xs, ys)
  def _testBCastB(self, xs, ys):
    funcs = [
        (np.subtract, tf.sub),
        (np.subtract, _SUB),
        (np.power, tf.pow),
    ]
    self._testBCastByFunc(funcs, xs, ys)
  def _testBCastC(self, xs, ys):
    funcs = [
        (np.multiply, tf.mul),
        (np.multiply, _MUL),
    ]
    self._testBCastByFunc(funcs, xs, ys)
  def _testBCastD(self, xs, ys):
    funcs = [
        (np.true_divide, tf.truediv),
        (np.floor_divide, tf.floordiv),
        (np.true_divide, _TRUEDIV),
        (np.floor_divide, _FLOORDIV),
    ]
    self._testBCastByFunc(funcs, xs, ys)
  # testBCast_<N><G>: shape pair N (fixed per number) with function
  # group G in {A: add, B: sub/pow, C: mul, D: div}.
  def testBCast_0A(self):
    self._testBCastA([1, 3, 2], [1])
  def testBCast_0B(self):
    self._testBCastB([1, 3, 2], [1])
  def testBCast_0C(self):
    self._testBCastC([1, 3, 2], [1])
  def testBCast_0D(self):
    self._testBCastD([1, 3, 2], [1])
  def testBCast_1A(self):
    self._testBCastA([1, 3, 2], [2])
  def testBCast_1B(self):
    self._testBCastB([1, 3, 2], [2])
  def testBCast_1C(self):
    self._testBCastC([1, 3, 2], [2])
  def testBCast_1D(self):
    self._testBCastD([1, 3, 2], [2])
  def testBCast_2A(self):
    self._testBCastA([1, 3, 2], [3, 2])
  def testBCast_2B(self):
    self._testBCastB([1, 3, 2], [3, 2])
  def testBCast_2C(self):
    self._testBCastC([1, 3, 2], [3, 2])
  def testBCast_2D(self):
    self._testBCastD([1, 3, 2], [3, 2])
  def testBCast_3A(self):
    self._testBCastA([1, 3, 2], [3, 1])
  def testBCast_3B(self):
    self._testBCastB([1, 3, 2], [3, 1])
  def testBCast_3C(self):
    self._testBCastC([1, 3, 2], [3, 1])
  def testBCast_3D(self):
    self._testBCastD([1, 3, 2], [3, 1])
  def testBCast_4A(self):
    self._testBCastA([1, 3, 2], [1, 3, 2])
  def testBCast_4B(self):
    self._testBCastB([1, 3, 2], [1, 3, 2])
  def testBCast_4C(self):
    self._testBCastC([1, 3, 2], [1, 3, 2])
  def testBCast_4D(self):
    self._testBCastD([1, 3, 2], [1, 3, 2])
  def testBCast_5A(self):
    self._testBCastA([1, 3, 2], [2, 3, 1])
  def testBCast_5B(self):
    self._testBCastB([1, 3, 2], [2, 3, 1])
  def testBCast_5C(self):
    self._testBCastC([1, 3, 2], [2, 3, 1])
  def testBCast_5D(self):
    self._testBCastD([1, 3, 2], [2, 3, 1])
  def testBCast_6A(self):
    self._testBCastA([1, 3, 2], [2, 1, 1])
  def testBCast_6B(self):
    self._testBCastB([1, 3, 2], [2, 1, 1])
  def testBCast_6C(self):
    self._testBCastC([1, 3, 2], [2, 1, 1])
  def testBCast_6D(self):
    self._testBCastD([1, 3, 2], [2, 1, 1])
  def testBCast_7A(self):
    self._testBCastA([1, 3, 2], [1, 3, 1])
  def testBCast_7B(self):
    self._testBCastB([1, 3, 2], [1, 3, 1])
  def testBCast_7C(self):
    self._testBCastC([1, 3, 2], [1, 3, 1])
  def testBCast_7D(self):
    self._testBCastD([1, 3, 2], [1, 3, 1])
  def testBCast_8A(self):
    self._testBCastA([2, 1, 5], [2, 3, 1])
  def testBCast_8B(self):
    self._testBCastB([2, 1, 5], [2, 3, 1])
  def testBCast_8C(self):
    self._testBCastC([2, 1, 5], [2, 3, 1])
  def testBCast_8D(self):
    self._testBCastD([2, 1, 5], [2, 3, 1])
  def testBCast_9A(self):
    self._testBCastA([2, 0, 5], [2, 0, 1])
  def testBCast_9B(self):
    self._testBCastB([2, 0, 5], [2, 0, 1])
  def testBCast_9C(self):
    self._testBCastC([2, 0, 5], [2, 0, 1])
  def testBCast_9D(self):
    self._testBCastD([2, 0, 5], [2, 0, 1])
  def testBCast_10A(self):
    self._testBCastA([2, 3, 0], [2, 3, 1])
  def testBCast_10B(self):
    self._testBCastB([2, 3, 0], [2, 3, 1])
  def testBCast_10C(self):
    self._testBCastC([2, 3, 0], [2, 3, 1])
  def testBCast_10D(self):
    self._testBCastD([2, 3, 0], [2, 3, 1])
  def testBCast_11A(self):
    self._testBCastA([1, 3, 2], [1, 3, 2])
  def testBCast_11B(self):
    self._testBCastB([1, 3, 2], [1, 3, 2])
  def testBCast_11C(self):
    self._testBCastC([1, 3, 2], [1, 3, 2])
  def testBCast_11D(self):
    self._testBCastD([1, 3, 2], [1, 3, 2])
  def testBCast_12A(self):
    self._testBCastA([1, 1, 1, 1, 3, 2], [1, 3, 2])
  def testBCast_12B(self):
    self._testBCastB([1, 1, 1, 1, 3, 2], [1, 3, 2])
  def testBCast_12C(self):
    self._testBCastC([1, 1, 1, 1, 3, 2], [1, 3, 2])
  def testBCast_12D(self):
    self._testBCastD([1, 1, 1, 1, 3, 2], [1, 3, 2])
  def testBCast_13A(self):
    self._testBCastA([1, 3, 2, 1, 1], [1])
  def testBCast_13B(self):
    self._testBCastB([1, 3, 2, 1, 1], [1])
  def testBCast_13C(self):
    self._testBCastC([1, 3, 2, 1, 1], [1])
  def testBCast_13D(self):
    self._testBCastD([1, 3, 2, 1, 1], [1])
  def testBCast_14A(self):
    self._testBCastA([2, 3, 1, 1, 5], [1])
  def testBCast_14B(self):
    self._testBCastB([2, 3, 1, 1, 5], [1])
  def testBCast_14C(self):
    self._testBCastC([2, 3, 1, 1, 5], [1])
  def testBCast_14D(self):
    self._testBCastD([2, 3, 1, 1, 5], [1])
  def testBCast_15A(self):
    self._testBCastA([10, 3, 1, 2], [3, 1, 2])
  def testBCast_15B(self):
    self._testBCastB([10, 3, 1, 2], [3, 1, 2])
  def testBCast_15C(self):
    self._testBCastC([10, 3, 1, 2], [3, 1, 2])
  def testBCast_15D(self):
    self._testBCastD([10, 3, 1, 2], [3, 1, 2])
  def testMismatchedDimensions(self):
    # Incompatible (non-broadcastable) shapes must raise at graph build.
    for func in [tf.add, tf.sub, tf.mul, tf.div, _ADD, _SUB, _MUL, _TRUEDIV,
                 _FLOORDIV]:
      with self.assertRaisesWithPredicateMatch(
          ValueError, lambda e: "Incompatible shapes" in str(e)):
        func(tf.convert_to_tensor([10.0, 20.0, 30.0]),
             tf.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
  def testZeroPowGrad(self):
    # d(0**y)/dy must be exactly 0, not NaN.
    with self.test_session():
      for dtype in np.float16, np.float32, np.float64:
        x = tf.constant(0.0, dtype=dtype)
        y = tf.constant(2.0, dtype=dtype)
        z = tf.pow(x, y)
        error = tf.test.compute_gradient_error(y, [], z, [])
        self.assertEqual(error, 0)
class ComparisonOpTest(tf.test.TestCase):
  """Tests elementwise comparison ops (<, <=, >, >=, ==, !=) against numpy.

  Each comparison is checked on scalars, on dense tensors, and under
  broadcasting, for the dtypes each op supports.  GPU runs are limited to
  float dtypes (see _compareBoth).
  """

  def _compare(self, func, x, y, dtype):
    # Evaluate func on two scalars (wrapped as 1-element tensors) on the CPU
    # and return the single boolean result.
    with self.test_session(use_gpu=False):
      out = func(tf.convert_to_tensor(np.array([x]).astype(dtype)),
                 tf.convert_to_tensor(np.array([y]).astype(dtype)))
      ret = out.eval()
    return ret[0]

  def testScalarCompareScalar(self):
    # All pairwise comparisons over {-1, 0, 1} for real dtypes.  Note the
    # local name shadows the module-level `dtypes` on purpose.
    dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
    data = [-1, 0, 1]
    for t in dtypes:
      for x in data:
        for y in data:
          self.assertEqual(self._compare(tf.less, x, y, t), x < y)
          self.assertEqual(self._compare(tf.less_equal, x, y, t), x <= y)
          self.assertEqual(self._compare(tf.greater, x, y, t), x > y)
          self.assertEqual(self._compare(tf.greater_equal, x, y, t), x >= y)
          self.assertEqual(self._compare(tf.equal, x, y, t), x == y)
          self.assertEqual(self._compare(tf.not_equal, x, y, t), x != y)
    # Complex dtypes support only (in)equality, not ordering.
    data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
    for t in [np.complex64, np.complex128]:
      for x in data:
        for y in data:
          self.assertEqual(self._compare(tf.equal, x, y, t), x == y)
          self.assertEqual(self._compare(tf.not_equal, x, y, t), x != y)

  def _compareCpu(self, x, y, np_func, tf_func):
    # Compare tf_func against np_func on the CPU for dense inputs.
    np_ans = np_func(x, y)
    with self.test_session(use_gpu=False):
      out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
      tf_cpu = out.eval()
    self.assertAllEqual(np_ans, tf_cpu)

  def _compareGpu(self, x, y, np_func, tf_func):
    # Same comparison as _compareCpu but forcing GPU placement.
    np_ans = np_func(x, y)
    with self.test_session(use_gpu=True):
      out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
      tf_gpu = out.eval()
    self.assertAllEqual(np_ans, tf_gpu)

  def _compareBoth(self, x, y, np_func, tf_func):
    # CPU always; GPU only for float dtypes (the only ones with GPU kernels
    # for these ops at the time this test was written).
    self._compareCpu(x, y, np_func, tf_func)
    if x.dtype == np.float16 or x.dtype == np.float32 or x.dtype == np.float64:
      self._compareGpu(x, y, np_func, tf_func)

  def testTensorCompareTensor(self):
    x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
    y = np.linspace(20, -10, 6).reshape(1, 3, 2)
    for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
      xt = x.astype(t)
      yt = y.astype(t)
      self._compareBoth(xt, yt, np.less, tf.less)
      self._compareBoth(xt, yt, np.less_equal, tf.less_equal)
      self._compareBoth(xt, yt, np.greater, tf.greater)
      self._compareBoth(xt, yt, np.greater_equal, tf.greater_equal)
      self._compareBoth(xt, yt, np.equal, tf.equal)
      self._compareBoth(xt, yt, np.not_equal, tf.not_equal)
    # TODO(zhifengc): complex64 doesn't work on GPU yet.
    for t in [np.complex64, np.complex128]:
      self._compareCpu(x.astype(t), y.astype(t), np.equal, tf.equal)
      self._compareCpu(x.astype(t), y.astype(t), np.not_equal, tf.not_equal)

  def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
    # Check broadcasting in both argument orders (x op y and y op x).
    x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
    y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
    self._compareCpu(x, y, np_func, tf_func)
    self._compareCpu(y, x, np_func, tf_func)
    if x.dtype == np.float16 or x.dtype == np.float32 or x.dtype == np.float64:
      self._compareGpu(x, y, np_func, tf_func)
      self._compareGpu(y, x, np_func, tf_func)

  def _testBCastByFunc(self, np_func, tf_func):
    # Shape pairs cover scalar-like, partial, full, and zero-sized broadcasts.
    shapes = [
        ([1, 3, 2], [1]),
        ([1, 3, 2], [2]),
        ([1, 3, 2], [3, 2]),
        ([1, 3, 2], [3, 1]),
        ([1, 3, 2], [1, 3, 2]),
        ([1, 3, 2], [2, 3, 1]),
        ([1, 3, 2], [2, 1, 1]),
        ([1, 3, 2], [1, 3, 1]),
        ([2, 1, 5], [2, 3, 1]),
        ([2, 0, 5], [2, 0, 1]),
        ([2, 3, 0], [2, 3, 1]),
    ]
    dtypes = [
        np.float16,
        np.float32,
        np.float64,
        np.int32,
        np.int64,
    ]
    for (xs, ys) in shapes:
      for dtype in dtypes:
        self._compareBCast(xs, ys, dtype, np_func, tf_func)

  def testBCastLess(self):
    self._testBCastByFunc(np.less, tf.less)

  def testBCastLessEqual(self):
    self._testBCastByFunc(np.less_equal, tf.less_equal)

  def testBCastGreater(self):
    self._testBCastByFunc(np.greater, tf.greater)

  def testBCastGreaterEqual(self):
    self._testBCastByFunc(np.greater_equal, tf.greater_equal)

  def testBCastEqual(self):
    self._testBCastByFunc(np.equal, tf.equal)

  def testBCastNotEqual(self):
    self._testBCastByFunc(np.not_equal, tf.not_equal)

  def testShapeMismatch(self):
    # Non-broadcastable shapes ([2, 5] vs [5, 2]) must raise at graph build.
    dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
    funcs = [tf.less, tf.less_equal, tf.greater,
             tf.greater_equal, tf.equal, tf.not_equal]
    x = np.arange(0, 10).reshape([2, 5])
    y = np.arange(0, 10).reshape([5, 2])
    for t in dtypes:
      for f in funcs:
        with self.assertRaisesWithPredicateMatch(
            ValueError, lambda e: "Incompatible shapes" in str(e)):
          f(x.astype(t), y.astype(t))
class LogicalOpTest(tf.test.TestCase):
  """Tests logical_and/or/xor/not against numpy, plus Python-bool misuse.

  Covers scalars, dense tensors, broadcasting, shape-mismatch errors, and
  the TypeError raised when a bool Tensor is used as a Python truth value.
  """

  def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
    # Check values, dtype (always tf.bool) and static shape of the result.
    np_ans = np_func(x, y)
    with self.test_session(use_gpu=use_gpu):
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_val = out.eval()
    self.assertEqual(out.dtype, tf.bool)
    self.assertAllEqual(np_ans, tf_val)
    self.assertShapeEqual(np_ans, out)

  def _not(self, x, use_gpu=False):
    # Same checks as _compareBinary but for the unary logical_not.
    np_ans = np.logical_not(x)
    with self.test_session(use_gpu=use_gpu):
      out = tf.logical_not(tf.convert_to_tensor(x))
      tf_val = out.eval()
    self.assertEqual(out.dtype, tf.bool)
    self.assertAllEqual(np_ans, tf_val)
    self.assertShapeEqual(np_ans, out)

  def testScalar(self):
    data = [np.array([True]), np.array([False])]
    for use_gpu in [True, False]:
      for x in data:
        self._not(x, use_gpu)
      for x in data:
        for y in data:
          self._compareBinary(
              x, y, np.logical_and, tf.logical_and, use_gpu)
          self._compareBinary(
              x, y, np.logical_or, tf.logical_or, use_gpu)
          self._compareBinary(
              x, y, np.logical_xor, tf.logical_xor, use_gpu)

  def testTensor(self):
    x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
    y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
    for use_gpu in [True, False]:
      self._not(x, use_gpu)
      self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)
      self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)
      self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)

  def testBCast(self):
    # Broadcasting shape pairs, including zero-sized dimensions.
    shapes = [
        ([1, 3, 2], [1]),
        ([1, 3, 2], [2]),
        ([1, 3, 2], [3, 2]),
        ([1, 3, 2], [3, 1]),
        ([1, 3, 2], [1, 3, 2]),
        ([1, 3, 2], [2, 3, 1]),
        ([1, 3, 2], [2, 1, 1]),
        ([1, 3, 2], [1, 3, 1]),
        ([2, 1, 5], [2, 3, 1]),
        ([2, 0, 5], [2, 0, 1]),
        ([2, 3, 0], [2, 3, 1]),
    ]
    for (xs, ys) in shapes:
      x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)
      y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)
      for use_gpu in [True, False]:
        self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)
        self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)
        self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)

  def testShapeMismatch(self):
    x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
    y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)
    for f in [tf.logical_and, tf.logical_or, tf.logical_xor]:
      with self.assertRaisesWithPredicateMatch(
          ValueError, lambda e: "Incompatible shapes" in str(e)):
        f(x, y)

  def testUsingAsPythonValueFails(self):
    # Ensure that we raise an error when the user attempts to treat a
    # `Tensor` as a Python `bool`.
    b = tf.constant(False)
    with self.assertRaises(TypeError):
      if b:
        pass
    x = tf.constant(3)
    y = tf.constant(4)
    with self.assertRaises(TypeError):
      if x > y:
        pass
    z = tf.constant(7)
    # The chained comparison should fail because Python computes `x <
    # y` and short-circuits the comparison with `z` if it is `False`.
    with self.assertRaises(TypeError):
      _ = x < y < z
class SelectOpTest(tf.test.TestCase):
  """Tests tf.select (elementwise where) against np.where.

  Checks forward values, analytic vs numeric gradients for both branches,
  shape-mismatch errors, empty tensors, and that NaNs in the unselected
  branch do not propagate.
  """

  def _compare(self, c, x, y, use_gpu):
    # Forward check: tf.select(c, x, y) must match np.where(c, x, y).
    np_ans = np.where(c, x, y)
    with self.test_session(use_gpu=use_gpu):
      out = tf.select(c, x, y)
      tf_ans = out.eval()
    self.assertAllEqual(np_ans, tf_ans)
    self.assertShapeEqual(np_ans, out)

  def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
    # Gradient w.r.t. the "then" branch x.  When numeric_gradient_type is
    # set, the numeric jacobian is recomputed in that (higher-precision)
    # dtype — used for fp16 where finite differences are too noisy.
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      out = tf.select(c, inx, iny)
      s = list(np.shape(c))
      jacob_t, jacob_n = tf.test.compute_gradient(inx,
                                                  s,
                                                  out,
                                                  s,
                                                  x_init_value=x)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = tf.convert_to_tensor(xf)
        inyf = tf.convert_to_tensor(yf)
        outf = tf.select(c, inxf, inyf)
        _, jacob_n = tf.test.compute_gradient(inxf,
                                              s,
                                              outf,
                                              s,
                                              x_init_value=xf)
        jacob_n = jacob_n.astype(x.dtype)
    # Tolerances scale with the precision of the dtype under test.
    if x.dtype == np.float16:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float32:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float64:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
    # Gradient w.r.t. the "else" branch y; mirrors _compareGradientX.
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      out = tf.select(c, inx, iny)
      s = list(np.shape(c))
      jacob_t, jacob_n = tf.test.compute_gradient(iny,
                                                  s,
                                                  out,
                                                  s,
                                                  x_init_value=y,
                                                  delta=1.0)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = tf.convert_to_tensor(xf)
        inyf = tf.convert_to_tensor(yf)
        outf = tf.select(c, inxf, inyf)
        _, jacob_n = tf.test.compute_gradient(inyf,
                                              s,
                                              outf,
                                              s,
                                              x_init_value=yf)
        jacob_n = jacob_n.astype(x.dtype)
    if x.dtype == np.float16:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float32:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float64:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def testBasic(self):
    c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(1, 3, 2) * 100
    for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
              np.complex64, np.complex128]:
      xt = x.astype(t)
      yt = y.astype(t)
      self._compare(c, xt, yt, use_gpu=False)
      if t in [np.float16, np.float32, np.float64]:
        self._compare(c, xt, yt, use_gpu=True)

  def testGradients(self):
    c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(1, 3, 2) * 100
    for t in [np.float16, np.float32, np.float64]:
      xt = x.astype(t)
      yt = y.astype(t)
      if t == np.float16:
        # Compare fp16 theoretical gradients to fp32 numerical gradients,
        # since fp16 numerical gradients are too imprecise unless great
        # care is taken with choosing the inputs and the delta. This is
        # a weaker check (in particular, it does not test the op itself,
        # only its gradient), but it's much better than nothing.
        self._compareGradientX(c, xt, yt, np.float)
        self._compareGradientY(c, xt, yt, np.float)
      else:
        self._compareGradientX(c, xt, yt)
        self._compareGradientY(c, xt, yt)

  def testShapeMismatch(self):
    c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
    x = np.random.rand(1, 3, 2) * 100
    y = np.random.rand(2, 5, 3) * 100
    for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
              np.complex64, np.complex128]:
      xt = x.astype(t)
      yt = y.astype(t)
      with self.assertRaises(ValueError):
        tf.select(c, xt, yt)

  def testEmptyTensor(self):
    # select on zero-sized tensors must succeed and produce a zero-sized
    # result of the right shape.
    c = np.random.randint(0, 3, 0).astype(np.bool).reshape(1, 3, 0)
    x = np.random.rand(1, 3, 0) * 100
    y = np.random.rand(1, 3, 0) * 100
    z_expected = np.zeros((1, 3, 0), dtype=np.float32)
    with self.test_session():
      xt = x.astype(np.float32)
      yt = y.astype(np.float32)
      z = tf.select(c, xt, yt).eval()
      self.assertAllEqual(z_expected, z)

  def testNan(self):
    """Verify that nans don't propagate where they shouldn't."""
    with self.test_session():
      for c in False, True:
        for a in 7.0, np.nan:
          for b in 5.0, np.nan:
            x = tf.select(c, a, b).eval()
            y = a if c else b
            self.assertEqual(np.isnan(x), np.isnan(y))
class BatchSelectOpTest(tf.test.TestCase):
  """Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+."""

  def _compare(self, c, x, y, use_gpu):
    # Reference: pick whole x_i/y_i slices per batch element c_i, then
    # transpose the dstack result back to batch-major layout.
    np_ans = np.dstack(
        [x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(
            [2, 0, 1])
    with self.test_session(use_gpu=use_gpu):
      out = tf.select(c, x, y)
      tf_ans = out.eval()
    self.assertAllEqual(np_ans, tf_ans)
    self.assertShapeEqual(np_ans, out)

  def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
    # Gradient w.r.t. x; numeric_gradient_type recomputes the numeric
    # jacobian in higher precision (used for fp16).
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      out = tf.select(c, inx, iny)
      s = list(np.shape(x))
      jacob_t, jacob_n = tf.test.compute_gradient(inx,
                                                  s,
                                                  out,
                                                  s,
                                                  x_init_value=x)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = tf.convert_to_tensor(xf)
        inyf = tf.convert_to_tensor(yf)
        outf = tf.select(c, inxf, inyf)
        _, jacob_n = tf.test.compute_gradient(inxf,
                                              s,
                                              outf,
                                              s,
                                              x_init_value=xf)
        jacob_n = jacob_n.astype(x.dtype)
    if x.dtype == np.float16:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float32:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float64:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
    # Gradient w.r.t. y; mirrors _compareGradientX.
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      out = tf.select(c, inx, iny)
      s = list(np.shape(x))
      jacob_t, jacob_n = tf.test.compute_gradient(iny,
                                                  s,
                                                  out,
                                                  s,
                                                  x_init_value=y)
      if numeric_gradient_type is not None:
        xf = x.astype(numeric_gradient_type)
        yf = y.astype(numeric_gradient_type)
        inxf = tf.convert_to_tensor(xf)
        inyf = tf.convert_to_tensor(yf)
        outf = tf.select(c, inxf, inyf)
        _, jacob_n = tf.test.compute_gradient(inyf,
                                              s,
                                              outf,
                                              s,
                                              x_init_value=yf)
        jacob_n = jacob_n.astype(x.dtype)
    if x.dtype == np.float16:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float32:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float64:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def testBasic(self):
    c = np.random.randint(0, 2, 16).astype(np.bool)
    x = np.random.rand(16, 2, 8) * 100
    y = np.random.rand(16, 2, 8) * 100
    for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
              np.complex64, np.complex128]:
      xt = x.astype(t)
      yt = y.astype(t)
      self._compare(c, xt, yt, use_gpu=False)
      if t in [np.float16, np.float32, np.float64]:
        self._compare(c, xt, yt, use_gpu=True)

  def testGradients(self):
    c = np.random.randint(0, 2, 16).astype(np.bool)
    x = np.random.rand(16, 2, 8) * 100
    y = np.random.rand(16, 2, 8) * 100
    for t in [np.float16, np.float32, np.float64]:
      xt = x.astype(t)
      yt = y.astype(t)
      if t == np.float16:
        # Compare fp16 theoretical gradients to fp32 numerical gradients,
        # since fp16 numerical gradients are too imprecise unless great
        # care is taken with choosing the inputs and the delta. This is
        # a weaker check (in particular, it does not test the op itself,
        # only its gradient), but it's much better than nothing.
        self._compareGradientX(c, xt, yt, np.float)
        self._compareGradientY(c, xt, yt, np.float)
      else:
        self._compareGradientX(c, xt, yt)
        self._compareGradientY(c, xt, yt)

  def testShapeMismatch(self):
    # The vector c (length 8) does not match the batch dimension (16).
    c = np.random.randint(0, 2, 8).astype(np.bool)
    x = np.random.rand(16, 3, 2) * 100
    y = np.random.rand(16, 3, 2) * 100
    for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
              np.complex64, np.complex128]:
      xt = x.astype(t)
      yt = y.astype(t)
      with self.assertRaises(ValueError):
        tf.select(c, xt, yt)
class MinMaxOpTest(tf.test.TestCase):
  """Tests tf.minimum / tf.maximum against np.minimum / np.maximum."""

  def _compare(self, x, y, use_gpu):
    # Evaluate both ops in one session run and compare against numpy.
    np_min, np_max = np.minimum(x, y), np.maximum(x, y)
    with self.test_session(use_gpu=use_gpu) as sess:
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      omin, omax = tf.minimum(inx, iny), tf.maximum(inx, iny)
      tf_min, tf_max = sess.run([omin, omax])
    self.assertAllEqual(np_min, tf_min)
    self.assertAllEqual(np_max, tf_max)

  def testBasic(self):
    x = np.random.rand(1, 3, 2) * 100.
    y = np.random.rand(1, 3, 2) * 100.
    for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
      self._compare(x.astype(t), y.astype(t), use_gpu=False)
      self._compare(x.astype(t), y.astype(t), use_gpu=True)

  def testDifferentShapes(self):
    x = np.random.rand(1, 3, 2) * 100.
    y = np.random.rand(2) * 100.  # should broadcast
    for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
      self._compare(x.astype(t), y.astype(t), use_gpu=False)
      self._compare(x.astype(t), y.astype(t), use_gpu=True)

  def testScalar(self):
    x = np.random.rand(1, 3, 2) * 100.
    # NOTE(review): np.asscalar is deprecated in modern numpy (use .item());
    # kept as-is to match the numpy version this test was written against.
    y = np.asscalar(np.random.rand(1) * 100.)  # should broadcast
    # dropped np.float64, int64 because TF automatically converts to 32 bit
    for t in [np.float32, np.int32]:
      self._compare(x.astype(t), t(y), use_gpu=False)
      self._compare(x.astype(t), t(y), use_gpu=True)

  def _compareGradientX(self, func, x, y):
    # Analytic vs numeric gradient w.r.t. the first argument.
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      out = func(inx, iny)
      s = list(np.shape(x))
      jacob_t, jacob_n = tf.test.compute_gradient(inx,
                                                  s,
                                                  out,
                                                  s,
                                                  x_init_value=x)
    if x.dtype == np.float16:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float32:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float64:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def _compareGradientY(self, func, x, y):
    # Analytic vs numeric gradient w.r.t. the second argument.
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      out = func(inx, iny)
      s = list(np.shape(x))
      jacob_t, jacob_n = tf.test.compute_gradient(iny,
                                                  s,
                                                  out,
                                                  s,
                                                  x_init_value=y)
    if x.dtype == np.float16:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float32:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
    elif x.dtype == np.float64:
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def testGradients(self):
    x = np.random.rand(1, 3, 2) * 100.
    # ensure x != y so min/max gradients are well-defined (no ties).
    y = x + (np.random.randint(2, size=x.shape) - .5) * 2  # -1 or +1
    self._compareGradientX(tf.maximum, x, y)
    self._compareGradientY(tf.maximum, x, y)
    self._compareGradientX(tf.minimum, x, y)
    self._compareGradientY(tf.minimum, x, y)
class MathOpsOverloadTest(tf.test.TestCase):
  """Tests Python operator overloads on Tensors (+, -, *, **, /, //, %,
  comparisons, and boolean operators) against the numpy equivalents.

  _ADD, _SUB, _MUL, etc. are module-level lambdas that apply the Python
  operator, so both __op__ and __rop__ dispatch paths are exercised.
  """

  def _computeTensorAndLiteral(self, x, y, dtype, func):
    # Tensor <op> literal exercises the forward dunder (__add__ etc.).
    with self.test_session(use_gpu=False):
      inx = tf.convert_to_tensor(x, dtype=dtype)
      z = func(inx, y)  # Should use __add__, __sub__, etc.
      return z.eval()

  def _computeLiteralAndTensor(self, x, y, dtype, func):
    # literal <op> Tensor exercises the reflected dunder (__radd__ etc.).
    with self.test_session(use_gpu=False):
      iny = tf.convert_to_tensor(y, dtype=dtype)
      z = func(x, iny)  # Should use __radd__, __rsub__, etc.
      return z.eval()

  def _compareBinary(self, x, y, dtype, np_func, tf_func):
    np_ans = np_func(x, y).astype(dtype.as_numpy_dtype)
    self.assertAllClose(np_ans, self._computeTensorAndLiteral(
        x, y, dtype, tf_func))
    self.assertAllClose(np_ans, self._computeLiteralAndTensor(
        x, y, dtype, tf_func))

  def _compareUnary(self, x, dtype, np_func, tf_func):
    np_ans = np_func(x).astype(dtype.as_numpy_dtype)
    with self.test_session(use_gpu=False):
      self.assertAllClose(np_ans, tf_func(tf.convert_to_tensor(x, dtype=dtype)).eval())

  def testOverload(self):
    dtypes = [
        tf.float16,
        tf.float32,
        tf.float64,
        tf.int32,
        tf.int64,
        tf.complex64,
        tf.complex128,
    ]
    funcs = [
        (np.add, _ADD),
        (np.subtract, _SUB),
        (np.multiply, _MUL),
        (np.power, _POW),
        (np.true_divide, _TRUEDIV),
        (np.floor_divide, _FLOORDIV),
    ]
    for dtype in dtypes:
      for np_func, tf_func in funcs:
        if dtype in (tf.complex64, tf.complex128) and tf_func == _FLOORDIV:
          continue  # floordiv makes no sense for complex
        self._compareBinary(10, 5, dtype, np_func, tf_func)
    # Mod only works for int32 and int64.
    for dtype in [tf.int32, tf.int64]:
      self._compareBinary(10, 3, dtype, np.mod, _MOD)

  def testOverloadComparisons(self):
    dtypes = [
        tf.float16,
        tf.float32,
        tf.float64,
        tf.int32,
        tf.int64,
    ]
    funcs = [
        (np.less, _LT),
        (np.less_equal, _LE),
        (np.greater, _GT),
        (np.greater_equal, _GE),
    ]
    for dtype in dtypes:
      for np_func, tf_func in funcs:
        self._compareBinary(10, 5, dtype, np_func, tf_func)
    # Boolean operators (&, |, ^, ==, !=) over all truth-table combinations.
    logical_funcs = [
        (np.logical_and, _AND),
        (np.logical_or, _OR),
        (np.logical_xor, _XOR),
        (np.equal, tf.equal),
        (np.not_equal, tf.not_equal)
    ]
    for np_func, tf_func in logical_funcs:
      self._compareBinary(True, False, tf.bool, np_func, tf_func)
      self._compareBinary(True, True, tf.bool, np_func, tf_func)
      self._compareBinary(False, False, tf.bool, np_func, tf_func)
      self._compareBinary(False, True, tf.bool, np_func, tf_func)
      self._compareBinary([True, True, False, False],
                          [True, False, True, False],
                          tf.bool, np_func, tf_func)
    self._compareUnary(True, tf.bool, np.logical_not, _INV)
    self._compareUnary(False, tf.bool, np.logical_not, _INV)
    self._compareUnary([True, False], tf.bool, np.logical_not, _INV)
class IsFiniteInfNanTest(tf.test.TestCase):
  """Tests tf.is_finite / tf.is_inf / tf.is_nan against numpy for all
  float dtypes, including denormal-boundary, +/-inf and nan inputs."""

  def _compare(self, x, use_gpu):
    np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
    with self.test_session(use_gpu=use_gpu) as sess:
      inx = tf.convert_to_tensor(x)
      ofinite, oinf, onan = tf.is_finite(inx), tf.is_inf(
          inx), tf.is_nan(inx)
      tf_finite, tf_inf, tf_nan = sess.run([ofinite, oinf, onan])
    self.assertAllEqual(np_inf, tf_inf)
    self.assertAllEqual(np_nan, tf_nan)
    self.assertAllEqual(np_finite, tf_finite)
    self.assertShapeEqual(np_inf, oinf)
    self.assertShapeEqual(np_nan, onan)
    self.assertShapeEqual(np_finite, ofinite)

  def _testDtype(self, dtype):
    # np.finfo supplies the dtype's resolution and extreme finite values.
    fi = np.finfo(dtype)
    data = np.array([0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max,
                     -np.inf, np.inf, np.nan]).astype(dtype)
    self._compare(data, use_gpu=False)
    self._compare(data, use_gpu=True)

  def testHalf(self):
    self._testDtype(np.float16)

  def testFloat(self):
    self._testDtype(np.float32)

  def testDouble(self):
    self._testDtype(np.float64)
class RoundingTest(tf.test.TestCase):
  """Tests tf.floor / tf.ceil against np.floor / np.ceil."""

  def _compare(self, x, use_gpu):
    # Evaluate both ops in one session run and compare values and static
    # shapes against numpy.
    np_floor, np_ceil = np.floor(x), np.ceil(x)
    with self.test_session(use_gpu=use_gpu) as sess:
      inx = tf.convert_to_tensor(x)
      ofloor, oceil = tf.floor(inx), tf.ceil(inx)
      tf_floor, tf_ceil = sess.run([ofloor, oceil])
    self.assertAllEqual(np_floor, tf_floor)
    self.assertAllEqual(np_ceil, tf_ceil)
    self.assertShapeEqual(np_floor, ofloor)
    self.assertShapeEqual(np_ceil, oceil)

  def _testDtype(self, dtype):
    data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
    # Bug fix: the original called _compare(..., use_gpu=True) twice, so the
    # CPU kernels were never exercised.  Run once on CPU and once on GPU,
    # matching the pattern used by every other test class in this file.
    self._compare(data, use_gpu=False)
    self._compare(data, use_gpu=True)

  def testTypes(self):
    for dtype in [np.float16, np.float32, np.float64]:
      self._testDtype(dtype)
class ComplexMakeRealImagTest(tf.test.TestCase):
  """Tests tf.complex / tf.real / tf.imag / tf.conj and their gradients.

  Forward results are checked against the numpy equivalents for both
  complex64 and complex128; gradient checks build scalar losses from the
  real/imag parts so the complex-op gradient functions are exercised.
  """

  def _compareMake(self, real, imag, use_gpu):
    # tf.complex(real, imag) must equal real + 1j*imag elementwise.
    np_ans = real + (1j) * imag
    with self.test_session(use_gpu=use_gpu):
      real = tf.convert_to_tensor(real)
      imag = tf.convert_to_tensor(imag)
      tf_ans = tf.complex(real, imag)
      out = tf_ans.eval()
    self.assertAllEqual(np_ans, out)
    self.assertShapeEqual(np_ans, tf_ans)

  def testMake(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
    for use_gpu in [False, True]:
      self._compareMake(real, imag, use_gpu)
      # Scalar arguments broadcast against the tensor argument.
      self._compareMake(real, 12.0, use_gpu)
      self._compareMake(23.0, imag, use_gpu)

  def _compareRealImag(self, cplx, use_gpu):
    np_real, np_imag = np.real(cplx), np.imag(cplx)
    with self.test_session(use_gpu=use_gpu) as sess:
      inx = tf.convert_to_tensor(cplx)
      tf_real = tf.real(inx)
      tf_imag = tf.imag(inx)
      tf_real_val, tf_imag_val = sess.run([tf_real, tf_imag])
    self.assertAllEqual(np_real, tf_real_val)
    self.assertAllEqual(np_imag, tf_imag_val)
    self.assertShapeEqual(np_real, tf_real)
    self.assertShapeEqual(np_imag, tf_imag)

  def testRealImag64(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
    cplx = real + 1j * imag
    self._compareRealImag(cplx, use_gpu=False)
    self._compareRealImag(cplx, use_gpu=True)

  def testRealImag128(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
    cplx = real + 1j * imag
    self._compareRealImag(cplx, use_gpu=False)
    self._compareRealImag(cplx, use_gpu=True)

  def _compareConj(self, cplx, use_gpu):
    np_ans = np.conj(cplx)
    with self.test_session(use_gpu=use_gpu):
      inx = tf.convert_to_tensor(cplx)
      tf_conj = tf.conj(inx)
      tf_ans = tf_conj.eval()
    self.assertAllEqual(np_ans, tf_ans)
    self.assertShapeEqual(np_ans, tf_conj)

  def testConj64(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
    cplx = real + 1j * imag
    self._compareConj(cplx, use_gpu=False)
    self._compareConj(cplx, use_gpu=True)

  def testConj128(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
    cplx = real + 1j * imag
    self._compareConj(cplx, use_gpu=False)
    self._compareConj(cplx, use_gpu=True)

  def _compareGradient(self, x):
    # x[:, 0] is real, x[:, 1] is imag. We combine real and imag into
    # complex numbers. Then, we extract real and imag parts and
    # computes the squared sum. This is obviously the same as sum(real
    # * real) + sum(imag * imag). We just want to make sure the
    # gradient function is checked.
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      real, imag = tf.split(1, 2, inx)
      real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])
      cplx = tf.complex(real, imag)
      cplx = tf.conj(cplx)
      loss = tf.reduce_sum(
          tf.square(tf.real(cplx))) + tf.reduce_sum(
              tf.square(tf.imag(cplx)))
      epsilon = 1e-3
      jacob_t, jacob_n = tf.test.compute_gradient(inx,
                                                  list(x.shape),
                                                  loss,
                                                  [1],
                                                  x_init_value=x,
                                                  delta=epsilon)
    self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)

  def _compareBroadcastGradient(self, x):
    # Gradient through tf.complex when one argument is a broadcast scalar.
    x_ = tf.convert_to_tensor(x)
    epsilon = 1e-3
    with self.test_session():
      for args in [(x_, 0.), (0., x_)]:
        z = tf.reduce_sum(tf.complex_abs(tf.complex(*args)))
        jacob_t, jacob_n = tf.test.compute_gradient(x_,
                                                    list(x.shape),
                                                    z,
                                                    [1],
                                                    x_init_value=x,
                                                    delta=epsilon)
        self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)

  def testGradient(self):
    # complex64
    data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float32)
    self._compareGradient(data)
    self._compareBroadcastGradient(data)
    # complex128
    data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float64)
    self._compareGradient(data)

  def _compareMulGradient(self, data):
    # data is a float matrix of shape [n, 4]. data[:, 0], data[:, 1],
    # data[:, 2], data[:, 3] are real parts of x, imaginary parts of
    # x, real parts of y and imaginary parts of y.
    with self.test_session():
      inp = tf.convert_to_tensor(data)
      xr, xi, yr, yi = tf.split(1, 4, inp)

      def vec(x):  # Reshape to a vector
        return tf.reshape(x, [-1])
      xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)

      def cplx(r, i):  # Combine to a complex vector
        return tf.complex(r, i)
      x, y = cplx(xr, xi), cplx(yr, yi)
      # z is x times y in complex plane.
      z = x * y
      # Defines the loss function as the sum of all coefficients of z.
      loss = tf.reduce_sum(tf.real(z) + tf.imag(z))
      epsilon = 0.005
      jacob_t, jacob_n = tf.test.compute_gradient(inp,
                                                  list(data.shape),
                                                  loss,
                                                  [1],
                                                  x_init_value=data,
                                                  delta=epsilon)
    self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)

  def testMulGradient(self):
    data = np.arange(1, 2, 0.125).reshape([2, 4]).astype(np.float32)
    self._compareMulGradient(data)
class AccumulateTest(tf.test.TestCase):
  """Tests tf.accumulate_n, the n-ary elementwise sum of tensors."""

  def testSimple(self):
    # Sum 20 random tensors and compare against an in-place numpy sum.
    with self.test_session():
      random_arrays = [np.random.rand(16, 16, 16, 16).astype(np.float32)
                       for _ in range(20)]
      random_tensors = [tf.convert_to_tensor(x, dtype=tf.float32)
                        for x in random_arrays]
      tf_val = tf.accumulate_n(random_tensors)
      np_val = random_arrays[0]
      for random_array in random_arrays[1:]:
        np_val += random_array
      self.assertAllClose(np_val, tf_val.eval())

  def testZeroArgs(self):
    # accumulate_n requires at least one input tensor.
    with self.test_session():
      with self.assertRaises(ValueError):
        tf_val = tf.accumulate_n([])
        tf_val.eval()
if __name__ == "__main__":
  # Run all tests in this module under the TensorFlow test runner.
  tf.test.main()
| mit |
karan1276/servo | tests/wpt/web-platform-tests/tools/wptserve/tests/functional/test_pipes.py | 109 | 3167 | import os
import unittest
import time
from .base import TestUsingServer, doc_root
class TestStatus(TestUsingServer):
    """Tests the status() pipe, which overrides the HTTP response code."""

    def test_status(self):
        resp = self.request("/document.txt", query="pipe=status(202)")
        self.assertEqual(resp.getcode(), 202)
class TestHeader(TestUsingServer):
    """Tests the header() pipe, which sets/overrides/appends response headers."""

    def test_not_set(self):
        # Adding a header that was not already present.
        resp = self.request("/document.txt", query="pipe=header(X-TEST,PASS)")
        self.assertEqual(resp.info()["X-TEST"], "PASS")

    def test_set(self):
        # Overriding a header the server would otherwise set.
        resp = self.request("/document.txt", query="pipe=header(Content-Type,text/html)")
        self.assertEqual(resp.info()["Content-Type"], "text/html")

    def test_multiple(self):
        # Multiple pipes chained with '|' are applied in order.
        resp = self.request("/document.txt", query="pipe=header(X-Test,PASS)|header(Content-Type,text/html)")
        self.assertEqual(resp.info()["X-TEST"], "PASS")
        self.assertEqual(resp.info()["Content-Type"], "text/html")

    def test_multiple_same(self):
        # A later header() overrides an earlier one for the same name.
        resp = self.request("/document.txt", query="pipe=header(Content-Type,FAIL)|header(Content-Type,text/html)")
        self.assertEqual(resp.info()["Content-Type"], "text/html")

    def test_multiple_append(self):
        # The third argument (True) requests append rather than replace.
        resp = self.request("/document.txt", query="pipe=header(X-Test,1)|header(X-Test,2,True)")
        self.assertEqual(resp.info()["X-Test"], "1, 2")
class TestSlice(TestUsingServer):
    """Tests the slice() pipe, which returns a byte range of the body."""

    @staticmethod
    def _expected_bytes():
        """Return the reference document's bytes, closing the file promptly.

        The original code used bare open(...).read(), leaking the file
        handle until GC (ResourceWarning on CPython; a real leak on
        interpreters without refcounting).
        """
        with open(os.path.join(doc_root, "document.txt"), 'rb') as f:
            return f.read()

    def test_both_bounds(self):
        resp = self.request("/document.txt", query="pipe=slice(1,10)")
        self.assertEqual(resp.read(), self._expected_bytes()[1:10])

    def test_no_upper(self):
        resp = self.request("/document.txt", query="pipe=slice(1)")
        self.assertEqual(resp.read(), self._expected_bytes()[1:])

    def test_no_lower(self):
        # 'null' as the lower bound means "from the start".
        resp = self.request("/document.txt", query="pipe=slice(null,10)")
        self.assertEqual(resp.read(), self._expected_bytes()[:10])
class TestSub(TestUsingServer):
    """Tests the sub pipe, which substitutes server config, request headers
    and query parameters into the response body."""

    def test_sub_config(self):
        # {{host}}/{{domains}}/{{ports}} placeholders resolve to the
        # running test server's configuration.
        resp = self.request("/sub.txt", query="pipe=sub")
        expected = "localhost localhost %i" % self.server.port
        self.assertEqual(resp.read().rstrip(), expected)

    def test_sub_headers(self):
        # Request headers are available for substitution.
        resp = self.request("/sub_headers.txt", query="pipe=sub", headers={"X-Test": "PASS"})
        expected = "PASS"
        self.assertEqual(resp.read().rstrip(), expected)

    def test_sub_params(self):
        # Query parameters are available for substitution.
        resp = self.request("/sub_params.txt", query="test=PASS&pipe=sub")
        expected = "PASS"
        self.assertEqual(resp.read().rstrip(), expected)
class TestTrickle(TestUsingServer):
    """Tests the trickle() pipe, which drips the response out with delays."""

    def test_trickle(self):
        # Actually testing that the response trickles in is not that easy;
        # instead verify the complete body arrives and that the total time
        # stays under a generous upper bound.
        t0 = time.time()
        resp = self.request("/document.txt", query="pipe=trickle(1:d2:5:d1:r2)")
        t1 = time.time()
        # Close the reference file deterministically instead of leaking the
        # handle until GC (the original used bare open(...).read()).
        with open(os.path.join(doc_root, "document.txt"), 'rb') as f:
            expected = f.read()
        self.assertEqual(resp.read(), expected)
        self.assertGreater(6, t1-t0)
if __name__ == '__main__':
    # Run all test cases in this module.
    unittest.main()
| mpl-2.0 |
wrenchzc/photomanager | tests/test_command_update.py | 1 | 1307 | from tests.utils import remove_file
from photomanager.lib.pmconst import PMDBNAME
from photomanager.commands.index import CommandIndex
from photomanager.commands.update import CommandUpdate
from photomanager.db.dbutils import get_db_session, close_db_session
from photomanager.db.models import ImageMeta
# Root directory holding the fixture images (and the per-directory index DB)
# used by these tests.
cmd_inx_test_root = 'tests/data'
class TestDisplayImg(object):
    """Tests CommandUpdate against a DB freshly built by CommandIndex.

    The class-level setup indexes the fixture directory once; the DB file
    is deleted again in teardown_class.  Per-test setup/teardown only
    closes the cached DB session so each test opens a fresh one.
    """

    @classmethod
    def setup_class(cls):
        cls._clear()
        cls._do_index()

    @classmethod
    def teardown_class(cls):
        cls._clear()
        # Remove the index database created by _do_index.
        db_filename = cmd_inx_test_root + '/' + PMDBNAME
        remove_file(db_filename)

    @staticmethod
    def _clear():
        # Close (not delete) the cached session for the test database.
        db_filename = cmd_inx_test_root + '/' + PMDBNAME
        close_db_session(db_filename)

    @staticmethod
    def _do_index():
        # Build the image index for the fixture directory.
        command_index = CommandIndex(cmd_inx_test_root, {})
        cnt = command_index.do()  # return value (indexed count) is unused

    def setup_method(self):
        self._clear()

    def teardown_method(self):
        self._clear()

    def test_update_address_by_geoinfo(self):
        # Updating with geoinfo enabled should resolve GPS EXIF data of
        # test2.jpg into a human-readable address containing the place name.
        command_update = CommandUpdate(cmd_inx_test_root, {"geoinfo": True})
        command_update.do()
        test2_meta = command_update.handler.session.query(ImageMeta).filter(ImageMeta.filename == "test2.jpg").first()
        assert ("汝城" in test2_meta.address)
| mit |
fenglu-g/incubator-airflow | tests/contrib/operators/test_oracle_to_oracle_transfer.py | 13 | 3067 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow.contrib.operators.oracle_to_oracle_transfer \
import OracleToOracleTransfer
try:
from unittest import mock
from unittest.mock import MagicMock
except ImportError:
try:
import mock
from mock import MagicMock
except ImportError:
mock = None
class OracleToOracleTransferTest(unittest.TestCase):
    """Unit test for OracleToOracleTransfer._execute using mocked hooks."""

    @staticmethod
    def test_execute():
        # Connection/SQL parameters passed to the operator under test.
        oracle_destination_conn_id = 'oracle_destination_conn_id'
        destination_table = 'destination_table'
        oracle_source_conn_id = 'oracle_source_conn_id'
        source_sql = "select sysdate from dual where trunc(sysdate) = :p_data"
        source_sql_params = {':p_data': "2018-01-01"}
        rows_chunk = 5000

        # Fake cursor metadata/rows: two columns, two rows, then an empty
        # fetch to terminate the chunked read loop.
        cursor_description = [
            ('id', "<class 'cx_Oracle.NUMBER'>", 39, None, 38, 0, 0),
            ('description', "<class 'cx_Oracle.STRING'>", 60, 240, None, None, 1)
        ]
        cursor_rows = [[1, 'description 1'], [2, 'description 2']]

        mock_dest_hook = MagicMock()
        mock_src_hook = MagicMock()
        # _execute uses the source hook's connection as a context manager.
        mock_src_conn = mock_src_hook.get_conn.return_value.__enter__.return_value
        mock_cursor = mock_src_conn.cursor.return_value
        mock_cursor.description.__iter__.return_value = cursor_description
        mock_cursor.fetchmany.side_effect = [cursor_rows, []]

        op = OracleToOracleTransfer(
            task_id='copy_data',
            oracle_destination_conn_id=oracle_destination_conn_id,
            destination_table=destination_table,
            oracle_source_conn_id=oracle_source_conn_id,
            source_sql=source_sql,
            source_sql_params=source_sql_params,
            rows_chunk=rows_chunk)

        op._execute(mock_src_hook, mock_dest_hook, None)

        # Verify the read side: connection opened, query executed with the
        # bind params, rows fetched in the configured chunk size.
        assert mock_src_hook.get_conn.called
        assert mock_src_conn.cursor.called
        mock_cursor.execute.assert_called_with(source_sql, source_sql_params)
        mock_cursor.fetchmany.assert_called_with(rows_chunk)
        # Verify the write side: one bulk insert with the fetched rows and
        # the column names taken from the cursor description.
        mock_dest_hook.bulk_insert_rows.assert_called_once_with(
            destination_table,
            cursor_rows,
            commit_every=rows_chunk,
            target_fields=['id', 'description'])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| apache-2.0 |
# Unidecode transliteration table for codepoints U+6400..U+64FF (CJK
# Unified Ideographs).  Entry N is the ASCII (pinyin) replacement, with a
# trailing space, for codepoint U+64NN; '[?]' marks unmapped characters.
# (The stray dataset metadata that had been fused onto this line is removed.)
data = (
'Chan ', # 0x00
'Ge ', # 0x01
'Lou ', # 0x02
'Zong ', # 0x03
'Geng ', # 0x04
'Jiao ', # 0x05
'Gou ', # 0x06
'Qin ', # 0x07
'Yong ', # 0x08
'Que ', # 0x09
'Chou ', # 0x0a
'Chi ', # 0x0b
'Zhan ', # 0x0c
'Sun ', # 0x0d
'Sun ', # 0x0e
'Bo ', # 0x0f
'Chu ', # 0x10
'Rong ', # 0x11
'Beng ', # 0x12
'Cuo ', # 0x13
'Sao ', # 0x14
'Ke ', # 0x15
'Yao ', # 0x16
'Dao ', # 0x17
'Zhi ', # 0x18
'Nu ', # 0x19
'Xie ', # 0x1a
'Jian ', # 0x1b
'Sou ', # 0x1c
'Qiu ', # 0x1d
'Gao ', # 0x1e
'Xian ', # 0x1f
'Shuo ', # 0x20
'Sang ', # 0x21
'Jin ', # 0x22
'Mie ', # 0x23
'E ', # 0x24
'Chui ', # 0x25
'Nuo ', # 0x26
'Shan ', # 0x27
'Ta ', # 0x28
'Jie ', # 0x29
'Tang ', # 0x2a
'Pan ', # 0x2b
'Ban ', # 0x2c
'Da ', # 0x2d
'Li ', # 0x2e
'Tao ', # 0x2f
'Hu ', # 0x30
'Zhi ', # 0x31
'Wa ', # 0x32
'Xia ', # 0x33
'Qian ', # 0x34
'Wen ', # 0x35
'Qiang ', # 0x36
'Tian ', # 0x37
'Zhen ', # 0x38
'E ', # 0x39
'Xi ', # 0x3a
'Nuo ', # 0x3b
'Quan ', # 0x3c
'Cha ', # 0x3d
'Zha ', # 0x3e
'Ge ', # 0x3f
'Wu ', # 0x40
'En ', # 0x41
'She ', # 0x42
'Kang ', # 0x43
'She ', # 0x44
'Shu ', # 0x45
'Bai ', # 0x46
'Yao ', # 0x47
'Bin ', # 0x48
'Sou ', # 0x49
'Tan ', # 0x4a
'Sa ', # 0x4b
'Chan ', # 0x4c
'Suo ', # 0x4d
'Liao ', # 0x4e
'Chong ', # 0x4f
'Chuang ', # 0x50
'Guo ', # 0x51
'Bing ', # 0x52
'Feng ', # 0x53
'Shuai ', # 0x54
'Di ', # 0x55
'Qi ', # 0x56
'Sou ', # 0x57
'Zhai ', # 0x58
'Lian ', # 0x59
'Tang ', # 0x5a
'Chi ', # 0x5b
'Guan ', # 0x5c
'Lu ', # 0x5d
'Luo ', # 0x5e
'Lou ', # 0x5f
'Zong ', # 0x60
'Gai ', # 0x61
'Hu ', # 0x62
'Zha ', # 0x63
'Chuang ', # 0x64
'Tang ', # 0x65
'Hua ', # 0x66
'Cui ', # 0x67
'Nai ', # 0x68
'Mo ', # 0x69
'Jiang ', # 0x6a
'Gui ', # 0x6b
'Ying ', # 0x6c
'Zhi ', # 0x6d
'Ao ', # 0x6e
'Zhi ', # 0x6f
'Nie ', # 0x70
'Man ', # 0x71
'Shan ', # 0x72
'Kou ', # 0x73
'Shu ', # 0x74
'Suo ', # 0x75
'Tuan ', # 0x76
'Jiao ', # 0x77
'Mo ', # 0x78
'Mo ', # 0x79
'Zhe ', # 0x7a
'Xian ', # 0x7b
'Keng ', # 0x7c
'Piao ', # 0x7d
'Jiang ', # 0x7e
'Yin ', # 0x7f
'Gou ', # 0x80
'Qian ', # 0x81
'Lue ', # 0x82
'Ji ', # 0x83
'Ying ', # 0x84
'Jue ', # 0x85
'Pie ', # 0x86
'Pie ', # 0x87
'Lao ', # 0x88
'Dun ', # 0x89
'Xian ', # 0x8a
'Ruan ', # 0x8b
'Kui ', # 0x8c
'Zan ', # 0x8d
'Yi ', # 0x8e
'Xun ', # 0x8f
'Cheng ', # 0x90
'Cheng ', # 0x91
'Sa ', # 0x92
'Nao ', # 0x93
'Heng ', # 0x94
'Si ', # 0x95
'Qian ', # 0x96
'Huang ', # 0x97
'Da ', # 0x98
'Zun ', # 0x99
'Nian ', # 0x9a
'Lin ', # 0x9b
'Zheng ', # 0x9c
'Hui ', # 0x9d
'Zhuang ', # 0x9e
'Jiao ', # 0x9f
'Ji ', # 0xa0
'Cao ', # 0xa1
'Dan ', # 0xa2
'Dan ', # 0xa3
'Che ', # 0xa4
'Bo ', # 0xa5
'Che ', # 0xa6
'Jue ', # 0xa7
'Xiao ', # 0xa8
'Liao ', # 0xa9
'Ben ', # 0xaa
'Fu ', # 0xab
'Qiao ', # 0xac
'Bo ', # 0xad
'Cuo ', # 0xae
'Zhuo ', # 0xaf
'Zhuan ', # 0xb0
'Tuo ', # 0xb1
'Pu ', # 0xb2
'Qin ', # 0xb3
'Dun ', # 0xb4
'Nian ', # 0xb5
'[?] ', # 0xb6
'Xie ', # 0xb7
'Lu ', # 0xb8
'Jiao ', # 0xb9
'Cuan ', # 0xba
'Ta ', # 0xbb
'Han ', # 0xbc
'Qiao ', # 0xbd
'Zhua ', # 0xbe
'Jian ', # 0xbf
'Gan ', # 0xc0
'Yong ', # 0xc1
'Lei ', # 0xc2
'Kuo ', # 0xc3
'Lu ', # 0xc4
'Shan ', # 0xc5
'Zhuo ', # 0xc6
'Ze ', # 0xc7
'Pu ', # 0xc8
'Chuo ', # 0xc9
'Ji ', # 0xca
'Dang ', # 0xcb
'Suo ', # 0xcc
'Cao ', # 0xcd
'Qing ', # 0xce
'Jing ', # 0xcf
'Huan ', # 0xd0
'Jie ', # 0xd1
'Qin ', # 0xd2
'Kuai ', # 0xd3
'Dan ', # 0xd4
'Xi ', # 0xd5
'Ge ', # 0xd6
'Pi ', # 0xd7
'Bo ', # 0xd8
'Ao ', # 0xd9
'Ju ', # 0xda
'Ye ', # 0xdb
'[?] ', # 0xdc
'Mang ', # 0xdd
'Sou ', # 0xde
'Mi ', # 0xdf
'Ji ', # 0xe0
'Tai ', # 0xe1
'Zhuo ', # 0xe2
'Dao ', # 0xe3
'Xing ', # 0xe4
'Lan ', # 0xe5
'Ca ', # 0xe6
'Ju ', # 0xe7
'Ye ', # 0xe8
'Ru ', # 0xe9
'Ye ', # 0xea
'Ye ', # 0xeb
'Ni ', # 0xec
'Hu ', # 0xed
'Ji ', # 0xee
'Bin ', # 0xef
'Ning ', # 0xf0
'Ge ', # 0xf1
'Zhi ', # 0xf2
'Jie ', # 0xf3
'Kuo ', # 0xf4
'Mo ', # 0xf5
'Jian ', # 0xf6
'Xie ', # 0xf7
'Lie ', # 0xf8
'Tan ', # 0xf9
'Bai ', # 0xfa
'Sou ', # 0xfb
'Lu ', # 0xfc
'Lue ', # 0xfd
'Rao ', # 0xfe
'Zhi ', # 0xff
)
| gpl-3.0 |
mancoast/CPythonPyc_test | fail/333_test_abc_loader.py | 34 | 32554 | import importlib
from importlib import abc
from .. import abc as testing_abc
from .. import util
from . import util as source_util
import imp
import inspect
import io
import marshal
import os
import sys
import types
import unittest
import warnings
class SourceOnlyLoaderMock(abc.SourceLoader):

    """Minimal SourceLoader double: serves one fixed source blob for exactly
    one path and identifies every module with that path."""

    # Module-level source executed on load; it records __name__, __file__,
    # __cached__, __package__ and the loader repr into an attribute ``_``
    # so tests can verify what import machinery set on the module.
    source = (b"_ = '::'.join([__name__, __file__, __cached__, __package__, "
              b"repr(__loader__)])")

    def __init__(self, path):
        self.path = path

    def get_data(self, path):
        # Only the configured path is ever expected here.
        assert path == self.path
        return self.source

    def get_filename(self, fullname):
        # Every module name maps to the single configured path.
        return self.path

    def module_repr(self, module):
        return '<module>'
class SourceLoaderMock(SourceOnlyLoaderMock):

    """Loader mock that also serves (and records) bytecode for its module."""

    # Fixed mtime reported for the source; tests vary it to force or skip
    # bytecode regeneration.
    source_mtime = 1

    def __init__(self, path, magic=imp.get_magic()):
        super().__init__(path)
        self.bytecode_path = imp.cache_from_source(self.path)
        self.source_size = len(self.source)
        # Build a valid .pyc payload: magic + mtime + source size + code.
        data = bytearray(magic)
        data.extend(importlib._w_long(self.source_mtime))
        data.extend(importlib._w_long(self.source_size))
        code_object = compile(self.source, self.path, 'exec',
                              dont_inherit=True)
        data.extend(marshal.dumps(code_object))
        self.bytecode = bytes(data)
        # Maps path -> bytes handed to set_data(), for test assertions.
        self.written = {}

    def get_data(self, path):
        # Serve source for the source path, bytecode for the cache path,
        # and fail like a missing file for anything else.
        if path == self.path:
            return super().get_data(path)
        elif path == self.bytecode_path:
            return self.bytecode
        else:
            raise IOError

    def path_stats(self, path):
        assert path == self.path
        return {'mtime': self.source_mtime, 'size': self.source_size}

    def set_data(self, path, data):
        self.written[path] = bytes(data)
        # True only when the expected bytecode path was written.
        return path == self.bytecode_path
class PyLoaderMock(abc.PyLoader):

    """Source-only PyLoader mock driven by a module-name -> path mapping."""

    # Globals that should be defined for all modules.
    source = (b"_ = '::'.join([__name__, __file__, __package__, "
              b"repr(__loader__)])")

    def __init__(self, data):
        """Take a dict of 'module_name: path' pairings.

        Paths should have no file extension, allowing packages to be denoted by
        ending in '__init__'.

        """
        self.module_paths = data
        self.path_to_module = {val: key for key, val in data.items()}

    def get_data(self, path):
        # Unknown paths behave like missing files.
        if path not in self.path_to_module:
            raise IOError
        return self.source

    def is_package(self, name):
        # A module is a package iff its extension-less filename is __init__.
        filename = os.path.basename(self.get_filename(name))
        return os.path.splitext(filename)[0] == '__init__'

    def source_path(self, name):
        try:
            return self.module_paths[name]
        except KeyError:
            raise ImportError

    def get_filename(self, name):
        """Silence deprecation warning."""
        # PyLoader.get_filename() is deprecated; capture and assert exactly
        # one DeprecationWarning while delegating to the real implementation.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            path = super().get_filename(name)
            assert len(w) == 1
            assert issubclass(w[0].category, DeprecationWarning)
            return path

    def module_repr(self):
        return '<module>'
class PyLoaderCompatMock(PyLoaderMock):

    """Mock following the documented recipe for writing a PyLoader subclass
    that stays compatible from Python 3.1 onwards: get_filename() is the
    primary API and source_path() is implemented in terms of it."""

    def get_filename(self, fullname):
        # Primary lookup; an unknown module surfaces as ImportError.
        try:
            return self.module_paths[fullname]
        except KeyError:
            raise ImportError

    def source_path(self, fullname):
        # Legacy hook: delegate to get_filename(), mapping failure to None.
        try:
            return self.get_filename(fullname)
        except ImportError:
            return None
class PyPycLoaderMock(abc.PyPycLoader, PyLoaderMock):
default_mtime = 1
def __init__(self, source, bc={}):
"""Initialize mock.
'bc' is a dict keyed on a module's name. The value is dict with
possible keys of 'path', 'mtime', 'magic', and 'bc'. Except for 'path',
each of those keys control if any part of created bytecode is to
deviate from default values.
"""
super().__init__(source)
self.module_bytecode = {}
self.path_to_bytecode = {}
self.bytecode_to_path = {}
for name, data in bc.items():
self.path_to_bytecode[data['path']] = name
self.bytecode_to_path[name] = data['path']
magic = data.get('magic', imp.get_magic())
mtime = importlib._w_long(data.get('mtime', self.default_mtime))
source_size = importlib._w_long(len(self.source) & 0xFFFFFFFF)
if 'bc' in data:
bc = data['bc']
else:
bc = self.compile_bc(name)
self.module_bytecode[name] = magic + mtime + source_size + bc
def compile_bc(self, name):
source_path = self.module_paths.get(name, '<test>') or '<test>'
code = compile(self.source, source_path, 'exec')
return marshal.dumps(code)
def source_mtime(self, name):
if name in self.module_paths:
return self.default_mtime
elif name in self.module_bytecode:
return None
else:
raise ImportError
def bytecode_path(self, name):
try:
return self.bytecode_to_path[name]
except KeyError:
if name in self.module_paths:
return None
else:
raise ImportError
def write_bytecode(self, name, bytecode):
self.module_bytecode[name] = bytecode
return True
def get_data(self, path):
if path in self.path_to_module:
return super().get_data(path)
elif path in self.path_to_bytecode:
name = self.path_to_bytecode[path]
return self.module_bytecode[name]
else:
raise IOError
def is_package(self, name):
try:
return super().is_package(name)
except TypeError:
return '__init__' in self.bytecode_to_path[name]
def get_code(self, name):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
code_object = super().get_code(name)
assert len(w) == 1
assert issubclass(w[0].category, DeprecationWarning)
return code_object
class PyLoaderTests(testing_abc.LoaderTests):

    """Tests for importlib.abc.PyLoader.

    Each test returns (mock, name) so PyPycLoaderTests can re-run it and then
    verify the bytecode written as a side effect.
    """

    # Loader-mock factory; subclasses substitute their own mock class.
    mocker = PyLoaderMock

    def eq_attrs(self, ob, **kwargs):
        # Assert each named attribute of *ob* equals the expected value.
        for attr, val in kwargs.items():
            found = getattr(ob, attr)
            self.assertEqual(found, val,
                             "{} attribute: {} != {}".format(attr, found, val))

    def test_module(self):
        # A plain top-level module: no __path__, empty __package__.
        name = '<module>'
        path = os.path.join('', 'path', 'to', 'module')
        mock = self.mocker({name: path})
        with util.uncache(name):
            module = mock.load_module(name)
            self.assertIn(name, sys.modules)
        self.eq_attrs(module, __name__=name, __file__=path, __package__='',
                      __loader__=mock)
        self.assertTrue(not hasattr(module, '__path__'))
        return mock, name

    def test_package(self):
        # A package ('__init__' path): __path__ set, __package__ == name.
        name = '<pkg>'
        path = os.path.join('path', 'to', name, '__init__')
        mock = self.mocker({name: path})
        with util.uncache(name):
            module = mock.load_module(name)
            self.assertIn(name, sys.modules)
        self.eq_attrs(module, __name__=name, __file__=path,
                      __path__=[os.path.dirname(path)], __package__=name,
                      __loader__=mock)
        return mock, name

    def test_lacking_parent(self):
        # A submodule loads even when its parent package is not imported.
        name = 'pkg.mod'
        path = os.path.join('path', 'to', 'pkg', 'mod')
        mock = self.mocker({name: path})
        with util.uncache(name):
            module = mock.load_module(name)
            self.assertIn(name, sys.modules)
        self.eq_attrs(module, __name__=name, __file__=path, __package__='pkg',
                      __loader__=mock)
        self.assertFalse(hasattr(module, '__path__'))
        return mock, name

    def test_module_reuse(self):
        # An existing entry in sys.modules must be reused, not replaced.
        name = 'mod'
        path = os.path.join('path', 'to', 'mod')
        module = imp.new_module(name)
        mock = self.mocker({name: path})
        with util.uncache(name):
            sys.modules[name] = module
            loaded_module = mock.load_module(name)
            self.assertIs(loaded_module, module)
            self.assertIs(sys.modules[name], module)
        return mock, name

    def test_state_after_failure(self):
        # A failed exec must leave the pre-existing module object intact.
        name = "mod"
        module = imp.new_module(name)
        module.blah = None
        mock = self.mocker({name: os.path.join('path', 'to', 'mod')})
        mock.source = b"1/0"
        with util.uncache(name):
            sys.modules[name] = module
            with self.assertRaises(ZeroDivisionError):
                mock.load_module(name)
            self.assertIs(sys.modules[name], module)
            self.assertTrue(hasattr(module, 'blah'))
        return mock

    def test_unloadable(self):
        # A failed load of a *new* module must not leave it in sys.modules.
        name = "mod"
        mock = self.mocker({name: os.path.join('path', 'to', 'mod')})
        mock.source = b"1/0"
        with util.uncache(name):
            with self.assertRaises(ZeroDivisionError):
                mock.load_module(name)
            self.assertNotIn(name, sys.modules)
        return mock
class PyLoaderCompatTests(PyLoaderTests):

    """Test that the suggested code to make a loader that is compatible from
    Python 3.1 forward works."""

    # Reuse every PyLoaderTests test, driven by the 3.1-compat mock.
    mocker = PyLoaderCompatMock
class PyLoaderInterfaceTests(unittest.TestCase):

    """Tests for importlib.abc.PyLoader to make sure that when source_path()
    doesn't return a path everything works as expected."""

    def test_no_source_path(self):
        # No source path should lead to ImportError.
        name = 'mod'
        mock = PyLoaderMock({})
        with util.uncache(name), self.assertRaises(ImportError):
            mock.load_module(name)

    def test_source_path_is_None(self):
        # An explicit None path must fail the same way as a missing one.
        name = 'mod'
        mock = PyLoaderMock({name: None})
        with util.uncache(name), self.assertRaises(ImportError):
            mock.load_module(name)

    def test_get_filename_with_source_path(self):
        # get_filename() should return what source_path() returns.
        name = 'mod'
        path = os.path.join('path', 'to', 'source')
        mock = PyLoaderMock({name: path})
        with util.uncache(name):
            self.assertEqual(mock.get_filename(name), path)

    def test_get_filename_no_source_path(self):
        # get_filename() should raise ImportError if source_path returns None.
        name = 'mod'
        mock = PyLoaderMock({name: None})
        with util.uncache(name), self.assertRaises(ImportError):
            mock.get_filename(name)
class PyPycLoaderTests(PyLoaderTests):

    """Tests for importlib.abc.PyPycLoader."""

    mocker = PyPycLoaderMock

    @source_util.writes_bytecode_files
    def verify_bytecode(self, mock, name):
        # Loading must have written well-formed bytecode: current magic,
        # the default mtime, the masked source size, and marshalled code.
        assert name in mock.module_paths
        self.assertIn(name, mock.module_bytecode)
        magic = mock.module_bytecode[name][:4]
        self.assertEqual(magic, imp.get_magic())
        mtime = importlib._r_long(mock.module_bytecode[name][4:8])
        self.assertEqual(mtime, 1)
        source_size = mock.module_bytecode[name][8:12]
        self.assertEqual(len(mock.source) & 0xFFFFFFFF,
                         importlib._r_long(source_size))
        bc = mock.module_bytecode[name][12:]
        self.assertEqual(bc, mock.compile_bc(name))

    # Each successful-load test re-runs the PyLoader variant and then checks
    # the bytecode the mock recorded as a side effect of loading.
    def test_module(self):
        mock, name = super().test_module()
        self.verify_bytecode(mock, name)

    def test_package(self):
        mock, name = super().test_package()
        self.verify_bytecode(mock, name)

    def test_lacking_parent(self):
        mock, name = super().test_lacking_parent()
        self.verify_bytecode(mock, name)

    def test_module_reuse(self):
        mock, name = super().test_module_reuse()
        self.verify_bytecode(mock, name)

    # Failure paths write no bytecode, so there is nothing extra to verify.
    def test_state_after_failure(self):
        super().test_state_after_failure()

    def test_unloadable(self):
        super().test_unloadable()
class PyPycLoaderInterfaceTests(unittest.TestCase):

    """Test for the interface of importlib.abc.PyPycLoader."""

    def get_filename_check(self, src_path, bc_path, expect):
        # Configure a mock with the given source/bytecode paths and assert
        # get_filename() resolves to *expect*.
        name = 'mod'
        mock = PyPycLoaderMock({name: src_path}, {name: {'path': bc_path}})
        with util.uncache(name):
            assert mock.source_path(name) == src_path
            assert mock.bytecode_path(name) == bc_path
            self.assertEqual(mock.get_filename(name), expect)

    def test_filename_with_source_bc(self):
        # When source and bytecode paths present, return the source path.
        self.get_filename_check('source_path', 'bc_path', 'source_path')

    def test_filename_with_source_no_bc(self):
        # With source but no bc, return source path.
        self.get_filename_check('source_path', None, 'source_path')

    def test_filename_with_no_source_bc(self):
        # With not source but bc, return the bc path.
        self.get_filename_check(None, 'bc_path', 'bc_path')

    def test_filename_with_no_source_or_bc(self):
        # With no source or bc, raise ImportError.
        name = 'mod'
        mock = PyPycLoaderMock({name: None}, {name: {'path': None}})
        with util.uncache(name), self.assertRaises(ImportError):
            mock.get_filename(name)
class SkipWritingBytecodeTests(unittest.TestCase):

    """Test that bytecode is properly handled based on
    sys.dont_write_bytecode."""

    @source_util.writes_bytecode_files
    def run_test(self, dont_write_bytecode):
        """Load a module under the given sys.dont_write_bytecode setting and
        check bytecode was written exactly when the flag is false."""
        name = 'mod'
        mock = PyPycLoaderMock({name: os.path.join('path', 'to', 'mod')})
        # BUG FIX: restore the interpreter-wide flag afterwards so a failure
        # here cannot leak state into unrelated tests.
        original_flag = sys.dont_write_bytecode
        sys.dont_write_bytecode = dont_write_bytecode
        try:
            with util.uncache(name):
                mock.load_module(name)
            self.assertIsNot(name in mock.module_bytecode, dont_write_bytecode)
        finally:
            sys.dont_write_bytecode = original_flag

    def test_no_bytecode_written(self):
        self.run_test(True)

    def test_bytecode_written(self):
        self.run_test(False)
class RegeneratedBytecodeTests(unittest.TestCase):

    """Test that bytecode is regenerated as expected."""

    @source_util.writes_bytecode_files
    def test_different_magic(self):
        # A different magic number should lead to new bytecode.
        name = 'mod'
        bad_magic = b'\x00\x00\x00\x00'
        assert bad_magic != imp.get_magic()
        mock = PyPycLoaderMock({name: os.path.join('path', 'to', 'mod')},
                               {name: {'path': os.path.join('path', 'to',
                                                            'mod.bytecode'),
                                       'magic': bad_magic}})
        with util.uncache(name):
            mock.load_module(name)
        # The loader must have rewritten the bytecode with the real magic.
        self.assertIn(name, mock.module_bytecode)
        magic = mock.module_bytecode[name][:4]
        self.assertEqual(magic, imp.get_magic())

    @source_util.writes_bytecode_files
    def test_old_mtime(self):
        # Bytecode with an older mtime should be regenerated.
        name = 'mod'
        old_mtime = PyPycLoaderMock.default_mtime - 1
        mock = PyPycLoaderMock({name: os.path.join('path', 'to', 'mod')},
                               {name: {'path': 'path/to/mod.bytecode',
                                       'mtime': old_mtime}})
        with util.uncache(name):
            mock.load_module(name)
        # Regenerated bytecode must carry the source's (newer) mtime.
        self.assertIn(name, mock.module_bytecode)
        mtime = importlib._r_long(mock.module_bytecode[name][4:8])
        self.assertEqual(mtime, PyPycLoaderMock.default_mtime)
class BadBytecodeFailureTests(unittest.TestCase):

    """Test import failures when there is no source and parts of the bytecode
    is bad."""

    def test_bad_magic(self):
        # A bad magic number should lead to an ImportError.
        name = 'mod'
        bad_magic = b'\x00\x00\x00\x00'
        bc = {name:
              {'path': os.path.join('path', 'to', 'mod'),
               'magic': bad_magic}}
        mock = PyPycLoaderMock({name: None}, bc)
        with util.uncache(name), self.assertRaises(ImportError) as cm:
            mock.load_module(name)
        self.assertEqual(cm.exception.name, name)

    def test_no_bytecode(self):
        # Missing code object bytecode should lead to an EOFError.
        name = 'mod'
        bc = {name: {'path': os.path.join('path', 'to', 'mod'), 'bc': b''}}
        mock = PyPycLoaderMock({name: None}, bc)
        with util.uncache(name), self.assertRaises(EOFError):
            mock.load_module(name)

    def test_bad_bytecode(self):
        # Malformed code object bytecode should lead to a ValueError.
        name = 'mod'
        bc = {name: {'path': os.path.join('path', 'to', 'mod'), 'bc': b'1234'}}
        mock = PyPycLoaderMock({name: None}, bc)
        with util.uncache(name), self.assertRaises(ValueError):
            mock.load_module(name)
def raise_ImportError(*args, **kwargs):
    """Stub accepting any call signature that always raises ImportError."""
    raise ImportError
class MissingPathsTests(unittest.TestCase):

    """Test what happens when a source or bytecode path does not exist (either
    from *_path returning None or raising ImportError)."""

    def test_source_path_None(self):
        # Bytecode should be used when source_path returns None, along with
        # __file__ being set to the bytecode path.
        name = 'mod'
        bytecode_path = 'path/to/mod'
        mock = PyPycLoaderMock({name: None}, {name: {'path': bytecode_path}})
        with util.uncache(name):
            module = mock.load_module(name)
            self.assertEqual(module.__file__, bytecode_path)

    # Testing for bytecode_path returning None handled by all tests where no
    # bytecode initially exists.

    def test_all_paths_None(self):
        # If all *_path methods return None, raise ImportError.
        name = 'mod'
        mock = PyPycLoaderMock({name: None})
        with util.uncache(name), self.assertRaises(ImportError) as cm:
            mock.load_module(name)
        self.assertEqual(cm.exception.name, name)

    def test_source_path_ImportError(self):
        # An ImportError from source_path should trigger an ImportError.
        name = 'mod'
        mock = PyPycLoaderMock({}, {name: {'path': os.path.join('path', 'to',
                                                                'mod')}})
        with util.uncache(name), self.assertRaises(ImportError):
            mock.load_module(name)

    def test_bytecode_path_ImportError(self):
        # An ImportError from bytecode_path should trigger an ImportError.
        name = 'mod'
        mock = PyPycLoaderMock({name: os.path.join('path', 'to', 'mod')})
        bad_meth = types.MethodType(raise_ImportError, mock)
        mock.bytecode_path = bad_meth
        # FIX: dropped the unused 'as cm' binding -- the raised ImportError
        # carries no .name attribute value to assert on here.
        with util.uncache(name), self.assertRaises(ImportError):
            mock.load_module(name)
class SourceLoaderTestHarness(unittest.TestCase):

    """Shared fixture/verification helpers for the SourceLoader tests."""

    def setUp(self, *, is_package=True, **kwargs):
        # Build either a package ('pkg/__init__.py') or a submodule
        # ('pkg/mod.py') fixture, plus the loader mock under test.
        self.package = 'pkg'
        if is_package:
            self.path = os.path.join(self.package, '__init__.py')
            self.name = self.package
        else:
            module_name = 'mod'
            self.path = os.path.join(self.package, '.'.join(['mod', 'py']))
            self.name = '.'.join([self.package, module_name])
        self.cached = imp.cache_from_source(self.path)
        self.loader = self.loader_mock(self.path, **kwargs)

    def verify_module(self, module):
        # Check the import-set attributes, plus the values the mock's
        # module-level source recorded into ``module._`` at exec time
        # (see SourceOnlyLoaderMock.source).
        self.assertEqual(module.__name__, self.name)
        self.assertEqual(module.__file__, self.path)
        self.assertEqual(module.__cached__, self.cached)
        self.assertEqual(module.__package__, self.package)
        self.assertEqual(module.__loader__, self.loader)
        values = module._.split('::')
        self.assertEqual(values[0], self.name)
        self.assertEqual(values[1], self.path)
        self.assertEqual(values[2], self.cached)
        self.assertEqual(values[3], self.package)
        self.assertEqual(values[4], repr(self.loader))

    def verify_code(self, code_object):
        # Exec the code object into a fresh module and verify the result.
        module = imp.new_module(self.name)
        module.__file__ = self.path
        module.__cached__ = self.cached
        module.__package__ = self.package
        module.__loader__ = self.loader
        module.__path__ = []
        exec(code_object, module.__dict__)
        self.verify_module(module)
class SourceOnlyLoaderTests(SourceLoaderTestHarness):

    """Test importlib.abc.SourceLoader for source-only loading.

    Reload testing is subsumed by the tests for
    importlib.util.module_for_loader.

    """

    loader_mock = SourceOnlyLoaderMock

    def test_get_source(self):
        # Verify the source code is returned as a string.
        # If an IOError is raised by get_data then raise ImportError.
        expected_source = self.loader.source.decode('utf-8')
        self.assertEqual(self.loader.get_source(self.name), expected_source)
        def raise_IOError(path):
            raise IOError
        self.loader.get_data = raise_IOError
        with self.assertRaises(ImportError) as cm:
            self.loader.get_source(self.name)
        self.assertEqual(cm.exception.name, self.name)

    def test_is_package(self):
        # Properly detect when loading a package.
        self.setUp(is_package=False)
        self.assertFalse(self.loader.is_package(self.name))
        self.setUp(is_package=True)
        self.assertTrue(self.loader.is_package(self.name))
        # '<pkg>.__init__' names the module file itself, not a package.
        self.assertFalse(self.loader.is_package(self.name + '.__init__'))

    def test_get_code(self):
        # Verify the code object is created.
        code_object = self.loader.get_code(self.name)
        self.verify_code(code_object)

    def test_load_module(self):
        # Loading a module should set __name__, __loader__, __package__,
        # __path__ (for packages), __file__, and __cached__.
        # The module should also be put into sys.modules.
        with util.uncache(self.name):
            module = self.loader.load_module(self.name)
            self.verify_module(module)
            self.assertEqual(module.__path__, [os.path.dirname(self.path)])
            self.assertIn(self.name, sys.modules)

    def test_package_settings(self):
        # __package__ needs to be set, while __path__ is set on if the module
        # is a package.
        # Testing the values for a package are covered by test_load_module.
        self.setUp(is_package=False)
        with util.uncache(self.name):
            module = self.loader.load_module(self.name)
            self.verify_module(module)
            self.assertTrue(not hasattr(module, '__path__'))

    def test_get_source_encoding(self):
        # Source is considered encoded in UTF-8 by default unless otherwise
        # specified by an encoding line.
        source = "_ = 'ü'"
        self.loader.source = source.encode('utf-8')
        returned_source = self.loader.get_source(self.name)
        self.assertEqual(returned_source, source)
        # An explicit coding cookie must be honoured when decoding.
        source = "# coding: latin-1\n_ = ü"
        self.loader.source = source.encode('latin-1')
        returned_source = self.loader.get_source(self.name)
        self.assertEqual(returned_source, source)
@unittest.skipIf(sys.dont_write_bytecode, "sys.dont_write_bytecode is true")
class SourceLoaderBytecodeTests(SourceLoaderTestHarness):

    """Test importlib.abc.SourceLoader's use of bytecode.

    Source-only testing handled by SourceOnlyLoaderTests.

    """

    loader_mock = SourceLoaderMock

    def verify_code(self, code_object, *, bytecode_written=False):
        super().verify_code(code_object)
        if bytecode_written:
            # set_data() must have been handed a full .pyc payload for the
            # cache path: magic + mtime + source size + marshalled code.
            self.assertIn(self.cached, self.loader.written)
            data = bytearray(imp.get_magic())
            data.extend(importlib._w_long(self.loader.source_mtime))
            data.extend(importlib._w_long(self.loader.source_size))
            data.extend(marshal.dumps(code_object))
            self.assertEqual(self.loader.written[self.cached], bytes(data))

    def test_code_with_everything(self):
        # When everything should work.
        code_object = self.loader.get_code(self.name)
        self.verify_code(code_object)

    def test_no_bytecode(self):
        # If no bytecode exists then move on to the source.
        self.loader.bytecode_path = "<does not exist>"
        # Sanity check
        with self.assertRaises(IOError):
            bytecode_path = imp.cache_from_source(self.path)
            self.loader.get_data(bytecode_path)
        code_object = self.loader.get_code(self.name)
        self.verify_code(code_object, bytecode_written=True)

    def test_code_bad_timestamp(self):
        # Bytecode is only used when the timestamp matches the source EXACTLY.
        for source_mtime in (0, 2):
            assert source_mtime != self.loader.source_mtime
            original = self.loader.source_mtime
            self.loader.source_mtime = source_mtime
            # If bytecode is used then EOFError would be raised by marshal.
            self.loader.bytecode = self.loader.bytecode[8:]
            code_object = self.loader.get_code(self.name)
            self.verify_code(code_object, bytecode_written=True)
            self.loader.source_mtime = original

    def test_code_bad_magic(self):
        # Skip over bytecode with a bad magic number.
        self.setUp(magic=b'0000')
        # If bytecode is used then EOFError would be raised by marshal.
        self.loader.bytecode = self.loader.bytecode[8:]
        code_object = self.loader.get_code(self.name)
        self.verify_code(code_object, bytecode_written=True)

    def test_dont_write_bytecode(self):
        # Bytecode is not written if sys.dont_write_bytecode is true.
        # Can assume it is false already thanks to the skipIf class decorator.
        try:
            sys.dont_write_bytecode = True
            self.loader.bytecode_path = "<does not exist>"
            code_object = self.loader.get_code(self.name)
            self.assertNotIn(self.cached, self.loader.written)
        finally:
            sys.dont_write_bytecode = False

    def test_no_set_data(self):
        # If set_data is not defined, one can still read bytecode.
        self.setUp(magic=b'0000')
        original_set_data = self.loader.__class__.set_data
        try:
            del self.loader.__class__.set_data
            code_object = self.loader.get_code(self.name)
            self.verify_code(code_object)
        finally:
            self.loader.__class__.set_data = original_set_data

    def test_set_data_raises_exceptions(self):
        # Raising NotImplementedError or IOError is okay for set_data.
        def raise_exception(exc):
            def closure(*args, **kwargs):
                raise exc
            return closure
        self.setUp(magic=b'0000')
        self.loader.set_data = raise_exception(NotImplementedError)
        code_object = self.loader.get_code(self.name)
        self.verify_code(code_object)
class SourceLoaderGetSourceTests(unittest.TestCase):

    """Tests for importlib.abc.SourceLoader.get_source()."""

    def test_default_encoding(self):
        # Without a coding cookie the source is decoded as UTF-8.
        loader = SourceOnlyLoaderMock('mod.file')
        text = 'x = "ü"'
        loader.source = text.encode('utf-8')
        self.assertEqual(loader.get_source('mod'), text)

    def test_decoded_source(self):
        # A coding cookie in the source selects the decoding codec.
        loader = SourceOnlyLoaderMock("mod.file")
        text = "# coding: Latin-1\nx='ü'"
        assert text.encode('latin-1') != text.encode('utf-8')
        loader.source = text.encode('latin-1')
        self.assertEqual(loader.get_source('mod'), text)

    def test_universal_newlines(self):
        # PEP 302 says universal newlines should be used.
        loader = SourceOnlyLoaderMock('mod.file')
        text = "x = 42\r\ny = -13\r\n"
        loader.source = text.encode('utf-8')
        expect = io.IncrementalNewlineDecoder(None, True).decode(text)
        self.assertEqual(loader.get_source('mod'), expect)
class AbstractMethodImplTests(unittest.TestCase):

    """Test the concrete abstractmethod implementations."""

    # Each nested class makes one ABC concrete by overriding its abstract
    # methods with stubs that just call the (NotImplementedError-raising)
    # base implementation.

    class MetaPathFinder(abc.MetaPathFinder):
        def find_module(self, fullname, path):
            super().find_module(fullname, path)

    class PathEntryFinder(abc.PathEntryFinder):
        def find_module(self, _):
            super().find_module(_)

        def find_loader(self, _):
            super().find_loader(_)

    class Finder(abc.Finder):
        def find_module(self, fullname, path):
            super().find_module(fullname, path)

    class Loader(abc.Loader):
        def load_module(self, fullname):
            super().load_module(fullname)

        def module_repr(self, module):
            super().module_repr(module)

    class ResourceLoader(Loader, abc.ResourceLoader):
        def get_data(self, _):
            super().get_data(_)

    class InspectLoader(Loader, abc.InspectLoader):
        def is_package(self, _):
            super().is_package(_)

        def get_code(self, _):
            super().get_code(_)

        def get_source(self, _):
            super().get_source(_)

    class ExecutionLoader(InspectLoader, abc.ExecutionLoader):
        def get_filename(self, _):
            super().get_filename(_)

    class SourceLoader(ResourceLoader, ExecutionLoader, abc.SourceLoader):
        pass

    class PyLoader(ResourceLoader, InspectLoader, abc.PyLoader):
        def source_path(self, _):
            super().source_path(_)

    class PyPycLoader(PyLoader, abc.PyPycLoader):
        def bytecode_path(self, _):
            super().bytecode_path(_)

        def source_mtime(self, _):
            super().source_mtime(_)

        def write_bytecode(self, _, _2):
            super().write_bytecode(_, _2)

    def raises_NotImplementedError(self, ins, *args):
        """Assert that each method named in *args* raises NotImplementedError
        when called on *ins* with the right number of placeholder args."""
        for method_name in args:
            method = getattr(ins, method_name)
            arg_count = len(inspect.getfullargspec(method)[0]) - 1
            # BUG FIX: the original rebound ``args`` here, shadowing the
            # parameter being iterated over; use a distinct name.
            placeholder_args = [''] * arg_count
            try:
                method(*placeholder_args)
            except NotImplementedError:
                pass
            else:
                msg = "{}.{} did not raise NotImplementedError"
                self.fail(msg.format(ins.__class__.__name__, method_name))

    def test_Loader(self):
        self.raises_NotImplementedError(self.Loader(), 'load_module')

    # XXX misplaced; should be somewhere else
    def test_Finder(self):
        self.raises_NotImplementedError(self.Finder(), 'find_module')

    def test_ResourceLoader(self):
        self.raises_NotImplementedError(self.ResourceLoader(), 'load_module',
                                        'get_data')

    def test_InspectLoader(self):
        self.raises_NotImplementedError(self.InspectLoader(), 'load_module',
                                        'is_package', 'get_code', 'get_source')

    def test_ExecutionLoader(self):
        self.raises_NotImplementedError(self.ExecutionLoader(), 'load_module',
                                        'is_package', 'get_code', 'get_source',
                                        'get_filename')

    def test_SourceLoader(self):
        ins = self.SourceLoader()
        # Required abstractmethods.
        self.raises_NotImplementedError(ins, 'get_filename', 'get_data')
        # Optional abstractmethods.
        self.raises_NotImplementedError(ins, 'path_stats', 'set_data')

    def test_PyLoader(self):
        self.raises_NotImplementedError(self.PyLoader(), 'source_path',
                                        'get_data', 'is_package')

    def test_PyPycLoader(self):
        self.raises_NotImplementedError(self.PyPycLoader(), 'source_path',
                                        'source_mtime', 'bytecode_path',
                                        'write_bytecode')
def test_main():
    """Run every test class in this module under regrtest's runner."""
    from test.support import run_unittest
    test_classes = (PyLoaderTests, PyLoaderCompatTests,
                    PyLoaderInterfaceTests,
                    PyPycLoaderTests, PyPycLoaderInterfaceTests,
                    SkipWritingBytecodeTests, RegeneratedBytecodeTests,
                    BadBytecodeFailureTests, MissingPathsTests,
                    SourceOnlyLoaderTests,
                    SourceLoaderBytecodeTests,
                    SourceLoaderGetSourceTests,
                    AbstractMethodImplTests)
    run_unittest(*test_classes)
if __name__ == '__main__':
    # Support running the test file directly.
    test_main()
| gpl-3.0 |
MenZil/kuma | vendor/packages/translate/misc/autoencode.py | 25 | 2334 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Supports a hybrid Unicode string that knows which encoding is preferable,
and uses this when converting to a string."""
# Python 3 compatibility: ``unicode`` only exists as a builtin on Python 2;
# on Python 3 alias it to ``str`` so the rest of the module works unchanged.
try:
    unicode
except NameError:
    unicode = str
class autoencode(unicode):
    """Unicode string subclass that remembers its preferred encoding.

    The preferred encoding is stored on the ``encoding`` attribute and is
    used by ``__str__`` to serialize the string. ``encoding`` may be None,
    in which case plain unicode behaviour applies.
    """
    def __new__(newtype, string=u"", encoding=None, errors=None):
        # Branch 1: input is already unicode — copy it, carrying over the
        # encoding from an existing autoencode when none is given explicitly.
        if isinstance(string, unicode):
            if errors is None:
                newstring = unicode.__new__(newtype, string)
            else:
                newstring = unicode.__new__(newtype, string, errors=errors)
            if encoding is None and isinstance(string, autoencode):
                newstring.encoding = string.encoding
            else:
                newstring.encoding = encoding
        # Branch 2: input is bytes (or another object) — decode it using
        # whichever of encoding/errors were supplied.
        else:
            if errors is None and encoding is None:
                newstring = unicode.__new__(newtype, string)
            elif errors is None:
                try:
                    newstring = unicode.__new__(newtype, string, encoding)
                except LookupError as e:
                    # Unknown codec name: surface it as a ValueError.
                    raise ValueError(str(e))
            elif encoding is None:
                newstring = unicode.__new__(newtype, string, errors)
            else:
                newstring = unicode.__new__(newtype, string, encoding, errors)
            newstring.encoding = encoding
        return newstring
    def join(self, seq):
        # Keep the result an autoencode rather than a plain unicode string.
        return autoencode(super(autoencode, self).join(seq))
    def __str__(self):
        if self.encoding is None:
            return super(autoencode, self).__str__()
        else:
            # NOTE(review): on Python 3 ``encode`` returns bytes, so this
            # branch would make ``__str__`` return bytes — appears to be
            # Python 2 behaviour carried over; confirm before relying on it.
            return self.encode(self.encoding)
| mpl-2.0 |
khaeusler/website | website_sale_order_company/models/website.py | 33 | 1518 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
class website(orm.Model):
    """Extend ``website.sale_get_order`` to stamp the order's company.

    After the standard order lookup/creation, the company is derived from
    the products on the order (via ``get_products_company``) and written
    back onto the sale order.
    """
    _inherit = 'website'
    def sale_get_order(
        self, cr, uid, ids, force_create=False, code=None,
        update_pricelist=None, context=None
    ):
        # Delegate to the upstream implementation first.
        order = super(website, self).sale_get_order(
            cr, uid, ids, force_create=force_create, code=code,
            update_pricelist=update_pricelist, context=context)
        if order:
            # get_products_company is defined elsewhere in this module's
            # addon; it may return a falsy value when no company applies.
            company = order.get_products_company()
            if company:
                order.write({'company_id': company.id})
        return order
| agpl-3.0 |
dzorlu/sdc-segmentation | train.py | 1 | 4118 | import sys
import tensorflow as tf
from tensorflow.python.ops import math_ops
sys.path.append("slim/")
slim = tf.contrib.slim
TRAIN_DIR = "/tmp/tf"
class Trainer(object):
    """Builds the segmentation loss/summaries and drives slim's train loop.

    Typical usage: construct with the class of a TF optimizer, call
    ``build`` with model logits and labels, then ``train``.
    """
    def __init__(self, nb_classes, optimizer, learning_rate):
        """
        Args:
            nb_classes: number of segmentation classes.
            optimizer: optimizer *class* (e.g. ``tf.train.AdamOptimizer``);
                instantiated here with ``learning_rate``.
            learning_rate: float or placeholder tensor.
        """
        self.nb_classes = nb_classes
        # learning rate can be a placeholder tensor
        self.learning_rate = learning_rate
        self.optimizer = optimizer(learning_rate)
        self.train_op = None
        self.prediction = None
    def build(self, predictions, labels, one_hot=False):
        """Create the cross-entropy loss and the train op.

        Args:
            predictions: logits tensor produced by the model.
            labels: dense class-id labels when ``one_hot`` is True,
                otherwise labels already one-hot encoded.
            one_hot: whether labels must be one-hot encoded here.
        """
        with tf.name_scope('training'):
            if one_hot:
                labels = tf.one_hot(labels, depth=self.nb_classes)
                labels = tf.squeeze(labels, axis=2)
                # Resize logits to the label resolution before the loss.
                label_shape = tf.shape(labels)[:2]
                predictions = tf.image.resize_bilinear(predictions, label_shape, name='resize_predictions')
            else:
                # BUG FIX: was ``self.nb_clasess`` (typo), which raised
                # AttributeError whenever one_hot=False.
                labels = tf.reshape(labels, (-1, self.nb_classes))
                predictions = tf.reshape(predictions, (-1, self.nb_classes))
            self.prediction = predictions
            labels = tf.expand_dims(labels, 0)
            print("pred shape {}, label shape {}".format(predictions.get_shape(), labels.get_shape()))
            # wraps the softmax_with_entropy fn. adds it to loss collection
            tf.losses.softmax_cross_entropy(logits=predictions, onehot_labels=labels)
            # include the regularization losses in the loss collection.
            total_loss = tf.losses.get_total_loss()
            self.train_op = slim.learning.create_train_op(total_loss,
                                                          optimizer=self.optimizer)
    def add_summaries(self):
        """Create image/variable/loss summaries; return the merged op."""
        # Add summaries for images, variables and losses.
        global_summaries = set([])
        # image summary: grab the current input image from the dataset op.
        image_summary = tf.get_default_graph().get_tensor_by_name('IteratorGetNext:0')
        image_summary = tf.expand_dims(image_summary, 0)
        image_summary = tf.summary.image('image', image_summary)
        global_summaries.add(image_summary)
        # prediction summary: argmax over classes rendered as an image.
        prediction = tf.argmax(self.prediction, axis=3)
        prediction = tf.cast(prediction, tf.float32)
        prediction = tf.expand_dims(prediction, 3)
        image_summary = tf.summary.image('prediction', prediction)
        global_summaries.add(image_summary)
        for model_var in slim.get_model_variables():
            global_summaries.add(tf.summary.histogram(model_var.op.name, model_var))
        # total loss scalar
        total_loss_tensor = tf.get_default_graph().get_tensor_by_name('training/total_loss:0')
        global_summaries.add(tf.summary.scalar(total_loss_tensor.op.name, total_loss_tensor))
        # Merge all summaries together.
        summary_op = tf.summary.merge(list(global_summaries), name='summary_op')
        return summary_op
    def train(self, iterator,
              filename,
              restore_fn=None,
              _add_summaries=True,
              number_of_steps=10000,
              save_interval_secs=12000,
              same_summaries_secs=120,
              keep_checkpoint_every_n_hours=5):
        """Run the slim training loop.

        Args:
            iterator: tf.data iterator whose initializer is run at startup.
            filename: value fed to the ``training_data/input`` placeholder.
            restore_fn: optional callable(sess) restoring a checkpoint.
            _add_summaries: whether to build and log summaries.
            number_of_steps: total training steps.
            save_interval_secs: seconds between checkpoint saves.
            same_summaries_secs: seconds between summary saves.
            keep_checkpoint_every_n_hours: checkpoint retention policy.
        """
        summary_op = None
        if _add_summaries:
            summary_op = self.add_summaries()
        # Save checkpoints regularly.
        saver = tf.train.Saver(
            keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
        # init fn for the dataset ops and checkpointing
        def initializer_fn(sess):
            input_tensor = tf.get_default_graph().get_tensor_by_name('training_data/input:0')
            sess.run(iterator.initializer, feed_dict={input_tensor: filename})
            if restore_fn:
                restore_fn(sess)
        init_fn = initializer_fn
        # Soft placement allows placing on CPU ops without GPU implementation.
        session_config = tf.ConfigProto(allow_soft_placement=True,
                                        log_device_placement=False)
        # train
        slim.learning.train(train_op=self.train_op,
                            logdir=TRAIN_DIR,
                            session_config=session_config,
                            summary_op=summary_op,
                            init_fn=init_fn,
                            save_interval_secs=save_interval_secs,
                            number_of_steps=number_of_steps,
                            save_summaries_secs=same_summaries_secs,
                            saver=saver)
| mit |
rtucker-mozilla/inventory | libs/jinja_extensions.py | 3 | 1372 | from jinja2 import nodes
from jinja2.ext import Extension
from django.utils.safestring import mark_safe
import traceback
class CsrfExtension(Extension):
    """Jinja2 extension implementing Django's ``{% csrf_token %}`` tag.

    Emits a hidden input carrying the CSRF token taken from the template
    context variable ``csrf_token``.
    """
    # a set of names that trigger the extension.
    tags = set(['csrf_token'])
    def __init__(self, environment):
        self.environment = environment
    def parse(self, parser):
        try:
            token = parser.stream.next()
            return nodes.Output([self.call_method('_render', [nodes.Name('csrf_token', 'load')])]).set_lineno(token.lineno)
        # BUG FIX: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors.
        except Exception:
            traceback.print_exc()
    def _render(self, csrf_token):
        """Helper callback."""
        if csrf_token:
            if csrf_token == 'NOTPROVIDED':
                return mark_safe(u"")
            else:
                return mark_safe(u"<div style='display:none'><input type='hidden' name='csrfmiddlewaretoken' value='%s' /></div>" % (csrf_token))
        else:
            # It's very probable that the token is missing because of
            # misconfiguration, so we raise a warning
            from django.conf import settings
            if settings.DEBUG:
                import warnings
                warnings.warn("A {% csrf_token %} was used in a template, but the context did not provide the value. This is usually caused by not using RequestContext.")
            return u''
csrf_token = CsrfExtension
| bsd-3-clause |
janusnic/wagtail | wagtail/contrib/wagtailapi/tests/test_documents.py | 26 | 14599 | import json
import mock
from django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from wagtail.wagtaildocs.models import Document
from wagtail.contrib.wagtailapi import signal_handlers
class TestDocumentListing(TestCase):
    """Tests for the v1 API documents listing endpoint.

    Covers the response envelope, field selection, filtering, ordering,
    limit/offset pagination and search behaviour.
    """
    fixtures = ['demosite.json']
    def get_response(self, **params):
        """GET the documents listing endpoint with the given query params."""
        return self.client.get(reverse('wagtailapi_v1:documents:listing'), params)
    def get_document_id_list(self, content):
        """Return the ordered list of document ids from a parsed response."""
        return [page['id'] for page in content['documents']]
    # BASIC TESTS
    def test_basic(self):
        response = self.get_response()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-type'], 'application/json')
        # Will crash if the JSON is invalid
        content = json.loads(response.content.decode('UTF-8'))
        # Check that the meta section is there
        self.assertIn('meta', content)
        self.assertIsInstance(content['meta'], dict)
        # Check that the total count is there and correct
        self.assertIn('total_count', content['meta'])
        self.assertIsInstance(content['meta']['total_count'], int)
        self.assertEqual(content['meta']['total_count'], Document.objects.count())
        # Check that the documents section is there
        self.assertIn('documents', content)
        self.assertIsInstance(content['documents'], list)
        # Check that each document has a meta section with type and detail_url attributes
        for document in content['documents']:
            self.assertIn('meta', document)
            self.assertIsInstance(document['meta'], dict)
            self.assertEqual(set(document['meta'].keys()), {'type', 'detail_url'})
            # Type should always be wagtaildocs.Document
            self.assertEqual(document['meta']['type'], 'wagtaildocs.Document')
            # Check detail_url
            self.assertEqual(document['meta']['detail_url'], 'http://localhost/api/v1/documents/%d/' % document['id'])
    # EXTRA FIELDS
    def test_extra_fields_default(self):
        response = self.get_response()
        content = json.loads(response.content.decode('UTF-8'))
        for document in content['documents']:
            self.assertEqual(set(document.keys()), {'id', 'meta', 'title'})
    def test_extra_fields(self):
        response = self.get_response(fields='title,tags')
        content = json.loads(response.content.decode('UTF-8'))
        for document in content['documents']:
            self.assertEqual(set(document.keys()), {'id', 'meta', 'title', 'tags'})
    def test_extra_fields_tags(self):
        response = self.get_response(fields='tags')
        content = json.loads(response.content.decode('UTF-8'))
        for document in content['documents']:
            self.assertIsInstance(document['tags'], list)
    def test_extra_fields_which_are_not_in_api_fields_gives_error(self):
        response = self.get_response(fields='uploaded_by_user')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: uploaded_by_user"})
    def test_extra_fields_unknown_field_gives_error(self):
        response = self.get_response(fields='123,title,abc')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: 123, abc"})
    # FILTERING
    def test_filtering_exact_filter(self):
        response = self.get_response(title='James Joyce')
        content = json.loads(response.content.decode('UTF-8'))
        document_id_list = self.get_document_id_list(content)
        self.assertEqual(document_id_list, [2])
    def test_filtering_on_id(self):
        response = self.get_response(id=10)
        content = json.loads(response.content.decode('UTF-8'))
        document_id_list = self.get_document_id_list(content)
        self.assertEqual(document_id_list, [10])
    def test_filtering_tags(self):
        Document.objects.get(id=3).tags.add('test')
        response = self.get_response(tags='test')
        content = json.loads(response.content.decode('UTF-8'))
        document_id_list = self.get_document_id_list(content)
        self.assertEqual(document_id_list, [3])
    def test_filtering_unknown_field_gives_error(self):
        response = self.get_response(not_a_field='abc')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: not_a_field"})
    # ORDERING
    def test_ordering_by_title(self):
        response = self.get_response(order='title')
        content = json.loads(response.content.decode('UTF-8'))
        document_id_list = self.get_document_id_list(content)
        self.assertEqual(document_id_list, [3, 12, 10, 2, 7, 8, 5, 4, 1, 11, 9, 6])
    def test_ordering_by_title_backwards(self):
        response = self.get_response(order='-title')
        content = json.loads(response.content.decode('UTF-8'))
        document_id_list = self.get_document_id_list(content)
        self.assertEqual(document_id_list, [6, 9, 11, 1, 4, 5, 8, 7, 2, 10, 12, 3])
    def test_ordering_by_random(self):
        response_1 = self.get_response(order='random')
        content_1 = json.loads(response_1.content.decode('UTF-8'))
        document_id_list_1 = self.get_document_id_list(content_1)
        response_2 = self.get_response(order='random')
        content_2 = json.loads(response_2.content.decode('UTF-8'))
        document_id_list_2 = self.get_document_id_list(content_2)
        self.assertNotEqual(document_id_list_1, document_id_list_2)
    def test_ordering_by_random_backwards_gives_error(self):
        response = self.get_response(order='-random')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "cannot order by 'random' (unknown field)"})
    def test_ordering_by_random_with_offset_gives_error(self):
        response = self.get_response(order='random', offset=10)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "random ordering with offset is not supported"})
    def test_ordering_by_unknown_field_gives_error(self):
        response = self.get_response(order='not_a_field')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "cannot order by 'not_a_field' (unknown field)"})
    # LIMIT
    def test_limit_only_two_results_returned(self):
        response = self.get_response(limit=2)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(len(content['documents']), 2)
    def test_limit_total_count(self):
        response = self.get_response(limit=2)
        content = json.loads(response.content.decode('UTF-8'))
        # The total count must not be affected by "limit"
        self.assertEqual(content['meta']['total_count'], Document.objects.count())
    def test_limit_not_integer_gives_error(self):
        response = self.get_response(limit='abc')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "limit must be a positive integer"})
    def test_limit_too_high_gives_error(self):
        response = self.get_response(limit=1000)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "limit cannot be higher than 20"})
    @override_settings(WAGTAILAPI_LIMIT_MAX=10)
    def test_limit_maximum_can_be_changed(self):
        response = self.get_response(limit=20)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "limit cannot be higher than 10"})
    @override_settings(WAGTAILAPI_LIMIT_MAX=2)
    def test_limit_default_changes_with_max(self):
        # The default limit is 20. If WAGTAILAPI_LIMIT_MAX is less than that,
        # the default should change accordingly.
        response = self.get_response()
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(len(content['documents']), 2)
    # OFFSET
    def test_offset_5_usually_appears_5th_in_list(self):
        response = self.get_response()
        content = json.loads(response.content.decode('UTF-8'))
        document_id_list = self.get_document_id_list(content)
        self.assertEqual(document_id_list.index(5), 4)
    def test_offset_5_moves_after_offset(self):
        response = self.get_response(offset=4)
        content = json.loads(response.content.decode('UTF-8'))
        document_id_list = self.get_document_id_list(content)
        self.assertEqual(document_id_list.index(5), 0)
    def test_offset_total_count(self):
        response = self.get_response(offset=10)
        content = json.loads(response.content.decode('UTF-8'))
        # The total count must not be affected by "offset"
        self.assertEqual(content['meta']['total_count'], Document.objects.count())
    def test_offset_not_integer_gives_error(self):
        response = self.get_response(offset='abc')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "offset must be a positive integer"})
    # SEARCH
    def test_search_for_james_joyce(self):
        response = self.get_response(search='james')
        content = json.loads(response.content.decode('UTF-8'))
        document_id_list = self.get_document_id_list(content)
        self.assertEqual(set(document_id_list), set([2]))
    def test_search_when_ordering_gives_error(self):
        response = self.get_response(search='james', order='title')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "ordering with a search query is not supported"})
    @override_settings(WAGTAILAPI_SEARCH_ENABLED=False)
    def test_search_when_disabled_gives_error(self):
        response = self.get_response(search='james')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "search is disabled"})
    def test_search_when_filtering_by_tag_gives_error(self):
        response = self.get_response(search='james', tags='wagtail')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "filtering by tag with a search query is not supported"})
class TestDocumentDetail(TestCase):
    """Tests for the v1 API document detail endpoint."""
    fixtures = ['demosite.json']
    def get_response(self, image_id, **params):
        """GET the detail endpoint for the given document id."""
        return self.client.get(reverse('wagtailapi_v1:documents:detail', args=(image_id, )), params)
    def test_basic(self):
        response = self.get_response(1)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-type'], 'application/json')
        # Will crash if the JSON is invalid
        content = json.loads(response.content.decode('UTF-8'))
        # Check the id field
        self.assertIn('id', content)
        self.assertEqual(content['id'], 1)
        # Check that the meta section is there
        self.assertIn('meta', content)
        self.assertIsInstance(content['meta'], dict)
        # Check the meta type
        self.assertIn('type', content['meta'])
        self.assertEqual(content['meta']['type'], 'wagtaildocs.Document')
        # Check the meta detail_url
        self.assertIn('detail_url', content['meta'])
        self.assertEqual(content['meta']['detail_url'], 'http://localhost/api/v1/documents/1/')
        # Check the meta download_url
        self.assertIn('download_url', content['meta'])
        self.assertEqual(content['meta']['download_url'], 'http://localhost/documents/1/wagtail_by_markyharky.jpg')
        # Check the title field
        self.assertIn('title', content)
        self.assertEqual(content['title'], "Wagtail by mark Harkin")
        # Check the tags field
        self.assertIn('tags', content)
        self.assertEqual(content['tags'], [])
    def test_tags(self):
        Document.objects.get(id=1).tags.add('hello')
        Document.objects.get(id=1).tags.add('world')
        response = self.get_response(1)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertIn('tags', content)
        self.assertEqual(content['tags'], ['hello', 'world'])
    @override_settings(WAGTAILAPI_BASE_URL='http://api.example.com/')
    def test_download_url_with_custom_base_url(self):
        # download_url should be built against WAGTAILAPI_BASE_URL when set.
        response = self.get_response(1)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertIn('download_url', content['meta'])
        self.assertEqual(content['meta']['download_url'], 'http://api.example.com/documents/1/wagtail_by_markyharky.jpg')
@override_settings(
    WAGTAILFRONTENDCACHE={
        'varnish': {
            'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend',
            'LOCATION': 'http://localhost:8000',
        },
    },
    WAGTAILAPI_BASE_URL='http://api.example.com',
)
@mock.patch('wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend.purge')
class TestDocumentCacheInvalidation(TestCase):
    """Verify that saving/deleting a document purges its API detail URL.

    The HTTP purge backend is mocked; each test asserts the purge call.
    """
    fixtures = ['demosite.json']
    @classmethod
    def setUpClass(cls):
        # Signal handlers are registered for the duration of this class only.
        super(TestDocumentCacheInvalidation, cls).setUpClass()
        signal_handlers.register_signal_handlers()
    @classmethod
    def tearDownClass(cls):
        super(TestDocumentCacheInvalidation, cls).tearDownClass()
        signal_handlers.unregister_signal_handlers()
    def test_resave_document_purges(self, purge):
        Document.objects.get(id=5).save()
        purge.assert_any_call('http://api.example.com/api/v1/documents/5/')
    def test_delete_document_purges(self, purge):
        Document.objects.get(id=5).delete()
        purge.assert_any_call('http://api.example.com/api/v1/documents/5/')
| bsd-3-clause |
geodynamics/gale | boost/libs/python/test/numpy.py | 46 | 2433 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
false = 0;  # legacy lowercase boolean aliases used by this old test script
true = 1;
import doctest, numeric_tests
def _count_failures(test_modules = (numeric_tests,)):
    """Run the doctests of each module and return the total failure count."""
    # doctest.testmod returns (failed, attempted); accumulate the failures.
    return sum(doctest.testmod(module)[0] for module in test_modules)
def _run(args = None):
    """Run the numeric/numarray doctest suites; return total failures.

    Exercises the extension against Numeric and numarray (whichever are
    installed), switching modules via numpy_ext.set_module_and_type.
    Returns 0 immediately when neither numeric package is available.
    """
    import sys, numarray_tests, numeric_tests
    if args is not None:
        sys.argv = args
    # See which of the numeric modules are installed
    has_numeric = 0
    try: import Numeric
    except ImportError: pass
    else:
        has_numeric = 1
        m = Numeric
    has_numarray = 0
    try: import numarray
    except ImportError: pass
    else:
        has_numarray = 1
        m = numarray
    # Bail if neither one is installed
    if not (has_numeric or has_numarray):
        return 0
    # test the info routine outside the doctest. See numpy.cpp for an
    # explanation
    import numpy_ext
    if (has_numarray):
        numpy_ext.info(m.array((1,2,3)))
    failures = 0
    #
    # Run tests 4 different ways if both modules are installed, just
    # to show that set_module_and_type() is working properly
    #
    # run all the tests with default module search
    print 'testing default extension module:', \
        numpy_ext.get_module_name() or '[numeric support not installed]'
    failures += _count_failures()
    # test against Numeric if installed
    if has_numeric:
        print 'testing Numeric module explicitly'
        numpy_ext.set_module_and_type('Numeric', 'ArrayType')
        failures += _count_failures()
    if has_numarray:
        print 'testing numarray module explicitly'
        numpy_ext.set_module_and_type('numarray', 'NDArray')
        # Add the _numarray_tests to the list of things to test in
        # this case.
        failures += _count_failures((numarray_tests, numeric_tests))
    # see that we can go back to the default
    numpy_ext.set_module_and_type('', '')
    print 'testing default module again:', \
        numpy_ext.get_module_name() or '[numeric support not installed]'
    failures += _count_failures()
    return failures
if __name__ == '__main__':
    # Exit with the number of doctest failures (0 on success).
    print "running..."
    import sys
    status = _run()
    if (status == 0): print "Done."
    sys.exit(status)
| gpl-2.0 |
openplans/shareabouts-demo | src/sa_web/config.py | 60 | 2400 | import yaml
import os.path
import urllib2
from contextlib import closing
from django.conf import settings
from django.utils.translation import ugettext as _
def get_shareabouts_config(path_or_url):
    """Return the appropriate config wrapper for a local path or a URL.

    A path beginning with ``http://`` or ``https://`` yields a
    ShareaboutsRemoteConfig; anything else a ShareaboutsLocalConfig.
    """
    # Idiom: one startswith call with a tuple of prefixes instead of two.
    if path_or_url.startswith(('http://', 'https://')):
        return ShareaboutsRemoteConfig(path_or_url)
    return ShareaboutsLocalConfig(path_or_url)
def translate(data):
    """Recursively translate ``_(...)``-marked strings in a config tree.

    Dicts and lists are walked recursively; strings wrapped in a ``_()``
    marker are passed through ugettext; everything else is returned as-is.
    """
    # NOTE: removed the dead local ``i18n_data`` (assigned, never used).
    # If it's an object, recurse
    if isinstance(data, dict):
        return dict((key, translate(value))
                    for key, value in data.items())
    # If it's a list, recurse on each item
    elif isinstance(data, list):
        return [translate(item)
                for item in data]
    # If it's a string, translate it when marked, otherwise pass through
    elif isinstance(data, basestring):
        msg = parse_msg(data)
        if msg is not None:
            return _(msg)
        else:
            return data
    else:
        return data
def parse_msg(s):
    """Return the text inside a ``_(...)`` marker, or None if unmarked."""
    text = s.strip()
    if text.startswith('_(') and text.endswith(')'):
        return text[2:-1]
    return None
class _ShareaboutsConfig (object):
    """
    Base class representing Shareabouts configuration options
    """
    # When True, skip the translate() pass over the loaded YAML.
    raw = False
    @property
    def data(self):
        # Lazily load and cache the parsed (and optionally translated) YAML.
        if not hasattr(self, '_yml'):
            with closing(self.config_file()) as config_yml:
                # SECURITY NOTE(review): yaml.load without an explicit Loader
                # can construct arbitrary Python objects; only safe if the
                # config source is trusted. Consider yaml.safe_load.
                self._yml = yaml.load(config_yml)
                if not self.raw:
                    self._yml = translate(self._yml)
        return self._yml
    def __getitem__(self, key):
        return self.data[key]
    def get(self, key, default=None):
        return self.data.get(key, default)
    def items(self):
        return self.data.items()
    def update(self, other):
        # Mutates the cached data in place.
        self.data.update(other)
class ShareaboutsRemoteConfig (_ShareaboutsConfig):
    """Configuration fetched over HTTP from a remote flavor URL."""
    def __init__(self, url):
        self.url = url
    def static_url(self):
        # Static assets live under the flavor's static/ directory.
        return os.path.join(self.url, 'static/')
    def config_file(self):
        # Returns an open file-like object; caller closes it (see base class).
        config_fileurl = os.path.join(self.url, 'config.yml')
        return urllib2.urlopen(config_fileurl)
class ShareaboutsLocalConfig (_ShareaboutsConfig):
    """Configuration read from a local flavor directory."""
    def __init__(self, path):
        self.path = path
    def static_url(self):
        # Local flavors serve static assets through Django's STATIC_URL.
        return settings.STATIC_URL
    def config_file(self):
        # Returns an open file object; caller closes it (see base class).
        config_filename = os.path.join(self.path, 'config.yml')
        return open(config_filename)
| gpl-3.0 |
phil65/SublimeKodi | libs/chardet/euckrfreq.py | 3121 | 45978 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ratio = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0  # threshold used by the distribution analyser
EUCKR_TABLE_SIZE = 2352  # number of entries in EUCKRCharToFreqOrder below
# Char to FreqOrder table ,
EUCKRCharToFreqOrder = ( \
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
#Everything below is of no interest for detection purpose
2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658,
2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674,
2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690,
2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704,
2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720,
2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734,
2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750,
2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765,
2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779,
2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793,
2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809,
2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824,
2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840,
2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856,
1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869,
2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883,
2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899,
2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915,
2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331,
2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945,
2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961,
2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976,
2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992,
2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008,
3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021,
3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037,
3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052,
3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066,
3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080,
3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095,
3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110,
3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124,
3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140,
3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156,
3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172,
3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187,
3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201,
3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217,
3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233,
3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248,
3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264,
3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279,
3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295,
3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311,
3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327,
3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343,
3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359,
3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374,
3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389,
3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405,
3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338,
3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432,
3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446,
3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191,
3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471,
3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486,
1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499,
1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513,
3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525,
3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541,
3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557,
3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573,
3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587,
3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603,
3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618,
3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632,
3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648,
3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663,
3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679,
3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695,
3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583,
1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722,
3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738,
3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753,
3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767,
3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782,
3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796,
3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810,
3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591,
1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836,
3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851,
3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866,
3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880,
3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895,
1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905,
3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921,
3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934,
3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603,
3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964,
3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978,
3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993,
3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009,
4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024,
4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040,
1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055,
4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069,
4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083,
4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098,
4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113,
4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610,
4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142,
4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157,
4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173,
4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189,
4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205,
4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220,
4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234,
4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249,
4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265,
4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279,
4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294,
4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310,
4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326,
4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341,
4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357,
4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371,
4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387,
4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403,
4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418,
4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432,
4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446,
4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461,
4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476,
4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491,
4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507,
4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623,
4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536,
4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551,
4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567,
4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581,
4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627,
4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611,
4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626,
4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642,
4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657,
4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672,
4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687,
1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700,
4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715,
4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731,
4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633,
4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758,
4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773,
4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788,
4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803,
4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817,
4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832,
4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847,
4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863,
4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879,
4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893,
4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909,
4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923,
4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938,
4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954,
4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970,
4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645,
4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999,
5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078,
5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028,
1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042,
5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056,
5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072,
5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087,
5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103,
5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118,
1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132,
5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,
5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161,
5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177,
5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192,
5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206,
1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218,
5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,
5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249,
5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262,
5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,
5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,
5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308,
5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323,
5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338,
5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353,
5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369,
5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385,
5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400,
5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415,
5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430,
5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445,
5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461,
5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477,
5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491,
5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507,
5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523,
5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539,
5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554,
5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570,
1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585,
5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600,
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615,
5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,
5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,
5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,
1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673,
5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688,
5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703,
5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716,
5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729,
5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744,
1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758,
5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773,
1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786,
5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801,
5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815,
5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831,
5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847,
5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862,
5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876,
5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889,
5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,
5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687,
5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,
5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963,
5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,
5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993,
5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009,
6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025,
6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039,
6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055,
6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071,
6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086,
6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102,
6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118,
6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133,
6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147,
6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,
6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,
6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194,
6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,
6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225,
6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,
6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256,
6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024
6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287,
6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699,
6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317,
6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,
6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347,
6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,
6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,
6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,
6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411,
6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425,
6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440,
6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456,
6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,
6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488,
6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266,
6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,
6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535,
6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,
1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,
6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581,
6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597,
6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613,
6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629,
6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644,
1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659,
6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674,
1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689,
6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705,
6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721,
6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736,
1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748,
6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763,
6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779,
6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794,
6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711,
6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825,
6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840,
6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856,
6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872,
6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888,
6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903,
6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918,
6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934,
6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950,
6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,
6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981,
6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996,
6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011,
7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,
7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042,
7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,
7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074,
7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090,
7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106,
7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122,
7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138,
7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154,
7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170,
7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186,
7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202,
7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216,
7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232,
7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248,
7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264,
7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280,
7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296,
7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312,
7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327,
7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343,
7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359,
7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,
7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391,
7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407,
7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423,
7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439,
7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455,
7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,
7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,
7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503,
7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,
7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,
7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551,
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567,
7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583,
7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599,
7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615,
7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631,
7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647,
7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663,
7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679,
7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695,
7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711,
7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727,
7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743,
7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759,
7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,
7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,
7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807,
7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,
7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,
7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,
7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,
7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,
7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,
7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,
7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935,
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951,
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967,
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983,
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999,
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111,
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127,
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,
8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,
8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,
8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,
8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,
8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,
8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,
8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,
8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,
8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,
8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,
8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,
8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,
8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,
8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,
8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,
8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,
8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,
8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,
8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,
8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,
8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,
8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,
8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,
8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,
8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,
8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,
8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,
8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,
8736,8737,8738,8739,8740,8741)
# flake8: noqa
| gpl-3.0 |
espadrine/opera | chromium/src/third_party/webpagereplay/third_party/dns/zone.py | 215 | 31930 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Zones."""
from __future__ import generators
import sys
import dns.exception
import dns.name
import dns.node
import dns.rdataclass
import dns.rdatatype
import dns.rdata
import dns.rrset
import dns.tokenizer
import dns.ttl
class BadZone(dns.exception.DNSException):
    """The zone is malformed.  Base class for zone-validity errors."""
    pass
class NoSOA(BadZone):
    """The zone has no SOA RR at its origin."""
    pass
class NoNS(BadZone):
    """The zone has no NS RRset at its origin."""
    pass
class UnknownOrigin(BadZone):
    """The zone's origin is unknown (no origin given and no $ORIGIN seen)."""
    pass
class Zone(object):
    """A DNS zone.
    A Zone is a mapping from names to nodes. The zone object may be
    treated like a Python dictionary, e.g. zone[name] will retrieve
    the node associated with that name. The I{name} may be a
    dns.name.Name object, or it may be a string. In the either case,
    if the name is relative it is treated as relative to the origin of
    the zone.
    @ivar rdclass: The zone's rdata class; the default is class IN.
    @type rdclass: int
    @ivar origin: The origin of the zone.
    @type origin: dns.name.Name object
    @ivar nodes: A dictionary mapping the names of nodes in the zone to the
    nodes themselves.
    @type nodes: dict
    @ivar relativize: should names in the zone be relativized?
    @type relativize: bool
    @cvar node_factory: the factory used to create a new node
    @type node_factory: class or callable
    """
    node_factory = dns.node.Node
    # __slots__ avoids a per-instance __dict__; zones can hold many objects.
    __slots__ = ['rdclass', 'origin', 'nodes', 'relativize']
    def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True):
        """Initialize a zone object.
        @param origin: The origin of the zone.
        @type origin: dns.name.Name object
        @param rdclass: The zone's rdata class; the default is class IN.
        @type rdclass: int"""
        self.rdclass = rdclass
        self.origin = origin
        self.nodes = {}
        self.relativize = relativize
    def __eq__(self, other):
        """Two zones are equal if they have the same origin, class, and
        nodes.
        @rtype: bool
        """
        if not isinstance(other, Zone):
            return False
        if self.rdclass != other.rdclass or \
           self.origin != other.origin or \
           self.nodes != other.nodes:
            return False
        return True
    def __ne__(self, other):
        """Are two zones not equal?
        @rtype: bool
        """
        return not self.__eq__(other)
    def _validate_name(self, name):
        # Accept text or a dns.name.Name; relative names are interpreted
        # relative to the zone's origin.  (str, unicode) is the Python 2
        # pair of byte/text string types.
        if isinstance(name, (str, unicode)):
            name = dns.name.from_text(name, None)
        elif not isinstance(name, dns.name.Name):
            raise KeyError("name parameter must be convertable to a DNS name")
        if name.is_absolute():
            if not name.is_subdomain(self.origin):
                raise KeyError("name parameter must be a subdomain of the zone origin")
            if self.relativize:
                name = name.relativize(self.origin)
        return name
    def __getitem__(self, key):
        key = self._validate_name(key)
        return self.nodes[key]
    def __setitem__(self, key, value):
        key = self._validate_name(key)
        self.nodes[key] = value
    def __delitem__(self, key):
        key = self._validate_name(key)
        del self.nodes[key]
    # The following expose the Python 2 dict iteration API of self.nodes.
    def __iter__(self):
        return self.nodes.iterkeys()
    def iterkeys(self):
        return self.nodes.iterkeys()
    def keys(self):
        return self.nodes.keys()
    def itervalues(self):
        return self.nodes.itervalues()
    def values(self):
        return self.nodes.values()
    def iteritems(self):
        return self.nodes.iteritems()
    def items(self):
        return self.nodes.items()
    def get(self, key):
        # Returns None when the (validated) name is absent.
        key = self._validate_name(key)
        return self.nodes.get(key)
    def __contains__(self, other):
        # NOTE(review): unlike get()/[], the key is not passed through
        # _validate_name() here, so only already-normalized names match.
        return other in self.nodes
    def find_node(self, name, create=False):
        """Find a node in the zone, possibly creating it.
        @param name: the name of the node to find
        @type name: dns.name.Name object or string
        @param create: should the node be created if it doesn't exist?
        @type create: bool
        @raises KeyError: the name is not known and create was not specified.
        @rtype: dns.node.Node object
        """
        name = self._validate_name(name)
        node = self.nodes.get(name)
        if node is None:
            if not create:
                raise KeyError
            node = self.node_factory()
            self.nodes[name] = node
        return node
    def get_node(self, name, create=False):
        """Get a node in the zone, possibly creating it.
        This method is like L{find_node}, except it returns None instead
        of raising an exception if the node does not exist and creation
        has not been requested.
        @param name: the name of the node to find
        @type name: dns.name.Name object or string
        @param create: should the node be created if it doesn't exist?
        @type create: bool
        @rtype: dns.node.Node object or None
        """
        try:
            node = self.find_node(name, create)
        except KeyError:
            node = None
        return node
    def delete_node(self, name):
        """Delete the specified node if it exists.
        It is not an error if the node does not exist.
        """
        name = self._validate_name(name)
        # has_key() is the Python 2 membership test; absence is a no-op.
        if self.nodes.has_key(name):
            del self.nodes[name]
    def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
                      create=False):
        """Look for rdata with the specified name and type in the zone,
        and return an rdataset encapsulating it.
        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.
        The rdataset returned is not a copy; changes to it will change
        the zone.
        KeyError is raised if the name or type are not found.
        Use L{get_rdataset} if you want to have None returned instead.
        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        @param create: should the node and rdataset be created if they do not
        exist?
        @type create: bool
        @raises KeyError: the node or rdata could not be found
        @rtype: dns.rrset.RRset object
        """
        name = self._validate_name(name)
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        node = self.find_node(name, create)
        return node.find_rdataset(self.rdclass, rdtype, covers, create)
    def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
                     create=False):
        """Look for rdata with the specified name and type in the zone,
        and return an rdataset encapsulating it.
        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.
        The rdataset returned is not a copy; changes to it will change
        the zone.
        None is returned if the name or type are not found.
        Use L{find_rdataset} if you want to have KeyError raised instead.
        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        @param create: should the node and rdataset be created if they do not
        exist?
        @type create: bool
        @rtype: dns.rrset.RRset object
        """
        try:
            rdataset = self.find_rdataset(name, rdtype, covers, create)
        except KeyError:
            rdataset = None
        return rdataset
    def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):
        """Delete the rdataset matching I{rdtype} and I{covers}, if it
        exists at the node specified by I{name}.
        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.
        It is not an error if the node does not exist, or if there is no
        matching rdataset at the node.
        If the node has no rdatasets after the deletion, it will itself
        be deleted.
        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        """
        name = self._validate_name(name)
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        node = self.get_node(name)
        if not node is None:
            node.delete_rdataset(self.rdclass, rdtype, covers)
            # Garbage-collect the node itself once it holds no rdatasets.
            if len(node) == 0:
                self.delete_node(name)
    def replace_rdataset(self, name, replacement):
        """Replace an rdataset at name.
        It is not an error if there is no rdataset matching I{replacement}.
        Ownership of the I{replacement} object is transferred to the zone;
        in other words, this method does not store a copy of I{replacement}
        at the node, it stores I{replacement} itself.
        If the I{name} node does not exist, it is created.
        @param name: the owner name
        @type name: DNS.name.Name object or string
        @param replacement: the replacement rdataset
        @type replacement: dns.rdataset.Rdataset
        """
        if replacement.rdclass != self.rdclass:
            raise ValueError('replacement.rdclass != zone.rdclass')
        node = self.find_node(name, True)
        node.replace_rdataset(replacement)
    def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
        """Look for rdata with the specified name and type in the zone,
        and return an RRset encapsulating it.
        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.
        This method is less efficient than the similar
        L{find_rdataset} because it creates an RRset instead of
        returning the matching rdataset. It may be more convenient
        for some uses since it returns an object which binds the owner
        name to the rdata.
        This method may not be used to create new nodes or rdatasets;
        use L{find_rdataset} instead.
        KeyError is raised if the name or type are not found.
        Use L{get_rrset} if you want to have None returned instead.
        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        @raises KeyError: the node or rdata could not be found
        @rtype: dns.rrset.RRset object
        """
        name = self._validate_name(name)
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers)
        rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers)
        rrset.update(rdataset)
        return rrset
    def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
        """Look for rdata with the specified name and type in the zone,
        and return an RRset encapsulating it.
        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.
        This method is less efficient than the similar L{get_rdataset}
        because it creates an RRset instead of returning the matching
        rdataset. It may be more convenient for some uses since it
        returns an object which binds the owner name to the rdata.
        This method may not be used to create new nodes or rdatasets;
        use L{find_rdataset} instead.
        None is returned if the name or type are not found.
        Use L{find_rrset} if you want to have KeyError raised instead.
        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        @rtype: dns.rrset.RRset object
        """
        try:
            rrset = self.find_rrset(name, rdtype, covers)
        except KeyError:
            rrset = None
        return rrset
    def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY,
                          covers=dns.rdatatype.NONE):
        """Return a generator which yields (name, rdataset) tuples for
        all rdatasets in the zone which have the specified I{rdtype}
        and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
        then all rdatasets will be matched.
        @param rdtype: int or string
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        """
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        for (name, node) in self.iteritems():
            for rds in node:
                if rdtype == dns.rdatatype.ANY or \
                   (rds.rdtype == rdtype and rds.covers == covers):
                    yield (name, rds)
    def iterate_rdatas(self, rdtype=dns.rdatatype.ANY,
                       covers=dns.rdatatype.NONE):
        """Return a generator which yields (name, ttl, rdata) tuples for
        all rdatas in the zone which have the specified I{rdtype}
        and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
        then all rdatas will be matched.
        @param rdtype: int or string
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        """
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        for (name, node) in self.iteritems():
            for rds in node:
                if rdtype == dns.rdatatype.ANY or \
                   (rds.rdtype == rdtype and rds.covers == covers):
                    for rdata in rds:
                        yield (name, rds.ttl, rdata)
    def to_file(self, f, sorted=True, relativize=True, nl=None):
        """Write a zone to a file.
        @param f: file or string. If I{f} is a string, it is treated
        as the name of a file to open.
        @param sorted: if True, the file will be written with the
        names sorted in DNSSEC order from least to greatest. Otherwise
        the names will be written in whatever order they happen to have
        in the zone's dictionary.
        @param relativize: if True, domain names in the output will be
        relativized to the zone's origin (if possible).
        @type relativize: bool
        @param nl: The end of line string. If not specified, the
        output will use the platform's native end-of-line marker (i.e.
        LF on POSIX, CRLF on Windows, CR on Macintosh).
        @type nl: string or None
        """
        if sys.hexversion >= 0x02030000:
            # allow Unicode filenames
            str_type = basestring
        else:
            str_type = str
        if nl is None:
            # Text mode lets the platform supply its native line ending.
            opts = 'w'
        else:
            # Binary mode so the caller-supplied nl is written verbatim.
            opts = 'wb'
        if isinstance(f, str_type):
            # file() is the Python 2 built-in open; we own this handle.
            f = file(f, opts)
            want_close = True
        else:
            want_close = False
        try:
            if sorted:
                names = self.keys()
                names.sort()
            else:
                names = self.iterkeys()
            for n in names:
                l = self[n].to_text(n, origin=self.origin,
                                    relativize=relativize)
                if nl is None:
                    # Python 2 print statement: appends the native newline.
                    print >> f, l
                else:
                    f.write(l)
                    f.write(nl)
        finally:
            if want_close:
                f.close()
    def check_origin(self):
        """Do some simple checking of the zone's origin.
        @raises dns.zone.NoSOA: there is no SOA RR
        @raises dns.zone.NoNS: there is no NS RRset
        @raises KeyError: there is no origin node
        """
        if self.relativize:
            # In a relativized zone the origin node lives at the empty name.
            name = dns.name.empty
        else:
            name = self.origin
        if self.get_rdataset(name, dns.rdatatype.SOA) is None:
            raise NoSOA
        if self.get_rdataset(name, dns.rdatatype.NS) is None:
            raise NoNS
class _MasterReader(object):
    """Read a DNS master file
    @ivar tok: The tokenizer
    @type tok: dns.tokenizer.Tokenizer object
    @ivar ttl: The default TTL
    @type ttl: int
    @ivar last_name: The last name read
    @type last_name: dns.name.Name object
    @ivar current_origin: The current origin
    @type current_origin: dns.name.Name object
    @ivar relativize: should names in the zone be relativized?
    @type relativize: bool
    @ivar zone: the zone
    @type zone: dns.zone.Zone object
    @ivar saved_state: saved reader state (used when processing $INCLUDE)
    @type saved_state: list of (tokenizer, current_origin, last_name, file)
    tuples.
    @ivar current_file: the file object of the $INCLUDed file being parsed
    (None if no $INCLUDE is active).
    @ivar allow_include: is $INCLUDE allowed?
    @type allow_include: bool
    @ivar check_origin: should sanity checks of the origin node be done?
    The default is True.
    @type check_origin: bool
    """
    def __init__(self, tok, origin, rdclass, relativize, zone_factory=Zone,
                 allow_include=False, check_origin=True):
        # (str, unicode) is the Python 2 pair of byte/text string types.
        if isinstance(origin, (str, unicode)):
            origin = dns.name.from_text(origin)
        self.tok = tok
        self.current_origin = origin
        self.relativize = relativize
        self.ttl = 0
        self.last_name = None
        self.zone = zone_factory(origin, rdclass, relativize=relativize)
        self.saved_state = []
        self.current_file = None
        self.allow_include = allow_include
        self.check_origin = check_origin
    def _eat_line(self):
        # Discard tokens until the end of the current line (or EOF).
        while 1:
            token = self.tok.get()
            if token.is_eol_or_eof():
                break
    def _rr_line(self):
        """Process one line from a DNS master file."""
        # Name
        if self.current_origin is None:
            raise UnknownOrigin
        token = self.tok.get(want_leading = True)
        if not token.is_whitespace():
            self.last_name = dns.name.from_text(token.value, self.current_origin)
        else:
            # Leading whitespace means "reuse the previous owner name".
            token = self.tok.get()
            if token.is_eol_or_eof():
                # treat leading WS followed by EOL/EOF as if they were EOL/EOF.
                return
            self.tok.unget(token)
        name = self.last_name
        if not name.is_subdomain(self.zone.origin):
            # Names outside the zone are silently skipped.
            self._eat_line()
            return
        if self.relativize:
            name = name.relativize(self.zone.origin)
        token = self.tok.get()
        if not token.is_identifier():
            raise dns.exception.SyntaxError
        # TTL
        try:
            ttl = dns.ttl.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except dns.ttl.BadTTL:
            # No explicit TTL on this RR; fall back to the current $TTL.
            ttl = self.ttl
        # Class
        try:
            rdclass = dns.rdataclass.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except:
            # Any other failure means the token was not a class; the class
            # is optional, so default to the zone's class.
            rdclass = self.zone.rdclass
        if rdclass != self.zone.rdclass:
            raise dns.exception.SyntaxError("RR class is not zone's class")
        # Type
        try:
            rdtype = dns.rdatatype.from_text(token.value)
        except:
            raise dns.exception.SyntaxError("unknown rdatatype '%s'" % token.value)
        n = self.zone.nodes.get(name)
        if n is None:
            n = self.zone.node_factory()
            self.zone.nodes[name] = n
        try:
            rd = dns.rdata.from_text(rdclass, rdtype, self.tok,
                                     self.current_origin, False)
        except dns.exception.SyntaxError:
            # Catch and reraise.
            (ty, va) = sys.exc_info()[:2]
            raise va
        except:
            # All exceptions that occur in the processing of rdata
            # are treated as syntax errors. This is not strictly
            # correct, but it is correct almost all of the time.
            # We convert them to syntax errors so that we can emit
            # helpful filename:line info.
            (ty, va) = sys.exc_info()[:2]
            raise dns.exception.SyntaxError("caught exception %s: %s" % (str(ty), str(va)))
        rd.choose_relativity(self.zone.origin, self.relativize)
        covers = rd.covers()
        rds = n.find_rdataset(rdclass, rdtype, covers, True)
        rds.add(rd, ttl)
    def read(self):
        """Read a DNS master file and build a zone object.
        @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
        @raises dns.zone.NoNS: No NS RRset was found at the zone origin
        """
        try:
            while 1:
                token = self.tok.get(True, True).unescape()
                if token.is_eof():
                    if not self.current_file is None:
                        self.current_file.close()
                    if len(self.saved_state) > 0:
                        # EOF of an $INCLUDEd file: pop back to the parent.
                        (self.tok,
                         self.current_origin,
                         self.last_name,
                         self.current_file,
                         self.ttl) = self.saved_state.pop(-1)
                        continue
                    break
                elif token.is_eol():
                    continue
                elif token.is_comment():
                    self.tok.get_eol()
                    continue
                elif token.value[0] == '$':
                    # Master file directive ($TTL / $ORIGIN / $INCLUDE).
                    u = token.value.upper()
                    if u == '$TTL':
                        token = self.tok.get()
                        if not token.is_identifier():
                            raise dns.exception.SyntaxError("bad $TTL")
                        self.ttl = dns.ttl.from_text(token.value)
                        self.tok.get_eol()
                    elif u == '$ORIGIN':
                        self.current_origin = self.tok.get_name()
                        self.tok.get_eol()
                        if self.zone.origin is None:
                            self.zone.origin = self.current_origin
                    elif u == '$INCLUDE' and self.allow_include:
                        token = self.tok.get()
                        if not token.is_quoted_string():
                            raise dns.exception.SyntaxError("bad filename in $INCLUDE")
                        filename = token.value
                        token = self.tok.get()
                        if token.is_identifier():
                            # Optional origin argument after the filename.
                            new_origin = dns.name.from_text(token.value, \
                                                            self.current_origin)
                            self.tok.get_eol()
                        elif not token.is_eol_or_eof():
                            raise dns.exception.SyntaxError("bad origin in $INCLUDE")
                        else:
                            new_origin = self.current_origin
                        # Save the parent file's state so EOF can restore it.
                        self.saved_state.append((self.tok,
                                                 self.current_origin,
                                                 self.last_name,
                                                 self.current_file,
                                                 self.ttl))
                        self.current_file = file(filename, 'r')
                        self.tok = dns.tokenizer.Tokenizer(self.current_file,
                                                           filename)
                        self.current_origin = new_origin
                    else:
                        raise dns.exception.SyntaxError("Unknown master file directive '" + u + "'")
                    continue
                self.tok.unget(token)
                self._rr_line()
        except dns.exception.SyntaxError, detail:
            # Re-raise with filename:line context from the tokenizer.
            (filename, line_number) = self.tok.where()
            if detail is None:
                detail = "syntax error"
            raise dns.exception.SyntaxError("%s:%d: %s" % (filename, line_number, detail))
        # Now that we're done reading, do some basic checking of the zone.
        if self.check_origin:
            self.zone.check_origin()
def from_text(text, origin = None, rdclass = dns.rdataclass.IN,
              relativize = True, zone_factory=Zone, filename=None,
              allow_include=False, check_origin=True):
    """Build a zone object from a master file format string.
    @param text: the master file format input
    @type text: string.
    @param origin: The origin of the zone; if not specified, the first
    $ORIGIN statement in the master file will determine the origin of the
    zone.
    @type origin: dns.name.Name object or string
    @param rdclass: The zone's rdata class; the default is class IN.
    @type rdclass: int
    @param relativize: should names be relativized? The default is True
    @type relativize: bool
    @param zone_factory: The zone factory to use
    @type zone_factory: function returning a Zone
    @param filename: The filename to emit when describing where an error
    occurred; the default is '<string>'.
    @type filename: string
    @param allow_include: is $INCLUDE allowed?
    @type allow_include: bool
    @param check_origin: should sanity checks of the origin node be done?
    The default is True.
    @type check_origin: bool
    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
    @rtype: dns.zone.Zone object
    """
    # Undocumented: 'text' may also be an open file object; the official
    # file interface is from_file().
    effective_filename = '<string>' if filename is None else filename
    tokenizer = dns.tokenizer.Tokenizer(text, effective_filename)
    master_reader = _MasterReader(tokenizer, origin, rdclass, relativize,
                                  zone_factory,
                                  allow_include=allow_include,
                                  check_origin=check_origin)
    master_reader.read()
    return master_reader.zone
def from_file(f, origin = None, rdclass = dns.rdataclass.IN,
              relativize = True, zone_factory=Zone, filename=None,
              allow_include=True, check_origin=True):
    """Read a master file and build a zone object.
    @param f: file or string. If I{f} is a string, it is treated
    as the name of a file to open.
    @param origin: The origin of the zone; if not specified, the first
    $ORIGIN statement in the master file will determine the origin of the
    zone.
    @type origin: dns.name.Name object or string
    @param rdclass: The zone's rdata class; the default is class IN.
    @type rdclass: int
    @param relativize: should names be relativized? The default is True
    @type relativize: bool
    @param zone_factory: The zone factory to use
    @type zone_factory: function returning a Zone
    @param filename: The filename to emit when describing where an error
    occurred; the default is '<file>', or the value of I{f} if I{f} is a
    string.
    @type filename: string
    @param allow_include: is $INCLUDE allowed?
    @type allow_include: bool
    @param check_origin: should sanity checks of the origin node be done?
    The default is True.
    @type check_origin: bool
    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
    @rtype: dns.zone.Zone object
    """
    if sys.hexversion >= 0x02030000:
        # allow Unicode filenames; turn on universal newline support
        str_type = basestring
        opts = 'rU'
    else:
        str_type = str
        opts = 'r'
    if isinstance(f, str_type):
        if filename is None:
            filename = f
        # file() is the Python 2 built-in open; we own this handle and
        # must close it ourselves below.
        f = file(f, opts)
        want_close = True
    else:
        if filename is None:
            filename = '<file>'
        want_close = False
    try:
        # from_text accepts an open file as its 'text' argument.
        z = from_text(f, origin, rdclass, relativize, zone_factory,
                      filename, allow_include, check_origin)
    finally:
        if want_close:
            f.close()
    return z
def from_xfr(xfr, zone_factory=Zone, relativize=True):
    """Convert the output of a zone transfer generator into a zone object.
    @param xfr: The xfr generator
    @type xfr: generator of dns.message.Message objects
    @param relativize: should names be relativized? The default is True.
    It is essential that the relativize setting matches the one specified
    to dns.query.xfr().
    @type relativize: bool
    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
    @rtype: dns.zone.Zone object
    """
    zone = None
    for message in xfr:
        if zone is None:
            # First message: derive origin and class from the leading answer.
            if relativize:
                origin = message.origin
            else:
                origin = message.answer[0].name
            rdclass = message.answer[0].rdclass
            zone = zone_factory(origin, rdclass, relativize=relativize)
        for rrset in message.answer:
            node = zone.nodes.get(rrset.name)
            if not node:
                node = zone.node_factory()
                zone.nodes[rrset.name] = node
            rdataset = node.find_rdataset(rrset.rdclass, rrset.rdtype,
                                          rrset.covers, True)
            rdataset.update_ttl(rrset.ttl)
            for rdata in rrset:
                rdata.choose_relativity(zone.origin, relativize)
                rdataset.add(rdata)
    zone.check_origin()
    return zone
| bsd-3-clause |
ge0rgi/cinder | cinder/volume/drivers/dell_emc/vmax/provision_v3.py | 1 | 46116 | # Copyright (c) 2012 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
import six
from cinder import coordination
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.volume.drivers.dell_emc.vmax import utils
LOG = logging.getLogger(__name__)
STORAGEGROUPTYPE = 4
POSTGROUPTYPE = 3
EMC_ROOT = 'root/emc'
THINPROVISIONINGCOMPOSITE = 32768
THINPROVISIONING = 5
INFO_SRC_V3 = 3
ACTIVATESNAPVX = 4
DEACTIVATESNAPVX = 19
SNAPSYNCTYPE = 7
RDF_FAILOVER = 10
RDF_FAILBACK = 11
RDF_RESYNC = 14
RDF_SYNC_MODE = 2
RDF_SYNCHRONIZED = 6
RDF_FAILEDOVER = 12
class VMAXProvisionV3(object):
"""Provisioning Class for SMI-S based EMC volume drivers.
This Provisioning class is for EMC volume drivers based on SMI-S.
It supports VMAX arrays.
"""
def __init__(self, prtcl):
    """Initialize the provisioning helper.

    :param prtcl: the protocol string handed through by the driver
        (e.g. iSCSI or FC); stored and forwarded to the utils helper
    """
    self.protocol = prtcl
    # Shared utility helper used by every provisioning operation below.
    self.utils = utils.VMAXUtils(prtcl)
def delete_volume_from_pool(
        self, conn, storageConfigservice, volumeInstanceName, volumeName,
        extraSpecs):
    """Given the volume instance remove it from the pool.

    :param conn: connection to the ecom server
    :param storageConfigservice: the storage configuration service
    :param volumeInstanceName: the volume instance name, or a list of
        instance names for a bulk delete
    :param volumeName: the volume name (String)
    :param extraSpecs: additional info
    :returns: int -- return code
    :raises: VolumeBackendAPIException
    """
    startTime = time.time()
    # A list of instance names means a bulk delete: all volumes are
    # returned to the pool in a single SMI-S call.
    if isinstance(volumeInstanceName, list):
        theElements = volumeInstanceName
        volumeName = 'Bulk Delete'
    else:
        theElements = [volumeInstanceName]

    rc, job = conn.InvokeMethod(
        'ReturnElementsToStoragePool', storageConfigservice,
        TheElements=theElements)

    # A non-zero return code may only mean the request became an
    # asynchronous job; wait for completion before treating it as
    # an error.
    if rc != 0:
        rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                         extraSpecs)
        if rc != 0:
            exceptionMessage = (_(
                "Error Delete Volume: %(volumeName)s. "
                "Return code: %(rc)lu. Error: %(error)s.")
                % {'volumeName': volumeName,
                   'rc': rc,
                   'error': errordesc})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)

    LOG.debug("InvokeMethod ReturnElementsToStoragePool took: "
              "%(delta)s H:MM:SS.",
              {'delta': self.utils.get_time_delta(startTime,
                                                  time.time())})

    return rc
def create_volume_from_sg(
        self, conn, storageConfigService, volumeName,
        sgInstanceName, volumeSize, extraSpecs):
    """Create the volume and associate it with a storage group.

    We use EMCCollections parameter to supply a Device Masking Group
    to contain a newly created storage volume.

    :param conn: the connection information to the ecom server
    :param storageConfigService: the storage configuration service
    :param volumeName: the volume name (String)
    :param sgInstanceName: the storage group instance name
        associated with an SLO
    :param volumeSize: volume size (String)
    :param extraSpecs: additional info
    :returns: dict -- volumeDict - the volume dict
    :returns: int -- return code
    :raises: VolumeBackendAPIException
    """
    try:
        storageGroupInstance = conn.GetInstance(sgInstanceName)
    except Exception:
        exceptionMessage = (_(
            "Unable to get the name of the storage group"))
        LOG.error(exceptionMessage)
        raise exception.VolumeBackendAPIException(
            data=exceptionMessage)
    sgName = storageGroupInstance['ElementName']

    # Serialize creation per storage group name: concurrent volume
    # creations into the same SG would otherwise race on the array.
    @coordination.synchronized("emc-sg-{storageGroup}")
    def do_create_volume_from_sg(storageGroup):
        startTime = time.time()

        rc, job = conn.InvokeMethod(
            'CreateOrModifyElementFromStoragePool',
            storageConfigService, ElementName=volumeName,
            EMCCollections=[sgInstanceName],
            ElementType=self.utils.get_num(THINPROVISIONING, '16'),
            Size=self.utils.get_num(volumeSize, '64'))

        LOG.debug("Create Volume: %(volumename)s. Return code: %(rc)lu.",
                  {'volumename': volumeName,
                   'rc': rc})
        # Non-zero may indicate an asynchronous job; wait and re-check.
        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error Create Volume: %(volumeName)s. "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'volumeName': volumeName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})

        # Find the newly created volume.
        volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
        return volumeDict, rc
    return do_create_volume_from_sg(sgName)
def _find_new_storage_group(
self, conn, maskingGroupDict, storageGroupName):
"""After creating an new storage group find it and return it.
:param conn: connection to the ecom server
:param maskingGroupDict: the maskingGroupDict dict
:param storageGroupName: storage group name (String)
:returns: maskingGroupDict['MaskingGroup'] or None
"""
foundStorageGroupInstanceName = None
if 'MaskingGroup' in maskingGroupDict:
foundStorageGroupInstanceName = maskingGroupDict['MaskingGroup']
return foundStorageGroupInstanceName
def get_volume_dict_from_job(self, conn, jobInstance):
"""Given the jobInstance determine the volume Instance.
:param conn: the ecom connection
:param jobInstance: the instance of a job
:returns: dict -- volumeDict - an instance of a volume
"""
associators = conn.Associators(
jobInstance,
ResultClass='EMC_StorageVolume')
if len(associators) > 0:
return self.create_volume_dict(associators[0].path)
else:
exceptionMessage = (_(
"Unable to get storage volume from job."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
def get_volume_from_job(self, conn, jobInstance):
    """Given the jobInstance determine the volume Instance.

    :param conn: the ecom connection
    :param jobInstance: the instance of a job
    :returns: the first associated EMC_StorageVolume instance
    :raises: VolumeBackendAPIException: when no volume is associated
        with the job
    """
    volumes = conn.Associators(
        jobInstance,
        ResultClass='EMC_StorageVolume')
    # Guard clause: no associated volume means the job failed to
    # produce one.
    if not volumes:
        errorMessage = (_(
            "Unable to get storage volume from job."))
        LOG.error(errorMessage)
        raise exception.VolumeBackendAPIException(data=errorMessage)
    return volumes[0]
def create_volume_dict(self, volumeInstanceName):
    """Create volume dictionary

    :param volumeInstanceName: the CIM path of the volume; exposes a
        ``classname`` attribute and dict-style key access
    :returns: dict -- volumeDict - an instance of a volume
    """
    # Capture the CIM keybindings that uniquely identify the volume.
    keybindings = {
        'CreationClassName': volumeInstanceName['CreationClassName'],
        'SystemName': volumeInstanceName['SystemName'],
        'DeviceID': volumeInstanceName['DeviceID'],
        'SystemCreationClassName': volumeInstanceName[
            'SystemCreationClassName']}
    return {'classname': volumeInstanceName.classname,
            'keybindings': keybindings}
def get_or_create_default_sg(self, conn, extraSpecs, storageSystemName,
doDisableCompression):
"""Get or create default storage group for a replica.
:param conn: the connection to the ecom server
:param extraSpecs: the extra specifications
:param storageSystemName: the storage system name
:param doDisableCompression: flag for compression
:returns: sgInstanceName, instance of storage group
"""
pool = extraSpecs[self.utils.POOL]
slo = extraSpecs[self.utils.SLO]
workload = extraSpecs[self.utils.WORKLOAD]
storageGroupName, controllerConfigService, sgInstanceName = (
self.utils.get_v3_default_sg_instance_name(
conn, pool, slo, workload, storageSystemName,
doDisableCompression))
if sgInstanceName is None:
sgInstanceName = self.create_storage_group_v3(
conn, controllerConfigService, storageGroupName,
pool, slo, workload, extraSpecs, doDisableCompression)
return sgInstanceName
def create_element_replica(
        self, conn, repServiceInstanceName,
        cloneName, syncType, sourceInstance, extraSpecs,
        targetInstance=None, rsdInstance=None, copyState=None):
    """Make SMI-S call to create replica for source element.

    :param conn: the connection to the ecom server
    :param repServiceInstanceName: replication service
    :param cloneName: clone volume name
    :param syncType: 7=snapshot, 8=clone
    :param sourceInstance: source volume instance
    :param extraSpecs: additional info
    :param targetInstance: Target volume instance. Default None
    :param rsdInstance: replication settingdata instance. Default None
    :param copyState: the WaitForCopyState value. Default None
    :returns: int -- rc - return code
    :returns: job - job object of the replica creation operation
    :raises: VolumeBackendAPIException
    """
    startTime = time.time()
    LOG.debug("Create replica: %(clone)s "
              "syncType: %(syncType)s Source: %(source)s.",
              {'clone': cloneName,
               'syncType': syncType,
               'source': sourceInstance.path})
    storageSystemName = sourceInstance['SystemName']
    doDisableCompression = self.utils.is_compression_disabled(extraSpecs)
    # The replica lands in the default storage group for the
    # pool/SLO/workload combination, creating that group if necessary.
    sgInstanceName = (
        self.get_or_create_default_sg(
            conn, extraSpecs, storageSystemName, doDisableCompression))
    try:
        storageGroupInstance = conn.GetInstance(sgInstanceName)
    except Exception:
        exceptionMessage = (_(
            "Unable to get the name of the storage group"))
        LOG.error(exceptionMessage)
        raise exception.VolumeBackendAPIException(
            data=exceptionMessage)

    # Serialize on the storage group name so concurrent replicas into
    # the same group do not race on the array.
    @coordination.synchronized("emc-sg-{storageGroupName}")
    def do_create_element_replica(storageGroupName):
        # With neither a target nor replication-setting data the simple
        # CreateElementReplica form suffices; otherwise delegate to the
        # extra-parameter variant.
        if targetInstance is None and rsdInstance is None:
            rc, job = conn.InvokeMethod(
                'CreateElementReplica', repServiceInstanceName,
                ElementName=cloneName,
                SyncType=self.utils.get_num(syncType, '16'),
                SourceElement=sourceInstance.path,
                Collections=[sgInstanceName])
        else:
            rc, job = self._create_element_replica_extra_params(
                conn, repServiceInstanceName, cloneName, syncType,
                sourceInstance, targetInstance, rsdInstance,
                sgInstanceName, copyState=copyState)

        # Non-zero may indicate an asynchronous job; wait and re-check.
        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error Create Cloned Volume: %(cloneName)s "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'cloneName': cloneName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        LOG.debug("InvokeMethod CreateElementReplica "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})
        return rc, job
    return do_create_element_replica(storageGroupInstance['ElementName'])
def create_remote_element_replica(
        self, conn, repServiceInstanceName, cloneName, syncType,
        sourceInstance, targetInstance, rdfGroupInstance, extraSpecs):
    """Create a replication relationship between source and target.

    :param conn: the ecom connection
    :param repServiceInstanceName: the replication service
    :param cloneName: the name of the target volume
    :param syncType: the synchronization type
    :param sourceInstance: the source volume instance
    :param targetInstance: the target volume instance
    :param rdfGroupInstance: the rdf group instance
    :param extraSpecs: additional info
    :return: rc, job
    :raises: VolumeBackendAPIException
    """
    startTime = time.time()
    LOG.debug("Setup replication relationship: %(source)s "
              "syncType: %(syncType)s Source: %(target)s.",
              {'source': sourceInstance.path,
               'syncType': syncType,
               'target': targetInstance.path})
    # Delegate to the extra-params helper; supplying both a target and
    # an RDF group selects that helper's SRDF branch.
    rc, job = self._create_element_replica_extra_params(
        conn, repServiceInstanceName, cloneName, syncType,
        sourceInstance, targetInstance, None, None, rdfGroupInstance)

    # Non-zero may indicate an asynchronous job; wait and re-check.
    if rc != 0:
        rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                         extraSpecs)
        if rc != 0:
            exceptionMessage = (
                _("Error Create Cloned Volume: %(cloneName)s "
                  "Return code: %(rc)lu. Error: %(error)s.")
                % {'cloneName': cloneName,
                   'rc': rc,
                   'error': errordesc})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)

    LOG.debug("InvokeMethod CreateElementReplica "
              "took: %(delta)s H:MM:SS.",
              {'delta': self.utils.get_time_delta(startTime,
                                                  time.time())})
    return rc, job
def _create_element_replica_extra_params(
        self, conn, repServiceInstanceName, cloneName, syncType,
        sourceInstance, targetInstance, rsdInstance, sgInstanceName,
        rdfGroupInstance=None, copyState=None):
    """CreateElementReplica using extra parameters.

    :param conn: the connection to the ecom server
    :param repServiceInstanceName: replication service
    :param cloneName: clone volume name
    :param syncType: 7=snapshot, 8=clone
    :param sourceInstance: source volume instance
    :param targetInstance: Target volume instance. Default None
    :param rsdInstance: replication settingdata instance. Default None
    :param sgInstanceName: pool instance name
    :param rdfGroupInstance: the SRDF group instance. Default None
    :param copyState: the WaitForCopyState value. Default None
    :returns: int -- rc - return code
    :returns: job - job object of the replica creation operation
    """
    syncType = self.utils.get_num(syncType, '16')
    modeType = self.utils.get_num(RDF_SYNC_MODE, '16')
    # Dispatch on which optional arguments were supplied; each branch
    # maps to a different CreateElementReplica parameter combination.
    if targetInstance and rsdInstance:
        # Target plus replication-setting data.
        rc, job = conn.InvokeMethod(
            'CreateElementReplica', repServiceInstanceName,
            ElementName=cloneName,
            SyncType=syncType,
            SourceElement=sourceInstance.path,
            TargetElement=targetInstance.path,
            ReplicationSettingData=rsdInstance)
    elif targetInstance and rdfGroupInstance:
        # SRDF: target on the remote array within an RDF group.
        rc, job = conn.InvokeMethod(
            'CreateElementReplica', repServiceInstanceName,
            SyncType=syncType,
            Mode=modeType,
            SourceElement=sourceInstance.path,
            TargetElement=targetInstance.path,
            ConnectivityCollection=rdfGroupInstance)
    elif rsdInstance:
        # Setting data only; the target is created inside the supplied
        # storage group collection.
        rc, job = conn.InvokeMethod(
            'CreateElementReplica', repServiceInstanceName,
            ElementName=cloneName,
            SyncType=syncType,
            SourceElement=sourceInstance.path,
            ReplicationSettingData=rsdInstance,
            Collections=[sgInstanceName],
            WaitForCopyState=copyState)
    elif targetInstance and copyState:
        rc, job = conn.InvokeMethod(
            'CreateElementReplica', repServiceInstanceName,
            ElementName=cloneName,
            SyncType=syncType,
            SourceElement=sourceInstance.path,
            TargetElement=targetInstance.path,
            WaitForCopyState=copyState)
    elif targetInstance:
        rc, job = conn.InvokeMethod(
            'CreateElementReplica', repServiceInstanceName,
            ElementName=cloneName,
            SyncType=syncType,
            SourceElement=sourceInstance.path,
            TargetElement=targetInstance.path)
    # NOTE(review): if every optional argument is falsy none of the
    # branches above run and ``rc``/``job`` are unbound, raising
    # UnboundLocalError here.  Callers appear to always supply at least
    # one -- confirm before relying on this helper elsewhere.
    return rc, job
def break_replication_relationship(
        self, conn, repServiceInstanceName, syncInstanceName,
        operation, extraSpecs, force=False):
    """Deletes the relationship between the clone/snap and source volume.

    Makes an SMI-S call to break clone relationship between the clone
    volume and the source.

    :param conn: the connection to the ecom server
    :param repServiceInstanceName: instance name of the replication service
    :param syncInstanceName: instance name of the
        SE_StorageSynchronized_SV_SV object
    :param operation: operation code
    :param extraSpecs: additional info
    :param force: force to break replication relationship if True
    :returns: rc - return code
    :returns: job - job object of the replica creation operation
    """
    LOG.debug("Break replication relationship: %(sv)s "
              "operation: %(operation)s.",
              {'sv': syncInstanceName, 'operation': operation})

    # All the work is delegated to ModifyReplicaSynchronization with
    # the caller-supplied operation code.
    return self._modify_replica_synchronization(
        conn, repServiceInstanceName, syncInstanceName, operation,
        extraSpecs, force)
def create_storage_group_v3(self, conn, controllerConfigService,
                            groupName, srp, slo, workload, extraSpecs,
                            doDisableCompression):
    """Create a storage group in the given SRP.

    :param conn: the connection information to the ecom server
    :param controllerConfigService: the controller configuration service
    :param groupName: the group name (String)
    :param srp: the SRP (String)
    :param slo: the SLO (String)
    :param workload: the workload (String)
    :param extraSpecs: additional info
    :param doDisableCompression: disable compression flag
    :returns: storageGroupInstanceName - storage group instance name
    :raises: VolumeBackendAPIException
    """
    startTime = time.time()

    # Serialize on the group name: concurrent creations of the same
    # default SG would otherwise race on the array.
    @coordination.synchronized("emc-sg-{sgGroupName}")
    def do_create_storage_group_v3(sgGroupName):
        # Build the CreateGroup arguments once.  SLO/workload (and the
        # compression flag) only apply when an SLO is defined; a
        # no-SLO group is created with the base arguments alone.
        # Bug fix: the original left rc/job unassigned (UnboundLocalError)
        # when doDisableCompression was True but slo/workload were not set.
        kwargs = {'GroupName': groupName,
                  'Type': self.utils.get_num(STORAGEGROUPTYPE, '16')}
        if slo and workload:
            kwargs['EMCSRP'] = srp
            kwargs['EMCSLO'] = slo
            kwargs['EMCWorkload'] = workload
            if doDisableCompression:
                kwargs['EMCDisableCompression'] = True
        rc, job = conn.InvokeMethod(
            'CreateGroup', controllerConfigService, **kwargs)

        # Non-zero may indicate an asynchronous job; wait and re-check.
        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(
                conn, job, extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error Create Group: %(groupName)s. "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'groupName': groupName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                # Bug fix: the original used a bare ``raise`` with no
                # active exception, which itself raised RuntimeError
                # instead of a backend exception.
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        LOG.debug("InvokeMethod CreateGroup "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})

        foundStorageGroupInstanceName = self._find_new_storage_group(
            conn, job, groupName)
        return foundStorageGroupInstanceName
    return do_create_storage_group_v3(groupName)
def get_storage_pool_capability(self, conn, poolInstanceName):
    """Get the pool capability.

    :param conn: the connection information to the ecom server
    :param poolInstanceName: the pool instance
    :returns: the storage pool capability instance. None if not found
    """
    capabilityNames = conn.AssociatorNames(
        poolInstanceName,
        ResultClass='Symm_StoragePoolCapabilities')
    # Only the first associated capabilities instance is of interest.
    return capabilityNames[0] if capabilityNames else None
def get_storage_pool_setting(
        self, conn, storagePoolCapability, slo, workload):
    """Get the pool setting for pool capability.

    :param conn: the connection information to the ecom server
    :param storagePoolCapability: the storage pool capability instance
    :param slo: the slo string e.g Bronze
    :param workload: the workload string e.g DSS_REP
    :returns: the storage pool setting instance
    :raises: VolumeBackendAPIException: when no setting matches the
        slo/workload combination
    """
    # Settings are matched on the "<slo>:<workload>" fragment embedded
    # in their InstanceID.
    matchString = ("%(slo)s:%(workload)s"
                   % {'slo': slo,
                      'workload': workload})
    for candidateSetting in conn.AssociatorNames(
            storagePoolCapability, ResultClass='CIM_storageSetting'):
        if matchString in candidateSetting['InstanceID']:
            return candidateSetting

    exceptionMessage = (_(
        "The array does not support the storage pool setting "
        "for SLO %(slo)s and workload %(workload)s. Please "
        "check the array for valid SLOs and workloads.")
        % {'slo': slo,
           'workload': workload})
    LOG.error(exceptionMessage)
    raise exception.VolumeBackendAPIException(
        data=exceptionMessage)
def _get_supported_size_range_for_SLO(
        self, conn, storageConfigService,
        srpPoolInstanceName, storagePoolSettingInstanceName, extraSpecs):
    """Gets available performance capacity per SLO.

    :param conn: the connection information to the ecom server
    :param storageConfigService: the storage configuration service instance
    :param srpPoolInstanceName: the SRP storage pool instance
    :param storagePoolSettingInstanceName: the SLO type, e.g Bronze
    :param extraSpecs: additional info
    :returns: dict -- supportedSizeDict - the supported size dict
    :raises: VolumeBackendAPIException
    """
    startTime = time.time()
    # ElementType 3 -- presumably "storage volume" in the SMI-S
    # enumeration; TODO confirm against the provider documentation.
    rc, supportedSizeDict = conn.InvokeMethod(
        'GetSupportedSizeRange',
        srpPoolInstanceName,
        ElementType=self.utils.get_num(3, '16'),
        Goal=storagePoolSettingInstanceName)

    # Non-zero may indicate an asynchronous job; wait and re-check.
    if rc != 0:
        rc, errordesc = self.utils.wait_for_job_complete(
            conn, supportedSizeDict, extraSpecs)
        if rc != 0:
            exceptionMessage = (_(
                "Cannot get supported size range for %(sps)s "
                "Return code: %(rc)lu. Error: %(error)s.")
                % {'sps': storagePoolSettingInstanceName,
                   'rc': rc,
                   'error': errordesc})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)

    LOG.debug("InvokeMethod GetSupportedSizeRange "
              "took: %(delta)s H:MM:SS.",
              {'delta': self.utils.get_time_delta(startTime,
                                                  time.time())})
    return supportedSizeDict
def get_volume_range(
self, conn, storageConfigService, poolInstanceName, slo, workload,
extraSpecs):
"""Get upper and lower range for volume for slo/workload combination.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage config service
:param poolInstanceName: the pool instance
:param slo: slo string e.g Bronze
:param workload: workload string e.g DSS
:param extraSpecs: additional info
:returns: supportedSizeDict
"""
supportedSizeDict = {}
storagePoolCapabilityInstanceName = self.get_storage_pool_capability(
conn, poolInstanceName)
if storagePoolCapabilityInstanceName:
storagePoolSettingInstanceName = self.get_storage_pool_setting(
conn, storagePoolCapabilityInstanceName, slo, workload)
supportedSizeDict = self._get_supported_size_range_for_SLO(
conn, storageConfigService, poolInstanceName,
storagePoolSettingInstanceName, extraSpecs)
return supportedSizeDict
def activate_snap_relationship(
        self, conn, repServiceInstanceName, syncInstanceName, extraSpecs):
    """Activate snap relationship and start copy operation.

    :param conn: the connection to the ecom server
    :param repServiceInstanceName: instance name of the replication service
    :param syncInstanceName: instance name of the
        SE_StorageSynchronized_SV_SV object
    :param extraSpecs: additional info
    :returns: int -- return code
    :returns: job object of the replica creation operation
    """
    # Operation 4 (ACTIVATESNAPVX): activate the snapVx.
    operation = ACTIVATESNAPVX
    LOG.debug("Activate snap: %(sv)s operation: %(operation)s.",
              {'sv': syncInstanceName, 'operation': operation})

    return self._modify_replica_synchronization(
        conn, repServiceInstanceName, syncInstanceName, operation,
        extraSpecs)
def return_to_resource_pool(self, conn, repServiceInstanceName,
                            syncInstanceName, extraSpecs):
    """Return the snap target resources back to the pool.

    :param conn: the connection to the ecom server
    :param repServiceInstanceName: instance name of the replication service
    :param syncInstanceName: instance name of the
        SE_StorageSynchronized_SV_SV object
    :param extraSpecs: additional info
    :returns: rc - return code
    :returns: job object of the replica creation operation
    """
    # Operation 19 (DEACTIVATESNAPVX): deactivate the snapVx.  The
    # original comment here said "Operation 4: activate", copied from
    # activate_snap_relationship; the code deactivates.
    operation = DEACTIVATESNAPVX
    LOG.debug("Return snap resource back to pool: "
              "%(sv)s operation: %(operation)s.",
              {'sv': syncInstanceName, 'operation': operation})

    return self._modify_replica_synchronization(
        conn, repServiceInstanceName, syncInstanceName, operation,
        extraSpecs)
def _modify_replica_synchronization(
        self, conn, repServiceInstanceName, syncInstanceName,
        operation, extraSpecs, force=False):
    """Modify the relationship between the clone/snap and source volume.

    Helper function that makes an SMI-S call to break clone relationship
    between the clone volume and the source.

    :param conn: the connection to the ecom server
    :param repServiceInstanceName: instance name of the replication service
    :param syncInstanceName: instance name of the
        SE_StorageSynchronized_SV_SV object
    :param operation: operation code
    :param extraSpecs: additional info
    :param force: force to modify replication synchronization if True
    :returns: int -- return code
    :returns: job object of the replica creation operation
    :raises: VolumeBackendAPIException
    """
    startTime = time.time()
    rc, job = conn.InvokeMethod(
        'ModifyReplicaSynchronization', repServiceInstanceName,
        Operation=self.utils.get_num(operation, '16'),
        Synchronization=syncInstanceName,
        Force=force)

    LOG.debug("_modify_replica_synchronization: %(sv)s "
              "operation: %(operation)s Return code: %(rc)lu.",
              {'sv': syncInstanceName, 'operation': operation, 'rc': rc})

    # Non-zero may only mean the request became an asynchronous job;
    # wait for completion before treating the code as an error.
    if rc != 0:
        rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                         extraSpecs)
        if rc != 0:
            exceptionMessage = (_(
                "Error modify replica synchronization: %(sv)s "
                "operation: %(operation)s. "
                "Return code: %(rc)lu. Error: %(error)s.")
                % {'sv': syncInstanceName, 'operation': operation,
                   'rc': rc, 'error': errordesc})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)

    LOG.debug("InvokeMethod ModifyReplicaSynchronization "
              "took: %(delta)s H:MM:SS.",
              {'delta': self.utils.get_time_delta(startTime,
                                                  time.time())})

    return rc, job
def create_group_replica(
        self, conn, replicationService,
        srcGroupInstanceName, tgtGroupInstanceName, relationName,
        extraSpecs):
    """Make SMI-S call to create replica for source group.

    :param conn: the connection to the ecom server
    :param replicationService: replication service
    :param srcGroupInstanceName: source group instance name
    :param tgtGroupInstanceName: target group instance name
    :param relationName: replica relationship name
    :param extraSpecs: additional info
    :returns: int -- return code
    :returns: job object of the replica creation operation
    :raises: VolumeBackendAPIException
    """
    LOG.debug(
        "Creating CreateGroupReplica V3: "
        "replicationService: %(replicationService)s "
        "RelationName: %(relationName)s "
        "sourceGroup: %(srcGroup)s "
        "targetGroup: %(tgtGroup)s.",
        {'replicationService': replicationService,
         'relationName': relationName,
         'srcGroup': srcGroupInstanceName,
         'tgtGroup': tgtGroupInstanceName})
    # SyncType 7 (SNAPSYNCTYPE) requests a snapshot-style group
    # replica.  WaitForCopyState=4 -- presumably the "activated" copy
    # state; TODO confirm against the SMI-S copy services profile.
    rc, job = conn.InvokeMethod(
        'CreateGroupReplica',
        replicationService,
        RelationshipName=relationName,
        SourceGroup=srcGroupInstanceName,
        TargetGroup=tgtGroupInstanceName,
        SyncType=self.utils.get_num(SNAPSYNCTYPE, '16'),
        WaitForCopyState=self.utils.get_num(4, '16'))

    # Non-zero may indicate an asynchronous job; wait and re-check.
    if rc != 0:
        rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                         extraSpecs)
        if rc != 0:
            exceptionMsg = (_("Error CreateGroupReplica: "
                              "source: %(source)s target: %(target)s. "
                              "Return code: %(rc)lu. Error: %(error)s.")
                            % {'source': srcGroupInstanceName,
                               'target': tgtGroupInstanceName,
                               'rc': rc,
                               'error': errordesc})
            LOG.error(exceptionMsg)
            raise exception.VolumeBackendAPIException(data=exceptionMsg)
    return rc, job
def get_srp_pool_stats(self, conn, arrayInfo):
    """Get the totalManagedSpace, remainingManagedSpace.

    All capacities default to -1 (and wlpEnabled to False) when the
    pool cannot be found or its properties cannot be read.

    :param conn: the connection to the ecom server
    :param arrayInfo: the array dict
    :returns: totalCapacityGb
    :returns: remainingCapacityGb
    :returns: subscribedCapacityGb
    :returns: array_reserve_percent
    :returns: wlpEnabled
    """
    totalCapacityGb = -1
    remainingCapacityGb = -1
    subscribedCapacityGb = -1
    array_reserve_percent = -1
    wlpEnabled = False
    storageSystemInstanceName = self.utils.find_storageSystem(
        conn, arrayInfo['SerialNumber'])

    srpPoolInstanceNames = conn.AssociatorNames(
        storageSystemInstanceName,
        ResultClass='Symm_SRPStoragePool')

    for srpPoolInstanceName in srpPoolInstanceNames:
        poolnameStr = self.utils.get_pool_name(conn, srpPoolInstanceName)

        if six.text_type(arrayInfo['PoolName']) == (
                six.text_type(poolnameStr)):
            try:
                # Check that pool hasn't suddenly been deleted.
                srpPoolInstance = conn.GetInstance(srpPoolInstanceName)
                propertiesList = srpPoolInstance.properties.items()
                for properties in propertiesList:
                    if properties[0] == 'TotalManagedSpace':
                        cimProperties = properties[1]
                        totalManagedSpace = cimProperties.value
                        totalCapacityGb = self.utils.convert_bits_to_gbs(
                            totalManagedSpace)
                    elif properties[0] == 'RemainingManagedSpace':
                        cimProperties = properties[1]
                        remainingManagedSpace = cimProperties.value
                        remainingCapacityGb = (
                            self.utils.convert_bits_to_gbs(
                                remainingManagedSpace))
                    elif properties[0] == 'EMCSubscribedCapacity':
                        cimProperties = properties[1]
                        subscribedManagedSpace = cimProperties.value
                        subscribedCapacityGb = (
                            self.utils.convert_bits_to_gbs(
                                subscribedManagedSpace))
                    elif properties[0] == 'EMCPercentReservedCapacity':
                        cimProperties = properties[1]
                        array_reserve_percent = int(cimProperties.value)
            except Exception:
                # NOTE(review): any error reading the pool instance is
                # deliberately swallowed (best-effort statistics); the
                # -1 defaults are reported instead.
                pass
            # Prefer the WLP-derived SLO capacity when available; fall
            # back to the raw SRP pool capacity with a warning.
            remainingSLOCapacityGb = (
                self._get_remaining_slo_capacity_wlp(
                    conn, srpPoolInstanceName, arrayInfo,
                    storageSystemInstanceName['Name']))
            if remainingSLOCapacityGb != -1:
                remainingCapacityGb = remainingSLOCapacityGb
                wlpEnabled = True
            else:
                LOG.warning(_LW(
                    "Remaining capacity %(remainingCapacityGb)s "
                    "GBs is determined from SRP pool capacity "
                    "and not the SLO capacity. Performance may "
                    "not be what you expect."),
                    {'remainingCapacityGb': remainingCapacityGb})

    return (totalCapacityGb, remainingCapacityGb, subscribedCapacityGb,
            array_reserve_percent, wlpEnabled)
def _get_remaining_slo_capacity_wlp(self, conn, srpPoolInstanceName,
                                    arrayInfo, systemName):
    """Get the remaining SLO capacity.

    This is derived from the WLP portion of Unisphere. Please
    see the SMIProvider doc and the readme doc for details.

    :param conn: the connection to the ecom server
    :param srpPoolInstanceName: SRP instance name
    :param arrayInfo: the array dict
    :param systemName: the system name
    :returns: remainingCapacityGb -- -1 when no SLO is configured or
        the WLP information is unavailable
    """
    remainingCapacityGb = -1
    if arrayInfo['SLO']:
        storageConfigService = (
            self.utils.find_storage_configuration_service(
                conn, systemName))

        # NOTE(review): extraSpecs is passed as None here; it is only
        # consumed if GetSupportedSizeRange returns an asynchronous
        # job -- confirm that path cannot occur for this query.
        supportedSizeDict = (
            self.get_volume_range(
                conn, storageConfigService, srpPoolInstanceName,
                arrayInfo['SLO'], arrayInfo['Workload'],
                None))
        try:
            # Only trust the value when it actually came from the WLP
            # (EMCInformationSource == INFO_SRC_V3).
            if supportedSizeDict['EMCInformationSource'] == INFO_SRC_V3:
                remainingCapacityGb = self.utils.convert_bits_to_gbs(
                    supportedSizeDict['EMCRemainingSLOCapacity'])
                LOG.debug("Received remaining SLO Capacity "
                          "%(remainingCapacityGb)s GBs for SLO "
                          "%(SLO)s and workload %(workload)s.",
                          {'remainingCapacityGb': remainingCapacityGb,
                           'SLO': arrayInfo['SLO'],
                           'workload': arrayInfo['Workload']})
        except KeyError:
            # The provider did not return WLP data; keep the -1 default.
            pass
    return remainingCapacityGb
def extend_volume_in_SG(
        self, conn, storageConfigService, volumeInstanceName,
        volumeName, volumeSize, extraSpecs):
    """Extend a volume instance.

    :param conn: connection to the ecom server
    :param storageConfigService: the storage configuration service
    :param volumeInstanceName: the volume instance name
    :param volumeName: the volume name (String)
    :param volumeSize: the new volume size
    :param extraSpecs: additional info
    :returns: volumeDict
    :returns: int -- return code
    :raises: VolumeBackendAPIException
    """
    startTime = time.time()
    # The same SMI-S method creates and resizes volumes: passing
    # TheElement (instead of ElementName) makes this a modify/extend.
    rc, job = conn.InvokeMethod(
        'CreateOrModifyElementFromStoragePool',
        storageConfigService, TheElement=volumeInstanceName,
        Size=self.utils.get_num(volumeSize, '64'))

    LOG.debug("Extend Volume: %(volumename)s. Return code: %(rc)lu.",
              {'volumename': volumeName,
               'rc': rc})
    # Non-zero may indicate an asynchronous job; wait and re-check.
    if rc != 0:
        rc, error_desc = self.utils.wait_for_job_complete(conn, job,
                                                          extraSpecs)
        if rc != 0:
            exceptionMessage = (_(
                "Error Extend Volume: %(volumeName)s. "
                "Return code: %(rc)lu. Error: %(error)s.")
                % {'volumeName': volumeName,
                   'rc': rc,
                   'error': error_desc})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)

    LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool "
              "took: %(delta)s H:MM:SS.",
              {'delta': self.utils.get_time_delta(startTime,
                                                  time.time())})

    # Find the newly extended volume.
    volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
    return volumeDict, rc
def get_rdf_group_instance(self, conn, repServiceInstanceName,
                           RDFGroupName):
    """Get the SRDF group instance.

    :param conn: the connection to the ecom server
    :param repServiceInstanceName: the replication service
    :param RDFGroupName: the element name of the RDF group
    :return: foundRDFGroupInstanceName -- None when no live group with
        that element name exists
    """
    rdfGroupInstances = conn.Associators(
        repServiceInstanceName,
        ResultClass='CIM_ConnectivityCollection')

    for rdfGroupInstance in rdfGroupInstances:
        if RDFGroupName == six.text_type(
                rdfGroupInstance['ElementName']):
            # Guard against the group having been deleted between the
            # enumeration and now.
            if self.utils.get_existing_instance(
                    conn, rdfGroupInstance.path) is None:
                # SRDF group not found.
                return None
            return rdfGroupInstance.path
    return None
def failover_volume(self, conn, repServiceInstanceName,
                    storageSynchronizationSv,
                    extraSpecs):
    """Failover a volume to its target device.

    :param conn: the connection to the ecom server
    :param repServiceInstanceName: the replication service
    :param storageSynchronizationSv: the storage synchronized object
    :param extraSpecs: the extra specifications
    :returns: (rc, job) from the modify call, or None when the pair is
        already failed over
    """
    operation = RDF_FAILOVER
    # check if volume already in failover state
    syncState = self._check_sync_state(conn, storageSynchronizationSv)
    if syncState == RDF_FAILEDOVER:
        # NOTE(review): this early exit returns None while the normal
        # path returns an (rc, job) tuple; callers must tolerate both.
        return
    else:
        LOG.debug("Failover: %(sv)s operation: %(operation)s.",
                  {'sv': storageSynchronizationSv, 'operation': operation})

        return self._modify_replica_synchronization(
            conn, repServiceInstanceName, storageSynchronizationSv,
            operation, extraSpecs)
def failback_volume(self, conn, repServiceInstanceName,
                    storageSynchronizationSv,
                    extraSpecs):
    """Failback a volume to the source device.

    :param conn: the connection to the ecom server
    :param repServiceInstanceName: the replication service
    :param storageSynchronizationSv: the storage synchronized object
    :param extraSpecs: the extra specifications
    :returns: (rc, job) from the modify call, or None when the pair is
        already synchronized
    """
    failback_operation = RDF_FAILBACK
    # check if volume already in failback state
    syncState = self._check_sync_state(conn, storageSynchronizationSv)
    if syncState == RDF_SYNCHRONIZED:
        # NOTE(review): this early exit returns None while the normal
        # path returns an (rc, job) tuple; callers must tolerate both.
        return
    else:
        LOG.debug("Failback: %(sv)s operation: %(operation)s.",
                  {'sv': storageSynchronizationSv,
                   'operation': failback_operation})

        return self._modify_replica_synchronization(
            conn, repServiceInstanceName, storageSynchronizationSv,
            failback_operation, extraSpecs)
    def _check_sync_state(self, conn, syncName):
        """Get the copy state of a sync name.

        :param conn: the connection to the ecom server
        :param syncName: the storage sync sv name
        :return: the copy state
        :raises VolumeBackendAPIException: if the sync instance cannot be
            read (any failure, including a missing 'syncState' key, is
            wrapped in this exception)
        """
        try:
            syncInstance = conn.GetInstance(syncName,
                                            LocalOnly=False)
            syncState = syncInstance['syncState']
            LOG.debug("syncState is %(syncState)lu.",
                      {'syncState': syncState})
            return syncState
        except Exception as ex:
            # Wrap any lookup failure so callers only have to handle the
            # backend API exception type.
            exceptionMessage = (
                _("Getting sync instance failed with: %(ex)s.")
                % {'ex': six.text_type(ex)})
            LOG.exception(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)
| apache-2.0 |
Big-B702/python-for-android | python3-alpha/python3-src/Lib/urllib/error.py | 48 | 2264 | """Exception classes raised by urllib.
The base exception class is URLError, which inherits from IOError. It
doesn't define any behavior of its own, but is the base class for all
exceptions defined in this package.
HTTPError is an exception class that is also a valid HTTP response
instance. It behaves this way because HTTP protocol errors are valid
responses, with a status code, headers, and a body. In some contexts,
an application may want to handle an exception like a regular
response.
"""
import urllib.response
# do these error classes make sense?
# make sure all of the IOError stuff is overridden. we just want to be
# subtypes.
class URLError(IOError):
    """Base class for all errors raised by this package.

    Subclasses IOError purely for type compatibility; none of the IOError
    implementation is reused.  ``args`` is populated for EnvironmentError
    compatibility, although it does not follow the usual layout of errno
    in slot 0 and strerror in slot 1.
    """

    def __init__(self, reason, filename=None):
        self.args = (reason,)
        self.reason = reason
        if filename is not None:
            self.filename = filename

    def __str__(self):
        return '<urlopen error %s>' % self.reason
class HTTPError(URLError, urllib.response.addinfourl):
    """Raised when HTTP error occurs, but also acts like non-error return"""
    # Private alias so __init__ can opt into addinfourl's initialization.
    __super_init = urllib.response.addinfourl.__init__

    def __init__(self, url, code, msg, hdrs, fp):
        self.code, self.msg, self.hdrs = code, msg, hdrs
        self.fp = fp
        self.filename = url
        # addinfourl requires a usable file object.  When the error does
        # not carry a body, skip base-class initialization entirely.
        if fp is not None:
            self.__super_init(fp, hdrs, url, code)

    def __str__(self):
        return 'HTTP Error %s: %s' % (self.code, self.msg)
# exception raised when downloaded size does not match content-length
class ContentTooShortError(URLError):
    """Raised when a download ends before Content-Length bytes arrive."""

    def __init__(self, message, content):
        super().__init__(message)
        # Keep whatever partial payload was received for the caller.
        self.content = content
| apache-2.0 |
fidomason/kbengine | kbe/res/scripts/common/Lib/site-packages/setuptools/command/setopt.py | 285 | 5068 | import distutils, os
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
    """Get the filename of the distutils, local, global, or per-user config

    `kind` must be one of "local", "global", or "user"
    """
    if kind == 'local':
        return 'setup.cfg'
    if kind == 'global':
        return os.path.join(
            os.path.dirname(distutils.__file__), 'distutils.cfg')
    if kind == 'user':
        # POSIX convention hides the per-user file with a leading dot.
        prefix = '.' if os.name == 'posix' else ''
        return os.path.expanduser(
            convert_path("~/%spydistutils.cfg" % prefix))
    raise ValueError(
        "config_file() type must be 'local', 'global', or 'user'", kind
    )
def edit_config(filename, settings, dry_run=False):
    """Edit a configuration file to include `settings`

    `settings` is a dictionary of dictionaries or ``None`` values, keyed by
    command/section name.  A ``None`` value means to delete the entire
    section, while a dictionary lists settings to be changed or deleted in
    that section.  A setting of ``None`` means to delete that setting.

    :param filename: path of the config file to rewrite
    :param settings: mapping of section name -> (mapping or None)
    :param dry_run: when true, log what would happen but do not write
    """
    from setuptools.compat import ConfigParser
    log.debug("Reading configuration from %s", filename)
    opts = ConfigParser.RawConfigParser()
    opts.read([filename])
    for section, options in settings.items():
        if options is None:
            log.info("Deleting section [%s] from %s", section, filename)
            opts.remove_section(section)
        else:
            if not opts.has_section(section):
                log.debug("Adding new section [%s] to %s", section, filename)
                opts.add_section(section)
            for option, value in options.items():
                if value is None:
                    log.debug("Deleting %s.%s from %s",
                              section, option, filename)
                    opts.remove_option(section, option)
                    # Removing the last option leaves an empty section
                    # behind; drop it to keep the file tidy.
                    if not opts.options(section):
                        log.info("Deleting empty [%s] section from %s",
                                 section, filename)
                        opts.remove_section(section)
                else:
                    log.debug(
                        "Setting %s.%s to %r in %s",
                        section, option, value, filename
                    )
                    opts.set(section, option, value)
    log.info("Writing %s", filename)
    if not dry_run:
        # Use a context manager so the handle is closed even if write()
        # raises (the original open()/write()/close() leaked on error).
        with open(filename, 'w') as f:
            opts.write(f)
class option_base(Command):
    """Abstract base class for commands that mess with config files"""

    user_options = [
        ('global-config', 'g',
         "save options to the site-wide distutils.cfg file"),
        ('user-config', 'u',
         "save options to the current user's pydistutils.cfg file"),
        ('filename=', 'f',
         "configuration file to use (default=setup.cfg)"),
    ]

    boolean_options = [
        'global-config', 'user-config',
    ]

    def initialize_options(self):
        """Start with no target file selected."""
        self.global_config = None
        self.user_config = None
        self.filename = None

    def finalize_options(self):
        """Resolve the selected flags to exactly one config filename."""
        filenames = [
            config_file(kind)
            for kind, wanted in (('global', self.global_config),
                                 ('user', self.user_config))
            if wanted
        ]
        if self.filename is not None:
            filenames.append(self.filename)
        if not filenames:
            # Nothing was requested explicitly: default to setup.cfg.
            filenames.append(config_file('local'))
        if len(filenames) > 1:
            raise DistutilsOptionError(
                "Must specify only one configuration file option",
                filenames
            )
        self.filename, = filenames
class setopt(option_base):
    """Save command-line options to a file"""

    description = "set an option in setup.cfg or another config file"

    user_options = [
        ('command=', 'c', 'command to set an option for'),
        ('option=', 'o', 'option to set'),
        ('set-value=', 's', 'value of the option'),
        ('remove', 'r', 'remove (unset) the value'),
    ] + option_base.user_options

    boolean_options = option_base.boolean_options + ['remove']

    def initialize_options(self):
        option_base.initialize_options(self)
        self.command = None
        self.option = None
        self.set_value = None
        self.remove = None

    def finalize_options(self):
        option_base.finalize_options(self)
        if self.command is None or self.option is None:
            raise DistutilsOptionError("Must specify --command *and* --option")
        if self.set_value is None and not self.remove:
            raise DistutilsOptionError("Must specify --set-value or --remove")

    def run(self):
        """Write (or remove) the requested option in the chosen file."""
        # Dashes in option names are stored with underscores, matching
        # how distutils itself reads config files.
        settings = {
            self.command: {self.option.replace('-', '_'): self.set_value}
        }
        edit_config(self.filename, settings, self.dry_run)
| lgpl-3.0 |
TanguyPatte/phantomjs-packaging | src/qt/qtwebkit/Source/WebKit2/Scripts/webkit2/parser.py | 118 | 4545 | # Copyright (C) 2010, 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from webkit2 import model
def parse(file):
    """Parse a WebKit2 messages definition file into a model.MessageReceiver.

    The file declares a receiver as ``messages -> Name [attributes] {``
    followed by one message per line of the form
    ``MessageName(type name, ...) [-> (reply types)] [attributes]``,
    optionally guarded by ``#if``/``#endif`` preprocessor conditions.
    """
    receiver_attributes = None
    destination = None
    messages = []
    condition = None         # active #if condition for subsequent lines
    master_condition = None  # condition guarding the whole receiver block
    for line in file:
        match = re.search(r'messages -> (?P<destination>[A-Za-z_0-9]+) \s*(?:(?P<attributes>.*?)\s+)?{', line)
        if match:
            receiver_attributes = parse_attributes_string(match.group('attributes'))
            # A condition seen before the receiver declaration guards the
            # entire receiver rather than an individual message.
            if condition:
                master_condition = condition
                condition = None
            destination = match.group('destination')
            continue
        if line.startswith('#'):
            if line.startswith('#if '):
                condition = line.rstrip()[4:]
            elif line.startswith('#endif'):
                condition = None
            continue
        match = re.search(r'([A-Za-z_0-9]+)\((.*?)\)(?:(?:\s+->\s+)\((.*?)\))?(?:\s+(.*))?', line)
        if match:
            name, parameters_string, reply_parameters_string, attributes_string = match.groups()
            if parameters_string:
                parameters = parse_parameters_string(parameters_string)
                for parameter in parameters:
                    parameter.condition = condition
            else:
                parameters = []
            attributes = parse_attributes_string(attributes_string)
            if reply_parameters_string:
                reply_parameters = parse_parameters_string(reply_parameters_string)
                for reply_parameter in reply_parameters:
                    reply_parameter.condition = condition
            elif reply_parameters_string == '':
                # '-> ()' was present but empty: a reply with no arguments.
                reply_parameters = []
            else:
                # No '->' clause at all: the message expects no reply.
                reply_parameters = None
            messages.append(model.Message(name, parameters, reply_parameters, attributes, condition))
    return model.MessageReceiver(destination, receiver_attributes, messages, master_condition)
def parse_attributes_string(attributes_string):
    """Split a whitespace-separated attribute list into a list of strings.

    Returns None for an empty or missing attribute string, which callers
    treat as "no attributes".
    """
    return attributes_string.split() if attributes_string else None
def split_parameters_string(parameters_string):
    """Split a comma-separated parameter list into its pieces.

    Commas nested inside template angle brackets (for example inside
    ``HashMap<String, int>``) do not split the list.
    """
    pieces = []
    chars = []
    depth = 0
    for ch in parameters_string:
        if ch == ',' and depth == 0:
            pieces.append(''.join(chars))
            chars = []
            continue
        if ch == '<':
            depth += 1
        elif ch == '>':
            depth -= 1
        chars.append(ch)
    pieces.append(''.join(chars))
    return pieces
def parse_parameters_string(parameters_string):
    """Parse 'type name' declarations, each with an optional leading
    '[attribute]' annotation, into a list of model.Parameter objects."""
    parameters = []
    for parameter_string in split_parameters_string(parameters_string):
        match = re.search(r'\s*(?:\[(?P<attributes>.*?)\]\s+)?(?P<type_and_name>.*)', parameter_string)
        attributes_string, type_and_name_string = match.group('attributes', 'type_and_name')
        # The parameter name is the last space-separated token; everything
        # before it is the (possibly multi-word, templated) type.
        parameter_type, parameter_name = type_and_name_string.rsplit(' ', 1)
        parameters.append(model.Parameter(type=parameter_type, name=parameter_name, attributes=parse_attributes_string(attributes_string)))
    return parameters
| bsd-3-clause |
Aorjoa/aiyara-ceph-dash | .tox/flake8/lib/python2.7/site-packages/flake8/formatting/default.py | 1 | 2191 | """Default formatting class for Flake8."""
from flake8.formatting import base
class SimpleFormatter(base.BaseFormatter):
    """Shared behaviour for the Default and Pylint formatters.

    Sub-classes must define an ``error_format`` attribute: an old-style
    format string using the named parameters

    * code
    * text
    * path
    * row
    * col
    """

    error_format = None

    def format(self, error):
        """Return the error rendered through ``error_format``.

        If an output filename is specified, the result is written to that
        file; otherwise it is printed to standard out.
        """
        values = {
            "code": error.code,
            "text": error.text,
            "path": error.filename,
            "row": error.line_number,
            "col": error.column_number,
        }
        return self.error_format % values
class Default(SimpleFormatter):
    """Default formatter for Flake8.

    Also honours a user-supplied custom ``--format`` string for backwards
    compatibility.
    """

    error_format = '%(path)s:%(row)d:%(col)d: %(code)s %(text)s'

    def after_init(self):
        """Check for a custom format string."""
        requested = self.options.format
        if requested.lower() != 'default':
            self.error_format = requested
class Pylint(SimpleFormatter):
    """Format errors the way Pylint does."""

    error_format = '%(path)s:%(row)d: [%(code)s] %(text)s'
class FilenameOnly(SimpleFormatter):
    """Only print filenames, e.g., flake8 -q."""

    error_format = '%(path)s'

    def after_init(self):
        """Initialize our set of filenames."""
        self.filenames_already_printed = set()

    def format(self, error):
        """Emit each filename at most once; repeats yield None."""
        filename = error.filename
        if filename in self.filenames_already_printed:
            return None
        self.filenames_already_printed.add(filename)
        return super(FilenameOnly, self).format(error)
class Nothing(base.BaseFormatter):
    """Print absolutely nothing."""

    def format(self, error):
        """Swallow the error; produce no output."""
        return None
| bsd-2-clause |
cancan101/tensorflow | tensorflow/contrib/quantization/python/array_ops.py | 178 | 1156 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized Array Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops import gen_array_ops as quantized_gen_array_ops
from tensorflow.python.ops.gen_array_ops import dequantize
from tensorflow.python.ops.gen_array_ops import quantize_v2
from tensorflow.python.ops.gen_array_ops import quantized_concat
# pylint: enable=unused-import
| apache-2.0 |
edcast-inc/edx-platform-edcast | common/test/acceptance/pages/lms/discussion.py | 27 | 25034 | from contextlib import contextmanager
from bok_choy.javascript import wait_for_js
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise, Promise
from .course_page import CoursePage
class DiscussionPageMixin(object):
    """Helpers shared by the discussion page objects."""

    def is_ajax_finished(self):
        """Return True once jQuery reports no AJAX requests in flight."""
        active_requests = self.browser.execute_script("return jQuery.active")
        return active_requests == 0
class DiscussionThreadPage(PageObject, DiscussionPageMixin):
    """Page object for a single discussion thread.

    All queries are scoped to the DOM subtree matched by the
    ``thread_selector`` passed to the constructor.
    """
    # The thread renders inside another page, so there is no URL to visit.
    url = None
    def __init__(self, browser, thread_selector):
        """Bind this page object to the thread matched by `thread_selector`."""
        super(DiscussionThreadPage, self).__init__(browser)
        self.thread_selector = thread_selector
    def _find_within(self, selector):
        """
        Returns a query corresponding to the given CSS selector within the scope
        of this thread page
        """
        return self.q(css=self.thread_selector + " " + selector)
    def is_browser_on_page(self):
        """Return True when the thread's root element is present."""
        return self.q(css=self.thread_selector).present
    def _get_element_text(self, selector):
        """
        Returns the text of the first element matching the given selector, or
        None if no such element exists
        """
        text_list = self._find_within(selector).text
        return text_list[0] if text_list else None
    def _is_element_visible(self, selector):
        """Return True when the selector matches a present, visible element."""
        query = self._find_within(selector)
        return query.present and query.visible
    @contextmanager
    def _secondary_action_menu_open(self, ancestor_selector):
        """
        Given the selector for an ancestor of a secondary menu, return a context
        manager that will open and close the menu
        """
        self._find_within(ancestor_selector + " .action-more").click()
        EmptyPromise(
            lambda: self._is_element_visible(ancestor_selector + " .actions-dropdown"),
            "Secondary action menu opened"
        ).fulfill()
        yield
        # Only close the menu if it is still open; the action performed in
        # the with-block may already have dismissed it.
        if self._is_element_visible(ancestor_selector + " .actions-dropdown"):
            self._find_within(ancestor_selector + " .action-more").click()
            EmptyPromise(
                lambda: not self._is_element_visible(ancestor_selector + " .actions-dropdown"),
                "Secondary action menu closed"
            ).fulfill()
    def get_group_visibility_label(self):
        """
        Returns the group visibility label shown for the thread.
        """
        return self._get_element_text(".group-visibility-label")
    def get_response_total_text(self):
        """Returns the response count text, or None if not present"""
        return self._get_element_text(".response-count")
    def get_num_displayed_responses(self):
        """Returns the number of responses actually rendered"""
        return len(self._find_within(".discussion-response"))
    def get_shown_responses_text(self):
        """Returns the shown response count text, or None if not present"""
        return self._get_element_text(".response-display-count")
    def get_load_responses_button_text(self):
        """Returns the load more responses button text, or None if not present"""
        return self._get_element_text(".load-response-button")
    def load_more_responses(self):
        """Clicks the load more responses button and waits for responses to load"""
        self._find_within(".load-response-button").click()
        EmptyPromise(
            self.is_ajax_finished,
            "Loading more Responses"
        ).fulfill()
    def has_add_response_button(self):
        """Returns true if the add response button is visible, false otherwise"""
        return self._is_element_visible(".add-response-btn")
    def click_add_response_button(self):
        """
        Clicks the add response button and ensures that the response text
        field receives focus
        """
        self._find_within(".add-response-btn").first.click()
        EmptyPromise(
            lambda: self._find_within(".discussion-reply-new textarea:focus").present,
            "Response field received focus"
        ).fulfill()
    @wait_for_js
    def is_response_editor_visible(self, response_id):
        """Returns true if the response editor is present, false otherwise"""
        return self._is_element_visible(".response_{} .edit-post-body".format(response_id))
    @wait_for_js
    def is_discussion_body_visible(self):
        """Return True when the thread's post body is visible."""
        return self._is_element_visible(".post-body")
    def is_mathjax_preview_available(self):
        """Return True when the MathJax preview element is present and empty."""
        return self.q(css=".MathJax_Preview").text[0] == ""
    def is_mathjax_rendered(self):
        """Return True when MathJax output has been rendered on the page."""
        return self._is_element_visible(".MathJax")
    def is_response_visible(self, comment_id):
        """Returns true if the response is viewable onscreen"""
        return self._is_element_visible(".response_{} .response-body".format(comment_id))
    def is_response_editable(self, response_id):
        """Returns true if the edit response button is present, false otherwise"""
        with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
            return self._is_element_visible(".response_{} .discussion-response .action-edit".format(response_id))
    def get_response_body(self, response_id):
        """Return the body text of the given response."""
        return self._get_element_text(".response_{} .response-body".format(response_id))
    def start_response_edit(self, response_id):
        """Click the edit button for the response, loading the editing view"""
        with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
            self._find_within(".response_{} .discussion-response .action-edit".format(response_id)).first.click()
            EmptyPromise(
                lambda: self.is_response_editor_visible(response_id),
                "Response edit started"
            ).fulfill()
    def get_response_vote_count(self, response_id):
        """Return the displayed vote count text for the given response."""
        return self._get_element_text(".response_{} .discussion-response .action-vote .vote-count".format(response_id))
    def vote_response(self, response_id):
        """Vote for the response and wait for the displayed count to change."""
        current_count = self._get_element_text(".response_{} .discussion-response .action-vote .vote-count".format(response_id))
        self._find_within(".response_{} .discussion-response .action-vote".format(response_id)).first.click()
        self.wait_for_ajax()
        EmptyPromise(
            lambda: current_count != self.get_response_vote_count(response_id),
            "Response is voted"
        ).fulfill()
    def is_response_reported(self, response_id):
        """Return True when the response carries the 'reported' label."""
        return self._is_element_visible(".response_{} .discussion-response .post-label-reported".format(response_id))
    def report_response(self, response_id):
        """Report the response and wait for the reported label to appear."""
        with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
            self._find_within(".response_{} .discussion-response .action-report".format(response_id)).first.click()
            self.wait_for_ajax()
            EmptyPromise(
                lambda: self.is_response_reported(response_id),
                "Response is reported"
            ).fulfill()
    def is_response_endorsed(self, response_id):
        """Return True when the response's details mention endorsement."""
        return "endorsed" in self._get_element_text(".response_{} .discussion-response .posted-details".format(response_id))
    def endorse_response(self, response_id):
        """Endorse the response and wait for the endorsement to show."""
        self._find_within(".response_{} .discussion-response .action-endorse".format(response_id)).first.click()
        self.wait_for_ajax()
        EmptyPromise(
            lambda: self.is_response_endorsed(response_id),
            # NOTE(review): this promise description appears copied from
            # start_response_edit; it is only a diagnostic label.
            "Response edit started"
        ).fulfill()
    def set_response_editor_value(self, response_id, new_body):
        """Replace the contents of the response editor"""
        self._find_within(".response_{} .discussion-response .wmd-input".format(response_id)).fill(new_body)
    def submit_response_edit(self, response_id, new_response_body):
        """Click the submit button on the response editor"""
        self._find_within(".response_{} .discussion-response .post-update".format(response_id)).first.click()
        EmptyPromise(
            lambda: (
                not self.is_response_editor_visible(response_id) and
                self.is_response_visible(response_id) and
                self.get_response_body(response_id) == new_response_body
            ),
            "Comment edit succeeded"
        ).fulfill()
    def is_show_comments_visible(self, response_id):
        """Returns true if the "show comments" link is visible for a response"""
        return self._is_element_visible(".response_{} .action-show-comments".format(response_id))
    def show_comments(self, response_id):
        """Click the "show comments" link for a response"""
        self._find_within(".response_{} .action-show-comments".format(response_id)).first.click()
        EmptyPromise(
            lambda: self._is_element_visible(".response_{} .comments".format(response_id)),
            "Comments shown"
        ).fulfill()
    def is_add_comment_visible(self, response_id):
        """Returns true if the "add comment" form is visible for a response"""
        return self._is_element_visible("#wmd-input-comment-body-{}".format(response_id))
    def is_comment_visible(self, comment_id):
        """Returns true if the comment is viewable onscreen"""
        return self._is_element_visible("#comment_{} .response-body".format(comment_id))
    def get_comment_body(self, comment_id):
        """Return the body text of the given comment."""
        return self._get_element_text("#comment_{} .response-body".format(comment_id))
    def is_comment_deletable(self, comment_id):
        """Returns true if the delete comment button is present, false otherwise"""
        with self._secondary_action_menu_open("#comment_{}".format(comment_id)):
            return self._is_element_visible("#comment_{} .action-delete".format(comment_id))
    def delete_comment(self, comment_id):
        """Delete the comment (accepting the confirmation alert) and wait
        for it to disappear."""
        with self.handle_alert():
            with self._secondary_action_menu_open("#comment_{}".format(comment_id)):
                self._find_within("#comment_{} .action-delete".format(comment_id)).first.click()
        EmptyPromise(
            lambda: not self.is_comment_visible(comment_id),
            "Deleted comment was removed"
        ).fulfill()
    def is_comment_editable(self, comment_id):
        """Returns true if the edit comment button is present, false otherwise"""
        with self._secondary_action_menu_open("#comment_{}".format(comment_id)):
            return self._is_element_visible("#comment_{} .action-edit".format(comment_id))
    def is_comment_editor_visible(self, comment_id):
        """Returns true if the comment editor is present, false otherwise"""
        return self._is_element_visible(".edit-comment-body[data-id='{}']".format(comment_id))
    def _get_comment_editor_value(self, comment_id):
        """Return the current text inside the comment editor."""
        return self._find_within("#wmd-input-edit-comment-body-{}".format(comment_id)).text[0]
    def start_comment_edit(self, comment_id):
        """Click the edit button for the comment, loading the editing view"""
        old_body = self.get_comment_body(comment_id)
        with self._secondary_action_menu_open("#comment_{}".format(comment_id)):
            self._find_within("#comment_{} .action-edit".format(comment_id)).first.click()
            EmptyPromise(
                lambda: (
                    self.is_comment_editor_visible(comment_id) and
                    not self.is_comment_visible(comment_id) and
                    self._get_comment_editor_value(comment_id) == old_body
                ),
                "Comment edit started"
            ).fulfill()
    def set_comment_editor_value(self, comment_id, new_body):
        """Replace the contents of the comment editor"""
        self._find_within("#comment_{} .wmd-input".format(comment_id)).fill(new_body)
    def submit_comment_edit(self, comment_id, new_comment_body):
        """Click the submit button on the comment editor"""
        self._find_within("#comment_{} .post-update".format(comment_id)).first.click()
        EmptyPromise(
            lambda: (
                not self.is_comment_editor_visible(comment_id) and
                self.is_comment_visible(comment_id) and
                self.get_comment_body(comment_id) == new_comment_body
            ),
            "Comment edit succeeded"
        ).fulfill()
    def cancel_comment_edit(self, comment_id, original_body):
        """Click the cancel button on the comment editor"""
        self._find_within("#comment_{} .post-cancel".format(comment_id)).first.click()
        EmptyPromise(
            lambda: (
                not self.is_comment_editor_visible(comment_id) and
                self.is_comment_visible(comment_id) and
                self.get_comment_body(comment_id) == original_body
            ),
            "Comment edit was canceled"
        ).fulfill()
class DiscussionSortPreferencePage(CoursePage):
    """
    Page that contain the discussion board with sorting options
    """

    def __init__(self, browser, course_id):
        super(DiscussionSortPreferencePage, self).__init__(browser, course_id)
        self.url_path = "discussion/forum"

    def is_browser_on_page(self):
        """The page is ready when the sort control has rendered."""
        return self.q(css="body.discussion .forum-nav-sort-control").present

    def get_selected_sort_preference(self):
        """Return the value of the currently-selected sort option."""
        selected = self.q(
            css="body.discussion .forum-nav-sort-control option"
        ).filter(lambda el: el.is_selected())
        return selected[0].get_attribute("value")

    def change_sort_preference(self, sort_by):
        """Pick a different sort option by clicking its entry."""
        self.q(css="body.discussion .forum-nav-sort-control option[value='{0}']".format(sort_by)).click()

    def refresh_page(self):
        """
        Reload the page.
        """
        self.browser.refresh()
class DiscussionTabSingleThreadPage(CoursePage):
    """Discussion-tab view of one thread.

    Thread-level interactions are delegated to an embedded
    DiscussionThreadPage via ``__getattr__``.
    """

    def __init__(self, browser, course_id, discussion_id, thread_id):
        super(DiscussionTabSingleThreadPage, self).__init__(browser, course_id)
        self.thread_page = DiscussionThreadPage(
            browser,
            "body.discussion .discussion-article[data-id='{thread_id}']".format(thread_id=thread_id)
        )
        self.url_path = "discussion/forum/{discussion_id}/threads/{thread_id}".format(
            discussion_id=discussion_id, thread_id=thread_id
        )

    def is_browser_on_page(self):
        """Delegate readiness to the embedded thread page."""
        return self.thread_page.is_browser_on_page()

    def __getattr__(self, name):
        # Anything not defined here falls through to the thread page.
        return getattr(self.thread_page, name)

    def close_open_thread(self):
        """Close the thread via its secondary action menu."""
        with self.thread_page._secondary_action_menu_open(".forum-thread-main-wrapper"):
            self._find_within(".forum-thread-main-wrapper .action-close").first.click()

    @wait_for_js
    def is_window_on_top(self):
        """
        Check if window's scroll is at top
        """
        return self.browser.execute_script("return $('html, body').offset().top") == 0

    def _thread_is_rendered_successfully(self, thread_id):
        """Return True when the article for `thread_id` is visible."""
        return self.q(css=".discussion-article[data-id='{}']".format(thread_id)).visible

    def click_and_open_thread(self, thread_id):
        """
        Click specific thread on the list.
        """
        self.q(css="li[data-id='{}']".format(thread_id)).first.click()
        EmptyPromise(
            lambda: self._thread_is_rendered_successfully(thread_id),
            "Thread has been rendered"
        ).fulfill()

    def check_threads_rendered_successfully(self, thread_count):
        """
        Count the number of threads available on page.
        """
        rendered = self.q(css=".forum-nav-thread").results
        return len(rendered) == thread_count

    def check_window_is_on_top(self):
        """
        Check window is on top of the page
        """
        EmptyPromise(
            self.is_window_on_top,
            "Window is on top"
        ).fulfill()
class InlineDiscussionPage(PageObject):
    """Page object for an inline discussion module embedded in courseware."""

    url = None

    def __init__(self, browser, discussion_id):
        super(InlineDiscussionPage, self).__init__(browser)
        self._discussion_selector = (
            "body.courseware .discussion-module[data-discussion-id='{discussion_id}'] ".format(
                discussion_id=discussion_id
            )
        )

    def _find_within(self, selector):
        """
        Returns a query corresponding to the given CSS selector within the scope
        of this discussion page
        """
        return self.q(css=self._discussion_selector + " " + selector)

    def is_browser_on_page(self):
        """The module is on the page when its root element is present."""
        return self.q(css=self._discussion_selector).present

    def is_discussion_expanded(self):
        """Return True once the discussion body has been expanded."""
        return self._find_within(".discussion").present

    def expand_discussion(self):
        """Click the link to expand the discussion"""
        self._find_within(".discussion-show").first.click()
        EmptyPromise(
            self.is_discussion_expanded,
            "Discussion expanded"
        ).fulfill()

    def get_num_displayed_threads(self):
        """Return how many threads are currently rendered."""
        return len(self._find_within(".discussion-thread"))

    def element_exists(self, selector):
        """Return True when `selector` matches an element in this module."""
        return self._find_within(selector).present

    def is_new_post_opened(self):
        """Return True when the new-post form is visible."""
        return self._find_within(".new-post-article").visible

    def click_element(self, selector):
        """Wait for `selector` inside the module, then click it."""
        self.wait_for_element_presence(
            "{discussion} {selector}".format(discussion=self._discussion_selector, selector=selector),
            "{selector} is visible".format(selector=selector)
        )
        self._find_within(selector).click()

    def click_cancel_new_post(self):
        """Cancel the new-post form and wait until it has closed."""
        self.click_element(".cancel")
        EmptyPromise(
            lambda: not self.is_new_post_opened(),
            "New post closed"
        ).fulfill()

    def click_new_post_button(self):
        """Open the new-post form and wait for it to appear."""
        self.click_element(".new-post-btn")
        EmptyPromise(
            self.is_new_post_opened,
            "New post opened"
        ).fulfill()

    @wait_for_js
    def _is_element_visible(self, selector):
        """Return True when `selector` is both present and visible."""
        matched = self._find_within(selector)
        return matched.present and matched.visible
class InlineDiscussionThreadPage(DiscussionThreadPage):
    """A single thread rendered inside an inline discussion module."""

    def __init__(self, browser, thread_id):
        root_selector = "body.courseware .discussion-module #thread_{thread_id}".format(thread_id=thread_id)
        super(InlineDiscussionThreadPage, self).__init__(browser, root_selector)

    def expand(self):
        """Clicks the link to expand the thread"""
        self._find_within(".forum-thread-expand").first.click()
        EmptyPromise(
            lambda: bool(self.get_response_total_text()),
            "Thread expanded"
        ).fulfill()

    def is_thread_anonymous(self):
        """A thread is anonymous when no username is shown in its details."""
        has_username = self.q(css=".posted-details > .username").present
        return not has_username

    @wait_for_js
    def check_if_selector_is_focused(self, selector):
        """
        Check if selector is focused
        """
        script = "return $('{}').is(':focus')".format(selector)
        return self.browser.execute_script(script)
class DiscussionUserProfilePage(CoursePage):
    """Page object for a user's discussion profile, including the paginated
    list of that user's threads."""

    TEXT_NEXT = u'Next >'
    TEXT_PREV = u'< Previous'
    PAGING_SELECTOR = "a.discussion-pagination[data-page-number]"

    def __init__(self, browser, course_id, user_id, username, page=1):
        super(DiscussionUserProfilePage, self).__init__(browser, course_id)
        self.url_path = "discussion/forum/dummy/users/{}?page={}".format(user_id, page)
        self.username = username

    def is_browser_on_page(self):
        """The page is loaded when the thread list and the profile link
        (showing the expected username) are both present."""
        return (
            self.q(css='section.discussion-user-threads[data-course-id="{}"]'.format(self.course_id)).present
            and
            self.q(css='section.user-profile a.learner-profile-link').present
            and
            self.q(css='section.user-profile a.learner-profile-link').text[0] == self.username
        )

    @wait_for_js
    def is_window_on_top(self):
        """Return True when the window is scrolled to the very top."""
        return self.browser.execute_script("return $('html, body').offset().top") == 0

    def get_shown_thread_ids(self):
        """Return the ids of the rendered threads, with the element id's
        'thread_' prefix (7 characters) stripped."""
        elems = self.q(css="article.discussion-thread")
        return [elem.get_attribute("id")[7:] for elem in elems]

    def get_current_page(self):
        """Return the current paginator page number, waiting up to 5
        seconds for the paginator to render."""
        def check_func():
            try:
                current_page = int(self.q(css="nav.discussion-paginator li.current-page").text[0])
            except Exception:
                # Paginator not rendered yet (no element/text) or text not
                # numeric: report failure so the Promise retries.  Narrowed
                # from a bare `except:` so KeyboardInterrupt/SystemExit are
                # no longer swallowed.
                return False, None
            return True, current_page
        return Promise(
            check_func, 'discussion-paginator current page has text', timeout=5,
        ).fulfill()

    def _check_pager(self, text, page_number=None):
        """
        returns True if 'text' matches the text in any of the pagination elements. If
        page_number is provided, only return True if the element points to that result
        page.
        """
        elems = self.q(css=self.PAGING_SELECTOR).filter(lambda elem: elem.text == text)
        if page_number:
            elems = elems.filter(lambda elem: int(elem.get_attribute('data-page-number')) == page_number)
        return elems.present

    def get_clickable_pages(self):
        """Return the sorted list of page numbers that can be clicked."""
        return sorted([
            int(elem.get_attribute('data-page-number'))
            for elem in self.q(css=self.PAGING_SELECTOR)
            if str(elem.text).isdigit()
        ])

    def is_prev_button_shown(self, page_number=None):
        """Return True when the '< Previous' link is shown (optionally
        pointing at `page_number`)."""
        return self._check_pager(self.TEXT_PREV, page_number)

    def is_next_button_shown(self, page_number=None):
        """Return True when the 'Next >' link is shown (optionally
        pointing at `page_number`)."""
        return self._check_pager(self.TEXT_NEXT, page_number)

    def _click_pager_with_text(self, text, page_number):
        """
        click the first pagination element with whose text is `text` and ensure
        the resulting page number matches `page_number`.
        """
        targets = [elem for elem in self.q(css=self.PAGING_SELECTOR) if elem.text == text]
        targets[0].click()
        EmptyPromise(
            lambda: self.get_current_page() == page_number,
            "navigated to desired page"
        ).fulfill()

    def click_prev_page(self):
        """Go to the previous results page and wait for scroll-to-top."""
        self._click_pager_with_text(self.TEXT_PREV, self.get_current_page() - 1)
        EmptyPromise(
            self.is_window_on_top,
            "Window is on top"
        ).fulfill()

    def click_next_page(self):
        """Go to the next results page and wait for scroll-to-top."""
        self._click_pager_with_text(self.TEXT_NEXT, self.get_current_page() + 1)
        EmptyPromise(
            self.is_window_on_top,
            "Window is on top"
        ).fulfill()

    def click_on_page(self, page_number):
        """Go directly to `page_number` and wait for scroll-to-top."""
        self._click_pager_with_text(unicode(page_number), page_number)
        EmptyPromise(
            self.is_window_on_top,
            "Window is on top"
        ).fulfill()

    def click_on_sidebar_username(self):
        """Click the username link in the profile sidebar."""
        self.wait_for_page()
        self.q(css='.learner-profile-link').first.click()
class DiscussionTabHomePage(CoursePage, DiscussionPageMixin):
    """
    Landing page of the course discussion tab, with forum search and the
    'New Post' entry point.
    """

    ALERT_SELECTOR = ".discussion-body .forum-nav .search-alert"

    def __init__(self, browser, course_id):
        super(DiscussionTabHomePage, self).__init__(browser, course_id)
        self.url_path = "discussion/forum/"

    def is_browser_on_page(self):
        """Return True once the discussion home header is rendered."""
        return self.q(css=".discussion-body section.home-header").present

    def perform_search(self, text="dummy"):
        """Type `text` into the forum search box, submit, and wait for results."""
        query = text + "\n"  # trailing newline submits the search field
        self.q(css=".forum-nav-search-input").fill(query)
        EmptyPromise(
            self.is_ajax_finished,
            "waiting for server to return result"
        ).fulfill()

    def get_search_alert_messages(self):
        """Return the text of each currently-visible search alert message."""
        return self.q(css="{} .message".format(self.ALERT_SELECTOR)).text

    def get_search_alert_links(self):
        """Return the query for the jump links inside the search alerts."""
        return self.q(css="{} .link-jump".format(self.ALERT_SELECTOR))

    def dismiss_alert_message(self, text):
        """
        Dismiss any search alert message containing the specified text.
        """
        def _match_messages(text):
            return self.q(css=".search-alert").filter(lambda elem: text in elem.text)

        for alert_id in _match_messages(text).attrs("id"):
            dismiss_css = "{}#{} a.dismiss".format(self.ALERT_SELECTOR, alert_id)
            self.q(css=dismiss_css).click()
        EmptyPromise(
            lambda: _match_messages(text).results == [],
            "waiting for dismissed alerts to disappear"
        ).fulfill()

    def click_new_post_button(self):
        """
        Clicks the 'New Post' button and waits for the new-post form to appear.
        """
        self.new_post_button.click()
        EmptyPromise(
            lambda: self.new_post_form,
            "New post action succeeded"
        ).fulfill()

    @property
    def new_post_button(self):
        """
        The 'New Post' button, or None unless exactly one is visible.
        """
        buttons = self.q(css="ol.course-tabs .new-post-btn")
        if buttons.visible and len(buttons) == 1:
            return buttons.first
        return None

    @property
    def new_post_form(self):
        """
        The new-post form element, or None unless exactly one is visible.
        """
        forms = self.q(css=".forum-new-post-form")
        if forms.visible and len(forms) == 1:
            return forms[0]
        return None
# NOTE(review): the lines below were non-source residue from a dataset export
# (a license table cell and website boilerplate); commented out so the module
# remains valid Python.
# | agpl-3.0 |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.