gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of dtypes (Tensor element types)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import types_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util.tf_export import tf_export
# Numpy scalar type used to represent bfloat16 values. It is supplied by the
# TensorFlow C extension because numpy has no native bfloat16 type.
_np_bfloat16 = pywrap_tensorflow.TF_bfloat16_type()
@tf_export("DType")
class DType(object):
  """Represents the type of the elements in a `Tensor`.

  The following `DType` objects are defined:

  * `tf.float16`: 16-bit half-precision floating-point.
  * `tf.float32`: 32-bit single-precision floating-point.
  * `tf.float64`: 64-bit double-precision floating-point.
  * `tf.bfloat16`: 16-bit truncated floating-point.
  * `tf.complex64`: 64-bit single-precision complex.
  * `tf.complex128`: 128-bit double-precision complex.
  * `tf.int8`: 8-bit signed integer.
  * `tf.uint8`: 8-bit unsigned integer.
  * `tf.uint16`: 16-bit unsigned integer.
  * `tf.uint32`: 32-bit unsigned integer.
  * `tf.uint64`: 64-bit unsigned integer.
  * `tf.int16`: 16-bit signed integer.
  * `tf.int32`: 32-bit signed integer.
  * `tf.int64`: 64-bit signed integer.
  * `tf.bool`: Boolean.
  * `tf.string`: String.
  * `tf.qint8`: Quantized 8-bit signed integer.
  * `tf.quint8`: Quantized 8-bit unsigned integer.
  * `tf.qint16`: Quantized 16-bit signed integer.
  * `tf.quint16`: Quantized 16-bit unsigned integer.
  * `tf.qint32`: Quantized 32-bit signed integer.
  * `tf.resource`: Handle to a mutable resource.
  * `tf.variant`: Values of arbitrary types.

  In addition, variants of these types with the `_ref` suffix are
  defined for reference-typed tensors.

  The `tf.as_dtype()` function converts numpy types and string type
  names to a `DType` object.
  """

  def __init__(self, type_enum):
    """Creates a new `DataType`.

    NOTE(mrry): In normal circumstances, you should not need to
    construct a `DataType` object directly. Instead, use the
    `tf.as_dtype()` function.

    Args:
      type_enum: A `types_pb2.DataType` enum value.

    Raises:
      TypeError: If `type_enum` is not a value `types_pb2.DataType`.
    """
    # TODO(mrry): Make the necessary changes (using __new__) to ensure
    # that calling this returns one of the interned values.
    type_enum = int(type_enum)
    if (type_enum not in types_pb2.DataType.values() or
        type_enum == types_pb2.DT_INVALID):
      raise TypeError(
          "type_enum is not a valid types_pb2.DataType: %s" % type_enum)
    self._type_enum = type_enum

  @property
  def _is_ref_dtype(self):
    """Returns `True` if this `DType` represents a reference type."""
    # In the DataType proto, each reference type's enum value is its base
    # type's enum value plus 100.
    return self._type_enum > 100

  @property
  def _as_ref(self):
    """Returns a reference `DType` based on this `DType`."""
    if self._is_ref_dtype:
      return self
    else:
      return _INTERN_TABLE[self._type_enum + 100]

  @property
  def base_dtype(self):
    """Returns a non-reference `DType` based on this `DType`."""
    if self._is_ref_dtype:
      return _INTERN_TABLE[self._type_enum - 100]
    else:
      return self

  @property
  def real_dtype(self):
    """Returns the dtype correspond to this dtype's real part."""
    base = self.base_dtype
    if base == complex64:
      return float32
    elif base == complex128:
      return float64
    else:
      return self

  @property
  def is_numpy_compatible(self):
    """Returns whether this dtype has a numpy representation.

    Resource and variant types (and their ref variants) have no numpy
    equivalent; everything else maps through `_TF_TO_NP`.
    """
    numpy_incompatible = [
        types_pb2.DT_VARIANT, types_pb2.DT_VARIANT_REF, types_pb2.DT_RESOURCE,
        types_pb2.DT_RESOURCE_REF
    ]
    return self._type_enum not in numpy_incompatible

  @property
  def as_numpy_dtype(self):
    """Returns a `numpy.dtype` based on this `DType`."""
    return _TF_TO_NP[self._type_enum]

  @property
  def as_datatype_enum(self):
    """Returns a `types_pb2.DataType` enum value based on this `DType`."""
    return self._type_enum

  @property
  def is_bool(self):
    """Returns whether this is a boolean data type"""
    return self.base_dtype == bool

  @property
  def is_integer(self):
    """Returns whether this is a (non-quantized) integer type."""
    return (self.is_numpy_compatible and not self.is_quantized and
            np.issubdtype(self.as_numpy_dtype, np.integer))

  @property
  def is_floating(self):
    """Returns whether this is a (non-quantized, real) floating point type."""
    # bfloat16 is checked explicitly because numpy does not know about it.
    return ((self.is_numpy_compatible and
             np.issubdtype(self.as_numpy_dtype, np.floating)) or
            self.base_dtype == bfloat16)

  @property
  def is_complex(self):
    """Returns whether this is a complex floating point type."""
    return self.base_dtype in (complex64, complex128)

  @property
  def is_quantized(self):
    """Returns whether this is a quantized data type."""
    return self.base_dtype in [qint8, quint8, qint16, quint16, qint32]

  @property
  def is_unsigned(self):
    """Returns whether this type is unsigned.

    Non-numeric, unordered, and quantized types are not considered unsigned, and
    this function returns `False`.

    Returns:
      Whether a `DType` is unsigned.
    """
    try:
      return self.min == 0
    except TypeError:
      return False

  @property
  def min(self):
    """Returns the minimum representable value in this data type.

    Raises:
      TypeError: if this is a non-numeric, unordered, or quantized type.
    """
    if (self.is_quantized or
        self.base_dtype in (bool, string, complex64, complex128)):
      raise TypeError("Cannot find minimum value of %s." % self)
    # There is no simple way to get the min value of a dtype; we have to check
    # float and int types separately. `np.finfo`/`np.iinfo` raise ValueError
    # or TypeError for dtypes they do not handle, and `as_numpy_dtype` raises
    # KeyError for types with no numpy equivalent (resource, variant).
    try:
      return np.finfo(self.as_numpy_dtype()).min
    except (TypeError, ValueError, KeyError):  # not a floating-point type
      try:
        return np.iinfo(self.as_numpy_dtype()).min
      except (TypeError, ValueError, KeyError):  # not an integer type either
        if self.base_dtype == bfloat16:
          # bfloat16 is unknown to numpy; hard-code its most negative
          # finite value.
          return _np_bfloat16(float.fromhex("-0x1.FEp127"))
        raise TypeError("Cannot find minimum value of %s." % self)

  @property
  def max(self):
    """Returns the maximum representable value in this data type.

    Raises:
      TypeError: if this is a non-numeric, unordered, or quantized type.
    """
    if (self.is_quantized or
        self.base_dtype in (bool, string, complex64, complex128)):
      raise TypeError("Cannot find maximum value of %s." % self)
    # See the matching comment in `min` above for the exception types caught.
    try:
      return np.finfo(self.as_numpy_dtype()).max
    except (TypeError, ValueError, KeyError):  # not a floating-point type
      try:
        return np.iinfo(self.as_numpy_dtype()).max
      except (TypeError, ValueError, KeyError):  # not an integer type either
        if self.base_dtype == bfloat16:
          # bfloat16 is unknown to numpy; hard-code its largest finite value.
          return _np_bfloat16(float.fromhex("0x1.FEp127"))
        raise TypeError("Cannot find maximum value of %s." % self)

  @property
  def limits(self, clip_negative=True):
    """Return intensity limits, i.e. (min, max) tuple, of the dtype.

    NOTE(review): because this is a property, callers can never actually pass
    `clip_negative`, so the negative range is always clipped — confirm whether
    this was intended before relying on the parameter.

    Args:
      clip_negative : bool, optional
          If True, clip the negative range (i.e. return 0 for min intensity)
          even if the image dtype allows negative values.
    Returns
      min, max : tuple
        Lower and upper intensity limits.
    """
    # Renamed from min/max to avoid shadowing the builtins.
    min_value, max_value = dtype_range[self.as_numpy_dtype]
    if clip_negative:
      min_value = 0
    return min_value, max_value

  def is_compatible_with(self, other):
    """Returns True if the `other` DType will be converted to this DType.

    The conversion rules are as follows:

    ```python
    DType(T)       .is_compatible_with(DType(T))        == True
    DType(T)       .is_compatible_with(DType(T).as_ref) == True
    DType(T).as_ref.is_compatible_with(DType(T))        == False
    DType(T).as_ref.is_compatible_with(DType(T).as_ref) == True
    ```

    Args:
      other: A `DType` (or object that may be converted to a `DType`).

    Returns:
      True if a Tensor of the `other` `DType` will be implicitly converted to
      this `DType`.
    """
    other = as_dtype(other)
    return self._type_enum in (other.as_datatype_enum,
                               other.base_dtype.as_datatype_enum)

  def __eq__(self, other):
    """Returns True iff this DType refers to the same type as `other`."""
    if other is None:
      return False
    try:
      dtype = as_dtype(other).as_datatype_enum
      return self._type_enum == dtype  # pylint: disable=protected-access
    except TypeError:
      return False

  def __ne__(self, other):
    """Returns True iff self != other."""
    return not self.__eq__(other)

  @property
  def name(self):
    """Returns the string name for this `DType`."""
    return _TYPE_TO_STRING[self._type_enum]

  def __int__(self):
    return self._type_enum

  def __str__(self):
    return "<dtype: %r>" % self.name

  def __repr__(self):
    return "tf." + self.name

  def __hash__(self):
    return self._type_enum

  @property
  def size(self):
    """Returns the size of one element of this dtype in bytes.

    Resource and variant handles report a nominal size of 1.
    """
    if (self._type_enum == types_pb2.DT_VARIANT or
        self._type_enum == types_pb2.DT_RESOURCE):
      return 1
    return np.dtype(self.as_numpy_dtype).itemsize
# Define data type range of numpy dtype.
#
# NOTE: the original table also listed `np.bool8`, which is the very same
# object as `np.bool_` (so the entry was a duplicate dict key) and which was
# removed from numpy's namespace in numpy 2.0. Only `np.bool_` is kept.
dtype_range = {
    np.bool_: (False, True),
    np.uint8: (0, 255),
    np.uint16: (0, 65535),
    np.int8: (-128, 127),
    np.int16: (-32768, 32767),
    np.int64: (-2**63, 2**63 - 1),
    np.uint64: (0, 2**64 - 1),
    np.int32: (-2**31, 2**31 - 1),
    np.uint32: (0, 2**32 - 1),
    np.float32: (-1, 1),
    np.float64: (-1, 1)
}
# Define standard wrappers for the types_pb2.DataType enum.
# Each public dtype is constructed once here and registered as a tf.* symbol;
# `_INTERN_TABLE` below reuses these instances.
resource = DType(types_pb2.DT_RESOURCE)
tf_export("resource").export_constant(__name__, "resource")
variant = DType(types_pb2.DT_VARIANT)
tf_export("variant").export_constant(__name__, "variant")
float16 = DType(types_pb2.DT_HALF)
tf_export("float16").export_constant(__name__, "float16")
half = float16
tf_export("half").export_constant(__name__, "half")
float32 = DType(types_pb2.DT_FLOAT)
tf_export("float32").export_constant(__name__, "float32")
float64 = DType(types_pb2.DT_DOUBLE)
tf_export("float64").export_constant(__name__, "float64")
double = float64
tf_export("double").export_constant(__name__, "double")
int32 = DType(types_pb2.DT_INT32)
tf_export("int32").export_constant(__name__, "int32")
uint8 = DType(types_pb2.DT_UINT8)
tf_export("uint8").export_constant(__name__, "uint8")
uint16 = DType(types_pb2.DT_UINT16)
tf_export("uint16").export_constant(__name__, "uint16")
# NOTE(review): uint32/uint64 are the only public-looking dtypes with no
# tf_export registration — presumably deliberate at this point in time;
# confirm before adding exports.
uint32 = DType(types_pb2.DT_UINT32)
uint64 = DType(types_pb2.DT_UINT64)
int16 = DType(types_pb2.DT_INT16)
tf_export("int16").export_constant(__name__, "int16")
int8 = DType(types_pb2.DT_INT8)
tf_export("int8").export_constant(__name__, "int8")
string = DType(types_pb2.DT_STRING)
tf_export("string").export_constant(__name__, "string")
complex64 = DType(types_pb2.DT_COMPLEX64)
tf_export("complex64").export_constant(__name__, "complex64")
complex128 = DType(types_pb2.DT_COMPLEX128)
tf_export("complex128").export_constant(__name__, "complex128")
int64 = DType(types_pb2.DT_INT64)
tf_export("int64").export_constant(__name__, "int64")
# NOTE: this shadows the builtin `bool` for the rest of the module.
bool = DType(types_pb2.DT_BOOL)
tf_export("bool").export_constant(__name__, "bool")
qint8 = DType(types_pb2.DT_QINT8)
tf_export("qint8").export_constant(__name__, "qint8")
quint8 = DType(types_pb2.DT_QUINT8)
tf_export("quint8").export_constant(__name__, "quint8")
qint16 = DType(types_pb2.DT_QINT16)
tf_export("qint16").export_constant(__name__, "qint16")
quint16 = DType(types_pb2.DT_QUINT16)
tf_export("quint16").export_constant(__name__, "quint16")
qint32 = DType(types_pb2.DT_QINT32)
tf_export("qint32").export_constant(__name__, "qint32")
resource_ref = DType(types_pb2.DT_RESOURCE_REF)
variant_ref = DType(types_pb2.DT_VARIANT_REF)
bfloat16 = DType(types_pb2.DT_BFLOAT16)
tf_export("bfloat16").export_constant(__name__, "bfloat16")
# Reference (mutable) variants; note they carry no tf_export registration.
float16_ref = DType(types_pb2.DT_HALF_REF)
half_ref = float16_ref
float32_ref = DType(types_pb2.DT_FLOAT_REF)
float64_ref = DType(types_pb2.DT_DOUBLE_REF)
double_ref = float64_ref
int32_ref = DType(types_pb2.DT_INT32_REF)
uint32_ref = DType(types_pb2.DT_UINT32_REF)
uint8_ref = DType(types_pb2.DT_UINT8_REF)
uint16_ref = DType(types_pb2.DT_UINT16_REF)
int16_ref = DType(types_pb2.DT_INT16_REF)
int8_ref = DType(types_pb2.DT_INT8_REF)
string_ref = DType(types_pb2.DT_STRING_REF)
complex64_ref = DType(types_pb2.DT_COMPLEX64_REF)
complex128_ref = DType(types_pb2.DT_COMPLEX128_REF)
int64_ref = DType(types_pb2.DT_INT64_REF)
uint64_ref = DType(types_pb2.DT_UINT64_REF)
bool_ref = DType(types_pb2.DT_BOOL_REF)
qint8_ref = DType(types_pb2.DT_QINT8_REF)
quint8_ref = DType(types_pb2.DT_QUINT8_REF)
qint16_ref = DType(types_pb2.DT_QINT16_REF)
quint16_ref = DType(types_pb2.DT_QUINT16_REF)
qint32_ref = DType(types_pb2.DT_QINT32_REF)
bfloat16_ref = DType(types_pb2.DT_BFLOAT16_REF)
# Maintain an intern table so that we don't have to create a large
# number of small objects.
# Maps every DataType enum value (including the _REF variants) to its
# canonical DType instance; `DType._as_ref`/`base_dtype` rely on this table
# together with the +100 enum offset between a type and its ref variant.
_INTERN_TABLE = {
    types_pb2.DT_HALF: float16,
    types_pb2.DT_FLOAT: float32,
    types_pb2.DT_DOUBLE: float64,
    types_pb2.DT_INT32: int32,
    types_pb2.DT_UINT8: uint8,
    types_pb2.DT_UINT16: uint16,
    types_pb2.DT_UINT32: uint32,
    types_pb2.DT_UINT64: uint64,
    types_pb2.DT_INT16: int16,
    types_pb2.DT_INT8: int8,
    types_pb2.DT_STRING: string,
    types_pb2.DT_COMPLEX64: complex64,
    types_pb2.DT_COMPLEX128: complex128,
    types_pb2.DT_INT64: int64,
    types_pb2.DT_BOOL: bool,
    types_pb2.DT_QINT8: qint8,
    types_pb2.DT_QUINT8: quint8,
    types_pb2.DT_QINT16: qint16,
    types_pb2.DT_QUINT16: quint16,
    types_pb2.DT_QINT32: qint32,
    types_pb2.DT_BFLOAT16: bfloat16,
    types_pb2.DT_RESOURCE: resource,
    types_pb2.DT_VARIANT: variant,
    types_pb2.DT_HALF_REF: float16_ref,
    types_pb2.DT_FLOAT_REF: float32_ref,
    types_pb2.DT_DOUBLE_REF: float64_ref,
    types_pb2.DT_INT32_REF: int32_ref,
    types_pb2.DT_UINT32_REF: uint32_ref,
    types_pb2.DT_UINT8_REF: uint8_ref,
    types_pb2.DT_UINT16_REF: uint16_ref,
    types_pb2.DT_INT16_REF: int16_ref,
    types_pb2.DT_INT8_REF: int8_ref,
    types_pb2.DT_STRING_REF: string_ref,
    types_pb2.DT_COMPLEX64_REF: complex64_ref,
    types_pb2.DT_COMPLEX128_REF: complex128_ref,
    types_pb2.DT_INT64_REF: int64_ref,
    types_pb2.DT_UINT64_REF: uint64_ref,
    types_pb2.DT_BOOL_REF: bool_ref,
    types_pb2.DT_QINT8_REF: qint8_ref,
    types_pb2.DT_QUINT8_REF: quint8_ref,
    types_pb2.DT_QINT16_REF: qint16_ref,
    types_pb2.DT_QUINT16_REF: quint16_ref,
    types_pb2.DT_QINT32_REF: qint32_ref,
    types_pb2.DT_BFLOAT16_REF: bfloat16_ref,
    types_pb2.DT_RESOURCE_REF: resource_ref,
    types_pb2.DT_VARIANT_REF: variant_ref,
}
# Standard mappings between types_pb2.DataType values and string names.
# Used by `DType.name`, and inverted to build `_STRING_TO_TF`.
_TYPE_TO_STRING = {
    types_pb2.DT_HALF: "float16",
    types_pb2.DT_FLOAT: "float32",
    types_pb2.DT_DOUBLE: "float64",
    types_pb2.DT_INT32: "int32",
    types_pb2.DT_UINT8: "uint8",
    types_pb2.DT_UINT16: "uint16",
    types_pb2.DT_UINT32: "uint32",
    types_pb2.DT_UINT64: "uint64",
    types_pb2.DT_INT16: "int16",
    types_pb2.DT_INT8: "int8",
    types_pb2.DT_STRING: "string",
    types_pb2.DT_COMPLEX64: "complex64",
    types_pb2.DT_COMPLEX128: "complex128",
    types_pb2.DT_INT64: "int64",
    types_pb2.DT_BOOL: "bool",
    types_pb2.DT_QINT8: "qint8",
    types_pb2.DT_QUINT8: "quint8",
    types_pb2.DT_QINT16: "qint16",
    types_pb2.DT_QUINT16: "quint16",
    types_pb2.DT_QINT32: "qint32",
    types_pb2.DT_BFLOAT16: "bfloat16",
    types_pb2.DT_RESOURCE: "resource",
    types_pb2.DT_VARIANT: "variant",
    types_pb2.DT_HALF_REF: "float16_ref",
    types_pb2.DT_FLOAT_REF: "float32_ref",
    types_pb2.DT_DOUBLE_REF: "float64_ref",
    types_pb2.DT_INT32_REF: "int32_ref",
    types_pb2.DT_UINT32_REF: "uint32_ref",
    types_pb2.DT_UINT8_REF: "uint8_ref",
    types_pb2.DT_UINT16_REF: "uint16_ref",
    types_pb2.DT_INT16_REF: "int16_ref",
    types_pb2.DT_INT8_REF: "int8_ref",
    types_pb2.DT_STRING_REF: "string_ref",
    types_pb2.DT_COMPLEX64_REF: "complex64_ref",
    types_pb2.DT_COMPLEX128_REF: "complex128_ref",
    types_pb2.DT_INT64_REF: "int64_ref",
    types_pb2.DT_UINT64_REF: "uint64_ref",
    types_pb2.DT_BOOL_REF: "bool_ref",
    types_pb2.DT_QINT8_REF: "qint8_ref",
    types_pb2.DT_QUINT8_REF: "quint8_ref",
    types_pb2.DT_QINT16_REF: "qint16_ref",
    types_pb2.DT_QUINT16_REF: "quint16_ref",
    types_pb2.DT_QINT32_REF: "qint32_ref",
    types_pb2.DT_BFLOAT16_REF: "bfloat16_ref",
    types_pb2.DT_RESOURCE_REF: "resource_ref",
    types_pb2.DT_VARIANT_REF: "variant_ref",
}
# Reverse mapping: canonical dtype name -> interned DType instance.
_STRING_TO_TF = {
    type_name: _INTERN_TABLE[type_enum]
    for type_enum, type_name in _TYPE_TO_STRING.items()
}
# Add non-canonical aliases.
_STRING_TO_TF.update(
    half=float16,
    half_ref=float16_ref,
    float=float32,
    float_ref=float32_ref,
    double=float64,
    double_ref=float64_ref,
)
# Numpy representation for quantized dtypes.
#
# These are magic strings that are used in the swig wrapper to identify
# quantized types.
# TODO(mrry,keveman): Investigate Numpy type registration to replace this
# hard-coding of names.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
# _np_bfloat16 is defined by a module import.
# Custom struct dtype for directly-fed ResourceHandles of supported type(s).
np_resource = np.dtype([("resource", np.ubyte, 1)])
# Standard mappings between types_pb2.DataType values and numpy.dtypes.
# Standard mappings between numpy types/dtypes and DType values, matched by
# equality in `as_dtype`.
#
# NOTE: `np.object` and `np.bool` were plain aliases of the Python builtins
# and were removed from numpy's namespace (numpy >= 1.24), so the builtins
# are used directly. Because the module-level name `bool` is shadowed by the
# DType constant of the same name, the builtin bool type is obtained via
# `type(True)`.
_NP_TO_TF = frozenset([
    (np.float16, float16),
    (np.float32, float32),
    (np.float64, float64),
    (np.int32, int32),
    (np.int64, int64),
    (np.uint8, uint8),
    (np.uint16, uint16),
    (np.uint32, uint32),
    (np.uint64, uint64),
    (np.int16, int16),
    (np.int8, int8),
    (np.complex64, complex64),
    (np.complex128, complex128),
    (object, string),
    (type(True), bool),
    (_np_qint8, qint8),
    (_np_quint8, quint8),
    (_np_qint16, qint16),
    (_np_quint16, quint16),
    (_np_qint32, qint32),
    (_np_bfloat16, bfloat16),
])
# Standard mappings between types_pb2.DataType values and numpy.dtypes;
# backs `DType.as_numpy_dtype`.
#
# NOTE: `np.object` and `np.bool` were plain aliases of the Python builtins
# and were removed from numpy's namespace (numpy >= 1.24), so the builtins
# are used directly; `type(True)` spells the builtin bool because the
# module-level name `bool` is shadowed by the DType constant above.
_TF_TO_NP = {
    types_pb2.DT_HALF: np.float16,
    types_pb2.DT_FLOAT: np.float32,
    types_pb2.DT_DOUBLE: np.float64,
    types_pb2.DT_INT32: np.int32,
    types_pb2.DT_UINT8: np.uint8,
    types_pb2.DT_UINT16: np.uint16,
    types_pb2.DT_UINT32: np.uint32,
    types_pb2.DT_UINT64: np.uint64,
    types_pb2.DT_INT16: np.int16,
    types_pb2.DT_INT8: np.int8,
    # NOTE(touts): For strings we use the object dtype as it supports
    # variable length strings.
    types_pb2.DT_STRING: object,
    types_pb2.DT_COMPLEX64: np.complex64,
    types_pb2.DT_COMPLEX128: np.complex128,
    types_pb2.DT_INT64: np.int64,
    types_pb2.DT_BOOL: type(True),
    types_pb2.DT_QINT8: _np_qint8,
    types_pb2.DT_QUINT8: _np_quint8,
    types_pb2.DT_QINT16: _np_qint16,
    types_pb2.DT_QUINT16: _np_quint16,
    types_pb2.DT_QINT32: _np_qint32,
    types_pb2.DT_BFLOAT16: _np_bfloat16,
    # Ref types share the numpy representation of their base type.
    types_pb2.DT_HALF_REF: np.float16,
    types_pb2.DT_FLOAT_REF: np.float32,
    types_pb2.DT_DOUBLE_REF: np.float64,
    types_pb2.DT_INT32_REF: np.int32,
    types_pb2.DT_UINT32_REF: np.uint32,
    types_pb2.DT_UINT8_REF: np.uint8,
    types_pb2.DT_UINT16_REF: np.uint16,
    types_pb2.DT_INT16_REF: np.int16,
    types_pb2.DT_INT8_REF: np.int8,
    types_pb2.DT_STRING_REF: object,
    types_pb2.DT_COMPLEX64_REF: np.complex64,
    types_pb2.DT_COMPLEX128_REF: np.complex128,
    types_pb2.DT_INT64_REF: np.int64,
    types_pb2.DT_UINT64_REF: np.uint64,
    types_pb2.DT_BOOL_REF: type(True),
    types_pb2.DT_QINT8_REF: _np_qint8,
    types_pb2.DT_QUINT8_REF: _np_quint8,
    types_pb2.DT_QINT16_REF: _np_qint16,
    types_pb2.DT_QUINT16_REF: _np_quint16,
    types_pb2.DT_QINT32_REF: _np_qint32,
    types_pb2.DT_BFLOAT16_REF: _np_bfloat16,
}
# The set of all quantized dtypes, in both plain and reference variants.
QUANTIZED_DTYPES = frozenset([
    qint8, quint8, qint16, quint16, qint32, qint8_ref, quint8_ref, qint16_ref,
    quint16_ref, qint32_ref
])
tf_export("QUANTIZED_DTYPES").export_constant(__name__, "QUANTIZED_DTYPES")
@tf_export("as_dtype")
def as_dtype(type_value):
  """Converts the given `type_value` to a `DType`.

  Args:
    type_value: A value that can be converted to a `tf.DType`
      object. This may currently be a `tf.DType` object, a
      [`DataType`
      enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
      a string type name, or a `numpy.dtype`.

  Returns:
    A `DType` corresponding to `type_value`.

  Raises:
    TypeError: If `type_value` cannot be converted to a `DType`.
  """
  # Already a DType: nothing to do.
  if isinstance(type_value, DType):
    return type_value

  # A DataType enum value? (Table values are never None, so .get is safe.)
  interned = _INTERN_TABLE.get(type_value)
  if interned is not None:
    return interned

  # A string name or alias?
  by_name = _STRING_TO_TF.get(type_value)
  if by_name is not None:
    return by_name

  if isinstance(type_value, np.dtype):
    # The numpy dtype for strings is variable length. We can not compare
    # dtype with a single constant (np.string does not exist) to decide
    # dtype is a "string" type. We need to compare the dtype.type to be
    # sure it's a string type.
    if type_value.type in (np.string_, np.unicode_):
      return string

  # Finally, scan the numpy-type table, matching by equality.
  for np_type, tf_type in _NP_TO_TF:
    try:
      if np_type == type_value:
        return tf_type
    except TypeError as e:
      raise TypeError("Cannot convert {} to a dtype. {}".format(type_value, e))

  raise TypeError("Cannot convert value %r to a TensorFlow DType." % type_value)
| |
##########################################################################
#
# Copyright (c) 2007-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import glob
import sys
import random
import shutil
import os
from IECore import *
class TestTIFFReader(unittest.TestCase):
def testConstruction( self ):
r = Reader.create( "test/IECore/data/tiff/uvMap.512x256.8bit.tif" )
self.assertEqual( type(r), TIFFImageReader )
def testCanRead( self ):
self.assert_( TIFFImageReader.canRead( "test/IECore/data/tiff/uvMap.512x256.8bit.tif" ) )
def testIsComplete( self ):
r = Reader.create( "test/IECore/data/tiff/uvMap.512x256.8bit.tif" )
self.assertEqual( type(r), TIFFImageReader )
self.assert_( r.isComplete() )
r = Reader.create( "test/IECore/data/tiff/uvMap.512x256.16bit.truncated.tif" )
self.assertEqual( type(r), TIFFImageReader )
self.failIf( r.isComplete() )
def testChannelNames( self ):
r = Reader.create( "test/IECore/data/tiff/uvMap.512x256.8bit.tif" )
self.assertEqual( type(r), TIFFImageReader )
channelNames = r.channelNames()
self.assertEqual( len( channelNames ), 3 )
self.assert_( "R" in channelNames )
self.assert_( "G" in channelNames )
self.assert_( "B" in channelNames )
def testReadHeader( self ):
r = Reader.create( "test/IECore/data/tiff/uvMap.512x256.8bit.tif" )
self.assertEqual( type(r), TIFFImageReader )
h = r.readHeader()
channelNames = h['channelNames']
self.assertEqual( len( channelNames ), 3 )
self.assert_( "R" in channelNames )
self.assert_( "G" in channelNames )
self.assert_( "B" in channelNames )
self.assertEqual( h['displayWindow'], Box2iData( Box2i( V2i(0,0), V2i(511,255) ) ) )
self.assertEqual( h['dataWindow'], Box2iData( Box2i( V2i(0,0), V2i(511,255) ) ) )
def testManyChannels( self ):
r = Reader.create( "test/IECore/data/tiff/uvMap.100x100.manyChannels.16bit.tif" )
self.assertEqual( type(r), TIFFImageReader )
img = r.read()
self.assertEqual( type(img), ImagePrimitive )
self.assertEqual( img.displayWindow, Box2i( V2i( 0, 0 ), V2i( 99, 99 ) ) )
self.assertEqual( img.dataWindow, Box2i( V2i( 0, 0 ), V2i( 99, 99 ) ) )
ipe = PrimitiveEvaluator.create( img )
self.assert_( ipe.R() )
self.assert_( ipe.G() )
self.assert_( ipe.B() )
self.assert_( ipe.A() )
self.assert_( "Data1" in img )
self.assert_( "Data2" in img )
def testRead( self ):
r = Reader.create( "test/IECore/data/tiff/uvMap.512x256.8bit.tif" )
self.assertEqual( type(r), TIFFImageReader )
img = r.read()
self.assertEqual( type(img), ImagePrimitive )
self.assertEqual( img.displayWindow, Box2i( V2i( 0, 0 ), V2i( 511, 255 ) ) )
self.assertEqual( img.dataWindow, Box2i( V2i( 0, 0 ), V2i( 511, 255 ) ) )
def testReadChannel( self ):
r = Reader.create( "test/IECore/data/tiff/uvMap.512x256.8bit.tif" )
self.assertEqual( type(r), TIFFImageReader )
red = r.readChannel( "R" )
self.assert_( red )
green = r.readChannel( "G" )
self.assert_( green )
blue = r.readChannel( "B" )
self.assert_( blue )
self.assertRaises( RuntimeError, r.readChannel, "NonExistantChannel" )
self.assertEqual( len(red), len(green) )
self.assertEqual( len(red), len(blue) )
self.assertEqual( len(red), 512 * 256 )
def testSourceColorSpace( self ):
# for 32-bit floating point channels, it should be linear
self.assertEqual( Reader.create( "test/IECore/data/tiff/uvMap.200x100.rgba.32bit.tif" ).sourceColorSpace(), "linear" )
# for other bit-depths it assumes srgb
self.assertEqual( Reader.create( "test/IECore/data/tiff/uvMap.200x100.rgba.8bit.tif" ).sourceColorSpace(), "srgb" )
def testDataWindow( self ):
r = Reader.create( "test/IECore/data/tiff/cropWindow.640x480.16bit.tif" )
self.assertEqual( type(r), TIFFImageReader )
img = r.read()
expectedDataWindow = Box2i(
V2i( 320, 240 ),
V2i( 479, 359 ),
)
expectedDisplayWindow = Box2i(
V2i( 0, 0 ),
V2i( 639,479 )
)
self.assertEqual( img.dataWindow, expectedDataWindow )
self.assertEqual( img.displayWindow, expectedDisplayWindow )
def testDataWindowRead( self ):
r = Reader.create( "test/IECore/data/tiff/uvMap.512x256.8bit.tif" )
self.assertEqual( type(r), TIFFImageReader )
dataWindow = Box2i(
V2i( 360, 160 ),
V2i( 399, 199 )
)
dataWindow = Box2i(
V2i( 50, 50 ),
V2i( 450, 200 )
)
imgOriginal = r.read()
self.assertEqual( type(imgOriginal), ImagePrimitive )
r.parameters()["dataWindow"].setValue( Box2iData( dataWindow ) )
img = r.read()
self.assertEqual( type(img), ImagePrimitive )
self.assertEqual( img.dataWindow, dataWindow )
self.assertEqual( img.displayWindow, Box2i( V2i( 0, 0 ), V2i( 511, 255 ) ) )
self.assertEqual( len(img["R"].data), 401 * 151 )
self.assertEqual( len(img["G"].data), 401 * 151 )
self.assertEqual( len(img["B"].data), 401 * 151 )
ipe = PrimitiveEvaluator.create( img )
self.assert_( ipe.R() )
self.assert_( ipe.G() )
self.assert_( ipe.B() )
self.failIf ( ipe.A() )
result = ipe.createResult()
ipeOriginal = PrimitiveEvaluator.create( imgOriginal )
resultOriginal = ipeOriginal.createResult()
random.seed( 1 )
# Test for equivalence using 50 random pixels. Inside the data window, we expect the
# pixel values to be the same. Outside the data window we expect black.
for i in range( 0, 50 ):
pixel = V2i( int( random.uniform( 0, 511 ) ), int( random.uniform( 0, 255 ) ) )
found = ipe.pointAtPixel( pixel, result )
self.assert_( found )
found = ipeOriginal.pointAtPixel( pixel, resultOriginal )
self.assert_( found )
color = V3f(
result.floatPrimVar( ipe.R() ),
result.floatPrimVar( ipe.G() ),
result.floatPrimVar( ipe.B() )
)
if ( pixel.x >= dataWindow.min.x ) and ( pixel.x < dataWindow.max.x ) and (pixel.y >= dataWindow.min.y ) and ( pixel.y < dataWindow.max.y ) :
expectedColor = V3f(
resultOriginal.floatPrimVar( ipeOriginal.R() ),
resultOriginal.floatPrimVar( ipeOriginal.G() ),
resultOriginal.floatPrimVar( ipeOriginal.B() )
)
else :
expectedColor = V3f( 0, 0, 0 )
self.assert_( ( color - expectedColor).length() < 1.e-3 )
def testOrientation( self ) :
""" Test orientation of TIFF files """
img = Reader.create( "test/IECore/data/tiff/uvMap.512x256.8bit.tif" ).read()
ipe = PrimitiveEvaluator.create( img )
self.assert_( ipe.R() )
self.assert_( ipe.G() )
self.assert_( ipe.B() )
self.failIf ( ipe.A() )
result = ipe.createResult()
colorMap = {
V2i( 0 , 0 ) : V3f( 0, 0, 0 ),
V2i( 511, 0 ) : V3f( 1, 0, 0 ),
V2i( 0, 255 ) : V3f( 0, 1, 0 ),
V2i( 511, 255 ) : V3f( 1, 1, 0 ),
}
for point, expectedColor in colorMap.items() :
found = ipe.pointAtPixel( point, result )
self.assert_( found )
color = V3f(
result.floatPrimVar( ipe.R() ),
result.floatPrimVar( ipe.G() ),
result.floatPrimVar( ipe.B() )
)
self.assert_( ( color - expectedColor).length() < 1.e-6 )
def testMultiDirectory( self ):
r = Reader.create( "test/IECore/data/tiff/uvMap.multiRes.32bit.tif" )
r['colorSpace'] = 'linear'
self.assertEqual( type(r), TIFFImageReader )
self.assertEqual( r.numDirectories(), 10 )
self.assertRaises( RuntimeError, r.setDirectory, 10 )
self.assertRaises( RuntimeError, r.setDirectory, 11 )
self.assertRaises( RuntimeError, r.setDirectory, 200 )
directoryResolutions = {
0 : V2i( 256, 128 ),
1 : V2i( 512, 256 ),
2 : V2i( 256, 128 ),
3 : V2i( 128, 64 ),
4 : V2i( 64, 32 ),
5 : V2i( 32, 16 ),
6 : V2i( 16, 8 ),
7 : V2i( 8, 4 ),
8 : V2i( 4, 2 ),
9 : V2i( 2, 1 ),
}
for dirIndex, resolution in directoryResolutions.items() :
r.setDirectory( dirIndex )
img = r.read()
self.assertEqual( type(img), ImagePrimitive )
bottomRight = V2i( resolution.x - 1, resolution.y - 1)
self.assertEqual( img.displayWindow, Box2i( V2i( 0, 0 ), bottomRight ) )
self.assertEqual( img.dataWindow, Box2i( V2i( 0, 0 ), bottomRight ) )
expectedResult = Reader.create( "test/IECore/data/expectedResults/multiDirTiff" + str( dirIndex) + ".exr" ).read()
op = ImageDiffOp()
res = op(
imageA = img,
imageB = expectedResult,
maxError = 0.004,
skipMissingChannels = True
)
self.failIf( res.value )
def testErrors( self ):
r = TIFFImageReader()
self.assertRaises( RuntimeError, r.read )
self.assertRaises( RuntimeError, r.readChannel, "R" )
r = TIFFImageReader( "test/IECore/data/jpg/uvMap.512x256.jpg" )
self.assertRaises( RuntimeError, r.read )
def testAll( self ):
fileNames = glob.glob( "test/IECore/data/tiff/*.tif" ) + glob.glob( "test/IECore/data/tiff/*.tiff" )
expectedFailures = [
"test/IECore/data/tiff/rgb_black_circle.256x256.4bit.tiff",
"test/IECore/data/tiff/rgb_black_circle.256x256.2bit.tiff",
"test/IECore/data/tiff/rgb_black_circle.256x256.1bit.tiff",
"test/IECore/data/tiff/uvMap.512x256.16bit.truncated.tif",
]
for f in fileNames:
r = TIFFImageReader( f )
if f in expectedFailures :
self.assertRaises( RuntimeError, r.read )
else :
self.assert_( TIFFImageReader.canRead( f ) )
self.failIf( JPEGImageReader.canRead( f ) )
self.failIf( EXRImageReader.canRead( f ) )
self.failIf( CINImageReader.canRead( f ) )
img = r.read()
self.assertEqual( type(img), ImagePrimitive )
self.assert_( img.arePrimitiveVariablesValid() )
def testTilesWithLeftovers( self ) :
"""Check we cope with tiled images where the width and height aren't multiples of the tile size."""
r = TIFFImageReader( "test/IECore/data/tiff/tilesWithLeftovers.tif" )
r['colorSpace'] = 'linear'
i = r.read()
i2 = EXRImageReader( "test/IECore/data/exrFiles/tiffTileTestExpectedResults.exr" ).read()
op = ImageDiffOp()
res = op(
imageA = i,
imageB = i2,
maxError = 0.004,
skipMissingChannels = False
)
self.failIf( res.value )
def testReadWithIncorrectExtension( self ) :
shutil.copyfile( "test/IECore/data/tiff/uvMap.512x256.8bit.tif", "test/IECore/data/tiff/uvMap.512x256.8bit.dpx" )
# should be able to infer a correct reader even though the extension is incorrect
r = Reader.create( "test/IECore/data/tiff/uvMap.512x256.8bit.dpx" )
self.failUnless( isinstance( r, TIFFImageReader ) )
i = r.read()
self.failUnless( isinstance( i, ImagePrimitive ) )
def testProblemTDL( self ) :
    """Verify graceful handling of a 3delight .tdl with bad min/max sample tags."""

    # 3delight has started using the SMinSampleValue and SMaxSampleValue tags to store
    # the range of values in a tdl file. this is jolly useful for shader writers but a pain
    # for anyone using libtiff to read the images. libtiff currently doesn't support the
    # storage of different values per sample, and therefore complains when given
    # one of these files. we deal with this by pretending nothing has happened and allowing
    # all directories except the last one to be read (it's only the last one that has the
    # problem).
    reader = TIFFImageReader( "test/IECore/data/tiff/problem.tdl" )

    expectedResolutions = [
        ( 64, 32 ),
        ( 32, 16 ),
        ( 16, 8 ),
        ( 8, 4 ),
        ( 4, 2 ),
        # there should be a ( 2, 1 ) as well, but the best we can do is
        # ignore it.
    ]

    self.assertEqual( reader.numDirectories(), len( expectedResolutions ) )

    # Each mipmap directory must decode to an image of the expected size.
    for directoryIndex, ( expectedWidth, expectedHeight ) in enumerate( expectedResolutions ) :
        reader.setDirectory( directoryIndex )
        image = reader.read()
        size = image.dataWindow.size()
        self.assertEqual( size.x + 1, expectedWidth )
        self.assertEqual( size.y + 1, expectedHeight )
def tearDown( self ) :
    """Remove any files created by the tests above."""

    staleFiles = (
        "test/IECore/data/tiff/uvMap.512x256.8bit.dpx",
    )
    for staleFile in staleFiles :
        if os.path.exists( staleFile ) :
            os.remove( staleFile )
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| |
import logging
from PyQt4.QtGui import (
QGraphicsItem, QGraphicsPathItem, QGraphicsWidget, QGraphicsTextItem,
QGraphicsDropShadowEffect, QPainterPath, QPainterPathStroker,
QPolygonF, QColor, QPen
)
from PyQt4.QtCore import (
Qt, QPointF, QSizeF, QRectF, QLineF, QEvent, QT_VERSION
)
from PyQt4.QtCore import (
pyqtSignal as Signal,
pyqtProperty as Property
)
log = logging.getLogger(__name__)
from .graphicspathobject import GraphicsPathObject
class Annotation(QGraphicsWidget):
    """Base class for annotations in the canvas scheme.
    """
    def __init__(self, parent=None, **kwargs):
        QGraphicsWidget.__init__(self, parent, **kwargs)

    # QGraphicsWidget gained a native geometryChanged signal in Qt 4.7;
    # for older Qt we emulate it by emitting from a setGeometry override.
    if QT_VERSION < 0x40700:
        geometryChanged = Signal()

        def setGeometry(self, rect):
            QGraphicsWidget.setGeometry(self, rect)
            self.geometryChanged.emit()
    else:
        # Plain pass-through override on newer Qt (the base class emits
        # its own geometryChanged).
        def setGeometry(self, rect):
            QGraphicsWidget.setGeometry(self, rect)
class GraphicsTextEdit(QGraphicsTextItem):
    """
    QGraphicsTextItem subclass defining an additional placeholderText
    property (text displayed when no text is set).
    """
    def __init__(self, *args, **kwargs):
        QGraphicsTextItem.__init__(self, *args, **kwargs)

        # Text painted (dimmed) while the document is empty.
        self.__placeholderText = ""

    def setPlaceholderText(self, text):
        """
        Set the placeholder text. This is shown when the item has no text,
        i.e when `toPlainText()` returns an empty string.
        """
        if self.__placeholderText != text:
            self.__placeholderText = text
            # Repaint only if the placeholder is currently visible.
            if not self.toPlainText():
                self.update()

    def placeholderText(self):
        """
        Return the placeholder text.
        """
        return str(self.__placeholderText)

    placeholderText_ = Property(str, placeholderText, setPlaceholderText,
                                doc="Placeholder text")

    def paint(self, painter, option, widget=None):
        """Paint the text item, then overlay the placeholder when the
        document is empty, a placeholder is set, and the item is not being
        actively edited."""
        QGraphicsTextItem.paint(self, painter, option, widget)

        # Draw placeholder text if necessary.
        # NOTE(review): toHtml() typically returns boilerplate markup even
        # for an empty document, so toPlainText() is likely the operative
        # emptiness check here — confirm.
        if not (self.toPlainText() and self.toHtml()) and \
                self.__placeholderText and \
                not (self.hasFocus() and \
                     self.textInteractionFlags() & Qt.TextEditable):
            brect = self.boundingRect()
            painter.setFont(self.font())
            metrics = painter.fontMetrics()
            # Elide the placeholder to fit the item's width.
            text = metrics.elidedText(self.__placeholderText, Qt.ElideRight,
                                      brect.width())
            # Dim the default text color for the placeholder.
            color = self.defaultTextColor()
            color.setAlpha(min(color.alpha(), 150))
            painter.setPen(QPen(color))
            painter.drawText(brect, Qt.AlignTop | Qt.AlignLeft, text)
class TextAnnotation(Annotation):
    """Text annotation item for the canvas scheme.

    A movable/selectable widget hosting a :class:`GraphicsTextEdit` for the
    text and a :class:`QGraphicsPathItem` for an (optional) frame.
    """
    editingFinished = Signal()
    """Emitted when the editing is finished (i.e. the item loses focus)."""

    textEdited = Signal()
    """Emitted when the edited text changes."""

    def __init__(self, parent=None, **kwargs):
        Annotation.__init__(self, parent, **kwargs)
        self.setFlag(QGraphicsItem.ItemIsMovable)
        self.setFlag(QGraphicsItem.ItemIsSelectable)

        self.setFocusPolicy(Qt.ClickFocus)

        # (left, top, right, bottom) margins around the text item.
        self.__textMargins = (2, 2, 2, 2)

        # Geometry in local (item) coordinates.
        rect = self.geometry().translated(-self.pos())

        # Frame is hidden by default (NoPen).
        self.__framePen = QPen(Qt.NoPen)
        self.__framePathItem = QGraphicsPathItem(self)
        self.__framePathItem.setPen(self.__framePen)

        self.__textItem = GraphicsTextEdit(self)
        self.__textItem.setPlaceholderText(self.tr("Enter text here"))
        self.__textItem.setPos(2, 2)
        self.__textItem.setTextWidth(rect.width() - 4)
        self.__textItem.setTabChangesFocus(True)
        self.__textItem.setTextInteractionFlags(Qt.NoTextInteraction)
        self.__textItem.setFont(self.font())

        # Flags to apply while editing (see startEdit); the item itself
        # stays non-interactive until then.
        self.__textInteractionFlags = Qt.NoTextInteraction

        # Track document size so the frame can grow with the text.
        layout = self.__textItem.document().documentLayout()
        layout.documentSizeChanged.connect(self.__onDocumentSizeChanged)

        self.__updateFrame()

    def adjustSize(self):
        """Resize to a reasonable size.
        """
        # Let the text item compute its natural size first.
        self.__textItem.setTextWidth(-1)
        self.__textItem.adjustSize()
        size = self.__textItem.boundingRect().size()
        left, top, right, bottom = self.textMargins()
        geom = QRectF(self.pos(), size + QSizeF(left + right, top + bottom))
        self.setGeometry(geom)

    def setFramePen(self, pen):
        """Set the frame pen. By default Qt.NoPen is used (i.e. the frame
        is not shown).
        """
        if pen != self.__framePen:
            self.__framePen = QPen(pen)
            self.__updateFrameStyle()

    def framePen(self):
        """Return the frame pen.
        """
        return QPen(self.__framePen)

    def setFrameBrush(self, brush):
        """Set the frame brush.
        """
        self.__framePathItem.setBrush(brush)

    def frameBrush(self):
        """Return the frame brush.
        """
        return self.__framePathItem.brush()

    def __updateFrameStyle(self):
        # Selected items get a dashed highlight pen instead of the user pen.
        if self.isSelected():
            pen = QPen(QColor(96, 158, 215), 1.25, Qt.DashDotLine)
        else:
            pen = self.__framePen

        self.__framePathItem.setPen(pen)

    def setPlainText(self, text):
        """Set the annotation plain text.
        """
        self.__textItem.setPlainText(text)

    def toPlainText(self):
        """Return the annotation text as plain text."""
        return self.__textItem.toPlainText()

    def setHtml(self, text):
        """Set the annotation rich text.
        """
        self.__textItem.setHtml(text)

    def toHtml(self):
        """Return the annotation text as html."""
        return self.__textItem.toHtml()

    def setDefaultTextColor(self, color):
        """Set the default text color.
        """
        self.__textItem.setDefaultTextColor(color)

    def defaultTextColor(self):
        """Return the default text color."""
        return self.__textItem.defaultTextColor()

    def setTextMargins(self, left, top, right, bottom):
        """Set the text margins.
        """
        margins = (left, top, right, bottom)
        if self.__textMargins != margins:
            self.__textMargins = margins
            self.__textItem.setPos(left, top)
            self.__textItem.setTextWidth(
                max(self.geometry().width() - left - right, 0)
            )

    def textMargins(self):
        """Return the text margins.
        """
        return self.__textMargins

    def document(self):
        """Return the QTextDocument instance used internally.
        """
        return self.__textItem.document()

    def setTextCursor(self, cursor):
        """Set the cursor on the internal text item."""
        self.__textItem.setTextCursor(cursor)

    def textCursor(self):
        """Return the internal text item's cursor."""
        return self.__textItem.textCursor()

    def setTextInteractionFlags(self, flags):
        """Set the flags that will be applied to the text item while editing."""
        self.__textInteractionFlags = flags

    def textInteractionFlags(self):
        """Return the stored text interaction flags."""
        return self.__textInteractionFlags

    def setDefaultStyleSheet(self, stylesheet):
        """Set the default style sheet on the internal document."""
        self.document().setDefaultStyleSheet(stylesheet)

    def mouseDoubleClickEvent(self, event):
        Annotation.mouseDoubleClickEvent(self, event)

        # A left double click starts editing, if the item is editable.
        if event.buttons() == Qt.LeftButton and \
                self.__textInteractionFlags & Qt.TextEditable:
            self.startEdit()

    def startEdit(self):
        """Start the annotation text edit process.
        """
        self.__textItem.setTextInteractionFlags(self.__textInteractionFlags)
        self.__textItem.setFocus(Qt.MouseFocusReason)

        # Install event filter to find out when the text item loses focus.
        self.__textItem.installSceneEventFilter(self)
        # Re-emit document changes as our textEdited signal.
        self.__textItem.document().contentsChanged.connect(
            self.textEdited
        )

    def endEdit(self):
        """End the annotation edit.
        """
        self.__textItem.setTextInteractionFlags(Qt.NoTextInteraction)
        self.__textItem.removeSceneEventFilter(self)
        self.__textItem.document().contentsChanged.disconnect(
            self.textEdited
        )
        # Drop any selection left over from editing.
        cursor = self.__textItem.textCursor()
        cursor.clearSelection()
        self.__textItem.setTextCursor(cursor)
        self.editingFinished.emit()

    def __onDocumentSizeChanged(self, size):
        # The size of the text document has changed. Expand the text
        # control rect's height if the text no longer fits inside.
        try:
            rect = self.geometry()
            _, top, _, bottom = self.textMargins()
            if rect.height() < (size.height() + bottom + top):
                rect.setHeight(size.height() + bottom + top)
                self.setGeometry(rect)
        except Exception:
            # Log rather than propagate out of the Qt signal handler.
            log.error("error in __onDocumentSizeChanged",
                      exc_info=True)

    def __updateFrame(self):
        # Rebuild the frame path to cover the whole (local) geometry.
        rect = self.geometry()
        rect.moveTo(0, 0)
        path = QPainterPath()
        path.addRect(rect)
        self.__framePathItem.setPath(path)

    def resizeEvent(self, event):
        # Keep the text width and the frame in sync with the new size.
        width = event.newSize().width()
        left, _, right, _ = self.textMargins()
        self.__textItem.setTextWidth(max(width - left - right, 0))
        self.__updateFrame()
        QGraphicsWidget.resizeEvent(self, event)

    def sceneEventFilter(self, obj, event):
        # Focus loss on the text item ends the edit session (see startEdit).
        if obj is self.__textItem and event.type() == QEvent.FocusOut:
            self.__textItem.focusOutEvent(event)
            self.endEdit()
            return True

        return Annotation.sceneEventFilter(self, obj, event)

    def itemChange(self, change, value):
        if change == QGraphicsItem.ItemSelectedHasChanged:
            self.__updateFrameStyle()
        return Annotation.itemChange(self, change, value)

    def changeEvent(self, event):
        # Propagate widget font changes to the text item.
        if event.type() == QEvent.FontChange:
            self.__textItem.setFont(self.font())

        Annotation.changeEvent(self, event)
class ArrowItem(GraphicsPathObject):
    """A graphics path item rendering an arrow along a base line, in one of
    two styles (Plain or Concave)."""

    #: Arrow Style
    Plain, Concave = 1, 2

    def __init__(self, parent=None, line=None, lineWidth=4, **kwargs):
        GraphicsPathObject.__init__(self, parent, **kwargs)

        if line is None:
            line = QLineF(0, 0, 10, 0)

        self.__line = line
        self.__lineWidth = lineWidth
        self.__arrowStyle = ArrowItem.Plain

        self.__updateArrowPath()

    def setLine(self, line):
        """Set the baseline of the arrow (:class:`QLineF`).
        """
        if self.__line != line:
            self.__line = QLineF(line)
            self.__updateArrowPath()

    def line(self):
        """Return the baseline of the arrow.
        """
        return QLineF(self.__line)

    def setLineWidth(self, lineWidth):
        """Set the width of the arrow.
        """
        if self.__lineWidth != lineWidth:
            self.__lineWidth = lineWidth
            self.__updateArrowPath()

    def lineWidth(self):
        """Return the width of the arrow.
        """
        return self.__lineWidth

    def setArrowStyle(self, style):
        """Set the arrow style (`ArrowItem.Plain` or `ArrowItem.Concave`)
        """
        if self.__arrowStyle != style:
            self.__arrowStyle = style
            self.__updateArrowPath()

    def arrowStyle(self):
        """Return the arrow style
        """
        return self.__arrowStyle

    def __updateArrowPath(self):
        # Rebuild the painter path for the current line/width/style.
        if self.__arrowStyle == ArrowItem.Plain:
            path = arrow_path_plain(self.__line, self.__lineWidth)
        else:
            path = arrow_path_concave(self.__line, self.__lineWidth)
        self.setPath(path)
def arrow_path_plain(line, width):
    """
    Return an :class:`QPainterPath` of a plain looking arrow.

    The shaft is a stroked straight segment; the head is a triangle
    polygon united with it.
    """
    path = QPainterPath()
    tail, tip = line.p1(), line.p2()

    # A degenerate (zero length) line yields an empty path.
    if tail == tip:
        return path

    shaft = QLineF(line)
    # Leave room for the head, but require some minimum shaft length.
    shaft.setLength(max(line.length() - width * 3, width * 3))

    path.moveTo(shaft.p1())
    path.lineTo(shaft.p2())

    stroker = QPainterPathStroker()
    stroker.setWidth(width)
    path = stroker.createStroke(path)

    head_length = width * 4
    head_angle = 50
    reverse_angle = line.angle() - 180

    # The two barb points of the triangular head, fanned out around the
    # reversed line direction.
    barb_1 = tip + QLineF.fromPolar(head_length, reverse_angle - head_angle / 2.0).p2()
    barb_2 = tip + QLineF.fromPolar(head_length, reverse_angle + head_angle / 2.0).p2()

    head = QPainterPath()
    head.addPolygon(QPolygonF([tip, barb_1, barb_2, tip]))
    return path.united(head)
def arrow_path_concave(line, width):
    """
    Return a :class:`QPainterPath` of a pretty looking arrow.

    The shaft is a slightly waisted quad-curve body; the head is a four
    point polygon (with a concave back edge) united with it.
    """
    path = QPainterPath()
    tail, tip = line.p1(), line.p2()

    # A degenerate (zero length) line yields an empty path.
    if tail == tip:
        return path

    shaft = QLineF(line)
    # Leave room for the head, but require some minimum shaft length.
    shaft.setLength(max(line.length() - width * 3, width * 3))

    start, end = shaft.p1(), shaft.p2()
    mid = (start + end) / 2.0
    # Unit normal to the shaft direction.
    normal = QLineF.fromPolar(1.0, shaft.angle() + 90).p2()

    # Shaft outline: out along one side, across the end, back along the
    # other side, pinched towards the middle by the quad control points.
    path.moveTo(start)
    path.lineTo(start + (normal * width / 4.0))
    path.quadTo(mid + (normal * width / 4.0),
                end + (normal * width / 1.5))
    path.lineTo(end - (normal * width / 1.5))
    path.quadTo(mid - (normal * width / 4.0),
                start - (normal * width / 4.0))
    path.closeSubpath()

    head_length = width * 4
    head_angle = 50
    reverse_angle = line.angle() - 180

    barb_1 = tip + QLineF.fromPolar(head_length, reverse_angle - head_angle / 2.0).p2()
    barb_2 = tip + QLineF.fromPolar(head_length, reverse_angle + head_angle / 2.0).p2()

    head = QPainterPath()
    # The shaft end point between the barbs gives the head its concave back.
    head.addPolygon(QPolygonF([tip, barb_1, shaft.p2(), barb_2, tip]))
    return path.united(head)
class ArrowAnnotation(Annotation):
    """An arrow annotation item: a movable/selectable widget hosting an
    :class:`ArrowItem` with a drop shadow effect."""

    def __init__(self, parent=None, line=None, **kwargs):
        Annotation.__init__(self, parent, **kwargs)
        self.setFlag(QGraphicsItem.ItemIsMovable)
        self.setFlag(QGraphicsItem.ItemIsSelectable)

        self.setFocusPolicy(Qt.ClickFocus)

        if line is None:
            line = QLineF(0, 0, 20, 0)

        self.__line = line
        self.__color = QColor(Qt.red)
        self.__arrowItem = ArrowItem(self)
        self.__arrowItem.setLine(line)
        self.__arrowItem.setBrush(self.__color)
        self.__arrowItem.setPen(QPen(Qt.NoPen))
        self.__arrowItem.setArrowStyle(ArrowItem.Concave)
        self.__arrowItem.setLineWidth(5)

        self.__shadow = QGraphicsDropShadowEffect(
            blurRadius=5, offset=QPointF(1.0, 2.0),
        )

        self.__arrowItem.setGraphicsEffect(self.__shadow)
        self.__shadow.setEnabled(True)

        self.__autoAdjustGeometry = True

    def setAutoAdjustGeometry(self, autoAdjust):
        """
        If set to `True` then the geometry will be adjusted whenever
        the arrow is changed with `setLine`. Otherwise the geometry
        of the item is only updated so the `line` lies within the
        `geometry()` rect (i.e. it only grows). True by default
        """
        self.__autoAdjustGeometry = autoAdjust
        if autoAdjust:
            self.adjustGeometry()

    def autoAdjustGeometry(self):
        """
        Should the geometry of the item be adjusted automatically when
        `setLine` is called.
        """
        return self.__autoAdjustGeometry

    def setLine(self, line):
        """
        Set the arrow base line (a `QLineF` in object coordinates).
        """
        if self.__line != line:
            self.__line = line

            # local item coordinate system
            geom = self.geometry().translated(-self.pos())

            if geom.isNull() and not line.isNull():
                geom = QRectF(0, 0, 1, 1)

            arrow_shape = arrow_path_concave(line, self.lineWidth())
            arrow_rect = arrow_shape.boundingRect()

            # Grow the geometry to contain the arrow.
            if not (geom.contains(arrow_rect)):
                geom = geom.united(arrow_rect)

            if self.__autoAdjustGeometry:
                # Shrink the geometry if required.
                geom = geom.intersected(arrow_rect)

            # topLeft can move changing the local coordinates;
            # shift the line into the new local frame and store it
            # (note __line is intentionally re-assigned here).
            diff = geom.topLeft()
            line = QLineF(line.p1() - diff, line.p2() - diff)
            self.__arrowItem.setLine(line)
            self.__line = line

            # parent item coordinate system
            geom.translate(self.pos())
            self.setGeometry(geom)

    def line(self):
        """
        Return the arrow base line (`QLineF` in object coordinates).
        """
        return QLineF(self.__line)

    def setColor(self, color):
        """
        Set arrow brush color.
        """
        if self.__color != color:
            self.__color = QColor(color)
            self.__updateStyleState()

    def color(self):
        """
        Return the arrow brush color.
        """
        return QColor(self.__color)

    def setLineWidth(self, lineWidth):
        """
        Set the arrow line width.
        """
        self.__arrowItem.setLineWidth(lineWidth)

    def lineWidth(self):
        """
        Return the arrow line width.
        """
        return self.__arrowItem.lineWidth()

    def adjustGeometry(self):
        """
        Adjust the widget geometry to exactly fit the arrow inside
        while preserving the arrow path scene geometry.
        """
        # local system coordinate
        geom = self.geometry().translated(-self.pos())
        line = self.__line

        arrow_rect = self.__arrowItem.shape().boundingRect()

        if geom.isNull() and not line.isNull():
            geom = QRectF(0, 0, 1, 1)

        if not (geom.contains(arrow_rect)):
            geom = geom.united(arrow_rect)

        geom = geom.intersected(arrow_rect)
        # Re-root the line at the new local origin.
        diff = geom.topLeft()
        line = QLineF(line.p1() - diff, line.p2() - diff)
        geom.translate(self.pos())
        self.setGeometry(geom)
        self.setLine(line)

    def shape(self):
        # Use the arrow's own shape (mapped into our coordinates) for
        # hit-testing instead of the full bounding rect.
        arrow_shape = self.__arrowItem.shape()
        return self.mapFromItem(self.__arrowItem, arrow_shape)

    def itemChange(self, change, value):
        if change == QGraphicsItem.ItemSelectedHasChanged:
            self.__updateStyleState()

        return Annotation.itemChange(self, change, value)

    def __updateStyleState(self):
        """
        Update the arrows' brush, pen, ... based on its state
        """
        if self.isSelected():
            # Darken the fill and add a dashed highlight outline.
            color = self.__color.darker(150)
            pen = QPen(QColor(96, 158, 215), Qt.DashDotLine)
            pen.setWidthF(1.25)
            pen.setCosmetic(True)
            self.__shadow.setColor(pen.color().darker(150))
        else:
            color = self.__color
            pen = QPen(Qt.NoPen)
            self.__shadow.setColor(QColor(63, 63, 63, 180))

        self.__arrowItem.setBrush(color)
        self.__arrowItem.setPen(pen)
| |
# Copyright (c) 2009, Joseph Lisee
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of StatePy nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Joseph Lisee ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Joseph Lisee
# File: statepy/test/task.py
# Python Imports
import unittest
# Project Imports
import statepy.state as state
import statepy.task as task
# Event types used to drive the task state machines under test.
EVENT_A = state.declareEventType('A')
EVENT_B = state.declareEventType('B')
EVENT_C = state.declareEventType('C')
EVENT_D = state.declareEventType('D')
EVENT_E = state.declareEventType('E')
EVENT_F = state.declareEventType('F')

# Failure events which route TaskB/TaskC to their recovery states.
EVENT_B_FAIL = state.declareEventType('B_FAIL')
EVENT_C_FAIL = state.declareEventType('C_FAIL')
class TaskA(task.Task):
    """First task in the chain; every event simply advances to the next task."""

    #DEFAULT_TIMEOUT = 16

    @staticmethod
    def _transitions():
        transitionTable = {
            EVENT_A: task.Next,
            EVENT_B: task.Next,
            task.TIMEOUT: task.Next,
        }
        return transitionTable

    def enter(self):
        # Timeout support disabled:
        #task.Task.enter(self, TaskA.DEFAULT_TIMEOUT)
        pass
class TaskB(task.Task):
    """Middle task; EVENT_B_FAIL routes to its failure task."""

    @staticmethod
    def _transitions():
        transitionTable = {
            EVENT_C: task.Next,
            EVENT_D: task.Next,
            EVENT_B_FAIL: task.Failure,
            task.TIMEOUT: task.Next,
        }
        return transitionTable
class TaskC(task.Task):
    """Last task in the chain; EVENT_C_FAIL routes to its failure task."""

    @staticmethod
    def _transitions():
        transitionTable = {
            EVENT_E: task.Next,
            EVENT_F: task.Next,
            EVENT_C_FAIL: task.Failure,
        }
        return transitionTable
class BRecovery(state.State):
    """Test state just used to know we went to the proper failure state"""
    @staticmethod
    def transitions():
        # Self-loop: the machine stays here so the test can observe it.
        return {EVENT_F : BRecovery }
class CRecovery(state.State):
    """Test state just used to know we went to the proper failure state"""
    @staticmethod
    def transitions():
        # Self-loop: the machine stays here so the test can observe it.
        return {EVENT_F : CRecovery }
class TestTask(unittest.TestCase):
    """Exercises task ordering and failure routing through a state.Machine."""

    #CFG_TIMEOUT = 47

    def setUp(self):
        taskOrder = [TaskA, TaskB, TaskC]
        failureTasks = {TaskB: BRecovery, TaskC: CRecovery}

        # Create our task manager which tells the current state what the next
        # state is
        self.taskManager = task.TaskManager(taskOrder=taskOrder,
                                            failureTasks=failureTasks)

        # Now create our state machine, making sure to pass along the
        # taskManager so that Task states have access to it
        self.machine = state.Machine(
            statevars={'taskManager': self.taskManager})

        # These are set as object variables for historical reasons
        self.TaskAcls = TaskA
        self.TaskBcls = TaskB
        self.TaskCcls = TaskC
        self.BRecoverycls = BRecovery
        self.CRecoverycls = CRecovery

    def testNextTransitions(self):
        """
        Make sure the marking "task.Next" states get replaced with the real
        next states.
        """
        taskA = self.TaskAcls(taskManager=self.taskManager)
        self.assertEqual(self.TaskBcls, taskA.transitions()[EVENT_A])
        self.assertEqual(self.TaskBcls, taskA.transitions()[EVENT_B])

        taskB = self.TaskBcls(taskManager=self.taskManager)
        self.assertEqual(self.TaskCcls, taskB.transitions()[EVENT_C])
        self.assertEqual(self.TaskCcls, taskB.transitions()[EVENT_D])

        # The final task's "Next" marker must resolve to task.End.
        taskC = self.TaskCcls(taskManager=self.taskManager)
        self.assertEqual(task.End, taskC.transitions()[EVENT_E])
        self.assertEqual(task.End, taskC.transitions()[EVENT_F])

    def _injectEvent(self, etype):
        """Build an event of the given type and feed it to the machine."""
        event = state.Event()
        event.type = etype
        self.machine.injectEvent(event)

    def testTransition(self):
        """
        Now make sure the whole thing works in a real statemachine
        """
        # Note: assertEquals/assert_ are deprecated unittest aliases (removed
        # in Python 3.12); assertEqual/assertTrue are used instead.

        # Start up in the TaskA state
        self.machine.start(self.TaskAcls)

        # Now inject events to move us from the start to the end
        self._injectEvent(EVENT_A)
        cstate = self.machine.currentState()
        self.assertEqual(self.TaskBcls, type(cstate))

        self._injectEvent(EVENT_D)
        cstate = self.machine.currentState()
        self.assertEqual(self.TaskCcls, type(cstate))

        self._injectEvent(EVENT_E)
        self.assertTrue(self.machine.complete)

        # Now do the failure tasks
        self.machine.start(self.TaskBcls)
        self._injectEvent(EVENT_B_FAIL)
        self.assertEqual(self.BRecoverycls, type(self.machine.currentState()))

        self.machine.start(self.TaskCcls)
        self._injectEvent(EVENT_C_FAIL)
        self.assertEqual(self.CRecoverycls, type(self.machine.currentState()))

    # NOTE: The timeout tests (testDefaultTimeout, testCfgTimeout,
    # testStopTimer) and their _timeoutTest helper were previously disabled
    # by commenting them out, together with the DEFAULT_TIMEOUT/CFG_TIMEOUT
    # constants above; they depended on timer/eventHub support that is not
    # available here. Restore them from version control if timeout support
    # returns.
| |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
import jsonschema
from jsonschema import _validators
from jsonschema.validators import create
from st2common.exceptions.action import InvalidActionParameterException
from st2common.util import jsonify
from st2common.util.misc import deep_update
from st2common.util.deep_copy import fast_deepcopy_dict
# Public API of this module.
__all__ = [
    "get_validator",
    "get_draft_schema",
    "get_action_parameters_schema",
    "get_schema_for_action_parameters",
    "get_schema_for_resource_parameters",
    "is_property_type_single",
    "is_property_type_list",
    "is_property_type_anyof",
    "is_property_type_oneof",
    "is_property_nullable",
    "is_attribute_type_array",
    "is_attribute_type_object",
    "validate",
]

# https://github.com/json-schema/json-schema/blob/master/draft-04/schema
# The source material is licensed under the AFL or BSD license.
# Both draft 4 and custom schema has additionalProperties set to false by default.
# The custom schema differs from draft 4 with the extension of position, immutable,
# and draft 3 version of required.

# Directory containing the bundled JSON schema files (this module's directory).
PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)))

# Meta schemas loaded once at import time and deep-copied on access
# (see get_draft_schema).
SCHEMAS = {
    "draft4": jsonify.load_file(os.path.join(PATH, "draft4.json")),
    "custom": jsonify.load_file(os.path.join(PATH, "custom.json")),
    # Custom schema for action params which doesn't allow parameter "type" attribute to be array
    "action_params": jsonify.load_file(os.path.join(PATH, "action_params.json")),
    "action_output_schema": jsonify.load_file(
        os.path.join(PATH, "action_output_schema.json")
    ),
}

# Schema fragment matching any standard JSON type.
SCHEMA_ANY_TYPE = {
    "anyOf": [
        {"type": "array"},
        {"type": "boolean"},
        {"type": "integer"},
        {"type": "number"},
        {"type": "object"},
        {"type": "string"},
    ]
}

# Runner parameter attributes which an action is allowed to override
# (see validate_runner_parameter_attribute_override).
RUNNER_PARAM_OVERRIDABLE_ATTRS = [
    "default",
    "description",
    "enum",
    "immutable",
    "required",
]
def get_draft_schema(version="custom", additional_properties=False):
    """
    Return a deep copy of one of the bundled JSON schemas.

    :param version: Key into ``SCHEMAS`` ("draft4", "custom", ...).
    :param additional_properties: When True, strip the "additionalProperties"
        restriction so extra properties are permitted.
    """
    schema = fast_deepcopy_dict(SCHEMAS[version])

    if additional_properties:
        # Removing the key restores jsonschema's permissive default.
        schema.pop("additionalProperties", None)

    return schema
def get_action_output_schema(additional_properties=True):
    """
    Return a generic schema which is used for validating action output.

    Unlike the other getters, additional properties are allowed by default.
    """
    schema_version = "action_output_schema"
    return get_draft_schema(
        version=schema_version,
        additional_properties=additional_properties,
    )
def get_action_parameters_schema(additional_properties=False):
    """
    Return a generic schema which is used for validating action parameters definition.
    """
    schema_version = "action_params"
    return get_draft_schema(
        version=schema_version,
        additional_properties=additional_properties,
    )
# Custom jsonschema validator class built from the "custom" meta schema.
# It uses the draft 4 validator set, except "properties" which uses the
# draft 3 validator (supporting the per-property "required" flag that the
# custom schema relies on).
CustomValidator = create(
    meta_schema=get_draft_schema(version="custom", additional_properties=True),
    validators={
        "$ref": _validators.ref,
        "additionalItems": _validators.additionalItems,
        "additionalProperties": _validators.additionalProperties,
        "allOf": _validators.allOf_draft4,
        "anyOf": _validators.anyOf_draft4,
        "dependencies": _validators.dependencies,
        "enum": _validators.enum,
        "format": _validators.format,
        "items": _validators.items,
        "maxItems": _validators.maxItems,
        "maxLength": _validators.maxLength,
        "maxProperties": _validators.maxProperties_draft4,
        "maximum": _validators.maximum,
        "minItems": _validators.minItems,
        "minLength": _validators.minLength,
        "minProperties": _validators.minProperties_draft4,
        "minimum": _validators.minimum,
        "multipleOf": _validators.multipleOf,
        "not": _validators.not_draft4,
        "oneOf": _validators.oneOf_draft4,
        "pattern": _validators.pattern,
        "patternProperties": _validators.patternProperties,
        # Draft 3 variant: allows "required" as a boolean on the property.
        "properties": _validators.properties_draft3,
        "type": _validators.type_draft4,
        "uniqueItems": _validators.uniqueItems,
    },
    version="custom_validator",
)
def is_property_type_single(property_schema):
    """
    Return True if the property schema declares exactly one plain type:
    not an "anyOf"/"oneOf" union and not a list of types.
    A missing "type" attribute defaults to the single type "string".
    """
    if not isinstance(property_schema, dict):
        return False
    if "anyOf" in property_schema or "oneOf" in property_schema:
        return False
    return not isinstance(property_schema.get("type", "string"), list)
def is_property_type_list(property_schema):
    """
    Return True if the property schema declares its "type" as a list of
    types (defaults to the single type "string" when absent).
    """
    if not isinstance(property_schema, dict):
        return False
    return isinstance(property_schema.get("type", "string"), list)
def is_property_type_anyof(property_schema):
    """Return True if the property schema is expressed as an "anyOf" union."""
    if not isinstance(property_schema, dict):
        return False
    return "anyOf" in property_schema
def is_property_type_oneof(property_schema):
    """Return True if the property schema is expressed as a "oneOf" union."""
    if not isinstance(property_schema, dict):
        return False
    return "oneOf" in property_schema
def is_property_nullable(property_type_schema):
    """
    Return True if the given type declaration admits "null".

    ``property_type_schema`` is either a list of type entries (as found in
    "anyOf"/"oneOf" unions or a "type" list — entries may be type-name
    strings or subschema dicts) or a single subschema dict.
    """
    if isinstance(property_type_schema, list):
        # anyOf / oneOf: the schema is a list of type entries.
        def _entry_is_null(entry):
            if isinstance(entry, six.string_types):
                return entry == "null"
            return isinstance(entry, dict) and entry.get("type", "string") == "null"

        return any(_entry_is_null(entry) for entry in property_type_schema)

    return (
        isinstance(property_type_schema, dict)
        and property_type_schema.get("type", "string") == "null"
    )
def is_attribute_type_array(attribute_type):
    """
    Return True if the attribute type is "array", either directly or as a
    member of a type list.
    """
    if isinstance(attribute_type, list):
        return "array" in attribute_type
    return attribute_type == "array"
def is_attribute_type_object(attribute_type):
    """
    Return True if the attribute type is "object", either directly or as a
    member of a type list.
    """
    if isinstance(attribute_type, list):
        return "object" in attribute_type
    return attribute_type == "object"
def assign_default_values(instance, schema):
    """
    Assign default values on the provided instance based on the schema default specification.

    Works on a deep copy: the input ``instance`` is never mutated. Handles a
    dict instance or a list of dict instances, and recurses into nested
    array / object properties. Returns the instance unchanged (copied) when
    it is neither a dict nor a list.
    """
    instance = fast_deepcopy_dict(instance)

    instance_is_dict = isinstance(instance, dict)
    instance_is_array = isinstance(instance, list)

    # Scalars and other types carry no properties to default.
    if not instance_is_dict and not instance_is_array:
        return instance

    properties = schema.get("properties", {})

    for property_name, property_data in six.iteritems(properties):
        has_default_value = "default" in property_data
        default_value = property_data.get("default", None)

        # Assign default value on the instance so the validation doesn't fail if requires is true
        # but the value is not provided
        if has_default_value:
            if instance_is_dict and instance.get(property_name, None) is None:
                instance[property_name] = default_value
            elif instance_is_array:
                # Apply the default to every item in the list that lacks it.
                for index, _ in enumerate(instance):
                    if instance[index].get(property_name, None) is None:
                        instance[index][property_name] = default_value

        # Support for nested properties (array and object)
        attribute_type = property_data.get("type", None)
        schema_items = property_data.get("items", {})

        # NOTE(review): the nested handling below calls instance.get(...),
        # which assumes a dict instance; a list instance reaching these
        # branches would raise AttributeError — confirm list instances never
        # carry nested array/object schemas here.

        # Array: recurse into each item via the "items" subschema.
        if (
            is_attribute_type_array(attribute_type)
            and schema_items
            and schema_items.get("properties", {})
        ):
            array_instance = instance.get(property_name, None)
            array_schema = schema["properties"][property_name]["items"]

            if array_instance is not None:
                # Note: We don't perform subschema assignment if no value is provided
                instance[property_name] = assign_default_values(
                    instance=array_instance, schema=array_schema
                )

        # Object: recurse with the property's own subschema.
        if is_attribute_type_object(attribute_type) and property_data.get(
            "properties", {}
        ):
            object_instance = instance.get(property_name, None)
            object_schema = schema["properties"][property_name]

            if object_instance is not None:
                # Note: We don't perform subschema assignment if no value is provided
                instance[property_name] = assign_default_values(
                    instance=object_instance, schema=object_schema
                )

    return instance
def modify_schema_allow_default_none(schema):
    """
    Manipulate the provided schema so None is also an allowed value for each attribute which
    defines a default value of None.

    Works on a deep copy; the input schema is not mutated. Recurses into
    nested array / object property schemas.
    """
    schema = fast_deepcopy_dict(schema)
    properties = schema.get("properties", {})

    for property_name, property_data in six.iteritems(properties):
        is_optional = not property_data.get("required", False)
        has_default_value = "default" in property_data
        default_value = property_data.get("default", None)
        property_schema = schema["properties"][property_name]

        # Only widen the type when the effective default is None (either an
        # explicit "default": null or an optional property with no default).
        if (has_default_value or is_optional) and default_value is None:
            # If property is anyOf and oneOf then it has to be process differently.
            if is_property_type_anyof(property_schema) and not is_property_nullable(
                property_schema["anyOf"]
            ):
                property_schema["anyOf"].append({"type": "null"})
            elif is_property_type_oneof(property_schema) and not is_property_nullable(
                property_schema["oneOf"]
            ):
                property_schema["oneOf"].append({"type": "null"})
            elif is_property_type_list(property_schema) and not is_property_nullable(
                property_schema.get("type")
            ):
                property_schema["type"].append("null")
            elif is_property_type_single(property_schema) and not is_property_nullable(
                property_schema.get("type")
            ):
                # Promote the single type to a ["<type>", "null"] list.
                property_schema["type"] = [
                    property_schema.get("type", "string"),
                    "null",
                ]

        # Support for nested properties (array and object)
        attribute_type = property_data.get("type", None)
        schema_items = property_data.get("items", {})

        # Array: recurse into the "items" subschema.
        if (
            is_attribute_type_array(attribute_type)
            and schema_items
            and schema_items.get("properties", {})
        ):
            array_schema = schema_items
            array_schema = modify_schema_allow_default_none(schema=array_schema)
            schema["properties"][property_name]["items"] = array_schema

        # Object: recurse with the property's own subschema.
        if is_attribute_type_object(attribute_type) and property_data.get(
            "properties", {}
        ):
            object_schema = property_data
            object_schema = modify_schema_allow_default_none(schema=object_schema)
            schema["properties"][property_name] = object_schema

    return schema
def validate(
    instance,
    schema,
    cls=None,
    use_default=True,
    allow_default_none=False,
    *args,
    **kwargs,
):
    """
    Custom validate function which supports default arguments combined with the "required"
    property.

    Note: This function returns cleaned instance with default values assigned.

    :param use_default: True to support the use of the optional "default" property.
    :type use_default: ``bool``
    """
    # Work on a copy so the caller's instance is never mutated.
    instance = fast_deepcopy_dict(instance)

    if use_default:
        if allow_default_none:
            schema = modify_schema_allow_default_none(schema=schema)

        # Defaults can only be assigned onto dict instances validated by an
        # object schema.
        if schema.get("type", None) == "object" and isinstance(instance, dict):
            instance = assign_default_values(instance=instance, schema=schema)

    # pylint: disable=assignment-from-no-return
    jsonschema.validate(instance=instance, schema=schema, cls=cls, *args, **kwargs)

    return instance
# Registry of available validator classes keyed by version name.
VALIDATORS = {"draft4": jsonschema.Draft4Validator, "custom": CustomValidator}


def get_validator(version="custom"):
    """Return the validator class registered under the given version name."""
    return VALIDATORS[version]
def validate_runner_parameter_attribute_override(
    action_ref, param_name, attr_name, runner_param_attr_value, action_param_attr_value
):
    """
    Validate that the provided parameter from the action schema can override the
    runner parameter.

    :raises InvalidActionParameterException: if the attribute differs from the
        runner's value and is not in the overridable whitelist.
    """
    if action_param_attr_value != runner_param_attr_value:
        # A differing value is only acceptable for whitelisted attributes.
        if attr_name not in RUNNER_PARAM_OVERRIDABLE_ATTRS:
            raise InvalidActionParameterException(
                'The attribute "%s" for the runner parameter "%s" in action "%s" '
                "cannot be overridden." % (attr_name, param_name, action_ref)
            )

    return True
def get_schema_for_action_parameters(action_db, runnertype_db=None):
    """
    Dynamically construct JSON schema for the provided action from the parameters metadata.

    Note: This schema is used to validate parameters which are passed to the action.

    :param action_db: Action model whose parameters extend / override the runner's.
    :param runnertype_db: Optional runner type model; looked up by name when omitted.
    """
    if not runnertype_db:
        # NOTE(review): local import - presumably to avoid a circular import
        # between this module and st2common.util.action_db.
        from st2common.util.action_db import get_runnertype_by_name

        runnertype_db = get_runnertype_by_name(action_db.runner_type["name"])

    # Note: We need to perform a deep merge because user can only specify a single parameter
    # attribute when overriding it in an action metadata.
    parameters_schema = {}
    deep_update(parameters_schema, runnertype_db.runner_parameters)
    deep_update(parameters_schema, action_db.parameters)

    # Perform validation, make sure user is not providing parameters which can't
    # be overridden
    runner_parameter_names = list(runnertype_db.runner_parameters.keys())

    for name, schema in six.iteritems(action_db.parameters):
        if name not in runner_parameter_names:
            continue

        # Check every overridden attribute of this runner parameter.
        for attribute, value in six.iteritems(schema):
            runner_param_value = runnertype_db.runner_parameters[name].get(attribute)
            validate_runner_parameter_attribute_override(
                action_ref=action_db.ref,
                param_name=name,
                attr_name=attribute,
                runner_param_attr_value=runner_param_value,
                action_param_attr_value=value,
            )

    schema = get_schema_for_resource_parameters(parameters_schema=parameters_schema)

    if parameters_schema:
        schema["title"] = action_db.name
        if action_db.description:
            schema["description"] = action_db.description

    return schema
def get_schema_for_resource_parameters(
    parameters_schema, allow_additional_properties=False
):
    """
    Dynamically construct JSON schema for the provided resource from the parameters metadata.

    Parameters with an empty / falsy schema are normalized to SCHEMA_ANY_TYPE.
    Returns an empty dict when there are no parameters at all.
    """
    properties = {
        key: (value if value else SCHEMA_ANY_TYPE)
        for key, value in six.iteritems(parameters_schema)
    }

    if not properties:
        return {}

    return {
        "type": "object",
        "properties": properties,
        "additionalProperties": allow_additional_properties,
    }
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
import os.path
import socket
import sys
import eventlet
import eventlet.wsgi
import greenlet
from oslo.config import cfg
from paste import deploy
import routes.middleware
import ssl
import webob.dec
import webob.exc
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
# Raise the default from 8192 to accommodate large tokens
eventlet.wsgi.MAX_HEADER_LINE = 16384
# Configuration options controlling the WSGI service layer (paste config
# location, request log format, SSL material and TCP keepalive tuning).
wsgi_opts = [
    cfg.StrOpt('api_paste_config',
               default="api-paste.ini",
               help='File name for the paste.deploy config for nova-api'),
    cfg.StrOpt('wsgi_log_format',
            default='%(client_ip)s "%(request_line)s" status: %(status_code)s'
                    ' len: %(body_length)s time: %(wall_seconds).7f',
            help='A python format string that is used as the template to '
                 'generate log lines. The following values can be formatted '
                 'into it: client_ip, date_time, request_line, status_code, '
                 'body_length, wall_seconds.'),
    cfg.StrOpt('ssl_ca_file',
               default=None,
               help="CA certificate file to use to verify "
                    "connecting clients"),
    cfg.StrOpt('ssl_cert_file',
               default=None,
               help="SSL certificate of API server"),
    cfg.StrOpt('ssl_key_file',
               default=None,
               help="SSL private key of API server"),
    cfg.IntOpt('tcp_keepidle',
               default=600,
               help="Sets the value of TCP_KEEPIDLE in seconds for each "
                    "server socket. Not supported on OS X.")
    ]

CONF = cfg.CONF
CONF.register_opts(wsgi_opts)

# Module-level logger for this WSGI utility module.
LOG = logging.getLogger(__name__)
class Server(object):
    """Server class to manage a WSGI server, serving a WSGI application."""

    # Maximum number of greenthreads handling requests concurrently when the
    # caller does not pass an explicit pool_size.
    default_pool_size = 1000

    def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None,
                 protocol=eventlet.wsgi.HttpProtocol, backlog=128,
                 use_ssl=False, max_url_len=None):
        """Initialize, but do not start, a WSGI server.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number to server the application.
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :param backlog: Maximum number of queued connections.
        :param max_url_len: Maximum length of permitted URLs.
        :returns: None
        :raises: nova.exception.InvalidInput
        """
        self.name = name
        self.app = app
        self._server = None
        self._protocol = protocol
        self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
        self._logger = logging.getLogger("nova.%s.wsgi.server" % self.name)
        self._wsgi_logger = logging.WritableLogger(self._logger)
        self._use_ssl = use_ssl
        self._max_url_len = max_url_len

        if backlog < 1:
            raise exception.InvalidInput(
                    reason='The backlog must be more than 1')

        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0],
                                      bind_addr[1],
                                      socket.AF_UNSPEC,
                                      socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            # Resolution failed - fall back to IPv4 and let eventlet.listen()
            # surface any real bind error.
            family = socket.AF_INET

        # Bind immediately so that self.port reflects the real (possibly
        # ephemeral) port even before start() is called.
        self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
        (self.host, self.port) = self._socket.getsockname()[0:2]
        LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__)

    def start(self):
        """Start serving a WSGI application.

        Wraps the listening socket with SSL when configured, applies
        keepalive socket options, then spawns the eventlet WSGI server in
        its own greenthread.

        :returns: None
        """
        if self._use_ssl:
            try:
                ca_file = CONF.ssl_ca_file
                cert_file = CONF.ssl_cert_file
                key_file = CONF.ssl_key_file

                if cert_file and not os.path.exists(cert_file):
                    raise RuntimeError(
                          _("Unable to find cert_file : %s") % cert_file)

                if ca_file and not os.path.exists(ca_file):
                    raise RuntimeError(
                          _("Unable to find ca_file : %s") % ca_file)

                if key_file and not os.path.exists(key_file):
                    raise RuntimeError(
                          _("Unable to find key_file : %s") % key_file)

                if self._use_ssl and (not cert_file or not key_file):
                    raise RuntimeError(
                          _("When running server in SSL mode, you must "
                            "specify both a cert_file and key_file "
                            "option value in your configuration file"))

                ssl_kwargs = {
                    'server_side': True,
                    'certfile': cert_file,
                    'keyfile': key_file,
                    'cert_reqs': ssl.CERT_NONE,
                }

                if CONF.ssl_ca_file:
                    # A configured CA bundle implies client certificates are
                    # required and verified.
                    ssl_kwargs['ca_certs'] = ca_file
                    ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

                self._socket = eventlet.wrap_ssl(self._socket,
                                                 **ssl_kwargs)

                self._socket.setsockopt(socket.SOL_SOCKET,
                                        socket.SO_REUSEADDR, 1)

                # sockets can hang around forever without keepalive
                self._socket.setsockopt(socket.SOL_SOCKET,
                                        socket.SO_KEEPALIVE, 1)

                # This option isn't available in the OS X version of eventlet
                if hasattr(socket, 'TCP_KEEPIDLE'):
                    self._socket.setsockopt(socket.IPPROTO_TCP,
                                            socket.TCP_KEEPIDLE,
                                            CONF.tcp_keepidle)

            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Failed to start %(name)s on %(host)s"
                                ":%(port)s with SSL support") % self.__dict__)

        wsgi_kwargs = {
            'func': eventlet.wsgi.server,
            'sock': self._socket,
            'site': self.app,
            'protocol': self._protocol,
            'custom_pool': self._pool,
            'log': self._wsgi_logger,
            'log_format': CONF.wsgi_log_format
            }

        if self._max_url_len:
            wsgi_kwargs['url_length_limit'] = self._max_url_len

        # The server loop runs in its own greenthread; stop()/wait() manage it.
        self._server = eventlet.spawn(**wsgi_kwargs)

    def stop(self):
        """Stop this server.

        This is not a very nice action, as currently the method by which a
        server is stopped is by killing its eventlet.

        :returns: None
        """
        LOG.info(_("Stopping WSGI server."))

        if self._server is not None:
            # Resize pool to stop new requests from being processed
            self._pool.resize(0)
            self._server.kill()

    def wait(self):
        """Block, until the server has stopped.

        Waits on the server's eventlet to finish, then returns.

        :returns: None
        """
        try:
            self._server.wait()
        except greenlet.GreenletExit:
            # kill() in stop() raises GreenletExit inside the server thread;
            # this is the expected shutdown path, not an error.
            LOG.info(_("WSGI server has stopped."))
class Request(webob.Request):
    """Canonical request type used by the wsgify decorators in this module."""
    pass
class Application(object):
    """Base WSGI application wrapper. Subclasses need to implement __call__."""

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste app factories in paste.deploy config files.

        ``global_config`` (the [DEFAULT] section of the paste file) is
        ignored; every key/value pair found under the [app:APPNAME]
        section is forwarded to the class constructor as keyword
        arguments.

        For example, the paste configuration

            [app:wadl]
            latest_version = 1.3
            paste.app_factory = nova.api.fancy_api:Wadl.factory

        results in the call ``Wadl(latest_version='1.3')``.

        Subclasses may override ``factory``, but the kwarg passing usually
        makes that unnecessary.
        """
        return cls(**local_config)

    def __call__(self, environ, start_response):
        r"""Entry point for the WSGI protocol; subclasses must override.

        A typical implementation looks like:

            @webob.dec.wsgify(RequestClass=Request)
            def __call__(self, req):
                # Any of the following work as a return value:
                #   - a plain string ('message\n')
                #   - a webob HTTP exception (exc.HTTPForbidden(detail='Nice try'))
                #   - a webob Response object
                #   - another WSGI app to run next (self.application)
                #   - req.get_response(self.application)
                return res
                # ...or set req.response and return None.

        See the end of http://pythonpaste.org/webob/modules/dec.html
        for more info.
        """
        raise NotImplementedError(_('You must implement __call__'))
class Middleware(Application):
    """Base WSGI middleware.

    A middleware wraps another application and is invoked in its place.
    The default behavior simply delegates to the wrapped app; subclasses
    can hook ``process_request`` / ``process_response`` or override
    ``__call__`` entirely to customize behavior.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste filter factories in paste.deploy config files.

        Every key/value pair found under the [filter:APPNAME] section of
        the paste config is passed to ``__init__`` as keyword arguments.
        For example,

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = nova.api.analytics:Analytics.factory

        results in ``Analytics(app_from_paste, redis_host='127.0.0.1')``.

        Subclasses may re-implement ``factory``, but the kwarg passing
        usually makes that unnecessary.
        """
        def _factory(app):
            return cls(app, **local_config)
        return _factory

    def __init__(self, application):
        self.application = application

    def process_request(self, req):
        """Hook called on each request.

        Return None to continue down the stack, or a response object to
        short-circuit processing and return it immediately.
        """
        return None

    def process_response(self, response):
        """Hook to post-process the wrapped application's response."""
        return response

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        response = self.process_request(req)
        if response:
            return response
        return self.process_response(req.get_response(self.application))
class Debug(Middleware):
    """Helper class for debugging a WSGI application.

    Can be inserted into any WSGI application chain to get information
    about the request and response.

    NOTE: this uses Python 2 print statements, like the rest of this module.
    """

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        # Dump the WSGI environ before handing the request on.
        print ('*' * 40) + ' REQUEST ENVIRON'
        for key, value in req.environ.items():
            print key, '=', value
        print
        resp = req.get_response(self.application)

        # Dump the response headers, then wrap the body iterator so the body
        # is echoed to stdout as it is streamed to the client.
        print ('*' * 40) + ' RESPONSE HEADERS'
        for (key, value) in resp.headers.iteritems():
            print key, '=', value
        print

        resp.app_iter = self.print_generator(resp.app_iter)

        return resp

    @staticmethod
    def print_generator(app_iter):
        """Iterator that prints the contents of a wrapper string."""
        print ('*' * 40) + ' BODY'
        for part in app_iter:
            # Echo each body chunk to stdout while still yielding it onward.
            sys.stdout.write(part)
            sys.stdout.flush()
            yield part
        print
class Router(object):
    """WSGI middleware that maps incoming requests to WSGI apps."""

    def __init__(self, mapper):
        """Create a router for the given routes.Mapper.

        Each route in `mapper` must specify a 'controller', which is a
        WSGI app to call. You'll probably want to specify an 'action' as
        well and have your controller be an object that can route
        the request to the action-specific method.

        Examples:
          mapper = routes.Mapper()
          sc = ServerController()

          # Explicit mapping of one route to a controller+action
          mapper.connect(None, '/svrlist', controller=sc, action='list')

          # Actions are all implicitly defined
          mapper.resource('server', 'servers', controller=sc)

          # Pointing to an arbitrary WSGI app. You can specify the
          # {path_info:.*} parameter so the target app can be handed just that
          # section of the URL.
          mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
        """
        self.map = mapper
        # RoutesMiddleware performs the match and records the result in
        # req.environ['wsgiorg.routing_args'] before calling self._dispatch.
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Route the incoming request to a controller based on self.map.

        If no match, return a 404.
        """
        # Returning a WSGI app makes wsgify invoke it against this request.
        return self._router

    @staticmethod
    @webob.dec.wsgify(RequestClass=Request)
    def _dispatch(req):
        """Dispatch the request to the appropriate controller.

        Called by self._router after matching the incoming request to a route
        and putting the information into req.environ. Either returns 404
        or the routed WSGI app's response.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            return webob.exc.HTTPNotFound()
        app = match['controller']
        return app
class Loader(object):
    """Used to load WSGI applications from paste configurations."""

    def __init__(self, config_path=None):
        """Initialize the loader, and attempt to find the config.

        :param config_path: Full or relative path to the paste config.
        :returns: None
        :raises: nova.exception.ConfigNotFound when the config cannot be
                 located anywhere.
        """
        candidate = config_path or CONF.api_paste_config
        if os.path.exists(candidate):
            self.config_path = candidate
            return
        # Not at the given path - fall back to oslo.config's search dirs.
        self.config_path = CONF.find_file(candidate)
        if not self.config_path:
            raise exception.ConfigNotFound(path=candidate)

    def load_app(self, name):
        """Return the paste URLMap wrapped WSGI application.

        :param name: Name of the application to load.
        :returns: Paste URLMap object wrapping the requested application.
        :raises: `nova.exception.PasteAppNotFound`
        """
        app_url = "config:%s" % self.config_path
        try:
            LOG.debug(_("Loading app %(name)s from %(path)s") %
                      {'name': name, 'path': self.config_path})
            return deploy.loadapp(app_url, name=name)
        except LookupError as err:
            LOG.error(err)
            raise exception.PasteAppNotFound(name=name, path=self.config_path)
| |
# Copyright (c) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for Dell EMC XtremIO Storage.
supported XtremIO version 2.4 and up
.. code-block:: none
1.0.0 - initial release
1.0.1 - enable volume extend
1.0.2 - added FC support, improved error handling
1.0.3 - update logging level, add translation
1.0.4 - support for FC zones
1.0.5 - add support for XtremIO 4.0
1.0.6 - add support for iSCSI multipath, CA validation, consistency groups,
R/O snapshots, CHAP discovery authentication
1.0.7 - cache glance images on the array
1.0.8 - support for volume retype, CG fixes
"""
import json
import math
import random
import requests
import string
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import units
import six
from six.moves import http_client
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.objects import fields
from cinder import utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
# Fallback thin-provisioning oversubscription ratio used when the backend
# does not configure max_over_subscription_ratio.
DEFAULT_PROVISIONING_FACTOR = 20.0
XTREMIO_OPTS = [
    cfg.StrOpt('xtremio_cluster_name',
               default='',
               help='XMS cluster id in multi-cluster environment'),
    cfg.IntOpt('xtremio_array_busy_retry_count',
               default=5,
               help='Number of retries in case array is busy'),
    cfg.IntOpt('xtremio_array_busy_retry_interval',
               default=5,
               help='Interval between retries in case array is busy'),
    cfg.IntOpt('xtremio_volumes_per_glance_cache',
               default=100,
               help='Number of volumes created from each cached glance image')]

CONF.register_opts(XTREMIO_OPTS, group=configuration.SHARED_CONF_GROUP)

RANDOM = random.Random()
# Error token strings returned in XMS REST error responses; matched against
# the "message" field in XtremIOClient.handle_errors().
OBJ_NOT_FOUND_ERR = 'obj_not_found'
VOL_NOT_UNIQUE_ERR = 'vol_obj_name_not_unique'
VOL_OBJ_NOT_FOUND_ERR = 'vol_obj_not_found'
ALREADY_MAPPED_ERR = 'already_mapped'
SYSTEM_BUSY = 'system_is_busy'
TOO_MANY_OBJECTS = 'too_many_objs'
TOO_MANY_SNAPSHOTS_PER_VOL = 'too_many_snapshots_per_vol'

# Positions of the name / index components inside an XMS object-id triple
# (e.g. initiator['ig-id'][XTREMIO_OID_INDEX]).
XTREMIO_OID_NAME = 1
XTREMIO_OID_INDEX = 2
class XtremIOClient(object):
    """Base REST client for the XtremIO Management Server (XMS).

    Subclasses implement the version-specific pieces (v1 vs. v2 JSON API).
    """

    def __init__(self, configuration, cluster_id):
        self.configuration = configuration
        self.cluster_id = cluster_id
        # ``verify`` is passed straight to requests: False (no verification),
        # True, or a path to a CA bundle.
        self.verify = (self.configuration.
                       safe_get('driver_ssl_cert_verify') or False)
        if self.verify:
            verify_path = (self.configuration.
                           safe_get('driver_ssl_cert_path') or None)
            if verify_path:
                self.verify = verify_path

    def get_base_url(self, ver):
        """Return the XMS REST base URL for API version ``ver`` ('v1'/'v2')."""
        if ver == 'v1':
            return 'https://%s/api/json/types' % self.configuration.san_ip
        elif ver == 'v2':
            return 'https://%s/api/json/v2/types' % self.configuration.san_ip

    def req(self, object_type='volumes', method='GET', data=None,
            name=None, idx=None, ver='v1'):
        """Issue a REST request against the XMS and return the decoded body.

        Objects may be addressed by ``name`` or by ``idx`` but never both.
        Retries automatically (via utils.retry) while the array reports
        that it is busy.
        """
        @utils.retry(exception.XtremIOArrayBusy,
                     self.configuration.xtremio_array_busy_retry_count,
                     self.configuration.xtremio_array_busy_retry_interval, 1)
        def _do_req(object_type, method, data, name, idx, ver):
            if not data:
                data = {}
            if name and idx:
                msg = _("can't handle both name and index in req")
                LOG.error(msg)
                raise exception.VolumeDriverException(message=msg)

            url = '%s/%s' % (self.get_base_url(ver), object_type)
            params = {}
            key = None
            if name:
                params['name'] = name
                key = name
            elif idx:
                url = '%s/%d' % (url, idx)
                key = str(idx)
            if method in ('GET', 'DELETE'):
                # GET/DELETE carry their payload as query parameters.
                params.update(data)
                self.update_url(params, self.cluster_id)
            if method != 'GET':
                self.update_data(data, self.cluster_id)
                LOG.debug('data: %s', data)
            LOG.debug('%(type)s %(url)s', {'type': method, 'url': url})

            try:
                response = requests.request(
                    method, url, params=params, data=json.dumps(data),
                    verify=self.verify, auth=(self.configuration.san_login,
                                              self.configuration.san_password))
            except requests.exceptions.RequestException as exc:
                msg = (_('Exception: %s') % six.text_type(exc))
                raise exception.VolumeDriverException(message=msg)

            # Any 2xx is success; everything else is translated into a
            # cinder exception by handle_errors() below.
            if (http_client.OK <= response.status_code <
                    http_client.MULTIPLE_CHOICES):
                if method in ('GET', 'POST'):
                    return response.json()
                else:
                    return ''

            self.handle_errors(response, key, object_type)
        return _do_req(object_type, method, data, name, idx, ver)

    def handle_errors(self, response, key, object_type):
        """Translate a non-2xx XMS response into the matching exception."""
        if response.status_code == http_client.BAD_REQUEST:
            error = response.json()
            err_msg = error.get('message')
            if err_msg.endswith(OBJ_NOT_FOUND_ERR):
                LOG.warning("object %(key)s of "
                            "type %(typ)s not found, %(err_msg)s",
                            {'key': key, 'typ': object_type,
                             'err_msg': err_msg, })
                raise exception.NotFound()
            elif err_msg == VOL_NOT_UNIQUE_ERR:
                LOG.error("can't create 2 volumes with the same name, %s",
                          err_msg)
                msg = _('Volume by this name already exists')
                raise exception.VolumeBackendAPIException(data=msg)
            elif err_msg == VOL_OBJ_NOT_FOUND_ERR:
                LOG.error("Can't find volume to map %(key)s, %(msg)s",
                          {'key': key, 'msg': err_msg, })
                raise exception.VolumeNotFound(volume_id=key)
            elif ALREADY_MAPPED_ERR in err_msg:
                raise exception.XtremIOAlreadyMappedError()
            elif err_msg == SYSTEM_BUSY:
                # Triggers the @utils.retry wrapper in req().
                raise exception.XtremIOArrayBusy()
            elif err_msg in (TOO_MANY_OBJECTS, TOO_MANY_SNAPSHOTS_PER_VOL):
                raise exception.XtremIOSnapshotsLimitExceeded()
        # Unrecognized error - surface the raw response text.
        msg = _('Bad response from XMS, %s') % response.text
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(message=msg)

    def update_url(self, data, cluster_id):
        # Hook for subclasses to add cluster info to query params (no-op here).
        return

    def update_data(self, data, cluster_id):
        # Hook for subclasses to add cluster info to the body (no-op here).
        return

    def get_cluster(self):
        """Return the content of the first cluster object."""
        return self.req('clusters', idx=1)['content']

    def create_snapshot(self, src, dest, ro=False):
        """Create a snapshot of a volume on the array.

        XtremIO array snapshots are also volumes.

        :src: name of the source volume to be cloned
        :dest: name for the new snapshot
        :ro: new snapshot type ro/regular. only applicable to Client4
        """
        raise NotImplementedError()

    def get_extra_capabilities(self):
        """Extra backend capabilities reported upward (none by default)."""
        return {}

    def get_initiator(self, port_address):
        raise NotImplementedError()

    def add_vol_to_cg(self, vol_id, cg_id):
        # Consistency groups are only implemented by the v4 client; the base
        # implementation is intentionally a no-op.
        pass

    def get_initiators_igs(self, port_addresses):
        """Return the initiator-group indexes for the given port addresses."""
        ig_indexes = set()
        for port_address in port_addresses:
            initiator = self.get_initiator(port_address)
            ig_indexes.add(initiator['ig-id'][XTREMIO_OID_INDEX])

        return list(ig_indexes)

    def get_fc_up_ports(self):
        """Return the details of all FC targets whose port state is 'up'."""
        targets = [self.req('targets', name=target['name'])['content']
                   for target in self.req('targets')['targets']]
        return [target for target in targets
                if target['port-type'] == 'fc' and
                target["port-state"] == 'up']
class XtremIOClient3(XtremIOClient):
    """REST client for XMS v3 arrays (v1 JSON API, no server-side filters)."""

    def __init__(self, configuration, cluster_id):
        super(XtremIOClient3, self).__init__(configuration, cluster_id)
        # Lazily-populated cache of iSCSI portal details.
        self._portals = []

    def find_lunmap(self, ig_name, vol_name):
        """Return the lun-map for (initiator group, volume), or None.

        The v1 API cannot filter server-side, so every lun-map is fetched
        and inspected individually.
        """
        try:
            lun_mappings = self.req('lun-maps')['lun-maps']
        except exception.NotFound:
            raise (exception.VolumeDriverException
                   (_("can't find lun-map, ig:%(ig)s vol:%(vol)s") %
                    {'ig': ig_name, 'vol': vol_name}))

        for lm_link in lun_mappings:
            idx = lm_link['href'].split('/')[-1]
            # NOTE(geguileo): There can be races so mapped elements retrieved
            # in the listing may no longer exist.
            try:
                lm = self.req('lun-maps', idx=int(idx))['content']
            except exception.NotFound:
                continue
            if lm['ig-name'] == ig_name and lm['vol-name'] == vol_name:
                return lm

        return None

    def num_of_mapped_volumes(self, initiator):
        """Count the lun-maps belonging to the given initiator group."""
        cnt = 0
        for lm_link in self.req('lun-maps')['lun-maps']:
            idx = lm_link['href'].split('/')[-1]
            # NOTE(geguileo): There can be races so mapped elements retrieved
            # in the listing may no longer exist.
            try:
                lm = self.req('lun-maps', idx=int(idx))['content']
            except exception.NotFound:
                continue
            if lm['ig-name'] == initiator:
                cnt += 1
        return cnt

    def get_iscsi_portals(self):
        """Return (and cache) the details of all iSCSI portals."""
        if self._portals:
            return self._portals

        iscsi_portals = [t['name'] for t in self.req('iscsi-portals')
                         ['iscsi-portals']]
        for portal_name in iscsi_portals:
            try:
                self._portals.append(self.req('iscsi-portals',
                                              name=portal_name)['content'])
            except exception.NotFound:
                raise (exception.VolumeBackendAPIException
                       (data=_("iscsi portal, %s, not found") % portal_name))

        return self._portals

    def create_snapshot(self, src, dest, ro=False):
        # The v1 API has no read-only snapshot support; ``ro`` is ignored.
        data = {'snap-vol-name': dest, 'ancestor-vol-id': src}
        self.req('snapshots', 'POST', data)

    def get_initiator(self, port_address):
        # Returns None when no initiator exists for this port address.
        try:
            return self.req('initiators', 'GET', name=port_address)['content']
        except exception.NotFound:
            pass
class XtremIOClient4(XtremIOClient):
    """REST client for XMS v4 arrays (v2 JSON API with server-side filters)."""

    def __init__(self, configuration, cluster_id):
        super(XtremIOClient4, self).__init__(configuration, cluster_id)
        self._cluster_name = None

    def req(self, object_type='volumes', method='GET', data=None,
            name=None, idx=None, ver='v2'):
        # Same as the base implementation but defaults to the v2 API.
        return super(XtremIOClient4, self).req(object_type, method, data,
                                               name, idx, ver)

    def get_extra_capabilities(self):
        # The v2 API supports consistency groups.
        return {'consistencygroup_support': True}

    def find_lunmap(self, ig_name, vol_name):
        """Return the lun-map for (initiator group, volume).

        :raises: VolumeNotFound when no such mapping exists.
        """
        try:
            return (self.req('lun-maps',
                             data={'full': 1,
                                   'filter': ['vol-name:eq:%s' % vol_name,
                                              'ig-name:eq:%s' % ig_name]})
                    ['lun-maps'][0])
        except (KeyError, IndexError):
            raise exception.VolumeNotFound(volume_id=vol_name)

    def num_of_mapped_volumes(self, initiator):
        """Count the lun-maps belonging to the given initiator group."""
        return len(self.req('lun-maps',
                            data={'filter': 'ig-name:eq:%s' % initiator})
                   ['lun-maps'])

    def update_url(self, data, cluster_id):
        # v2 GET/DELETE requests carry the cluster as a query parameter.
        if cluster_id:
            data['cluster-name'] = cluster_id

    def update_data(self, data, cluster_id):
        # v2 mutating requests carry the cluster inside the request body.
        if cluster_id:
            data['cluster-id'] = cluster_id

    def get_iscsi_portals(self):
        """Return the details of all iSCSI portals."""
        return self.req('iscsi-portals',
                        data={'full': 1})['iscsi-portals']

    def get_cluster(self):
        """Return the managed cluster's content, resolving its name lazily."""
        if not self.cluster_id:
            self.cluster_id = self.req('clusters')['clusters'][0]['name']

        return self.req('clusters', name=self.cluster_id)['content']

    def create_snapshot(self, src, dest, ro=False):
        """Create a (possibly read-only) snapshot and rename it to ``dest``."""
        data = {'snapshot-set-name': dest, 'snap-suffix': dest,
                'volume-list': [src],
                'snapshot-type': 'readonly' if ro else 'regular'}

        res = self.req('snapshots', 'POST', data, ver='v2')
        typ, idx = res['links'][0]['href'].split('/')[-2:]

        # rename the snapshot
        data = {'name': dest}
        try:
            self.req(typ, 'PUT', data, idx=int(idx))
        except exception.VolumeBackendAPIException:
            # reverting: delete the snapshot that could not be renamed
            LOG.error('Failed to rename the created snapshot, reverting.')
            self.req(typ, 'DELETE', idx=int(idx))
            raise

    def add_vol_to_cg(self, vol_id, cg_id):
        add_data = {'vol-id': vol_id, 'cg-id': cg_id}
        self.req('consistency-group-volumes', 'POST', add_data, ver='v2')

    def get_initiator(self, port_address):
        """Return the initiator matching ``port_address``, or None."""
        inits = self.req('initiators',
                         data={'filter': 'port-address:eq:' + port_address,
                               'full': 1})['initiators']
        if len(inits) == 1:
            return inits[0]
        else:
            # Zero (or unexpectedly many) matches - fall through to None.
            pass

    def get_fc_up_ports(self):
        """Return the port addresses of FC targets whose state is 'up'."""
        return self.req('targets',
                        data={'full': 1,
                              'filter': ['port-type:eq:fc',
                                         'port-state:eq:up'],
                              'prop': 'port-address'})["targets"]
class XtremIOClient42(XtremIOClient4):
    """Client for XMS >= 4.2, which supports bulk initiator-group lookups."""

    def get_initiators_igs(self, port_addresses):
        """Resolve all initiator-group indexes in a single filtered request."""
        filters = ['port-address:eq:{}'.format(address)
                   for address in port_addresses]
        initiators = self.req('initiators',
                              data={'filter': ','.join(filters),
                                    'full': 1, 'prop': 'ig-id'})['initiators']
        unique_igs = {initiator['ig-id'][XTREMIO_OID_INDEX]
                      for initiator in initiators}
        return list(unique_igs)
class XtremIOVolumeDriver(san.SanDriver):
    """Executes commands relating to Volumes."""

    VERSION = '1.0.9'

    # ThirdPartySystems wiki
    CI_WIKI_NAME = "EMC_XIO_CI"

    driver_name = 'XtremIO'
    # Oldest XMS software version this driver supports ([major, minor, patch]).
    MIN_XMS_VERSION = [3, 0, 0]
    def __init__(self, *args, **kwargs):
        """Read the driver configuration and start with a v3 XMS client.

        ``check_for_setup_error`` later replaces ``self.client`` with a
        version-appropriate client once the array version is known.
        """
        super(XtremIOVolumeDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(XTREMIO_OPTS)
        self.protocol = None
        self.backend_name = (self.configuration.safe_get('volume_backend_name')
                             or self.driver_name)
        self.cluster_id = (self.configuration.safe_get('xtremio_cluster_name')
                           or '')
        self.provisioning_factor = (self.configuration.
                                    safe_get('max_over_subscription_ratio')
                                    or DEFAULT_PROVISIONING_FACTOR)
        self._stats = {}
        self.client = XtremIOClient3(self.configuration, self.cluster_id)
def _obj_from_result(self, res):
typ, idx = res['links'][0]['href'].split('/')[-2:]
return self.client.req(typ, idx=int(idx))['content']
def check_for_setup_error(self):
try:
name = self.client.req('clusters')['clusters'][0]['name']
cluster = self.client.req('clusters', name=name)['content']
version_text = cluster['sys-sw-version']
except exception.NotFound:
msg = _("XtremIO not initialized correctly, no clusters found")
raise (exception.VolumeBackendAPIException
(data=msg))
ver = [int(n) for n in version_text.split('-')[0].split('.')]
if ver < self.MIN_XMS_VERSION:
msg = (_('Invalid XtremIO version %(cur)s,'
' version %(min)s or up is required') %
{'min': self.MIN_XMS_VERSION,
'cur': ver})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
LOG.info('XtremIO Cluster version %s', version_text)
client_ver = '3'
if ver[0] >= 4:
# get XMS version
xms = self.client.req('xms', idx=1)['content']
xms_version = tuple([int(i) for i in
xms['sw-version'].split('-')[0].split('.')])
LOG.info('XtremIO XMS version %s', version_text)
if xms_version >= (4, 2):
self.client = XtremIOClient42(self.configuration,
self.cluster_id)
client_ver = '4.2'
else:
self.client = XtremIOClient4(self.configuration,
self.cluster_id)
client_ver = '4'
LOG.info('Using XtremIO Client %s', client_ver)
def create_volume(self, volume):
"""Creates a volume."""
data = {'vol-name': volume['id'],
'vol-size': str(volume['size']) + 'g'
}
self.client.req('volumes', 'POST', data)
# Add the volume to a cg in case volume requested a cgid or group_id.
# If both cg_id and group_id exists in a volume. group_id will take
# place.
consistency_group = volume.get('consistencygroup_id')
# if cg_id and group_id are both exists, we gives priority to group_id.
if volume.get('group_id'):
consistency_group = volume.get('group_id')
if consistency_group:
self.client.add_vol_to_cg(volume['id'],
consistency_group)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
if snapshot.get('cgsnapshot_id'):
# get array snapshot id from CG snapshot
snap_by_anc = self._get_snapset_ancestors(snapshot.cgsnapshot)
snapshot_id = snap_by_anc[snapshot['volume_id']]
else:
snapshot_id = snapshot['id']
self.client.create_snapshot(snapshot_id, volume['id'])
# add new volume to consistency group
if (volume.get('consistencygroup_id') and
self.client is XtremIOClient4):
self.client.add_vol_to_cg(volume['id'],
snapshot['consistencygroup_id'])
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
vol = self.client.req('volumes', name=src_vref['id'])['content']
ctxt = context.get_admin_context()
cache = self.db.image_volume_cache_get_by_volume_id(ctxt,
src_vref['id'])
limit = self.configuration.safe_get('xtremio_volumes_per_glance_cache')
if cache and limit and limit > 0 and limit <= vol['num-of-dest-snaps']:
raise exception.CinderException('Exceeded the configured limit of '
'%d snapshots per volume' % limit)
try:
self.client.create_snapshot(src_vref['id'], volume['id'])
except exception.XtremIOSnapshotsLimitExceeded as e:
raise exception.CinderException(e.message)
# extend the snapped volume if requested size is larger then original
if volume['size'] > src_vref['size']:
try:
self.extend_volume(volume, volume['size'])
except Exception:
LOG.error('failes to extend volume %s, '
'reverting clone operation', volume['id'])
# remove the volume in case resize failed
self.delete_volume(volume)
raise
if volume.get('consistencygroup_id') and self.client is XtremIOClient4:
self.client.add_vol_to_cg(volume['id'],
volume['consistencygroup_id'])
def delete_volume(self, volume):
"""Deletes a volume."""
try:
self.client.req('volumes', 'DELETE', name=volume.name_id)
except exception.NotFound:
LOG.info("volume %s doesn't exist", volume.name_id)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.client.create_snapshot(snapshot.volume_id, snapshot.id, True)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
try:
self.client.req('volumes', 'DELETE', name=snapshot.id)
except exception.NotFound:
LOG.info("snapshot %s doesn't exist", snapshot.id)
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
# as the volume name is used to id the volume we need to rename it
name_id = None
provider_location = None
current_name = new_volume['id']
original_name = volume['id']
try:
data = {'name': original_name}
self.client.req('volumes', 'PUT', data, name=current_name)
except exception.VolumeBackendAPIException:
LOG.error('Unable to rename the logical volume '
'for volume: %s', original_name)
# If the rename fails, _name_id should be set to the new
# volume id and provider_location should be set to the
# one from the new volume as well.
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
return {'_name_id': name_id, 'provider_location': provider_location}
    def _update_volume_stats(self):
        """Refresh the cached ``self._stats`` dict from cluster counters."""
        sys = self.client.get_cluster()
        # counters appear to be reported in KB; dividing by units.Mi (2**20)
        # would then yield GB -- TODO(review): confirm the array's unit
        physical_space = int(sys["ud-ssd-space"]) / units.Mi
        used_physical_space = int(sys["ud-ssd-space-in-use"]) / units.Mi
        free_physical = physical_space - used_physical_space
        # total provisioned (virtual) size across all volumes
        actual_prov = int(sys["vol-size"]) / units.Mi
        self._stats = {'volume_backend_name': self.backend_name,
                       'vendor_name': 'Dell EMC',
                       'driver_version': self.VERSION,
                       'storage_protocol': self.protocol,
                       'total_capacity_gb': physical_space,
                       # thin provisioning: free space is scaled by the
                       # configured provisioning factor
                       'free_capacity_gb': (free_physical *
                                            self.provisioning_factor),
                       'provisioned_capacity_gb': actual_prov,
                       'max_over_subscription_ratio': self.provisioning_factor,
                       'thin_provisioning_support': True,
                       'thick_provisioning_support': False,
                       'reserved_percentage':
                       self.configuration.reserved_percentage,
                       'QoS_support': False,
                       'multiattach': False,
                       }
        # merge client-version-specific capability flags
        self._stats.update(self.client.get_extra_capabilities())
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def manage_existing(self, volume, existing_ref, is_snapshot=False):
"""Manages an existing LV."""
lv_name = existing_ref['source-name']
# Attempt to locate the volume.
try:
vol_obj = self.client.req('volumes', name=lv_name)['content']
if (
is_snapshot and
(not vol_obj['ancestor-vol-id'] or
vol_obj['ancestor-vol-id'][XTREMIO_OID_NAME] !=
volume.volume_id)):
kwargs = {'existing_ref': lv_name,
'reason': 'Not a snapshot of vol %s' %
volume.volume_id}
raise exception.ManageExistingInvalidReference(**kwargs)
except exception.NotFound:
kwargs = {'existing_ref': lv_name,
'reason': 'Specified logical %s does not exist.' %
'snapshot' if is_snapshot else 'volume'}
raise exception.ManageExistingInvalidReference(**kwargs)
# Attempt to rename the LV to match the OpenStack internal name.
self.client.req('volumes', 'PUT', data={'vol-name': volume['id']},
idx=vol_obj['index'])
def manage_existing_get_size(self, volume, existing_ref,
is_snapshot=False):
"""Return size of an existing LV for manage_existing."""
# Check that the reference is valid
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lv_name = existing_ref['source-name']
# Attempt to locate the volume.
try:
vol_obj = self.client.req('volumes', name=lv_name)['content']
except exception.NotFound:
kwargs = {'existing_ref': lv_name,
'reason': 'Specified logical %s does not exist.' %
'snapshot' if is_snapshot else 'volume'}
raise exception.ManageExistingInvalidReference(**kwargs)
# LV size is returned in gigabytes. Attempt to parse size as a float
# and round up to the next integer.
lv_size = int(math.ceil(float(vol_obj['vol-size']) / units.Mi))
return lv_size
    def unmanage(self, volume, is_snapshot=False):
        """Removes the specified volume from Cinder management.

        The array volume is kept but renamed so it no longer matches the
        Cinder-managed name.
        """
        # trying to rename the volume to [cinder name]-unmanged
        # NOTE(review): 'unmanged' is misspelled ('unmanaged'); left as-is
        # because external tooling may already rely on the existing suffix.
        try:
            self.client.req('volumes', 'PUT', name=volume['id'],
                            data={'vol-name': volume['name'] + '-unmanged'})
        except exception.NotFound:
            LOG.info("%(typ)s with the name %(name)s wasn't found, "
                     "can't unmanage",
                     {'typ': 'Snapshot' if is_snapshot else 'Volume',
                      'name': volume['id']})
            raise exception.VolumeNotFound(volume_id=volume['id'])
def manage_existing_snapshot(self, snapshot, existing_ref):
self.manage_existing(snapshot, existing_ref, True)
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
return self.manage_existing_get_size(snapshot, existing_ref, True)
def unmanage_snapshot(self, snapshot):
self.unmanage(snapshot, True)
def extend_volume(self, volume, new_size):
"""Extend an existing volume's size."""
data = {'vol-size': six.text_type(new_size) + 'g'}
try:
self.client.req('volumes', 'PUT', data, name=volume['id'])
except exception.NotFound:
msg = _("can't find the volume to extend")
raise exception.VolumeDriverException(message=msg)
    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        # export state is managed entirely on the array; nothing to verify
        pass
    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector.

        When no connector is given, performs a force-detach: every
        initiator group currently mapped to the volume is unmapped.
        """
        # lun-map names follow the '<vol-index>_<ig-index>_<tg-index>'
        # convention; '1' is the default target-group index
        tg_index = '1'
        if not connector:
            vol = self.client.req('volumes', name=volume.id)['content']
            # force detach, unmap all IGs from volume
            IG_OID = 0
            ig_indexes = [lun_map[IG_OID][XTREMIO_OID_INDEX] for
                          lun_map in vol['lun-mapping-list']]
            LOG.info('Force detach volume %(vol)s from luns %(luns)s.',
                     {'vol': vol['name'], 'luns': ig_indexes})
        else:
            vol = self.client.req('volumes', name=volume.id,
                                  data={'prop': 'index'})['content']
            # only unmap the IGs that hold this connector's initiators
            ig_indexes = self._get_ig_indexes_from_initiators(connector)
        for ig_idx in ig_indexes:
            lm_name = '%s_%s_%s' % (six.text_type(vol['index']),
                                    six.text_type(ig_idx),
                                    tg_index)
            LOG.debug('Removing lun map %s.', lm_name)
            try:
                self.client.req('lun-maps', 'DELETE', name=lm_name)
            except exception.NotFound:
                # already unmapped -- nothing to do
                LOG.warning("terminate_connection: lun map not found")
def _get_password(self):
return ''.join(RANDOM.choice
(string.ascii_uppercase + string.digits)
for _ in range(12))
def create_lun_map(self, volume, ig, lun_num=None):
try:
data = {'ig-id': ig, 'vol-id': volume['id']}
if lun_num:
data['lun'] = lun_num
res = self.client.req('lun-maps', 'POST', data)
lunmap = self._obj_from_result(res)
LOG.info('Created lun-map:\n%s', lunmap)
except exception.XtremIOAlreadyMappedError:
LOG.info('Volume already mapped, retrieving %(ig)s, %(vol)s',
{'ig': ig, 'vol': volume['id']})
lunmap = self.client.find_lunmap(ig, volume['id'])
return lunmap
    def _get_ig_name(self, connector):
        # abstract hook: protocol subclasses derive the initiator-group
        # name from the connector (IQN for iSCSI, host name for FC)
        raise NotImplementedError()
def _get_ig_indexes_from_initiators(self, connector):
initiator_names = self._get_initiator_names(connector)
return self.client.get_initiators_igs(initiator_names)
    def _get_initiator_names(self, connector):
        # abstract hook: protocol subclasses extract initiator identifiers
        # (IQNs / WWPNs) from the connector dict
        raise NotImplementedError()
def create_consistencygroup(self, context, group):
"""Creates a consistency group.
:param context: the context
:param group: the group object to be created
:returns: dict -- modelUpdate = {'status': 'available'}
:raises: VolumeBackendAPIException
"""
create_data = {'consistency-group-name': group['id']}
self.client.req('consistency-groups', 'POST', data=create_data,
ver='v2')
return {'status': fields.ConsistencyGroupStatus.AVAILABLE}
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group."""
self.client.req('consistency-groups', 'DELETE', name=group['id'],
ver='v2')
volumes_model_update = []
for volume in volumes:
self.delete_volume(volume)
update_item = {'id': volume['id'],
'status': 'deleted'}
volumes_model_update.append(update_item)
model_update = {'status': group['status']}
return model_update, volumes_model_update
def _get_snapset_ancestors(self, snapset_name):
snapset = self.client.req('snapshot-sets',
name=snapset_name)['content']
volume_ids = [s[XTREMIO_OID_INDEX] for s in snapset['vol-list']]
return {v['ancestor-vol-id'][XTREMIO_OID_NAME]: v['name'] for v
in self.client.req('volumes',
data={'full': 1,
'props':
'ancestor-vol-id'})['volumes']
if v['index'] in volume_ids}
    def create_consistencygroup_from_src(self, context, group, volumes,
                                         cgsnapshot=None, snapshots=None,
                                         source_cg=None, source_vols=None):
        """Creates a consistencygroup from source.

        :param context: the context of the caller.
        :param group: the dictionary of the consistency group to be created.
        :param volumes: a list of volume dictionaries in the group.
        :param cgsnapshot: the dictionary of the cgsnapshot as source.
        :param snapshots: a list of snapshot dictionaries in the cgsnapshot.
        :param source_cg: the dictionary of a consistency group as source.
        :param source_vols: a list of volume dictionaries in the source_cg.
        :returns: model_update, volumes_model_update
        """
        # Exactly one source kind must be supplied.  NOTE: `and` binds
        # tighter than `or`, so this reads as
        # (cgsnapshot and snapshots and not source_cg) or
        # (source_cg and source_vols and not cgsnapshot).
        if not (cgsnapshot and snapshots and not source_cg or
                source_cg and source_vols and not cgsnapshot):
            msg = _("create_consistencygroup_from_src only supports a "
                    "cgsnapshot source or a consistency group source. "
                    "Multiple sources cannot be used.")
            raise exception.InvalidInput(msg)
        if cgsnapshot:
            # restore each member volume from its snapshot inside the CG
            # snapshot's snapshot set
            snap_name = self._get_cgsnap_name(cgsnapshot)
            snap_by_anc = self._get_snapset_ancestors(snap_name)
            for volume, snapshot in zip(volumes, snapshots):
                real_snap = snap_by_anc[snapshot['volume_id']]
                self.create_volume_from_snapshot(volume, {'id': real_snap})
        elif source_cg:
            # snapshot the source CG, then rename each resulting snapshot to
            # the id of the new volume it backs
            data = {'consistency-group-id': source_cg['id'],
                    'snapshot-set-name': group['id']}
            self.client.req('snapshots', 'POST', data, ver='v2')
            snap_by_anc = self._get_snapset_ancestors(group['id'])
            for volume, src_vol in zip(volumes, source_vols):
                snap_vol_name = snap_by_anc[src_vol['id']]
                self.client.req('volumes', 'PUT', {'name': volume['id']},
                                name=snap_vol_name)
        # finally gather the new volumes into the new consistency group
        create_data = {'consistency-group-name': group['id'],
                       'vol-list': [v['id'] for v in volumes]}
        self.client.req('consistency-groups', 'POST', data=create_data,
                        ver='v2')
        return None, None
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a consistency group.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be updated.
:param add_volumes: a list of volume dictionaries to be added.
:param remove_volumes: a list of volume dictionaries to be removed.
:returns: model_update, add_volumes_update, remove_volumes_update
"""
add_volumes = add_volumes if add_volumes else []
remove_volumes = remove_volumes if remove_volumes else []
for vol in add_volumes:
add_data = {'vol-id': vol['id'], 'cg-id': group['id']}
self.client.req('consistency-group-volumes', 'POST', add_data,
ver='v2')
for vol in remove_volumes:
remove_data = {'vol-id': vol['id'], 'cg-id': group['id']}
self.client.req('consistency-group-volumes', 'DELETE', remove_data,
name=group['id'], ver='v2')
return None, None, None
def _get_cgsnap_name(self, cgsnapshot):
group_id = cgsnapshot.get('group_id')
if group_id is None:
group_id = cgsnapshot.get('consistencygroup_id')
return '%(cg)s%(snap)s' % {'cg': group_id
.replace('-', ''),
'snap': cgsnapshot['id'].replace('-', '')}
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot."""
group_id = cgsnapshot.get('group_id')
if group_id is None:
group_id = cgsnapshot.get('consistencygroup_id')
data = {'consistency-group-id': group_id,
'snapshot-set-name': self._get_cgsnap_name(cgsnapshot)}
self.client.req('snapshots', 'POST', data, ver='v2')
return None, None
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot."""
self.client.req('snapshot-sets', 'DELETE',
name=self._get_cgsnap_name(cgsnapshot), ver='v2')
return None, None
def create_group(self, context, group):
"""Creates a group.
:param context: the context of the caller.
:param group: the group object.
:returns: model_update
"""
# the driver treats a group as a CG internally.
# We proxy the calls to the CG api.
return self.create_consistencygroup(context, group)
def delete_group(self, context, group, volumes):
"""Deletes a group.
:param context: the context of the caller.
:param group: the group object.
:param volumes: a list of volume objects in the group.
:returns: model_update, volumes_model_update
"""
# the driver treats a group as a CG internally.
# We proxy the calls to the CG api.
return self.delete_consistencygroup(context, group, volumes)
def update_group(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a group.
:param context: the context of the caller.
:param group: the group object.
:param add_volumes: a list of volume objects to be added.
:param remove_volumes: a list of volume objects to be removed.
:returns: model_update, add_volumes_update, remove_volumes_update
"""
# the driver treats a group as a CG internally.
# We proxy the calls to the CG api.
return self.update_consistencygroup(context, group, add_volumes,
remove_volumes)
def create_group_from_src(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
"""Creates a group from source.
:param context: the context of the caller.
:param group: the Group object to be created.
:param volumes: a list of Volume objects in the group.
:param group_snapshot: the GroupSnapshot object as source.
:param snapshots: a list of snapshot objects in group_snapshot.
:param source_group: the Group object as source.
:param source_vols: a list of volume objects in the source_group.
:returns: model_update, volumes_model_update
"""
# the driver treats a group as a CG internally.
# We proxy the calls to the CG api.
return self.create_consistencygroup_from_src(context, group, volumes,
group_snapshot, snapshots,
source_group, source_vols)
def create_group_snapshot(self, context, group_snapshot, snapshots):
"""Creates a group_snapshot.
:param context: the context of the caller.
:param group_snapshot: the GroupSnapshot object to be created.
:param snapshots: a list of Snapshot objects in the group_snapshot.
:returns: model_update, snapshots_model_update
"""
# the driver treats a group as a CG internally.
# We proxy the calls to the CG api.
return self.create_cgsnapshot(context, group_snapshot, snapshots)
def delete_group_snapshot(self, context, group_snapshot, snapshots):
"""Deletes a group_snapshot.
:param context: the context of the caller.
:param group_snapshot: the GroupSnapshot object to be deleted.
:param snapshots: a list of snapshot objects in the group_snapshot.
:returns: model_update, snapshots_model_update
"""
# the driver treats a group as a CG internally.
# We proxy the calls to the CG api.
return self.delete_cgsnapshot(context, group_snapshot, snapshots)
def _get_ig(self, name):
try:
return self.client.req('initiator-groups', 'GET',
name=name)['content']
except exception.NotFound:
pass
def _create_ig(self, name):
# create an initiator group to hold the initiator
data = {'ig-name': name}
self.client.req('initiator-groups', 'POST', data)
try:
return self.client.req('initiator-groups', name=name)['content']
except exception.NotFound:
raise (exception.VolumeBackendAPIException
(data=_("Failed to create IG, %s") % name))
@interface.volumedriver
class XtremIOISCSIDriver(XtremIOVolumeDriver, driver.ISCSIDriver):
    """Executes commands relating to ISCSI volumes.

    We make use of model provider properties as follows:

    ``provider_location``
      if present, contains the iSCSI target information in the same
      format as an ietadm discovery
      i.e. '<ip>:<port>,<portal> <target IQN>'

    ``provider_auth``
      if present, contains a space-separated triple:
      '<auth method> <auth username> <auth password>'.
      `CHAP` is the only auth_method in use at the moment.
    """
    driver_name = 'XtremIO_ISCSI'
    def __init__(self, *args, **kwargs):
        super(XtremIOISCSIDriver, self).__init__(*args, **kwargs)
        self.protocol = 'iSCSI'
    def _add_auth(self, data, login_chap, discovery_chap):
        """Populate *data* with CHAP credentials as requested.

        Returns a (login_password, discovery_password) tuple; each element
        is None when the corresponding CHAP mode was not requested.
        """
        login_passwd, discovery_passwd = None, None
        if login_chap:
            data['initiator-authentication-user-name'] = 'chap_user'
            login_passwd = self._get_password()
            data['initiator-authentication-password'] = login_passwd
        if discovery_chap:
            data['initiator-discovery-user-name'] = 'chap_user'
            discovery_passwd = self._get_password()
            data['initiator-discovery-password'] = discovery_passwd
        return login_passwd, discovery_passwd
    def _create_initiator(self, connector, login_chap, discovery_chap):
        """Create the connector's initiator on the array.

        Returns the (login, discovery) CHAP passwords generated for it.
        """
        initiator = self._get_initiator_names(connector)[0]
        # create an initiator
        data = {'initiator-name': initiator,
                'ig-id': initiator,
                'port-address': initiator}
        l, d = self._add_auth(data, login_chap, discovery_chap)
        self.client.req('initiators', 'POST', data)
        return l, d
    def initialize_connection(self, volume, connector):
        """Map the volume for the connector and return iSCSI properties."""
        try:
            sys = self.client.get_cluster()
        except exception.NotFound:
            msg = _("XtremIO not initialized correctly, no clusters found")
            raise exception.VolumeBackendAPIException(data=msg)
        # cluster-wide settings decide whether CHAP credentials are needed
        # for session login and/or target discovery
        login_chap = (sys.get('chap-authentication-mode', 'disabled') !=
                      'disabled')
        discovery_chap = (sys.get('chap-discovery-mode', 'disabled') !=
                          'disabled')
        initiator_name = self._get_initiator_names(connector)[0]
        initiator = self.client.get_initiator(initiator_name)
        if initiator:
            # reuse the credentials already stored on the array
            login_passwd = initiator['chap-authentication-initiator-password']
            discovery_passwd = initiator['chap-discovery-initiator-password']
            ig = self._get_ig(initiator['ig-id'][XTREMIO_OID_NAME])
        else:
            ig = self._get_ig(self._get_ig_name(connector))
            if not ig:
                ig = self._create_ig(self._get_ig_name(connector))
            (login_passwd,
             discovery_passwd) = self._create_initiator(connector,
                                                        login_chap,
                                                        discovery_chap)
        # if CHAP was enabled after the initiator was created, generate and
        # store the missing password(s) now
        if login_chap and not login_passwd:
            LOG.info('Initiator has no password while using chap, adding it.')
            data = {}
            (login_passwd,
             d_passwd) = self._add_auth(data, login_chap, discovery_chap and
                                        not discovery_passwd)
            discovery_passwd = (discovery_passwd if discovery_passwd
                                else d_passwd)
            self.client.req('initiators', 'PUT', data, idx=initiator['index'])
        # lun mapping
        lunmap = self.create_lun_map(volume, ig['ig-id'][XTREMIO_OID_NAME])
        properties = self._get_iscsi_properties(lunmap)
        if login_chap:
            properties['auth_method'] = 'CHAP'
            properties['auth_username'] = 'chap_user'
            properties['auth_password'] = login_passwd
        if discovery_chap:
            properties['discovery_auth_method'] = 'CHAP'
            properties['discovery_auth_username'] = 'chap_user'
            properties['discovery_auth_password'] = discovery_passwd
        LOG.debug('init conn params:\n%s',
                  strutils.mask_dict_password(properties))
        return {
            'driver_volume_type': 'iscsi',
            'data': properties
        }
    def _get_iscsi_properties(self, lunmap):
        """Gets iscsi configuration.

        :target_discovered: boolean indicating whether discovery was used
        :target_iqn: the IQN of the iSCSI target
        :target_portal: the portal of the iSCSI target
        :target_lun: the lun of the iSCSI target
        :volume_id: the id of the volume (currently used by xen)
        :auth_method:, :auth_username:, :auth_password:
            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.

        multiple connection return
        :target_iqns, :target_portals, :target_luns, which contain lists of
        multiple values. The main portal information is also returned in
        :target_iqn, :target_portal, :target_lun for backward compatibility.
        """
        portals = self.client.get_iscsi_portals()
        if not portals:
            msg = _("XtremIO not configured correctly, no iscsi portals found")
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        # spread sessions across portals by picking the main one at random
        portal = RANDOM.choice(portals)
        portal_addr = ('%(ip)s:%(port)d' %
                       {'ip': portal['ip-addr'].split('/')[0],
                        'port': portal['ip-port']})
        tg_portals = ['%(ip)s:%(port)d' % {'ip': p['ip-addr'].split('/')[0],
                                           'port': p['ip-port']}
                      for p in portals]
        properties = {'target_discovered': False,
                      'target_iqn': portal['port-address'],
                      'target_lun': lunmap['lun'],
                      'target_portal': portal_addr,
                      'target_iqns': [p['port-address'] for p in portals],
                      'target_portals': tg_portals,
                      'target_luns': [lunmap['lun']] * len(portals)}
        return properties
    def _get_initiator_names(self, connector):
        # iSCSI connectors carry a single initiator IQN
        return [connector['initiator']]
    def _get_ig_name(self, connector):
        # the initiator group is named after the initiator's IQN
        return connector['initiator']
@interface.volumedriver
class XtremIOFCDriver(XtremIOVolumeDriver,
                      driver.FibreChannelDriver):
    """Fibre Channel flavour of the XtremIO volume driver."""
    def __init__(self, *args, **kwargs):
        super(XtremIOFCDriver, self).__init__(*args, **kwargs)
        self.protocol = 'FC'
        # lazily-populated cache of the array's FC target WWNs
        self._targets = None
    def get_targets(self):
        """Return (and cache) WWNs of the array FC ports that are up."""
        if not self._targets:
            try:
                targets = self.client.get_fc_up_ports()
                self._targets = [target['port-address'].replace(':', '')
                                 for target in targets]
            except exception.NotFound:
                raise (exception.VolumeBackendAPIException
                       (data=_("Failed to get targets")))
        return self._targets
    def _get_free_lun(self, igs):
        """Find the smallest lun number unused by all given IGs.

        Lun 0 is always treated as taken, so the result is >= 1.
        """
        luns = []
        for ig in igs:
            luns.extend(lm['lun'] for lm in
                        self.client.req('lun-maps',
                                        data={'full': 1, 'prop': 'lun',
                                              'filter': 'ig-name:eq:%s' % ig})
                        ['lun-maps'])
        uniq_luns = set(luns + [0])
        seq = range(len(uniq_luns) + 1)
        return min(set(seq) - uniq_luns)
    @fczm_utils.add_fc_zone
    def initialize_connection(self, volume, connector):
        """Map the volume to the connector's IGs; return FC properties."""
        wwpns = self._get_initiator_names(connector)
        ig_name = self._get_ig_name(connector)
        i_t_map = {}
        found = []
        new = []
        # split the connector's WWPNs into initiators already known to the
        # array and ones that must be created
        for wwpn in wwpns:
            init = self.client.get_initiator(wwpn)
            if init:
                found.append(init)
            else:
                new.append(wwpn)
            i_t_map[wwpn.replace(':', '')] = self.get_targets()
        # get or create initiator group
        if new:
            ig = self._get_ig(ig_name)
            if not ig:
                ig = self._create_ig(ig_name)
            for wwpn in new:
                data = {'initiator-name': wwpn, 'ig-id': ig_name,
                        'port-address': wwpn}
                self.client.req('initiators', 'POST', data)
        igs = list(set([i['ig-id'][XTREMIO_OID_NAME] for i in found]))
        if new and ig['ig-id'][XTREMIO_OID_NAME] not in igs:
            igs.append(ig['ig-id'][XTREMIO_OID_NAME])
        # multiple IGs must share one explicitly chosen free lun number; a
        # single IG can let the array pick one
        if len(igs) > 1:
            lun_num = self._get_free_lun(igs)
        else:
            lun_num = None
        for ig in igs:
            lunmap = self.create_lun_map(volume, ig, lun_num)
            lun_num = lunmap['lun']
        return {'driver_volume_type': 'fibre_channel',
                'data': {
                    'target_discovered': False,
                    'target_lun': lun_num,
                    'target_wwn': self.get_targets(),
                    'initiator_target_map': i_t_map}}
    @fczm_utils.remove_fc_zone
    def terminate_connection(self, volume, connector, **kwargs):
        """Unmap the volume; include zoning info once the IG is empty."""
        (super(XtremIOFCDriver, self)
         .terminate_connection(volume, connector, **kwargs))
        has_volumes = (not connector
                       or self.client.
                       num_of_mapped_volumes(self._get_ig_name(connector)) > 0)
        if has_volumes:
            data = {}
        else:
            # no volumes left mapped to this IG: return the initiator/target
            # map so the FC zone manager can tear down the zone
            i_t_map = {}
            for initiator in self._get_initiator_names(connector):
                i_t_map[initiator.replace(':', '')] = self.get_targets()
            data = {'target_wwn': self.get_targets(),
                    'initiator_target_map': i_t_map}
        return {'driver_volume_type': 'fibre_channel',
                'data': data}
    def _get_initiator_names(self, connector):
        # normalize bare WWPNs to the colon-separated form used by the array
        return [wwpn if ':' in wwpn else
                ':'.join(wwpn[i:i + 2] for i in range(0, len(wwpn), 2))
                for wwpn in connector['wwpns']]
    def _get_ig_name(self, connector):
        # FC initiator groups are named after the connector's host
        return connector['host']
| |
"""tests for passlib.pwhash -- (c) Assurance Technologies 2003-2009"""
#=============================================================================
# imports
#=============================================================================
from __future__ import with_statement
# core
import hashlib
from logging import getLogger
import os
import time
import warnings
import sys
# site
# pkg
from passlib import hash, registry
from passlib.registry import register_crypt_handler, register_crypt_handler_path, \
get_crypt_handler, list_crypt_handlers, _unload_handler_name as unload_handler_name
import passlib.utils.handlers as uh
from passlib.tests.utils import TestCase, catch_warnings
# module
log = getLogger(__name__)
#=============================================================================
# dummy handlers
#
# NOTE: these are defined outside of test case
# since they're used by test_register_crypt_handler_path(),
# which needs them to be available as module globals.
#=============================================================================
class dummy_0(uh.StaticHandler):
    # placeholder handler targeted by register_crypt_handler_path() tests
    name = "dummy_0"
class alt_dummy_0(uh.StaticHandler):
    # alternate handler registered under the same name, exercising the
    # "module:attribute" lazy-load form
    name = "dummy_0"
dummy_x = 1  # not a handler class -- lazy-loading this must raise TypeError
#=============================================================================
# test registry
#=============================================================================
class RegistryTest(TestCase):
    """Tests for the passlib handler registry (register/get/list)."""
    descriptionPrefix = "passlib registry"
    def tearDown(self):
        # drop any handlers a test registered so cases stay independent
        for name in ("dummy_0", "dummy_1", "dummy_x", "dummy_bad"):
            unload_handler_name(name)
    def test_hash_proxy(self):
        "test passlib.hash proxy object"
        # check dir works
        dir(hash)
        # check repr works
        repr(hash)
        # check non-existent attrs raise error
        self.assertRaises(AttributeError, getattr, hash, 'fooey')
        # GAE tries to set __loader__,
        # make sure that doesn't call register_crypt_handler.
        old = getattr(hash, "__loader__", None)
        test = object()
        hash.__loader__ = test
        self.assertIs(hash.__loader__, test)
        if old is None:
            del hash.__loader__
            self.assertFalse(hasattr(hash, "__loader__"))
        else:
            hash.__loader__ = old
            self.assertIs(hash.__loader__, old)
        # check storing attr calls register_crypt_handler
        class dummy_1(uh.StaticHandler):
            name = "dummy_1"
        hash.dummy_1 = dummy_1
        self.assertIs(get_crypt_handler("dummy_1"), dummy_1)
        # check storing under wrong name results in error
        self.assertRaises(ValueError, setattr, hash, "dummy_1x", dummy_1)
    def test_register_crypt_handler_path(self):
        "test register_crypt_handler_path()"
        # NOTE: this messes w/ internals of registry, shouldn't be used publicly.
        paths = registry._locations
        # check namespace is clear
        self.assertTrue('dummy_0' not in paths)
        self.assertFalse(hasattr(hash, 'dummy_0'))
        # check invalid names are rejected
        self.assertRaises(ValueError, register_crypt_handler_path,
                          "dummy_0", ".test_registry")
        self.assertRaises(ValueError, register_crypt_handler_path,
                          "dummy_0", __name__ + ":dummy_0:xxx")
        self.assertRaises(ValueError, register_crypt_handler_path,
                          "dummy_0", __name__ + ":dummy_0.xxx")
        # try lazy load
        register_crypt_handler_path('dummy_0', __name__)
        self.assertTrue('dummy_0' in list_crypt_handlers())
        self.assertTrue('dummy_0' not in list_crypt_handlers(loaded_only=True))
        self.assertIs(hash.dummy_0, dummy_0)
        self.assertTrue('dummy_0' in list_crypt_handlers(loaded_only=True))
        unload_handler_name('dummy_0')
        # try lazy load w/ alt
        register_crypt_handler_path('dummy_0', __name__ + ':alt_dummy_0')
        self.assertIs(hash.dummy_0, alt_dummy_0)
        unload_handler_name('dummy_0')
        # check lazy load w/ wrong type fails
        register_crypt_handler_path('dummy_x', __name__)
        self.assertRaises(TypeError, get_crypt_handler, 'dummy_x')
        # check lazy load w/ wrong name fails
        register_crypt_handler_path('alt_dummy_0', __name__)
        self.assertRaises(ValueError, get_crypt_handler, "alt_dummy_0")
        # TODO: check lazy load which calls register_crypt_handler (warning should be issued)
        sys.modules.pop("passlib.tests._test_bad_register", None)
        register_crypt_handler_path("dummy_bad", "passlib.tests._test_bad_register")
        with catch_warnings():
            warnings.filterwarnings("ignore", "xxxxxxxxxx", DeprecationWarning)
            h = get_crypt_handler("dummy_bad")
        from passlib.tests import _test_bad_register as tbr
        self.assertIs(h, tbr.alt_dummy_bad)
    def test_register_crypt_handler(self):
        "test register_crypt_handler()"
        # non-handler types and invalid handler names must all be rejected
        self.assertRaises(TypeError, register_crypt_handler, {})
        self.assertRaises(ValueError, register_crypt_handler, type('x', (uh.StaticHandler,), dict(name=None)))
        self.assertRaises(ValueError, register_crypt_handler, type('x', (uh.StaticHandler,), dict(name="AB_CD")))
        self.assertRaises(ValueError, register_crypt_handler, type('x', (uh.StaticHandler,), dict(name="ab-cd")))
        self.assertRaises(ValueError, register_crypt_handler, type('x', (uh.StaticHandler,), dict(name="ab__cd")))
        self.assertRaises(ValueError, register_crypt_handler, type('x', (uh.StaticHandler,), dict(name="default")))
        class dummy_1(uh.StaticHandler):
            name = "dummy_1"
        class dummy_1b(uh.StaticHandler):
            name = "dummy_1"
        # re-registering the same handler is a no-op; a different handler
        # under the same name requires force=True
        self.assertTrue('dummy_1' not in list_crypt_handlers())
        register_crypt_handler(dummy_1)
        register_crypt_handler(dummy_1)
        self.assertIs(get_crypt_handler("dummy_1"), dummy_1)
        self.assertRaises(KeyError, register_crypt_handler, dummy_1b)
        self.assertIs(get_crypt_handler("dummy_1"), dummy_1)
        register_crypt_handler(dummy_1b, force=True)
        self.assertIs(get_crypt_handler("dummy_1"), dummy_1b)
        self.assertTrue('dummy_1' in list_crypt_handlers())
    def test_get_crypt_handler(self):
        "test get_crypt_handler()"
        class dummy_1(uh.StaticHandler):
            name = "dummy_1"
        # without available handler
        self.assertRaises(KeyError, get_crypt_handler, "dummy_1")
        self.assertIs(get_crypt_handler("dummy_1", None), None)
        # already loaded handler
        register_crypt_handler(dummy_1)
        self.assertIs(get_crypt_handler("dummy_1"), dummy_1)
        with catch_warnings():
            warnings.filterwarnings("ignore", "handler names should be lower-case, and use underscores instead of hyphens:.*", UserWarning)
            # already loaded handler, using incorrect name
            self.assertIs(get_crypt_handler("DUMMY-1"), dummy_1)
            # lazy load of unloaded handler, using incorrect name
            register_crypt_handler_path('dummy_0', __name__)
            self.assertIs(get_crypt_handler("DUMMY-0"), dummy_0)
        # check system & private names aren't returned
        import passlib.hash # ensure module imported, so py3.3 sets __package__
        passlib.hash.__dict__["_fake"] = "dummy" # so behavior seen under py2x also
        for name in ["_fake", "__package__"]:
            self.assertRaises(KeyError, get_crypt_handler, name)
            self.assertIs(get_crypt_handler(name, None), None)
    def test_list_crypt_handlers(self):
        "test list_crypt_handlers()"
        from passlib.registry import list_crypt_handlers
        # check system & private names aren't returned
        import passlib.hash # ensure module imported, so py3.3 sets __package__
        passlib.hash.__dict__["_fake"] = "dummy" # so behavior seen under py2x also
        for name in list_crypt_handlers():
            self.assertFalse(name.startswith("_"), "%r: " % name)
#=============================================================================
# eof
#=============================================================================
| |
# Authors: Lukas Breuer <l.breuer@fz-juelich.de>
"""
----------------------------------------------------------------------
--- jumeg.decompose.fourier_ica --------------------------------------
----------------------------------------------------------------------
author : Lukas Breuer
email : l.breuer@fz-juelich.de
last update: 13.11.2015
version : 1.0
----------------------------------------------------------------------
The implementation of the following methods for automated
selection of the optimal data dimensionality is based on
following publications
----------------------------------------------------------------------
A. Cichocki, & S. Amari, 2002. "Adaptive Blind Signal and Image
Processing - Learning Algorithms and Applications,"
John Wiley & Sons
T. P. Minka, 'Automatic choice of dimensionality
for PCA', MIT Press (2001)
Z. He, A. Cichocki, S. Xie and K. Choi, "Detecting the number
of clusters in n-way probabilistic clustering," IEEE Trans.
Pattern Anal. Mach. Intell., vol. 32, pp. 2006-2021, Nov, 2010.
M. Wax, and T. Kailath, "Detection of signals by
information-theoretic criteria," IEEE Trans. on Acoustics,
vol. 33, pp. 387-392, 1985.
----------------------------------------------------------------------
Overview
----------------------------------------------------------------------
All methods are based on the eigenvalues you get from the
eigenvalue decomposition of the data covariance matrix.
mibs() --> Routine to estimate the Minka Bayesian model
Selection (MIBS) value; can also be used to perform
model order selection based on the Bayesian Information
Criterion (BIC)
gap() --> Routine to estimate the model order using
the GAP value
aic_mdl() --> Routine to estimate the model order using
the Akaike's information criterion (AIC) or the
minimum description length (MDL) criterion.
----------------------------------------------------------------------
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
import numpy as np
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# method to estimate the optimal data dimension for ICA
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def mibs(eigenvalues, n_samples, use_bic=False):
    """
    Routine to estimate the Minka Bayesian model
    Selection (MIBS) value as introduced in:
        T. P. Minka, 'Automatic choice of dimensionality
        for PCA', MIT Press (2001)

    Note: For numerical stability here ln(MIBS) is
    estimated instead of MIBS

    Parameters
    ----------
    eigenvalues: eigenvalues received when applying
        PCA. Note eigenvalues must be sorted decreasing
    n_samples: number of samples/ time slices used to
        estimate the covariance matrix for PCA
    use_bic: if set the BIC-method is used instead
        of MIBS to estimate the optimal dimension

    Returns
    -------
    pca_dim: optimal data dimension
    """
    # ------------------------------------------
    # import necessary modules
    # ------------------------------------------
    # NOTE: lgamma() replaces log(gamma()) here: gamma(x) overflows to
    # inf for x > ~171 (i.e. more than ~343 eigenvalues), while lgamma()
    # stays finite; both are mathematically identical for positive x.
    from math import lgamma

    # ------------------------------------------
    # set variables to conform with the notation
    # in Cichocki and Amari, 'Adaptive Blind
    # Signal And Image Processing', (2006), p.93
    # ------------------------------------------
    N = n_samples
    m = len(eigenvalues)
    mibs_val = np.zeros(m)
    bic_val = np.zeros(m)
    log_pi = np.log(np.pi)
    log_2pi = np.log(2.0 * np.pi)
    log_N = np.log(N)

    # ------------------------------------------
    # loop over all possible ranks
    # ------------------------------------------
    for n in range(1, m):
        # ------------------------------------------
        # define some variables for MIBS and BIC
        # ------------------------------------------
        # sigma: noise-variance estimate for rank n, i.e. the mean of
        # the (m - n) smallest eigenvalues
        sigma = np.mean(eigenvalues[n:])
        d_n = m*n - 0.5*n*(n+1)
        p_n = -n * np.log(2.0)
        A_n = 0.0
        # sum of log signal eigenvalues (= log of their product)
        prod_lambda = np.sum(np.log(eigenvalues[:n]))
        # tail eigenvalues are replaced by the common noise variance
        eigenvalues_tmp = eigenvalues.copy()
        eigenvalues_tmp[n:] = sigma

        # ------------------------------------------
        # estimate p_n and A_n
        # ------------------------------------------
        # loop over n
        for idx in range(n):
            p_n += lgamma(0.5*(m-idx)) - (0.5*(m-idx) * log_pi)
            for j in range(idx+1, m):
                A_n += np.log(eigenvalues_tmp[idx] - eigenvalues_tmp[j]) +\
                    np.log(eigenvalues_tmp[j]) + np.log(eigenvalues_tmp[idx]) + \
                    log_N + np.log(eigenvalues[idx]-eigenvalues[j])

        # ------------------------------------------
        # estimate the MIBS/BIC value
        # ------------------------------------------
        mibs_val[n] = p_n - 0.5 * N * prod_lambda - N * (m-n) * np.log(sigma) - \
            0.5 * A_n + 0.5*(d_n+n) * log_2pi - 0.5 * n * log_N
        bic_val[n] = - 0.5 * N * prod_lambda - N * (m-n) * np.log(sigma) - 0.5*(d_n+n) * log_N

    # ------------------------------------------
    # get index of maximum MIBS/BIC value
    # ------------------------------------------
    # NOTE: index 0 keeps its initial value 0.0, so a rank of 0 can be
    # returned if all estimated values are negative
    if use_bic:
        pca_dim = bic_val.argmax()
    else:
        pca_dim = mibs_val.argmax()

    return pca_dim
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# method to estimate the optimal data dimension for ICA
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def gap(eigenvalues):
    """
    Routine to estimate the model order using the GAP value
    as introduced in:
        Z. He, A. Cichocki, S. Xie and K. Choi,
        "Detecting the number of clusters in n-way
        probabilistic clustering,"
        IEEE Trans. Pattern Anal. Mach. Intell.,
        vol. 32, pp. 2006-2021, Nov, 2010.

    Parameters
    ----------
    eigenvalues: eigenvalues received when applying
        PCA. Note eigenvalues must be sorted decreasing

    Returns
    -------
    pca_dim: optimal data dimension
    """
    # ------------------------------------------
    # check input parameter
    # ------------------------------------------
    n_eig = len(eigenvalues)
    gap_values = np.ones(n_eig)

    # ------------------------------------------
    # loop over all eigenvalues
    # ------------------------------------------
    # For each candidate rank, relate the drop of the next eigenvalue
    # towards the tail mean to the drop of the current one; the last
    # two entries keep their initial value of 1.
    for k in range(n_eig - 2):
        tail_mean = np.mean(eigenvalues[k + 1:])
        gap_values[k] = (eigenvalues[k + 1] - tail_mean) / (eigenvalues[k] - tail_mean)

    # ------------------------------------------
    # the model order is the index of the smallest GAP value
    # ------------------------------------------
    return gap_values.argmin()
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# method to estimate the optimal data dimension for ICA
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def aic_mdl(eigenvalues):
    """
    Routine to estimate the model order using the Akaike's
    information criterion (AIC) or the minimum description
    length (MDL) criterion. For detailed information see:
        M. Wax, and T. Kailath,
        "Detection of signals by information-theoretic
        criteria," IEEE Trans. on Acoustics,
        vol. 33, pp. 387-392, 1985.

    Parameters
    ----------
    eigenvalues: eigenvalues received when applying
        PCA. Note eigenvalues must be sorted decreasing

    Returns
    -------
    aic_dim: optimal data dimension based on the AIC
        method
    mdl_dim: optimal data dimension based on the MDL
        method
    """
    # ------------------------------------------
    # check input parameter
    # ------------------------------------------
    n_eig = len(eigenvalues)
    aic = np.ones(n_eig)
    mdl = np.ones(n_eig)

    # ------------------------------------------
    # loop over all eigenvalues to estimate AIC
    # and MDL values
    # ------------------------------------------
    for k in range(1, n_eig):
        # log-ratio of geometric to arithmetic mean of the tail
        # eigenvalues (<= 0, and 0 only if all tail values are equal)
        log_rho = np.mean(np.log(eigenvalues[k:])) - np.log(np.mean(eigenvalues[k:]))
        aic[k] = -2.0 * n_eig * (n_eig - k + 1) * log_rho + 2.0 * (k + 1) * (2.0 * n_eig - k + 1)
        mdl[k] = -1.0 * n_eig * (n_eig - k + 1) * log_rho + 0.5 * (k + 1) * (2.0 * n_eig - k + 1) * np.log(n_eig)

    # ------------------------------------------
    # the model order is the index of the minimum
    # AIC/MDL value (index 0 is never considered)
    # ------------------------------------------
    aic_dim = aic[1:].argmin() + 1
    mdl_dim = mdl[1:].argmin() + 1
    return aic_dim, mdl_dim
| |
# -*- coding: UTF-8 -*-
# Copyright (c) 2006, 2011, 2013-2018 Matthew Zipay.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""The classes in this module are used to define components and their
dependencies within an Aglyph context (:class:`aglyph.context.Context`).
.. rubric:: Components and Templates
:class:`aglyph.component.Component` tells an
:class:`aglyph.assembler.Assembler` how to create objects of a
particular type. The component defines the initialization and/or
attribute dependencies for objects of that type, as well as the assembly
strategy and any lifecycle methods that should be called.
:class:`aglyph.component.Template` is used to describe dependencies
(initialization and/or attribute) and lifecycle methods that are shared
by multiple components. Templates are similar to abstract classes; they
cannot be assembled, but are instead used as "parents" of other
components (or templates) to achieve a sort of "configuration
inheritance."
.. note::
Both ``Component`` and ``Template`` may serve as the parent of any
other component or template; but only components may be assembled.
.. rubric:: References and Evaluators
A :class:`aglyph.component.Reference` may be used as the value of any
initialization argument (positional or keyword) or attribute in a
component or template. Its value must be the unique ID of a
**component** in the same context. At assembly time, the assembler will
resolve the reference into an object of the component to which it
refers.
An :class:`aglyph.component.Evaluator` is similar to a
:func:`functools.partial` object. It stores a callable factory (function
or class) and related initialization arguments, and can be called
repeatedly to produce new objects. (Unlike a :func:`functools.partial`
object, though, an ``Evaluator`` will resolve any initialization
argument that is a :class:`aglyph.component.Reference`,
:func:`functools.partial`, or ``Evaluator`` **before** calling the
factory.)
.. rubric:: Strategies and Lifecycle methods
:data:`aglyph.component.Strategy` defines the assembly strategies
supported by Aglyph (*"prototype"*, *"singleton"*, *"borg"*,
*"weakref"* and *"_imported"*).
:data:`LifecycleState` defines assembly states for components at
which Aglyph supports calling named methods on the objects of those
components. (Such methods may be used to perform specialized
initialization or disposal, for example.)
"""
__author__ = "Matthew Zipay <mattz@ninthtest.info>"
from collections import namedtuple, OrderedDict
from functools import partial
from inspect import isclass, ismodule, isroutine
import logging
import warnings
from autologging import logged, traced
from aglyph import AglyphError, _identify, __version__
from aglyph._compat import is_string, name_of, TextType
__all__ = [
"Strategy",
"LifecycleState",
"Reference",
"Evaluator",
"Template",
"Component",
]
_log = logging.getLogger(__name__)
Strategy = namedtuple(
"Strategy", ["PROTOTYPE", "SINGLETON", "BORG", "WEAKREF"])(
"prototype", "singleton", "borg", "weakref")
"""Define the component assembly strategies implemented by Aglyph.
.. rubric:: "prototype"
A new object is always created, initialized, wired, and returned.
.. note::
"prototype" is the default assembly strategy for Aglyph components
that do not specify a member name.
.. rubric:: "singleton"
The cached object is returned if it exists. Otherwise, the object is
created, initialized, wired, cached, and returned.
Singleton component objects are cached by :attr:`Component.unique_id`.
.. rubric:: "borg"
A new instance is always created. The shared-state is assigned to the
new instance's ``__dict__`` if it exists. Otherwise, the new instance is
initialized and wired, its instance ``__dict__`` is cached, and then the
instance is returned.
Borg component instance shared-states are cached by
:attr:`Component.unique_id`.
.. warning::
* The borg assembly strategy is **only** supported for
components that are non-builtin classes.
* The borg assembly strategy is **not** supported for
classes that define or inherit a ``__slots__`` member.
.. rubric:: "weakref"
In the simplest terms, this is a "prototype" that can exhibit
"singleton" behavior: as long as there is at least one "live" reference
to the assembled object in the application runtime, then requests to
assemble this component will return the same (cached) object.
When the only reference to the assembled object that remains is the
cached weak reference, the Python garbage collector is free to destroy
the object, at which point it is automatically removed from the Aglyph
cache.
Subsequent requests to assemble the same component will cause a new
object to be created, initialized, wired, cached (as a weak reference),
and returned.
.. note::
Please refer to the :mod:`weakref` module for a detailed explanation
of weak reference behavior.
.. rubric:: "_imported"
.. versionadded:: 3.0.0
.. note::
The "_imported" strategy is only valid (and is the only allowed
value) when *member_name* is specified for a component.
Since this strategy is implicitly assigned and is intended for
internal use by Aglyph itself, it is not exposed on the
``Strategy`` named tuple.
An already-created (loaded) object is obtained from an imported module
or class (as opposed to creating the object directly).
Such components will always resolve (i.e. be assembled) to the same
objects; but those objects are not cached by Aglyph as they will
exhibit "natural" singleton behavior so long as the containing module
is referenced in :attr:`sys.modules`.
It is not necessary to explicitly set the strategy to "_imported" when
using *member_name* - Aglyph will default to "_imported" when it sees
a non-empty *member_name* defined.
.. warning::
Explicitly setting strategy="_imported" **without** specifying
*member_name* will raise :exc:`AglyphError`.
Specifying *member_name* with any explicit strategy other than
"_imported" will ignore the explicit strategy, change it to
"_imported" internally, and issue a :class:`UserWarning` to that
effect.
"""
LifecycleState = namedtuple(
"LifecycleState", ["AFTER_INJECT", "BEFORE_CLEAR"])(
"after_inject", "before_clear")
"""Define the lifecycle states for which Aglyph will call object methods
on your behalf.
.. _lifecycle-methods:
.. rubric:: Lifecycle methods
Lifecycle methods are called with **no arguments** (positional or
keyword).
If a called lifecycle method raises an exception, the exception is
caught, logged at :attr:`logging.ERROR` level (including a traceback) to
the "aglyph.assembler.Assembler" channel, and a :class:`RuntimeWarning`
is issued.
A method may be registered for a lifecycle state by specifying the
method name at the context (least specific), template, and/or component
(most specific) level.
.. note::
Aglyph only calls **one** method on an object for any lifecycle
state. Refer to **The lifecycle method lookup process** (below) for
details.
Aglyph recognizes the following lifecycle states:
.. rubric:: "after_inject"
A component object is in this state after **all** dependencies (both
initialization arguments and attributes) have been injected into a
newly-created instance, but before the object is cached and/or returned
to the caller.
Aglyph will only call **one** "after_inject" method on any object, and
will determine which method to call by using the lookup process
described below.
.. rubric:: "before_clear"
A component object is in this state after is has been removed from an
internal cache (singleton, borg, or weakref), but before the object
itself is actually discarded.
Aglyph will only call **one** "before_clear" method on any object, and
will determine which method to call by using the lookup process
described below.
.. _lifecycle-method-lookup-process:
.. rubric:: The lifecycle method lookup process
Lifecyle methods may be specified at the context (least specific),
template, and component (most specific) levels.
In order to determine which named method is called for a particular
object, Aglyph looks up the appropriate lifecycle method name in the
following order, using the **first** one found that is not ``None``
*and* is actually defined on the object:
#. The method named by the object's ``Component.<lifecycle-state>``
property.
#. If the object's :attr:`Component.parent_id` is not ``None``, the
method named by the corresponding parent
``Template.<lifecycle-state>`` or ``Component.<lifecycle-state>``
property. (If necessary, lookup continues by examining the
parent-of-the-parent and so on.)
#. The method named by the ``Context.<lifecycle-state>`` property.
When Aglyph finds a named lifecycle method that applies to an object,
but the object itself does not define that method, a
:attr:`logging.WARNING` message is emitted.
.. note::
Either a :class:`Component` or :attr:`Template` may serve as the
parent identified by a ``parent_id``.
However, only a :class:`Component` may actually be assembled into
a usable object. (A :attr:`Template` is like an abstract class -
it defines common dependencies and/or lifecycle methods, but it
cannot be assembled.)
"""
class Reference(TextType):
    """A placeholder that names another :class:`Component` by ID.

    A ``Reference`` is an alias identifying a component that is a
    dependency of another component; its value is either a dotted-name
    or a user-provided unique ID.

    A ``Reference`` may be used as an argument for an
    :class:`Evaluator`, and can be assembled directly by an
    :class:`aglyph.assembler.Assembler`.

    .. warning::
        A ``Reference`` value MUST correspond to a component ID in the
        same context.

    .. note::
        The super class is "dynamic" with respect to the running
        Python version (:class:`unicode` under Python 2, :class:`str`
        under Python 3), so a user-defined identifier of either text
        type is accommodated. Consequently, a dotted-name
        ``Reference`` is limited to the ASCII subset of the source
        encoding under Python < 3.0 (see :pep:`0263`), but may contain
        non-ASCII characters under Python >= 3.0 (see :pep:`3131`).
    """

    def __new__(cls, referent):
        """Create a new reference to *referent*.

        :arg referent:
            the object that the reference will represent - either a
            string (assumed to be a valid :attr:`Component.unique_id`)
            or an **importable** class, function, or module
        :raise aglyph.AglyphError:
            if *referent* is a class, function, or module but cannot
            be imported

        .. warning::
            If *referent* is a class, function, or module, it **must**
            be importable.
        """
        # _identify() passes a string through as-is, or derives the
        # importable dotted name of a class/function/module
        unique_id = _identify(referent)
        return super(Reference, cls).__new__(cls, unique_id)
# Record (at import time) which concrete text type Reference extends -
# unicode under Python 2, str under Python 3 (resolved by aglyph._compat).
_log.debug("Reference extends %r", TextType)
class _InitializationSupport(object):
    """Base for any class that configures type 2 (constructor)
    injection.
    """

    __slots__ = ["_args", "_keywords"]

    def __init__(self):
        """Start with an empty positional argument list and an empty
        keyword argument mapping.
        """
        #PYVER: arguments to super() are implicit under Python 3
        super(_InitializationSupport, self).__init__()
        self._keywords = {}
        self._args = []

    @property
    def args(self):
        """The (mutable) list of positional initialization arguments."""
        return self._args

    @property
    def keywords(self):
        """The (mutable) mapping of keyword initialization arguments."""
        return self._keywords
@traced
@logged
class Evaluator(_InitializationSupport):
    """Perform lazy creation of objects."""

    __slots__ = ["_factory"]

    def __init__(self, factory, *args, **keywords):
        """
        :param factory:
            any callable that returns an object
        :param tuple args:
            the positional arguments to *factory*
        :param dict keywords:
            the keyword arguments to *factory*

        Like a :func:`functools.partial`, an ``Evaluator`` bundles a
        callable together with arguments so it can be invoked
        repeatedly to produce new objects.

        *Unlike* a partial function, the stored arguments are not
        necessarily "frozen": any argument may be a
        :class:`Reference`, a :func:`functools.partial`, or another
        ``Evaluator``, and is resolved (assembled/called) each time
        **before** *factory* is invoked. Dictionary and non-string
        sequence arguments are resolved item-by-item according to the
        same rules; any other value is used as-is.
        """
        #PYVER: arguments to super() are implicit under Python 3
        super(Evaluator, self).__init__()
        if not callable(factory):
            raise TypeError("%r is not callable" % factory)
        self._factory = factory
        # _InitializationSupport expects a mutable argument list
        self._args = list(args)
        self._keywords = keywords

    @property
    def factory(self):
        """The :obj:`callable` that creates new objects *(read-only)*."""
        return self._factory

    def __call__(self, assembler):
        """Call ``factory(*args, **keywords)`` and return the new object.

        :param aglyph.assembly.Assembler assembler:
            used to assemble any :class:`Reference` encountered in
            this evaluator's positional and keyword arguments
        """
        resolve = self._resolve
        positional = tuple(resolve(value, assembler) for value in self._args)
        # keyword names MUST be strings; only the values are resolved
        named = dict(
            (keyword, resolve(value, assembler))
            for (keyword, value) in self._keywords.items())
        return self._factory(*positional, **named)

    def _resolve(self, arg, assembler):
        """Return the resolved *arg*.

        :param arg:
            represents an argument (positional or keyword) to
            :attr:`factory`
        :param aglyph.assembly.Assembler assembler:
            the assembler that will be used to resolve *arg*
        :return:
            the resolved argument value that will actually be passed
            to :attr:`factory`

        The checks below are order-sensitive: a ``Reference`` is also
        a string, so it must be recognized before the generic
        iterable/string handling.
        """
        if isinstance(arg, Reference):
            return assembler.assemble(arg)
        if isinstance(arg, Evaluator):
            return arg(assembler)
        if isinstance(arg, partial):
            return arg()
        if isinstance(arg, dict):
            # both keys and values may themselves be References,
            # partials, or Evaluators
            resolve = self._resolve
            return dict(
                (resolve(key, assembler), resolve(value, assembler))
                for (key, value) in arg.items())
        if hasattr(arg, "__iter__") and not is_string(arg):
            resolve = self._resolve
            # assumption: the iterable class supports initialization
            # with __init__(iterable)
            return arg.__class__([resolve(item, assembler) for item in arg])
        return arg

    def __str__(self):
        return "<%s %s @%08x>" % (
            name_of(self.__class__), self._factory.__name__, id(self))

    def __repr__(self):
        return "%s.%s(%r, *%r **%r)" % (
            self.__class__.__module__, name_of(self.__class__),
            self._factory, self._args, self._keywords)
class _DependencySupport(_InitializationSupport):
    """Base for any class that configures both type 1 (setter) and
    type 2 (constructor) injection.
    """

    __slots__ = ["_attributes"]

    def __init__(self):
        """Begin with no field/property/setter dependencies."""
        #PYVER: arguments to super() are implicit under Python 3
        super(_DependencySupport, self).__init__()
        # insertion order of attribute dependencies is preserved
        self._attributes = OrderedDict()

    @property
    def attributes(self):
        """The (ordered, mutable) field/property/setter mapping."""
        return self._attributes
@traced
@logged
class Template(_DependencySupport):
    """Support for configuring type 1 (setter) and type 2 (constructor)
    injection, and lifecycle methods.
    """

    __slots__ = [
        "_after_inject",
        "_before_clear",
        "_parent_id",
        "_unique_id",
    ]

    def __init__(
            self, unique_id, parent_id=None,
            after_inject=None, before_clear=None):
        """
        :arg str unique_id:
            context-unique identifier for this template
        :keyword str parent_id:
            the ID of another template or component that supplies the
            default dependencies and/or lifecycle methods for this one
        :keyword str after_inject:
            the name of the method called on objects of components
            that reference this template, after all component
            dependencies have been injected
        :keyword str before_clear:
            the name of the method called on objects of components
            that reference this template, immediately before they are
            cleared from cache
        :raise ValueError:
            if *unique_id* is ``None`` or empty

        .. note::
            A ``Template`` cannot be assembled (it is equivalent to an
            abstract class). If you need to assemble an object *and*
            reuse its definition as the basis for other components,
            define the shared dependencies and/or lifecycle methods in
            a :class:`Component` instead, and use that component's ID
            as the :attr:`Component.parent_id` in other components.

        *unique_id* must be a user-provided identifier that is unique
        within the context to which this template is added; a
        component opts in to this template by naming it as its
        :attr:`Component.parent_id`.

        *parent_id* is **another** :attr:`Component.unique_id` or
        :attr:`Template.unique_id` in the same context that describes
        **this** template's default dependencies and/or lifecycle
        methods.

        *after_inject* names a method *of objects of this component*
        that is called (with **no** arguments) after **all**
        dependencies have been injected, but before the object is
        returned to the caller. Exceptions raised by this method are
        not caught.

        .. note::
            ``Template.after_inject`` takes precedence over any
            *after_inject* method name specified for the template's
            parent or context.

        *before_clear* names a method *of objects of this component*
        that is called immediately before the object is cleared from
        cache via
        :meth:`aglyph.assembler.Assembler.clear_singletons()`,
        :meth:`aglyph.assembler.Assembler.clear_borgs()`, or
        :meth:`aglyph.assembler.Assembler.clear_weakrefs()`.

        .. note::
            ``Template.before_clear`` takes precedence over any
            *before_clear* method name specified for the template's
            parent or context.

        .. warning::
            The *before_clear* keyword argument has no meaning for,
            and is ignored by, "prototype" components; specifying it
            for a prototype raises a :class:`RuntimeWarning`.

            For "weakref" components the object may no longer exist
            when *before_clear* would be called; in that case the
            method is **not** called - no warning is issued, but a
            :attr:`logging.WARNING` message is emitted.
        """
        #PYVER: arguments to super() are implicit under Python 3
        super(Template, self).__init__()
        # reject an empty/None ID up front - every template must be
        # addressable by a context-unique identifier
        if not unique_id:
            raise ValueError(
                "%s unique ID must not be None or empty" %
                name_of(self.__class__))
        self._unique_id = unique_id
        self._parent_id = parent_id
        self._after_inject = after_inject
        self._before_clear = before_clear

    @property
    def unique_id(self):
        """Uniquely identifies this template in a context *(read-only)*."""
        return self._unique_id

    @property
    def parent_id(self):
        """Identifies this template's parent template or component
        *(read-only)*.
        """
        return self._parent_id

    @property
    def after_inject(self):
        """The name of the component object method that is called
        after **all** dependencies have been injected *(read-only)*.
        """
        return self._after_inject

    @property
    def before_clear(self):
        """The name of the component object method that is called
        immediately before the object is cleared from cache
        *(read-only)*.

        .. warning::
            This property is not applicable to "prototype" component
            objects, and is **not guaranteed** to be called for
            "weakref" component objects.
        """
        return self._before_clear

    def __str__(self):
        return "<%s %r @%08x>" % (
            name_of(self.__class__), self._unique_id, id(self))

    def __repr__(self):
        return "%s.%s(%r, parent_id=%r, after_inject=%r, before_clear=%r)" % (
            self.__class__.__module__, name_of(self.__class__),
            self._unique_id, self._parent_id, self._after_inject,
            self._before_clear)
@traced
@logged
class Component(Template):
    """Describe a component and the dependencies needed to create a new
    object of that component at runtime.

    Once constructed, the inherited ``args`` (:obj:`list`), ``keywords``
    (:obj:`dict`), and ``attributes``
    (:class:`collections.OrderedDict`) members are modified in-place to
    declare the dependencies that must be injected into objects of this
    component at assembly time::

        component = Component("http.client.HTTPConnection")
        component.args.append("ninthtest.info")
        component.args.append(80)
        component.keywords["strict"] = True
        component.attributes["set_debuglevel"] = 1

    A component may be assembled directly by an
    :class:`aglyph.assembler.Assembler`, may reference other components
    as dependencies (via :class:`Reference`), may itself be used as a
    dependency, and may inherit common dependencies/lifecycle behavior
    from an :class:`aglyph.component.Template` named by *parent_id*.
    """

    __slots__ = [
        "_dotted_name",
        "_factory_name",
        "_member_name",
        "_strategy",
    ]

    def __init__(
            self, component_id, dotted_name=None,
            factory_name=None, member_name=None, strategy=None,
            parent_id=None,
            after_inject=None, before_clear=None):
        """
        :arg str component_id:
            the context-unique identifier for this component; used as the
            component's dotted name when *dotted_name* is not given, in
            which case it **must** be importable (see
            :func:`aglyph.resolve_dotted_name`)
        :keyword str dotted_name:
            an **importable** dotted name
        :keyword str factory_name:
            names a :obj:`callable` member of :attr:`dotted_name` (a
            function, class, staticmethod, or classmethod); the assembler
            calls it to create objects of this component
        :keyword str member_name:
            names **any** member of :attr:`dotted_name`; the member is
            accessed and returned as-is (never called), and
            initialization arguments defined for the component are
            ignored
        :keyword str strategy:
            the component assembly strategy; defaults to
            ``Strategy.PROTOTYPE``, or to "_imported" when *member_name*
            is specified (a :class:`UserWarning` is issued if any other
            strategy is combined with *member_name*)
        :keyword str parent_id:
            the ID of a template or component that describes the default
            dependencies and/or lifecycle methods for this component
        :keyword str after_inject:
            the name of the method called on objects of this component
            after **all** dependencies have been injected (takes
            precedence over the parent's or context's setting)
        :keyword str before_clear:
            the name of the method called on objects of this component
            immediately before they are cleared from cache; meaningless
            for (and ignored with a :class:`UserWarning` by) "prototype"
            and "_imported" components, and not guaranteed to run for
            "weakref" components
        :raise aglyph.AglyphError:
            if both *factory_name* and *member_name* are specified, or if
            strategy "_imported" is requested without *member_name*
        :raise ValueError:
            if *strategy* is not a recognized assembly strategy

        Both *factory_name* and *member_name* may be dot-separated names
        to reference nested members, and both must be reachable from the
        imported :attr:`dotted_name` via attribute access.  They are
        mutually exclusive.

        .. warning::
            The ``Strategy.BORG`` (*"borg"*) assembly strategy is only
            supported for classes that **do not** define or inherit
            ``__slots__``!
        """
        #PYVER: arguments to super() are implicit under Python 3
        super(Component, self).__init__(
            component_id, parent_id=parent_id,
            after_inject=after_inject, before_clear=before_clear)
        # when no dotted name is given, the unique ID doubles as the
        # (importable) dotted name
        self._dotted_name = dotted_name or component_id
        if factory_name and member_name:
            raise AglyphError(
                "only one of factory_name or member_name may be specified")
        self._factory_name = factory_name
        self._member_name = member_name
        # issues/5: member_name implies the "_imported" strategy; any
        # other component defaults to "prototype"
        if strategy is None:
            strategy = "_imported" if member_name else Strategy.PROTOTYPE
        # issues/5: member_name and the "_imported" strategy imply each
        # other
        if member_name and strategy != "_imported":
            warnings.warn(
                ("ignoring strategy %r for component %r -- strategy MUST be "
                 "'_imported' (implicit) if member_name is specified") %
                (strategy, component_id),
                UserWarning)
            strategy = "_imported"
        elif strategy == "_imported" and not member_name:
            raise AglyphError(
                "strategy '_imported' is only valid if member_name is specified")
        if strategy != "_imported" and strategy not in Strategy:
            raise ValueError("unrecognized assembly strategy %r" % strategy)
        self._strategy = strategy
        # issues/5: also see Assembler._call_lifecycle_method, which issues a
        # RuntimeWarning for _imported components that describe an after_inject
        # method
        if before_clear and strategy in (Strategy.PROTOTYPE, "_imported"):
            warnings.warn(
                "ignoring before_clear=%r for %s component with ID %r" %
                (before_clear, strategy, self._unique_id),
                UserWarning)
            self._before_clear = None

    @property
    def dotted_name(self):
        """The importable dotted name for objects of this component
        *(read-only)*.
        """
        return self._dotted_name

    @property
    def factory_name(self):
        """The name of a :obj:`callable` member of :attr:`dotted_name`
        *(read-only)*.

        ``factory_name`` lets Aglyph initialize objects that cannot be
        reached through an importable dotted name alone -- nested
        classes, :obj:`staticmethod`, or :obj:`classmethod`::

            # module.py
            class Example:
                class Nested:
                    pass

        "module.Example.Nested" is not importable, so instead name the
        callable relative to an importable dotted name::

            component = Component(
                "nested-object", dotted_name="module.Example",
                factory_name="Nested")

        or equivalently, with a dot-separated factory name::

            component = Component(
                "nested-object", dotted_name="module",
                factory_name="Example.Nested")

        .. note::
            :attr:`dotted_name` must be **importable**, and
            ``factory_name`` must be accessible from the imported class
            or module via attribute access.
        """
        return self._factory_name

    @property
    def member_name(self):
        """The name of any member of :attr:`dotted_name` *(read-only)*.

        ``member_name`` obtains an object *directly* from an importable
        module or class.  The named member is simply accessed and
        returned -- it is **not** called, even if callable, and
        initialization of the object is bypassed::

            # module.py
            class Example:
                class Nested:
                    pass

            # assembles the module.Example.Nested class *itself*
            component = Component(
                "nested-class", dotted_name="module.Example",
                member_name="Nested")

        or equivalently, with a dot-separated member name::

            component = Component(
                "nested-class", dotted_name="module",
                member_name="Example.Nested")

        .. note::
            :attr:`dotted_name` must be **importable**, and
            ``member_name`` must be accessible from the imported class
            or module via attribute access.

        .. warning::
            Any :attr:`args` and :attr:`keywords` defined for such a
            component are discarded at assembly time (a WARNING-level
            log record is emitted to the "aglyph.assembler.Assembler"
            channel), though :attr:`attributes` are still processed as
            setter injection dependencies.
        """
        return self._member_name

    @property
    def strategy(self):
        """The component assembly strategy *(read-only)*."""
        return self._strategy

    def __repr__(self):
        """Return an eval-style representation of this component."""
        cls = self.__class__
        return (
            "%s.%s(%r, dotted_name=%r, factory_name=%r, member_name=%r, "
            "strategy=%r, parent_id=%r, after_inject=%r, before_clear=%r)"
        ) % (
            cls.__module__, name_of(cls), self._unique_id,
            self._dotted_name, self._factory_name, self._member_name,
            self._strategy, self._parent_id, self._after_inject,
            self._before_clear)
| |
import os
import logging
import zmq
import math
import traceback
import datetime
import time
import multiprocessing
import rethinkdb
import utils
# Public names exported by ``from <module> import *``.
__all__ = [
    'ImportManager',
    'ImportWorker',
]

# Module-level logger; handlers/levels are configured by the host application.
LOG = logging.getLogger(__name__)
class ImportManager(object):
    """Coordinate a batch import of Shotgun entities into the local cache.

    Work items are distributed to a pool of :class:`ImportWorker`
    processes over zmq PUSH/PULL sockets:

    - the manager PUSHes work on the socket bound to
      ``import.zmq_pull_url`` (workers PULL from it), and
    - PULLs results from the socket bound to ``import.zmq_post_url``
      (workers PUSH to it).
    """

    def __init__(self, controller, config):
        super(ImportManager, self).__init__()
        self.controller = controller
        self.config = config
        # Monotonically increasing ID assigned to each posted work item
        self.workID = 0
        # {entityType: [workID, ...]} for work that is still in flight
        self.activeWorkItemsPerType = {}
        # {entityType: {'entityCount': ..., 'pageCount': ..., 'importCount': ...}}
        self.countsPerType = {}
        # {entityType: [entity id, ...]} accumulated from imported pages
        self.idsPerType = {}
        self.workPullSocket = None
        self.workPostSocket = None
        self.totalEntitiesImported = 0
        # Initialize import-run state here as well (not only inside
        # importEntities) so the attributes always exist, even if a
        # handler or post_stat is reached before the first import run.
        self.importFailed = False
        self.importTimestampsPerType = {}

    def importEntities(self, entityConfigs):
        """
        Batch import entities from shotgun into the local shotgun cache.

        Uses multiple processes to speed up retrieval.

        :param entityConfigs: iterable of entity config objects to import
        :raise IOError: if any worker process reported a failure
        """
        LOG.debug("Importing {0} entity types".format(len(entityConfigs)))
        importStartTime = time.time()

        # Reset per-run state
        self.workID = 0
        self.activeWorkItemsPerType = {}
        self.countsPerType = {}
        self.idsPerType = {}
        self.importFailed = False
        self.totalEntitiesImported = 0
        self.importTimestampsPerType = {}

        self.createPostSocket()
        self.createPullSocket()

        processes = self.launchImportProcesses(entityConfigs)
        self.post_countWork(entityConfigs, self.workPostSocket)

        while True:
            work = self.workPullSocket.recv_pyobj()
            if not isinstance(work, dict):
                raise TypeError("Invalid work item, expected dict: {0}".format(work))

            # Mark this work item as no longer in flight
            configType = work['work']['configType']
            activeWorkItems = self.activeWorkItemsPerType[configType]
            workID = work['work']['id']
            activeWorkItems.remove(workID)

            # Dispatch to the matching handle_<type> method
            meth_name = 'handle_{0}'.format(work['type'])
            if hasattr(self, meth_name):
                getattr(self, meth_name)(work)
            else:
                raise ValueError("Unhandled work type: {0}".format(work['type']))

            # handle_entitiesImported pops a type once all of its pages
            # have arrived; when no types remain, the import is complete.
            # NOTE(review): a type whose 'getCount' fails (or that counts
            # zero pages) is never popped, which would hang this loop --
            # confirm whether that can occur in practice.
            if not self.activeWorkItemsPerType:
                break

        for proc in processes:
            LOG.debug("Terminating import worker process: {0}".format(proc.pid))
            proc.terminate()

        timeToImport = (time.time() - importStartTime) * 1000  # ms
        self.totalEntitiesImported = sum(
            c['importCount'] for c in self.countsPerType.values())
        self.post_stat(timeToImport, entityConfigs)

        if self.importFailed:
            # (message typo fixed: was "one ore more")
            raise IOError("Import Process failed for one or more entities, check log for details")

        LOG.debug("Imported {0} entities".format(self.totalEntitiesImported))

    def createPostSocket(self):
        """Create the PUSH socket used to post work items to the workers."""
        workPostContext = zmq.Context()
        self.workPostSocket = workPostContext.socket(zmq.PUSH)
        # Workers PULL their work from this address
        self.workPostSocket.bind(self.config['import.zmq_pull_url'])

    def createPullSocket(self):
        """Create the PULL socket used to receive results from the workers."""
        # local renamed from 'workPullSocket' -- it holds a zmq Context
        workPullContext = zmq.Context()
        self.workPullSocket = workPullContext.socket(zmq.PULL)
        # Workers PUSH their results to this address
        self.workPullSocket.bind(self.config['import.zmq_post_url'])

    def launchImportProcesses(self, entityConfigs):
        """
        Use multiprocessing to start a pool of entity import processes.

        Each of these use zmq as a message queue for work items which
        retrieve information from shotgun.

        :return: the list of started ``multiprocessing.Process`` objects
        """
        # Tried using multiprocessing.Pool
        # but had better luck with Processes directly
        # due to using the importer class and instance methods
        processes = []
        numProcesses = self.config['import.processes']
        for n in range(numProcesses):
            importer = ImportWorker(
                config=self.config,
                entityConfigs=entityConfigs,
            )
            proc = multiprocessing.Process(target=importer.start)
            proc.start()
            processes.append(proc)
            LOG.debug("Launched process {0}/{1}: {2}".format(n + 1, numProcesses, proc.pid))
        # Give time for all the workers to connect
        time.sleep(1)
        return processes

    def handle_exception(self, work):
        """Record that a worker failed to process a work item."""
        entityType = work['work']['configType']
        LOG.error("Import Failed for type '{type}'.\n{tb}".format(
            type=entityType,
            tb=work['data']['traceback']
        ))
        self.importFailed = True

    def handle_counts(self, work):
        """Queue one 'getEntities' work item per page of the counted type."""
        counts = work['data']
        entityType = work['work']['configType']
        self.countsPerType[entityType] = counts
        for page in range(counts['pageCount']):
            getEntitiesWork = {
                'type': 'getEntities',
                'id': self.workID,
                'page': page + 1,  # shotgun pages are 1-based
                'configType': entityType
            }
            self.workPostSocket.send_pyobj(getEntitiesWork)
            self.activeWorkItemsPerType[entityType].append(self.workID)
            self.workID += 1

    def handle_entitiesImported(self, work):
        """Store one imported page of entities; when the type is complete,
        reconcile the cache against the imported IDs and record history."""
        entities = work['data']['entities']
        entityType = work['work']['configType']

        pageCount = self.countsPerType[entityType]['pageCount']
        self.countsPerType[entityType].setdefault('importCount', 0)
        self.countsPerType[entityType]['importCount'] += len(entities)
        self.idsPerType.setdefault(entityType, []).extend([e['id'] for e in entities])

        LOG.info("Imported {currCount}/{totalCount} entities for type '{typ}' on page {page}/{pageCount}".format(
            currCount=self.countsPerType[entityType]['importCount'],
            totalCount=self.countsPerType[entityType]['entityCount'],
            typ=entityType,
            page=work['work']['page'],
            pageCount=pageCount,
        ))

        entityConfig = self.controller.entityConfigManager.getConfigForType(entityType)
        self.controller.post_entities(entityConfig, entities)

        # Store the timestamp for the import
        # We'll use this to discard old EventLogEntities that happened before the import
        # However, eventlogentry's that are created while importing will still be applied
        timestamps = self.importTimestampsPerType.setdefault(entityType, {})
        timestamps.setdefault('startImportTimestamp', work['data']['startImportTimestamp'])

        if not len(self.activeWorkItemsPerType[entityType]):
            LOG.info("Imported all entities for type '{0}'".format(entityType))

            # Get the set of IDs already cached so entities that no longer
            # exist in shotgun can be removed
            cachedEntityIDs = set(rethinkdb
                .table(entityConfig['table'])
                .map(lambda asset: asset['id'])
                .coerce_to('array')
                .run(self.controller.rethink)
            )
            importedEntityIDs = set(self.idsPerType[entityType])
            diffIDs = cachedEntityIDs.difference(importedEntityIDs)

            if len(diffIDs):
                # Delete these extra entities
                # This allows us to update the cache in place without
                # having to drop the table before the import, allowing for
                # a more seamless import / update process
                LOG.info("Deleting extra entities found in cache with IDs: {0}".format(diffIDs))
                # NOTE(review): database name 'shotguncache' is hard-coded
                # here -- confirm it shouldn't come from config.
                rethinkdb.db('shotguncache').table(entityConfig['table']).get_all(rethinkdb.args(diffIDs)).delete().run(self.controller.rethink)

            # Record the config hash and import timestamps so later runs can
            # detect config changes and skip stale event log entries
            self.config.history.setdefault('config_hashes', {})[entityType] = entityConfig.hash
            self.config.history.setdefault('cached_entity_types', {})[entityType] = self.importTimestampsPerType[entityType]
            self.config.history.save()

            self.activeWorkItemsPerType.pop(entityType)

    def post_countWork(self, entityConfigs, workSocket):
        """
        Send work items to the import processes to load information
        about the counts of the entities.
        """
        for config in entityConfigs:
            work = {'type': 'getCount', 'id': self.workID, 'configType': config.type}
            self.activeWorkItemsPerType.setdefault(config.type, []).append(self.workID)
            workSocket.send_pyobj(work)
            self.workID += 1
            self.post_entityConfig(config)

    def post_entityConfig(self, entityConfig):
        """Store the (field-filtered) schema for an entity type in the db."""
        LOG.debug("Posting entity config")

        schemaTable = self.config['rethink_schema_table']
        if schemaTable not in rethinkdb.table_list().run(self.controller.rethink):
            LOG.debug("Creating table for schema: {0}".format(entityConfig.type))
            rethinkdb.table_create(schemaTable, primary_key='type').run(self.controller.rethink)

        # Only keep schema entries for the fields this config caches
        entitySchema = self.controller.entityConfigManager.schema[entityConfig.type]
        cacheSchema = dict([(field, s) for field, s in entitySchema.items() if field in entityConfig['fields']])

        # Only dump the full schema when logging more verbosely than DEBUG (10)
        if LOG.getEffectiveLevel() < 10:
            LOG.debug("Cache Schema:\n{0}".format(utils.prettyJson(cacheSchema)))

        config = {}
        config['type'] = entityConfig.type
        config['schema'] = cacheSchema
        config['created_at'] = datetime.datetime.utcnow().isoformat()

        result = rethinkdb.table(schemaTable).insert(config, conflict="replace").run(self.controller.rethink)
        if result['errors']:
            raise IOError(result['first_error'])

    def post_stat(self, totalImportTime, entityConfigs):
        """
        Post related stats about the import process to the db to provide analytics.
        These are posted based on the overall importEntities process, not individual imports.
        """
        stat = {
            'type': 'import_entities',
            'types_imported_count': len(entityConfigs),
            'entity_types': [c.type for c in entityConfigs],
            'total_entities_imported': self.totalEntitiesImported,
            'duration': round(totalImportTime, 3),
            'created_at': datetime.datetime.utcnow().isoformat(),
            'processes': self.config['import.processes'],
            'batch_size': self.config['import.batch_size'],
            'import_failed': self.importFailed,
            # One find() call per page, plus one summarize() call per type
            'total_shotgun_calls': sum(c['pageCount'] for c in self.countsPerType.values()) + len(entityConfigs),
        }
        self.controller.post_stat(stat)
class ImportWorker(object):
    """Worker-process entry point that imports entities from Shotgun.

    Pulls work items from the :class:`ImportManager` over zmq, executes
    them against the Shotgun API, and pushes results (or exception
    reports) back.
    """

    def __init__(self, config, entityConfigs):
        super(ImportWorker, self).__init__()
        self.config = config
        # Index the entity configs by type for quick lookup
        self.entityConfigs = dict([(c.type, c) for c in entityConfigs])
        self.sg = None
        self.incomingContext = None
        self.workPullContext = None
        self.workPullSocket = None
        self.workPostContext = None
        self.workPostSocket = None

    def start(self):
        """Connect to shotgun and zmq, then run the work loop forever."""
        self.sg = self.config.createShotgunConnection()
        self.createPullSocket()
        self.createPostSocket()
        self.run()

    def createPostSocket(self):
        """Create the PUSH socket used to post results to the manager."""
        self.workPostContext = zmq.Context()
        self.workPostSocket = self.workPostContext.socket(zmq.PUSH)
        self.workPostSocket.connect(self.config['import.zmq_post_url'])

    def createPullSocket(self):
        """Create the PULL socket used to receive work from the manager."""
        # Keep the context and the socket in separate attributes (the
        # previous code overwrote the context reference with the socket).
        self.workPullContext = zmq.Context()
        self.workPullSocket = self.workPullContext.socket(zmq.PULL)
        self.workPullSocket.connect(self.config['import.zmq_pull_url'])

    def run(self):
        """Dispatch incoming work items to handle_<type> methods forever.

        Exceptions raised by a handler are reported back to the manager
        as an 'exception' result instead of killing the process.
        """
        LOG.debug("Running Entity Import Loop")
        while True:
            work = self.workPullSocket.recv_pyobj()
            if not isinstance(work, dict):
                raise TypeError("Invalid work item, expected dict: {0}".format(work))

            meth_name = 'handle_{0}'.format(work['type'])
            if hasattr(self, meth_name):
                try:
                    getattr(self, meth_name)(work)
                except Exception as e:  # py2.6+/py3 syntax (was "except Exception, e")
                    result = {
                        'type': 'exception',
                        'data': {
                            'exc': e,
                            'traceback': traceback.format_exc()
                        },
                        'work': work
                    }
                    self.workPostSocket.send_pyobj(result)
            else:
                raise ValueError("Unhandled work type: {0}".format(work['type']))

    def handle_getCount(self, work):
        """Count entities of the requested type and report the page count."""
        LOG.debug("Getting counts for type '{0}' on process {1}".format(work['configType'], os.getpid()))
        entityConfig = self.entityConfigs[work['configType']]

        entityCount = self.getEntityCount(entityConfig)
        # Number of find() pages required at the configured batch size
        pageCount = int(math.ceil(entityCount / float(self.config['import.batch_size'])))

        result = {
            'type': 'counts',
            'data': {
                'entityCount': entityCount,
                'pageCount': pageCount,
            },
            'work': work,
        }
        self.workPostSocket.send_pyobj(result)

    def handle_getEntities(self, work):
        """Import one page of entities and post them back to the manager."""
        LOG.debug("Importing Entities for type '{0}' on page {1} on process {2}".format(work['configType'], work['page'], os.getpid()))
        # Timestamp taken *before* the find() call; the manager uses it to
        # discard event log entries that predate the import
        startImportTimestamp = datetime.datetime.utcnow().isoformat()
        entityConfig = self.entityConfigs[work['configType']]

        entities = self.getEntities(entityConfig, work['page'])

        result = {
            'type': 'entitiesImported',
            'data': {
                'entities': entities,
                'startImportTimestamp': startImportTimestamp,
            },
            'work': work,
        }
        self.workPostSocket.send_pyobj(result)

    def getEntities(self, entityConfig, page):
        """Retrieve one page of entities of the configured type from shotgun."""
        # Build kwargs outside the try block so the error log below can
        # always reference it safely.
        kwargs = dict(
            entity_type=entityConfig.type,
            fields=entityConfig.get('fields', {}).keys(),
            filters=[],
            order=[{'column': 'id', 'direction': 'asc'}],
            limit=self.config['import.batch_size'],
            page=page
        )
        try:
            result = self.sg.find(**kwargs)
        except Exception:
            # Fixed: the previous message referenced a non-existent
            # 'filter_operator' key, so format() raised KeyError and
            # masked the real error.
            LOG.exception("Type: {entity_type}, filters: {filters}, fields: {fields}".format(**kwargs))
            raise
        return result

    def getEntityCount(self, entityConfig):
        """Return the total number of entities of this type in shotgun."""
        result = self.sg.summarize(
            entity_type=entityConfig.type,
            filters=[],
            summary_fields=[{'field': 'id', 'type': 'count'}],
        )
        return result['summaries']['id']
| |
# -*- coding: utf-8 -*-
"""
babel.support
~~~~~~~~~~~~~
Several classes and functions that help with integrating and using Babel
in applications.
.. note: the code in this module is not used by Babel itself
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import gettext
import locale
from babel.core import Locale
from babel.dates import format_date, format_datetime, format_time, \
format_timedelta
from babel.numbers import format_number, format_decimal, format_currency, \
format_percent, format_scientific
from babel._compat import PY2, text_type, text_to_native
class Format(object):
    """Wrapper class providing the various date and number formatting functions
    bound to a specific locale and time-zone.

    >>> from babel.util import UTC
    >>> from datetime import date
    >>> fmt = Format('en_US', UTC)
    >>> fmt.date(date(2007, 4, 1))
    u'Apr 1, 2007'
    >>> fmt.decimal(1.2345)
    u'1.234'
    """

    def __init__(self, locale, tzinfo=None):
        """Initialize the formatter.

        :param locale: the locale identifier or `Locale` instance
        :param tzinfo: the time-zone info (a `tzinfo` instance or `None`)
        """
        self.locale = Locale.parse(locale)
        self.tzinfo = tzinfo

    def date(self, date=None, format='medium'):
        """Return a date formatted according to the given pattern.

        >>> from datetime import date
        >>> fmt = Format('en_US')
        >>> fmt.date(date(2007, 4, 1))
        u'Apr 1, 2007'
        """
        return format_date(date, format=format, locale=self.locale)

    def datetime(self, datetime=None, format='medium'):
        """Return a date and time formatted according to the given pattern.

        >>> from datetime import datetime
        >>> from pytz import timezone
        >>> fmt = Format('en_US', tzinfo=timezone('US/Eastern'))
        >>> fmt.datetime(datetime(2007, 4, 1, 15, 30))
        u'Apr 1, 2007, 11:30:00 AM'
        """
        return format_datetime(
            datetime, format=format, tzinfo=self.tzinfo, locale=self.locale)

    def time(self, time=None, format='medium'):
        """Return a time formatted according to the given pattern.

        >>> from datetime import datetime
        >>> from pytz import timezone
        >>> fmt = Format('en_US', tzinfo=timezone('US/Eastern'))
        >>> fmt.time(datetime(2007, 4, 1, 15, 30))
        u'11:30:00 AM'
        """
        return format_time(
            time, format=format, tzinfo=self.tzinfo, locale=self.locale)

    def timedelta(self, delta, granularity='second', threshold=.85,
                  format='medium', add_direction=False):
        """Return a time delta according to the rules of the given locale.

        >>> from datetime import timedelta
        >>> fmt = Format('en_US')
        >>> fmt.timedelta(timedelta(weeks=11))
        u'3 months'
        """
        return format_timedelta(
            delta, granularity=granularity, threshold=threshold,
            format=format, add_direction=add_direction, locale=self.locale)

    def number(self, number):
        """Return an integer number formatted for the locale.

        >>> fmt = Format('en_US')
        >>> fmt.number(1099)
        u'1,099'
        """
        return format_number(number, locale=self.locale)

    def decimal(self, number, format=None):
        """Return a decimal number formatted for the locale.

        >>> fmt = Format('en_US')
        >>> fmt.decimal(1.2345)
        u'1.234'
        """
        return format_decimal(number, format=format, locale=self.locale)

    def currency(self, number, currency):
        """Return a number in the given currency formatted for the locale."""
        return format_currency(number, currency, locale=self.locale)

    def percent(self, number, format=None):
        """Return a number formatted as percentage for the locale.

        >>> fmt = Format('en_US')
        >>> fmt.percent(0.34)
        u'34%'
        """
        return format_percent(number, format=format, locale=self.locale)

    def scientific(self, number):
        """Return a number formatted using scientific notation for the locale."""
        return format_scientific(number, locale=self.locale)
class LazyProxy(object):
    """Class for proxy objects that delegate to a specified function to
    evaluate the actual object.

    >>> def greeting(name='world'):
    ...     return 'Hello, %s!' % name
    >>> lazy_greeting = LazyProxy(greeting, name='Joe')
    >>> print(lazy_greeting)
    Hello, Joe!
    >>> u' ' + lazy_greeting
    u' Hello, Joe!'
    >>> u'(%s)' % lazy_greeting
    u'(Hello, Joe!)'

    This can be used, for example, to implement lazy translation functions that
    delay the actual translation until the string is actually used. The
    rationale for such behavior is that the locale of the user may not always
    be available. In web applications, you only know the locale when processing
    a request.

    The proxy implementation attempts to be as complete as possible, so that
    the lazy objects should mostly work as expected, for example for sorting:

    >>> greetings = [
    ...     LazyProxy(greeting, 'world'),
    ...     LazyProxy(greeting, 'Joe'),
    ...     LazyProxy(greeting, 'universe'),
    ... ]
    >>> greetings.sort()
    >>> for greeting in greetings:
    ...     print(greeting)
    Hello, Joe!
    Hello, universe!
    Hello, world!
    """
    __slots__ = ['_func', '_args', '_kwargs', '_value', '_is_cache_enabled']

    def __init__(self, func, *args, **kwargs):
        """Store *func* and its arguments without calling it.

        :param func: the callable that (lazily) produces the proxied value
        :keyword enable_cache: when true (the default), *func* is called at
            most once and its result cached
        """
        is_cache_enabled = kwargs.pop('enable_cache', True)
        # Avoid triggering our own __setattr__ implementation, which would
        # delegate to the (not yet computed) proxied value
        object.__setattr__(self, '_func', func)
        object.__setattr__(self, '_args', args)
        object.__setattr__(self, '_kwargs', kwargs)
        object.__setattr__(self, '_is_cache_enabled', is_cache_enabled)
        object.__setattr__(self, '_value', None)

    @property
    def value(self):
        """The proxied value, computed on first access.

        NOTE(review): ``None`` doubles as the "not yet computed" marker, so
        a func that legitimately returns ``None`` is re-evaluated on every
        access even when caching is enabled -- confirm this is acceptable.
        """
        if self._value is None:
            value = self._func(*self._args, **self._kwargs)
            if not self._is_cache_enabled:
                return value
            object.__setattr__(self, '_value', value)
        return self._value

    # -- container / conversion protocol, all delegated to the value

    def __contains__(self, key):
        return key in self.value

    def __nonzero__(self):
        # Truth protocol under Python 2
        return bool(self.value)

    # Truth protocol under Python 3.  Without this alias, bool(proxy)
    # would fall back to __len__ and raise TypeError for proxied values
    # that have no length (e.g. ints).
    __bool__ = __nonzero__

    def __dir__(self):
        return dir(self.value)

    def __iter__(self):
        return iter(self.value)

    def __len__(self):
        return len(self.value)

    def __str__(self):
        return str(self.value)

    def __unicode__(self):
        # Python 2 only; 'unicode' is not defined under Python 3, but this
        # method is never invoked there.
        return unicode(self.value)

    # -- arithmetic / formatting operators

    def __add__(self, other):
        return self.value + other

    def __radd__(self, other):
        return other + self.value

    def __mod__(self, other):
        return self.value % other

    def __rmod__(self, other):
        return other % self.value

    def __mul__(self, other):
        return self.value * other

    def __rmul__(self, other):
        return other * self.value

    def __call__(self, *args, **kwargs):
        return self.value(*args, **kwargs)

    # -- rich comparisons (make proxies sortable alongside real values)
    # NOTE(review): defining __eq__ without __hash__ leaves the proxy
    # unhashable under Python 3; kept as-is to preserve behavior.

    def __lt__(self, other):
        return self.value < other

    def __le__(self, other):
        return self.value <= other

    def __eq__(self, other):
        return self.value == other

    def __ne__(self, other):
        return self.value != other

    def __gt__(self, other):
        return self.value > other

    def __ge__(self, other):
        return self.value >= other

    # -- attribute and item access, delegated to the value

    def __delattr__(self, name):
        delattr(self.value, name)

    def __getattr__(self, name):
        return getattr(self.value, name)

    def __setattr__(self, name, value):
        setattr(self.value, name, value)

    def __delitem__(self, key):
        del self.value[key]

    def __getitem__(self, key):
        return self.value[key]

    def __setitem__(self, key, value):
        self.value[key] = value

    # -- copy support: copies stay lazy (the func is carried over, not
    #    the computed value)

    def __copy__(self):
        return LazyProxy(
            self._func,
            enable_cache=self._is_cache_enabled,
            *self._args,
            **self._kwargs
        )

    def __deepcopy__(self, memo):
        from copy import deepcopy
        return LazyProxy(
            deepcopy(self._func, memo),
            enable_cache=deepcopy(self._is_cache_enabled, memo),
            *deepcopy(self._args, memo),
            **deepcopy(self._kwargs, memo)
        )
class NullTranslations(gettext.NullTranslations, object):
DEFAULT_DOMAIN = None
def __init__(self, fp=None):
"""Initialize a simple translations class which is not backed by a
real catalog. Behaves similar to gettext.NullTranslations but also
offers Babel's on *gettext methods (e.g. 'dgettext()').
:param fp: a file-like object (ignored in this class)
"""
# These attributes are set by gettext.NullTranslations when a catalog
# is parsed (fp != None). Ensure that they are always present because
# some *gettext methods (including '.gettext()') rely on the attributes.
self._catalog = {}
self.plural = lambda n: int(n != 1)
super(NullTranslations, self).__init__(fp=fp)
self.files = list(filter(None, [getattr(fp, 'name', None)]))
self.domain = self.DEFAULT_DOMAIN
self._domains = {}
def dgettext(self, domain, message):
"""Like ``gettext()``, but look the message up in the specified
domain.
"""
return self._domains.get(domain, self).gettext(message)
def ldgettext(self, domain, message):
"""Like ``lgettext()``, but look the message up in the specified
domain.
"""
return self._domains.get(domain, self).lgettext(message)
def udgettext(self, domain, message):
"""Like ``ugettext()``, but look the message up in the specified
domain.
"""
return self._domains.get(domain, self).ugettext(message)
# backward compatibility with 0.9
dugettext = udgettext
def dngettext(self, domain, singular, plural, num):
"""Like ``ngettext()``, but look the message up in the specified
domain.
"""
return self._domains.get(domain, self).ngettext(singular, plural, num)
def ldngettext(self, domain, singular, plural, num):
"""Like ``lngettext()``, but look the message up in the specified
domain.
"""
return self._domains.get(domain, self).lngettext(singular, plural, num)
def udngettext(self, domain, singular, plural, num):
"""Like ``ungettext()`` but look the message up in the specified
domain.
"""
return self._domains.get(domain, self).ungettext(singular, plural, num)
# backward compatibility with 0.9
dungettext = udngettext
# Most of the downwards code, until it get's included in stdlib, from:
# http://bugs.python.org/file10036/gettext-pgettext.patch
#
# The encoding of a msgctxt and a msgid in a .mo file is
# msgctxt + "\x04" + msgid (gettext version >= 0.15)
CONTEXT_ENCODING = '%s\x04%s'
def pgettext(self, context, message):
"""Look up the `context` and `message` id in the catalog and return the
corresponding message string, as an 8-bit string encoded with the
catalog's charset encoding, if known. If there is no entry in the
catalog for the `message` id and `context` , and a fallback has been
set, the look up is forwarded to the fallback's ``pgettext()``
method. Otherwise, the `message` id is returned.
"""
ctxt_msg_id = self.CONTEXT_ENCODING % (context, message)
missing = object()
tmsg = self._catalog.get(ctxt_msg_id, missing)
if tmsg is missing:
if self._fallback:
return self._fallback.pgettext(context, message)
return message
# Encode the Unicode tmsg back to an 8-bit string, if possible
if self._output_charset:
return text_to_native(tmsg, self._output_charset)
elif self._charset:
return text_to_native(tmsg, self._charset)
return tmsg
def lpgettext(self, context, message):
"""Equivalent to ``pgettext()``, but the translation is returned in the
preferred system encoding, if no other encoding was explicitly set with
``bind_textdomain_codeset()``.
"""
ctxt_msg_id = self.CONTEXT_ENCODING % (context, message)
missing = object()
tmsg = self._catalog.get(ctxt_msg_id, missing)
if tmsg is missing:
if self._fallback:
return self._fallback.lpgettext(context, message)
return message
if self._output_charset:
return tmsg.encode(self._output_charset)
return tmsg.encode(locale.getpreferredencoding())
def npgettext(self, context, singular, plural, num):
"""Do a plural-forms lookup of a message id. `singular` is used as the
message id for purposes of lookup in the catalog, while `num` is used to
determine which plural form to use. The returned message string is an
8-bit string encoded with the catalog's charset encoding, if known.
If the message id for `context` is not found in the catalog, and a
fallback is specified, the request is forwarded to the fallback's
``npgettext()`` method. Otherwise, when ``num`` is 1 ``singular`` is
returned, and ``plural`` is returned in all other cases.
"""
ctxt_msg_id = self.CONTEXT_ENCODING % (context, singular)
try:
tmsg = self._catalog[(ctxt_msg_id, self.plural(num))]
if self._output_charset:
return text_to_native(tmsg, self._output_charset)
elif self._charset:
return text_to_native(tmsg, self._charset)
return tmsg
except KeyError:
if self._fallback:
return self._fallback.npgettext(context, singular, plural, num)
if num == 1:
return singular
else:
return plural
def lnpgettext(self, context, singular, plural, num):
"""Equivalent to ``npgettext()``, but the translation is returned in the
preferred system encoding, if no other encoding was explicitly set with
``bind_textdomain_codeset()``.
"""
ctxt_msg_id = self.CONTEXT_ENCODING % (context, singular)
try:
tmsg = self._catalog[(ctxt_msg_id, self.plural(num))]
if self._output_charset:
return tmsg.encode(self._output_charset)
return tmsg.encode(locale.getpreferredencoding())
except KeyError:
if self._fallback:
return self._fallback.lnpgettext(context, singular, plural, num)
if num == 1:
return singular
else:
return plural
def upgettext(self, context, message):
"""Look up the `context` and `message` id in the catalog and return the
corresponding message string, as a Unicode string. If there is no entry
in the catalog for the `message` id and `context`, and a fallback has
been set, the look up is forwarded to the fallback's ``upgettext()``
method. Otherwise, the `message` id is returned.
"""
ctxt_message_id = self.CONTEXT_ENCODING % (context, message)
missing = object()
tmsg = self._catalog.get(ctxt_message_id, missing)
if tmsg is missing:
if self._fallback:
return self._fallback.upgettext(context, message)
return text_type(message)
return tmsg
def unpgettext(self, context, singular, plural, num):
"""Do a plural-forms lookup of a message id. `singular` is used as the
message id for purposes of lookup in the catalog, while `num` is used to
determine which plural form to use. The returned message string is a
Unicode string.
If the message id for `context` is not found in the catalog, and a
fallback is specified, the request is forwarded to the fallback's
``unpgettext()`` method. Otherwise, when `num` is 1 `singular` is
returned, and `plural` is returned in all other cases.
"""
ctxt_message_id = self.CONTEXT_ENCODING % (context, singular)
try:
tmsg = self._catalog[(ctxt_message_id, self.plural(num))]
except KeyError:
if self._fallback:
return self._fallback.unpgettext(context, singular, plural, num)
if num == 1:
tmsg = text_type(singular)
else:
tmsg = text_type(plural)
return tmsg
def dpgettext(self, domain, context, message):
"""Like `pgettext()`, but look the message up in the specified
`domain`.
"""
return self._domains.get(domain, self).pgettext(context, message)
def udpgettext(self, domain, context, message):
"""Like `upgettext()`, but look the message up in the specified
`domain`.
"""
return self._domains.get(domain, self).upgettext(context, message)
# backward compatibility with 0.9
dupgettext = udpgettext
def ldpgettext(self, domain, context, message):
"""Equivalent to ``dpgettext()``, but the translation is returned in the
preferred system encoding, if no other encoding was explicitly set with
``bind_textdomain_codeset()``.
"""
return self._domains.get(domain, self).lpgettext(context, message)
def dnpgettext(self, domain, context, singular, plural, num):
"""Like ``npgettext``, but look the message up in the specified
`domain`.
"""
return self._domains.get(domain, self).npgettext(context, singular,
plural, num)
def udnpgettext(self, domain, context, singular, plural, num):
"""Like ``unpgettext``, but look the message up in the specified
`domain`.
"""
return self._domains.get(domain, self).unpgettext(context, singular,
plural, num)
# backward compatibility with 0.9
dunpgettext = udnpgettext
def ldnpgettext(self, domain, context, singular, plural, num):
"""Equivalent to ``dnpgettext()``, but the translation is returned in
the preferred system encoding, if no other encoding was explicitly set
with ``bind_textdomain_codeset()``.
"""
return self._domains.get(domain, self).lnpgettext(context, singular,
plural, num)
    # Python 3's gettext dropped the u-prefixed method names; alias the
    # Unicode API to the plain gettext/ngettext implementations so callers
    # can use either spelling across Python versions.
    if not PY2:
        ugettext = gettext.NullTranslations.gettext
        ungettext = gettext.NullTranslations.ngettext
class Translations(NullTranslations, gettext.GNUTranslations):
    """An extended translation catalog class."""
    DEFAULT_DOMAIN = 'messages'
    def __init__(self, fp=None, domain=None):
        """Initialize the translations catalog.
        :param fp: the file-like object the translation should be read from
        :param domain: the message domain (default: 'messages')
        """
        super(Translations, self).__init__(fp=fp)
        self.domain = domain or self.DEFAULT_DOMAIN
    if not PY2:
        # Python 3 dropped the u-prefixed gettext API; keep the old names.
        ugettext = gettext.GNUTranslations.gettext
        ungettext = gettext.GNUTranslations.ngettext
    @classmethod
    def load(cls, dirname=None, locales=None, domain=None):
        """Load translations from the given directory.
        :param dirname: the directory containing the ``MO`` files
        :param locales: the list of locales in order of preference (items in
                        this list can be either `Locale` objects or locale
                        strings)
        :param domain: the message domain (default: 'messages')
        """
        if locales is not None:
            if not isinstance(locales, (list, tuple)):
                locales = [locales]
            locales = [str(loc) for loc in locales]
        domain = domain or cls.DEFAULT_DOMAIN
        mo_path = gettext.find(domain, dirname, locales)
        if not mo_path:
            # No catalog found for any requested locale: hand back a no-op
            # catalog instead of failing.
            return NullTranslations()
        with open(mo_path, 'rb') as fileobj:
            return cls(fp=fileobj, domain=domain)
    def __repr__(self):
        version = self._info.get('project-id-version')
        return '<%s: "%s">' % (type(self).__name__, version)
    def add(self, translations, merge=True):
        """Add the given translations to the catalog.
        If the domain of the translations is different than that of the
        current catalog, they are added as a catalog that is only accessible
        by the various ``d*gettext`` functions.
        :param translations: the `Translations` instance with the messages to
                             add
        :param merge: whether translations for message domains that have
                      already been added should be merged with the existing
                      translations
        """
        domain = getattr(translations, 'domain', self.DEFAULT_DOMAIN)
        if merge and domain == self.domain:
            # Same domain as this catalog itself: merge straight into it.
            return self.merge(translations)
        current = self._domains.get(domain)
        if merge and current is not None:
            current.merge(translations)
        else:
            translations.add_fallback(self)
            self._domains[domain] = translations
        return self
    def merge(self, translations):
        """Merge the given translations into the catalog.
        Message translations in the specified catalog override any messages
        with the same identifier in the existing catalog.
        :param translations: the `Translations` instance with the messages to
                             merge
        """
        if isinstance(translations, gettext.GNUTranslations):
            self._catalog.update(translations._catalog)
            if isinstance(translations, Translations):
                self.files.extend(translations.files)
        return self
| |
import os
import os.path as op
import warnings
import gc
from nose.tools import assert_true, assert_raises
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
from mne.datasets import testing
from mne.io import Raw
from mne import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, write_forward_solution,
convert_forward_solution)
from mne import SourceEstimate, pick_types_forward, read_evokeds
from mne.label import read_label
from mne.utils import (requires_mne, run_subprocess, _TempDir,
run_tests_if_main, slow_test)
from mne.forward import (restrict_forward_to_stc, restrict_forward_to_label,
Forward)
# Root of the mne-testing-data checkout; download=False never fetches, and
# the tests below are gated by @testing.requires_testing_data.
data_path = testing.data_path(download=False)
fname_meeg = op.join(data_path, 'MEG', 'sample',
                     'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_meeg_grad = op.join(data_path, 'MEG', 'sample',
                          'sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif')
# Small fixture files that ship inside the mne package itself.
fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
                    'test_raw.fif')
fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                       'data', 'test-ave.fif')
fname_mri = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-trans.fif')
subjects_dir = os.path.join(data_path, 'subjects')
fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
def compare_forwards(f1, f2):
    """Helper to compare two potentially converted forward solutions"""
    # Leadfield data must match numerically, metadata exactly.
    assert_allclose(f1['sol']['data'], f2['sol']['data'])
    assert_equal(f1['sol']['ncol'], f2['sol']['ncol'])
    assert_allclose(f1['source_nn'], f2['source_nn'])
    grad1, grad2 = f1['sol_grad'], f2['sol_grad']
    if grad1 is None:
        assert_true(grad2 is None)
    else:
        assert_true(grad2 is not None)
        assert_allclose(grad1['data'], grad2['data'])
        assert_equal(grad1['ncol'], grad2['ncol'])
    for key in ('source_ori', 'surf_ori'):
        assert_equal(f1[key], f2[key])
@testing.requires_testing_data
def test_convert_forward():
    """Test converting forward solution between different representations
    """
    fwd = read_forward_solution(fname_meeg_grad)
    assert_true(repr(fwd))
    assert_true(isinstance(fwd, Forward))
    # look at surface orientation: in-memory conversion must match the
    # surf_ori-on-read result
    fwd_surf = convert_forward_solution(fwd, surf_ori=True)
    fwd_surf_io = read_forward_solution(fname_meeg_grad, surf_ori=True)
    compare_forwards(fwd_surf, fwd_surf_io)
    # free the large solutions promptly to keep the test's peak memory down
    del fwd_surf_io
    gc.collect()
    # go back
    fwd_new = convert_forward_solution(fwd_surf, surf_ori=False)
    assert_true(repr(fwd_new))
    assert_true(isinstance(fwd_new, Forward))
    compare_forwards(fwd, fwd_new)
    # now go to fixed
    fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=False,
                                         force_fixed=True)
    del fwd_surf
    gc.collect()
    assert_true(repr(fwd_fixed))
    assert_true(isinstance(fwd_fixed, Forward))
    fwd_fixed_io = read_forward_solution(fname_meeg_grad, surf_ori=False,
                                         force_fixed=True)
    compare_forwards(fwd_fixed, fwd_fixed_io)
    del fwd_fixed_io
    gc.collect()
    # now go back to cartesian (original condition)
    fwd_new = convert_forward_solution(fwd_fixed)
    assert_true(repr(fwd_new))
    assert_true(isinstance(fwd_new, Forward))
    compare_forwards(fwd, fwd_new)
    del fwd, fwd_new, fwd_fixed
    gc.collect()
@slow_test
@testing.requires_testing_data
def test_io_forward():
    """Test IO for forward solutions
    """
    temp_dir = _TempDir()
    # do extensive tests with MEEG + grad
    n_channels, n_src = 366, 108
    fwd = read_forward_solution(fname_meeg_grad)
    assert_true(isinstance(fwd, Forward))
    fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
    leadfield = fwd['sol']['data']
    assert_equal(leadfield.shape, (n_channels, n_src))
    assert_equal(len(fwd['sol']['row_names']), n_channels)
    # round-trip: what we write must match what we read back
    fname_temp = op.join(temp_dir, 'test-fwd.fif')
    write_forward_solution(fname_temp, fwd, overwrite=True)
    fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
    fwd_read = read_forward_solution(fname_temp, surf_ori=True)
    leadfield = fwd_read['sol']['data']
    assert_equal(leadfield.shape, (n_channels, n_src))
    assert_equal(len(fwd_read['sol']['row_names']), n_channels)
    assert_equal(len(fwd_read['info']['chs']), n_channels)
    assert_true('dev_head_t' in fwd_read['info'])
    assert_true('mri_head_t' in fwd_read)
    assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])
    fwd = read_forward_solution(fname_meeg_grad, force_fixed=True)
    leadfield = fwd['sol']['data']
    # fixed orientation keeps one column per source instead of three; use
    # floor division so the expected shape stays an int under Python 3
    # (previously `n_src / 3` produced a float and relied on int == float)
    assert_equal(leadfield.shape, (n_channels, n_src // 3))
    assert_equal(len(fwd['sol']['row_names']), n_channels)
    assert_equal(len(fwd['info']['chs']), n_channels)
    assert_true('dev_head_t' in fwd['info'])
    assert_true('mri_head_t' in fwd)
    assert_true(fwd['surf_ori'])
    # test warnings on bad filenames
    fwd = read_forward_solution(fname_meeg_grad)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        fwd_badname = op.join(temp_dir, 'test-bad-name.fif.gz')
        write_forward_solution(fwd_badname, fwd)
        read_forward_solution(fwd_badname)
    # one naming warning each from the write and the read above
    assert_true(len(w) == 2)
    fwd = read_forward_solution(fname_meeg)
    write_forward_solution(fname_temp, fwd, overwrite=True)
    fwd_read = read_forward_solution(fname_temp)
    compare_forwards(fwd, fwd_read)
@testing.requires_testing_data
def test_apply_forward():
    """Test projection of source space data to sensor space
    """
    start = 0
    stop = 5
    # NOTE(review): n_times is stop - start - 1, not stop - start --
    # presumably matching apply_forward's sample-window semantics; confirm.
    n_times = stop - start - 1
    sfreq = 10.0
    t_start = 0.123
    fwd = read_forward_solution(fname_meeg, force_fixed=True)
    fwd = pick_types_forward(fwd, meg=True)
    assert_true(isinstance(fwd, Forward))
    vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
    # all-ones STC: projected sensor data reduces to the gain row sums
    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
    gain_sum = np.sum(fwd['sol']['data'], axis=1)
    # Evoked
    with warnings.catch_warnings(record=True) as w:
        evoked = read_evokeds(fname_evoked, condition=0)
        evoked = apply_forward(fwd, stc, evoked, start=start, stop=stop)
        assert_equal(len(w), 2)
    data = evoked.data
    times = evoked.times
    # do some tests
    assert_array_almost_equal(evoked.info['sfreq'], sfreq)
    assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
    assert_array_almost_equal(times[0], t_start)
    assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)
    # Raw
    raw = Raw(fname_raw)
    raw_proj = apply_forward_raw(fwd, stc, raw, start=start, stop=stop)
    data, times = raw_proj[:, :]
    # do some tests
    assert_array_almost_equal(raw_proj.info['sfreq'], sfreq)
    assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
    # raw timing is only accurate to one sample
    atol = 1. / sfreq
    assert_allclose(raw_proj.first_samp / sfreq, t_start, atol=atol)
    assert_allclose(raw_proj.last_samp / sfreq,
                    t_start + (n_times - 1) / sfreq, atol=atol)
@testing.requires_testing_data
def test_restrict_forward_to_stc():
    """Test restriction of source space to source SourceEstimate
    """
    start = 0
    stop = 5
    n_times = stop - start - 1
    sfreq = 10.0
    t_start = 0.123
    # fixed-orientation case: one leadfield column per retained source
    fwd = read_forward_solution(fname_meeg, force_fixed=True)
    fwd = pick_types_forward(fwd, meg=True)
    # keep 15 left-hemisphere and 5 right-hemisphere vertices
    vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
    fwd_out = restrict_forward_to_stc(fwd, stc)
    assert_true(isinstance(fwd_out, Forward))
    assert_equal(fwd_out['sol']['ncol'], 20)
    assert_equal(fwd_out['src'][0]['nuse'], 15)
    assert_equal(fwd_out['src'][1]['nuse'], 5)
    assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
    assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
    # free-orientation case: three columns per retained source (20 * 3 = 60)
    fwd = read_forward_solution(fname_meeg, force_fixed=False)
    fwd = pick_types_forward(fwd, meg=True)
    vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
    fwd_out = restrict_forward_to_stc(fwd, stc)
    assert_equal(fwd_out['sol']['ncol'], 60)
    assert_equal(fwd_out['src'][0]['nuse'], 15)
    assert_equal(fwd_out['src'][1]['nuse'], 5)
    assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
    assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
@testing.requires_testing_data
def test_restrict_forward_to_label():
    """Test restriction of source space to label
    """
    # fixed-orientation case: one leadfield column per in-label source
    fwd = read_forward_solution(fname_meeg, force_fixed=True)
    fwd = pick_types_forward(fwd, meg=True)
    label_path = op.join(data_path, 'MEG', 'sample', 'labels')
    labels = ['Aud-lh', 'Vis-rh']
    label_lh = read_label(op.join(label_path, labels[0] + '.label'))
    label_rh = read_label(op.join(label_path, labels[1] + '.label'))
    fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
    # expected selection: label vertices that are actually in the source
    # space, expressed as indices into each hemisphere's vertno array
    src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
    src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
    src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
    # right-hemisphere indices are offset by the left-hemisphere count
    src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +
                  len(fwd['src'][0]['vertno']))
    assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
    assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
    assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
    # free-orientation case: three columns per in-label source
    fwd = read_forward_solution(fname_meeg, force_fixed=False)
    fwd = pick_types_forward(fwd, meg=True)
    label_path = op.join(data_path, 'MEG', 'sample', 'labels')
    labels = ['Aud-lh', 'Vis-rh']
    label_lh = read_label(op.join(label_path, labels[0] + '.label'))
    label_rh = read_label(op.join(label_path, labels[1] + '.label'))
    fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
    src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
    src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
    src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
    src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +
                  len(fwd['src'][0]['vertno']))
    assert_equal(fwd_out['sol']['ncol'],
                 3 * (len(src_sel_lh) + len(src_sel_rh)))
    assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
    assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
    assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
@testing.requires_testing_data
@requires_mne
def test_average_forward_solution():
    """Test averaging forward solutions
    """
    temp_dir = _TempDir()
    fwd = read_forward_solution(fname_meeg)
    # input not a list
    assert_raises(TypeError, average_forward_solutions, 1)
    # list is too short
    assert_raises(ValueError, average_forward_solutions, [])
    # negative weights
    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0])
    # all zero weights
    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0])
    # weights not same length
    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0])
    # list does not only have all dict()
    assert_raises(TypeError, average_forward_solutions, [1, fwd])
    # try an easy case: averaging a single solution must be a no-op copy
    fwd_copy = average_forward_solutions([fwd])
    assert_true(isinstance(fwd_copy, Forward))
    assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data'])
    # modify a fwd solution, save it, use MNE to average with old one
    fwd_copy['sol']['data'] *= 0.5
    fname_copy = op.join(temp_dir, 'copy-fwd.fif')
    write_forward_solution(fname_copy, fwd_copy, overwrite=True)
    cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd',
           fname_copy, '--out', fname_copy)
    run_subprocess(cmd)
    # now let's actually do it, with one filename and one fwd
    # averaging x and 0.5 * x gives 0.75 * x
    fwd_ave = average_forward_solutions([fwd, fwd_copy])
    assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])
    # fwd_ave_mne = read_forward_solution(fname_copy)
    # assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data'])
    # with gradient: averaging a solution with itself must reproduce it
    fwd = read_forward_solution(fname_meeg_grad)
    fwd_ave = average_forward_solutions([fwd, fwd])
    compare_forwards(fwd, fwd_ave)
run_tests_if_main()
| |
import sys
from django import http
from django.core import signals
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
from django.utils.log import getLogger
logger = getLogger('django.request')
class BaseHandler(object):
    """Shared request-handling core for Django's concrete handlers.

    Subclasses provide ``__call__`` and must call ``load_middleware()`` once
    (after the environment is ready) before serving requests.
    """
    # Changes that are always applied to a response (in this order).
    response_fixes = [
        http.fix_location_header,
        http.conditional_content_removal,
    ]
    def __init__(self):
        # All middleware lists start as None; load_middleware() fills them in,
        # and _request_middleware doubles as the "fully initialized" flag.
        self._request_middleware = self._view_middleware = self._template_response_middleware = self._response_middleware = self._exception_middleware = None
    def load_middleware(self):
        """
        Populate middleware lists from settings.MIDDLEWARE_CLASSES.
        Must be called after the environment is fixed (see __call__ in subclasses).
        """
        from django.conf import settings
        from django.core import exceptions
        self._view_middleware = []
        self._template_response_middleware = []
        self._response_middleware = []
        self._exception_middleware = []
        request_middleware = []
        for middleware_path in settings.MIDDLEWARE_CLASSES:
            try:
                mw_module, mw_classname = middleware_path.rsplit('.', 1)
            except ValueError:
                raise exceptions.ImproperlyConfigured('%s isn\'t a middleware module' % middleware_path)
            try:
                mod = import_module(mw_module)
            except ImportError, e:
                raise exceptions.ImproperlyConfigured('Error importing middleware %s: "%s"' % (mw_module, e))
            try:
                mw_class = getattr(mod, mw_classname)
            except AttributeError:
                raise exceptions.ImproperlyConfigured('Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname))
            try:
                mw_instance = mw_class()
            except exceptions.MiddlewareNotUsed:
                # The middleware opted out for this configuration; skip it.
                continue
            # Request/view hooks run in declared order (append); response,
            # template-response and exception hooks run in reverse order
            # (insert at the front).
            if hasattr(mw_instance, 'process_request'):
                request_middleware.append(mw_instance.process_request)
            if hasattr(mw_instance, 'process_view'):
                self._view_middleware.append(mw_instance.process_view)
            if hasattr(mw_instance, 'process_template_response'):
                self._template_response_middleware.insert(0, mw_instance.process_template_response)
            if hasattr(mw_instance, 'process_response'):
                self._response_middleware.insert(0, mw_instance.process_response)
            if hasattr(mw_instance, 'process_exception'):
                self._exception_middleware.insert(0, mw_instance.process_exception)
        # We only assign to this when initialization is complete as it is used
        # as a flag for initialization being complete.
        self._request_middleware = request_middleware
    def get_response(self, request):
        "Returns an HttpResponse object for the given HttpRequest"
        from django.core import exceptions, urlresolvers
        from django.conf import settings
        try:
            # Setup default url resolver for this thread, this code is outside
            # the try/except so we don't get a spurious "unbound local
            # variable" exception in the event an exception is raised before
            # resolver is set
            urlconf = settings.ROOT_URLCONF
            urlresolvers.set_urlconf(urlconf)
            resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
            try:
                response = None
                # Apply request middleware
                for middleware_method in self._request_middleware:
                    response = middleware_method(request)
                    if response:
                        break
                if response is None:
                    if hasattr(request, "urlconf"):
                        # Reset url resolver with a custom urlconf.
                        urlconf = request.urlconf
                        urlresolvers.set_urlconf(urlconf)
                        resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
                    callback, callback_args, callback_kwargs = resolver.resolve(
                            request.path_info)
                # Apply view middleware
                for middleware_method in self._view_middleware:
                    response = middleware_method(request, callback, callback_args, callback_kwargs)
                    if response:
                        break
                if response is None:
                    try:
                        response = callback(request, *callback_args, **callback_kwargs)
                    except Exception, e:
                        # If the view raised an exception, run it through exception
                        # middleware, and if the exception middleware returns a
                        # response, use that. Otherwise, reraise the exception.
                        for middleware_method in self._exception_middleware:
                            response = middleware_method(request, e)
                            if response:
                                break
                        if response is None:
                            raise
                # Complain if the view returned None (a common error).
                if response is None:
                    try:
                        view_name = callback.func_name # If it's a function
                    except AttributeError:
                        view_name = callback.__class__.__name__ + '.__call__' # If it's a class
                    raise ValueError("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name))
                # If the response supports deferred rendering, apply template
                # response middleware and the render the response
                if hasattr(response, 'render') and callable(response.render):
                    for middleware_method in self._template_response_middleware:
                        response = middleware_method(request, response)
                    response = response.render()
            except http.Http404, e:
                # 404: debug page in DEBUG mode, else the configured handler404
                logger.warning('Not Found: %s', request.path,
                    extra={
                        'status_code': 404,
                        'request': request
                    })
                if settings.DEBUG:
                    from django.views import debug
                    response = debug.technical_404_response(request, e)
                else:
                    try:
                        callback, param_dict = resolver.resolve404()
                        response = callback(request, **param_dict)
                    except:
                        try:
                            response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
                        finally:
                            signals.got_request_exception.send(sender=self.__class__, request=request)
            except exceptions.PermissionDenied:
                # 403: delegate to the configured handler403
                logger.warning(
                    'Forbidden (Permission denied): %s', request.path,
                    extra={
                        'status_code': 403,
                        'request': request
                    })
                try:
                    callback, param_dict = resolver.resolve403()
                    response = callback(request, **param_dict)
                except:
                    try:
                        response = self.handle_uncaught_exception(request,
                            resolver, sys.exc_info())
                    finally:
                        signals.got_request_exception.send(
                            sender=self.__class__, request=request)
            except SystemExit:
                # Allow sys.exit() to actually exit. See tickets #1023 and #4701
                raise
            except: # Handle everything else, including SuspiciousOperation, etc.
                # Get the exception info now, in case another exception is thrown later.
                signals.got_request_exception.send(sender=self.__class__, request=request)
                response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
        finally:
            # Reset URLconf for this thread on the way out for complete
            # isolation of request.urlconf
            urlresolvers.set_urlconf(None)
        try:
            # Apply response middleware, regardless of the response
            for middleware_method in self._response_middleware:
                response = middleware_method(request, response)
            response = self.apply_response_fixes(request, response)
        except: # Any exception should be gathered and handled
            signals.got_request_exception.send(sender=self.__class__, request=request)
            response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
        return response
    def handle_uncaught_exception(self, request, resolver, exc_info):
        """
        Processing for any otherwise uncaught exceptions (those that will
        generate HTTP 500 responses). Can be overridden by subclasses who want
        customised 500 handling.
        Be *very* careful when overriding this because the error could be
        caused by anything, so assuming something like the database is always
        available would be an error.
        """
        from django.conf import settings
        if settings.DEBUG_PROPAGATE_EXCEPTIONS:
            raise
        logger.error('Internal Server Error: %s', request.path,
            exc_info=exc_info,
            extra={
                'status_code': 500,
                'request': request
            }
        )
        if settings.DEBUG:
            from django.views import debug
            return debug.technical_500_response(request, *exc_info)
        # If Http500 handler is not installed, re-raise last exception
        # (Python 2 three-argument raise preserves the original traceback)
        if resolver.urlconf_module is None:
            raise exc_info[1], None, exc_info[2]
        # Return an HttpResponse that displays a friendly error message.
        callback, param_dict = resolver.resolve500()
        return callback(request, **param_dict)
    def apply_response_fixes(self, request, response):
        """
        Applies each of the functions in self.response_fixes to the request and
        response, modifying the response in the process. Returns the new
        response.
        """
        for func in self.response_fixes:
            response = func(request, response)
        return response
def get_script_name(environ):
    """
    Returns the equivalent of the HTTP request's SCRIPT_NAME environment
    variable. If Apache mod_rewrite has been used, returns what would have been
    the script name prior to any rewriting (so it's the script name as seen
    from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
    set (to anything).
    """
    from django.conf import settings
    if settings.FORCE_SCRIPT_NAME is not None:
        return force_unicode(settings.FORCE_SCRIPT_NAME)
    # mod_rewrite stashes the original (pre-rewrite) URL in SCRIPT_URL or
    # REDIRECT_URL, but not every Web server (lighttpd!) passes these
    # through, which is why FORCE_SCRIPT_NAME above is still needed.
    script_url = environ.get('SCRIPT_URL', u'') or environ.get('REDIRECT_URL', u'')
    if script_url:
        # Strip PATH_INFO off the end to recover the script prefix.
        return force_unicode(script_url[:-len(environ.get('PATH_INFO', ''))])
    return force_unicode(environ.get('SCRIPT_NAME', u''))
| |
import bisect
import copy
import hashlib
import itertools
import json
import operator
import time
from collections import ChainMap
import pmdefaults as PM
from pmdefaults import *
from policy import NEATProperty, PropertyArray, PropertyMultiArray, ImmutablePropertyError, term_separator
# Error code attached to CIBEntryError when a node's expire time has passed.
CIB_EXPIRED = 2
class CIBEntryError(Exception):
    """Raised for malformed or expired CIB node definitions."""
def load_json(filename):
    """
    Read CIB node from JSON file

    Returns the parsed object, or None if the file is not valid JSON (the
    parse error is logged and printed).  I/O errors (e.g. a missing file)
    propagate to the caller, matching the previous behavior.
    """
    try:
        # Context manager ensures the file handle is closed even on a parse
        # error (the previous implementation leaked the handle).
        with open(filename, 'r') as cib_file:
            return json.load(cib_file)
    except json.decoder.JSONDecodeError as e:
        logging.error("Could not parse CIB file " + filename)
        print(e)
        return
class CIBNode(object):
cib = None
    def __init__(self, node_dict=None):
        """Build a CIB node from a dictionary (typically parsed from JSON).

        :param node_dict: dict describing the node; an empty node is created
                          when omitted.
        :raises CIBEntryError: if node_dict is not a dict, or if the node's
                               expire time is already in the past (raised by
                               the ``expire`` property setter).
        """
        if node_dict is None:
            node_dict = dict()
        if not isinstance(node_dict, dict):
            raise CIBEntryError("invalid CIB object")
        self.root = node_dict.get('root', False)
        # otherwise chain matched CIBs
        self.link = node_dict.get('link', False)
        self.priority = node_dict.get('priority', 0)
        # TTL for the CIB node: the node is considered invalid after the time
        # specified (assignment goes through the 'expire' property setter)
        self.expire = node_dict.get('expire', None) or node_dict.get('expires', None)  # FIXME expires is deprecated
        self.filename = node_dict.get('filename', None)
        self.description = node_dict.get('description', '')
        # convert to PropertyMultiArray with NEATProperties
        properties = node_dict.get('properties', [])
        if not isinstance(properties, list):
            # properties should be in a list. The list elements are expanded
            # when generating the CIB rows.
            properties = [properties]
        self.properties = PropertyMultiArray()
        for p in properties:
            if isinstance(p, list):
                self.properties.add([PropertyArray.from_dict(ps) for ps in p])
            else:
                self.properties.add(PropertyArray.from_dict(p))
        self.match = []
        # FIXME better error handling if match undefined
        for l in node_dict.get('match', []):
            # convert to NEATProperties
            self.match.append(PropertyArray.from_dict(l))
        # uids of other CIB nodes this node links to; populated later by
        # update_links_from_match()
        self.linked = set()
        if self.link and not self.match:
            logging.warning('link attribute set but no match field!')
        self.uid = node_dict.get('uid')
        if self.uid is None:
            # no explicit uid: derive a stable one from the node contents
            self.uid = self._gen_uid()
def dict(self):
d = {}
for attr in ['uid', 'root', 'link', 'priority', 'filename', 'description', 'expire', ]:
try:
d[attr] = getattr(self, attr)
except AttributeError:
logging.debug("CIB node doesn't contain attribute %s" % attr)
if self.match:
d['match'] = []
for m in self.match:
d['match'].append(m.dict())
d['properties'] = self.properties.list()
return d
    @property
    def expire(self):
        """Expiry time of this node as a Unix timestamp; -1 means no expiry."""
        return self._expire
@expire.setter
def expire(self, value):
if value is None:
self._expire = time.time() + CIB_DEFAULT_TIMEOUT
return
value = float(value)
if value == -1:
# does not expire
self._expire = value
elif time.time() > value:
raise CIBEntryError('ignoring expired CIB node', CIB_EXPIRED)
else:
self._expire = value
def _gen_uid(self):
# FIXME generate persistent UIDs
d = self.dict()
for k in ['expire', 'filename', 'uid', ]:
try:
del d[k]
except KeyError:
pass
s = json.dumps(d, indent=0, sort_keys=True)
return hashlib.md5(s.encode('utf-8')).hexdigest()
def json(self, indent=4):
return json.dumps(self.dict(), indent=indent, sort_keys=True)
    def resolve_paths(self, path=None):
        """recursively find all paths from this CIBNode to all other matched CIBnodes in the CIB graph"""
        # NOTE(review): this method is an exact duplicate of resolve_links()
        # below, and its recursion actually delegates to resolve_links() —
        # the two are candidates for merging; confirm which one callers use.
        if path is None:
            path = []
        # insert own index based on CIB node priority to resolve overlapping properties later
        # FIXME priorities no longer work
        pos = bisect.bisect([self.cib[uid].priority for uid in path], self.priority)
        path.insert(pos, self.uid)
        # no more links to check
        if not (self.linked - set(path)):
            return [path]
        new_paths = []
        for uid in self.linked:
            if uid in path:
                # avoid cycles: never revisit a uid already on the path
                continue
            new_paths.extend(self.cib[uid].resolve_links(path.copy()))
        return new_paths
def match_entry(self, entry):
for match_properties in self.match:
if match_properties <= entry:
return True
return False
def expand(self):
for p in self.properties.expand():
yield p
    def update_links_from_match(self):
        """
        Look at the list elements in self.match and try to match all of its properties to another CIB entry. Generates a
        list containing the UIDs of the matched rows. The list is stored in self.linked.
        """
        for match_properties in self.match:
            for node in self.cib.nodes.values():
                # skip self-matches so a node never links to itself
                if node.uid == self.uid: continue  # ??
                for p in node.expand():
                    # Check if the properties in the match list are a full subset of some CIB properties.
                    # Also include the CIB uid as a property while matching
                    # NOTE(review): relies on the project-defined `<=` operator
                    # between a PropertyArray and a set of NEATProperty objects.
                    if match_properties <= set(p.values()) | {NEATProperty(('uid', node.uid))}:
                        self.linked.add(node.uid)
def resolve_graph(self, path=None):
"""new try """
if path is None:
path = []
path.append(self.uid)
remaining = set(self.cib.graph.get(self.uid, [])) - set(path)
if len(remaining) == 0:
return [path]
new_paths = []
for u in remaining:
paths = self.cib.nodes[u].resolve_graph(path.copy())
new_paths.extend(paths)
return new_paths
def resolve_links(self, path=None):
"""find paths from current CIB to all linked CIBS """
if path is None:
path = []
# insert own index based on CIB node priority to resolve overlapping properties later
pos = bisect.bisect([self.cib[uid].priority for uid in path], self.priority)
path.insert(pos, self.uid)
# no more links to check
if not (self.linked - set(path)):
return [path]
new_paths = []
for uid in self.linked:
if uid in path:
continue
new_paths.extend(self.cib[uid].resolve_links(path.copy()))
return new_paths
    def expand_rows(self, apply_extended=True):
        """Generate CIB rows by expanding all CIBs pointing to current CIB """
        # Each path through the link graph yields the Cartesian product of the
        # expanded properties of every node on the path; later ChainMap entries
        # are overridden by earlier ones.
        paths = self.resolve_graph()
        # for storing expanded rows
        rows = []
        for path in paths:
            expanded_properties = (self.cib[uid].expand() for uid in path)
            for pas in itertools.product(*expanded_properties):
                chain = ChainMap(*pas)
                # For debugging purposes, add the path list to the chain.
                # Store as string to preserve path order (NEAT properties are not ordered).
                dbg_path = '<<'.join(uid for uid in path)
                # insert at position 0 to override any existing entries
                # chain.maps.insert(0, PropertyArray(NEATProperty(('cib_uids', dbg_path))))
                # convert back to normal PropertyArrays
                row = PropertyArray(*(p for p in chain.values()))
                row.meta['cib_uids'] = dbg_path
                rows.append(row)
        if not apply_extended:
            return rows
        if not self.cib.extenders:
            # no extender CIB nodes loaded
            return rows
        # TODO optimize
        # Apply every matching extender node on top of every base row; the
        # originals are preserved and extended copies are appended.
        extended_rows = rows.copy()
        for entry in rows:
            # TODO take priorities into account
            # iterate extender cib_nodes
            for uid, xs in self.cib.extenders.items():
                for pa in xs.expand():
                    if xs.match_entry(entry):
                        # deep-copy so the extender never mutates the base row
                        entry_copy = copy.deepcopy(entry)
                        chain = ChainMap(pa, entry_copy)
                        new_pa = PropertyArray(*(p for p in chain.values()))
                        try:
                            # the extender's own uid must not leak into the row
                            del new_pa['uid']
                        except KeyError:
                            pass
                        extended_rows.append(new_pa)
        return extended_rows
def __repr__(self):
s = str(self.properties)
if self.linked:
s += " linked@%s" % self.linked
return s
class CIB(object):
    """
    Internal representation of the CIB for testing.

    Loads CIBNode objects from files in ``cib_dir``, watches those files for
    changes, maintains the link graph between nodes, and expands root nodes
    into flat rows which :meth:`lookup` matches against a request
    PropertyArray.
    """
    cib_dir = PM.CIB_DIR
    CIB_EXTENSIONS = ('.cib', '.local', '.connection', '.remote', '.slim')

    def __init__(self, cib_dir=None):
        """Create a CIB and load all CIB node files from *cib_dir* (or the default)."""
        # dictionary containing all loaded CIB nodes, keyed by their uid
        self.nodes = {}
        # track CIB files: full path -> st_mtime_ns, for change detection
        self.files = dict()
        # CIBNode instances reach back to their owning CIB via this class attribute
        CIBNode.cib = self
        # adjacency list: uid -> list of uids of link nodes that matched it
        self.graph = {}
        if cib_dir:
            self.cib_dir = cib_dir
        self.reload_files()

    def __getitem__(self, uid):
        return self.nodes[uid]

    def items(self):
        return self.nodes.items()

    def keys(self):
        return self.nodes.keys()

    def values(self):
        return self.nodes.values()

    @property
    def roots(self):
        """CIB nodes explicitly marked as roots (starting points for row expansion)."""
        return {k: v for k, v in self.nodes.items() if v.root is True}

    @property
    def extenders(self):
        """CIB nodes without the link attribute, used to extend matched rows."""
        return {k: v for k, v in self.nodes.items() if not v.link}

    @property
    def rows(self):
        """
        Returns a generator containing all expanded root CIB nodes
        """
        for uid, r in self.roots.items():
            # expand all cib nodes; tag each row with its originating root uid
            for entry in r.expand_rows():
                entry.cib_node = uid
                yield entry

    def reload_files(self, cib_dir=None):
        """
        Reload CIB files when a change is detected on disk.

        New files are loaded, modified files (by mtime) are re-loaded, and
        nodes whose backing file disappeared are removed. Always finishes by
        rebuilding the link graph.
        """
        if not cib_dir:
            cib_dir = self.cib_dir
        full_names = set()
        logging.info("checking for CIB updates...")
        if not os.path.exists(cib_dir):
            sys.exit('CIB directory %s does not exist' % cib_dir)
        for dirpath, dirnames, filenames in os.walk(cib_dir):
            for filename in filenames:
                # skip unknown extensions as well as hidden/backup files
                if not filename.endswith(CIB.CIB_EXTENSIONS) or filename.startswith(('.', '#')):
                    continue
                full_name = os.path.join(dirpath, filename)
                stat = os.stat(full_name)
                full_names.add(full_name)
                if full_name in self.files:
                    if self.files[full_name] != stat.st_mtime_ns:
                        logging.info("CIB node %s has changed", full_name)
                        self.files[full_name] = stat.st_mtime_ns
                        self.load_cib_file(full_name)
                else:
                    logging.info("Loading new CIB node %s.", full_name)
                    self.files[full_name] = stat.st_mtime_ns
                    self.load_cib_file(full_name)
        removed_files = self.files.keys() - full_names
        for filename in removed_files:
            logging.info("CIB node %s has been removed", filename)
            del self.files[filename]
            deleted_cs = [cs for cs in self.nodes.values() if cs.filename == filename]
            # remove corresponding CIBNode objects
            for cs in deleted_cs:
                # BUGFIX: previously popped the undefined name 'uid' instead of
                # the removed node's uid, so nodes of deleted files were never
                # purged (or raised NameError).
                self.nodes.pop(cs.uid, None)
        self.update_graph()

    def load_cib_file(self, filename):
        """Parse a single CIB file and register the resulting CIBNode."""
        cs = load_json(filename)
        if not cs:
            logging.warning("CIB node file %s was invalid" % filename)
            return
        try:
            cib_node = CIBNode(cs)
        except CIBEntryError as e:
            if CIB_EXPIRED in e.args:
                # expired nodes are expected; log quietly and skip
                logging.debug("Ignoring CIB node %s: %s" % (filename, e.args[0]))
                return
            logging.error("Unable to load CIB node %s: %s" % (filename, e.args[0]))
            return
        cib_node.filename = filename
        self.register(cib_node)

    def update_graph(self):
        """Rebuild the link graph from every node's match results."""
        # FIXME this tree should be rebuilt dynamically
        # update links for all registered CIBs
        for cs in self.nodes.values():
            cs.update_links_from_match()
        # FIXME check for invalid pointers
        self.graph = {}
        for i in self.nodes.values():
            if not i.link:
                continue
            # reverse edge: matched node -> link node that points at it
            for r in i.linked:
                if r not in self.graph:
                    self.graph[r] = []
                if i.uid not in self.graph[r]:
                    self.graph[r].append(i.uid)

    def import_json(self, slim, uid=None):
        """
        Import JSON formatted CIB entries into current cib.

        Accepts a single JSON object or a list of objects; each entry is
        sanity-checked by constructing a CIBNode, persisted to a file in
        ``cib_dir``, and picked up by reload_files().
        """
        # TODO optimize
        try:
            json_slim = json.loads(slim)
        except json.decoder.JSONDecodeError:
            logging.warning('invalid CIB file format')
            return
        # check if we received multiple objects in a list
        if isinstance(json_slim, list):
            for c in json_slim:
                self.import_json(json.dumps(c))
            return
        # convert to CIB node object to do sanity check
        try:
            cs = CIBNode(json_slim)
        except CIBEntryError as e:
            print(e)
            return
        # do not import cache nodes if disabled
        if not PM.CIB_CACHE and any(['__cached' in p for p in cs.properties.expand()]):
            logging.debug('Ignoring cache CIB node')
            return
        if uid is not None:
            cs.uid = uid
        filename = cs.uid
        slim = cs.json()
        if not filename:
            logging.warning("CIB entry has no UID")
            # generate CIB filename from the content hash instead
            filename = hashlib.md5(slim.encode('utf-8')).hexdigest()
        filename = '%s.cib' % filename.lower()
        with open(os.path.join(self.cib_dir, '%s' % filename), 'w') as f:
            f.write(slim)
        logging.debug("CIB entry saved as \"%s\"." % filename)
        self.reload_files()

    def register(self, cib_node):
        """Insert *cib_node* into the CIB, replacing any node with the same uid."""
        # BUGFIX: membership must be tested against the uid keys, not the
        # CIBNode object itself -- the old check (cib_node in self.nodes)
        # could never be true, so overwrites were never logged.
        if cib_node.uid in self.nodes:
            logging.debug("overwriting existing CIB with uid %s" % cib_node.uid)
        self.nodes[cib_node.uid] = cib_node

    def unregister(self, cib_uid):
        """Remove the node with *cib_uid* and rebuild the link graph."""
        del self.nodes[cib_uid]
        self.update_graph()

    def remove(self, cib_uid):
        """Alias for unregister()."""
        self.unregister(cib_uid)

    def lookup(self, input_properties, candidate_num=5):
        """CIB lookup logic implementation

        Return CIB rows that include *all* required properties from the
        request PropertyArray, merged with the request itself, ordered by
        score (best first) and truncated to *candidate_num* entries. The
        unmodified request is always among the candidates.
        """
        assert isinstance(input_properties, PropertyArray)
        candidates = [input_properties]
        for e in self.rows:
            try:
                # FIXME better check whether all input properties are included in row - improve matching
                # ignore optional properties in input request
                required_pa = PropertyArray(
                    *(p for p in input_properties.values() if p.precedence == NEATProperty.IMMUTABLE))
                if len(required_pa & e) != len(required_pa):
                    # row does not satisfy every required property
                    continue
            except ImmutablePropertyError:
                continue
            try:
                candidate = e + input_properties
                candidate.cib_node = e.cib_node
                candidates.append(candidate)
            except ImmutablePropertyError:
                # merging conflicted with an immutable property: skip this row
                pass
        return sorted(candidates, key=operator.attrgetter('score'), reverse=True)[:candidate_num]

    def dump(self, show_all=False):
        """Print all expanded CIB rows to stdout.

        NOTE: *show_all* is currently unused; kept for interface compatibility.
        """
        print(term_separator("CIB START"))
        for i, e in enumerate(self.rows):
            print("%3i. %s" % (i, str(e)))
        print(term_separator("CIB END"))

    def __repr__(self):
        return 'CIB<%d>' % (len(self.nodes))
if __name__ == "__main__":
    # Ad-hoc manual test / debug harness for the CIB implementation.
    cib = CIB('./cib/example/')
    # fetch two known example nodes by uid
    b = cib['B']
    c = cib['C']
    cib.dump()
    import code
    # drop into an interactive shell with the locals above available
    code.interact(local=locals(), banner='CIB')
    for uid in cib.roots:
        z = cib[uid].resolve_links([])
        print(z)
    query = PropertyArray()
    # NOTE: 'Infinity' is non-standard JSON but accepted by Python's json parser
    test_request_str = '{"MTU": {"value": [1500, Infinity]}, "low_latency": {"precedence": 2, "value": true}, "remote_ip": {"precedence": 2, "value": "10:54:1.23"}, "transport": {"value": "TCP"}}'
    test = json.loads(test_request_str)
    for k, v in test.items():
        # precedence defaults to 1 (optional) when not specified in the request
        query.add(NEATProperty((k, v['value']), precedence=v.get('precedence', 1)))
    candidates = cib.lookup(query)
    for i in candidates:
        print(i)
        # print(i, i.cib_node, i.score)
| |
import copy
import datetime
import locale
import os
import pickle
import sys
from ._compat import PY2, string_types, pjoin, iteritems, to_bytes, exists
from ._load import portalocker
from .helpers.classes import SQLCustomType
class Migrator(object):
    """Implements schema creation and migration for pyDAL adapters.

    A Migrator builds the CREATE TABLE statement for a table definition and,
    when a ``*.table`` metadata snapshot from a previous run exists, compares
    the two and emits the ALTER TABLE statements needed to reconcile them.
    """
    def __init__(self, adapter):
        # the database adapter this migrator generates SQL for
        self.adapter = adapter
    @property
    def db(self):
        """The DAL instance owning the adapter."""
        return self.adapter.db
    @property
    def dialect(self):
        """SQL dialect helper of the adapter (quoting, type templates, ...)."""
        return self.adapter.dialect
    @property
    def dbengine(self):
        """Backend engine name, e.g. 'postgres', 'mysql', 'sqlite'."""
        return self.adapter.dbengine
    def create_table(self, table, migrate=True, fake_migrate=False,
                     polymodel=None):
        """Build the CREATE TABLE SQL for *table* and apply migrations.

        Returns the CREATE TABLE query string. Side effects depend on the
        flags: with migrate=False only the query is returned; otherwise the
        table (or the necessary ALTER statements) is executed unless
        fake_migrate is set, in which case only the ``*.table`` metadata
        file is written/updated.
        """
        db = table._db
        fields = []
        # PostGIS geo fields are added after the table has been created
        postcreation_fields = []
        # field metadata snapshots: sql_fields drives migrations,
        # sql_fields_aux is used for the actual CREATE (see Caveat below)
        sql_fields = {}
        sql_fields_aux = {}
        # table-level foreign keys for multi-column primary key references
        TFK = {}
        tablename = table._tablename
        types = self.adapter.types
        for sortable, field in enumerate(table, start=1):
            field_name = field.name
            field_type = field.type
            if isinstance(field_type, SQLCustomType):
                ftype = field_type.native or field_type.type
            elif field_type.startswith(('reference', 'big-reference')):
                # strip the 'reference '/'big-reference ' prefix to get the target
                if field_type.startswith('reference'):
                    referenced = field_type[10:].strip()
                    type_name = 'reference'
                else:
                    referenced = field_type[14:].strip()
                    type_name = 'big-reference'
                if referenced == '.':
                    # '.' is shorthand for a self-reference
                    referenced = tablename
                constraint_name = self.dialect.constraint_name(
                    tablename, field_name)
                # if not '.' in referenced \
                # and referenced != tablename \
                # and hasattr(table,'_primarykey'):
                # ftype = types['integer']
                #else:
                try:
                    rtable = db[referenced]
                    rfield = rtable._id
                    rfieldname = rfield.name
                    rtablename = referenced
                except (KeyError, ValueError, AttributeError) as e:
                    self.db.logger.debug('Error: %s' % e)
                    # fall back to the explicit 'table.field' notation
                    try:
                        rtablename, rfieldname = referenced.split('.')
                        rtable = db[rtablename]
                        rfield = rtable[rfieldname]
                    except Exception as e:
                        self.db.logger.debug('Error: %s' % e)
                        raise KeyError(
                            'Cannot resolve reference %s in %s definition' % (
                                referenced, table._tablename))
                # must be PK reference or unique
                if getattr(rtable, '_primarykey', None) and rfieldname in \
                        rtable._primarykey or rfield.unique:
                    ftype = types[rfield.type[:9]] % \
                        dict(length=rfield.length)
                    # multicolumn primary key reference?
                    if not rfield.unique and len(rtable._primarykey) > 1:
                        # then it has to be a table level FK
                        if rtablename not in TFK:
                            TFK[rtablename] = {}
                        TFK[rtablename][rfieldname] = field_name
                    else:
                        fk = rtable.sqlsafe + ' (' + rfield.sqlsafe_name + ')'
                        ftype = ftype + \
                            types['reference FK'] % dict(
                                # should be quoted
                                constraint_name=constraint_name,
                                foreign_key=fk,
                                table_name=table.sqlsafe,
                                field_name=field.sqlsafe_name,
                                on_delete_action=field.ondelete)
                else:
                    # make a guess here for circular references
                    if referenced in db:
                        id_fieldname = db[referenced]._id.sqlsafe_name
                    elif referenced == tablename:
                        id_fieldname = table._id.sqlsafe_name
                    else:  # make a guess
                        id_fieldname = self.dialect.quote('id')
                    #gotcha: the referenced table must be defined before
                    #the referencing one to be able to create the table
                    #Also if it's not recommended, we can still support
                    #references to tablenames without rname to make
                    #migrations and model relationship work also if tables
                    #are not defined in order
                    if referenced == tablename:
                        real_referenced = db[referenced].sqlsafe
                    else:
                        real_referenced = (
                            referenced in db and db[referenced].sqlsafe or
                            referenced)
                    rfield = db[referenced]._id
                    ftype_info = dict(
                        index_name=self.dialect.quote(field_name+'__idx'),
                        field_name=field.sqlsafe_name,
                        constraint_name=self.dialect.quote(constraint_name),
                        foreign_key='%s (%s)' % (
                            real_referenced, rfield.sqlsafe_name),
                        on_delete_action=field.ondelete)
                    ftype_info['null'] = ' NOT NULL' if field.notnull else \
                        self.dialect.allow_null
                    ftype_info['unique'] = ' UNIQUE' if field.unique else ''
                    ftype = types[type_name] % ftype_info
            elif field_type.startswith('list:reference'):
                ftype = types[field_type[:14]]
            elif field_type.startswith('decimal'):
                # 'decimal(p,s)' -> numeric type with explicit precision/scale
                precision, scale = map(int, field_type[8:-1].split(','))
                ftype = types[field_type[:7]] % \
                    dict(precision=precision, scale=scale)
            elif field_type.startswith('geo'):
                if not hasattr(self.adapter, 'srid'):
                    raise RuntimeError('Adapter does not support geometry')
                srid = self.adapter.srid
                geotype, parms = field_type[:-1].split('(')
                if geotype not in types:
                    raise SyntaxError(
                        'Field: unknown field type: %s for %s' % (
                            field_type, field_name))
                ftype = types[geotype]
                if self.dbengine == 'postgres' and geotype == 'geometry':
                    if self.db._ignore_field_case is True:
                        field_name = field_name.lower()
                    # parameters: schema, srid, dimension
                    dimension = 2  # GIS.dimension ???
                    parms = parms.split(',')
                    if len(parms) == 3:
                        schema, srid, dimension = parms
                    elif len(parms) == 2:
                        schema, srid = parms
                    else:
                        schema = parms[0]
                    # double-% placeholders survive the first interpolation below
                    ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
                    ftype = ftype % dict(schema=schema,
                                         tablename=tablename,
                                         fieldname=field_name, srid=srid,
                                         dimension=dimension)
                    postcreation_fields.append(ftype)
            elif field_type not in types:
                raise SyntaxError('Field: unknown field type: %s for %s' % (
                    field_type, field_name))
            else:
                ftype = types[field_type] % {'length': field.length}
            if not field_type.startswith(('id', 'reference', 'big-reference')):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.dialect.allow_null
                if field.unique:
                    ftype += ' UNIQUE'
                if field.custom_qualifier:
                    ftype += ' %s' % field.custom_qualifier
            # add to list of fields
            sql_fields[field_name] = dict(
                length=field.length,
                unique=field.unique,
                notnull=field.notnull,
                sortable=sortable,
                type=str(field_type),
                sql=ftype)
            if field.notnull and field.default is not None:
                # Caveat: sql_fields and sql_fields_aux
                # differ for default values.
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create tables.
                # The reason is that we do not want to trigger
                # a migration simply because a default value changes.
                not_null = self.dialect.not_null(field.default, field_type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field_name] = dict(sql=ftype)
            # Postgres - PostGIS:
            # geometry fields are added after the table has been created, not now
            if not (self.dbengine == 'postgres' and
                    field_type.startswith('geom')):
                fields.append('%s %s' % (field.sqlsafe_name, ftype))
        other = ';'
        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY (%s)' % (
                    self.dialect.quote(table._id.name)))
            engine = self.adapter.adapter_args.get('engine', 'InnoDB')
            other = ' ENGINE=%s CHARACTER SET utf8;' % engine
        fields = ',\n '.join(fields)
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = [
                self.dialect.quote(pk) for pk in db[rtablename]._primarykey]
            # NOTE(review): pkeys are already quoted here but rfields is keyed
            # by raw field names and maps to name *strings* -- rfields[k].name
            # looks like it would raise; confirm against pydal upstream.
            fkeys = [self.dialect.quote(rfields[k].name) for k in pkeys]
            # NOTE(review): 'field' here is the leftover loop variable from the
            # field loop above, and foreign_table uses table.sqlsafe rather
            # than the referenced table -- both look suspicious; confirm.
            fields = fields + ',\n ' + \
                types['reference TFK'] % dict(
                    table_name=table.sqlsafe,
                    field_name=', '.join(fkeys),
                    foreign_table=table.sqlsafe,
                    foreign_key=', '.join(pkeys),
                    on_delete_action=field.ondelete)
        if getattr(table, '_primarykey', None):
            query = "CREATE TABLE %s(\n %s,\n %s) %s" % \
                (table.sqlsafe, fields,
                 self.dialect.primary_key(', '.join([
                     self.dialect.quote(pk)
                     for pk in table._primarykey])), other)
        else:
            query = "CREATE TABLE %s(\n %s\n)%s" % \
                (table.sqlsafe, fields, other)
        uri = self.adapter.uri
        if uri.startswith('sqlite:///') \
                or uri.startswith('spatialite:///'):
            # file-based sqlite: metadata lives next to the database file
            if PY2:
                path_encoding = sys.getfilesystemencoding() \
                    or locale.getdefaultlocale()[1] or 'utf8'
                dbpath = uri[9:uri.rfind('/')].decode(
                    'utf8').encode(path_encoding)
            else:
                dbpath = uri[9:uri.rfind('/')]
        else:
            dbpath = self.adapter.folder
        if not migrate:
            return query
        elif uri.startswith('sqlite:memory') or \
                uri.startswith('spatialite:memory'):
            # in-memory databases keep no migration metadata
            table._dbt = None
        elif isinstance(migrate, string_types):
            # a string value of migrate is an explicit metadata filename
            table._dbt = pjoin(dbpath, migrate)
        else:
            table._dbt = pjoin(
                dbpath, '%s_%s.table' % (db._uri_hash, tablename))
        if not table._dbt or not self.file_exists(table._dbt):
            # no previous metadata: create the table from scratch
            if table._dbt:
                self.log('timestamp: %s\n%s\n'
                         % (datetime.datetime.today().isoformat(),
                            query), table)
            if not fake_migrate:
                self.adapter.create_sequence_and_triggers(query, table)
                db.commit()
                # Postgres geom fields are added now,
                # after the table has been created
                for query in postcreation_fields:
                    self.adapter.execute(query)
                    db.commit()
            if table._dbt:
                # persist the field metadata snapshot for future migrations
                tfile = self.file_open(table._dbt, 'wb')
                pickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    self.log('faked!\n', table)
                else:
                    self.log('success!\n', table)
        else:
            # previous metadata exists: diff it and migrate if anything changed
            tfile = self.file_open(table._dbt, 'rb')
            try:
                sql_fields_old = pickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                raise RuntimeError('File %s appears corrupted' % table._dbt)
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(
                    table,
                    sql_fields, sql_fields_old,
                    sql_fields_aux, None,
                    fake_migrate=fake_migrate
                )
        return query
    def _fix(self, item):
        """Normalize one (name, metadata) pair loaded from a .table snapshot.

        Old snapshots may store a bare SQL string instead of a dict; names are
        lower-cased when the DAL ignores field case.
        """
        k, v = item
        if not isinstance(v, dict):
            v = dict(type='unknown', sql=v)
        if self.db._ignore_field_case is not True:
            return k, v
        return k.lower(), v
    def migrate_table(self, table, sql_fields, sql_fields_old, sql_fields_aux,
                      logfile, fake_migrate=False):
        """Emit and run the ALTER TABLE statements that reconcile the stored
        snapshot (*sql_fields_old*) with the current definition (*sql_fields*).

        Columns are added, dropped, or rebuilt via an add/copy/drop/re-add
        dance when their SQL changed. The metadata file is rewritten after
        every successful step (or immediately when fake_migrate is set).
        """
        # logfile is deprecated (moved to adapter.log method)
        db = table._db
        db._migrated.append(table._tablename)
        tablename = table._tablename
        # make sure all field names are lower case to avoid
        # migrations because of case cahnge
        sql_fields = dict(map(self._fix, iteritems(sql_fields)))
        sql_fields_old = dict(map(self._fix, iteritems(sql_fields_old)))
        sql_fields_aux = dict(map(self._fix, iteritems(sql_fields_aux)))
        if db._debug:
            db.logger.debug('migrating %s to %s' % (
                sql_fields_old, sql_fields))
        # union of old and new field names, new ones first
        keys = list(sql_fields.keys())
        for key in sql_fields_old:
            if key not in keys:
                keys.append(key)
        new_add = self.dialect.concat_add(tablename)
        metadata_change = False
        # running snapshot updated step by step so partial migrations
        # leave consistent metadata behind
        sql_fields_current = copy.copy(sql_fields_old)
        for key in keys:
            query = None
            if key not in sql_fields_old:
                # new column: ADD it
                sql_fields_current[key] = sql_fields[key]
                if self.dbengine in ('postgres',) and \
                        sql_fields[key]['type'].startswith('geometry'):
                    # 'sql' == ftype in sql
                    query = [sql_fields[key]['sql']]
                else:
                    query = ['ALTER TABLE %s ADD %s %s;' % (
                        table.sqlsafe, key,
                        sql_fields_aux[key]['sql'].replace(', ', new_add))]
                metadata_change = True
            elif self.dbengine in ('sqlite', 'spatialite'):
                # sqlite cannot drop/alter columns: only update the metadata
                if key in sql_fields:
                    sql_fields_current[key] = sql_fields[key]
                metadata_change = True
            elif key not in sql_fields:
                # removed column: DROP it
                del sql_fields_current[key]
                ftype = sql_fields_old[key]['type']
                if self.dbengine == 'postgres' and \
                        ftype.startswith('geometry'):
                    geotype, parms = ftype[:-1].split('(')
                    schema = parms.split(',')[0]
                    query = ["SELECT DropGeometryColumn ('%(schema)s', \
'%(table)s', '%(field)s');" % dict(
                        schema=schema, table=tablename, field=key)]
                elif self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s DROP %s;' % (
                        self.dialect.quote(tablename),
                        self.dialect.quote(key))]
                else:
                    query = ['ALTER TABLE %s DROP COLUMN %s;' % (
                        self.dialect.quote(tablename),
                        self.dialect.quote(key))]
                metadata_change = True
            elif (
                sql_fields[key]['sql'] != sql_fields_old[key]['sql'] and not
                (key in table.fields and
                 isinstance(table[key].type, SQLCustomType)) and not
                sql_fields[key]['type'].startswith('reference') and not
                sql_fields[key]['type'].startswith('double') and not
                    sql_fields[key]['type'].startswith('id')):
                # column type changed: rebuild it via a temporary column so
                # existing data is preserved (add tmp, copy, drop, re-add, copy back)
                sql_fields_current[key] = sql_fields[key]
                t = tablename
                tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
                if self.dbengine in ('firebird',):
                    drop_expr = 'ALTER TABLE %s DROP %s;'
                else:
                    drop_expr = 'ALTER TABLE %s DROP COLUMN %s;'
                key_tmp = key + '__tmp'
                query = [
                    'ALTER TABLE %s ADD %s %s;' % (
                        self.dialect.quote(t), self.dialect.quote(key_tmp),
                        tt),
                    'UPDATE %s SET %s=%s;' % (
                        self.dialect.quote(t), self.dialect.quote(key_tmp),
                        self.dialect.quote(key)),
                    drop_expr % (
                        self.dialect.quote(t), self.dialect.quote(key)),
                    'ALTER TABLE %s ADD %s %s;' % (
                        self.dialect.quote(t),
                        self.dialect.quote(key), tt),
                    'UPDATE %s SET %s=%s;' % (
                        self.dialect.quote(t), self.dialect.quote(key),
                        self.dialect.quote(key_tmp)),
                    drop_expr % (
                        self.dialect.quote(t), self.dialect.quote(key_tmp))
                ]
                metadata_change = True
            elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
                # only the logical type changed: metadata update, no DDL
                sql_fields_current[key] = sql_fields[key]
                metadata_change = True
            if query:
                self.log(
                    'timestamp: %s\n' % datetime.datetime.today().isoformat(),
                    table)
                for sub_query in query:
                    self.log(sub_query + '\n', table)
                    if fake_migrate:
                        if db._adapter.commit_on_alter_table:
                            self.save_dbt(table, sql_fields_current)
                        self.log('faked!\n', table)
                    else:
                        self.adapter.execute(sub_query)
                        # Caveat: mysql, oracle and firebird
                        # do not allow multiple alter table
                        # in one transaction so we must commit
                        # partial transactions and
                        # update table._dbt after alter table.
                        if db._adapter.commit_on_alter_table:
                            db.commit()
                            self.save_dbt(table, sql_fields_current)
                            self.log('success!\n', table)
            elif metadata_change:
                self.save_dbt(table, sql_fields_current)
        if metadata_change and not (
                query and db._adapter.commit_on_alter_table):
            db.commit()
            self.save_dbt(table, sql_fields_current)
            self.log('success!\n', table)
    def save_dbt(self, table, sql_fields_current):
        """Persist the current field metadata snapshot to the table's .table file."""
        tfile = self.file_open(table._dbt, 'wb')
        pickle.dump(sql_fields_current, tfile)
        self.file_close(tfile)
    def log(self, message, table=None):
        """Append *message* to the migration log file (default 'sql.log').

        Logging only happens when a log filename is configured, the table has
        migration metadata, and the adapter has a folder to resolve relative
        paths against.
        """
        isabs = None
        logfilename = self.adapter.adapter_args.get('logfile', 'sql.log')
        writelog = bool(logfilename)
        if writelog:
            isabs = os.path.isabs(logfilename)
        if table and table._dbt and writelog and self.adapter.folder:
            if isabs:
                table._loggername = logfilename
            else:
                table._loggername = pjoin(self.adapter.folder, logfilename)
            logfile = self.file_open(table._loggername, 'ab')
            logfile.write(to_bytes(message))
            self.file_close(logfile)
    @staticmethod
    def file_open(filename, mode='rb', lock=True):
        """Open *filename*, with an advisory lock by default."""
        #to be used ONLY for files that on GAE may not be on filesystem
        if lock:
            fileobj = portalocker.LockedFile(filename, mode)
        else:
            fileobj = open(filename, mode)
        return fileobj
    @staticmethod
    def file_close(fileobj):
        """Close *fileobj* if it is not None."""
        #to be used ONLY for files that on GAE may not be on filesystem
        if fileobj:
            fileobj.close()
    @staticmethod
    def file_delete(filename):
        """Delete *filename* from disk."""
        os.unlink(filename)
    @staticmethod
    def file_exists(filename):
        """Return True if *filename* exists."""
        #to be used ONLY for files that on GAE may not be on filesystem
        return exists(filename)
| |
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
from nova.api.openstack.compute import availability_zone as az_v21
from nova.api.openstack.compute import servers as servers_v21
from nova import availability_zones
from nova.compute import api as compute_api
from nova import context
from nova.db import api as db
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_service
FAKE_UUID = fakes.FAKE_UUID
def fake_service_get_all(context, filters=None, **kwargs):
    """Fake HostAPI.service_get_all returning a fixed ServiceList.

    Honors only the 'disabled' filter; everything else is ignored.
    """
    def _service(binary, availability_zone, created_at, updated_at, host,
                 disabled):
        db_s = dict(test_service.fake_service,
                    binary=binary,
                    availability_zone=availability_zone,
                    available_zones=availability_zone,
                    created_at=created_at,
                    updated_at=updated_at,
                    host=host,
                    disabled=disabled)
        # The version field is immutable so remove that before creating the obj
        db_s.pop('version', None)
        return objects.Service(context, **db_s)
    compute_created = datetime.datetime(2012, 11, 14, 9, 53, 25, 0)
    internal_created = datetime.datetime(2012, 11, 14, 9, 57, 3, 0)
    updated = datetime.datetime(2012, 12, 26, 14, 45, 25, 0)
    specs = [
        ("nova-compute", "zone-2", compute_created, updated,
         "fake_host-1", True),
        ("nova-scheduler", "internal", internal_created, updated,
         "fake_host-1", True),
        ("nova-compute", "zone-1", compute_created, updated,
         "fake_host-1", False),
        ("nova-sched", "internal", internal_created, updated,
         "fake_host-1", False),
        # nova-conductor is in the same zone and host as nova-sched
        # and is here to make sure /detail filters out duplicates.
        ("nova-conductor", "internal", internal_created, updated,
         "fake_host-1", False),
    ]
    svcs = [_service(*spec) for spec in specs]
    if filters and 'disabled' in filters:
        svcs = [svc for svc in svcs if svc.disabled == filters['disabled']]
    return objects.ServiceList(objects=svcs)
class AvailabilityZoneApiTestV21(test.NoDBTestCase):
    """Tests for the v2.1 availability-zone API extension (index/detail)."""
    availability_zone = az_v21
    def setUp(self):
        """Stub out networking, the AZ cache and the service host API."""
        super(AvailabilityZoneApiTestV21, self).setUp()
        availability_zones.reset_cache()
        fakes.stub_out_nw_api(self)
        self.stub_out('nova.availability_zones.set_availability_zones',
                      lambda c, services: services)
        self.stub_out('nova.servicegroup.API.service_is_up',
                      lambda s, service: True)
        self.controller = self.availability_zone.AvailabilityZoneController()
        # all tests share the canned service list from fake_service_get_all
        self.mock_service_get_all = mock.patch.object(
            self.controller.host_api, 'service_get_all',
            side_effect=fake_service_get_all).start()
        self.addCleanup(self.mock_service_get_all.stop)
    def test_filtered_availability_zones(self):
        """_get_filtered_availability_zones drops the 'internal' zone and
        tags the remaining zones with the requested availability state."""
        zones = ['zone1', 'internal']
        expected = [{'zoneName': 'zone1',
                     'zoneState': {'available': True},
                     "hosts": None}]
        result = self.controller._get_filtered_availability_zones(zones, True)
        self.assertEqual(result, expected)
        expected = [{'zoneName': 'zone1',
                     'zoneState': {'available': False},
                     "hosts": None}]
        result = self.controller._get_filtered_availability_zones(zones,
                                                                  False)
        self.assertEqual(result, expected)
    def test_availability_zone_index(self):
        """GET /os-availability-zone lists zones without host details;
        internal-only zones are excluded."""
        req = fakes.HTTPRequest.blank('')
        resp_dict = self.controller.index(req)
        self.assertIn('availabilityZoneInfo', resp_dict)
        zones = resp_dict['availabilityZoneInfo']
        self.assertEqual(len(zones), 2)
        # zone-1 has an enabled service, zone-2 only a disabled one
        self.assertEqual(zones[0]['zoneName'], u'zone-1')
        self.assertTrue(zones[0]['zoneState']['available'])
        self.assertIsNone(zones[0]['hosts'])
        self.assertEqual(zones[1]['zoneName'], u'zone-2')
        self.assertFalse(zones[1]['zoneState']['available'])
        self.assertIsNone(zones[1]['hosts'])
    def test_availability_zone_detail(self):
        """GET /os-availability-zone/detail includes per-host service info
        for available zones (including 'internal') and dedups services."""
        req = fakes.HTTPRequest.blank('')
        resp_dict = self.controller.detail(req)
        self.assertIn('availabilityZoneInfo', resp_dict)
        zones = resp_dict['availabilityZoneInfo']
        self.assertEqual(len(zones), 3)
        timestamp = iso8601.parse_date("2012-12-26T14:45:25Z")
        expected = [
            {
                'zoneName': 'internal',
                'zoneState': {'available': True},
                'hosts': {
                    'fake_host-1': {
                        'nova-sched': {
                            'active': True,
                            'available': True,
                            'updated_at': timestamp
                        },
                        'nova-conductor': {
                            'active': True,
                            'available': True,
                            'updated_at': timestamp
                        }
                    }
                }
            },
            {
                'zoneName': 'zone-1',
                'zoneState': {'available': True},
                'hosts': {
                    'fake_host-1': {
                        'nova-compute': {
                            'active': True,
                            'available': True,
                            'updated_at': timestamp
                        }
                    }
                }
            },
            {
                'zoneName': 'zone-2',
                'zoneState': {'available': False},
                'hosts': None
            }
        ]
        self.assertEqual(expected, zones)
        # the service list must be fetched exactly once per request
        self.assertEqual(1, self.mock_service_get_all.call_count,
                         self.mock_service_get_all.call_args_list)
    @mock.patch.object(availability_zones, 'get_availability_zones',
                       return_value=[['nova'], []])
    def test_availability_zone_detail_no_services(self, mock_get_az):
        """A zone without services reports an empty hosts mapping."""
        expected_response = {'availabilityZoneInfo':
                             [{'zoneState': {'available': True},
                               'hosts': {},
                               'zoneName': 'nova'}]}
        req = fakes.HTTPRequest.blank('')
        resp_dict = self.controller.detail(req)
        self.assertThat(resp_dict,
                        matchers.DictMatches(expected_response))
class ServersControllerCreateTestV21(test.TestCase):
    """Server-create tests focused on the availability_zone request field."""
    base_url = '/v2/%s/' % fakes.FAKE_PROJECT_ID
    def setUp(self):
        """Shared implementation for tests below that create instance."""
        super(ServersControllerCreateTestV21, self).setUp()
        self.instance_cache_num = 0
        fakes.stub_out_nw_api(self)
        self._set_up_controller()
        def create_db_entry_for_new_instance(*args, **kwargs):
            # short-circuit the DB write: just pin the uuid for assertions
            instance = args[4]
            instance.uuid = FAKE_UUID
            return instance
        fake.stub_out_image_service(self)
        self.stub_out('nova.compute.api.API.create_db_entry_for_new_instance',
                      create_db_entry_for_new_instance)
    def _set_up_controller(self):
        """Factored out so subclasses can plug in a different controller."""
        self.controller = servers_v21.ServersController()
    def _create_instance_with_availability_zone(self, zone_name):
        """Build a POST /servers request carrying *zone_name* and prepare a
        host aggregate so the 'nova' zone actually exists."""
        def create(*args, **kwargs):
            # verify the controller forwarded the zone to compute API
            self.assertIn('availability_zone', kwargs)
            self.assertEqual('nova', kwargs['availability_zone'])
            return old_create(*args, **kwargs)
        old_create = compute_api.API.create
        self.stub_out('nova.compute.api.API.create', create)
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
        body = {
            'server': {
                'name': 'server_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
                'availability_zone': zone_name,
            },
        }
        req = fakes.HTTPRequest.blank('')
        req.method = 'POST'
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['content-type'] = 'application/json'
        admin_context = context.get_admin_context()
        # register a compute service and map its host into the 'nova' AZ
        db.service_create(admin_context, {'host': 'host1_zones',
                                          'binary': "nova-compute",
                                          'topic': 'compute',
                                          'report_count': 0})
        agg = objects.Aggregate(admin_context,
                                name='agg1',
                                uuid=uuidsentinel.agg_uuid,
                                metadata={'availability_zone': 'nova'})
        agg.create()
        agg.add_host('host1_zones')
        return req, body
    def test_create_instance_with_availability_zone(self):
        """A valid zone name is accepted and the server is created."""
        zone_name = 'nova'
        req, body = self._create_instance_with_availability_zone(zone_name)
        res = self.controller.create(req, body=body).obj
        server = res['server']
        self.assertEqual(fakes.FAKE_UUID, server['id'])
    def test_create_instance_with_invalid_availability_zone_too_long(self):
        """Zone names longer than 255 characters fail schema validation."""
        zone_name = 'a' * 256
        req, body = self._create_instance_with_availability_zone(zone_name)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, req, body=body)
    def test_create_instance_with_invalid_availability_zone_too_short(self):
        """An empty zone name fails schema validation."""
        zone_name = ''
        req, body = self._create_instance_with_availability_zone(zone_name)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, req, body=body)
    def test_create_instance_with_invalid_availability_zone_not_str(self):
        """A non-string zone name fails schema validation."""
        zone_name = 111
        req, body = self._create_instance_with_availability_zone(zone_name)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, req, body=body)
    def test_create_instance_without_availability_zone(self):
        """Omitting availability_zone entirely is allowed."""
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
        body = {
            'server': {
                'name': 'server_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
            },
        }
        req = fakes.HTTPRequest.blank('')
        req.method = 'POST'
        req.body = jsonutils.dump_as_bytes(body)
        req.headers['content-type'] = 'application/json'
        res = self.controller.create(req, body=body).obj
        server = res['server']
        self.assertEqual(fakes.FAKE_UUID, server['id'])
| |
from __future__ import unicode_literals
import datetime
import functools
import re
from itertools import chain
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject
from django.utils import six
from .topological_sort import stable_topological_sort
class MigrationAutodetector(object):
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
self.existing_apps = {app for app, model in from_state.models}
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
"""
Main entry point to produce a list of applicable changes.
Takes a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph, migration_name)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def deep_deconstruct(self, obj):
"""
Recursive deconstruction for a field and its arguments.
Used for full comparison for rename/alter; sometimes a single-level
deconstruction will not compare correctly.
"""
if isinstance(obj, list):
return [self.deep_deconstruct(value) for value in obj]
elif isinstance(obj, tuple):
return tuple(self.deep_deconstruct(value) for value in obj)
elif isinstance(obj, dict):
return {
key: self.deep_deconstruct(value)
for key, value in obj.items()
}
elif isinstance(obj, functools.partial):
return (obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords))
elif isinstance(obj, COMPILED_REGEX_TYPE):
return RegexObject(obj)
elif isinstance(obj, type):
# If this is a type that implements 'deconstruct' as an instance method,
# avoid treating this as being deconstructible itself - see #22951
return obj
elif hasattr(obj, 'deconstruct'):
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
# we have a field which also returns a name
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
{
key: self.deep_deconstruct(value)
for key, value in kwargs.items()
},
)
else:
return obj
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to.
Used for detecting renames (as, of course, the related fields
change during renames)
"""
fields_def = []
for name, field in sorted(fields):
deconstruction = self.deep_deconstruct(field)
if field.remote_field and field.remote_field.model:
del deconstruction[2]['to']
fields_def.append(deconstruction)
return fields_def
    def _detect_changes(self, convert_apps=None, graph=None):
        """
        Return a dict of migration plans which will achieve the
        change from from_state to to_state. The dict has app labels
        as keys and a list of migrations as values.

        The resulting migrations aren't specially named, but the names
        do matter for dependencies inside the set.

        convert_apps is the list of apps to convert to use migrations
        (i.e. to make initial migrations for, in the usual case)

        graph is an optional argument that, if provided, can help improve
        dependency generation and avoid potential circular dependencies.
        """
        # The first phase is generating all the operations for each app
        # and gathering them into a big per-app list.
        # We'll then go through that list later and order it and split
        # into migrations to resolve dependencies caused by M2Ms and FKs.
        self.generated_operations = {}
        # Prepare some old/new state and model lists, separating
        # proxy models and ignoring unmigrated apps.
        self.old_apps = self.from_state.concrete_apps
        self.new_apps = self.to_state.apps
        self.old_model_keys = []
        self.old_proxy_keys = []
        self.old_unmanaged_keys = []
        self.new_model_keys = []
        self.new_proxy_keys = []
        self.new_unmanaged_keys = []
        # Classify every old model as unmanaged, proxy or concrete;
        # models belonging to "real" (unmigrated) apps are skipped.
        for al, mn in sorted(self.from_state.models.keys()):
            model = self.old_apps.get_model(al, mn)
            if not model._meta.managed:
                self.old_unmanaged_keys.append((al, mn))
            elif al not in self.from_state.real_apps:
                if model._meta.proxy:
                    self.old_proxy_keys.append((al, mn))
                else:
                    self.old_model_keys.append((al, mn))
        # Same classification for the new state; apps listed in
        # convert_apps are included even when they are "real" apps.
        for al, mn in sorted(self.to_state.models.keys()):
            model = self.new_apps.get_model(al, mn)
            if not model._meta.managed:
                self.new_unmanaged_keys.append((al, mn))
            elif (
                al not in self.from_state.real_apps or
                (convert_apps and al in convert_apps)
            ):
                if model._meta.proxy:
                    self.new_proxy_keys.append((al, mn))
                else:
                    self.new_model_keys.append((al, mn))
        # Renames have to come first (they rewrite the key lists that all
        # of the later passes read).
        self.generate_renamed_models()
        # Prepare lists of fields and generate through model map
        self._prepare_field_lists()
        self._generate_through_model_map()
        # Generate non-rename model operations
        self.generate_deleted_models()
        self.generate_created_models()
        self.generate_deleted_proxies()
        self.generate_created_proxies()
        self.generate_altered_options()
        self.generate_altered_managers()
        # Generate field operations
        self.generate_renamed_fields()
        self.generate_removed_fields()
        self.generate_added_fields()
        self.generate_altered_fields()
        self.generate_altered_unique_together()
        self.generate_altered_index_together()
        self.generate_altered_db_table()
        self.generate_altered_order_with_respect_to()
        # Order operations, split them into Migration instances, then
        # post-process the result.
        self._sort_migrations()
        self._build_migration_list(graph)
        self._optimize_migrations()
        return self.migrations
def _prepare_field_lists(self):
"""
Prepare field lists, and prepare a list of the fields that used
through models in the old state so we can make dependencies
from the through model deletion to the field that uses it.
"""
self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys)
self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys)
self.kept_unmanaged_keys = set(self.old_unmanaged_keys).intersection(self.new_unmanaged_keys)
self.through_users = {}
self.old_field_keys = set()
self.new_field_keys = set()
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields)
self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields)
def _generate_through_model_map(self):
"""
Through model map generation
"""
for app_label, model_name in sorted(self.old_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
for field_name, field in old_model_state.fields:
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name)
if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None) and
not old_field.remote_field.through._meta.auto_created):
through_key = (
old_field.remote_field.through._meta.app_label,
old_field.remote_field.through._meta.model_name,
)
self.through_users[through_key] = (app_label, old_model_name, field_name)
    def _build_migration_list(self, graph=None):
        """
        We need to chop the lists of operations up into migrations with
        dependencies on each other. We do this by stepping up an app's list of
        operations until we find one that has an outgoing dependency that isn't
        in another app's migration yet (hasn't been chopped off its list). We
        then chop off the operations before it into a migration and move onto
        the next app. If we loop back around without doing anything, there's a
        circular dependency (which _should_ be impossible as the operations are
        all split at this point so they can't depend and be depended on).
        """
        self.migrations = {}
        num_ops = sum(len(x) for x in self.generated_operations.values())
        chop_mode = False
        # Fixpoint loop: keep passing over the apps until every generated
        # operation has been moved into a migration.
        while num_ops:
            # On every iteration, we step through all the apps and see if there
            # is a completed set of operations.
            # If we find that a subset of the operations are complete we can
            # try to chop it off from the rest and continue, but we only
            # do this if we've already been through the list once before
            # without any chopping and nothing has changed.
            for app_label in sorted(self.generated_operations.keys()):
                chopped = []
                dependencies = set()
                for operation in list(self.generated_operations[app_label]):
                    deps_satisfied = True
                    operation_dependencies = set()
                    for dep in operation._auto_deps:
                        is_swappable_dep = False
                        if dep[0] == "__setting__":
                            # We need to temporarily resolve the swappable dependency to prevent
                            # circular references. While keeping the dependency checks on the
                            # resolved model we still add the swappable dependencies.
                            # See #23322
                            resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.')
                            original_dep = dep
                            dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3])
                            is_swappable_dep = True
                        if dep[0] != app_label and dep[0] != "__setting__":
                            # External app dependency. See if it's not yet
                            # satisfied.
                            for other_operation in self.generated_operations.get(dep[0], []):
                                if self.check_dependency(other_operation, dep):
                                    deps_satisfied = False
                                    break
                            if not deps_satisfied:
                                break
                            else:
                                if is_swappable_dep:
                                    operation_dependencies.add((original_dep[0], original_dep[1]))
                                elif dep[0] in self.migrations:
                                    # Depend on the last migration already built
                                    # for that app.
                                    operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
                                else:
                                    # If we can't find the other app, we add a first/last dependency,
                                    # but only if we've already been through once and checked everything
                                    if chop_mode:
                                        # If the app already exists, we add a dependency on the last migration,
                                        # as we don't know which migration contains the target field.
                                        # If it's not yet migrated or has no migrations, we use __first__
                                        if graph and graph.leaf_nodes(dep[0]):
                                            operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
                                        else:
                                            operation_dependencies.add((dep[0], "__first__"))
                                    else:
                                        deps_satisfied = False
                    if deps_satisfied:
                        chopped.append(operation)
                        dependencies.update(operation_dependencies)
                        self.generated_operations[app_label] = self.generated_operations[app_label][1:]
                    else:
                        break
                # Make a migration! Well, only if there's stuff to put in it
                if dependencies or chopped:
                    if not self.generated_operations[app_label] or chop_mode:
                        subclass = type(str("Migration"), (Migration,), {"operations": [], "dependencies": []})
                        instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
                        instance.dependencies = list(dependencies)
                        instance.operations = chopped
                        instance.initial = app_label not in self.existing_apps
                        self.migrations.setdefault(app_label, []).append(instance)
                        chop_mode = False
                    else:
                        # Not a complete set yet: put the chopped operations
                        # back and try again on the next pass.
                        self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
            new_num_ops = sum(len(x) for x in self.generated_operations.values())
            if new_num_ops == num_ops:
                # No progress this pass: first enable chop mode, and if that
                # has already been tried, the dependencies are unresolvable.
                if not chop_mode:
                    chop_mode = True
                else:
                    raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
            num_ops = new_num_ops
def _sort_migrations(self):
"""
Reorder to make things possible. The order we have already isn't bad,
but we need to pull a few things around so FKs work nicely inside the
same app
"""
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
dependency_graph = {op: set() for op in ops}
for op in ops:
for dep in op._auto_deps:
if dep[0] == app_label:
for op2 in ops:
if self.check_dependency(op2, dep):
dependency_graph[op].add(op2)
# we use a stable sort for deterministic tests & general behavior
self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph)
def _optimize_migrations(self):
# Add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
def check_dependency(self, operation, dependency):
"""
Returns ``True`` if the given operation depends on the given dependency,
``False`` otherwise.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
(
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower() and
any(dependency[2] == x for x, y in operation.fields)
) or
(
isinstance(operation, operations.AddField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel) and
operation.name_lower == dependency[1].lower()
)
# Field being altered
elif dependency[2] is not None and dependency[3] == "alter":
return (
isinstance(operation, operations.AlterField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo) and
operation.name_lower == dependency[1].lower() and
(operation.order_with_respect_to or "").lower() != dependency[2].lower()
)
# Field is removed and part of an index/unique_together
elif dependency[2] is not None and dependency[3] == "foo_together_change":
return (
isinstance(operation, (operations.AlterUniqueTogether,
operations.AlterIndexTogether)) and
operation.name_lower == dependency[1].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency, ))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
def swappable_first_key(self, item):
"""
Sorting key function that places potential swappable models first in
lists of created models (only real way to solve #22783)
"""
try:
model = self.new_apps.get_model(item[0], item[1])
base_names = [base.__name__ for base in model.__bases__]
string_version = "%s.%s" % (item[0], item[1])
if (
model._meta.swappable or
"AbstractUser" in base_names or
"AbstractBaseUser" in base_names or
settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
    def generate_renamed_models(self):
        """
        Finds any renamed models, and generates the operations for them,
        and removes the old entry from the model lists.
        Must be run before other model-level generation.

        A rename is detected when an added model's field definition
        (ignoring what related fields point at) matches a removed model
        in the same app and the questioner confirms it.
        """
        self.renamed_models = {}
        # Maps "app_label.OldName" -> "app_label.NewName" so relation
        # targets of renamed models can be rewritten later.
        self.renamed_models_rel = {}
        added_models = set(self.new_model_keys) - set(self.old_model_keys)
        for app_label, model_name in sorted(added_models):
            model_state = self.to_state.models[app_label, model_name]
            model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
            # Recomputed each iteration: earlier matches mutate
            # self.old_model_keys below.
            removed_models = set(self.old_model_keys) - set(self.new_model_keys)
            for rem_app_label, rem_model_name in removed_models:
                if rem_app_label == app_label:
                    rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
                    rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
                    if model_fields_def == rem_model_fields_def:
                        if self.questioner.ask_rename_model(rem_model_state, model_state):
                            self.add_operation(
                                app_label,
                                operations.RenameModel(
                                    old_name=rem_model_state.name,
                                    new_name=model_state.name,
                                )
                            )
                            self.renamed_models[app_label, model_name] = rem_model_name
                            renamed_models_rel_key = '%s.%s' % (rem_model_state.app_label, rem_model_state.name)
                            self.renamed_models_rel[renamed_models_rel_key] = '%s.%s' % (
                                model_state.app_label,
                                model_state.name,
                            )
                            # Treat the pair as "kept" (under the new name)
                            # for the rest of the detection passes.
                            self.old_model_keys.remove((rem_app_label, rem_model_name))
                            self.old_model_keys.append((app_label, model_name))
                            break
    def generate_created_models(self):
        """
        Find all new models (both managed and unmanaged) and make create
        operations for them as well as separate operations to create any
        foreign key or M2M relationships (we'll optimize these back in later
        if we can).

        We also defer any model options that refer to collections of fields
        that might be deferred (e.g. unique_together, index_together).
        """
        old_keys = set(self.old_model_keys).union(self.old_unmanaged_keys)
        added_models = set(self.new_model_keys) - old_keys
        added_unmanaged_models = set(self.new_unmanaged_keys) - old_keys
        # Emit swappable candidates first within each group (see
        # swappable_first_key and #22783).
        all_added_models = chain(
            sorted(added_models, key=self.swappable_first_key, reverse=True),
            sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
        )
        for app_label, model_name in all_added_models:
            model_state = self.to_state.models[app_label, model_name]
            model_opts = self.new_apps.get_model(app_label, model_name)._meta
            # Gather related fields
            related_fields = {}
            primary_key_rel = None
            for field in model_opts.local_fields:
                if field.remote_field:
                    if field.remote_field.model:
                        if field.primary_key:
                            primary_key_rel = field.remote_field.model
                        elif not field.remote_field.parent_link:
                            related_fields[field.name] = field
                    # through will be none on M2Ms on swapped-out models;
                    # we can treat lack of through as auto_created=True, though.
                    if (getattr(field.remote_field, "through", None) and
                            not field.remote_field.through._meta.auto_created):
                        related_fields[field.name] = field
            for field in model_opts.local_many_to_many:
                if field.remote_field.model:
                    related_fields[field.name] = field
                if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
                    related_fields[field.name] = field
            # Are there unique/index_together to defer?
            unique_together = model_state.options.pop('unique_together', None)
            index_together = model_state.options.pop('index_together', None)
            order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
            # Depend on the deletion of any possible proxy version of us
            dependencies = [
                (app_label, model_name, None, False),
            ]
            # Depend on all bases
            for base in model_state.bases:
                if isinstance(base, six.string_types) and "." in base:
                    base_app_label, base_name = base.split(".", 1)
                    dependencies.append((base_app_label, base_name, None, True))
            # Depend on the other end of the primary key if it's a relation
            if primary_key_rel:
                dependencies.append((
                    primary_key_rel._meta.app_label,
                    primary_key_rel._meta.object_name,
                    None,
                    True
                ))
            # Generate creation operation; related fields are added by
            # separate AddField operations below.
            self.add_operation(
                app_label,
                operations.CreateModel(
                    name=model_state.name,
                    fields=[d for d in model_state.fields if d[0] not in related_fields],
                    options=model_state.options,
                    bases=model_state.bases,
                    managers=model_state.managers,
                ),
                dependencies=dependencies,
                beginning=True,
            )
            # Don't add operations which modify the database for unmanaged models
            if not model_opts.managed:
                continue
            # Generate operations for each related field
            for name, field in sorted(related_fields.items()):
                dependencies = self._get_dependecies_for_foreign_key(field)
                # Depend on our own model being created
                dependencies.append((app_label, model_name, None, True))
                # Make operation
                self.add_operation(
                    app_label,
                    operations.AddField(
                        model_name=model_name,
                        name=name,
                        field=field,
                    ),
                    dependencies=list(set(dependencies)),
                )
            # Generate other opns (deferred options depend on every related
            # field having been added first).
            related_dependencies = [
                (app_label, model_name, name, True)
                for name, field in sorted(related_fields.items())
            ]
            related_dependencies.append((app_label, model_name, None, True))
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=unique_together,
                    ),
                    dependencies=related_dependencies
                )
            if index_together:
                self.add_operation(
                    app_label,
                    operations.AlterIndexTogether(
                        name=model_name,
                        index_together=index_together,
                    ),
                    dependencies=related_dependencies
                )
            if order_with_respect_to:
                self.add_operation(
                    app_label,
                    operations.AlterOrderWithRespectTo(
                        name=model_name,
                        order_with_respect_to=order_with_respect_to,
                    ),
                    dependencies=[
                        (app_label, model_name, order_with_respect_to, True),
                        (app_label, model_name, None, True),
                    ]
                )
def generate_created_proxies(self):
"""
Makes CreateModel statements for proxy models.
We use the same statements as that way there's less code duplication,
but of course for proxy models we can skip all that pointless field
stuff and just chuck out an operation.
"""
added = set(self.new_proxy_keys) - set(self.old_proxy_keys)
for app_label, model_name in sorted(added):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy")
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, six.string_types) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
# Depend on the deletion of any possible non-proxy version of us
dependencies=dependencies,
)
    def generate_deleted_models(self):
        """
        Find all deleted models (managed and unmanaged) and make delete
        operations for them as well as separate operations to delete any
        foreign key or M2M relationships (we'll optimize these back in later
        if we can).

        We also bring forward removal of any model options that refer to
        collections of fields - the inverse of generate_created_models().
        """
        new_keys = set(self.new_model_keys).union(self.new_unmanaged_keys)
        deleted_models = set(self.old_model_keys) - new_keys
        deleted_unmanaged_models = set(self.old_unmanaged_keys) - new_keys
        all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
        for app_label, model_name in all_deleted_models:
            model_state = self.from_state.models[app_label, model_name]
            model = self.old_apps.get_model(app_label, model_name)
            if not model._meta.managed:
                # Skip here, no need to handle fields for unmanaged models
                continue
            # Gather related fields
            related_fields = {}
            for field in model._meta.local_fields:
                if field.remote_field:
                    if field.remote_field.model:
                        related_fields[field.name] = field
                    # through will be none on M2Ms on swapped-out models;
                    # we can treat lack of through as auto_created=True, though.
                    if (getattr(field.remote_field, "through", None) and
                            not field.remote_field.through._meta.auto_created):
                        related_fields[field.name] = field
            for field in model._meta.local_many_to_many:
                if field.remote_field.model:
                    related_fields[field.name] = field
                if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
                    related_fields[field.name] = field
            # Generate option removal first
            unique_together = model_state.options.pop('unique_together', None)
            index_together = model_state.options.pop('index_together', None)
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=None,
                    )
                )
            if index_together:
                self.add_operation(
                    app_label,
                    operations.AlterIndexTogether(
                        name=model_name,
                        index_together=None,
                    )
                )
            # Then remove each related field
            for name, field in sorted(related_fields.items()):
                self.add_operation(
                    app_label,
                    operations.RemoveField(
                        model_name=model_name,
                        name=name,
                    )
                )
            # Finally, remove the model.
            # This depends on both the removal/alteration of all incoming fields
            # and the removal of all its own related fields, and if it's
            # a through model the field that references it.
            dependencies = []
            for related_object in model._meta.related_objects:
                related_object_app_label = related_object.related_model._meta.app_label
                object_name = related_object.related_model._meta.object_name
                field_name = related_object.field.name
                dependencies.append((related_object_app_label, object_name, field_name, False))
                if not related_object.many_to_many:
                    dependencies.append((related_object_app_label, object_name, field_name, "alter"))
            for name, field in sorted(related_fields.items()):
                dependencies.append((app_label, model_name, name, False))
            # We're referenced in another field's through=
            through_user = self.through_users.get((app_label, model_state.name_lower))
            if through_user:
                dependencies.append((through_user[0], through_user[1], through_user[2], False))
            # Finally, make the operation, deduping any dependencies
            self.add_operation(
                app_label,
                operations.DeleteModel(
                    name=model_state.name,
                ),
                dependencies=list(set(dependencies)),
            )
def generate_deleted_proxies(self):
"""
Makes DeleteModel statements for proxy models.
"""
deleted = set(self.old_proxy_keys) - set(self.new_proxy_keys)
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy")
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
    def generate_renamed_fields(self):
        """
        Works out renamed fields.

        For every field key that only appears in the new state, look for a
        removed field on the same model whose deep deconstruction matches;
        if the questioner confirms, emit a RenameField instead of leaving
        it to be treated as a remove+add pair.
        """
        self.renamed_fields = {}
        for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
            # Scan to see if this is actually a rename!
            field_dec = self.deep_deconstruct(field)
            for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
                if rem_app_label == app_label and rem_model_name == model_name:
                    old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name))
                    # Apply any model rename to the old relation target so a
                    # renamed target doesn't mask the field rename.
                    if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]:
                        old_rel_to = old_field_dec[2]['to']
                        if old_rel_to in self.renamed_models_rel:
                            old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
                    if old_field_dec == field_dec:
                        if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
                            self.add_operation(
                                app_label,
                                operations.RenameField(
                                    model_name=model_name,
                                    old_name=rem_field_name,
                                    new_name=field_name,
                                )
                            )
                            # Reclassify the old key as kept so later passes
                            # don't see this field as removed.
                            self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
                            self.old_field_keys.add((app_label, model_name, field_name))
                            self.renamed_fields[app_label, model_name, field_name] = rem_field_name
                            break
def generate_added_fields(self):
"""
Fields that have been added
"""
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
self._generate_added_field(app_label, model_name, field_name)
def _generate_added_field(self, app_label, model_name, field_name):
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Fields that are foreignkeys/m2ms depend on stuff
dependencies = []
if field.remote_field and field.remote_field.model:
dependencies.extend(self._get_dependecies_for_foreign_key(field))
# You can't just add NOT NULL fields with no default or fields
# which don't allow empty strings as default.
preserve_default = True
if (not field.null and not field.has_default() and not field.many_to_many and
not (field.blank and field.empty_strings_allowed)):
field = field.clone()
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
preserve_default = False
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
def generate_removed_fields(self):
"""
Fields that have been removed.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
# We might need to depend on the removal of an
# order_with_respect_to or index/unique_together operation;
# this is safely ignored if there isn't one
dependencies=[
(app_label, model_name, field_name, "order_wrt_unset"),
(app_label, model_name, field_name, "foo_together_change"),
],
)
    def generate_altered_fields(self):
        """
        Make AlterField operations for fields that changed between states;
        a change between m2m and concrete falls back to remove + add.
        """
        for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)):
            # Did the field change?
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
            old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name)
            new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
            # Implement any model renames on relations; these are handled by RenameModel
            # so we need to exclude them from the comparison
            if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None):
                rename_key = (
                    new_field.remote_field.model._meta.app_label,
                    new_field.remote_field.model._meta.model_name,
                )
                if rename_key in self.renamed_models:
                    # Point the new field back at the pre-rename model so the
                    # deconstruction comparison below ignores the rename.
                    new_field.remote_field.model = old_field.remote_field.model
            old_field_dec = self.deep_deconstruct(old_field)
            new_field_dec = self.deep_deconstruct(new_field)
            if old_field_dec != new_field_dec:
                both_m2m = old_field.many_to_many and new_field.many_to_many
                neither_m2m = not old_field.many_to_many and not new_field.many_to_many
                if both_m2m or neither_m2m:
                    # Either both fields are m2m or neither is
                    preserve_default = True
                    # Going null=True -> null=False without a default needs a
                    # one-off value from the questioner for existing rows.
                    if (old_field.null and not new_field.null and not new_field.has_default() and
                            not new_field.many_to_many):
                        field = new_field.clone()
                        new_default = self.questioner.ask_not_null_alteration(field_name, model_name)
                        if new_default is not models.NOT_PROVIDED:
                            field.default = new_default
                            preserve_default = False
                    else:
                        field = new_field
                    self.add_operation(
                        app_label,
                        operations.AlterField(
                            model_name=model_name,
                            name=field_name,
                            field=field,
                            preserve_default=preserve_default,
                        )
                    )
                else:
                    # We cannot alter between m2m and concrete fields
                    self._generate_removed_field(app_label, model_name, field_name)
                    self._generate_added_field(app_label, model_name, field_name)
def _get_dependecies_for_foreign_key(self, field):
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.remote_field.model._meta.app_label
dep_object_name = field.remote_field.model._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
dependencies.append((
field.remote_field.through._meta.app_label,
field.remote_field.through._meta.object_name,
None,
True,
))
return dependencies
    def _generate_altered_foo_together(self, operation):
        """
        Shared implementation for unique_together / index_together changes.

        ``operation`` is the operation class (AlterUniqueTogether or
        AlterIndexTogether); its ``option_name`` names the model option
        being diffed between the old and new state.
        """
        option_name = operation.option_name
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            # We run the old version through the field renames to account for those
            old_value = old_model_state.options.get(option_name) or set()
            if old_value:
                old_value = {
                    tuple(
                        self.renamed_fields.get((app_label, model_name, n), n)
                        for n in unique
                    )
                    for unique in old_value
                }
            new_value = new_model_state.options.get(option_name) or set()
            if new_value:
                new_value = set(new_value)
            if old_value != new_value:
                # Any FK named in the new option value must be created before
                # the option can be altered, so depend on those fields.
                dependencies = []
                for foo_togethers in new_value:
                    for field_name in foo_togethers:
                        field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
                        if field.remote_field and field.remote_field.model:
                            dependencies.extend(self._get_dependecies_for_foreign_key(field))
                self.add_operation(
                    app_label,
                    operation(
                        name=model_name,
                        **{option_name: new_value}
                    ),
                    dependencies=dependencies,
                )
    def generate_altered_unique_together(self):
        """Make AlterUniqueTogether operations for changed unique_together."""
        self._generate_altered_foo_together(operations.AlterUniqueTogether)
    def generate_altered_index_together(self):
        """Make AlterIndexTogether operations for changed index_together."""
        self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
models_to_check = self.kept_model_keys.union(self.kept_proxy_keys).union(self.kept_unmanaged_keys)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_name = old_model_state.options.get('db_table')
new_db_table_name = new_model_state.options.get('db_table')
if old_db_table_name != new_db_table_name:
self.add_operation(
app_label,
operations.AlterModelTable(
name=model_name,
table=new_db_table_name,
)
)
def generate_altered_options(self):
"""
Works out if any non-schema-affecting options have changed and
makes an operation to represent them in state changes (in case Python
code in migrations needs them)
"""
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys
).union(
self.kept_unmanaged_keys
).union(
# unmanaged converted to managed
set(self.old_unmanaged_keys).intersection(self.new_model_keys)
).union(
# managed converted to unmanaged
set(self.old_model_keys).intersection(self.new_unmanaged_keys)
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = dict(
option for option in old_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
new_options = dict(
option for option in new_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
)
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if (old_model_state.options.get("order_with_respect_to") !=
new_model_state.options.get("order_with_respect_to")):
# Make sure it comes second if we're adding
# (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to"):
dependencies.append((
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
True,
))
# Actually generate the operation
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get('order_with_respect_to'),
),
dependencies=dependencies,
)
def generate_altered_managers(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.managers != new_model_state.managers:
self.add_operation(
app_label,
operations.AlterModelManagers(
name=model_name,
managers=new_model_state.managers,
)
)
    def arrange_for_graph(self, changes, graph, migration_name=None):
        """
        Takes in a result from changes() and a MigrationGraph,
        and fixes the names and dependencies of the changes so they
        extend the graph from the leaf nodes for each app.
        """
        leaves = graph.leaf_nodes()
        # Maps (app_label, old_name) -> (app_label, new_name) so dependencies
        # can be rewritten once all migrations have their final names.
        name_map = {}
        for app_label, migrations in list(changes.items()):
            if not migrations:
                continue
            # Find the app label's current leaf node
            app_leaf = None
            for leaf in leaves:
                if leaf[0] == app_label:
                    app_leaf = leaf
                    break
            # Do they want an initial migration for this app?
            if app_leaf is None and not self.questioner.ask_initial(app_label):
                # They don't.
                for migration in migrations:
                    name_map[(app_label, migration.name)] = (app_label, "__first__")
                # Safe: we iterate over a list() copy of changes.items().
                del changes[app_label]
                continue
            # Work out the next number in the sequence
            if app_leaf is None:
                next_number = 1
            else:
                next_number = (self.parse_number(app_leaf[1]) or 0) + 1
            # Name each migration
            for i, migration in enumerate(migrations):
                if i == 0 and app_leaf:
                    migration.dependencies.append(app_leaf)
                if i == 0 and not app_leaf:
                    new_name = "0001_%s" % migration_name if migration_name else "0001_initial"
                else:
                    # Cap the suggested name so filenames stay manageable.
                    new_name = "%04i_%s" % (
                        next_number,
                        migration_name or self.suggest_name(migration.operations)[:100],
                    )
                name_map[(app_label, migration.name)] = (app_label, new_name)
                next_number += 1
                migration.name = new_name
        # Now fix dependencies
        for app_label, migrations in changes.items():
            for migration in migrations:
                migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
        return changes
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
returns a modified set of changes which trims out as many migrations
that are not in app_labels as possible.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names are not guaranteed to be unique,
but we put some effort in to the fallback name to avoid VCS conflicts
if we can.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name_lower
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name_lower
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif len(ops) > 1:
if all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name_lower for o in ops))
return "auto_%s" % datetime.datetime.now().strftime("%Y%m%d_%H%M")
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
beginning of it. If no number found, returns None.
"""
match = re.match(r'^\d+', name)
if match:
return int(match.group())
return None
| |
# This module should be kept compatible with Python 2.1.
"""distutils.command.install_lib
Implements the Distutils 'install_lib' command
(install all Python modules)."""
__revision__ = "$Id: install_lib.py 81308 2010-05-18 23:37:50Z georg.brandl $"
import os
from types import IntType
import sys
from distutils.core import Command
from distutils.errors import DistutilsOptionError
# Extension for Python source files.
# Extension separator may be missing on very old platforms; default to ".".
PYTHON_SOURCE_EXTENSION = getattr(os, 'extsep', '.') + "py"
class install_lib (Command):
    # NOTE: this module is kept Python 2.1 compatible (see module header),
    # hence the old-style raise statement and IntType check below.
    description = "install all Python modules (extensions and pure Python)"
    # The byte-compilation options are a tad confusing.  Here are the
    # possible scenarios:
    #   1) no compilation at all (--no-compile --no-optimize)
    #   2) compile .pyc only (--compile --no-optimize; default)
    #   3) compile .pyc and "level 1" .pyo (--compile --optimize)
    #   4) compile "level 1" .pyo only (--no-compile --optimize)
    #   5) compile .pyc and "level 2" .pyo (--compile --optimize-more)
    #   6) compile "level 2" .pyo only (--no-compile --optimize-more)
    #
    # The UI for this is two option, 'compile' and 'optimize'.
    # 'compile' is strictly boolean, and only decides whether to
    # generate .pyc files.  'optimize' is three-way (0, 1, or 2), and
    # decides both whether to generate .pyo files and what level of
    # optimization to use.
    user_options = [
        ('install-dir=', 'd', "directory to install to"),
        ('build-dir=','b', "build directory (where to install from)"),
        ('force', 'f', "force installation (overwrite existing files)"),
        ('compile', 'c', "compile .py to .pyc [default]"),
        ('no-compile', None, "don't compile .py files"),
        ('optimize=', 'O',
         "also compile with optimization: -O1 for \"python -O\", "
         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
        ('skip-build', None, "skip the build steps"),
        ]
    boolean_options = ['force', 'compile', 'skip-build']
    # '--no-compile' is the inverted alias for 'compile'.
    negative_opt = {'no-compile' : 'compile'}
    def initialize_options (self):
        """Set every option to its 'not yet decided' default."""
        # let the 'install' command dictate our installation directory
        self.install_dir = None
        self.build_dir = None
        self.force = 0
        self.compile = None
        self.optimize = None
        self.skip_build = None
    def finalize_options (self):
        """Resolve unset options from the umbrella 'install' command and
        validate that 'optimize' is 0, 1, or 2."""
        # Get all the information we need to install pure Python modules
        # from the umbrella 'install' command -- build (source) directory,
        # install (target) directory, and whether to compile .py files.
        self.set_undefined_options('install',
                                   ('build_lib', 'build_dir'),
                                   ('install_lib', 'install_dir'),
                                   ('force', 'force'),
                                   ('compile', 'compile'),
                                   ('optimize', 'optimize'),
                                   ('skip_build', 'skip_build'),
                                  )
        if self.compile is None:
            self.compile = 1
        if self.optimize is None:
            self.optimize = 0
        # Command-line values arrive as strings; coerce and range-check.
        if type(self.optimize) is not IntType:
            try:
                self.optimize = int(self.optimize)
                if self.optimize not in (0, 1, 2):
                    raise AssertionError
            except (ValueError, AssertionError):
                raise DistutilsOptionError, "optimize must be 0, 1, or 2"
    def run (self):
        """Build (unless --skip-build), copy the build tree into place, and
        optionally byte-compile the installed pure-Python modules."""
        # Make sure we have built everything we need first
        self.build()
        # Install everything: simply dump the entire contents of the build
        # directory to the installation directory (that's the beauty of
        # having a build directory!)
        outfiles = self.install()
        # (Optionally) compile .py to .pyc
        if outfiles is not None and self.distribution.has_pure_modules():
            self.byte_compile(outfiles)
    # run ()
    # -- Top-level worker functions ------------------------------------
    # (called from 'run()')
    def build (self):
        """Run the build commands needed for the module types we ship."""
        if not self.skip_build:
            if self.distribution.has_pure_modules():
                self.run_command('build_py')
            if self.distribution.has_ext_modules():
                self.run_command('build_ext')
    def install (self):
        """Copy the build directory into the install directory; return the
        list of copied files, or None (with a warning) when the build
        directory does not exist."""
        if os.path.isdir(self.build_dir):
            outfiles = self.copy_tree(self.build_dir, self.install_dir)
        else:
            self.warn("'%s' does not exist -- no Python modules to install" %
                      self.build_dir)
            return
        return outfiles
    def byte_compile (self, files):
        """Byte-compile 'files' to .pyc and/or .pyo according to the
        compile/optimize options, honoring --dry-run."""
        if sys.dont_write_bytecode:
            self.warn('byte-compiling is disabled, skipping.')
            return
        from distutils.util import byte_compile
        # Get the "--root" directory supplied to the "install" command,
        # and use it as a prefix to strip off the purported filename
        # encoded in bytecode files.  This is far from complete, but it
        # should at least generate usable bytecode in RPM distributions.
        install_root = self.get_finalized_command('install').root
        # NOTE(review): the .pyc pass does not forward verbose= while the
        # .pyo pass does -- looks unintentional, preserved as-is.
        if self.compile:
            byte_compile(files, optimize=0,
                         force=self.force, prefix=install_root,
                         dry_run=self.dry_run)
        if self.optimize > 0:
            byte_compile(files, optimize=self.optimize,
                         force=self.force, prefix=install_root,
                         verbose=self.verbose, dry_run=self.dry_run)
    # -- Utility methods -----------------------------------------------
    def _mutate_outputs (self, has_any, build_cmd, cmd_option, output_dir):
        """Rewrite 'build_cmd's output paths to live under 'output_dir'.

        Returns [] when 'has_any' is false; otherwise strips the build
        directory (named by 'cmd_option' on the build command) from each
        output and re-roots it at 'output_dir'."""
        if not has_any:
            return []
        build_cmd = self.get_finalized_command(build_cmd)
        build_files = build_cmd.get_outputs()
        build_dir = getattr(build_cmd, cmd_option)
        # +len(os.sep) also strips the path separator after the build dir.
        prefix_len = len(build_dir) + len(os.sep)
        outputs = []
        for file in build_files:
            outputs.append(os.path.join(output_dir, file[prefix_len:]))
        return outputs
    # _mutate_outputs ()
    def _bytecode_filenames (self, py_filenames):
        """Return the .pyc/.pyo names that would accompany 'py_filenames'."""
        bytecode_files = []
        for py_file in py_filenames:
            # Since build_py handles package data installation, the
            # list of outputs can contain more than just .py files.
            # Make sure we only report bytecode for the .py files.
            ext = os.path.splitext(os.path.normcase(py_file))[1]
            if ext != PYTHON_SOURCE_EXTENSION:
                continue
            if self.compile:
                bytecode_files.append(py_file + "c")
            if self.optimize > 0:
                bytecode_files.append(py_file + "o")
        return bytecode_files
    # -- External interface --------------------------------------------
    # (called by outsiders)
    def get_outputs (self):
        """Return the list of files that would be installed if this command
        were actually run.  Not affected by the "dry-run" flag or whether
        modules have actually been built yet.
        """
        pure_outputs = \
            self._mutate_outputs(self.distribution.has_pure_modules(),
                                 'build_py', 'build_lib',
                                 self.install_dir)
        if self.compile:
            bytecode_outputs = self._bytecode_filenames(pure_outputs)
        else:
            bytecode_outputs = []
        ext_outputs = \
            self._mutate_outputs(self.distribution.has_ext_modules(),
                                 'build_ext', 'build_lib',
                                 self.install_dir)
        return pure_outputs + bytecode_outputs + ext_outputs
    # get_outputs ()
    def get_inputs (self):
        """Get the list of files that are input to this command, ie. the
        files that get installed as they are named in the build tree.
        The files in this list correspond one-to-one to the output
        filenames returned by 'get_outputs()'.
        """
        inputs = []
        if self.distribution.has_pure_modules():
            build_py = self.get_finalized_command('build_py')
            inputs.extend(build_py.get_outputs())
        if self.distribution.has_ext_modules():
            build_ext = self.get_finalized_command('build_ext')
            inputs.extend(build_ext.get_outputs())
        return inputs
# class install_lib
| |
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import textwrap
import ddt
import mock
import pep8
from manila.hacking import checks
from manila import test
@ddt.ddt
class HackingTestCase(test.TestCase):
    """Hacking test cases
    This class tests the hacking checks in manila.hacking.checks by passing
    strings to the check methods like the pep8/flake8 parser would. The parser
    loops over each line in the file and then passes the parameters to the
    check method. The parameter names in the check method dictate what type of
    object is passed to the check method. The parameter types are::
        logical_line: A processed line with the following modifications:
            - Multi-line statements converted to a single line.
            - Stripped left and right.
            - Contents of strings replaced with "xxx" of same length.
            - Comments removed.
        physical_line: Raw line of text from the input file.
        lines: a list of the raw lines from the input file
        tokens: the tokens that contribute to this logical line
        line_number: line number in the input file
        total_lines: number of lines in the input file
        blank_lines: blank lines before this one
        indent_char: indentation character in this file (" " or "\t")
        indent_level: indentation (with tabs expanded to multiples of 8)
        previous_indent_level: indentation on previous line
        previous_logical: previous logical line
        filename: Path of the file being run through pep8
    When running a test on a check method the return will be False/None if
    there is no violation in the sample input. If there is an error a tuple is
    returned with a position in the line, and a message. So to check the result
    just assertTrue if the check is expected to fail and assertFalse if it
    should pass.
    """
    # Checks yield one item per violation; len(list(...)) counts violations.
    def test_no_translate_debug_logs(self):
        self.assertEqual(1, len(list(checks.no_translate_debug_logs(
            "LOG.debug(_('foo'))", "manila/scheduler/foo.py"))))
        self.assertEqual(0, len(list(checks.no_translate_debug_logs(
            "LOG.debug('foo')", "manila/scheduler/foo.py"))))
        self.assertEqual(0, len(list(checks.no_translate_debug_logs(
            "LOG.info(_('foo'))", "manila/scheduler/foo.py"))))
    # The check is stateful per filename: once an explicit `_` import is
    # seen for a file, later uses of `_` in that file stop being flagged.
    def test_check_explicit_underscore_import(self):
        self.assertEqual(1, len(list(checks.check_explicit_underscore_import(
            "LOG.info(_('My info message'))",
            "manila/tests/other_files.py"))))
        self.assertEqual(1, len(list(checks.check_explicit_underscore_import(
            "msg = _('My message')",
            "manila/tests/other_files.py"))))
        self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
            "from manila.i18n import _",
            "manila/tests/other_files.py"))))
        self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
            "LOG.info(_('My info message'))",
            "manila/tests/other_files.py"))))
        self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
            "msg = _('My message')",
            "manila/tests/other_files.py"))))
        self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
            "from manila.i18n import _LE, _, _LW",
            "manila/tests/other_files2.py"))))
        self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
            "msg = _('My message')",
            "manila/tests/other_files2.py"))))
        self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
            "_ = translations.ugettext",
            "manila/tests/other_files3.py"))))
        self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
            "msg = _('My message')",
            "manila/tests/other_files3.py"))))
        # Complete code coverage by falling through all checks
        self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
            "LOG.info('My info message')",
            "manila.tests.unit/other_files4.py"))))
        self.assertEqual(0, len(list(checks.check_explicit_underscore_import(
            "from manila.i18n import _LW",
            "manila.tests.unit/other_files5.py"))))
        self.assertEqual(1, len(list(checks.check_explicit_underscore_import(
            "msg = _('My message')",
            "manila.tests.unit/other_files5.py"))))
    # We are patching pep8 so that only the check under test is actually
    # installed.
    @mock.patch('pep8._checks',
                {'physical_line': {}, 'logical_line': {}, 'tree': {}})
    def _run_check(self, code, checker, filename=None):
        # Returns pep8's deferred report entries: (line, col, code, ...)
        # tuples, sorted for deterministic comparison.
        pep8.register_check(checker)
        lines = textwrap.dedent(code).strip().splitlines(True)
        checker = pep8.Checker(filename=filename, lines=lines)
        checker.check_all()
        checker.report._deferred_print.sort()
        return checker.report._deferred_print
    def _assert_has_errors(self, code, checker, expected_errors=None,
                           filename=None):
        # Compare only (line, col, code); drop the message text.
        actual_errors = [e[:3] for e in
                         self._run_check(code, checker, filename)]
        self.assertEqual(expected_errors or [], actual_errors)
    def _assert_has_no_errors(self, code, checker, filename=None):
        self._assert_has_errors(code, checker, filename=filename)
    def test_logging_format_no_tuple_arguments(self):
        checker = checks.CheckLoggingFormatArgs
        code = """
               import logging
               LOG = logging.getLogger()
               LOG.info("Message without a second argument.")
               LOG.critical("Message with %s arguments.", 'two')
               LOG.debug("Volume %s caught fire and is at %d degrees C and"
                         " climbing.", 'volume1', 500)
               """
        self._assert_has_no_errors(code, checker)
    # Parametrized over every LOG method the checker knows about.
    @ddt.data(*checks.CheckLoggingFormatArgs.LOG_METHODS)
    def test_logging_with_tuple_argument(self, log_method):
        checker = checks.CheckLoggingFormatArgs
        code = """
               import logging
               LOG = logging.getLogger()
               LOG.{0}("Volume %s caught fire and is at %d degrees C and "
                       "climbing.", ('volume1', 500))
               """
        self._assert_has_errors(code.format(log_method), checker,
                                expected_errors=[(4, 21, 'M310')])
    def test_str_on_exception(self):
        checker = checks.CheckForStrUnicodeExc
        code = """
               def f(a, b):
                   try:
                       p = str(a) + str(b)
                   except ValueError as e:
                       p = str(e)
                   return p
               """
        errors = [(5, 16, 'M325')]
        self._assert_has_errors(code, checker, expected_errors=errors)
    def test_no_str_unicode_on_exception(self):
        checker = checks.CheckForStrUnicodeExc
        code = """
               def f(a, b):
                   try:
                       p = unicode(a) + str(b)
                   except ValueError as e:
                       p = e
                   return p
               """
        self._assert_has_no_errors(code, checker)
    def test_unicode_on_exception(self):
        checker = checks.CheckForStrUnicodeExc
        code = """
               def f(a, b):
                   try:
                       p = str(a) + str(b)
                   except ValueError as e:
                       p = unicode(e)
                   return p
               """
        errors = [(5, 20, 'M325')]
        self._assert_has_errors(code, checker, expected_errors=errors)
    def test_str_on_multiple_exceptions(self):
        checker = checks.CheckForStrUnicodeExc
        code = """
               def f(a, b):
                   try:
                       p = str(a) + str(b)
                   except ValueError as e:
                       try:
                           p = unicode(a) + unicode(b)
                       except ValueError as ve:
                           p = str(e) + str(ve)
                       p = e
                   return p
               """
        errors = [(8, 20, 'M325'), (8, 29, 'M325')]
        self._assert_has_errors(code, checker, expected_errors=errors)
    def test_str_unicode_on_multiple_exceptions(self):
        checker = checks.CheckForStrUnicodeExc
        code = """
               def f(a, b):
                   try:
                       p = str(a) + str(b)
                   except ValueError as e:
                       try:
                           p = unicode(a) + unicode(b)
                       except ValueError as ve:
                           p = str(e) + unicode(ve)
                       p = str(e)
                   return p
               """
        errors = [(8, 20, 'M325'), (8, 33, 'M325'), (9, 16, 'M325')]
        self._assert_has_errors(code, checker, expected_errors=errors)
    def test_trans_add(self):
        checker = checks.CheckForTransAdd
        code = """
               def fake_tran(msg):
                   return msg
               _ = fake_tran
               _LI = _
               _LW = _
               _LE = _
               _LC = _
               def f(a, b):
                   msg = _('test') + 'add me'
                   msg = _LI('test') + 'add me'
                   msg = _LW('test') + 'add me'
                   msg = _LE('test') + 'add me'
                   msg = _LC('test') + 'add me'
                   msg = 'add to me' + _('test')
                   return msg
               """
        # Python 3.4.0 introduced a change to the column calculation during AST
        # parsing. This was reversed in Python 3.4.3, hence the version-based
        # expected value calculation. See #1499743 for more background.
        if sys.version_info < (3, 4, 0) or sys.version_info >= (3, 4, 3):
            errors = [(13, 10, 'M326'), (14, 10, 'M326'), (15, 10, 'M326'),
                      (16, 10, 'M326'), (17, 10, 'M326'), (18, 24, 'M326')]
        else:
            errors = [(13, 11, 'M326'), (14, 13, 'M326'), (15, 13, 'M326'),
                      (16, 13, 'M326'), (17, 13, 'M326'), (18, 25, 'M326')]
        self._assert_has_errors(code, checker, expected_errors=errors)
        # Plain string concatenation must not be flagged.
        code = """
               def f(a, b):
                   msg = 'test' + 'add me'
                   return msg
               """
        errors = []
        self._assert_has_errors(code, checker, expected_errors=errors)
    def test_dict_constructor_with_list_copy(self):
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            "    dict([(i, connect_info[i])"))))
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            "    attrs = dict([(k, _from_json(v))"))))
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            "    type_names = dict((value, key) for key, value in"))))
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            "   dict((value, key) for key, value in"))))
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            "foo(param=dict((k, v) for k, v in bar.items()))"))))
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            " dict([[i,i] for i in range(3)])"))))
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            "  dd = dict([i,i] for i in range(3))"))))
        self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
            "        create_kwargs = dict(snapshot=snapshot,"))))
        self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
            "        self._render_dict(xml, data_el, data.__dict__)"))))
    def test_no_xrange(self):
        self.assertEqual(1, len(list(checks.no_xrange("xrange(45)"))))
        self.assertEqual(0, len(list(checks.no_xrange("range(45)"))))
    def test_validate_assertTrue(self):
        test_value = True
        self.assertEqual(0, len(list(checks.validate_assertTrue(
            "assertTrue(True)"))))
        self.assertEqual(1, len(list(checks.validate_assertTrue(
            "assertEqual(True, %s)" % test_value))))
    def test_validate_assertIsNone(self):
        test_value = None
        self.assertEqual(0, len(list(checks.validate_assertIsNone(
            "assertIsNone(None)"))))
        self.assertEqual(1, len(list(checks.validate_assertIsNone(
            "assertEqual(None, %s)" % test_value))))
| |
"""
Test the hashing module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import nose
import time
import hashlib
import tempfile
import os
import gc
import io
import collections
from ..hashing import hash
from ..func_inspect import filter_args
from ..memory import Memory
from .common import np, with_numpy
from .test_memory import env as test_memory_env
from .test_memory import setup_module as test_memory_setup_func
from .test_memory import teardown_module as test_memory_teardown_func
try:
    # Python 2/Python 3 compat: under Python 3 the name `unicode` does not
    # exist, so fall back to an identity function (str is already unicode).
    unicode('str')
except NameError:
    unicode = lambda s: s
###############################################################################
# Helper functions for the tests
def time_func(func, *args):
    """ Return the best-of-three wall-clock time of func(*args).
    """
    def one_run():
        start = time.time()
        func(*args)
        return time.time() - start
    return min(one_run() for _ in range(3))
def relative_time(func1, func2, *args):
    """ Return the relative time between func1 and func2 applied on
        *args (half the absolute difference over the sum; 0 <= r <= 0.5).
    """
    first = time_func(func1, *args)
    second = time_func(func2, *args)
    return 0.5 * (abs(first - second) / (first + second))
class Klass(object):
    """Plain class used to hash bound methods of distinct instances."""

    def f(self, x):
        """Identity method: return x unchanged."""
        return x
class KlassWithCachedMethod(object):
    """Like Klass, but f is wrapped by a Memory cache at construction."""

    def __init__(self):
        # Cache directory comes from the shared test_memory fixture.
        self.f = Memory(cachedir=test_memory_env['dir']).cache(self.f)

    def f(self, x):
        """Identity method: return x unchanged."""
        return x
###############################################################################
# Tests
def test_trival_hash():
    """ Smoke test hash on various types.
    """
    samples = [1, 1., 1 + 1j,
               'a',
               (1, ), [1, ], {1:1},
               None,
               ]
    for left in samples:
        for right in samples:
            # Hashes must collide exactly when both are the same object.
            yield (nose.tools.assert_equal,
                   hash(left) == hash(right),
                   left is right)
def test_hash_methods():
    # Check that hashing instance methods works
    stream = io.StringIO(unicode('a'))
    nose.tools.assert_equal(hash(stream.flush), hash(stream.flush))
    long_deque = collections.deque(range(10))
    short_deque = collections.deque(range(9))
    nose.tools.assert_not_equal(hash(long_deque.extend),
                                hash(short_deque.extend))
@with_numpy
def test_hash_numpy():
    """ Test hashing with numpy arrays.
    """
    rnd = np.random.RandomState(0)
    arr1 = rnd.random_sample((10, 10))
    arr2 = arr1.copy()
    arr3 = arr2.copy()
    arr3[0] += 1
    arrays = (arr1, arr2, arr3)
    for left in arrays:
        for right in arrays:
            # Hashes agree exactly when the contents agree.
            yield (nose.tools.assert_equal,
                   hash(left) == hash(right),
                   np.all(left == right))
    d1 = {1: arr1, 2: arr1}
    d2 = {1: arr2, 2: arr2}
    yield nose.tools.assert_equal, hash(d1), hash(d2)
    d3 = {1: arr2, 2: arr3}
    yield nose.tools.assert_not_equal, hash(d1), hash(d3)
    # A transposed view has different layout and must hash differently.
    yield nose.tools.assert_not_equal, hash(arr1), hash(arr1.T)
@with_numpy
def test_hash_memmap():
    """ Check that memmap and arrays hash identically if coerce_mmap is
        True.
    """
    filename = tempfile.mktemp()
    try:
        m = np.memmap(filename, shape=(10, 10), mode='w+')
        a = np.asarray(m)
        # With coerce_mmap=True the hashes must match; without it they differ.
        for coerce_mmap in (False, True):
            yield (nose.tools.assert_equal,
                   hash(a, coerce_mmap=coerce_mmap)
                   == hash(m, coerce_mmap=coerce_mmap),
                   coerce_mmap)
    finally:
        if 'm' in locals():
            del m
            # Force a garbage-collection cycle, to be certain that the
            # object is deleted, and we don't run in a problem under
            # Windows with a file handle still open.
            gc.collect()
            try:
                os.unlink(filename)
            except OSError as e:
                # Under windows, some files don't get erased.
                if not os.name == 'nt':
                    raise e
@with_numpy
def test_hash_numpy_performance():
    """ Check the performance of hashing numpy arrays:
        In [22]: a = np.random.random(1000000)
        In [23]: %timeit hashlib.md5(a).hexdigest()
        100 loops, best of 3: 20.7 ms per loop
        In [24]: %timeit hashlib.md5(pickle.dumps(a, protocol=2)).hexdigest()
        1 loops, best of 3: 73.1 ms per loop
        In [25]: %timeit hashlib.md5(cPickle.dumps(a, protocol=2)).hexdigest()
        10 loops, best of 3: 53.9 ms per loop
        In [26]: %timeit hash(a)
        100 loops, best of 3: 20.8 ms per loop
    """
    rnd = np.random.RandomState(0)
    a = rnd.random_sample(1000000)
    if hasattr(np, 'getbuffer'):
        # Python 2 / old numpy: raw buffer access via np.getbuffer.
        getbuffer = np.getbuffer
    else:
        # Python 3 has no np.getbuffer; memoryview is the equivalent.
        getbuffer = memoryview
    md5_hash = lambda x: hashlib.md5(getbuffer(x)).hexdigest()
    # hash() should be within 10% of a raw md5 over the buffer.
    relative_diff = relative_time(md5_hash, hash, a)
    yield nose.tools.assert_true, relative_diff < 0.1
    # Check that hashing a tuple of 3 arrays takes approximately
    # 3 times as much as hashing one array
    time_hashlib = 3 * time_func(md5_hash, a)
    time_hash = time_func(hash, (a, a, a))
    relative_diff = 0.5 * (abs(time_hash - time_hashlib)
                           / (time_hash + time_hashlib))
    yield nose.tools.assert_true, relative_diff < 0.2
def test_bound_methods_hash():
    """ The same method bound to two different instances of one class
        must hash to the same value.
    """
    first, second = Klass(), Klass()
    nose.tools.assert_equal(
        hash(filter_args(first.f, [], (1, ))),
        hash(filter_args(second.f, [], (1, ))))
@nose.tools.with_setup(test_memory_setup_func, test_memory_teardown_func)
def test_bound_cached_methods_hash():
    """ The same _cached_ method bound to two different instances of one
        class must hash to the same value.
    """
    first, second = KlassWithCachedMethod(), KlassWithCachedMethod()
    nose.tools.assert_equal(
        hash(filter_args(first.f.func, [], (1, ))),
        hash(filter_args(second.f.func, [], (1, ))))
@with_numpy
def test_hash_object_dtype():
    """ Make sure that ndarrays with dtype `object' hash correctly."""
    def ragged_array():
        # A ragged object array: each cell holds a different-length arange.
        return np.array([np.arange(i) for i in range(6)], dtype=object)

    nose.tools.assert_equal(hash(ragged_array()), hash(ragged_array()))
def test_dict_hash():
    # Dictionaries must hash identically no matter what key insertion /
    # iteration order the interpreter happens to use.
    k = KlassWithCachedMethod()
    subjects = ('#s12069', '#s12158', '#s12258', '#s12277', '#s12300',
                '#s12401', '#s12430', '#s13817', '#s13903', '#s13916',
                '#s13981', '#s13982', '#s13983')
    # Same key/value pairs as spelling the literal out by hand.
    d = {'{}__c_maps.nii.gz'.format(subject): [33] for subject in subjects}
    a = k.f(d)
    b = k.f(a)
    nose.tools.assert_equal(hash(a), hash(b))
def test_set_hash():
    # Sets must hash identically no matter what iteration order the
    # interpreter happens to use.
    k = KlassWithCachedMethod()
    subjects = ('#s12069', '#s12158', '#s12258', '#s12277', '#s12300',
                '#s12401', '#s12430', '#s13817', '#s13903', '#s13916',
                '#s13981', '#s13982', '#s13983')
    # Same members as spelling the set literal out by hand.
    s = {'{}__c_maps.nii.gz'.format(subject) for subject in subjects}
    a = k.f(s)
    b = k.f(a)
    nose.tools.assert_equal(hash(a), hash(b))
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import variable_scope as vs
def stack_bidirectional_rnn(cells_fw,
                            cells_bw,
                            inputs,
                            initial_states_fw=None,
                            initial_states_bw=None,
                            dtype=None,
                            sequence_length=None,
                            scope=None):
  """Creates a bidirectional recurrent neural network.

  Stacks several bidirectional rnn layers: each layer's depth-concatenated
  forward/backward outputs feed the next layer, so forward and backward
  information is shared between layers (which tf.bidirectional_rnn alone
  does not allow). The input_size of the first forward and backward cells
  must match. By default the initial state for both directions is zero and
  no intermediate states are returned.

  As described in https://arxiv.org/abs/1303.5778

  Args:
    cells_fw: List of instances of RNNCell, one per layer, to be used for
      the forward direction.
    cells_bw: List of instances of RNNCell, one per layer, to be used for
      the backward direction.
    inputs: A length T list of inputs, each a tensor of shape
      [batch_size, input_size], or a nested tuple of such elements.
    initial_states_fw: (optional) A list of the initial states (one per
      layer) for the forward RNN. Each tensor must have an appropriate type
      and shape `[batch_size, cell_fw.state_size]`.
    initial_states_bw: (optional) Same as for `initial_states_fw`, but using
      the corresponding properties of `cells_bw`.
    dtype: (optional) The data type for the initial state. Required if
      either of the initial states are not provided.
    sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
      containing the actual lengths for each of the sequences.
    scope: VariableScope for the created subgraph; defaults to None.

  Returns:
    A tuple (outputs, output_states_fw, output_states_bw) where:
      outputs is a length `T` list of outputs (one for each input), which
        are depth-concatenated forward and backward outputs.
      output_states_fw is the final states, one tensor per layer, of the
        forward rnn.
      output_states_bw is the final states, one tensor per layer, of the
        backward rnn.

  Raises:
    TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
    ValueError: If inputs is None, not a list or an empty list.
  """
  # Validation order is significant: cell checks first, then state checks.
  if not cells_fw:
    raise ValueError("Must specify at least one fw cell for BidirectionalRNN.")
  if not cells_bw:
    raise ValueError("Must specify at least one bw cell for BidirectionalRNN.")
  if not isinstance(cells_fw, list):
    raise ValueError("cells_fw must be a list of RNNCells (one per layer).")
  if not isinstance(cells_bw, list):
    raise ValueError("cells_bw must be a list of RNNCells (one per layer).")
  if len(cells_fw) != len(cells_bw):
    raise ValueError("Forward and Backward cells must have the same depth.")

  def _malformed_states(states, cells):
    # An initial-states argument, when given, must be a list with exactly
    # one entry per cell.
    return states is not None and (not isinstance(states, list) or
                                   len(states) != len(cells))

  if _malformed_states(initial_states_fw, cells_fw):
    raise ValueError(
        "initial_states_fw must be a list of state tensors (one per layer).")
  if _malformed_states(initial_states_bw, cells_bw):
    raise ValueError(
        "initial_states_bw must be a list of state tensors (one per layer).")

  output_states_fw = []
  output_states_bw = []
  prev_layer = inputs  # Layer i consumes the concatenated outputs of i-1.

  with vs.variable_scope(scope or "stack_bidirectional_rnn"):
    for layer, (cell_fw, cell_bw) in enumerate(zip(cells_fw, cells_bw)):
      init_fw = initial_states_fw[layer] if initial_states_fw else None
      init_bw = initial_states_bw[layer] if initial_states_bw else None
      with vs.variable_scope("cell_%d" % layer) as cell_scope:
        prev_layer, state_fw, state_bw = rnn.static_bidirectional_rnn(
            cell_fw,
            cell_bw,
            prev_layer,
            initial_state_fw=init_fw,
            initial_state_bw=init_bw,
            sequence_length=sequence_length,
            dtype=dtype,
            scope=cell_scope)
      output_states_fw.append(state_fw)
      output_states_bw.append(state_bw)

  return prev_layer, tuple(output_states_fw), tuple(output_states_bw)
def stack_bidirectional_dynamic_rnn(cells_fw,
                                    cells_bw,
                                    inputs,
                                    initial_states_fw=None,
                                    initial_states_bw=None,
                                    dtype=None,
                                    sequence_length=None,
                                    parallel_iterations=None,
                                    time_major=False,
                                    scope=None):
  """Creates a dynamic bidirectional recurrent neural network.

  Stacks several bidirectional rnn layers: each layer's depth-concatenated
  forward/backward outputs feed the next layer, so forward and backward
  information is shared between layers (which tf.bidirectional_rnn alone
  does not allow). The input_size of the first forward and backward cells
  must match. By default the initial state for both directions is zero and
  no intermediate states are returned.

  Args:
    cells_fw: List of instances of RNNCell, one per layer, to be used for
      the forward direction.
    cells_bw: List of instances of RNNCell, one per layer, to be used for
      the backward direction.
    inputs: The RNN inputs. this must be a tensor of shape:
      `[batch_size, max_time, ...]`, or a nested tuple of such elements.
    initial_states_fw: (optional) A list of the initial states (one per
      layer) for the forward RNN. Each tensor must have an appropriate type
      and shape `[batch_size, cell_fw.state_size]`.
    initial_states_bw: (optional) Same as for `initial_states_fw`, but using
      the corresponding properties of `cells_bw`.
    dtype: (optional) The data type for the initial state. Required if
      either of the initial states are not provided.
    sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
      containing the actual lengths for each of the sequences.
    parallel_iterations: (Default: 32). The number of iterations to run in
      parallel. Those operations which do not have any temporal dependency
      and can be run in parallel, will be. This parameter trades off
      time for space. Values >> 1 use more memory but take less time,
      while smaller values use less memory but computations take longer.
    time_major: The shape format of the inputs and outputs Tensors. If true,
      these Tensors must be shaped [max_time, batch_size, depth]. If false,
      these Tensors must be shaped [batch_size, max_time, depth]. Using
      time_major = True is a bit more efficient because it avoids transposes
      at the beginning and end of the RNN calculation. However, most
      TensorFlow data is batch-major, so by default this function accepts
      input and emits output in batch-major form.
    scope: VariableScope for the created subgraph; defaults to None.

  Returns:
    A tuple (outputs, output_state_fw, output_state_bw) where:
      outputs: Output `Tensor` shaped:
        `[batch_size, max_time, layers_output]`. Where layers_output
        are depth-concatenated forward and backward outputs.
      output_states_fw is the final states, one tensor per layer, of the
        forward rnn.
      output_states_bw is the final states, one tensor per layer, of the
        backward rnn.

  Raises:
    TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
    ValueError: If inputs is `None`.
  """
  # Validation order is significant: cell checks first, then state checks.
  if not cells_fw:
    raise ValueError("Must specify at least one fw cell for BidirectionalRNN.")
  if not cells_bw:
    raise ValueError("Must specify at least one bw cell for BidirectionalRNN.")
  if not isinstance(cells_fw, list):
    raise ValueError("cells_fw must be a list of RNNCells (one per layer).")
  if not isinstance(cells_bw, list):
    raise ValueError("cells_bw must be a list of RNNCells (one per layer).")
  if len(cells_fw) != len(cells_bw):
    raise ValueError("Forward and Backward cells must have the same depth.")

  def _malformed_states(states, cells):
    # An initial-states argument, when given, must be a list with exactly
    # one entry per cell.
    return states is not None and (not isinstance(states, list) or
                                   len(states) != len(cells))

  if _malformed_states(initial_states_fw, cells_fw):
    raise ValueError(
        "initial_states_fw must be a list of state tensors (one per layer).")
  if _malformed_states(initial_states_bw, cells_bw):
    raise ValueError(
        "initial_states_bw must be a list of state tensors (one per layer).")

  output_states_fw = []
  output_states_bw = []
  prev_layer = inputs  # Layer i consumes the concatenated outputs of i-1.

  # NOTE: scope name intentionally matches the static variant for variable
  # compatibility.
  with vs.variable_scope(scope or "stack_bidirectional_rnn"):
    for layer, (cell_fw, cell_bw) in enumerate(zip(cells_fw, cells_bw)):
      init_fw = initial_states_fw[layer] if initial_states_fw else None
      init_bw = initial_states_bw[layer] if initial_states_bw else None
      with vs.variable_scope("cell_%d" % layer):
        outputs, (state_fw, state_bw) = rnn.bidirectional_dynamic_rnn(
            cell_fw,
            cell_bw,
            prev_layer,
            initial_state_fw=init_fw,
            initial_state_bw=init_bw,
            sequence_length=sequence_length,
            parallel_iterations=parallel_iterations,
            dtype=dtype,
            time_major=time_major)
        # Concat the forward and backward outputs to form the next input.
        prev_layer = array_ops.concat(outputs, 2)
      output_states_fw.append(state_fw)
      output_states_bw.append(state_bw)

  return prev_layer, tuple(output_states_fw), tuple(output_states_bw)
| |
import unittest
import datetime
from PIL import Image, ImageDraw
from tmstiler.rtm import RasterTileManager
SPHERICAL_MERCATOR_SRID = 3857  # google maps projection


class Point:
    """Bare-bones point: x/y coordinates tagged with a spatial reference id."""

    def __init__(self, x, y, srid=SPHERICAL_MERCATOR_SRID):
        self.srid = srid
        self.x = x
        self.y = y
class DummyMeasurement:
    """Stand-in measurement record: a located, dated value with a count."""

    def __init__(self, location, date, counts, value):
        # Plain attribute assignment so instances mimic ORM-style rows.
        self.value = value
        self.counts = counts
        self.date = date
        self.location = location
class Legend:
    """Maps a measurement value to a CSS-style color string."""

    def get_color_str(self, value):
        """Return the color for ``value`` as an rgb/hsl color string.

        Supported formats:
            rgb(255,0,0)
            rgb(100%,0%,0%)
            hsl(hue, saturation%, lightness%)
        where hue is an angle between 0 and 360 (red=0, green=120,
        blue=240), saturation runs 0% (gray) to 100% (full color), and
        lightness runs 0% (black) through 50% (normal) to 100% (white).
        For example, hsl(0,100%,50%) is pure red.
        """
        return "hsl(0,100%,50%)"  # pure red for now...
class TestRasterTileManager(unittest.TestCase):
    """Tests for RasterTileManager tile extents and coordinate transforms.

    Expected extent values were taken from:
    http://www.maptiler.org/google-maps-coordinates-tile-bounds-projection/
    """

    def _assert_extent_equal(self, z, tilex, tiley, expected_extent, places):
        """Assert each coordinate of the computed tile extent matches
        ``expected_extent`` when rounded to ``places`` decimal places.

        Extents are (xmin, ymin, xmax, ymax) in spherical-mercator meters.
        """
        rtm = RasterTileManager()
        actual_extent = rtm.tile_sphericalmercator_extent(z, tilex, tiley)
        coord_type = {0: "xmin", 1: "ymin", 2: "xmax", 3: "ymax"}
        for idx, (actual, expected) in enumerate(zip(actual_extent,
                                                     expected_extent)):
            msg = "actual_{}({}) != expected_{}({})\nNumber Tiles at zoom:{}".format(
                coord_type[idx],
                round(actual, places),
                coord_type[idx],
                round(expected, places),
                rtm.tiles_per_dimension(z))
            msg += "\nSphericalMercator (xmax, ymax): {}, {}".format(
                rtm.spherical_mercator_xmax, rtm.spherical_mercator_ymax)
            msg += "\nzoom: {}".format(z)
            msg += "\ntilex: {}".format(tilex)
            msg += "\ntiley: {}".format(tiley)
            msg += "\nactual extents: {}".format(actual_extent)
            msg += "\nexpected extents: {}".format(expected_extent)
            self.assertTrue(round(actual, places) == round(expected, places),
                            msg)

    def test_tile_sphericalmercator_extent_munich_z8(self):
        """
        Test upper-right coord
        """
        # Munich (spherical mercator)
        # 11.5804, 48.1394
        # x = 1289124.2311824248
        # y = 6130077.43992735
        self._assert_extent_equal(
            z=8, tilex=136, tiley=167,
            expected_extent=(1252344.271424327, 6105178.323193599,
                             1408887.3053523675, 6261721.357121639),
            places=2)

    def test_tile_sphericalmercator_extent_sydney_z5(self):
        """
        Test lower-right coord
        """
        self._assert_extent_equal(
            z=5, tilex=29, tiley=12,
            expected_extent=(16280475.528516259, -5009377.085697312,
                             17532819.79994059, -3757032.814272983),
            places=2)

    def test_tile_sphericalmercator_extent_santiago_z7(self):
        """
        Test lower-left coord
        """
        self._assert_extent_equal(
            z=7, tilex=38, tiley=51,
            expected_extent=(-8140237.7642581295, -4070118.8821290657,
                             -7827151.696402049, -3757032.814272983),
            places=1)

    def test_tile_sphericalmercator_extent_chicago_z6(self):
        """
        Test upper-left coord
        """
        self._assert_extent_equal(
            z=6, tilex=16, tiley=40,
            expected_extent=(-10018754.171394622, 5009377.085697312,
                             -9392582.035682458, 5635549.221409474),
            places=1)

    def test_tile_sphericalmercator_extent_fukushima_z10(self):
        self._assert_extent_equal(
            z=10, tilex=911, tiley=626,
            expected_extent=(15615167.634322088, 4461476.466949169,
                             15654303.392804097, 4500612.225431178),
            places=1)

    def _assert_left_half_red(self, zoom, tilex, tiley, pixel_size_meters,
                              tile_pixel_size=256):
        """Render dummy measurements covering the left half of the tile via
        sphericalmercator_to_pixel() and assert that at least 48% of the
        resulting image is red."""
        rtmgr = RasterTileManager()
        tile_extent = rtmgr.tile_sphericalmercator_extent(zoom, tilex, tiley)
        xmin, ymin, xmax, ymax = tile_extent
        # x halfway point -- measurements are created only left of it.
        halfx = xmin + (xmax - xmin) / 2

        # Create DummyMeasurement() objects on a grid over the left half,
        # anchored at the tile's upper-left (NW) corner.
        d = datetime.date(2014, 11, 28)
        temp_measurements = []
        x = xmin
        while x <= halfx:
            y = ymax
            while y >= ymin:
                point = Point(x, y, srid=SPHERICAL_MERCATOR_SRID)
                temp_measurements.append(
                    DummyMeasurement(location=point, date=d, counts=50,
                                     value=1))
                y -= pixel_size_meters
            x += pixel_size_meters

        # Create tile image from data; start fully transparent.
        tile_image = Image.new("RGBA",
                               (tile_pixel_size, tile_pixel_size),
                               (255, 255, 255, 0))
        draw = ImageDraw.Draw(tile_image)
        legend = Legend()
        for pixel in temp_measurements:
            color_str = legend.get_color_str(pixel.value)
            self.assertTrue(color_str == "hsl(0,100%,50%)")
            # pixel x, y expected to be in spherical-mercator; attempt to
            # transform (if srid is not defined this generates an error).
            if pixel.location.srid != SPHERICAL_MERCATOR_SRID:
                pixel.location.transform(SPHERICAL_MERCATOR_SRID)
            # The measurement location is the pixel's upper-left/NW corner.
            nw = pixel.location
            # (xmin, ymin, xmax, ymax) of this one raster pixel.
            bbox = (nw.x,
                    nw.y - pixel_size_meters,
                    nw.x + pixel_size_meters,
                    nw.y)
            # Transform pixel spherical-mercator coords to image pixel coords.
            pxmin, pymin = rtmgr.sphericalmercator_to_pixel(
                zoom, tilex, tiley, bbox[0], bbox[1])
            pxmax, pymax = rtmgr.sphericalmercator_to_pixel(
                zoom, tilex, tiley, bbox[2], bbox[3])
            coords = ((pxmin, pymax),  # upper-left
                      (pxmax, pymax),  # upper-right
                      (pxmax, pymin),  # lower-right
                      (pxmin, pymin),  # lower-left
                      (pxmin, pymax))  # close the ring
            draw.polygon(coords, fill=color_str)

        # Confirm that (about) half of the image is red.
        color_counts = tile_image.getcolors()
        red = (255, 0, 0, 255)
        total = sum(count for count, _ in color_counts)
        red_percentage = sum(count for count, color in color_counts
                             if color == red) / total
        self.assertTrue(
            red_percentage >= 0.48,
            "Resulting Tile image Red({}) < 0.48".format(
                round(red_percentage, 4)))

    def test_sphericalmercator_to_pixel_japan(self):
        self._assert_left_half_red(zoom=10, tilex=911, tiley=626,
                                   pixel_size_meters=250)

    def test_sphericalmercator_to_pixel_chicago(self):
        self._assert_left_half_red(zoom=6, tilex=16, tiley=40,
                                   pixel_size_meters=2500)

    def test_sphericalmercator_to_pixel_rio(self):
        self._assert_left_half_red(zoom=6, tilex=24, tiley=27,
                                   pixel_size_meters=2500)

    def test_sphericalmercator_to_pixel_sydney(self):
        self._assert_left_half_red(zoom=7, tilex=117, tiley=51,
                                   pixel_size_meters=2500)

    def test_lonlat_to_tile(self):
        rtmgr = RasterTileManager()
        longitude = 7.56198
        latitude = 47.47607
        zoom = 11
        tilex, tiley = rtmgr.lonlat_to_tile(zoom, longitude, latitude)
        self.assertTrue(tilex == 1067 and tiley == 716)

    def test_get_neighbor_tiles(self):
        rtmgr = RasterTileManager()
        # (zoom, tilex, tiley, expected neighbors) -- includes an edge tile
        # (tiley=0) to exercise clipping at the map boundary.
        cases = (
            (6, 63, 60,
             [(63, 61), (62, 61), (62, 60), (62, 59), (63, 59)]),
            (7, 119, 0,
             [(118, 1), (119, 1), (120, 1), (118, 0), (120, 0)]),
            (11, 1893, 15,
             [(1893, 16), (1892, 16), (1894, 16), (1892, 15), (1894, 15),
              (1892, 14), (1893, 14), (1894, 14)]),
        )
        for zoom, tilex, tiley, expected in cases:
            actual = rtmgr.get_neighbor_tiles(zoom, tilex, tiley)
            msg = 'actual({}) != expected({})'.format(actual, expected)
            self.assertTrue(set(actual) == set(expected), msg)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main(verbosity=2)
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
import re
import time
import uuid
from pants.rwbuf.read_write_buffer import FileBackedRWBuf
from pants.util.dirutil import safe_mkdir_for
class WorkUnit(object):
  """A hierarchical unit of work, for the purpose of timing and reporting.

  A WorkUnit can be subdivided into further WorkUnits. The WorkUnit concept is deliberately
  decoupled from the goal/task hierarchy. This allows some flexibility in having, say,
  sub-units inside a task. E.g., there might be one WorkUnit representing an entire pants run,
  and that can be subdivided into WorkUnits for each goal. Each of those can be subdivided into
  WorkUnits for each task, and a task can subdivide that into further work units, if finer-grained
  timing and reporting is needed.
  """

  # The outcome of a workunit.
  # It can only be set to a new value <= the old one.
  ABORTED = 0
  FAILURE = 1
  WARNING = 2
  SUCCESS = 3
  UNKNOWN = 4

  @staticmethod
  def choose_for_outcome(outcome, aborted_val, failure_val, warning_val, success_val, unknown_val):
    """Returns one of the 5 arguments, depending on the outcome.

    Raises if outcome is not one of the five valid outcome codes above.
    """
    if outcome not in range(0, 5):
      raise Exception('Invalid outcome: %s' % outcome)
    return (aborted_val, failure_val, warning_val, success_val, unknown_val)[outcome]

  @staticmethod
  def outcome_string(outcome):
    """Returns a human-readable string describing the outcome."""
    return WorkUnit.choose_for_outcome(outcome,
                                       'ABORTED', 'FAILURE', 'WARNING', 'SUCCESS', 'UNKNOWN')

  # Labels describing a workunit. Reporting code can use this to decide how to display
  # information about this workunit.
  #
  # Note that a workunit can have multiple labels where this makes sense, e.g., TOOL, COMPILER
  # and NAILGUN.
  SETUP = 0      # Parsing build files etc.
  GOAL = 1       # Executing a goal.
  TASK = 2       # Executing a task within a goal.
  GROUP = 3      # Executing a group.
  BOOTSTRAP = 4  # Invocation of code to fetch a tool.
  TOOL = 5       # Single invocations of a tool.
  MULTITOOL = 6  # Multiple consecutive invocations of the same tool.
  COMPILER = 7   # Invocation of a compiler.
  TEST = 8       # Running a test.
  JVM = 9        # Running a tool via the JVM.
  NAILGUN = 10   # Running a tool via nailgun.
  RUN = 11       # Running a binary.
  REPL = 12      # Running a repl.
  PREP = 13      # Running a prep command

  def __init__(self, run_tracker, parent, name, labels=None, cmd=''):
    """
    - run_tracker: The RunTracker that tracks this WorkUnit.
    - parent: The containing workunit, if any. E.g., 'compile' might contain 'java', 'scala' etc.,
              'scala' might contain 'compile', 'split' etc.
    - name: A short name for this work. E.g., 'resolve', 'compile', 'scala', 'zinc'.
    - labels: An optional iterable of labels. The reporters can use this to decide how to
              display information about this work.
    - cmd: An optional longer string representing this work.
           E.g., the cmd line of a compiler invocation.
    """
    self._outcome = WorkUnit.UNKNOWN
    self.run_tracker = run_tracker
    self.parent = parent
    self.children = []
    self.name = name
    self.labels = set(labels or ())
    self.cmd = cmd
    self.id = uuid.uuid4()
    # In seconds since the epoch. Doubles, to account for fractional seconds.
    self.start_time = 0
    self.end_time = 0
    # A workunit may have multiple outputs, which we identify by a name.
    # E.g., a tool invocation may have 'stdout', 'stderr', 'debug_log' etc.
    self._outputs = {}  # name -> output buffer.
    # Do this last, as the parent's _self_time() might get called before we're
    # done initializing ourselves.
    # TODO: Ensure that a parent can't be ended before all its children are.
    if self.parent:
      self.parent.children.append(self)

  def has_label(self, label):
    """Returns True if this workunit was created with the given label."""
    return label in self.labels

  def start(self):
    """Mark the time at which this workunit started."""
    self.start_time = time.time()

  def end(self):
    """Mark the time at which this workunit ended.

    Closes all output buffers and records cumulative and self timings with
    the run tracker.
    """
    self.end_time = time.time()
    for output in self._outputs.values():
      output.close()
    is_tool = self.has_label(WorkUnit.TOOL)
    path = self.path()
    self.run_tracker.cumulative_timings.add_timing(path, self.duration(), is_tool)
    self.run_tracker.self_timings.add_timing(path, self._self_time(), is_tool)

  def outcome(self):
    """Returns the outcome of this workunit."""
    return self._outcome

  def set_outcome(self, outcome):
    """Set the outcome of this work unit.

    We can set the outcome on a work unit directly, but that outcome will also be affected by
    those of its subunits. The right thing happens: The outcome of a work unit is the
    worst outcome of any of its subunits and any outcome set on it directly."""
    # Validate the argument *before* mutating any state, so an invalid value
    # can never be recorded here or propagated to an ancestor. (The previous
    # implementation validated via a dummy choose() only after assignment.)
    WorkUnit.choose_for_outcome(outcome, 0, 0, 0, 0, 0)
    if outcome < self._outcome:
      self._outcome = outcome
    if self.parent:
      self.parent.set_outcome(self._outcome)

  _valid_name_re = re.compile(r'\w+')

  def output(self, name):
    """Returns the output buffer for the specified output name (e.g., 'stdout').

    Creates a file-backed buffer under the run tracker's info dir on first use.
    """
    m = WorkUnit._valid_name_re.match(name)
    if not m or m.group(0) != name:
      raise Exception('Invalid output name: %s' % name)
    if name not in self._outputs:
      path = os.path.join(self.run_tracker.info_dir, 'tool_outputs', '%s.%s' % (self.id, name))
      safe_mkdir_for(path)
      self._outputs[name] = FileBackedRWBuf(path)
    return self._outputs[name]

  def outputs(self):
    """Returns the map of output name -> output buffer."""
    return self._outputs

  def choose(self, aborted_val, failure_val, warning_val, success_val, unknown_val):
    """Returns one of the 5 arguments, depending on our outcome."""
    return WorkUnit.choose_for_outcome(self._outcome,
                                       aborted_val, failure_val, warning_val, success_val,
                                       unknown_val)

  def duration(self):
    """Returns the time (in fractional seconds) spent in this workunit and its children."""
    return (self.end_time or time.time()) - self.start_time

  def start_time_string(self):
    """A convenient string representation of start_time."""
    return time.strftime('%H:%M:%S', time.localtime(self.start_time))

  def start_delta_string(self):
    """A convenient string representation of how long after the run started we started."""
    delta = int(self.start_time) - int(self.root().start_time)
    # Floor division: this module imports true division from __future__, so a
    # plain / would yield a float here.
    return '%02d:%02d' % (delta // 60, delta % 60)

  def root(self):
    """Returns the workunit at the top of this workunit's ancestor chain."""
    ret = self
    while ret.parent is not None:
      ret = ret.parent
    return ret

  def ancestors(self):
    """Returns a list consisting of this workunit and those enclosing it, up to the root."""
    ret = []
    workunit = self
    while workunit is not None:
      ret.append(workunit)
      workunit = workunit.parent
    return ret

  def path(self):
    """Returns a path string for this workunit, E.g., 'all:compile:jvm:scalac'."""
    return ':'.join(reversed([w.name for w in self.ancestors()]))

  def unaccounted_time(self):
    """Returns non-leaf time spent in this workunit.

    This assumes that all major work should be done in leaves.
    TODO: Is this assumption valid?
    """
    return 0 if len(self.children) == 0 else self._self_time()

  def to_dict(self):
    """Useful for providing arguments to templates."""
    ret = {}
    for key in ['name', 'cmd', 'id', 'start_time', 'end_time',
                'outcome', 'start_time_string', 'start_delta_string']:
      val = getattr(self, key)
      # Zero-arg methods (e.g. outcome) are called; plain attributes pass through.
      ret[key] = val() if callable(val) else val
    ret['parent'] = self.parent.to_dict() if self.parent else None
    return ret

  def _self_time(self):
    """Returns the time spent in this workunit outside of any children."""
    return self.duration() - sum(child.duration() for child in self.children)
| |
#!/usr/bin/env python
#******************************************************************************
# Name: gamma_filter.py
#  Purpose:  gamma MAP adaptive filtering for polarized SAR intensity images
# Ref: Oliver and Quegan (2004) Understanding SAR Images, Scitech
# Usage:
# python gamma_filter.py [-h] [-d dims] infile enl
#
# Copyright (c) 2015, Mort Canty
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import auxil.auxil as auxil
import auxil.congrid as congrid
import os, sys, time, getopt
import numpy as np
from osgeo import gdal
from osgeo.gdalconst import GDT_Float32, GA_ReadOnly
# Half-window templates for the 7x7 estimation window.  templates[p] is a
# 0/1 mask selecting 21 pixels on one side of an edge; templates 2-7 are
# successive 90-degree rotations of the first two patterns.
templates = np.zeros((8,7,7),dtype=int)
for j in range(7):
    templates[0,j,0:3] = 1
# Diagonal (staircase) half-window pattern.
templates[1,1,0] = 1
templates[1,2,:2] = 1
templates[1,3,:3] = 1
templates[1,4,:4] = 1
templates[1,5,:5] = 1
templates[1,6,:6] = 1
templates[2] = np.rot90(templates[0])
templates[3] = np.rot90(templates[1])
templates[4] = np.rot90(templates[2])
templates[5] = np.rot90(templates[3])
templates[6] = np.rot90(templates[4])
templates[7] = np.rot90(templates[5])
# Replace each mask with the 21 flat indices it selects within a ravelled
# 7x7 window, so windows can be masked with a single fancy-index operation.
tmp = np.zeros((8,21),dtype=int)
for i in range(8):
    tmp[i,:] = np.where(templates[i].ravel())[0]
templates = tmp
# 3x3 gradient kernels for four edge orientations; the kernel with the
# largest response on the compressed window selects the edge direction.
edges = np.zeros((4,3,3),dtype=int)
edges[0] = [[-1,0,1],[-1,0,1],[-1,0,1]]
edges[1] = [[0,1,1],[-1,0,1],[-1,-1,0]]
edges[2] = [[1,1,1],[0,0,0],[-1,-1,-1]]
edges[3] = [[1,1,0],[1,0,-1],[0,-1,-1]]
def get_windex(j,cols):
    """Return the 49 flat indices of the 7x7 window centred on row j.

    The indices address a row-major image with `cols` columns; entries run
    left to right within each window row, top window row first.
    """
    row_starts = np.arange(j - 3, j + 4) * cols
    col_offsets = np.arange(7)
    return (row_starts[:, None] + col_offsets).ravel()
def gamma_filter(k,inimage,outimage,rows,cols,m):
    """Gamma MAP adaptive filter for band k of a multi-look SAR intensity image.

    Arguments:
        k         band index to filter
        inimage   3D array (bands, rows, cols) of original intensities
        outimage  3D array used to build the 7x7 estimation windows
                  (may already contain partially filtered bands)
        rows, cols  image dimensions
        m         equivalent number of looks (ENL)

    Returns the filtered band as a 2D array; pixels within 3 pixels of the
    border are returned unchanged.  Ref: Oliver and Quegan (2004).
    """
    result = np.copy(inimage[k])
    arr = outimage[k].ravel()
    # Progress output via sys.stdout.write so the function runs under both
    # Python 2 and Python 3 (the original used py2-only print statements).
    sys.stdout.write('filtering band %i\n'%(k+1))
    sys.stdout.write('row: ')
    sys.stdout.flush()
    for j in range(3,rows-3):
        if j%50 == 0:
            sys.stdout.write('%i '%j)
            sys.stdout.flush()
        windex = get_windex(j,cols)
        for i in range(3,cols-3):
            # central pixel, always from original input image
            g = inimage[k,j,i]
            wind = np.reshape(arr[windex],(7,7))
            # 3x3 compression
            w = congrid.congrid(wind,(3,3),method='linear',centre=True)
            # get appropriate edge mask: strongest gradient response wins
            es = [np.sum(edges[p]*w) for p in range(4)]
            idx = np.argmax(es)
            # pick the half-window template lying on the side of the edge
            # closer in intensity to the centre pixel
            if idx == 0:
                if np.abs(w[1,1]-w[1,0]) < np.abs(w[1,1]-w[1,2]):
                    edge = templates[0]
                else:
                    edge = templates[4]
            elif idx == 1:
                if np.abs(w[1,1]-w[2,0]) < np.abs(w[1,1]-w[0,2]):
                    edge = templates[1]
                else:
                    edge = templates[5]
            elif idx == 2:
                if np.abs(w[1,1]-w[0,1]) < np.abs(w[1,1]-w[2,1]):
                    edge = templates[6]
                else:
                    edge = templates[2]
            elif idx == 3:
                if np.abs(w[1,1]-w[0,0]) < np.abs(w[1,1]-w[2,2]):
                    edge = templates[7]
                else:
                    edge = templates[3]
            wind = wind.ravel()[edge]
            var = np.var(wind)
            if var > 0:
                mu = np.mean(wind)
                # Gamma MAP estimate.  BUGFIX: the ENL term must be 1.0/m,
                # not 1/m -- under Python 2 integer division 1/m is 0 for
                # m >= 2, which corrupted alpha.
                alpha = (1 + 1.0/m)/(var/mu**2 - 1.0/m)
                if alpha < 0:
                    # strongly heterogeneous window: fall back to |alpha|
                    alpha = np.abs(alpha)
                a = mu*(alpha-m-1)
                x = (a+np.sqrt(4*g*m*alpha*mu+a**2))/(2*alpha)
                result[j,i] = x
            # slide the 7x7 window one column to the right
            windex += 1
    sys.stdout.write(' done\n')
    return result
def main():
    """Parse the command line, gamma-MAP-filter the diagonal intensity bands
    of a C or T matrix image, and write the result to <root>_gamma<ext>
    next to the input file."""
    usage = '''
Usage:
------------------------------------------------
python %s [-h] [-d dims] filename enl
Run a gamma Map filter in the diagonal elements of a C or T matrix
------------------------------------------------''' %sys.argv[0]
    options,args = getopt.getopt(sys.argv[1:],'hd:')
    dims = None
    for option, value in options:
        if option == '-h':
            print(usage)
            return
        elif option == '-d':
            # SECURITY NOTE(review): eval() of a command-line argument runs
            # arbitrary code; tolerable only because this is a local analysis
            # script run by its owner.  Expected form: [x0,y0,cols,rows].
            dims = eval(value)
    if len(args) != 2:
        print('Incorrect number of arguments')
        print(usage)
        sys.exit(1)
    infile = args[0]
    m = int(args[1])  # equivalent number of looks (ENL)
    gdal.AllRegister()
    inDataset = gdal.Open(infile,GA_ReadOnly)
    cols = inDataset.RasterXSize
    rows = inDataset.RasterYSize
    bands = inDataset.RasterCount
    if dims is None:
        dims = [0,0,cols,rows]
    x0,y0,cols,rows = dims
    # Derive the output file name from the input path.
    path = os.path.abspath(infile)
    dirn = os.path.dirname(path)
    basename = os.path.basename(infile)
    root, ext = os.path.splitext(basename)
    outfile = dirn + '/' + root + '_gamma' + ext
    # process diagonal bands only
    driver = inDataset.GetDriver()
    if bands == 9:
        # 3x3 covariance/coherency matrix: diagonal is bands 1, 6 and 9.
        outDataset = driver.Create(outfile,cols,rows,3,GDT_Float32)
        inimage = np.zeros((3,rows,cols))
        band = inDataset.GetRasterBand(1)
        inimage[0] = band.ReadAsArray(x0,y0,cols,rows)
        band = inDataset.GetRasterBand(6)
        inimage[1] = band.ReadAsArray(x0,y0,cols,rows)
        band = inDataset.GetRasterBand(9)
        inimage[2] = band.ReadAsArray(x0,y0,cols,rows)
    elif bands == 4:
        # 2x2 matrix: diagonal is bands 1 and 4.
        outDataset = driver.Create(outfile,cols,rows,2,GDT_Float32)
        inimage = np.zeros((2,rows,cols))
        band = inDataset.GetRasterBand(1)
        inimage[0] = band.ReadAsArray(x0,y0,cols,rows)
        band = inDataset.GetRasterBand(4)
        inimage[1] = band.ReadAsArray(x0,y0,cols,rows)
    else:
        # Single band.  BUGFIX: the original assigned the raw gdal Band
        # object to inimage, but gamma_filter indexes inimage as a 3D
        # array -- read the pixel data into a (1, rows, cols) array.
        outDataset = driver.Create(outfile,cols,rows,1,GDT_Float32)
        inimage = np.zeros((1,rows,cols))
        band = inDataset.GetRasterBand(1)
        inimage[0] = band.ReadAsArray(x0,y0,cols,rows)
    outimage = np.copy(inimage)
    print('=========================')
    print('     GAMMA MAP FILTER')
    print('=========================')
    print(time.asctime())
    print('infile:  %s'%infile)
    print('equivalent number of looks: %i'%m)
    start = time.time()
    if bands == 9:
        for k in range(3):
            outimage[k] = gamma_filter(k,inimage,outimage,rows,cols,m)
    elif bands == 4:
        for k in range(2):
            outimage[k] = gamma_filter(k,inimage,outimage,rows,cols,m)
    else:
        outimage = gamma_filter(0,inimage,outimage,rows,cols,m)
    # Carry georeferencing across, shifted by the spatial subset offset.
    geotransform = inDataset.GetGeoTransform()
    if geotransform is not None:
        gt = list(geotransform)
        gt[0] = gt[0] + x0*gt[1]
        gt[3] = gt[3] + y0*gt[5]
        outDataset.SetGeoTransform(tuple(gt))
    projection = inDataset.GetProjection()
    if projection is not None:
        outDataset.SetProjection(projection)
    if bands == 9:
        for k in range(3):
            outBand = outDataset.GetRasterBand(k+1)
            outBand.WriteArray(outimage[k],0,0)
            outBand.FlushCache()
    elif bands == 4:
        for k in range(2):
            outBand = outDataset.GetRasterBand(k+1)
            outBand.WriteArray(outimage[k],0,0)
            outBand.FlushCache()
    else:
        outBand = outDataset.GetRasterBand(1)
        outBand.WriteArray(outimage,0,0)
        outBand.FlushCache()
    outDataset = None  # dereference to flush and close the GDAL dataset
    print('result written to: '+outfile)
    print('elapsed time: '+str(time.time()-start))

if __name__ == '__main__':
    main()
| |
# orm/dependency.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Relationship dependencies.
"""
from .. import sql, util, exc as sa_exc
from . import attributes, exc, sync, unitofwork, \
util as mapperutil
from .interfaces import ONETOMANY, MANYTOONE, MANYTOMANY
class DependencyProcessor(object):
    """Establishes flush-time ordering dependencies and foreign key
    synchronization for a single relationship() property.

    Direction-specific subclasses implement the presort/process hooks and
    the dependency wiring between the unit of work's save/delete actions.
    """
    def __init__(self, prop):
        # Localize the relationship property's configuration; these
        # attributes are read heavily during flush.
        self.prop = prop
        self.cascade = prop.cascade
        self.mapper = prop.mapper
        self.parent = prop.parent
        self.secondary = prop.secondary
        self.direction = prop.direction
        self.post_update = prop.post_update
        self.passive_deletes = prop.passive_deletes
        self.passive_updates = prop.passive_updates
        self.enable_typechecks = prop.enable_typechecks
        # With passive deletes/updates, unloaded attributes are not fetched
        # during flush; otherwise history may be loaded on demand.
        if self.passive_deletes:
            self._passive_delete_flag = attributes.PASSIVE_NO_INITIALIZE
        else:
            self._passive_delete_flag = attributes.PASSIVE_OFF
        if self.passive_updates:
            self._passive_update_flag = attributes.PASSIVE_NO_INITIALIZE
        else:
            self._passive_update_flag = attributes.PASSIVE_OFF
        self.key = prop.key
        # Without synchronize_pairs there are no columns to copy between
        # parent and child, so the relationship cannot be flushed.
        if not self.prop.synchronize_pairs:
            raise sa_exc.ArgumentError(
                "Can't build a DependencyProcessor for relationship %s. "
                "No target attributes to populate between parent and "
                "child are present" %
                self.prop)
    @classmethod
    def from_relationship(cls, prop):
        """Return the direction-appropriate DependencyProcessor subclass
        for the given relationship property."""
        return _direction_to_processor[prop.direction](prop)
    def hasparent(self, state):
        """return True if the given object instance has a parent,
        according to the ``InstrumentedAttribute`` handled by this
        ``DependencyProcessor``.
        """
        return self.parent.class_manager.get_impl(self.key).hasparent(state)
    def per_property_preprocessors(self, uow):
        """establish actions and dependencies related to a flush.

        These actions will operate on all relevant states in
        the aggregate.
        """
        uow.register_preprocessor(self, True)
    def per_property_flush_actions(self, uow):
        # Aggregate (acyclic) flush: create the relationship's "process"
        # actions and wire them between the parent and child mappers'
        # aggregate save/delete actions via per_property_dependencies().
        after_save = unitofwork.ProcessAll(uow, self, False, True)
        before_delete = unitofwork.ProcessAll(uow, self, True, True)
        parent_saves = unitofwork.SaveUpdateAll(
            uow,
            self.parent.primary_base_mapper
        )
        child_saves = unitofwork.SaveUpdateAll(
            uow,
            self.mapper.primary_base_mapper
        )
        parent_deletes = unitofwork.DeleteAll(
            uow,
            self.parent.primary_base_mapper
        )
        child_deletes = unitofwork.DeleteAll(
            uow,
            self.mapper.primary_base_mapper
        )
        self.per_property_dependencies(uow,
                                       parent_saves,
                                       child_saves,
                                       parent_deletes,
                                       child_deletes,
                                       after_save,
                                       before_delete
                                       )
    def per_state_flush_actions(self, uow, states, isdelete):
        """establish actions and dependencies related to a flush.

        These actions will operate on all relevant states
        individually. This occurs only if there are cycles
        in the 'aggregated' version of events.
        """
        parent_base_mapper = self.parent.primary_base_mapper
        child_base_mapper = self.mapper.primary_base_mapper
        child_saves = unitofwork.SaveUpdateAll(uow, child_base_mapper)
        child_deletes = unitofwork.DeleteAll(uow, child_base_mapper)
        # locate and disable the aggregate processors
        # for this dependency
        if isdelete:
            before_delete = unitofwork.ProcessAll(uow, self, True, True)
            before_delete.disabled = True
        else:
            after_save = unitofwork.ProcessAll(uow, self, False, True)
            after_save.disabled = True
        # check if the "child" side is part of the cycle
        if child_saves not in uow.cycles:
            # based on the current dependencies we use, the saves/
            # deletes should always be in the 'cycles' collection
            # together. if this changes, we will have to break up
            # this method a bit more.
            assert child_deletes not in uow.cycles
            # child side is not part of the cycle, so we will link per-state
            # actions to the aggregate "saves", "deletes" actions
            child_actions = [
                (child_saves, False), (child_deletes, True)
            ]
            child_in_cycles = False
        else:
            child_in_cycles = True
        # check if the "parent" side is part of the cycle
        # NOTE(review): parent_in_cycles is assigned only when the parent's
        # aggregate action is in uow.cycles; the loop below appears to rely
        # on this method being invoked only in that case -- confirm with the
        # unitofwork callers before changing.
        if not isdelete:
            parent_saves = unitofwork.SaveUpdateAll(
                uow,
                self.parent.base_mapper)
            parent_deletes = before_delete = None
            if parent_saves in uow.cycles:
                parent_in_cycles = True
        else:
            parent_deletes = unitofwork.DeleteAll(
                uow,
                self.parent.base_mapper)
            parent_saves = after_save = None
            if parent_deletes in uow.cycles:
                parent_in_cycles = True
        # now create actions /dependencies for each state.
        for state in states:
            # detect if there's anything changed or loaded
            # by a preprocessor on this state/attribute. if not,
            # we should be able to skip it entirely.
            sum_ = state.manager[self.key].impl.get_all_pending(
                state, state.dict)
            if not sum_:
                continue
            if isdelete:
                before_delete = unitofwork.ProcessState(uow,
                                                        self, True, state)
                if parent_in_cycles:
                    parent_deletes = unitofwork.DeleteState(
                        uow,
                        state,
                        parent_base_mapper)
            else:
                after_save = unitofwork.ProcessState(uow, self, False, state)
                if parent_in_cycles:
                    parent_saves = unitofwork.SaveUpdateState(
                        uow,
                        state,
                        parent_base_mapper)
            if child_in_cycles:
                # build a per-state action for each pending child that is
                # itself part of the flush; children not in the flush get
                # a (None, None) placeholder.
                child_actions = []
                for child_state, child in sum_:
                    if child_state not in uow.states:
                        child_action = (None, None)
                    else:
                        (deleted, listonly) = uow.states[child_state]
                        if deleted:
                            child_action = (
                                unitofwork.DeleteState(
                                    uow, child_state,
                                    child_base_mapper),
                                True)
                        else:
                            child_action = (
                                unitofwork.SaveUpdateState(
                                    uow, child_state,
                                    child_base_mapper),
                                False)
                    child_actions.append(child_action)
            # establish dependencies between our possibly per-state
            # parent action and our possibly per-state child action.
            for child_action, childisdelete in child_actions:
                self.per_state_dependencies(uow, parent_saves,
                                            parent_deletes,
                                            child_action,
                                            after_save, before_delete,
                                            isdelete, childisdelete)
    def presort_deletes(self, uowcommit, states):
        # Hook: subclasses register objects affected by parent deletions.
        return False
    def presort_saves(self, uowcommit, states):
        # Hook: subclasses register objects affected by parent saves/updates.
        return False
    def process_deletes(self, uowcommit, states):
        # Hook: subclasses synchronize key columns for deletions.
        pass
    def process_saves(self, uowcommit, states):
        # Hook: subclasses synchronize key columns for saves/updates.
        pass
    def prop_has_changes(self, uowcommit, states, isdelete):
        """Return True if any of the given states have pending changes on
        this relationship and therefore require flush actions."""
        if not isdelete or self.passive_deletes:
            passive = attributes.PASSIVE_NO_INITIALIZE
        elif self.direction is MANYTOONE:
            passive = attributes.PASSIVE_NO_FETCH_RELATED
        else:
            passive = attributes.PASSIVE_OFF
        for s in states:
            # TODO: add a high speed method
            # to InstanceState which returns: attribute
            # has a non-None value, or had one
            history = uowcommit.get_attribute_history(
                s,
                self.key,
                passive)
            if history and not history.empty():
                return True
        else:
            # for-else: no per-state history found; a non-self-referential
            # relationship whose child mapper is otherwise involved in the
            # flush still needs processing.
            return states and \
                not self.prop._is_self_referential and \
                self.mapper in uowcommit.mappers
    def _verify_canload(self, state):
        # Raise FlushError if `state` is not an acceptable instance for this
        # relationship's target mapper (None in a collection, or a type the
        # mapper cannot load under the current typecheck settings).
        if self.prop.uselist and state is None:
            raise exc.FlushError(
                "Can't flush None value found in "
                "collection %s" % (self.prop, ))
        elif state is not None and \
            not self.mapper._canload(state,
                                     allow_subtypes=not self.enable_typechecks):
            if self.mapper._canload(state, allow_subtypes=True):
                raise exc.FlushError('Attempting to flush an item of type '
                                     '%(x)s as a member of collection '
                                     '"%(y)s". Expected an object of type '
                                     '%(z)s or a polymorphic subclass of '
                                     'this type. If %(x)s is a subclass of '
                                     '%(z)s, configure mapper "%(zm)s" to '
                                     'load this subtype polymorphically, or '
                                     'set enable_typechecks=False to allow '
                                     'any subtype to be accepted for flush. '
                                     % {
                                         'x': state.class_,
                                         'y': self.prop,
                                         'z': self.mapper.class_,
                                         'zm': self.mapper,
                                     })
            else:
                raise exc.FlushError(
                    'Attempting to flush an item of type '
                    '%(x)s as a member of collection '
                    '"%(y)s". Expected an object of type '
                    '%(z)s or a polymorphic subclass of '
                    'this type.' % {
                        'x': state.class_,
                        'y': self.prop,
                        'z': self.mapper.class_,
                    })
    def _synchronize(self, state, child, associationrow,
                     clearkeys, uowcommit):
        # Copy (or clear) key values between parent and child rows;
        # implemented by each direction-specific subclass.
        raise NotImplementedError()
    def _get_reversed_processed_set(self, uow):
        # Memoized set shared with the reverse property's processor, used to
        # avoid processing the same pair from both directions in one flush.
        if not self.prop._reverse_property:
            return None
        process_key = tuple(sorted(
            [self.key] +
            [p.key for p in self.prop._reverse_property]
        ))
        return uow.memo(
            ('reverse_key', process_key),
            set
        )
    def _post_update(self, state, uowcommit, related):
        # Queue a post-flush UPDATE of this relationship's columns on
        # `state` if any related object is present; issued at most once.
        for x in related:
            if x is not None:
                uowcommit.issue_post_update(
                    state,
                    [r for l, r in self.prop.synchronize_pairs]
                )
                break
    def _pks_changed(self, uowcommit, state):
        # Subclass hook: True if source-side key columns changed on `state`.
        raise NotImplementedError()
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.prop)
class OneToManyDP(DependencyProcessor):
    """Dependency processor for one-to-many relationships.

    The child table holds the foreign key, so children are populated after
    the parent is saved and have their foreign key nulled (or are deleted)
    before the parent is deleted.
    """
    def per_property_dependencies(self, uow, parent_saves,
                                  child_saves,
                                  parent_deletes,
                                  child_deletes,
                                  after_save,
                                  before_delete,
                                  ):
        if self.post_update:
            # With post_update, the child's FK columns are set/cleared by a
            # separate UPDATE issued after saves and before deletes.
            child_post_updates = unitofwork.IssuePostUpdate(
                uow,
                self.mapper.primary_base_mapper,
                False)
            child_pre_updates = unitofwork.IssuePostUpdate(
                uow,
                self.mapper.primary_base_mapper,
                True)
            uow.dependencies.update([
                (child_saves, after_save),
                (parent_saves, after_save),
                (after_save, child_post_updates),
                (before_delete, child_pre_updates),
                (child_pre_updates, parent_deletes),
                (child_pre_updates, child_deletes),
            ])
        else:
            # Normal ordering: parents saved before children on save;
            # children handled before parents on delete.
            uow.dependencies.update([
                (parent_saves, after_save),
                (after_save, child_saves),
                (after_save, child_deletes),
                (child_saves, parent_deletes),
                (child_deletes, parent_deletes),
                (before_delete, child_saves),
                (before_delete, child_deletes),
            ])
    def per_state_dependencies(self, uow,
                               save_parent,
                               delete_parent,
                               child_action,
                               after_save, before_delete,
                               isdelete, childisdelete):
        if self.post_update:
            child_post_updates = unitofwork.IssuePostUpdate(
                uow,
                self.mapper.primary_base_mapper,
                False)
            child_pre_updates = unitofwork.IssuePostUpdate(
                uow,
                self.mapper.primary_base_mapper,
                True)
            # TODO: this whole block is not covered
            # by any tests
            if not isdelete:
                if childisdelete:
                    uow.dependencies.update([
                        (child_action, after_save),
                        (after_save, child_post_updates),
                    ])
                else:
                    uow.dependencies.update([
                        (save_parent, after_save),
                        (child_action, after_save),
                        (after_save, child_post_updates),
                    ])
            else:
                # NOTE(review): both branches below add identical
                # dependencies; the childisdelete distinction appears
                # unused here -- confirm before collapsing.
                if childisdelete:
                    uow.dependencies.update([
                        (before_delete, child_pre_updates),
                        (child_pre_updates, delete_parent),
                    ])
                else:
                    uow.dependencies.update([
                        (before_delete, child_pre_updates),
                        (child_pre_updates, delete_parent),
                    ])
        elif not isdelete:
            uow.dependencies.update([
                (save_parent, after_save),
                (after_save, child_action),
                (save_parent, child_action)
            ])
        else:
            uow.dependencies.update([
                (before_delete, child_action),
                (child_action, delete_parent)
            ])
    def presort_deletes(self, uowcommit, states):
        # head object is being deleted, and we manage its list of
        # child objects the child objects have to have their
        # foreign key to the parent set to NULL
        should_null_fks = not self.cascade.delete and \
            not self.passive_deletes == 'all'
        for state in states:
            history = uowcommit.get_attribute_history(
                state,
                self.key,
                self._passive_delete_flag)
            if history:
                for child in history.deleted:
                    if child is not None and self.hasparent(child) is False:
                        if self.cascade.delete_orphan:
                            uowcommit.register_object(child, isdelete=True)
                        else:
                            uowcommit.register_object(child)
                if should_null_fks:
                    for child in history.unchanged:
                        if child is not None:
                            uowcommit.register_object(child,
                                operation="delete", prop=self.prop)
    def presort_saves(self, uowcommit, states):
        # Register added children for save, and removed children either for
        # FK-clearing UPDATE or (orphan cascade) for deletion.
        children_added = uowcommit.memo(('children_added', self), set)
        for state in states:
            pks_changed = self._pks_changed(uowcommit, state)
            if not pks_changed or self.passive_updates:
                passive = attributes.PASSIVE_NO_INITIALIZE
            else:
                passive = attributes.PASSIVE_OFF
            history = uowcommit.get_attribute_history(
                state,
                self.key,
                passive)
            if history:
                for child in history.added:
                    if child is not None:
                        uowcommit.register_object(child, cancel_delete=True,
                                                  operation="add",
                                                  prop=self.prop)
                children_added.update(history.added)
                for child in history.deleted:
                    if not self.cascade.delete_orphan:
                        uowcommit.register_object(child, isdelete=False,
                                                  operation='delete',
                                                  prop=self.prop)
                    elif self.hasparent(child) is False:
                        # true orphan: delete it, cascading to its own
                        # delete-cascaded children.
                        uowcommit.register_object(child, isdelete=True,
                                                  operation="delete", prop=self.prop)
                        for c, m, st_, dct_ in self.mapper.cascade_iterator(
                                'delete', child):
                            uowcommit.register_object(
                                st_,
                                isdelete=True)
            if pks_changed:
                # parent PK changed: unchanged children need their FK columns
                # updated to the new value.
                if history:
                    for child in history.unchanged:
                        if child is not None:
                            uowcommit.register_object(
                                child,
                                False,
                                self.passive_updates,
                                operation="pk change",
                                prop=self.prop)
    def process_deletes(self, uowcommit, states):
        # head object is being deleted, and we manage its list of
        # child objects the child objects have to have their foreign
        # key to the parent set to NULL this phase can be called
        # safely for any cascade but is unnecessary if delete cascade
        # is on.
        if self.post_update or not self.passive_deletes == 'all':
            children_added = uowcommit.memo(('children_added', self), set)
            for state in states:
                history = uowcommit.get_attribute_history(
                    state,
                    self.key,
                    self._passive_delete_flag)
                if history:
                    for child in history.deleted:
                        if child is not None and \
                                self.hasparent(child) is False:
                            self._synchronize(
                                state,
                                child,
                                None, True,
                                uowcommit, False)
                            if self.post_update and child:
                                self._post_update(child, uowcommit, [state])
                    if self.post_update or not self.cascade.delete:
                        for child in set(history.unchanged).\
                                difference(children_added):
                            if child is not None:
                                self._synchronize(
                                    state,
                                    child,
                                    None, True,
                                    uowcommit, False)
                                if self.post_update and child:
                                    self._post_update(child,
                                                      uowcommit,
                                                      [state])
                    # technically, we can even remove each child from the
                    # collection here too. but this would be a somewhat
                    # inconsistent behavior since it wouldn't happen
                    # if the old parent wasn't deleted but child was moved.
    def process_saves(self, uowcommit, states):
        # Populate FK columns on added children, clear them on removed
        # non-orphan children, and propagate parent PK changes to the rest.
        for state in states:
            history = uowcommit.get_attribute_history(
                state,
                self.key,
                attributes.PASSIVE_NO_INITIALIZE)
            if history:
                for child in history.added:
                    self._synchronize(state, child, None,
                                      False, uowcommit, False)
                    if child is not None and self.post_update:
                        self._post_update(child, uowcommit, [state])
                for child in history.deleted:
                    if not self.cascade.delete_orphan and \
                            not self.hasparent(child):
                        self._synchronize(state, child, None, True,
                                          uowcommit, False)
                if self._pks_changed(uowcommit, state):
                    for child in history.unchanged:
                        self._synchronize(state, child, None,
                                          False, uowcommit, True)
    def _synchronize(self, state, child,
                     associationrow, clearkeys, uowcommit,
                     pks_changed):
        # Copy the parent's key columns into the child's FK columns, or
        # clear them when clearkeys is set or the child is going away.
        source = state
        dest = child
        self._verify_canload(child)
        if dest is None or \
                (not self.post_update and uowcommit.is_deleted(dest)):
            return
        if clearkeys:
            sync.clear(dest, self.mapper, self.prop.synchronize_pairs)
        else:
            sync.populate(source, self.parent, dest, self.mapper,
                          self.prop.synchronize_pairs, uowcommit,
                          self.passive_updates and pks_changed)
    def _pks_changed(self, uowcommit, state):
        # True if any parent-side column of synchronize_pairs was modified.
        return sync.source_modified(
            uowcommit,
            state,
            self.parent,
            self.prop.synchronize_pairs)
class ManyToOneDP(DependencyProcessor):
    """Dependency processor for many-to-one relationships.

    The parent row holds the foreign key, so the parent's FK columns are
    populated from the child: children are saved before parents, and
    parents are deleted before their referenced children.
    """
    def __init__(self, prop):
        DependencyProcessor.__init__(self, prop)
        # Also watch the child mapper for primary key changes that must be
        # propagated into referencing parent rows.
        self.mapper._dependency_processors.append(DetectKeySwitch(prop))
    def per_property_dependencies(self, uow,
                                  parent_saves,
                                  child_saves,
                                  parent_deletes,
                                  child_deletes,
                                  after_save,
                                  before_delete):
        if self.post_update:
            # With post_update, the parent's FK columns are set/cleared via
            # a separate UPDATE after saves / before deletes.
            parent_post_updates = unitofwork.IssuePostUpdate(
                uow,
                self.parent.primary_base_mapper,
                False)
            parent_pre_updates = unitofwork.IssuePostUpdate(
                uow,
                self.parent.primary_base_mapper,
                True)
            uow.dependencies.update([
                (child_saves, after_save),
                (parent_saves, after_save),
                (after_save, parent_post_updates),
                (after_save, parent_pre_updates),
                (before_delete, parent_pre_updates),
                (parent_pre_updates, child_deletes),
            ])
        else:
            # Normal ordering: children saved before parents; parents
            # deleted before the children they reference.
            uow.dependencies.update([
                (child_saves, after_save),
                (after_save, parent_saves),
                (parent_saves, child_deletes),
                (parent_deletes, child_deletes)
            ])
    def per_state_dependencies(self, uow,
                               save_parent,
                               delete_parent,
                               child_action,
                               after_save, before_delete,
                               isdelete, childisdelete):
        if self.post_update:
            if not isdelete:
                parent_post_updates = unitofwork.IssuePostUpdate(
                    uow,
                    self.parent.primary_base_mapper,
                    False)
                if childisdelete:
                    uow.dependencies.update([
                        (after_save, parent_post_updates),
                        (parent_post_updates, child_action)
                    ])
                else:
                    uow.dependencies.update([
                        (save_parent, after_save),
                        (child_action, after_save),
                        (after_save, parent_post_updates)
                    ])
            else:
                parent_pre_updates = unitofwork.IssuePostUpdate(
                    uow,
                    self.parent.primary_base_mapper,
                    True)
                uow.dependencies.update([
                    (before_delete, parent_pre_updates),
                    (parent_pre_updates, delete_parent),
                    (parent_pre_updates, child_action)
                ])
        elif not isdelete:
            if not childisdelete:
                uow.dependencies.update([
                    (child_action, after_save),
                    (after_save, save_parent),
                ])
            else:
                uow.dependencies.update([
                    (after_save, save_parent),
                ])
        else:
            if childisdelete:
                uow.dependencies.update([
                    (delete_parent, child_action)
                ])
    def presort_deletes(self, uowcommit, states):
        # Parent rows are being deleted; with delete/delete-orphan cascade,
        # register the referenced children (and their cascade) for deletion.
        if self.cascade.delete or self.cascade.delete_orphan:
            for state in states:
                history = uowcommit.get_attribute_history(
                    state,
                    self.key,
                    self._passive_delete_flag)
                if history:
                    if self.cascade.delete_orphan:
                        todelete = history.sum()
                    else:
                        todelete = history.non_deleted()
                    for child in todelete:
                        if child is None:
                            continue
                        uowcommit.register_object(child, isdelete=True,
                                                  operation="delete", prop=self.prop)
                        t = self.mapper.cascade_iterator('delete', child)
                        for c, m, st_, dct_ in t:
                            uowcommit.register_object(
                                st_, isdelete=True)
    def presort_saves(self, uowcommit, states):
        # The parent row itself must be saved/updated; replaced children
        # that are now orphans are registered for deletion.
        for state in states:
            uowcommit.register_object(state, operation="add", prop=self.prop)
            if self.cascade.delete_orphan:
                history = uowcommit.get_attribute_history(
                    state,
                    self.key,
                    self._passive_delete_flag)
                if history:
                    for child in history.deleted:
                        if self.hasparent(child) is False:
                            uowcommit.register_object(child, isdelete=True,
                                                      operation="delete", prop=self.prop)
                            t = self.mapper.cascade_iterator('delete', child)
                            for c, m, st_, dct_ in t:
                                uowcommit.register_object(st_, isdelete=True)
    def process_deletes(self, uowcommit, states):
        if self.post_update and \
                not self.cascade.delete_orphan and \
                not self.passive_deletes == 'all':
            # post_update means we have to update our
            # row to not reference the child object
            # before we can DELETE the row
            for state in states:
                self._synchronize(state, None, None, True, uowcommit)
                if state and self.post_update:
                    history = uowcommit.get_attribute_history(
                        state,
                        self.key,
                        self._passive_delete_flag)
                    if history:
                        self._post_update(state, uowcommit, history.sum())
    def process_saves(self, uowcommit, states):
        # Copy each newly associated child's key into the parent's FK
        # columns; with post_update this happens via a deferred UPDATE.
        for state in states:
            history = uowcommit.get_attribute_history(
                state,
                self.key,
                attributes.PASSIVE_NO_INITIALIZE)
            if history:
                for child in history.added:
                    self._synchronize(state, child, None, False,
                                      uowcommit, "add")
                if self.post_update:
                    self._post_update(state, uowcommit, history.sum())
    def _synchronize(self, state, child, associationrow,
                     clearkeys, uowcommit, operation=None):
        # Populate (or clear) the parent state's FK columns from the child.
        if state is None or \
                (not self.post_update and uowcommit.is_deleted(state)):
            return
        if operation is not None and \
                child is not None and \
                not uowcommit.session._contains_state(child):
            # Child was detached from the session; warn and skip rather
            # than writing stale key values.
            util.warn(
                "Object of type %s not in session, %s "
                "operation along '%s' won't proceed" %
                (mapperutil.state_class_str(child), operation, self.prop))
            return
        if clearkeys or child is None:
            sync.clear(state, self.parent, self.prop.synchronize_pairs)
        else:
            self._verify_canload(child)
            sync.populate(child, self.mapper, state,
                          self.parent,
                          self.prop.synchronize_pairs,
                          uowcommit,
                          False)
class DetectKeySwitch(DependencyProcessor):
    """For many-to-one relationships with no one-to-many backref,
    searches for parents through the unit of work when a primary
    key has changed and updates them.

    Theoretically, this approach could be expanded to support transparent
    deletion of objects referenced via many-to-one as well, although
    the current attribute system doesn't do enough bookkeeping for this
    to be efficient.

    """

    def per_property_preprocessors(self, uow):
        # Skip registration entirely when the reverse side will handle
        # key propagation: with a reverse property present, we only need
        # to run if updates are passive and some reverse property is not.
        if self.prop._reverse_property:
            if self.passive_updates:
                return
            else:
                if False in (prop.passive_updates for
                             prop in self.prop._reverse_property):
                    return

        uow.register_preprocessor(self, False)

    def per_property_flush_actions(self, uow):
        # Run our processing after all parent rows have been saved, so
        # that any switched PK values are already in place.
        parent_saves = unitofwork.SaveUpdateAll(
            uow,
            self.parent.base_mapper)
        after_save = unitofwork.ProcessAll(uow, self, False, False)
        uow.dependencies.update([
            (parent_saves, after_save)
        ])

    def per_state_flush_actions(self, uow, states, isdelete):
        # Handled entirely at the property level; no per-state actions.
        pass

    def presort_deletes(self, uowcommit, states):
        pass

    def presort_saves(self, uow, states):
        if not self.passive_updates:
            # for non-passive updates, register in the preprocess stage
            # so that mapper save_obj() gets a hold of changes
            self._process_key_switches(states, uow)

    def prop_has_changes(self, uow, states, isdelete):
        if not isdelete and self.passive_updates:
            d = self._key_switchers(uow, states)
            return bool(d)

        return False

    def process_deletes(self, uowcommit, states):
        # deletes are never routed to this processor
        assert False

    def process_saves(self, uowcommit, states):
        # for passive updates, register objects in the process stage
        # so that we avoid ManyToOneDP's registering the object without
        # the listonly flag in its own preprocess stage (results in UPDATE)
        # statements being emitted
        assert self.passive_updates
        self._process_key_switches(states, uowcommit)

    def _key_switchers(self, uow, states):
        """Partition *states* into "PK changed"/"PK unchanged" sets,
        memoized for the duration of the flush; return the changed set."""
        switched, notswitched = uow.memo(
            ('pk_switchers', self),
            lambda: (set(), set())
        )

        allstates = switched.union(notswitched)
        for s in states:
            if s not in allstates:
                if self._pks_changed(uow, s):
                    switched.add(s)
                else:
                    notswitched.add(s)
        return switched

    def _process_key_switches(self, deplist, uowcommit):
        """Locate in-session parents that reference a PK-switched child
        and re-populate their foreign key columns."""
        switchers = self._key_switchers(uowcommit, deplist)
        if switchers:
            # if primary key values have actually changed somewhere, perform
            # a linear search through the UOW in search of a parent.
            for state in uowcommit.session.identity_map.all_states():
                if not issubclass(state.class_, self.parent.class_):
                    continue
                dict_ = state.dict
                related = state.get_impl(self.key).get(
                    state, dict_,
                    passive=self._passive_update_flag)
                if related is not attributes.PASSIVE_NO_RESULT and \
                        related is not None:
                    related_state = attributes.instance_state(dict_[self.key])
                    if related_state in switchers:
                        # mark the parent for UPDATE and copy the new
                        # key values from the switched child.
                        uowcommit.register_object(state,
                                                  False,
                                                  self.passive_updates)
                        sync.populate(
                            related_state,
                            self.mapper, state,
                            self.parent, self.prop.synchronize_pairs,
                            uowcommit, self.passive_updates)

    def _pks_changed(self, uowcommit, state):
        # Only persistent objects (those with an identity key) can have
        # a meaningful PK switch.
        return bool(state.key) and sync.source_modified(
            uowcommit,
            state,
            self.mapper,
            self.prop.synchronize_pairs)
class ManyToManyDP(DependencyProcessor):
    """Dependency processor for many-to-many relationships: maintains
    rows in the association ("secondary") table as parent/child
    collections change during a flush.
    """

    def per_property_dependencies(self, uow, parent_saves,
                                  child_saves,
                                  parent_deletes,
                                  child_deletes,
                                  after_save,
                                  before_delete
                                  ):
        # Association rows are written after both endpoint rows exist,
        # and removed before either endpoint row is deleted.
        uow.dependencies.update([
            (parent_saves, after_save),
            (child_saves, after_save),
            (after_save, child_deletes),

            # a rowswitch on the parent from deleted to saved
            # can make this one occur, as the "save" may remove
            # an element from the
            # "deleted" list before we have a chance to
            # process its child rows
            (before_delete, parent_saves),

            (before_delete, parent_deletes),
            (before_delete, child_deletes),
            (before_delete, child_saves),
        ])

    def per_state_dependencies(self, uow,
                               save_parent,
                               delete_parent,
                               child_action,
                               after_save, before_delete,
                               isdelete, childisdelete):
        # Per-object ordering mirrors per_property_dependencies above.
        if not isdelete:
            if childisdelete:
                uow.dependencies.update([
                    (save_parent, after_save),
                    (after_save, child_action),
                ])
            else:
                uow.dependencies.update([
                    (save_parent, after_save),
                    (child_action, after_save),
                ])
        else:
            uow.dependencies.update([
                (before_delete, child_action),
                (before_delete, delete_parent)
            ])

    def presort_deletes(self, uowcommit, states):
        # TODO: no tests fail if this whole
        # thing is removed !!!!
        if not self.passive_deletes:
            # if no passive deletes, load history on
            # the collection, so that prop_has_changes()
            # returns True
            for state in states:
                uowcommit.get_attribute_history(
                    state,
                    self.key,
                    self._passive_delete_flag)

    def presort_saves(self, uowcommit, states):
        if not self.passive_updates:
            # if no passive updates, load history on
            # each collection where parent has changed PK,
            # so that prop_has_changes() returns True
            for state in states:
                if self._pks_changed(uowcommit, state):
                    history = uowcommit.get_attribute_history(
                        state,
                        self.key,
                        attributes.PASSIVE_OFF)

        if not self.cascade.delete_orphan:
            return

        # check for child items removed from the collection
        # if delete_orphan check is turned on.
        for state in states:
            history = uowcommit.get_attribute_history(
                state,
                self.key,
                attributes.PASSIVE_NO_INITIALIZE)
            if history:
                for child in history.deleted:
                    if self.hasparent(child) is False:
                        # orphaned child: mark for deletion along with
                        # its own delete-cascaded objects.
                        uowcommit.register_object(
                            child, isdelete=True,
                            operation="delete", prop=self.prop)
                        for c, m, st_, dct_ in self.mapper.cascade_iterator(
                                'delete',
                                child):
                            uowcommit.register_object(
                                st_, isdelete=True)

    def process_deletes(self, uowcommit, states):
        """Collect association rows to DELETE for parents being removed."""
        secondary_delete = []
        secondary_insert = []
        secondary_update = []

        # (child, parent) pairs already handled by the reverse side's
        # processor, used to avoid emitting duplicate DELETEs.
        processed = self._get_reversed_processed_set(uowcommit)
        tmp = set()
        for state in states:
            # this history should be cached already, as
            # we loaded it in preprocess_deletes
            history = uowcommit.get_attribute_history(
                state,
                self.key,
                self._passive_delete_flag)
            if history:
                for child in history.non_added():
                    if child is None or \
                            (processed is not None and
                             (state, child) in processed):
                        continue
                    associationrow = {}
                    if not self._synchronize(
                            state,
                            child,
                            associationrow,
                            False, uowcommit, "delete"):
                        continue
                    secondary_delete.append(associationrow)

                tmp.update((c, state) for c in history.non_added())

        if processed is not None:
            processed.update(tmp)

        self._run_crud(uowcommit, secondary_insert,
                       secondary_update, secondary_delete)

    def process_saves(self, uowcommit, states):
        """Collect association rows to INSERT/DELETE/UPDATE for saved
        parents, including PK-cascade updates for unchanged rows."""
        secondary_delete = []
        secondary_insert = []
        secondary_update = []

        processed = self._get_reversed_processed_set(uowcommit)
        tmp = set()

        for state in states:
            # when the parent's PK changed non-passively, we must load
            # the full collection so unchanged rows can be UPDATEd too.
            need_cascade_pks = not self.passive_updates and \
                self._pks_changed(uowcommit, state)
            if need_cascade_pks:
                passive = attributes.PASSIVE_OFF
            else:
                passive = attributes.PASSIVE_NO_INITIALIZE
            history = uowcommit.get_attribute_history(state, self.key,
                                                      passive)
            if history:
                for child in history.added:
                    if (processed is not None and
                            (state, child) in processed):
                        continue
                    associationrow = {}
                    if not self._synchronize(state,
                                             child,
                                             associationrow,
                                             False, uowcommit, "add"):
                        continue
                    secondary_insert.append(associationrow)
                for child in history.deleted:
                    if (processed is not None and
                            (state, child) in processed):
                        continue
                    associationrow = {}
                    if not self._synchronize(state,
                                             child,
                                             associationrow,
                                             False, uowcommit, "delete"):
                        continue
                    secondary_delete.append(associationrow)

                tmp.update((c, state)
                           for c in history.added + history.deleted)

                if need_cascade_pks:
                    # rewrite existing rows: bind old key values under
                    # "old_" prefixed params, new values unprefixed.
                    for child in history.unchanged:
                        associationrow = {}
                        sync.update(state,
                                    self.parent,
                                    associationrow,
                                    "old_",
                                    self.prop.synchronize_pairs)
                        sync.update(child,
                                    self.mapper,
                                    associationrow,
                                    "old_",
                                    self.prop.secondary_synchronize_pairs)

                        secondary_update.append(associationrow)

        if processed is not None:
            processed.update(tmp)

        self._run_crud(uowcommit, secondary_insert,
                       secondary_update, secondary_delete)

    def _run_crud(self, uowcommit, secondary_insert,
                  secondary_update, secondary_delete):
        """Emit the accumulated DELETE/UPDATE/INSERT statements against
        the secondary table, verifying affected rowcounts where the
        dialect supports it."""
        connection = uowcommit.transaction.connection(self.mapper)

        if secondary_delete:
            associationrow = secondary_delete[0]
            statement = self.secondary.delete(sql.and_(*[
                c == sql.bindparam(c.key, type_=c.type)
                for c in self.secondary.c
                if c.key in associationrow
            ]))
            result = connection.execute(statement, secondary_delete)

            if result.supports_sane_multi_rowcount() and \
                    result.rowcount != len(secondary_delete):
                # fewer rows matched than expected: concurrent
                # modification detected.
                raise exc.StaleDataError(
                    "DELETE statement on table '%s' expected to delete "
                    "%d row(s); Only %d were matched." %
                    (self.secondary.description, len(secondary_delete),
                     result.rowcount)
                )

        if secondary_update:
            associationrow = secondary_update[0]
            statement = self.secondary.update(sql.and_(*[
                c == sql.bindparam("old_" + c.key, type_=c.type)
                for c in self.secondary.c
                if c.key in associationrow
            ]))
            result = connection.execute(statement, secondary_update)
            if result.supports_sane_multi_rowcount() and \
                    result.rowcount != len(secondary_update):
                raise exc.StaleDataError(
                    "UPDATE statement on table '%s' expected to update "
                    "%d row(s); Only %d were matched." %
                    (self.secondary.description, len(secondary_update),
                     result.rowcount)
                )

        if secondary_insert:
            statement = self.secondary.insert()
            connection.execute(statement, secondary_insert)

    def _synchronize(self, state, child, associationrow,
                     clearkeys, uowcommit, operation):
        """Fill *associationrow* with key values from both sides;
        return False when no row can be generated."""
        # this checks for None if uselist=True
        self._verify_canload(child)

        # but if uselist=False we get here. If child is None,
        # no association row can be generated, so return.
        if child is None:
            return False

        if child is not None and not uowcommit.session._contains_state(child):
            if not child.deleted:
                util.warn(
                    "Object of type %s not in session, %s "
                    "operation along '%s' won't proceed" %
                    (mapperutil.state_class_str(child), operation, self.prop))
            return False

        sync.populate_dict(state, self.parent, associationrow,
                           self.prop.synchronize_pairs)
        sync.populate_dict(child, self.mapper, associationrow,
                           self.prop.secondary_synchronize_pairs)

        return True

    def _pks_changed(self, uowcommit, state):
        # True when any of the parent-side synchronized columns changed.
        return sync.source_modified(
            uowcommit,
            state,
            self.parent,
            self.prop.synchronize_pairs)
# Maps a relationship direction symbol to the DependencyProcessor
# subclass that implements flush-time dependency handling for it.
_direction_to_processor = {
    ONETOMANY: OneToManyDP,
    MANYTOONE: ManyToOneDP,
    MANYTOMANY: ManyToManyDP,
}
| |
# # # # #
# system.py
#
# University of Illinois/NCSA Open Source License
# Copyright (c) 2015 Information Trust Institute
# All rights reserved.
#
# Developed by:
#
# Information Trust Institute
# University of Illinois
# http://www.iti.illinois.edu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal with
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimers. Redistributions in binary form must
# reproduce the above copyright notice, this list of conditions and the following
# disclaimers in the documentation and/or other materials provided with the
# distribution.
#
# Neither the names of Information Trust Institute, University of Illinois, nor
# the names of its contributors may be used to endorse or promote products derived
# from this Software without specific prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
#
# # # # #
import sys
from domains.support.lib.common import *
from domains.support.lib.memory import *
import domains.support.config as confLib
import socket
import re
import psutil
import subprocess
def getUptime():
    """Return the system uptime as a human-readable string.

    Parses the output of ``uptime -p`` (e.g. "up 3 days, 2 hours") and
    drops the leading "up" token.

    Fix: the original split pattern ``'[\\s|\\t|\\n]+'`` put ``|`` inside
    a character class, where it is a literal pipe, and redundantly listed
    tab/newline which ``\\s`` already matches; ``r'\\s+'`` is what was meant.
    """
    raw = cmd("uptime -p")
    return " ".join(re.split(r'\s+', raw)[1:])
def getHostname():
    """Return the host name: the first line of /etc/hostname, stripped.

    Fix: the original leaked the file handle; use a context manager.
    """
    with open('/etc/hostname', 'r') as f:
        return f.readline().rstrip()
def getUserStats():
    """Return the users with an active local (":0") login session.

    Parses ``last`` output line by line; each entry is a dict with
    "name", "started" and "host" keys.

    Fix: the original guarded with ``len(l) > 5`` but then indexed up to
    ``l[9]``, raising IndexError on short lines; require 10 fields.
    """
    last = re.split(r'[\n]+', cmd("last"))
    hostname = socket.gethostname()
    ret = []
    for line in last:
        fields = list(filter(None, re.split(r'[\s\t]', line)))
        if len(fields) > 9 and fields[1] == ":0" and \
                fields[7] + " " + fields[8] + " " + fields[9] == "still logged in":
            ret.append({
                "name": fields[0],
                "started": fields[3] + " " + fields[4] + " " +
                           fields[5] + " " + fields[6],
                "host": hostname
            })
    return ret
def getProcessCpuUsage(processName):
    """Return the %CPU usage of *processName* as reported by ``ps``.

    Returns 0.0 when the process is not running or the output cannot be
    parsed (best-effort, matching the original behavior).

    Fix: the original used a bare ``except`` whose body was the no-op
    expression ``None``; narrow to ``Exception`` so SystemExit and
    KeyboardInterrupt are no longer swallowed.
    """
    try:
        lines = cmd("ps -C " + processName + " -o %cpu").split('\n')
        return float(lines[1])
    except Exception:
        return 0.0
def getProcessStatus(processName):
    """Return a ``(status, state)`` tuple for *processName*.

    status: "success" if at least one exact-name process is running,
            otherwise "error".
    state:  "running" or "stopped" accordingly.

    ``pgrep -c -x`` prints the number of exactly-matching processes.
    NOTE(review): a "no match" result presumably surfaces as a
    CalledProcessError with returncode 1 from ``cmd`` and is
    intentionally left unhandled here, keeping the defaults --
    confirm against the ``cmd`` helper's implementation.
    """
    status = "error"
    state = "stopped"
    try:
        statusCmdResp = cmd("pgrep -c -x " + processName).rstrip()
        if int(statusCmdResp) > 0:
            status = "success"
            state = "running"
    except subprocess.CalledProcessError as e:
        # returncode 2 = invalid pgrep options, 3 = pgrep internal error
        if e.returncode == 2:
            print("Invalid options specified for {0}: {1}".format(processName, e.cmd))
        if e.returncode == 3:
            print("Internal Error getting {0} status".format(processName))
    #getArmoreStatus()
    return status, state
def getProcessDict(processName):
    """Build a status dict (name/status/state/cpuPercent) for a process."""
    status, state = getProcessStatus(processName)
    display = processName.capitalize() if processName.islower() else processName
    return {
        "name": display,
        "status": status,
        "state": state,
        "cpuPercent": getProcessCpuUsage(processName),
    }
def getArmoreStatus():
    """Determine the ARMORE operating mode and its current status.

    The mode is detected from /etc/network/interfaces lines mentioning
    "Proxy Mode", "Passive" or "Transparent" (case-insensitive);
    defaults to "Proxy" when none match.
    """
    with open('/etc/network/interfaces') as f:
        modeLines = [x.strip() for x in f.readlines()]
    ret = {"name": "Proxy"}
    for l in modeLines:
        if re.match(".*Proxy Mode.*", l, re.I):
            ret["name"] = "Proxy"
            break
        if re.match(".*Passive.*", l, re.I):
            ret["name"] = "Passive"
            break
        if re.match(".*Transparent.*", l, re.I):
            ret["name"] = "Transparent"
            break
    ret["status"] = "error"
    ret["state"] = "stopped"
    if ret["name"].lower() == 'proxy':
        # NOTE(review): this replaces the whole dict, so "name" becomes
        # "ARMOREProxy" rather than the detected mode -- presumably
        # intentional; confirm against the dashboard UI.
        ret = getProcessDict("ARMOREProxy")
    elif ret["name"].lower() in ["transparent", "passive"]:
        # Passive/transparent modes have no daemon to probe; report healthy.
        ret["status"] = "success"
        ret["state"] = "running"
    return ret
def getDashboardInfo():
    """Return the status dicts shown on the dashboard: ARMORE plus bro."""
    return [getArmoreStatus(), getProcessDict("bro")]
def getStatuses(inpSystems):
    """Map each requested system name to a status dict.

    "bro"/"armoreproxy" are probed as OS processes; any other name is
    treated as the encryption feature and reported from the proxy config.
    """
    output = []
    for name in inpSystems:
        if name.lower() in ("bro", "armoreproxy"):
            output.append(getProcessDict(name))
            continue
        conf = confLib.getProxyConfig()
        disabled = conf["Encryption"] == "Disabled"
        output.append({
            "name": name.capitalize(),
            "status": "warning" if disabled else "success",
            "state": "disabled" if disabled else "enabled",
        })
    return output
def getSwapStats():
    """Return swap usage statistics (bytes) via psutil.

    Fixes:
    - the original first built the result by scraping ``vmstat`` and
      ``free -b`` output, then unconditionally overwrote it with psutil
      data; the dead scraping code is removed.
    - guard against ZeroDivisionError on hosts with no swap configured.
    """
    swap = psutil.swap_memory()
    total = swap[0]
    used = swap[1]
    return {
        "total": total,
        "used": used,
        "percent": (used / total * 100) if total else 0.0,
        "free": swap[2],
        "swapped_in": swap[4],
        "swapped_out": swap[5]
    }
def getDiskStats():
    """Return per-filesystem usage parsed from ``df`` (sizes in bytes).

    Fix: the original split pattern ``'[\\s|\\t*|\\n]+'`` treated ``|``
    and ``*`` as literal members of the character class; plain ``\\s+``
    is what was meant.
    """
    lines = re.split(r'[\n]+', cmd("df"))[1:]
    ret = []
    for line in lines:
        fields = re.split(r'\s+', line)
        # only well-formed 6-column rows (device, blocks, used, avail,
        # use%, mountpoint); wrapped/odd lines are skipped.
        if len(fields) == 6:
            ret.append({
                "device": fields[0],
                "mountpoint": fields[5],
                "space_total": float(fields[1]) * 1024,
                "space_used": float(fields[2]) * 1024,
                "space_used_percent": fields[4],
                "space_free": float(fields[3]) * 1024
            })
    return ret
def getLoadAvg():
    """Return the 1/5/15-minute load averages as a list of three floats.

    Fix: the original leaked the handle to /proc/loadavg; use a context
    manager.
    """
    with open("/proc/loadavg", 'r') as f:
        first_line = f.readline()
    return [float(x) for x in re.split(r'\s+', first_line)[:3]]
def getCpuStats():
    """Return CPU time percentages: user, system, idle, iowait.

    Fix: use psutil's named fields instead of positional indexes; the
    original indexes (0/2/3/4) assumed the Linux field order and would
    read the wrong fields on other platforms.  "iowait" only exists on
    Linux, so default it to 0.0 elsewhere.
    """
    cpuStats = psutil.cpu_times_percent()
    return {
        "user": cpuStats.user,
        "system": cpuStats.system,
        "idle": cpuStats.idle,
        "iowait": getattr(cpuStats, "iowait", 0.0)
    }
def getMemoryStats():
    """Return virtual-memory statistics (bytes) plus derived usage numbers.

    ex_used / ex_percent: usage excluding buffers/cache (total - available).
    in_used / in_percent: usage including buffers/cache (total - free).

    Fixes: use psutil's named fields instead of Linux-specific positional
    indexes, and drop the unused ``buffered``/``cached`` locals the
    original computed but never returned.
    """
    vm = psutil.virtual_memory()
    total = vm.total
    free = vm.free
    avail = vm.available
    ex_used = total - avail
    in_used = total - free
    return {
        "total": total,
        "available": avail,
        "ex_used": ex_used,
        "ex_percent": ex_used / total * 100,
        "in_used": in_used,
        "in_percent": in_used / total * 100,
        "free": free
    }
def getNumOfCores():
    """Return the number of logical CPU cores (None if undeterminable)."""
    return psutil.cpu_count()
def sect(array, theSection):
    # Placeholder: sectioning/pagination is not implemented yet; the
    # input is returned unchanged regardless of ``theSection``.
    return array
def getProcessStats(sortStr=None, order=None, filter=None, pid=None, section=None):
    """Return a list of per-process info dicts, optionally filtered,
    sorted and sectioned via the ``filt``/``sortOrderBy``/``sect`` helpers.

    ``filter`` shadows the builtin but is kept for caller compatibility.

    Fixes: hoist ``memory_info()`` (was called twice per process) and
    skip processes that exit between enumeration and inspection instead
    of aborting the whole listing.
    """
    procs = []
    for p in psutil.process_iter():
        try:
            mem = p.memory_info()
            proc = {
                "pid": p.pid,
                "name": p.name(),
                "user": p.username(),
                "status": p.status(),
                "created": timestampToPrettyDate(p.create_time()),
                "mem_rss": mem[0],
                "mem_vms": mem[1],
                "mem_percent": p.memory_percent(),
                "cpu_percent": p.cpu_percent(),
                "cmdline": p.cmdline(),
            }
        except psutil.NoSuchProcess:
            # process vanished mid-iteration; best-effort listing.
            continue
        procs.append(proc)
    if filter or pid:
        procs = filt(procs, filter, pid)
    if sortStr or order:
        procs = sortOrderBy(procs, sortStr, order)
    if section:
        procs = sect(procs, section)
    return procs
def getNumberOfProcs():
    """Return (total process count, count of processes not owned by root)."""
    total = 0
    non_root = 0
    for proc in psutil.process_iter():
        total += 1
        if proc.username() != 'root':
            non_root += 1
    return total, non_root
def getArmoreState(proxy=None, bro=None):
    """Summarize overall ARMORE health from the proxy and bro statuses.

    Both arguments default to live lookups via ``getProcessDict``; dicts
    with a "status" key may be passed in to avoid re-querying.
    """
    proxy = getProcessDict("ARMOREProxy") if proxy is None else proxy
    bro = getProcessDict("bro") if bro is None else bro

    if proxy["status"] != "success":
        status, state = "error", "stopped"
    elif bro["status"] == "success":
        status, state = "success", 'running'
    else:
        status, state = "warning", "running with errors"

    return {"name": 'ARMORE', "status": status, "state": state}
def getCommonInfo(currUser, page):
    """Assemble the template context fields shared by all pages."""
    return {
        "uptime": getUptime(),
        "currUser": currUser,
        "page": page,
        "hostname": getHostname(),
    }
| |
from test import test_support
import random
import sys
import unittest
verbose = test_support.verbose
nerrors = 0
def check(tag, expected, raw, compare=None):
    """Sort ``raw`` (with ``compare`` if given) and verify the result is
    element-wise *identical* (``is``, not ``==``) to ``expected``; on
    mismatch print diagnostics and bump the global ``nerrors``.
    """
    global nerrors

    if verbose:
        print " checking", tag

    orig = raw[:]   # save input in case of error

    if compare:
        raw.sort(compare)
    else:
        raw.sort()

    if len(expected) != len(raw):
        print "error in", tag
        print "length mismatch;", len(expected), len(raw)
        print expected
        print orig
        print raw
        nerrors += 1
        return

    for i, good in enumerate(expected):
        maybe = raw[i]
        # identity comparison also catches stability violations where
        # equal-comparing elements were reordered.
        if good is not maybe:
            print "error in", tag
            print "out of order at index", i, good, maybe
            print expected
            print orig
            print raw
            nerrors += 1
            return
class TestBase(unittest.TestCase):
    # Stress-test list.sort() across many sizes, orderings, and hostile
    # comparison functions.

    def testStressfully(self):
        # Try a variety of sizes at and around powers of 2, and at powers of 10.
        sizes = [0]
        for power in range(1, 10):
            n = 2 ** power
            sizes.extend(range(n - 1, n + 2))
        sizes.extend([10, 100, 1000])

        class Complains(object):
            # When enabled, __lt__ raises RuntimeError with probability
            # 1/1000 to exercise exception propagation mid-sort.
            maybe_complain = True

            def __init__(self, i):
                self.i = i

            def __lt__(self, other):
                if Complains.maybe_complain and random.random() < 0.001:
                    if verbose:
                        print " complaining at", self, other
                    raise RuntimeError
                return self.i < other.i

            def __repr__(self):
                return "Complains(%d)" % self.i

        class Stable(object):
            # Compares only on ``key``; ``index`` records input position
            # so stability can be verified afterwards.
            def __init__(self, key, i):
                self.key = key
                self.index = i

            def __cmp__(self, other):
                return cmp(self.key, other.key)
            __hash__ = None # Silence Py3k warning

            def __repr__(self):
                return "Stable(%d, %d)" % (self.key, self.index)

        for n in sizes:
            x = range(n)
            if verbose:
                print "Testing size", n

            s = x[:]
            check("identity", x, s)

            s = x[:]
            s.reverse()
            check("reversed", x, s)

            s = x[:]
            random.shuffle(s)
            check("random permutation", x, s)

            y = x[:]
            y.reverse()
            s = x[:]
            check("reversed via function", y, s, lambda a, b: cmp(b, a))

            if verbose:
                print " Checking against an insane comparison function."
                print " If the implementation isn't careful, this may segfault."
            s = x[:]
            # An inconsistent comparison must not crash; the result must
            # still be some permutation of the input.
            s.sort(lambda a, b: int(random.random() * 3) - 1)
            check("an insane function left some permutation", x, s)

            x = [Complains(i) for i in x]
            s = x[:]
            random.shuffle(s)
            Complains.maybe_complain = True
            it_complained = False
            try:
                s.sort()
            except RuntimeError:
                it_complained = True

            if it_complained:
                # After an exception the list must still hold exactly the
                # original elements, in some order.
                Complains.maybe_complain = False
                check("exception during sort left some permutation", x, s)

            s = [Stable(random.randrange(10), i) for i in xrange(n)]
            augmented = [(e, e.index) for e in s]
            augmented.sort()    # forced stable because ties broken by index
            x = [e for e, i in augmented] # a stable sort of s
            check("stability", x, s)
#==============================================================================
class TestBugs(unittest.TestCase):
    # Regression tests for historical list.sort() crashes.

    def test_bug453523(self):
        # bug 453523 -- list.sort() crasher.
        # If this fails, the most likely outcome is a core dump.
        # Mutations during a list sort should raise a ValueError.

        class C:
            def __lt__(self, other):
                # mutate the list being sorted from inside the comparison
                if L and random.random() < 0.75:
                    L.pop()
                else:
                    L.append(3)
                return random.random() < 0.5

        L = [C() for i in range(50)]
        self.assertRaises(ValueError, L.sort)

    def test_cmpNone(self):
        # Testing None as a comparison function.

        L = range(50)
        random.shuffle(L)
        L.sort(None)
        self.assertEqual(L, range(50))

    def test_undetected_mutation(self):
        # Python 2.4a1 did not always detect mutation

        memorywaster = []
        for i in range(20):
            def mutating_cmp(x, y):
                L.append(3)
                L.pop()
                return cmp(x, y)
            L = [1, 2]
            self.assertRaises(ValueError, L.sort, mutating_cmp)
            def mutating_cmp(x, y):
                L.append(3)
                del L[:]
                return cmp(x, y)
            self.assertRaises(ValueError, L.sort, mutating_cmp)
            # keep growing a nested structure so allocations move around
            memorywaster = [memorywaster]
#==============================================================================
class TestDecorateSortUndecorate(unittest.TestCase):
    # Exercises the decorate-sort-undecorate machinery behind the key=
    # argument, including interaction with cmp= and reverse=.

    def test_decorated(self):
        data = 'The quick Brown fox Jumped over The lazy Dog'.split()
        copy = data[:]
        random.shuffle(data)
        data.sort(key=str.lower)
        copy.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))

    def test_baddecorator(self):
        data = 'The quick Brown fox Jumped over The lazy Dog'.split()
        self.assertRaises(TypeError, data.sort, None, lambda x, y: 0)

    def test_stability(self):
        data = [(random.randrange(100), i) for i in xrange(200)]
        copy = data[:]
        data.sort(key=lambda x: x[0])   # sort on the random first field
        copy.sort()                     # sort using both fields
        self.assertEqual(data, copy)    # should get the same result

    def test_cmp_and_key_combination(self):
        # Verify that the wrapper has been removed
        def compare(x, y):
            self.assertEqual(type(x), str)
            # NOTE(review): this repeats the type(x) check; it likely was
            # meant to be type(y) -- confirm against upstream test_sort.
            self.assertEqual(type(x), str)
            return cmp(x, y)
        data = 'The quick Brown fox Jumped over The lazy Dog'.split()
        data.sort(cmp=compare, key=str.lower)

    def test_badcmp_with_key(self):
        # Verify that the wrapper has been removed
        data = 'The quick Brown fox Jumped over The lazy Dog'.split()
        self.assertRaises(TypeError, data.sort, "bad", str.lower)

    def test_key_with_exception(self):
        # Verify that the wrapper has been removed
        data = range(-2, 2)
        dup = data[:]
        self.assertRaises(ZeroDivisionError, data.sort, None, lambda x: 1 // x)
        self.assertEqual(data, dup)

    def test_key_with_mutation(self):
        data = range(10)
        def k(x):
            del data[:]
            data[:] = range(20)
            return x
        self.assertRaises(ValueError, data.sort, key=k)

    def test_key_with_mutating_del(self):
        data = range(10)
        class SortKiller(object):
            def __init__(self, x):
                pass
            def __del__(self):
                # mutate the list from a destructor fired during the sort
                del data[:]
                data[:] = range(20)
        self.assertRaises(ValueError, data.sort, key=SortKiller)

    def test_key_with_mutating_del_and_exception(self):
        data = range(10)
        ## dup = data[:]
        class SortKiller(object):
            def __init__(self, x):
                if x > 2:
                    raise RuntimeError
            def __del__(self):
                del data[:]
                data[:] = range(20)
        self.assertRaises(RuntimeError, data.sort, key=SortKiller)
        ## major honking subtlety: we *can't* do:
        ##
        ##  self.assertEqual(data, dup)
        ##
        ## because there is a reference to a SortKiller in the
        ## traceback and by the time it dies we're outside the call to
        ## .sort() and so the list protection gimmicks are out of
        ## date (this cost some brain cells to figure out...).

    def test_reverse(self):
        data = range(100)
        random.shuffle(data)
        data.sort(reverse=True)
        self.assertEqual(data, range(99, -1, -1))
        self.assertRaises(TypeError, data.sort, "wrong type")

    def test_reverse_stability(self):
        data = [(random.randrange(100), i) for i in xrange(200)]
        copy1 = data[:]
        copy2 = data[:]
        data.sort(cmp=lambda x, y: cmp(x[0], y[0]), reverse=True)
        copy1.sort(cmp=lambda x, y: cmp(y[0], x[0]))
        self.assertEqual(data, copy1)
        copy2.sort(key=lambda x: x[0], reverse=True)
        self.assertEqual(data, copy2)
#==============================================================================
def test_main(verbose=None):
    """Run all test classes under a py3k-warning filter for the cmp
    argument; when verbose on a debug build, repeat to detect reference
    leaks via sys.gettotalrefcount()."""
    test_classes = (
        TestBase,
        TestDecorateSortUndecorate,
        TestBugs,
    )

    with test_support.check_py3k_warnings(
            ("the cmp argument is not supported", DeprecationWarning)):
        test_support.run_unittest(*test_classes)

        # verify reference counting
        if verbose and hasattr(sys, "gettotalrefcount"):
            import gc
            counts = [None] * 5
            for i in xrange(len(counts)):
                test_support.run_unittest(*test_classes)
                gc.collect()
                counts[i] = sys.gettotalrefcount()
            print counts
# Run directly: enable verbose output (and the refcount check on
# debug builds).
if __name__ == "__main__":
    test_main(verbose=True)
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006 Alec Thomas
# Copyright (C) 2007 Eli Carter
# Copyright (C) 2007 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Eli Carter
from ConfigParser import ParsingError, RawConfigParser
from StringIO import StringIO
from collections import defaultdict
from functools import partial
from pkg_resources import resource_filename
from genshi.builder import tag
from trac.config import Configuration, ConfigSection
from trac.core import *
from trac.env import IEnvironmentSetupParticipant
from trac.perm import PermissionSystem
from trac.ticket.api import ITicketActionController, TicketSystem
from trac.ticket.model import Resolution
from trac.util import get_reporter_id, to_list
from trac.util.presentation import separated
from trac.util.translation import _, tag_, cleandoc_
from trac.web.chrome import Chrome, add_script, add_script_data
from trac.wiki.formatter import system_message
from trac.wiki.macros import WikiMacroBase
# -- Utilities for the ConfigurableTicketWorkflow
def parse_workflow_config(rawactions):
    """Given a list of options from [ticket-workflow]"""
    # Attribute defaults for each action.  ``default.copy()`` below is a
    # shallow copy, so actions that never override 'oldstates',
    # 'operations' or 'permissions' share these (never-mutated) lists.
    default = {
        'oldstates': [],
        'newstate': '',
        'name': '',
        'default': 0,
        'operations': [],
        'permissions': []
    }
    actions = defaultdict(lambda: default.copy())
    for option, value in rawactions:
        parts = option.split('.')
        name = parts[0]
        if len(parts) == 1:
            # Base name, of the syntax: old,states,here -> newstate
            try:
                oldstates, newstate = [x.strip() for x in value.split('->')]
            except ValueError:
                continue  # Syntax error, a warning will be logged later
            actions[name]['oldstates'] = to_list(oldstates)
            actions[name]['newstate'] = newstate
        else:
            # "<name>.<attribute> = value" form
            attribute = parts[1]
            if attribute == 'default':
                # weight used to order the rendered actions
                actions[name][attribute] = int(value)
            elif attribute in ('operations', 'permissions'):
                actions[name][attribute] = to_list(value)
            else:
                actions[name][attribute] = value
    # Fall back to the config key as the display name.
    for name, attrs in actions.iteritems():
        if not attrs.get('name'):
            attrs['name'] = name
    return actions
def get_workflow_config(config):
    """Usually passed self.config, this will return the parsed ticket-workflow
    section.
    """
    raw = list(config.options('ticket-workflow'))
    return parse_workflow_config(raw)
def load_workflow_config_snippet(config, filename):
    """Loads the ticket-workflow section from the given file (expected to be in
    the 'workflows' tree) into the provided config.
    """
    # Resolve the snippet bundled inside the trac.ticket package.
    filename = resource_filename('trac.ticket', 'workflows/%s' % filename)
    new_config = Configuration(filename)
    # Copy option-by-option into the target config; the caller is
    # responsible for saving it.
    for name, value in new_config.options('ticket-workflow'):
        config.set('ticket-workflow', name, value)
class ConfigurableTicketWorkflow(Component):
"""Ticket action controller which provides actions according to a
workflow defined in trac.ini.
The workflow is defined in the `[ticket-workflow]` section of the
[wiki:TracIni#ticket-workflow-section trac.ini] configuration file.
"""
implements(IEnvironmentSetupParticipant, ITicketActionController)
ticket_workflow_section = ConfigSection('ticket-workflow',
"""The workflow for tickets is controlled by plugins. By default,
there's only a `ConfigurableTicketWorkflow` component in charge.
That component allows the workflow to be configured via this section
in the `trac.ini` file. See TracWorkflow for more details.
(''since 0.11'')""")
def __init__(self, *args, **kwargs):
    # Parse [ticket-workflow] once at component creation; re-parsed
    # whenever the environment is created or upgraded.
    self.actions = self.get_all_actions()
    self.log.debug('Workflow actions at initialization: %s\n',
                   self.actions)
# IEnvironmentSetupParticipant methods
def environment_created(self):
    """When an environment is created, we provide the basic-workflow,
    unless a ticket-workflow section already exists.
    """
    if 'ticket-workflow' not in self.config.sections():
        load_workflow_config_snippet(self.config, 'basic-workflow.ini')
        self.config.save()
        # Re-read so the in-memory actions reflect the new section.
        self.actions = self.get_all_actions()
def environment_needs_upgrade(self, db):
    """The environment needs an upgrade if there is no [ticket-workflow]
    section in the config.
    """
    options = list(self.config.options('ticket-workflow'))
    return len(options) == 0
def upgrade_environment(self, db):
    """Insert a [ticket-workflow] section using the original-workflow"""
    load_workflow_config_snippet(self.config, 'original-workflow.ini')
    self.config.save()
    self.actions = self.get_all_actions()
    # User-facing upgrade notice; logged on one line (with '==' markers
    # stripped) and printed verbatim to the console.
    info_message = """
==== Upgrade Notice ====
The ticket Workflow is now configurable.
Your environment has been upgraded, but configured to use the original
workflow. It is recommended that you look at changing this configuration to use
basic-workflow.
Read TracWorkflow for more information (don't forget to 'wiki upgrade' as well)
"""
    self.log.info(info_message.replace('\n', ' ').replace('==', ''))
    print info_message
# ITicketActionController methods
def get_ticket_actions(self, req, ticket):
    """Returns a list of (weight, action) tuples that are valid for this
    request and this ticket."""
    # Get the list of actions that can be performed

    # Determine the current status of this ticket.  If this ticket is in
    # the process of being modified, we need to base our information on the
    # pre-modified state so that we don't try to do two (or more!) steps at
    # once and get really confused.
    status = ticket._old.get('status', ticket['status']) or 'new'

    ticket_perm = req.perm(ticket.resource)
    allowed_actions = []
    for action_name, action_info in self.actions.items():
        oldstates = action_info['oldstates']
        if oldstates == ['*'] or status in oldstates:
            # This action is valid in this state.  Check permissions.
            required_perms = action_info['permissions']
            if self._is_action_allowed(ticket_perm, required_perms):
                allowed_actions.append((action_info['default'],
                                        action_name))
    # Append special `_reset` action if status is invalid.
    if status not in TicketSystem(self.env).get_all_status() + \
            ['new', 'closed']:
        required_perms = self.actions['_reset'].get('permissions')
        if self._is_action_allowed(ticket_perm, required_perms):
            default = self.actions['_reset'].get('default')
            allowed_actions.append((default, '_reset'))
    return allowed_actions
def _is_action_allowed(self, ticket_perm, required_perms):
    """Return True when no permissions are required, or when
    ``ticket_perm`` grants at least one of ``required_perms``."""
    if not required_perms:
        return True
    return any(perm in ticket_perm for perm in required_perms)
def get_all_status(self):
    """Return a list of all states described by the configuration.
    """
    states = set()
    for attrs in self.actions.values():
        states.update(attrs['oldstates'])
        states.add(attrs['newstate'])
    # '*' is a wildcard and '' an unset newstate; neither is a real state.
    return states - set(['*', ''])
    def render_ticket_action_control(self, req, ticket, action):
        """Build the UI fragment for one workflow action.

        Returns a `(label, control, hints)` triple: the action's display
        name, a markup fragment holding any extra input widgets (owner /
        resolution pickers), and a period-joined hint string describing
        what applying the action will do.
        """
        self.log.debug('render_ticket_action_control: action "%s"', action)
        this_action = self.actions[action]
        status = this_action['newstate']
        operations = this_action['operations']
        # Base owner display on the pre-modification value.
        current_owner = ticket._old.get('owner', ticket['owner'])
        author = get_reporter_id(req, 'author')
        format_author = partial(Chrome(self.env).format_author, req)
        formatted_current_owner = format_author(current_owner or _("(none)"))
        control = [] # default to nothing
        hints = []
        if 'reset_workflow' in operations:
            control.append(_("from invalid state"))
            hints.append(_("Current state no longer exists"))
        if 'del_owner' in operations:
            hints.append(_("The ticket will be disowned"))
        if 'set_owner' in operations:
            # Owner widget: free-text field, hidden field (single fixed
            # owner), or a <select> when the owner list is restricted.
            id = 'action_%s_reassign_owner' % action
            if 'set_owner' in this_action:
                owners = [x.strip() for x in
                          this_action['set_owner'].split(',')]
            elif self.config.getbool('ticket', 'restrict_owner'):
                perm = PermissionSystem(self.env)
                owners = perm.get_users_with_permission('TICKET_MODIFY')
                owners.sort()
            else:
                owners = None
            if owners is None:
                owner = req.args.get(id, author)
                control.append(tag_("to %(owner)s",
                                    owner=tag.input(type='text', id=id,
                                                    name=id, value=owner)))
                hints.append(_("The owner will be changed from "
                               "%(current_owner)s to the specified user",
                               current_owner=formatted_current_owner))
            elif len(owners) == 1:
                owner = tag.input(type='hidden', id=id, name=id,
                                  value=owners[0])
                formatted_new_owner = format_author(owners[0])
                control.append(tag_("to %(owner)s",
                                    owner=tag(formatted_new_owner, owner)))
                if ticket['owner'] != owners[0]:
                    hints.append(_("The owner will be changed from "
                                   "%(current_owner)s to %(selected_owner)s",
                                   current_owner=formatted_current_owner,
                                   selected_owner=formatted_new_owner))
            else:
                selected_owner = req.args.get(id, req.authname)
                control.append(tag_("to %(owner)s", owner=tag.select(
                    [tag.option(x, value=x,
                                selected=(x == selected_owner or None))
                     for x in owners],
                    id=id, name=id)))
                hints.append(_("The owner will be changed from "
                               "%(current_owner)s to the selected user",
                               current_owner=formatted_current_owner))
        elif 'set_owner_to_self' in operations and \
                ticket._old.get('owner', ticket['owner']) != author:
            hints.append(_("The owner will be changed from %(current_owner)s "
                           "to %(authname)s",
                           current_owner=formatted_current_owner,
                           authname=format_author(author)))
        if 'set_resolution' in operations:
            # Resolution widget: hidden field for a single option,
            # otherwise a <select> defaulting to the configured default.
            if 'set_resolution' in this_action:
                resolutions = [x.strip() for x in
                               this_action['set_resolution'].split(',')]
            else:
                resolutions = [r.name for r in Resolution.select(self.env)]
            if not resolutions:
                raise TracError(_("Your workflow attempts to set a resolution "
                                  "but none is defined (configuration issue, "
                                  "please contact your Trac admin)."))
            id = 'action_%s_resolve_resolution' % action
            if len(resolutions) == 1:
                resolution = tag.input(type='hidden', id=id, name=id,
                                       value=resolutions[0])
                control.append(tag_("as %(resolution)s",
                                    resolution=tag(resolutions[0],
                                                   resolution)))
                hints.append(_("The resolution will be set to %(name)s",
                               name=resolutions[0]))
            else:
                selected_option = req.args.get(id,
                        TicketSystem(self.env).default_resolution)
                control.append(tag_("as %(resolution)s",
                                    resolution=tag.select(
                    [tag.option(x, value=x,
                                selected=(x == selected_option or None))
                     for x in resolutions],
                    id=id, name=id)))
                hints.append(_("The resolution will be set"))
        if 'del_resolution' in operations:
            hints.append(_("The resolution will be deleted"))
        if 'leave_status' in operations:
            control.append(_("as %(status)s",
                             status= ticket._old.get('status',
                                                     ticket['status'])))
            if len(operations) == 1:
                hints.append(_("The owner will remain %(current_owner)s",
                               current_owner=formatted_current_owner)
                             if current_owner else
                             _("The ticket will remain with no owner"))
        else:
            # '*' means "keep the current status" -- no hint for that.
            if status != '*':
                hints.append(_("Next status will be '%(name)s'", name=status))
        return (this_action.get('name', action), tag(separated(control, ' ')),
                '. '.join(hints) + '.' if hints else '')
def get_ticket_changes(self, req, ticket, action):
this_action = self.actions[action]
# Enforce permissions
if not self._has_perms_for_action(req, this_action, ticket.resource):
# The user does not have any of the listed permissions, so we won't
# do anything.
return {}
updated = {}
# Status changes
status = this_action['newstate']
if status != '*':
updated['status'] = status
for operation in this_action['operations']:
if operation == 'del_owner':
updated['owner'] = ''
elif operation == 'set_owner':
newowner = req.args.get('action_%s_reassign_owner' % action,
this_action.get('set_owner', '').strip())
# If there was already an owner, we get a list, [new, old],
# but if there wasn't we just get new.
if type(newowner) == list:
newowner = newowner[0]
updated['owner'] = newowner
elif operation == 'set_owner_to_self':
updated['owner'] = get_reporter_id(req, 'author')
elif operation == 'del_resolution':
updated['resolution'] = ''
elif operation == 'set_resolution':
newresolution = req.args.get('action_%s_resolve_resolution' % \
action,
this_action.get('set_resolution', '').strip())
updated['resolution'] = newresolution
# reset_workflow is just a no-op here, so we don't look for it.
# leave_status is just a no-op here, so we don't look for it.
return updated
    def apply_action_side_effects(self, req, ticket, action):
        """Perform any side effects for `action` (ITicketActionController
        hook).  The configurable workflow has none, so this is a no-op.
        """
        pass
def _has_perms_for_action(self, req, action, resource):
required_perms = action['permissions']
if required_perms:
for permission in required_perms:
if permission in req.perm(resource):
break
else:
# The user does not have any of the listed permissions
return False
return True
# Public methods (for other ITicketActionControllers that want to use
# our config file and provide an operation for an action)
    def get_all_actions(self):
        """Return the workflow's action definitions as a mapping of
        action name -> attribute dict, parsed from the
        [ticket-workflow] configuration section.

        A synthetic `_reset` action is injected when the configuration
        doesn't define one, so that tickets stranded in a state that no
        longer exists can still be moved back to 'new'.
        """
        actions = parse_workflow_config(self.ticket_workflow_section.options())
        # Special action that gets enabled if the current status no longer
        # exists, as no other action can then change its state. (#5307/#11850)
        if '_reset' not in actions:
            reset = {
                'default': 0,
                'name': 'reset',
                'newstate': 'new',
                'oldstates': [],
                'operations': ['reset_workflow'],
                'permissions': ['TICKET_ADMIN']
            }
            # NOTE(review): indexing actions['_reset'] before it exists
            # presumes parse_workflow_config returns a defaultdict-like
            # mapping that auto-creates entries -- confirm, otherwise
            # this would raise KeyError.
            for key, val in reset.items():
                actions['_reset'][key] = val
        # Warn about actions that can never change a ticket's state.
        for name, info in actions.iteritems():
            if not info['newstate']:
                self.log.warning("Ticket workflow action '%s' doesn't define "
                                 "any transitions", name)
        return actions
def get_actions_by_operation(self, operation):
"""Return a list of all actions with a given operation
(for use in the controller's get_all_status())
"""
actions = [(info['default'], action) for action, info
in self.actions.items()
if operation in info['operations']]
return actions
def get_actions_by_operation_for_req(self, req, ticket, operation):
"""Return list of all actions with a given operation that are valid
in the given state for the controller's get_ticket_actions().
If state='*' (the default), all actions with the given operation are
returned.
"""
# Be sure to look at the original status.
status = ticket._old.get('status', ticket['status'])
actions = [(info['default'], action)
for action, info in self.actions.items()
if operation in info['operations'] and
('*' in info['oldstates'] or
status in info['oldstates']) and
self._has_perms_for_action(req, info, ticket.resource)]
return actions
class WorkflowMacro(WikiMacroBase):
    # Wiki macro that renders a ticket workflow as a directed graph.
    _domain = 'messages'
    # NOTE(review): _description is user-visible macro help text (a
    # runtime string, not a Python docstring); the "heigth" typo below
    # is preserved because changing it would alter rendered output.
    _description = cleandoc_(
    """Render a workflow graph.
    This macro accepts a TracWorkflow configuration and renders the states
    and transitions as a directed graph. If no parameters are given, the
    current ticket workflow is rendered. In WikiProcessors mode the `width`
    and `height` arguments can be specified.
    (Defaults: `width = 800` and `heigth = 600`)
    Examples:
    {{{
    [[Workflow()]]
    [[Workflow(go = here -> there; return = there -> here)]]
    {{{
    #!Workflow width=700 height=700
    leave = * -> *
    leave.operations = leave_status
    leave.default = 1
    accept = new,assigned,accepted,reopened -> accepted
    accept.permissions = TICKET_MODIFY
    accept.operations = set_owner_to_self
    resolve = new,assigned,accepted,reopened -> closed
    resolve.permissions = TICKET_MODIFY
    resolve.operations = set_resolution
    reassign = new,assigned,accepted,reopened -> assigned
    reassign.permissions = TICKET_MODIFY
    reassign.operations = set_owner
    reopen = closed -> reopened
    reopen.permissions = TICKET_CREATE
    reopen.operations = del_resolution
    }}}
    }}}
    """)
    def expand_macro(self, formatter, name, text, args):
        """Parse the (optional) workflow text into actions, derive the
        state nodes and transition edges, and emit the placeholder <div>
        plus the script data consumed by workflow_graph.js.
        """
        if not text:
            # No argument: graph the site's live [ticket-workflow] config.
            raw_actions = self.config.options('ticket-workflow')
        else:
            if args is None:
                # Inline macro form: options are ';'-separated.
                text = '\n'.join([line.lstrip() for line in text.split(';')])
            if '[ticket-workflow]' not in text:
                text = '[ticket-workflow]\n' + text
            parser = RawConfigParser()
            try:
                parser.readfp(StringIO(text))
            except ParsingError, e:
                return system_message(_("Error parsing workflow."),
                                      unicode(e))
            raw_actions = list(parser.items('ticket-workflow'))
        actions = parse_workflow_config(raw_actions)
        # All distinct states, old and new, become graph nodes.
        states = list(set(
            [state for action in actions.itervalues()
                   for state in action['oldstates']] +
            [action['newstate'] for action in actions.itervalues()]))
        # NOTE(review): the comprehension's loop variable `name` shadows
        # the macro-name parameter; harmless here since the parameter is
        # not used afterwards, but worth renaming eventually.
        action_labels = [attrs.get('name') or name
                         for name, attrs in actions.items()]
        action_names = actions.keys()
        edges = []
        for name, action in actions.items():
            new_index = states.index(action['newstate'])
            name_index = action_names.index(name)
            for old_state in action['oldstates']:
                old_index = states.index(old_state)
                edges.append((old_index, new_index, name_index))
        args = args or {}
        width = args.get('width', 800)
        height = args.get('height', 600)
        graph = {'nodes': states, 'actions': action_labels, 'edges': edges,
                 'width': width, 'height': height}
        # id() of the dict serves as a page-unique element suffix.
        graph_id = '%012x' % id(graph)
        req = formatter.req
        add_script(req, 'common/js/excanvas.js', ie_if='IE')
        add_script(req, 'common/js/workflow_graph.js')
        add_script_data(req, {'graph_%s' % graph_id: graph})
        return tag(
            tag.div('', class_='trac-workflow-graph trac-noscript',
                    id='trac-workflow-graph-%s' % graph_id,
                    style="display:inline-block;width:%spx;height:%spx" %
                          (width, height)),
            tag.noscript(
                tag.div(_("Enable JavaScript to display the workflow graph."),
                        class_='system-message')))
| |
"""
This sample demonstrates a system for drying material. Various pieces
of material must be dried at a defined (or higher) temperature for a
defined period of time, uninterrupted. This system keeps track of the
temperature in the oven, as well as the material in the oven and how
long it has been drying.
"""
from collections import namedtuple
import datetime
import wx
import hotmodel
import hotwidgets
class Material(namedtuple(
    "_Material",
    [
        "name",
        "status",     # one of "DRYING", "DRIED", "IDLE"
        "req_temp",   # required drying temperature, degrees centigrade
        "req_time",   # required drying time, seconds
        "start_tm",   # datetime when drying started (set while "DRYING")
    ],),
):
    """
    Immutable status record for a single piece of material.

    The status is one of IDLE, DRYING or DRIED.  Besides the requested
    drying temperature and time, a DRYING material remembers when its
    drying started.  Being a collections.namedtuple (hence a tuple),
    instances are never mutated; every state change yields a new object.
    """
    def update(self, temperature, tm=None):
        """
        React to a temperature change.

        Return a new Material reflecting the state implied by
        `temperature` at time `tm` (defaults to now), or return `self`
        unchanged when nothing happens.  Never modifies `self`.
        """
        if self.status == "DRIED":
            # Terminal state: once dried, always dried.
            return self
        if self.status == "IDLE" and temperature < self.req_temp:
            # Too cold to start drying -- nothing to do.
            return self
        if tm is None:
            tm = datetime.datetime.now()
        if self.status == "DRYING" and temperature >= self.req_temp:
            # Still hot enough; check whether the required time is up.
            elapsed = (tm - self.start_tm).total_seconds()
            if elapsed >= self.req_time:
                return self._replace(status="DRIED", start_tm=tm)
            return self
        # Either IDLE and now hot enough (start drying), or DRYING but
        # the oven cooled down (back to IDLE, accumulated time is lost).
        new_status = "DRYING" if temperature >= self.req_temp else "IDLE"
        return self._replace(status=new_status, start_tm=tm)
class MaterialList(hotmodel.TypedHotList):
    """
    A HotList that accepts only Material instances as items.
    """
    # Pins the element type to Material; the remaining constructor
    # arguments are forwarded to TypedHotList unchanged.
    def __init__(self, init_iterable=None, name=None, container=None):
        super(MaterialList, self).__init__(
            Material, init_iterable, name, container,
        )
class Model(hotmodel.HotContainer):
    """
    Keeps track of the material in the oven and the oven temperature.
    """
    # Hot* properties notify registered listeners on (re)assignment, so
    # views stay in sync with the model state.
    material = hotmodel.HotTypedProperty(MaterialList)
    temperature = hotmodel.HotProperty()
    def __init__(self):
        super(Model, self).__init__()
        self.material = []
        self.temperature = 20  # initial oven temperature, degrees C
    def set_temperature(self, temp):
        """
        Set the temperature in the oven. Check the material with regard
        to this new temperature.
        """
        if temp == self.temperature:
            # No change: avoid a spurious listener notification.
            return
        self.temperature = temp
        self.update_mat()
    def update_mat(self):
        """
        Should be called periodically to manipulate states of the material.
        """
        tm = datetime.datetime.now()
        for (index, mat) in enumerate(self.material):
            new_mat = mat.update(self.temperature, tm)
            # Assign only on a real state change: item assignment fires
            # a change event on the hot list.
            if new_mat != mat:
                self.material[index] = new_mat
    def add_material(self, mat):
        """
        Adds the material mat to the model.
        """
        # Bring the new item in line with the current oven temperature
        # before publishing it to listeners.
        mat = mat.update(self.temperature)
        self.material.append(mat)
class MaterialView(hotwidgets.MVCList):
    """
    A table displaying one Material on each row.
    """
    # NOTE(review): the first column key is "label" while Material's
    # field is "name" -- presumably MVCList resolves "label" specially;
    # confirm against hotwidgets.
    def __init__(self, parent):
        super(MaterialView, self).__init__(
            parent, -1, style=wx.LC_REPORT,
            columns=[
                ("label", "Material"),
                ("status", ""),
                ("req_temp", "Dries at"),
                ("req_time", "How long (seconds)"),
                ("start_tm", "Until"),
            ],
        )
    def update_item(self, index, data):
        """
        The "Until" column requires some computation. Also setting
        the background color of the item.
        """
        super(MaterialView, self).update_item(index, data)
        dry_at = ""
        color = "LIGHT GREY"  # default: IDLE
        if "DRYING" == data.status:
            # Expected completion time = start + required drying time;
            # displayed as time-of-day only.
            tm = data.start_tm + datetime.timedelta(0, data.req_time)
            tm = datetime.time(tm.hour, tm.minute, tm.second)
            dry_at = str(tm)
            color = "PINK"
        elif "DRIED" == data.status:
            color = "PALE GREEN"
        self.SetStringItem(index, 4, dry_at)
        self.SetItemBackgroundColour(index, wx.TheColourDatabase.Find(color))
class MatDrierView(wx.Dialog):
    """
    The viewer for the whole Material Drying model.

    Wires a MaterialView table and the input fields to the Model via a
    hotmodel.Mapper, and runs a 1 Hz timer that moves the simulated oven
    temperature one degree per tick toward the requested goal.
    """
    def __init__(
        self, parent, dummy_app, title, model,
    ):
        super(MatDrierView, self).__init__(
            parent, -1,
            title,
        )
        self.model = model
        box = wx.GridBagSizer(5, 5)
        self.mat_view = MaterialView(self)
        self.mat_view.SetMinSize((500, 300))
        self.temperature = wx.StaticText(self, -1)
        # material input
        self.mat_name = wx.TextCtrl(self, -1, "")
        self.req_time = wx.TextCtrl(self, -1, "")
        self.req_temp = wx.TextCtrl(self, -1, "")
        add_mat = wx.Button(self, -1, "Add material")
        self.temp_goal = wx.TextCtrl(self, -1, "75")
        # Grid layout: row 0 table, row 1 temperature label, row 3
        # material entry, row 5 oven temperature goal.
        box.Add(self.mat_view, (0, 0), (1, 7), flag=wx.EXPAND)
        box.Add(self.temperature, (1, 0))
        box.Add(
            wx.StaticLine(self, -1, style=wx.LI_HORIZONTAL),
            (2, 0), (1, 7), flag=wx.EXPAND,
        )
        box.Add(
            wx.StaticText(self, -1, "Material:"),
            (3, 0), flag=wx.ALIGN_CENTER_VERTICAL,
        )
        box.Add(self.mat_name, (3, 1))
        box.Add(
            wx.StaticText(self, -1, "Dry time:"),
            (3, 2), flag=wx.ALIGN_CENTER_VERTICAL,
        )
        box.Add(self.req_time, (3, 3))
        box.Add(
            wx.StaticText(self, -1, "Dry temperature:"),
            (3, 4), flag=wx.ALIGN_CENTER_VERTICAL,
        )
        box.Add(self.req_temp, (3, 5))
        box.Add(add_mat, (3, 6))
        box.Add((10, 10), (4, 0))
        box.Add(
            wx.StaticText(self, -1, "Set oven temperature to:"),
            (5, 0), flag=wx.ALIGN_CENTER_VERTICAL,
        )
        box.Add(self.temp_goal, (5, 1))
        box.Add((10, 10), (6, 0))
        # Route model change events to the widgets.
        self.mapper = hotmodel.Mapper()
        self.mat_view.add_routes(self.mapper, "material")
        self.mapper.add_route("temperature", "", self.on_temperature)
        self.model.add_listener(self.mapper)
        self.Bind(wx.EVT_BUTTON, self.on_add_mat, add_mat)
        box.AddGrowableCol(6)
        box.AddGrowableRow(0)
        self.SetSizerAndFit(box)
        # 1000 ms tick drives the temperature simulation.
        self.timer = wx.Timer(self, -1)
        self.timer.Start(1000)
        self.Bind(wx.EVT_TIMER, self.on_timer)
    def on_timer(self, evt):
        # Move the oven temperature one degree toward the goal and
        # re-evaluate all material.
        # NOTE(review): int() raises ValueError on non-numeric goal
        # input -- confirm this is acceptable for a demo.
        evt.Skip()
        goal = int(self.temp_goal.GetValue())
        temp = self.model.temperature
        if goal > self.model.temperature:
            temp += 1
        elif goal < self.model.temperature:
            temp -= 1
        self.model.set_temperature(temp)
        self.model.update_mat()
    def on_add_mat(self, evt):
        # Create an IDLE Material from the three input fields; ignored
        # when the name field is empty.
        evt.Skip()
        mat = self.mat_name.GetValue()
        tm = int(self.req_time.GetValue())
        temp = int(self.req_temp.GetValue())
        if mat:
            self.model.add_material(Material(
                mat, "IDLE", temp, tm, None
            ))
    def on_temperature(self, model, fqname, event_name, key):
        """
        The temperature has changed in the model.
        """
        # `model` here is the new temperature value delivered by the
        # mapper route, not the Model object itself.
        self.temperature.SetLabel("Current temperature: %s" % model)
# Demo entry point: build the model, seed it with sample material, and
# show the dialog.
if "__main__" == __name__:
    MODEL = Model()
    APP = wx.App(redirect=False)
    FRAME = MatDrierView(None, APP, "Material Drier", MODEL)
    # throw in some values to have more than just an empty dialog box
    MODEL.add_material(Material("MAT-001", "IDLE", 60, 100, None))
    MODEL.add_material(Material("MAT-002", "IDLE", 60, 30, None))
    MODEL.add_material(Material("MAT-003", "IDLE", 70, 30, None))
    MODEL.add_material(Material("MAT-004", "IDLE", 80, 100, None))
    MODEL.set_temperature(55)
    # ShowModal blocks until the dialog is closed; MainLoop then lets
    # remaining wx events drain before exit.
    FRAME.ShowModal()
    FRAME.Destroy()
    APP.MainLoop()
| |
# Copyright 2018 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline section."""
import time
import datetime
import uuid
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
from google.cloud.logging import DESCENDING
import werkzeug
from flask import Blueprint, json
from flask_restful import abort
from flask_restful import fields
from flask_restful import marshal_with
from flask_restful import Resource
from flask_restful import reqparse
from core import cloud_logging
from core import insight
from core.models import Job
from core.models import Pipeline
from ibackend.extensions import api
blueprint = Blueprint('pipeline', __name__)

# Request parser shared by the create (POST) and update (PUT/PATCH)
# endpoints; list-typed arguments are read from the JSON body.
parser = reqparse.RequestParser()
parser.add_argument('name')
parser.add_argument('emails_for_notifications')
parser.add_argument('run_on_schedule')
parser.add_argument('schedules', type=list, location='json')
parser.add_argument('params', type=list, location='json')

# flask_restful marshalling templates describing the API response shape.
schedule_fields = {
    'id': fields.Integer,
    'pipeline_id': fields.Integer,
    'cron': fields.String,
}
param_fields = {
    'id': fields.Integer,
    'name': fields.String,
    'type': fields.String,
    # Serialized from the model's api_value attribute.
    'value': fields.Raw(attribute='api_value'),
    'label': fields.String
}
pipeline_fields = {
    'id': fields.Integer,
    'name': fields.String,
    'emails_for_notifications': fields.String,
    # Exposed as 'status' though the model attribute is 'state'.
    'status': fields.String(attribute='state'),
    'updated_at': fields.String,
    'run_on_schedule': fields.Boolean,
    'schedules': fields.List(fields.Nested(schedule_fields)),
    'params': fields.List(fields.Nested(param_fields)),
    'message': fields.String,
    'has_jobs': fields.Boolean,
}
def abort_if_pipeline_doesnt_exist(pipeline, pipeline_id):
  """Abort the current request with a 404 when `pipeline` is None."""
  if pipeline is not None:
    return
  abort(404, message="Pipeline {} doesn't exist".format(pipeline_id))
class PipelineSingle(Resource):
  """Shows a single pipeline item and lets you delete a pipeline item"""

  @marshal_with(pipeline_fields)
  def get(self, pipeline_id):
    # 404 when the id is unknown.
    pipeline = Pipeline.find(pipeline_id)
    abort_if_pipeline_doesnt_exist(pipeline, pipeline_id)
    return pipeline

  @marshal_with(pipeline_fields)
  def delete(self, pipeline_id):
    pipeline = Pipeline.find(pipeline_id)
    abort_if_pipeline_doesnt_exist(pipeline, pipeline_id)
    # A running/blocked pipeline cannot be removed: 422.
    if pipeline.is_blocked():
      return {
          'message': 'Removing of active pipeline is unavailable'
      }, 422
    pipeline.destroy()
    return {}, 204

  @marshal_with(pipeline_fields)
  def put(self, pipeline_id):
    pipeline = Pipeline.find(pipeline_id)
    abort_if_pipeline_doesnt_exist(pipeline, pipeline_id)
    # A running/blocked pipeline cannot be edited: 422.
    if pipeline.is_blocked():
      return {
          'message': 'Editing of active pipeline is unavailable'
      }, 422
    args = parser.parse_args()
    # Scalar attributes first, then schedules/params relations.
    pipeline.assign_attributes(args)
    pipeline.save()
    pipeline.save_relations(args)
    return pipeline, 200
class PipelineList(Resource):
  """Shows a list of all pipelines, and lets you POST to add new pipelines"""

  @marshal_with(pipeline_fields)
  def get(self):
    # Usage analytics only; no effect on the response.
    tracker = insight.GAProvider()
    tracker.track_event(category='pipelines', action='list')
    pipelines = Pipeline.all()
    return pipelines

  @marshal_with(pipeline_fields)
  def post(self):
    args = parser.parse_args()
    pipeline = Pipeline(name=args['name'])
    # Scalar attributes first, then schedules/params relations.
    pipeline.assign_attributes(args)
    pipeline.save()
    pipeline.save_relations(args)
    tracker = insight.GAProvider()
    tracker.track_event(category='pipelines', action='create')
    return pipeline, 201
class PipelineStart(Resource):
  """Starts a manual run of a pipeline."""

  @marshal_with(pipeline_fields)
  def post(self, pipeline_id):
    pipeline = Pipeline.find(pipeline_id)
    # Consistent with PipelineSingle: an unknown id yields a 404 rather
    # than an AttributeError (500) from calling start() on None.
    abort_if_pipeline_doesnt_exist(pipeline, pipeline_id)
    pipeline.start()
    tracker = insight.GAProvider()
    tracker.track_event(category='pipelines', action='manual_run')
    return pipeline
class PipelineStop(Resource):
  """Stops a running pipeline."""

  @marshal_with(pipeline_fields)
  def post(self, pipeline_id):
    pipeline = Pipeline.find(pipeline_id)
    # Consistent with PipelineSingle: an unknown id yields a 404 rather
    # than an AttributeError (500) from calling stop() on None.
    abort_if_pipeline_doesnt_exist(pipeline, pipeline_id)
    pipeline.stop()
    tracker = insight.GAProvider()
    tracker.track_event(category='pipelines', action='manual_stop')
    return pipeline
class PipelineExport(Resource):
  """Exports a pipeline as a downloadable JSON file.

  (The response is JSON, despite the historical mention of yaml: the
  filename ends in .json and the Content-type is text/json.)
  """

  def get(self, pipeline_id):
    tracker = insight.GAProvider()
    tracker.track_event(category='pipelines', action='export')
    pipeline = Pipeline.find(pipeline_id)
    jobs = self.__get_jobs__(pipeline)
    pipeline_params = []
    for param in pipeline.params:
      pipeline_params.append({
          'name': param.name,
          'value': param.value,
          'type': param.type,
      })
    pipeline_schedules = []
    for schedule in pipeline.schedules:
      pipeline_schedules.append({
          'cron': schedule.cron,
      })
    data = {
        'name': pipeline.name,
        'jobs': jobs,
        'params': pipeline_params,
        'schedules': pipeline_schedules
    }
    # Timestamped filename, e.g. mypipeline-20180101120000.json
    ts = time.time()
    pipeline_date = datetime.datetime.fromtimestamp(ts)
    pipeline_date_formatted = pipeline_date.strftime('%Y%m%d%H%M%S')
    filename = pipeline.name.lower() + "-" + pipeline_date_formatted + ".json"
    return data, 200, {
        'Access-Control-Expose-Headers': 'Filename',
        'Content-Disposition': "attachment; filename=" + filename,
        'Filename': filename,
        'Content-type': 'text/json'
    }

  # NOTE(review): dunder-style name for a private helper; a single
  # underscore would be conventional.
  def __get_jobs__(self, pipeline):
    """Serialize the pipeline's jobs, replacing database ids with fresh
    UUIDs so the export can be re-imported without id collisions.
    """
    job_mapping = {}
    for job in pipeline.jobs:
      job_mapping[job.id] = uuid.uuid4().hex
    jobs = []
    for job in pipeline.jobs:
      params = []
      for param in job.params:
        params.append({
            'name': param.name,
            'value': param.api_value,
            'label': param.label,
            'is_required': param.is_required,
            'type': param.type,
            'description': param.description
        })
      # Start conditions reference other jobs via the UUID mapping.
      start_conditions = []
      for start_condition in job.start_conditions:
        start_conditions.append({
            'preceding_job_id': job_mapping[start_condition.preceding_job_id],
            'condition': start_condition.condition
        })
      jobs.append({
          'id': job_mapping[job.id],
          'name': job.name,
          'worker_class': job.worker_class,
          'params': params,
          'hash_start_conditions': start_conditions
      })
    return jobs
# Parser for the multipart/form-data file upload used by PipelineImport.
import_parser = reqparse.RequestParser()
import_parser.add_argument(
    'upload_file',
    type=werkzeug.datastructures.FileStorage,
    location='files'
)
class PipelineImport(Resource):
  """Creates a pipeline from an uploaded JSON export file.

  (The parsed format is JSON -- see PipelineExport -- despite the
  historical mention of yaml.)
  """

  @marshal_with(pipeline_fields)
  def post(self):
    tracker = insight.GAProvider()
    tracker.track_event(category='pipelines', action='import')
    args = import_parser.parse_args()
    file_ = args['upload_file']
    data = {}
    if file_:
      # NOTE(review): json.loads raises on malformed uploads, which
      # surfaces as a 500 -- confirm whether a 400 would be preferable.
      data = json.loads(file_.read())
      pipeline = Pipeline(name=data['name'])
      pipeline.save()
      pipeline.import_data(data)
      return pipeline, 201
    # No file supplied: echo the empty payload.
    return data
class PipelineRunOnSchedule(Resource):
  """Toggles whether a pipeline runs on its configured schedules."""

  @marshal_with(pipeline_fields)
  def patch(self, pipeline_id):
    pipeline = Pipeline.find(pipeline_id)
    args = parser.parse_args()
    # Accept any capitalization of 'true'.  The previous exact
    # comparison with 'True' silently unscheduled on e.g. 'true';
    # anything else still disables scheduling, as before.
    schedule_pipeline = str(args['run_on_schedule']).lower() == 'true'
    pipeline.update(run_on_schedule=schedule_pipeline)
    tracker = insight.GAProvider()
    tracker.track_event(
        category='pipelines',
        action=('schedule' if schedule_pipeline else 'unschedule'))
    return pipeline
# Parser and marshalling templates for the log-listing endpoint; all
# arguments are optional filters except next_page_token (pagination).
log_parser = reqparse.RequestParser()
log_parser.add_argument('next_page_token')
log_parser.add_argument('worker_class')
log_parser.add_argument('job_id')
log_parser.add_argument('log_level')
log_parser.add_argument('query')
log_parser.add_argument('fromdate')
log_parser.add_argument('todate')
log_fields = {
    'timestamp': fields.String,
    'payload': fields.Raw,
    'job_name': fields.String
}
logs_fields = {
    'entries': fields.List(fields.Nested(log_fields)),
    'next_page_token': fields.String
}
class PipelineLogs(Resource):
  """Returns one page of Cloud Logging entries for a pipeline."""

  def get(self, pipeline_id):
    """Fetch up to 20 log entries matching the pipeline and the
    optional filter arguments, newest first, plus a pagination token.
    """
    args = log_parser.parse_args()
    entries = []
    urlfetch.set_default_fetch_deadline(300)
    next_page_token = args.get('next_page_token')
    page_size = 20
    # (The previous local `from core import cloud_logging` shadowed the
    # module-level import and is unnecessary.)
    project_id = app_identity.get_application_id()
    # Build the advanced-logs filter string.
    # NOTE(review): argument values are interpolated unescaped; a value
    # containing '"' could alter the filter -- confirm inputs are from
    # trusted (authenticated admin) users.
    filter_ = 'logName="projects/%s/logs/%s"' % (project_id,
                                                 cloud_logging.logger_name)
    filter_ += ' AND jsonPayload.labels.pipeline_id="%s"' % pipeline_id
    optional_filters = [
        ('worker_class', ' AND jsonPayload.labels.worker_class="%s"'),
        ('job_id', ' AND jsonPayload.labels.job_id="%s"'),
        ('log_level', ' AND jsonPayload.log_level="%s"'),
        ('query', ' AND jsonPayload.message:"%s"'),
        ('fromdate', ' AND timestamp>="%s"'),
        ('todate', ' AND timestamp<="%s"'),
    ]
    for arg_name, template in optional_filters:
      value = args.get(arg_name)
      if value:
        filter_ += template % value
    iterator = cloud_logging.client.list_entries(
        projects=[project_id],
        filter_=filter_,
        order_by=DESCENDING,
        page_size=page_size,
        page_token=next_page_token
    )
    page = next(iterator.pages)
    for entry in page:
      # Only structured entries labeled with a job id are returned.
      if isinstance(entry.payload, dict) \
          and entry.payload.get('labels') \
          and entry.payload.get('labels').get('job_id'):
        job = Job.find(entry.payload.get('labels').get('job_id'))
        entries.append({
            'timestamp': entry.timestamp.__str__(),
            'payload': entry.payload,
            # 'N/A' when the job was deleted after logging.
            'job_name': job.name if job else 'N/A',
            'log_level': entry.payload.get('log_level', 'INFO')
        })
    next_page_token = iterator.next_page_token
    return {
        'entries': entries,
        'next_page_token': next_page_token
    }
# URL routing for the pipeline API.
api.add_resource(PipelineList, '/pipelines')
api.add_resource(PipelineSingle, '/pipelines/<pipeline_id>')
api.add_resource(PipelineStart, '/pipelines/<pipeline_id>/start')
api.add_resource(PipelineStop, '/pipelines/<pipeline_id>/stop')
api.add_resource(PipelineExport, '/pipelines/<pipeline_id>/export')
api.add_resource(PipelineImport, '/pipelines/import')
api.add_resource(
    PipelineRunOnSchedule,
    '/pipelines/<pipeline_id>/run_on_schedule'
)
api.add_resource(PipelineLogs, '/pipelines/<pipeline_id>/logs')
| |
"""
Django settings for marco_portal project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
from p97settings import IniParser
from os.path import abspath, dirname
# from social.backends.google import GooglePlusAuth

# Absolute filesystem path to the Django project directory:
PROJECT_ROOT = dirname(dirname(dirname(abspath(__file__))))
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ASSETS_DIR = os.path.realpath(os.path.join(BASE_DIR, '..', 'assets'))
STYLES_DIR = os.path.realpath(os.path.join(ASSETS_DIR, 'styles'))
CONFIG_FILE = os.path.normpath(os.path.join(BASE_DIR, 'config.ini'))

# Deployment-specific values are read from config.ini; the third
# argument of the getters is the fallback default.
cfg = IniParser()
cfg.read(CONFIG_FILE)
DEBUG = cfg.getboolean('APP', 'DEBUG', True)
TEMPLATE_DEBUG = cfg.getboolean('APP', 'TEMPLATE_DEBUG', True)
SECRET_KEY = cfg.get('APP', 'SECRET_KEY', 'you forgot to set the secret key')
ALLOWED_HOSTS = cfg.getlist('APP', 'ALLOWED_HOSTS')

# Set logging to default, and then make admin error emails come through as HTML
from django.utils.log import DEFAULT_LOGGING
LOGGING = DEFAULT_LOGGING
LOGGING['handlers']['mail_admins']['include_html'] = True
# Application definition
INSTALLED_APPS = (
    'marco_site',
    'kombu.transport.django',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.gis',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    'django.contrib.webdesign',
    'p97settings',
    'email_log',
    'djcelery_email',
    'compressor',
    'taggit',
    'modelcluster',
    'rpc4django',
    'tinymce',
    'captcha',
    # Wagtail CMS components.
    'wagtail.wagtailcore',
    'wagtail.wagtailadmin',
    'wagtail.wagtaildocs',
    'wagtail.wagtailsnippets',
    'wagtail.wagtailusers',
    'wagtail.wagtailsites',
    'wagtail.wagtailimages',
    'wagtail.wagtailembeds',
    'wagtail.wagtailsearch',
    'wagtail.wagtailredirects',
    'wagtail.wagtailforms',
    'wagtail.contrib.wagtailsitemaps',
    # Portal site sections.
    'portal.base',
    'portal.menu',
    'portal.home',
    'portal.pages',
    'portal.ocean_stories',
    'portal.calendar',
    'portal.data_gaps',
    'portal.grid_pages',
    'portal.data_catalog',
    'portal.initial_data',
    'portal.welcome_snippet',
    'portal.news',
    'rest_framework',
    'flatblocks',
    'data_manager',
    'visualize',
    'features',
    'scenarios',
    'drawing',
    'manipulators',
    'explore',
    # Account management
    'accounts.apps.AccountsAppConfig',
    'django_social_share',
    'mapgroups',
    'import_export',
    'social_django',
    # Multilayer Dimensions in Data Manager
    'nested_admin',
)

# Social logins are tried in order, falling back to Django's own
# username/password backend.
AUTHENTICATION_BACKENDS = (
    'social_core.backends.google.GoogleOAuth2',
    # 'social_core.backends.google.GoogleOpenId',
    'social_core.backends.facebook.FacebookOAuth2',
    'social_core.backends.twitter.TwitterOAuth',
    'django.contrib.auth.backends.ModelBackend',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # Wagtail site resolution and redirect handling.
    'wagtail.wagtailcore.middleware.SiteMiddleware',
    'wagtail.wagtailredirects.middleware.RedirectMiddleware',
    'marco.host_site_middleware.HostSiteMiddleware',
)
# Valid site IDs are 1 and 2, corresponding to the primary site(1) and the
# test site(2)
SITE_ID = 1
INTERNAL_IPS = ('127.0.0.1',)
ROOT_URLCONF = 'marco.urls'
WSGI_APPLICATION = 'marco.wsgi.application'

# Database: PostGIS by default; a spatialite engine in config.ini
# switches to a file-based database for development.
default = {
    'ENGINE': cfg.get('DATABASE', 'ENGINE',
                      'django.contrib.gis.db.backends.postgis'),
}
if default['ENGINE'].endswith('spatialite'):
    SPATIALITE_LIBRARY_PATH = cfg.get('DATABASE', 'SPATIALITE_LIBRARY_PATH')
    default['NAME'] = cfg.get('DATABASE', 'NAME', os.path.join(BASE_DIR, 'marco.db'))
else:
    default['NAME'] = cfg.get('DATABASE', 'NAME')
    if cfg.has_option('DATABASE', 'USER'):
        default['USER'] = cfg.get('DATABASE', 'USER')
    default['HOST'] = cfg.get('DATABASE', 'HOST', 'localhost')
    default['PORT'] = cfg.getint('DATABASE', 'PORT', 5432)
    # NOTE(review): no default for PASSWORD -- presumably IniParser
    # returns a fallback for missing options; confirm for fresh setups.
    default['PASSWORD'] = cfg.get('DATABASE', 'PASSWORD')
DATABASES = {'default': default}

# Cache backend/location come from config.ini (Redis in production,
# judging by the client option below).
CACHES = {
    'default': {
        'BACKEND': cfg.get('CACHES', 'BACKEND'),
        'LOCATION': cfg.get('CACHES', 'LOCATION'),
        'KEY_PREFIX': 'marco_portal',
        'OPTIONS': {
            'CLIENT_CLASS': 'redis_cache.client.DefaultClient',
        }
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = cfg.get('APP', 'TIME_ZONE', 'UTC')
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = cfg.get('APP', 'STATIC_ROOT', os.path.join(BASE_DIR, 'static'))
STATIC_URL = cfg.get('APP', 'STATIC_URL', '/static/')
STATICFILES_DIRS = (
    STYLES_DIR,  # STYLES_DIR is defined earlier in this settings module
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'django.contrib.staticfiles.finders.DefaultStorageFinder',
    'compressor.finders.CompressorFinder',
)
MEDIA_ROOT = cfg.get('APP', 'MEDIA_ROOT', os.path.join(BASE_DIR, 'media'))
MEDIA_URL = cfg.get('APP', 'MEDIA_URL', '/media/')
# Django compressor settings
COMPRESS_PRECOMPILERS = (
    ('text/x-scss', 'django_libsass.SassCompiler'),  # for wagtail
)
# Only pre-compress assets offline outside of DEBUG runs.
COMPRESS_OFFLINE = not DEBUG
# Template configuration: extend Django's defaults rather than replace them.
from django.conf import global_settings
# RDH 20191114 - if tuple, make into a list
TEMPLATE_CONTEXT_PROCESSORS = [x for x in global_settings.TEMPLATE_CONTEXT_PROCESSORS] + [
    'django.core.context_processors.request',
    'social_django.context_processors.backends',
    'portal.base.context_processors.search_disabled',
]
TEMPLATE_LOADERS = [x for x in global_settings.TEMPLATE_LOADERS] + [
    'apptemplates.Loader',
]
# Wagtail settings
LOGIN_URL = 'account:index'
# LOGIN_REDIRECT_URL = 'wagtailadmin_home'
WAGTAIL_SITE_NAME = 'MARCO Portal'
WAGTAILSEARCH_RESULTS_TEMPLATE = 'portal/search_results.html'
# NOTE(review): the commented-out backend below embeds service credentials;
# rotate them and load from cfg before ever re-enabling this block.
# WAGTAILSEARCH_BACKENDS = {
#     'default': {
#         'BACKEND': 'wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch',
#         # 'URLS': ['https://iu20e5efzd:dibenj5fn5@point-97-elasticsear-6230081365.us-east-1.bonsai.io'],
#         'URLS': ['https://site:a379ac680e6aaa45f0c129c2cd28d064@bofur-us-east-1.searchly.com'],
#         'INDEX': 'marco_portal',
#         'TIMEOUT': 5,
#     }
# }
# Whether to use face/feature detection to improve image cropping - requires OpenCV
WAGTAILIMAGES_FEATURE_DETECTION_ENABLED = False
# Override the Image class used by wagtailimages with a custom one
WAGTAILIMAGES_IMAGE_MODEL = 'base.PortalImage'
FEEDBACK_IFRAME_URL = "//docs.google.com/a/pointnineseven.com/forms/d/1HMBSzAJ6QNpCOI01Z1CHHtrB0Fq6M081yXv5vBdBLm8/viewform?c=0&w=1"
# madrona-features
SHARING_TO_PUBLIC_GROUPS = ['Share with Public']
SHARING_TO_STAFF_GROUPS = ['Share with Staff']
# KML SETTINGS
KML_SIMPLIFY_TOLERANCE = 20  # meters
KML_SIMPLIFY_TOLERANCE_DEGREES = 0.0002  # Very roughly ~ 20 meters
KML_EXTRUDE_HEIGHT = 100
KML_ALTITUDEMODE_DEFAULT = 'absolute'
# madrona-scenarios
GEOMETRY_DB_SRID = 3857
GEOMETRY_CLIENT_SRID = 3857  # for latlon
GEOJSON_SRID = 3857
GEOJSON_DOWNLOAD = True  # force headers to treat like an attachment
# authentication
# Per-backend landing URLs after signup/login; the ?new/?login query params
# let the account page tailor its messaging to the provider used.
SOCIAL_AUTH_NEW_USER_URL = '/account/?new=true&login=django'
# BUG FIX: this setting was misspelled SOCIAL_AUTH_FACBEOOK_NEW_USER_URL,
# so python-social-auth never picked it up for the Facebook backend.
SOCIAL_AUTH_FACEBOOK_NEW_USER_URL = '/account/?new=true&login=facebook'
# Backward-compatible alias for anything still referencing the old name.
SOCIAL_AUTH_FACBEOOK_NEW_USER_URL = SOCIAL_AUTH_FACEBOOK_NEW_USER_URL
# SOCIAL_AUTH_GOOGLE_PLUS_NEW_USER_URL = '/account/?new=true&login=gplus'
SOCIAL_AUTH_TWITTER_NEW_USER_URL = '/account/?new=true&login=twitter'
SOCIAL_AUTH_GOOGLE_NEW_USER_URL = '/account/?new=true&login=google'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/account/?login=django'
# SOCIAL_AUTH_GOOGLE_PLUS_LOGIN_REDIRECT_URL = '/account/?login=gplus'
SOCIAL_AUTH_FACEBOOK_LOGIN_REDIRECT_URL = '/account/?login=facebook'
SOCIAL_AUTH_TWITTER_LOGIN_REDIRECT_URL = '/account/?login=twitter'
SOCIAL_AUTH_GOOGLE_LOGIN_REDIRECT_URL = '/account/?login=google'
# SOCIAL_AUTH_GOOGLE_PLUS_KEY = ''
# SOCIAL_AUTH_GOOGLE_PLUS_SECRET = ''
# SOCIAL_AUTH_GOOGLE_PLUS_SCOPES = (
#     'https://www.googleapis.com/auth/plus.login', # Minimum needed to login
#     'https://www.googleapis.com/auth/plus.profile.emails.read', # emails
# )
# OAuth app credentials come from the external config file, never source.
SOCIAL_AUTH_FACEBOOK_KEY = cfg.get('SOCIAL_AUTH', 'FACEBOOK_KEY', '')
SOCIAL_AUTH_FACEBOOK_SECRET = cfg.get('SOCIAL_AUTH', 'FACEBOOK_SECRET', '')
# Normalized: each OAuth scope is its own list element.  The previous
# single comma-joined string ('public_profile,email') happened to produce
# the same request string but was fragile and non-standard.
SOCIAL_AUTH_FACEBOOK_SCOPE = ['public_profile', 'email']
SOCIAL_AUTH_TWITTER_KEY = cfg.get('SOCIAL_AUTH', 'TWITTER_KEY', '')
SOCIAL_AUTH_TWITTER_SECRET = cfg.get('SOCIAL_AUTH', 'TWITTER_SECRET', '')
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = cfg.get('SOCIAL_AUTH', 'GOOGLE_KEY', '')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = cfg.get('SOCIAL_AUTH', 'GOOGLE_SECRET', '')
# SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = []
SOCIAL_AUTH_GOOGLE_OAUTH2_USE_DEPRECATED_API = True
# SOCIAL_AUTH_EMAIL_FORCE_EMAIL_VALIDATION = True
SOCIAL_AUTH_EMAIL_VALIDATION_FUNCTION = 'accounts.pipeline.send_validation_email'
SOCIAL_AUTH_EMAIL_VALIDATION_URL = '/account/validate'
SOCIAL_AUTH_DISCONNECT_REDIRECT_URL = '/'
SOCIAL_AUTH_POSTGRES_JSONFIELD = True
# Our authentication pipeline
# Steps run in order for every social-auth login attempt; 'accounts.*'
# entries are project-local steps, 'social_core.*' are library defaults.
SOCIAL_AUTH_PIPELINE = (
    'accounts.pipeline.clean_session',
    # Get the information we can about the user and return it in a simple
    # format to create the user instance later. On some cases the details are
    # already part of the auth response from the provider, but sometimes this
    # could hit a provider API.
    'social_core.pipeline.social_auth.social_details',
    # Get the social uid from whichever service we're authing thru. The uid is
    # the unique identifier of the given user in the provider.
    'social_core.pipeline.social_auth.social_uid',
    # Verifies that the current auth process is valid within the current
    # project, this is were emails and domains whitelists are applied (if
    # defined).
    'social_core.pipeline.social_auth.auth_allowed',
    # Checks if the current social-account is already associated in the site.
    'social_core.pipeline.social_auth.social_user',
    # Make up a username for this person, appends a random string at the end if
    # there's any collision.
    'social_core.pipeline.user.get_username',
    # Confirm with the user that they really want to make an account, also
    # make them enter an email address if they somehow didn't
    # 'accounts.pipeline.confirm_account',
    # Send a validation email to the user to verify its email address.
    'social_core.pipeline.mail.mail_validation',
    # Associates the current social details with another user account with
    # a similar email address. Disabled by default.
    # 'social_core.pipeline.social_auth.associate_by_email',
    # Create a user account if we haven't found one yet.
    'social_core.pipeline.user.create_user',
    # Create the record that associated the social account with this user.
    'social_core.pipeline.social_auth.associate_user',
    # Populate the extra_data field in the social record with the values
    # specified by settings (and the default ones like access_token, etc).
    'social_core.pipeline.social_auth.load_extra_data',
    # Update the user record with any changed info from the auth service.
    'social_core.pipeline.user.user_details',
    # Set up default django permission groups for new users.
    'accounts.pipeline.set_user_permissions',
    # Grab relevant information from the social provider (avatar)
    'accounts.pipeline.get_social_details',
    # 'social_core.pipeline.debug.debug',
    # Runs at both ends of the pipeline -- presumably clears temporary
    # session state; confirm against accounts.pipeline.
    'accounts.pipeline.clean_session',
)
# Outgoing mail: host/port from config; credentials only when both present.
EMAIL_HOST = cfg.get('EMAIL', 'HOST', 'localhost')
EMAIL_PORT = cfg.getint('EMAIL', 'PORT', 8025)
if cfg.has_option('EMAIL', 'HOST_USER') and \
        cfg.has_option('EMAIL', 'HOST_PASSWORD'):
    EMAIL_HOST_USER = cfg.get('EMAIL', 'HOST_USER')
    EMAIL_HOST_PASSWORD = cfg.get('EMAIL', 'HOST_PASSWORD')
# Send mail asynchronously via celery; celery in turn uses email_log's
# backend so each message is recorded.
EMAIL_BACKEND = 'djcelery_email.backends.CeleryEmailBackend'
CELERY_EMAIL_BACKEND = 'email_log.backends.EmailBackend'
DEFAULT_FROM_EMAIL = "MARCO Portal Team <portal@midatlanticocean.org>"
SERVER_EMAIL = "MARCO Site Errors <developers@pointnineseven.com>"
# for mail to admins/managers only
EMAIL_SUBJECT_PREFIX = cfg.get('APP', 'EMAIL_SUBJECT_PREFIX', '[MARCO]') + ' '
CELERY_RESULT_BACKEND = cfg.get('CELERY', 'RESULT_BACKEND', '')
BROKER_URL = cfg.get('CELERY', 'BROKER_URL', '')
GA_ACCOUNT = cfg.get('APP', 'GA_ACCOUNT', '')
ADMINS = (('KSDev', 'ksdev@ecotrust.org'),)
NOCAPTCHA = True
# NOTE(review): reCAPTCHA keys are committed in source; the private key
# should be moved into the cfg file and rotated.
RECAPTCHA_PUBLIC_KEY = '6LevfQoUAAAAAPIKTQHJt3_Y2NDXkZQ3HYQHDNHk'
RECAPTCHA_PRIVATE_KEY = '6LevfQoUAAAAACp-4BPAgx4oMgeJrn1d9IMyReoI'
# OL2 doesn't support reprojecting rasters, so for WMS servers that don't provide
# EPSG:3857 we send it to a proxy to be re-projected.
WMS_PROXY = 'http://tiles.ecotrust.org/mapserver/'
# The constants below are presumably the query-string parameter names /
# placeholder values the proxy mapfile understands -- confirm against the
# mapserver config at tiles.ecotrust.org.
WMS_PROXY_MAPFILE_FIELD = 'map'
WMS_PROXY_MAPFILE = '/mapfiles/generic.map'
WMS_PROXY_LAYERNAME = 'LAYERNAME'
WMS_PROXY_CONNECTION = 'CONN'
WMS_PROXY_FORMAT = 'FORMAT'
WMS_PROXY_VERSION = 'VERSION'
WMS_PROXY_SOURCE_SRS = 'SOURCESRS'
WMS_PROXY_SOURCE_STYLE = 'SRCSTYLE'
WMS_PROXY_TIME_EXTENT = 'TIMEEXT'
WMS_PROXY_TIME = 'TIME'
WMS_PROXY_TIME_DEFAULT = 'TIMEDEF'
WMS_PROXY_TIME_ITEM = 'TIMEITEM'
WMS_PROXY_GENERIC_LAYER = 'generic'
WMS_PROXY_TIME_LAYER = 'time'
# Django Debug Toolbar -- hard-disabled.  Flip the guard to True to enable
# locally (consider tying it to DEBUG instead of a literal False).
if False:
    MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
    INSTALLED_APPS += ('debug_toolbar',)
    DEBUG_TOOLBAR_PANELS2 = (
        'debug_toolbar.panels.cache.CachePanel',
        'debug_toolbar.panels.headers.HeadersPanel',
        'debug_toolbar.panels.logging.LoggingPanel',
        'debug_toolbar.panels.profiling.ProfilingPanel',
        'debug_toolbar.panels.redirects.RedirectsPanel',
        'debug_toolbar.panels.request.RequestPanel',
        'debug_toolbar.panels.settings.SettingsPanel',
        'debug_toolbar.panels.signals.SignalsPanel',
        'debug_toolbar.panels.staticfiles.StaticFilesPanel',
        # NOTE(review): dotted path looks wrong -- debug_toolbar ships
        # 'debug_toolbar.panels.templates.TemplatesPanel'; verify before
        # enabling this block.
        'debug_toolbar.templates.panel.TemplatesPanel',
        'debug_toolbar.panels.timer.TimerPanel',
        # 'debug_toolbar.sql.panel.SQLPanel',
    )
    import debug_toolbar.panels
| |
from cuttsum.l2s._base import _SearchBase
class SelectLexNextOracle(_SearchBase):
    """Learning-to-search policy that scores candidate sentences with
    lexical features ("a") and cached-lemma features ("b"), and uses the
    oracle's stay/all_clear signal for the "next" decision ("c")."""

    def setup_cache(self):
        """Return an empty cache of lemmas from selected sentences."""
        return set()

    def update_cache(self, pred, sents, df, cache):
        """Add the selected sentence's stopped lemmas to the cache."""
        cache.update(df.iloc[pred]["lemmas stopped"])
        return cache

    def make_select_example(self, sent, sents, df, cache):
        """Build a cost-sensitive VW example for candidate sentence `sent`.

        Namespace "a": the sentence's stopped lemmas; "b": the subset of
        those already in the cache.  "__none__" marks empty namespaces.
        """
        tokens = df.iloc[sent]["lemmas stopped"]
        cache_feats = [tok for tok in tokens if tok in cache]
        ex = self.example(lambda:
            {"a": tokens if len(tokens) > 0 else ["__none__"],
             "b": cache_feats if len(cache_feats) > 0 else ["__none__"],
            },
            labelType=self.vw.lCostSensitive)
        return ex

    def make_next_example(self, sents, df, cache, is_oracle):
        """Build the stay/all_clear example for the "next" action."""
        if is_oracle:
            return self.example(lambda: {"c": ["all_clear"],},
                labelType=self.vw.lCostSensitive)
        else:
            return self.example(lambda: {"c": ["stay"],},
                labelType=self.vw.lCostSensitive)

    def get_feature_weights(self, dataframes):
        """Return (feature-name, weight) pairs sorted by weight."""
        vocab = set([w for df in dataframes
                     for words in df["lemmas stopped"].tolist()
                     for w in words])
        vocab = list(vocab)
        lexical_feats = vocab + ["__none__"]
        lexical_cache_feats = vocab
        for w in vocab:
            assert not isinstance(w, unicode)
        ex = self.vw.example(
            {"a": lexical_feats,
             "b": lexical_cache_feats,
             "c": ["stay", "all_clear"],
            },
            labelType=self.vw.lCostSensitive)
        fw = []
        for i, feat in enumerate(lexical_feats):
            w = self.vw.get_weight(ex.feature("a", i))
            fw.append(("a:" + feat, w))
        for i, feat in enumerate(lexical_cache_feats):
            w = self.vw.get_weight(ex.feature("b", i))
            fw.append(("b:" + feat, w))
        fw.append(("c:stay", self.vw.get_weight(ex.feature("c", 0))))
        # BUG FIX: "all_clear" is feature index 1 in namespace "c"; the
        # original looked up index 0 and reported the "stay" weight twice.
        fw.append(("c:all_clear", self.vw.get_weight(ex.feature("c", 1))))
        fw.sort(key=lambda x: x[1])
        return fw
class SelectLexNextLex(SelectLexNextOracle):
    """Variant of SelectLexNextOracle whose "next" example is built from
    the lemmas of the remaining candidate sentences rather than the
    oracle's stay/all_clear signal."""

    def make_next_example(self, sents, df, cache, is_oracle):
        """Build the "next" example from the candidates' lemma union."""
        if len(sents) == 0:
            # No candidates left: emit the empty-namespace sentinel.
            return self.example(lambda: {"c": ["__none__"],},
                labelType=self.vw.lCostSensitive)
        token_set = set([tok for row in df.iloc[sents]["lemmas stopped"].tolist()
                         for tok in row])
        token_list = list(token_set)
        return self.example(lambda: {"c": token_list,},
            labelType=self.vw.lCostSensitive)

    def get_feature_weights(self, dataframes):
        """Return (feature-name, weight) pairs sorted by weight."""
        vocab = list(set([tok for frame in dataframes
                          for row in frame["lemmas stopped"].tolist()
                          for tok in row]))
        for tok in vocab:
            assert not isinstance(tok, unicode)
        with_sentinel = vocab + ["__none__"]
        # Namespaces in the same order the original emitted them so that
        # ties in the stable weight sort keep the same relative order.
        namespaces = [("a", with_sentinel), ("b", vocab), ("c", with_sentinel)]
        ex = self.vw.example(dict(namespaces),
            labelType=self.vw.lCostSensitive)
        fw = []
        for ns, feats in namespaces:
            for i, feat in enumerate(feats):
                fw.append((ns + ":" + feat, self.vw.get_weight(ex.feature(ns, i))))
        fw.sort(key=lambda x: x[1])
        return fw
class SelectLexNextLexCache(SelectLexNextOracle):
    """Variant whose "next" example uses the remaining candidates' lemmas
    ("c") plus the subset of those lemmas already in the cache ("d")."""

    def make_next_example(self, sents, df, cache, is_oracle):
        """Build the "next" example from remaining candidates and cache."""
        if len(sents) > 0:
            tokens = set([w for words in df.iloc[sents]["lemmas stopped"].tolist()
                          for w in words])
            tokens = list(tokens)
            cache_feats = [tok for tok in tokens if tok in cache]
            # BUG FIX: the empty-namespace fallbacks were bare strings
            # ("__none__") instead of single-element lists, inconsistent
            # with every sibling class (a bare string would likely be
            # iterated per-character by the example builder).
            return self.example(lambda: {
                "c": tokens if len(tokens) > 0 else ["__none__"],
                "d": cache_feats if len(cache_feats) > 0 else ["__none__"]},
                labelType=self.vw.lCostSensitive)
        else:
            return self.example(lambda: {"c": ["__none__"], "d": ["__none__"]},
                labelType=self.vw.lCostSensitive)

    def get_feature_weights(self, dataframes):
        """Return (feature-name, weight) pairs sorted by weight."""
        vocab = set([w for df in dataframes
                     for words in df["lemmas stopped"].tolist()
                     for w in words])
        vocab = list(vocab)
        lexical_feats = vocab + ["__none__"]
        lexical_cache_feats = vocab
        for w in vocab:
            assert not isinstance(w, unicode)
        ex = self.vw.example(
            {"a": lexical_feats,
             "b": lexical_cache_feats,
             "c": lexical_feats,
             "d": lexical_feats,
            },
            labelType=self.vw.lCostSensitive)
        fw = []
        for i, feat in enumerate(lexical_feats):
            w = self.vw.get_weight(ex.feature("a", i))
            fw.append(("a:" + feat, w))
        for i, feat in enumerate(lexical_cache_feats):
            w = self.vw.get_weight(ex.feature("b", i))
            fw.append(("b:" + feat, w))
        for i, feat in enumerate(lexical_feats):
            w = self.vw.get_weight(ex.feature("c", i))
            fw.append(("c:" + feat, w))
        for i, feat in enumerate(lexical_feats):
            w = self.vw.get_weight(ex.feature("d", i))
            fw.append(("d:" + feat, w))
        fw.sort(key=lambda x: x[1])
        return fw
class SelectLexGenericNextOracle(SelectLexNextOracle):
    """Adds generic numeric surface features (namespace "g": BASIC + LM
    dataframe columns) to the lexical select example; the "next" decision
    uses the oracle stay/all_clear signal."""

    def basic_cols(self):
        """Dataframe column names of the BASIC surface features."""
        return [
            "BASIC length", "BASIC char length", "BASIC doc position",
            "BASIC all caps ratio", "BASIC upper ratio", "BASIC lower ratio",
            "BASIC punc ratio", "BASIC person ratio", "BASIC organization ratio",
            "BASIC date ratio", "BASIC time ratio", "BASIC duration ratio",
            "BASIC number ratio", "BASIC ordinal ratio", "BASIC percent ratio",
            "BASIC money ratio", "BASIC set ratio", "BASIC misc ratio"]

    def lm_cols(self):
        """Dataframe column names of the language-model avg log-probs."""
        return ["LM domain avg lp",
                "LM gw avg lp"]

    def setup_cache(self):
        """Return an empty cache of lemmas from selected sentences."""
        return set()

    def update_cache(self, pred, sents, df, cache):
        """Add the selected sentence's stopped lemmas to the cache."""
        cache.update(df.iloc[pred]["lemmas stopped"])
        return cache

    def make_select_example(self, sent, sents, df, cache):
        """Build the select example with lexical ("a"/"b") namespaces and
        generic (name, value) numeric features ("g")."""
        tokens = df.iloc[sent]["lemmas stopped"]
        cache_feats = [tok for tok in tokens if tok in cache]
        gen_cols = self.basic_cols() + self.lm_cols()
        ex = self.example(lambda:
            {"a": tokens if len(tokens) > 0 else ["__none__"],
             "b": cache_feats if len(cache_feats) > 0 else ["__none__"],
             "g": [x for x in df.iloc[sent][gen_cols].iteritems()],
            },
            labelType=self.vw.lCostSensitive)
        return ex

    def make_next_example(self, sents, df, cache, is_oracle):
        """Build the stay/all_clear example for the "next" action."""
        if is_oracle:
            return self.example(lambda: {"c": ["all_clear"],},
                labelType=self.vw.lCostSensitive)
        else:
            return self.example(lambda: {"c": ["stay"],},
                labelType=self.vw.lCostSensitive)

    def get_feature_weights(self, dataframes):
        """Return (feature-name, weight) pairs sorted by weight."""
        vocab = set([w for df in dataframes
                     for words in df["lemmas stopped"].tolist()
                     for w in words])
        vocab = list(vocab)
        lexical_feats = vocab + ["__none__"]
        lexical_cache_feats = vocab
        generics = self.basic_cols() + self.lm_cols()
        for w in vocab:
            assert not isinstance(w, unicode)
        ex = self.vw.example(
            {"a": lexical_feats,
             "b": lexical_cache_feats,
             "g": [(feat, 1) for feat in generics],
             "c": ["stay", "all_clear"],
            },
            labelType=self.vw.lCostSensitive)
        fw = []
        for i, feat in enumerate(lexical_feats):
            w = self.vw.get_weight(ex.feature("a", i))
            fw.append(("a:" + feat, w))
        for i, feat in enumerate(lexical_cache_feats):
            w = self.vw.get_weight(ex.feature("b", i))
            fw.append(("b:" + feat, w))
        for i, feat in enumerate(generics):
            w = self.vw.get_weight(ex.feature("g", i))
            fw.append(("g:" + feat, w))
        fw.append(("c:stay", self.vw.get_weight(ex.feature("c", 0))))
        # BUG FIX: "all_clear" is feature index 1 in namespace "c"; the
        # original looked up index 0 and reported the "stay" weight twice.
        fw.append(("c:all_clear", self.vw.get_weight(ex.feature("c", 1))))
        fw.sort(key=lambda x: x[1])
        return fw
| |
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from django.conf import settings
from django.core.paginator import InvalidPage, Paginator
from django.http import Http404
from django.shortcuts import render
from haystack.forms import FacetedSearchForm, ModelSearchForm
from haystack.query import EmptySearchQuerySet
RESULTS_PER_PAGE = getattr(settings, 'HAYSTACK_SEARCH_RESULTS_PER_PAGE', 20)
class SearchView(object):
    """Class-based Haystack search view.

    Per-request state (``request``, ``form``, ``query``, ``results``) is
    stored on the instance, so a single shared instance is not
    thread-safe; build one per request via ``search_view_factory``.
    """
    template = 'search/search.html'
    # NOTE: shadowed by the extra_context() method below; kept for
    # backward compatibility with code reading the class attribute.
    extra_context = {}
    query = ''
    results = EmptySearchQuerySet()
    request = None
    form = None
    results_per_page = RESULTS_PER_PAGE

    def __init__(self, template=None, load_all=True, form_class=None, searchqueryset=None, results_per_page=None):
        self.load_all = load_all
        self.form_class = form_class
        self.searchqueryset = searchqueryset
        if form_class is None:
            self.form_class = ModelSearchForm
        # Idiom fix: "X is not None" replaces "not X is None".
        if results_per_page is not None:
            self.results_per_page = results_per_page
        if template:
            self.template = template

    def __call__(self, request):
        """
        Generates the actual response to the search.
        Relies on internal, overridable methods to construct the response.
        """
        self.request = request
        self.form = self.build_form()
        self.query = self.get_query()
        self.results = self.get_results()
        return self.create_response()

    def build_form(self, form_kwargs=None):
        """
        Instantiates the form the class should use to process the search query.
        """
        data = None
        kwargs = {
            'load_all': self.load_all,
        }
        if form_kwargs:
            kwargs.update(form_kwargs)
        # Only bind the form when the request actually carried parameters.
        if len(self.request.GET):
            data = self.request.GET
        if self.searchqueryset is not None:
            kwargs['searchqueryset'] = self.searchqueryset
        return self.form_class(data, **kwargs)

    def get_query(self):
        """
        Returns the query provided by the user.
        Returns an empty string if the query is invalid.
        """
        if self.form.is_valid():
            return self.form.cleaned_data['q']
        return ''

    def get_results(self):
        """
        Fetches the results via the form.
        Returns an empty list if there's no query to search with.
        """
        return self.form.search()

    def build_page(self):
        """
        Paginates the results appropriately.
        In case someone does not want to use Django's built-in pagination, it
        should be a simple matter to override this method to do what they would
        like.
        """
        try:
            page_no = int(self.request.GET.get('page', 1))
        except (TypeError, ValueError):
            raise Http404("Not a valid number for page.")
        if page_no < 1:
            raise Http404("Pages should be 1 or greater.")
        start_offset = (page_no - 1) * self.results_per_page
        # Side-effect-only slice: appears intended to pre-load this page of
        # results into the SearchQuerySet's cache before pagination --
        # confirm against the installed haystack backend.
        self.results[start_offset:start_offset + self.results_per_page]
        paginator = Paginator(self.results, self.results_per_page)
        try:
            page = paginator.page(page_no)
        except InvalidPage:
            raise Http404("No such page!")
        return (paginator, page)

    def extra_context(self):
        """
        Allows the addition of more context variables as needed.
        Must return a dictionary.
        """
        return {}

    def get_context(self):
        """Assemble the template context, including a spelling suggestion
        when the backend supports it."""
        (paginator, page) = self.build_page()
        context = {
            'query': self.query,
            'form': self.form,
            'page': page,
            'paginator': paginator,
            'suggestion': None,
        }
        if hasattr(self.results, 'query') and self.results.query.backend.include_spelling:
            context['suggestion'] = self.form.get_suggestion()
        context.update(self.extra_context())
        return context

    def create_response(self):
        """
        Generates the actual HttpResponse to send back to the user.
        """
        context = self.get_context()
        return render(self.request, self.template, context)
def search_view_factory(view_class=SearchView, *args, **kwargs):
    """Return a plain view function that builds a fresh ``view_class``
    instance per request (sidesteps the shared-state thread-safety issue
    of reusing one SearchView instance)."""
    def search_view(request):
        view = view_class(*args, **kwargs)
        return view(request)
    return search_view
class FacetedSearchView(SearchView):
    """SearchView wired for faceted searching via FacetedSearchForm."""

    def __init__(self, *args, **kwargs):
        # Needed to switch out the default form class.
        if kwargs.get('form_class') is None:
            kwargs['form_class'] = FacetedSearchForm
        super(FacetedSearchView, self).__init__(*args, **kwargs)

    def build_form(self, form_kwargs=None):
        """Ensure the form always receives a (possibly empty) list of
        selected facet expressions from the query string."""
        kwargs = form_kwargs if form_kwargs is not None else {}
        kwargs['selected_facets'] = self.request.GET.getlist("selected_facets")
        return super(FacetedSearchView, self).build_form(kwargs)

    def extra_context(self):
        """Expose the request and the facet counts to the template."""
        extra = super(FacetedSearchView, self).extra_context()
        extra.update({
            'request': self.request,
            'facets': self.results.facet_counts(),
        })
        return extra
def basic_search(request, template='search/search.html', load_all=True, form_class=ModelSearchForm, searchqueryset=None, extra_context=None, results_per_page=None):
    """
    A more traditional view that also demonstrate an alternative
    way to use Haystack.
    Useful as an example of for basing heavily custom views off of.
    Also has the benefit of thread-safety, which the ``SearchView`` class may
    not be.
    Template:: ``search/search.html``
    Context::
        * form
          An instance of the ``form_class``. (default: ``ModelSearchForm``)
        * page
          The current page of search results.
        * paginator
          A paginator instance for the results.
        * query
          The query received by the form.
    """
    query = ''
    results = EmptySearchQuerySet()
    if request.GET.get('q'):
        # Bound form: run the search only when it validates.
        form = form_class(request.GET, searchqueryset=searchqueryset, load_all=load_all)
        if form.is_valid():
            query = form.cleaned_data['q']
            results = form.search()
    else:
        # Unbound form for the initial (no-query) page.
        form = form_class(searchqueryset=searchqueryset, load_all=load_all)
    paginator = Paginator(results, results_per_page or RESULTS_PER_PAGE)
    try:
        page = paginator.page(int(request.GET.get('page', 1)))
    except InvalidPage:
        raise Http404("No such page of results!")
    context = {
        'form': form,
        'page': page,
        'paginator': paginator,
        'query': query,
        'suggestion': None,
    }
    # NOTE(review): unlike SearchView.get_context(), no hasattr guard here;
    # presumably EmptySearchQuerySet also exposes .query -- confirm against
    # the installed haystack version.
    if results.query.backend.include_spelling:
        context['suggestion'] = form.get_suggestion()
    if extra_context:
        context.update(extra_context)
    return render(request, template, context)
| |
from itertools import chain
import numpy as np
from algorithms.graph import BaseGraph
class NodeMapping:
    """Graphs with labeled nodes is not the fundamental graph theory
    problem, so use this utility class if you need labeled vertices.

    Maintains a bidirectional label <-> dense-integer-id mapping; initial
    labels are assigned ids in sorted order.
    """

    def __init__(self, labels):
        ordered = sorted(labels)
        # id -> label
        self.label = {idx: lab for idx, lab in enumerate(ordered)}
        # label -> id
        self.id = {lab: idx for idx, lab in enumerate(ordered)}

    def add(self, label, node=None):
        """Register a new label; ids default to the next free integer."""
        target = len(self.label) if node is None else node
        self.id[label] = target
        self.label[target] = label
def normalize_adjacency_dict(adj_dict):
    """For a given adjacency dict return unlabeled adjacency list and node
    mapping.

    Labels appearing only as neighbors still get an id and an empty row.
    """
    labels = set(adj_dict)
    for adjacent in adj_dict.values():
        labels.update(adjacent)
    mapping = NodeMapping(labels)
    adj_list = [[] for _ in range(len(labels))]
    for node, adjacent in adj_dict.items():
        adj_list[mapping.id[node]] = [mapping.id[nbr] for nbr in adjacent]
    return adj_list, mapping
def normalize_edge_list(edges, weighted=None):
    """For a given edge list return unlabeled edge list and node mapping.

    When ``weighted`` is None it is inferred from the arity of the first
    edge (more than two entries means (u, v, weight) triples).
    """
    if weighted is None:
        weighted = len(edges[0]) > 2
    labels = set()
    for edge in edges:
        labels.update(edge[:2])
    mapping = NodeMapping(labels)
    new_edges = []
    for edge in edges:
        u, v = edge[:2]
        pair = (mapping.id[u], mapping.id[v])
        new_edges.append(pair + (edge[2],) if weighted else pair)
    return new_edges, mapping
def to_adjacency_matrix(g: BaseGraph):
    """Return the graph as a dense (n, n) numpy adjacency matrix.

    Weighted graphs store the edge weight; unweighted graphs store 1.
    Node labels are assumed to already be valid 0..n-1 indices.
    """
    n = g.order()
    mx = np.zeros((n, n))
    for node in g:
        for succ in g.successors(node):
            if g.weighted:
                mx[node, succ[0]] = succ[1]
            else:
                mx[node, succ] = 1
    return mx
def to_adjacency_list(g: BaseGraph):
    """Return the graph as an index-based adjacency list.

    Nodes are renumbered 0..n-1 in iteration order; weighted graphs yield
    (neighbor-index, weight) pairs, unweighted graphs bare indices.
    """
    index_of = {node: i for i, node in enumerate(g)}
    adj_list = [[] for _ in range(g.order())]
    for node in g:
        row = adj_list[index_of[node]]
        for succ in g.successors(node):
            if g.weighted:
                row.append((index_of[succ[0]], succ[1]))
            else:
                row.append(index_of[succ])
    return adj_list
def to_edge_list(g: BaseGraph):
    """Return the graph as a sorted list of index-based edges.

    Nodes are renumbered 0..n-1 in iteration order.  For undirected
    graphs each edge is emitted once: when (u, v) is appended, both
    orientations are recorded in ``edge_set`` so the later (v, u) visit
    is skipped.  Weighted graphs yield (u, v, weight) triples.
    """
    n = g.order()
    edge_list = []
    # node -> dense index, in iteration order
    names = {k: v for k, v in zip(g, range(n))}
    if not g.directed:
        # orientations already emitted (only needed for dedup)
        edge_set = set()
    for u in g:
        for v in g.successors(u):
            if g.weighted:
                edge = names[u], names[v[0]], v[1]
                edge_rev = names[v[0]], names[u], v[1]
            else:
                edge = (names[u], names[v])
                edge_rev = names[v], names[u]
            if not g.directed and edge not in edge_set:
                edge_set.add(edge_rev)
                edge_set.add(edge)
                edge_list.append(edge)
            elif g.directed:
                edge_list.append(edge)
    return sorted(edge_list)
def subgraph(g: BaseGraph, nodes):
    """Return the subgraph of g induced by ``nodes``.

    The copy is constructed with directed=True regardless of the source so
    each stored arc can be inserted exactly once as-is; the real
    directedness flag is restored afterwards via the private ``_directed``
    attribute.
    """
    nodes = frozenset(nodes)
    weighted = g.weighted
    directed = g.directed
    g_new = g.__class__(weighted=weighted, directed=True)
    for u in g:
        if u in nodes:
            for v in g.successors(u):
                if g.weighted and v[0] in nodes:
                    g_new.add_edge(u, v[0], v[1])
                # NOTE(review): for weighted graphs this elif compares the
                # whole (node, weight) tuple against ``nodes`` and is
                # effectively always False -- confirm that is intended.
                elif v in nodes:
                    g_new.add_edge(u, v)
    g_new._directed = directed
    return g_new
def graph_union(g1: BaseGraph, g2: BaseGraph):
    """Return a new graph containing the edges of both g1 and g2.

    The result uses g1's class and g1's weighted/directed flags.
    """
    weighted = g1.weighted
    directed = g1.directed
    g_new = g1.__class__(weighted, directed)
    # BUG FIX: the original iterated chain(g1, g2) but always asked *g1*
    # for successors, so g2's edges were never copied (and nodes existing
    # only in g2 could fail the lookup).  Ask each graph for its own
    # nodes' successors instead.
    for g in (g1, g2):
        for u in g:
            for v in g.successors(u):
                if g.weighted:
                    g_new.add_edge(u, v[0], v[1])
                else:
                    g_new.add_edge(u, v)
    return g_new
def graph_join(g1, g2, weight=1):
    """Join g1 and g2: copy both graphs' edges, then connect every node of
    g2 to every node of g1 with ``weight`` (both directions when the graph
    is directed).  Returns the joined graph.
    """
    g1_nodes = list(g1)
    g2_nodes = list(g2)
    inter_edges = []
    for v in g1_nodes:
        for u in g2_nodes:
            # NOTE(review): inter-edges always carry a weight entry even
            # for unweighted graphs -- confirm add_edges_from tolerates
            # 3-tuples in that case.
            inter_edges.append((u, v, weight))
            if g1.directed:
                inter_edges.append((v, u, weight))
    g_new = g1.__class__(directed=g1.directed, weighted=g1.weighted)
    # BUG FIX: copy each graph's own successors; the original asked g1
    # for g2's nodes while iterating chain(g1, g2).
    for g in (g1, g2):
        for u in g:
            for v in g.successors(u):
                if g.weighted:
                    g_new.add_edge(u, v[0], v[1])
                else:
                    g_new.add_edge(u, v)
    g_new.add_edges_from(inter_edges)
    # BUG FIX: the original built the joined graph but never returned it.
    return g_new
def graph_copy(g: BaseGraph):
    """Return a new graph of the same class with the same edges.

    Note: like the source implementation, isolated nodes (no outgoing
    edges) are not explicitly added.
    """
    clone = g.__class__(directed=g.directed, weighted=g.weighted)
    for node in g:
        for succ in g.successors(node):
            if g.weighted:
                clone.add_edge(node, succ[0], succ[1])
            else:
                clone.add_edge(node, succ)
    return clone
def graph_intersect(g1: BaseGraph, g2: BaseGraph):
    """Return a graph built from the edges present in both g1 and g2,
    using g1's class and flags."""
    common = set(to_edge_list(g1)) & set(to_edge_list(g2))
    return g1.__class__.from_edge_list(
        common, directed=g1.directed, weighted=g1.weighted
    )
def to_undirected(g: BaseGraph, min_weight=True):
    """Return an undirected copy of g.

    For weighted graphs, when both (u, v) and (v, u) exist with different
    weights, keep the minimum if ``min_weight`` else the maximum.
    """
    g_new = g.__class__(weighted=g.weighted, directed=False)
    if g.weighted:
        edges = {}  # canonical (hi, lo) pair -> weight currently stored
    for u in g:
        if u not in g_new:
            g_new.add_node(u)
        for v in g.successors(u):
            if g.weighted:
                # canonical key so (u, v) and (v, u) collide
                e = (max(u, v[0]), min(u, v[0]))
                if e in edges:
                    # Reconcile the two directions' weights.
                    if min_weight:
                        d = min(v[1], edges[e])
                    else:
                        d = max(v[1], edges[e])
                    g_new.remove_edge(u, v[0])
                    g_new.add_edge(u, v[0], d)
                    edges[e] = d
                else:
                    g_new.add_edge(u, v[0], v[1])
                    # BUG FIX: the original never recorded the weight here,
                    # so ``e in edges`` was always False and the min/max
                    # reconciliation branch above was unreachable.
                    edges[e] = v[1]
            else:
                g_new.add_edge(u, v)
    return g_new
def to_unweighted(g: BaseGraph):
    """Return a copy of g with all edge weights dropped."""
    stripped = g.__class__(directed=g.directed, weighted=False)
    for node in g:
        if node not in stripped:
            stripped.add_node(node)
        for succ in g.successors(node):
            # weighted successors are (node, weight) pairs
            target = succ[0] if g.weighted else succ
            stripped.add_edge(node, target)
    return stripped
def is_complete_graph(g: BaseGraph):
    """Return True iff every node's degree equals order - 1."""
    expected = g.order() - 1
    return all(g.degree(node) == expected for node in g)
| |
"""
Tests for bitarray
Author: Ilan Schnell
"""
import os
import sys
import unittest
import tempfile
import shutil
from random import randint
from cStringIO import StringIO
# When executed directly, import the extension from the package __init__
# in the current directory; otherwise use the installed bitarray package.
# The expected repr of the bitarray type differs accordingly.
if __name__ == '__main__':
    from __init__ import bitarray, bits2bytes
    repr_type = "<class '__init__.bitarray'>"
else:
    from bitarray import bitarray, bits2bytes
    repr_type = "<class 'bitarray.bitarray'>"
tests = []  # collects the TestCase classes registered below
class Util(object):
    """Mixin with random-data generators and shared assertion helpers for
    the test cases below (Python 2 code: uses xrange, list + list with
    range(), and the deprecated assert_ alias)."""

    def randombitarrays(self):
        # bitarrays of lengths 0..24 plus one large one, random endianness
        for n in range(25) + [randint(1000, 2000)]:
            yield bitarray([randint(0, 1) for d in xrange(n)],
                           endian='big' if randint(0, 1) else 'little')

    def randomlists(self):
        # plain bool lists with the same length distribution
        for n in range(25) + [randint(1000, 2000)]:
            yield [bool(randint(0, 1)) for d in xrange(n)]

    def rndsliceidx(self, length):
        # random index in [-2*length, 2*length - 1], or None half the time
        return randint(-2*length, 2*length-1) if randint(0, 1) == 1 else None

    def slicelen(self, r, length):
        # delegates to the module-level getIndicesEx defined below
        return getIndicesEx(r, length)[-1]

    def check_obj(self, a):
        # a must be a bitarray from the module under test whose unused
        # (pad) bit count is in [0, 8) and matches buffer_info()[3]
        self.assertEqual(repr(type(a)), repr_type)
        unused = 8 * a.buffer_info()[1] - len(a)
        self.assert_(0 <= unused < 8)
        self.assertEqual(unused, a.buffer_info()[3])

    def assertEQUAL(self, a, b):
        # equality plus matching endianness plus internal consistency
        self.assertEqual(a, b)
        self.assertEqual(a.endian(), b.endian())
        self.check_obj(a)
        self.check_obj(b)
def getIndicesEx(r, length):
    """Pure-Python reimplementation of CPython's PySlice_GetIndicesEx.

    Given a slice ``r`` and a sequence length, return the normalized
    ``(start, stop, step, slicelength)`` tuple.  Raises TypeError for
    non-slice input and ValueError for a zero step.
    """
    if not isinstance(r, slice):
        raise TypeError("slice object expected")
    start = r.start
    stop = r.stop
    step = r.step
    if r.step is None:
        step = 1
    else:
        if step == 0:
            raise ValueError("slice step cannot be zero")
    # Defaults depend on the direction of traversal.
    defstart = length-1 if step < 0 else 0
    defstop = -1 if step < 0 else length
    if r.start is None:
        start = defstart
    else:
        if start < 0: start += length
        if start < 0: start = -1 if step < 0 else 0
        if start >= length: start = length-1 if step < 0 else length
    if r.stop is None:
        stop = defstop
    else:
        if stop < 0: stop += length
        if stop < 0: stop = -1
        if stop > length: stop = length
    if (step < 0 and stop >= length) or (step > 0 and start >= stop):
        slicelength = 0
    elif step < 0:
        # BUG FIX (py3 compatibility): use floor division; "/" is float
        # division on Python 3, which made slicelength a float.  On
        # Python 2 ints "//" is identical to "/", so behavior there is
        # unchanged.
        slicelength = (stop-start+1) // step + 1
    else:
        slicelength = (stop-start-1) // step + 1
    if slicelength < 0:
        slicelength = 0
    return start, stop, step, slicelength
# ---------------------------------------------------------------------------
class TestsModuleFunctions(unittest.TestCase, Util):
    """Tests for the module-level bits2bytes helper (Python 2: long
    literals like -924L and xrange)."""

    def test_bits2bytes(self):
        # non-integer arguments are rejected
        for arg in ['foo', [], None, {}]:
            self.assertRaises(TypeError, bits2bytes, arg)
        self.assertRaises(TypeError, bits2bytes)
        self.assertRaises(TypeError, bits2bytes, 1, 2)
        # negative counts are rejected (both int and long)
        self.assertRaises(ValueError, bits2bytes, -1)
        self.assertRaises(ValueError, bits2bytes, -924L)
        # exhaustive small range against the ceil(n/8) formula
        for n in xrange(1000):
            self.assertEqual(bits2bytes(n),
                             0 if n==0 else ((n - 1) / 8 + 1));
        # spot checks including values beyond 32 bits
        for n, m in [(0, 0), (1, 1), (2, 1), (7, 1), (8, 1), (9, 2),
                     (10, 2), (15, 2), (16, 2), (64, 8), (65, 9),
                     (0L, 0), (1L, 1), (65L, 9), (2**29, 2**26),
                     (2**31, 2**28), (2**32, 2**29), (2**34, 2**31),
                     (2**34+793, 2**31+100), (2**35-8, 2**32-1),
                     (2**62, 2**59), (2**63-8, 2**60-1)]:
            self.assertEqual(bits2bytes(n), m)
tests.append(TestsModuleFunctions)
# ---------------------------------------------------------------------------
class CreateObjectTests(unittest.TestCase, Util):
    """Tests for every supported bitarray() constructor argument form:
    nothing, endianness keyword, integers, lists/tuples/iterators,
    '01' strings, other bitarrays, None, and invalid arguments."""

    def test_noInitializer(self):
        # no argument -> empty array
        a = bitarray()
        self.assertEqual(len(a), 0)
        self.assertEqual(a.tolist(), [])
        self.check_obj(a)

    def test_endian(self):
        # str and unicode endian keywords; same bytes either way for 'A'
        a = bitarray(endian='little')
        a.fromstring('A')
        self.assertEqual(a.endian(), 'little')
        self.check_obj(a)
        b = bitarray(endian='big')
        b.fromstring('A')
        self.assertEqual(b.endian(), 'big')
        self.check_obj(b)
        self.assertEqual(a.tostring(), b.tostring())
        a = bitarray(endian=u'little')
        a.fromstring(' ')
        self.assertEqual(a.endian(), 'little')
        self.check_obj(a)
        b = bitarray(endian=u'big')
        b.fromstring(' ')
        self.assertEqual(b.endian(), 'big')
        self.check_obj(b)
        self.assertEqual(a.tostring(), b.tostring())
        # bad endian values: wrong type and unrecognized string
        self.assertRaises(TypeError, bitarray.__new__, bitarray, endian=0)
        self.assertRaises(ValueError, bitarray.__new__, bitarray, endian='')

    def test_integers(self):
        # int/long argument preallocates that many (uninitialized) bits
        for n in xrange(50):
            a = bitarray(n)
            self.assertEqual(len(a), n)
            self.check_obj(a)
            a = bitarray(long(n))
            self.assertEqual(len(a), n)
            self.check_obj(a)
        self.assertRaises(ValueError, bitarray.__new__, bitarray, -1)
        self.assertRaises(ValueError, bitarray.__new__, bitarray, -924L)

    def test_list(self):
        # arbitrary objects are coerced by truthiness
        lst = ['foo', None, [1], {}]
        a = bitarray(lst)
        self.assertEqual(a.tolist(), [True, False, True, False])
        self.check_obj(a)
        for n in xrange(50):
            lst = [bool(randint(0, 1)) for d in xrange(n)]
            a = bitarray(lst)
            self.assertEqual(a.tolist(), lst)
            self.check_obj(a)

    def test_tuple(self):
        tup = ('', True, [], {1:2})
        a = bitarray(tup)
        self.assertEqual(a.tolist(), [False, True, False, True])
        self.check_obj(a)
        for n in xrange(50):
            lst = [bool(randint(0, 1)) for d in xrange(n)]
            a = bitarray(tuple(lst))
            self.assertEqual(a.tolist(), lst)
            self.check_obj(a)

    def test_iter(self):
        for n in xrange(50):
            lst = [bool(randint(0, 1)) for d in xrange(n)]
            a = bitarray(iter(lst))
            self.assertEqual(a.tolist(), lst)
            self.check_obj(a)

    def test_iter2(self):
        # generator input
        for lst in self.randomlists():
            def foo():
                for x in lst:
                    yield x
            a = bitarray(foo())
            self.assertEqual(a, bitarray(lst))
            self.check_obj(a)

    def test_01(self):
        # strings of '0'/'1' characters; anything else is rejected
        a = bitarray('0010111')
        self.assertEqual(a.tolist(), [0, 0, 1, 0, 1, 1, 1])
        self.check_obj(a)
        for n in xrange(50):
            lst = [bool(randint(0, 1)) for d in xrange(n)]
            s = ''.join('1' if x else '0' for x in lst)
            a = bitarray(s)
            self.assertEqual(a.tolist(), lst)
            self.check_obj(a)
        self.assertRaises(ValueError, bitarray.__new__, bitarray, '01012100')

    def test_bitarray(self):
        # copy constructor: new object, equal content; endianness is
        # inherited unless explicitly overridden
        for n in xrange(50):
            a = bitarray(n)
            b = bitarray(a)
            self.assert_(a is not b)
            self.assertEQUAL(a, b)
        for end in ('little', 'big'):
            a = bitarray(endian=end)
            c = bitarray(a)
            self.assertEqual(c.endian(), end)
            c = bitarray(a, endian='little')
            self.assertEqual(c.endian(), 'little')
            c = bitarray(a, endian='big')
            self.assertEqual(c.endian(), 'big')

    def test_None(self):
        # None behaves like 0 / no argument
        self.assertEQUAL(bitarray(), bitarray(0))
        self.assertEQUAL(bitarray(), bitarray(None))

    def test_WrongArgs(self):
        self.assertRaises(TypeError, bitarray.__new__, bitarray, 'A', 42, 69)
        self.assertRaises(TypeError, bitarray.__new__, bitarray, Ellipsis)
        self.assertRaises(TypeError, bitarray.__new__, bitarray, slice(0))
        self.assertRaises(TypeError, bitarray.__new__, bitarray, 2.345)
        self.assertRaises(TypeError, bitarray.__new__, bitarray, 4+3j)
        self.assertRaises(TypeError, bitarray.__new__, bitarray, '', 0, 42)
        self.assertRaises(ValueError, bitarray.__new__, bitarray, 0, 'foo')
tests.append(CreateObjectTests)
# ---------------------------------------------------------------------------
class MetaDataTests(unittest.TestCase):
    """Tests for the metadata accessors buffer_info(), endian(), length()."""

    def test_buffer_info(self):
        # buffer_info() returns a 5-tuple; the assertions below establish:
        # [1] = byte size of the buffer, [2] = endianness string,
        # [3] = number of unused (pad) bits, [4] = allocated bytes (>= size).
        a = bitarray('0000111100001', endian='little')
        self.assertEqual(a.buffer_info()[1:4], (2, 'little', 3))
        a = bitarray()
        self.assertRaises(TypeError, a.buffer_info, 42)
        bi = a.buffer_info()
        self.assert_(isinstance(bi, tuple))
        self.assertEqual(len(bi), 5)
        self.assert_(isinstance(bi[0], (int, long)))
        self.assert_(isinstance(bi[1], (int, long)))
        self.assert_(isinstance(bi[2], str))
        self.assert_(isinstance(bi[3], int))
        self.assert_(isinstance(bi[4], (int, long)))
        for n in xrange(50):
            bi = bitarray(n).buffer_info()
            self.assertEqual(bi[1], bits2bytes(n))
            # pad bits + length add up to a whole number of bytes
            self.assertEqual(bi[3] + n, 8 * bi[1])
            self.assert_(bi[4] >= bi[1])
        a = bitarray(endian='little')
        self.assertEqual(a.buffer_info()[2], 'little')
        a = bitarray(endian='big')
        self.assertEqual(a.buffer_info()[2], 'big')

    def test_endian(self):
        a = bitarray(endian='little')
        self.assertEqual(a.endian(), 'little')
        a = bitarray(endian='big')
        self.assertEqual(a.endian(), 'big')

    def test_length(self):
        # len() and the length() method must agree for all sizes.
        for n in xrange(1000):
            a = bitarray(n)
            self.assertEqual(len(a), n)
            self.assertEqual(a.length(), n)

tests.append(MetaDataTests)
# ---------------------------------------------------------------------------
class SliceTests(unittest.TestCase, Util):
    """Tests for __getitem__, __setitem__ and __delitem__ with both
    integer indices and (extended) slices, checked against the behavior
    of equivalent operations on plain Python lists."""

    def test_getitem(self):
        # Integer indexing: positive/negative indices, IndexError out of
        # range.
        a = bitarray()
        self.assertRaises(IndexError, a.__getitem__, 0)
        a.append(True)
        self.assertEqual(a[0], True)
        self.assertRaises(IndexError, a.__getitem__, 1)
        self.assertRaises(IndexError, a.__getitem__, -2)
        a.append(False)
        self.assertEqual(a[1], False)
        self.assertRaises(IndexError, a.__getitem__, 2)
        self.assertRaises(IndexError, a.__getitem__, -3)
        a = bitarray('1100010')
        for i, b in enumerate([True, True, False, False, False, True, False]):
            self.assertEqual(a[i], b)
            self.assertEqual(a[i-7], b)
        self.assertRaises(IndexError, a.__getitem__, 7)
        self.assertRaises(IndexError, a.__getitem__, -8)
        # Slicing: a[:] is a distinct copy, extended slices match the
        # list equivalent, step 0 is invalid, tuples are not indices.
        a = bitarray('0100000100001')
        self.assertEQUAL(a[:], a)
        self.assert_(a[:] is not a)
        aa = a.tolist()
        self.assertEQUAL(a[11:2:-3], bitarray(aa[11:2:-3]))
        self.check_obj(a[:])
        self.assertRaises(ValueError, a.__getitem__, slice(None, None, 0))
        self.assertRaises(TypeError, a.__getitem__, (1, 2))
        # Random slices compared against list slicing.
        for a in self.randombitarrays():
            aa = a.tolist()
            la = len(a)
            if la == 0: continue
            for dum in xrange(10):
                step = self.rndsliceidx(la)
                if step == 0: step = None
                s = slice(self.rndsliceidx(la),
                          self.rndsliceidx(la), step)
                self.assertEQUAL(a[s], bitarray(aa[s], endian=a.endian()))

    def test_setitem(self):
        # Integer assignment accepts any truthy/falsy value and is
        # in-place (identity preserved).
        a = bitarray([False])
        a[0] = 1
        self.assertEqual(a.tolist(), [True])
        a = bitarray(2)
        a[0] = 0
        a[1] = 1
        self.assertEqual(a.tolist(), [False, True])
        a[-1] = 0
        a[-2] = 1
        self.assertEqual(a.tolist(), [True, False])
        self.assertRaises(IndexError, a.__setitem__, 2, True)
        self.assertRaises(IndexError, a.__setitem__, -3, False)
        for a in self.randombitarrays():
            la = len(a)
            if la == 0:
                continue
            i = randint(0, la-1)
            aa = a.tolist()
            ida = id(a)
            val = bool(randint(0, 1))
            a[i] = val
            aa[i] = val
            self.assertEqual(a.tolist(), aa)
            self.assertEqual(id(a), ida)
            self.check_obj(a)
            # Full-slice assignment copies content without aliasing.
            b = bitarray(la)
            b[0:la] = bitarray(a)
            self.assertEqual(a, b)
            self.assertNotEqual(id(a), id(b))
            b = bitarray(la)
            b[:] = bitarray(a)
            self.assertEqual(a, b)
            self.assertNotEqual(id(a), id(b))
            b = bitarray(la)
            b[::-1] = bitarray(a)
            self.assertEqual(a.tolist()[::-1], b.tolist())
        a = bitarray(5*[False])
        a[0] = 1
        a[-2] = 1
        self.assertEqual(a, bitarray('10010'))
        self.assertRaises(IndexError, a.__setitem__, 5, 'foo')
        self.assertRaises(IndexError, a.__setitem__, -6, 'bar')
        # Random slice assignment compared against list behavior (only
        # when lengths are compatible or the slice is a simple one).
        for a in self.randombitarrays():
            la = len(a)
            if la == 0: continue
            for dum in xrange(3):
                step = self.rndsliceidx(la)
                if step == 0: step = None
                s = slice(self.rndsliceidx(la),
                          self.rndsliceidx(la), step)
                for b in self.randombitarrays():
                    if len(b) == self.slicelen(s, len(a)) or step is None:
                        c = bitarray(a)
                        d = c
                        c[s] = b
                        self.assert_(c is d)
                        self.check_obj(c)
                        cc = a.tolist()
                        cc[s] = b.tolist()
                        self.assertEqual(c, bitarray(cc))

    def test_setslice_to_bool(self):
        # Assigning a single bool to a slice sets every bit in the slice.
        a = bitarray('11111111')
        a[::2] = False
        self.assertEqual(a, bitarray('01010101'))
        a[4::] = True
        self.assertEqual(a, bitarray('01011111'))
        a[-2:] = False
        self.assertEqual(a, bitarray('01011100'))
        a[:2:] = True
        self.assertEqual(a, bitarray('11011100'))
        a[:] = True
        self.assertEqual(a, bitarray('11111111'))

    def test_delitem(self):
        a = bitarray('100110')
        del a[1]
        self.assertEqual(len(a), 5)
        del a[3]
        del a[-2]
        self.assertEqual(a, bitarray('100'))
        self.assertRaises(IndexError, a.__delitem__, 3)
        self.assertRaises(IndexError, a.__delitem__, -4)
        # Random slice deletion compared against list behavior.
        for a in self.randombitarrays():
            la = len(a)
            if la == 0: continue
            for dum in xrange(10):
                step = self.rndsliceidx(la)
                if step == 0: step = None
                s = slice(self.rndsliceidx(la),
                          self.rndsliceidx(la), step)
                c = bitarray(a)
                d = c
                del c[s]
                self.assert_(c is d)
                self.check_obj(c)
                cc = a.tolist()
                del cc[s]
                self.assertEQUAL(c, bitarray(cc, endian=c.endian()))

tests.append(SliceTests)
# ---------------------------------------------------------------------------
class MiscTests(unittest.TestCase, Util):
    """Miscellaneous behavior: truthiness, iteration, comparison,
    subclassing, endianness round-trips, pickling, and 32-bit overflow."""

    def test_booleanness(self):
        # Only the empty bitarray is falsy; content does not matter.
        self.assertEqual(bool(bitarray('')), False)
        self.assertEqual(bool(bitarray('0')), True)
        self.assertEqual(bool(bitarray('1')), True)

    def test_iterate(self):
        for lst in self.randomlists():
            acc = []
            for b in bitarray(lst):
                acc.append(b)
            self.assertEqual(acc, lst)

    def test_iterable(self):
        # Python 2 iterator protocol (.next()).
        a = iter(bitarray('011'))
        self.assertEqual(a.next(), False)
        self.assertEqual(a.next(), True)
        self.assertEqual(a.next(), True)
        self.assertRaises(StopIteration, a.next)

    def test_assignment(self):
        a = bitarray('00110111001')
        a[1:3] = a[7:9]
        a[-1:] = a[:1]
        b = bitarray('01010111000')
        self.assertEqual(a, b)

    def test_compare(self):
        # All six rich comparisons must agree with list comparison.
        for a in self.randombitarrays():
            aa = a.tolist()
            for b in self.randombitarrays():
                bb = b.tolist()
                self.assertEqual(a == b, aa == bb)
                self.assertEqual(a != b, aa != bb)
                self.assertEqual(a <= b, aa <= bb)
                self.assertEqual(a < b, aa < bb)
                self.assertEqual(a >= b, aa >= bb)
                self.assertEqual(a > b, aa > bb)

    def test_subclassing(self):
        # bitarray supports subclassing with extra constructor arguments
        # and overridden __getitem__.
        class ExaggeratingBitarray(bitarray):
            def __new__(cls, data, offset):
                return bitarray.__new__(cls, data)
            def __init__(self, data, offset):
                self.offset = offset
            def __getitem__(self, i):
                return bitarray.__getitem__(self, i - self.offset)
        for a in self.randombitarrays():
            if len(a) == 0:
                continue
            b = ExaggeratingBitarray(a, 1234)
            for i in xrange(len(a)):
                self.assertEqual(a[i], b[i+1234])

    def test_endianness(self):
        # Bit order within each byte depends on endianness; equal bit
        # patterns with different endianness produce different bytes and
        # vice versa.
        a = bitarray(endian='little')
        a.fromstring('\x01')
        self.assertEqual(a.to01(), '10000000')
        b = bitarray(endian='little')
        b.fromstring('\x80')
        self.assertEqual(b.to01(), '00000001')
        c = bitarray(endian='big')
        c.fromstring('\x80')
        self.assertEqual(c.to01(), '10000000')
        d = bitarray(endian='big')
        d.fromstring('\x01')
        self.assertEqual(d.to01(), '00000001')
        self.assertEqual(a, c)
        self.assertEqual(b, d)
        a = bitarray(8, endian='little')
        a.setall(False)
        a[0] = True
        self.assertEqual(a.tostring(), '\x01')
        a[1] = True
        self.assertEqual(a.tostring(), '\x03')
        a.fromstring(' ')
        self.assertEqual(a.tostring(), '\x03 ')
        self.assertEqual(a.to01(), '1100000000000100')
        a = bitarray(8, endian='big')
        a.setall(False)
        a[7] = True
        self.assertEqual(a.tostring(), '\x01')
        a[6] = True
        self.assertEqual(a.tostring(), '\x03')
        a.fromstring(' ')
        self.assertEqual(a.tostring(), '\x03 ')
        self.assertEqual(a.to01(), '0000001100100000')
        a = bitarray('00100000', endian='big')
        self.assertEqual(a.tostring(), ' ')
        b = bitarray('00000100', endian='little')
        self.assertEqual(b.tostring(), ' ')
        self.assertNotEqual(a, b)
        a = bitarray('11100000', endian='little')
        b = bitarray(a, endian='big')
        self.assertNotEqual(a, b)
        self.assertEqual(a.tostring(), b.tostring())

    def test_pickle(self):
        from pickle import loads, dumps
        for a in self.randombitarrays():
            b = loads(dumps(a))
            self.assert_(b is not a)
            self.assertEQUAL(a, b)

    def test_cPickle(self):
        from cPickle import loads, dumps
        for a in self.randombitarrays():
            b = loads(dumps(a))
            self.assert_(b is not a)
            self.assertEQUAL(a, b)

    def test_overflow(self):
        # Only meaningful on 32-bit builds; skipped on 64-bit.
        from platform import architecture
        if architecture()[0] == '64bit':
            return
        self.assertRaises(OverflowError, bitarray.__new__,
                          bitarray, 2**34 + 1)
        a = bitarray(10**6)
        self.assertRaises(OverflowError, a.__imul__, 17180)

tests.append(MiscTests)
# ---------------------------------------------------------------------------
class SpecialMethodTests(unittest.TestCase, Util):
    """Tests for all()/any(), repr() round-trips, and the copy protocol."""

    def test_all(self):
        # Empty array: all() is True (vacuous truth), matching builtin all.
        a = bitarray()
        self.assertTrue(a.all())
        for a in self.randombitarrays():
            self.assertEqual(all(a), a.all())
            self.assertEqual(all(a.tolist()), a.all())

    def test_any(self):
        # Empty array: any() is False, matching builtin any.
        a = bitarray()
        self.assertFalse(a.any())
        for a in self.randombitarrays():
            self.assertEqual(any(a), a.any())
            self.assertEqual(any(a.tolist()), a.any())

    def test_repr(self):
        # repr() must be evaluable back into an equal bitarray.
        a = bitarray()
        self.assertEqual(repr(a), "bitarray()")
        a = bitarray('10111')
        self.assertEqual(repr(a), "bitarray('10111')")
        for a in self.randombitarrays():
            b = eval(repr(a))
            self.assert_(b is not a)
            self.assertEqual(a, b)
            self.check_obj(b)

    def test_copy(self):
        # .copy(), copy.copy() and copy.deepcopy() all give equal,
        # distinct objects.
        import copy
        for a in self.randombitarrays():
            b = a.copy()
            self.assert_(b is not a)
            self.assertEQUAL(a, b)
            b = copy.copy(a)
            self.assert_(b is not a)
            self.assertEQUAL(a, b)
            b = copy.deepcopy(a)
            self.assert_(b is not a)
            self.assertEQUAL(a, b)

tests.append(SpecialMethodTests)
# ---------------------------------------------------------------------------
class NumberTests(unittest.TestCase, Util):
    """Tests for + / += (concatenation) and * / *= (repetition)."""

    def test_add(self):
        # a + b concatenates without mutating the operands; the result
        # inherits a's endianness.
        c = bitarray('001') + bitarray('110')
        self.assertEQUAL(c, bitarray('001110'))
        for a in self.randombitarrays():
            aa = a.copy()
            for b in self.randombitarrays():
                bb = b.copy()
                c = a + b
                self.assertEqual(c, bitarray(a.tolist() + b.tolist()))
                self.assertEqual(c.endian(), a.endian())
                self.check_obj(c)
                self.assertEQUAL(a, aa)
                self.assertEQUAL(b, bb)
        a = bitarray()
        self.assertRaises(TypeError, a.__add__, 42)

    def test_iadd(self):
        # += concatenates in place (identity preserved).
        c = bitarray('001')
        c += bitarray('110')
        self.assertEQUAL(c, bitarray('001110'))
        for a in self.randombitarrays():
            for b in self.randombitarrays():
                c = bitarray(a)
                d = c
                d += b
                self.assertEqual(d, a + b)
                self.assert_(c is d)
                self.assertEQUAL(c, d)
                self.assertEqual(d.endian(), a.endian())
                self.check_obj(d)
        a = bitarray()
        self.assertRaises(TypeError, a.__iadd__, 42)

    def test_mul(self):
        # n * a and a * n repeat like list multiplication (n <= 0 gives
        # an empty array); the operand is not mutated.
        c = 0 * bitarray('1001111')
        self.assertEQUAL(c, bitarray())
        c = 3 * bitarray('001')
        self.assertEQUAL(c, bitarray('001001001'))
        c = bitarray('110') * 3
        self.assertEQUAL(c, bitarray('110110110'))
        for a in self.randombitarrays():
            b = a.copy()
            for n in xrange(-10, 20):
                c = a * n
                self.assertEQUAL(c, bitarray(n * a.tolist(),
                                             endian=a.endian()))
                c = n * a
                self.assertEqual(c, bitarray(n * a.tolist(),
                                             endian=a.endian()))
                self.assertEQUAL(a, b)
        a = bitarray()
        self.assertRaises(TypeError, a.__mul__, None)

    def test_imul(self):
        # *= repeats in place (identity preserved).
        c = bitarray('1101110011')
        idc = id(c)
        c *= 0
        self.assertEQUAL(c, bitarray())
        self.assertEqual(idc, id(c))
        c = bitarray('110')
        c *= 3
        self.assertEQUAL(c, bitarray('110110110'))
        for a in self.randombitarrays():
            for n in xrange(-10, 10):
                b = a.copy()
                idb = id(b)
                b *= n
                self.assertEQUAL(b, bitarray(n * a.tolist(),
                                             endian=a.endian()))
                self.assertEqual(idb, id(b))
        a = bitarray()
        self.assertRaises(TypeError, a.__imul__, None)

tests.append(NumberTests)
# ---------------------------------------------------------------------------
class BitwiseTests(unittest.TestCase, Util):
    """Tests for the bitwise operators &, |, ^, ~ and their in-place
    forms &=, |=, ^= as well as the invert() method."""

    def test_misc(self):
        # Identities over random arrays: a & ~a is all-zero,
        # a ^ ~a is all-one, and inverting twice round-trips.
        for a in self.randombitarrays():
            b = ~a
            c = a & b
            self.assertEqual(c.any(), False)
            self.assertEqual(a, a ^ c)
            d = a ^ b
            self.assertEqual(d.all(), True)
            b &= d
            self.assertEqual(~b, a)

    def test_and(self):
        a = bitarray('11001')
        b = bitarray('10011')
        self.assertEQUAL(a & b, bitarray('10001'))
        b = bitarray('1001')
        self.assertRaises(ValueError, a.__and__, b) # not same length
        self.assertRaises(TypeError, a.__and__, 42)

    def test_iand(self):
        # &= operates in place (identity preserved).
        a = bitarray('110010110')
        ida = id(a)
        a &= bitarray('100110011')
        self.assertEQUAL(a, bitarray('100010010'))
        self.assertEqual(ida, id(a))

    def test_or(self):
        a = bitarray('11001')
        b = bitarray('10011')
        self.assertEQUAL(a | b, bitarray('11011'))

    def test_ior(self):
        # BUG FIX: this method was also named test_iand, which shadowed
        # the real test_iand above so neither &= nor |= was actually
        # covered correctly; it exercises |= and is renamed accordingly.
        a = bitarray('110010110')
        a |= bitarray('100110011')
        self.assertEQUAL(a, bitarray('110110111'))

    def test_xor(self):
        a = bitarray('11001')
        b = bitarray('10011')
        self.assertEQUAL(a ^ b, bitarray('01010'))

    def test_ixor(self):
        a = bitarray('110010110')
        a ^= bitarray('100110011')
        self.assertEQUAL(a, bitarray('010100101'))

    def test_invert(self):
        # invert() mutates in place; ~ returns a new object and leaves
        # the operand untouched.
        a = bitarray()
        a.invert()
        self.assertEQUAL(a, bitarray())
        a = bitarray('11011')
        a.invert()
        self.assertEQUAL(a, bitarray('00100'))
        a = bitarray('11011')
        b = ~a
        self.assertEQUAL(b, bitarray('00100'))
        self.assertEQUAL(a, bitarray('11011'))
        self.assert_(a is not b)
        for a in self.randombitarrays():
            aa = a.tolist()
            b = bitarray(a)
            b.invert()
            for i in xrange(len(a)):
                self.assertEqual(b[i], not aa[i])
            self.check_obj(b)
            c = ~a
            self.assert_(c is not a)
            self.assertEQUAL(a, bitarray(aa, endian=a.endian()))
            for i in xrange(len(a)):
                self.assertEqual(c[i], not aa[i])
            # BUG FIX: originally re-checked b (already verified above);
            # the freshly created c is what needs validation here.
            self.check_obj(c)

tests.append(BitwiseTests)
# ---------------------------------------------------------------------------
class SequenceTests(unittest.TestCase, Util):
    """Tests for the `in` operator with single bits and sub-sequences."""

    def test_contains(self):
        # Single-bit membership for empty, all-zero, all-one and mixed
        # arrays; 0/1 behave like False/True.
        a = bitarray()
        self.assert_(False not in a)
        self.assert_(True not in a)
        a.append(True)
        self.assert_(True in a)
        self.assert_(False not in a)
        a = bitarray([False])
        self.assert_(False in a)
        self.assert_(True not in a)
        a.append(True)
        self.assert_(0 in a)
        self.assert_(1 in a)
        for n in xrange(2, 100):
            a = bitarray(n)
            a.setall(0)
            self.assert_(False in a)
            self.assert_(True not in a)
            a[randint(0, n-1)] = 1
            self.assert_(True in a)
            self.assert_(False in a)
            a.setall(1)
            self.assert_(True in a)
            self.assert_(False not in a)
            a[randint(0, n-1)] = 0
            self.assert_(True in a)
            self.assert_(False in a)
        # Sub-sequence membership: strings, bitarrays, lists and tuples
        # are all accepted as patterns (nonzero items count as 1).
        a = bitarray('011010000001')
        self.assert_('1' in a)
        self.assert_('11' in a)
        self.assert_('111' not in a)
        self.assert_(bitarray('00') in a)
        self.assert_([0, 0, 0, 1] in a)
        self.assert_((0, 0, 0, 1, 1) not in a)
        self.assert_((0, 0, 0, 0, 2) in a)

tests.append(SequenceTests)
# ---------------------------------------------------------------------------
class ExtendTests(unittest.TestCase, Util):
    """Tests for extend() with every accepted argument type: bitarray,
    list, tuple, iterator, generator and '0'/'1' string."""

    def test_wrongArgs(self):
        a = bitarray()
        self.assertRaises(TypeError, a.extend)
        self.assertRaises(TypeError, a.extend, None)
        self.assertRaises(TypeError, a.extend, True)
        self.assertRaises(TypeError, a.extend, 24)
        self.assertRaises(ValueError, a.extend, '0011201')

    def test_bitarray(self):
        a = bitarray()
        a.extend(bitarray())
        self.assertEqual(a, bitarray())
        a.extend(bitarray('110'))
        self.assertEqual(a, bitarray('110'))
        a.extend(bitarray('1110'))
        self.assertEqual(a, bitarray('1101110'))
        # Extending across endianness appends the logical bit sequence.
        a = bitarray('00001111', endian='little')
        a.extend(bitarray('00111100', endian='big'))
        self.assertEqual(a, bitarray('0000111100111100'))
        for a in self.randombitarrays():
            for b in self.randombitarrays():
                c = bitarray(a)
                idc = id(c)
                c.extend(b)
                self.assertEqual(id(c), idc)
                self.assertEqual(c, a + b)

    def test_list(self):
        # Arbitrary objects in the list are interpreted by truthiness.
        a = bitarray()
        a.extend([0, 1, 3, None, {}])
        self.assertEqual(a, bitarray('01100'))
        a.extend([True, False])
        self.assertEqual(a, bitarray('0110010'))
        for a in self.randomlists():
            for b in self.randomlists():
                c = bitarray(a)
                idc = id(c)
                c.extend(b)
                self.assertEqual(id(c), idc)
                self.assertEqual(c.tolist(), a + b)
                self.check_obj(c)

    def test_iterable(self):
        def bar():
            for x in ('', '1', None, True, []):
                yield x
        a = bitarray()
        a.extend(bar())
        self.assertEqual(a, bitarray('01010'))
        for a in self.randomlists():
            for b in self.randomlists():
                def foo():
                    for e in b:
                        yield e
                c = bitarray(a)
                idc = id(c)
                c.extend(foo())
                self.assertEqual(id(c), idc)
                self.assertEqual(c.tolist(), a + b)
                self.check_obj(c)

    def test_tuple(self):
        a = bitarray()
        a.extend((0, 1, 2, 0, 3))
        self.assertEqual(a, bitarray('01101'))
        for a in self.randomlists():
            for b in self.randomlists():
                c = bitarray(a)
                idc = id(c)
                c.extend(tuple(b))
                self.assertEqual(id(c), idc)
                self.assertEqual(c.tolist(), a + b)
                self.check_obj(c)

    def test_iter(self):
        a = bitarray()
        a.extend(iter([3, 9, 0, 1, -2]))
        self.assertEqual(a, bitarray('11011'))
        for a in self.randomlists():
            for b in self.randomlists():
                c = bitarray(a)
                idc = id(c)
                c.extend(iter(b))
                self.assertEqual(id(c), idc)
                self.assertEqual(c.tolist(), a + b)
                self.check_obj(c)

    def test_string01(self):
        a = bitarray()
        a.extend('0110111')
        self.assertEqual(a, bitarray('0110111'))
        for a in self.randomlists():
            for b in self.randomlists():
                c = bitarray(a)
                idc = id(c)
                c.extend(''.join(('1' if x else '0') for x in b))
                self.assertEqual(id(c), idc)
                self.assertEqual(c.tolist(), a + b)
                self.check_obj(c)

tests.append(ExtendTests)
# ---------------------------------------------------------------------------
class MethodTests(unittest.TestCase, Util):
    """Tests for the list-like and bitarray-specific instance methods:
    append, insert, index, count, search, fill, sort, reverse, tolist,
    remove, pop, setall, bytereverse, fromword/toword."""

    def test_append(self):
        # append() accepts any object and stores its truthiness.
        a = bitarray()
        a.append(True)
        a.append(False)
        a.append(False)
        self.assertEQUAL(a, bitarray('100'))
        for a in self.randombitarrays():
            aa = a.tolist()
            b = a
            b.append(1)
            self.assert_(a is b)
            self.check_obj(b)
            self.assertEQUAL(b, bitarray(aa+[1], endian=a.endian()))
            b.append('')
            self.assertEQUAL(b, bitarray(aa+[1, 0], endian=a.endian()))

    def test_insert(self):
        # insert() behaves like list.insert, including negative and
        # out-of-range positions.
        a = bitarray()
        b = a
        a.insert(0, True)
        self.assert_(a is b)
        self.assertEqual(a, bitarray('1'))
        self.assertRaises(TypeError, a.insert)
        self.assertRaises(TypeError, a.insert, None)
        for a in self.randombitarrays():
            aa = a.tolist()
            item = bool(randint(0, 1))
            pos = randint(-len(a), len(a))
            a.insert(pos, item)
            aa.insert(pos, item)
            self.assertEqual(a.tolist(), aa)
            self.check_obj(a)

    def test_index(self):
        # index() finds the first occurrence by truthiness; ValueError
        # when absent (any nonzero value, e.g. 54 or 42, counts as 1).
        a = bitarray()
        for i in (True, False, 1, 0):
            self.assertRaises(ValueError, a.index, i)
        a = bitarray(100*[False])
        self.assertRaises(ValueError, a.index, True)
        a[20] = a[27] = 54
        self.assertEqual(a.index(42), 20)
        self.assertEqual(a.index(0), 0)
        a = bitarray(200*[True])
        self.assertRaises(ValueError, a.index, False)
        a[173] = a[187] = 0
        self.assertEqual(a.index(False), 173)
        self.assertEqual(a.index(True), 0)
        for n in xrange(50):
            for m in xrange(n):
                a = bitarray(n)
                a.setall(0)
                self.assertRaises(ValueError, a.index, 1)
                a[m] = 1
                self.assertEqual(a.index(1), m)
                a.setall(1)
                self.assertRaises(ValueError, a.index, 0)
                a[m] = 0
                self.assertEqual(a.index(0), m)

    def test_count(self):
        # count() defaults to counting set bits; count(x) matches the
        # corresponding character count of to01().
        a = bitarray('10011')
        self.assertEqual(a.count(), 3)
        self.assertEqual(a.count(True), 3)
        self.assertEqual(a.count(False), 2)
        self.assertEqual(a.count(1), 3)
        self.assertEqual(a.count(0), 2)
        self.assertRaises(TypeError, a.count, 'A')
        for a in self.randombitarrays():
            self.assertEqual(a.count(), a.count(1))
            self.assertEqual(a.count(1), a.to01().count('1'))
            self.assertEqual(a.count(0), a.to01().count('0'))

    def test_search(self):
        # search() returns start positions of (possibly overlapping)
        # matches; patterns may be strings, bitarrays, lists or tuples,
        # and an optional limit caps the number of results.
        a = bitarray('')
        self.assertEqual(a.search(bitarray('0')), [])
        self.assertEqual(a.search(bitarray('1')), [])
        a = bitarray('1')
        self.assertEqual(a.search(bitarray('0')), [])
        self.assertEqual(a.search(bitarray('1')), [0])
        self.assertEqual(a.search(bitarray('11')), [])
        a = bitarray(100*'1')
        self.assertEqual(a.search(bitarray('0')), [])
        self.assertEqual(a.search(bitarray('1')), range(100))
        a = bitarray('10011')
        for s, res in [('0', [1, 2]), ('1', [0, 3, 4]),
                       ('01', [2]), ('11', [3]),
                       ('000', []), ('1001', [0]),
                       ('011', [2]), ('0011', [1]),
                       ('10011', [0]), ('100111', [])]:
            self.assertEqual(a.search(s), res)
            b = bitarray(s)
            self.assertEqual(a.search(b), res)
            self.assertEqual(a.search(list(b)), res)
            self.assertEqual(a.search(tuple(b)), res)
        a = bitarray('10010101110011111001011')
        for limit in xrange(10):
            self.assertEqual(a.search('011', limit),
                             [6, 11, 20][:limit])

    def test_fill(self):
        # fill() pads with zeros up to the next byte boundary and
        # returns the number of pad bits added (0..7).
        a = bitarray('')
        self.assertEqual(a.fill(), 0)
        self.assertEqual(len(a), 0)
        a = bitarray('101')
        self.assertEqual(a.fill(), 5)
        self.assertEQUAL(a, bitarray('10100000'))
        self.assertEqual(a.fill(), 0)
        self.assertEQUAL(a, bitarray('10100000'))
        for a in self.randombitarrays():
            aa = a.tolist()
            la = len(a)
            b = a
            self.assert_(0 <= b.fill() < 8)
            self.assertEqual(b.endian(), a.endian())
            bb = b.tolist()
            lb = len(b)
            self.assert_(a is b)
            self.check_obj(b)
            if la % 8 == 0:
                self.assertEqual(bb, aa)
                self.assertEqual(lb, la)
            else:
                self.assert_(lb % 8 == 0)
                self.assertNotEqual(bb, aa)
                self.assertEqual(bb[:la], aa)
                self.assertEqual(b[la:], (lb-la)*bitarray('0'))
                self.assert_(0 < lb-la < 8)

    def test_sort(self):
        # sort() orders zeros before ones; reverse may be given
        # positionally or by keyword.
        a = bitarray('1101000')
        a.sort()
        self.assertEqual(a, bitarray('0000111'))
        a = bitarray('1101000')
        a.sort(reverse=True)
        self.assertEqual(a, bitarray('1110000'))
        a = bitarray('1101000')
        a.sort(True)
        self.assertEqual(a, bitarray('1110000'))
        self.assertRaises(TypeError, a.sort, 'A')
        for a in self.randombitarrays():
            ida = id(a)
            rev = randint(0, 1)
            a.sort(rev)
            self.assertEqual(a, bitarray(sorted(a.tolist(), reverse=rev)))
            self.assertEqual(id(a), ida)

    def test_reverse(self):
        # reverse() mutates in place, matching list[::-1].
        self.assertRaises(TypeError, bitarray().reverse, 42)
        a = bitarray()
        a.reverse()
        self.assertEQUAL(a, bitarray())
        a = bitarray('1001111')
        a.reverse()
        self.assertEQUAL(a, bitarray('1111001'))
        a = bitarray('11111000011')
        a.reverse()
        self.assertEQUAL(a, bitarray('11000011111'))
        for a in self.randombitarrays():
            aa = a.tolist()
            ida = id(a)
            a.reverse()
            self.assertEqual(ida, id(a))
            self.assertEQUAL(a, bitarray(aa[::-1], endian=a.endian()))

    def test_tolist(self):
        a = bitarray()
        self.assertEqual(a.tolist(), [])
        a = bitarray('110')
        self.assertEqual(a.tolist(), [True, True, False])
        for lst in self.randomlists():
            a = bitarray(lst)
            self.assertEqual(a.tolist(), lst)

    def test_remove(self):
        # remove() deletes the first bit equal to the given truthiness;
        # ValueError when no such bit exists.
        a = bitarray()
        for i in (True, False, 1, 0):
            self.assertRaises(ValueError, a.remove, i)
        a = bitarray(21)
        a.setall(0)
        self.assertRaises(ValueError, a.remove, 1)
        a.setall(1)
        self.assertRaises(ValueError, a.remove, 0)
        a = bitarray('1010110')
        a.remove(False); self.assertEqual(a, bitarray('110110'))
        a.remove(True); self.assertEqual(a, bitarray('10110'))
        a.remove(1); self.assertEqual(a, bitarray('0110'))
        a.remove(0); self.assertEqual(a, bitarray('110'))
        a = bitarray('0010011')
        b = a
        b.remove('1')
        self.assert_(b is a)
        self.assertEQUAL(b, bitarray('000011'))

    def test_pop(self):
        # pop() behaves like list.pop, including negative indices and
        # IndexError on empty / out-of-range.
        a = bitarray()
        self.assertRaises(IndexError, a.pop)
        for a in self.randombitarrays():
            self.assertRaises(IndexError, a.pop, len(a))
            self.assertRaises(IndexError, a.pop, -len(a)-1)
            if len(a) == 0:
                continue
            aa = a.tolist()
            enda = a.endian()
            self.assertEqual(a.pop(), aa[-1])
            self.check_obj(a)
            self.assertEqual(a.endian(), enda)
        for a in self.randombitarrays():
            if len(a) == 0:
                continue
            n = randint(-len(a), len(a)-1)
            aa = a.tolist()
            self.assertEqual(a.pop(n), aa[n])
            self.check_obj(a)

    def test_setall(self):
        a = bitarray(5)
        a.setall(True)
        self.assertEQUAL(a, bitarray('11111'))
        for a in self.randombitarrays():
            val = randint(0, 1)
            b = a
            b.setall(val)
            self.assertEqual(b, bitarray(len(b)*[val]))
            self.assert_(a is b)
            self.check_obj(b)

    def test_bytereverse(self):
        # bytereverse() reverses bit order within each byte in place.
        a = bitarray()
        a.bytereverse()
        self.assertEqual(a, bitarray())
        a = bitarray('1011')
        a.bytereverse()
        self.assertEqual(a, bitarray('0000'))
        a = bitarray('111011')
        a.bytereverse()
        self.assertEqual(a, bitarray('001101'))
        a = bitarray('11101101')
        a.bytereverse()
        self.assertEqual(a, bitarray('10110111'))
        for i in xrange(256):
            a = bitarray()
            a.fromstring(chr(i))
            aa = a.tolist()
            b = a
            b.bytereverse()
            self.assertEqual(b, bitarray(aa[::-1]))
            self.assert_(a is b)
            self.check_obj(b)

    def test_fromtoword(self):
        # fromword() appends `bits` bits of the integer; toword() reads
        # them back from the given offset — round-trip for all widths.
        for bits in range (17):
            for word in range(5,(1<<bits)):
                init = '1'
                r = bitarray(init, endian='little')
                r.fromword (word, bits=bits)
                self.assertEqual(len(r), len(init)+bits)
                self.assertEqual(len(r.to01()), len(init)+bits)
                word2 = r.toword(len(init), bits=bits)
                self.assertEqual (word, word2)

tests.append(MethodTests)
# ---------------------------------------------------------------------------
class StringTests(unittest.TestCase, Util):
    """Tests for byte-string conversion: fromstring/tostring and
    pack/unpack."""

    def randomstrings(self):
        # Helper: yields random 8-bit byte strings of length 1..19.
        for n in xrange(1, 20):
            yield ''.join(chr(randint(0, 255)) for x in xrange(n))

    def test_fromstring(self):
        # fromstring() appends 8 bits per byte, in place.
        a = bitarray(endian='big')
        a.fromstring('A')
        self.assertEqual(a, bitarray('01000001'))
        b = a
        b.fromstring('BC')
        self.assertEQUAL(b, bitarray('01000001' '01000010' '01000011'))
        self.assert_(b is a)
        for b in self.randombitarrays():
            c = b.__copy__()
            b.fromstring('')
            self.assertEQUAL(b, c)
        for b in self.randombitarrays():
            for s in self.randomstrings():
                a = bitarray(endian=b.endian())
                a.fromstring(s)
                c = b.__copy__()
                b.fromstring(s)
                self.assertEQUAL(b[-len(a):], a)
                self.assertEQUAL(b[:-len(a)], c)
                self.assertEQUAL(c + a, b)

    def test_tostring(self):
        # tostring() round-trips with fromstring for either endianness;
        # partial bytes are padded (little-endian fill patterns below).
        a = bitarray()
        self.assertEqual(a.tostring(), '')
        for end in ('big', 'little'):
            a = bitarray(endian=end)
            a.fromstring('foo')
            self.assertEqual(a.tostring(), "foo")
            for s in self.randomstrings():
                a = bitarray(endian=end)
                a.fromstring(s)
                self.assertEqual(a.tostring(), s)
        for n, s in [(1, '\x01'), (2, '\x03'), (3, '\x07'), (4, '\x0f'),
                     (5, '\x1f'), (6, '\x3f'), (7, '\x7f'), (8, '\xff'),
                     (12, '\xff\x0f'), (15, '\xff\x7f'), (16, '\xff\xff'),
                     (17, '\xff\xff\x01'), (24, '\xff\xff\xff')]:
            a = bitarray(n, endian='little')
            a.setall(1)
            self.assertEqual(a.tostring(), s)

    def test_unpack(self):
        # unpack() maps each bit to a one-byte string; zero/one bytes
        # may be given positionally or by keyword.
        a = bitarray('01')
        self.assertEqual(a.unpack(), '\x00\xff')
        self.assertEqual(a.unpack('A'), 'A\xff')
        self.assertEqual(a.unpack('0', '1'), '01')
        self.assertEqual(a.unpack(one='\x01'), '\x00\x01')
        self.assertEqual(a.unpack(zero='A'), 'A\xff')
        self.assertEqual(a.unpack(one='t', zero='f'), 'ft')
        self.assertEqual(a.unpack('a', one='b'), 'ab')
        self.assertRaises(TypeError, a.unpack, 'a', zero='b')
        self.assertRaises(TypeError, a.unpack, foo='b')
        self.assertRaises(TypeError, a.unpack, 'a', 'b', 'c')
        for a in self.randombitarrays():
            self.assertEqual(a.unpack('0', '1'), a.to01())
            b = bitarray()
            b.pack(a.unpack())
            self.assertEqual(b, a)
            b = bitarray()
            b.pack(a.unpack('\x01', '\x00'))
            b.invert()
            self.assertEqual(b, a)

    def test_pack(self):
        # pack() appends one bit per byte: '\x00' -> 0, anything else -> 1;
        # non-byte-string arguments raise TypeError.
        a = bitarray()
        a.pack('\x00')
        self.assertEqual(a, bitarray('0'))
        a.pack('\xff')
        self.assertEqual(a, bitarray('01'))
        a.pack('\x01\x00\x7a')
        self.assertEqual(a, bitarray('01101'))
        a = bitarray()
        for n in xrange(256):
            a.pack(chr(n))
        self.assertEqual(a, bitarray('0' + 255 * '1'))
        self.assertRaises(TypeError, a.pack, 0)
        self.assertRaises(TypeError, a.pack, u'1')
        self.assertRaises(TypeError, a.pack, [1, 3])
        self.assertRaises(TypeError, a.pack, bitarray())

tests.append(StringTests)
# ---------------------------------------------------------------------------
class FileTests(unittest.TestCase, Util):
    """Tests for file I/O (fromfile/tofile) plus pickling and shelving
    to disk. Each test runs against a fresh temporary directory."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()
        self.tmpfname = os.path.join(self.tmpdir, 'testfile')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def test_cPickle(self):
        from cPickle import load, dump
        for a in self.randombitarrays():
            fo = open(self.tmpfname, 'wb')
            dump(a, fo)
            fo.close()
            b = load(open(self.tmpfname, 'rb'))
            self.assert_(b is not a)
            self.assertEQUAL(a, b)

    def test_shelve(self):
        # Round-trip random bitarrays through a shelve database.
        import shelve, hashlib
        d = shelve.open(self.tmpfname)
        stored = []
        for a in self.randombitarrays():
            key = hashlib.md5(repr(a) + a.endian()).hexdigest()
            d[key] = a
            stored.append((key, a))
        d.close()
        del d
        d = shelve.open(self.tmpfname)
        for k, v in stored:
            self.assertEQUAL(d[k], v)
        d.close()

    def test_fromfile_wrong_args(self):
        b = bitarray()
        self.assertRaises(TypeError, b.fromfile)
        self.assertRaises(TypeError, b.fromfile, StringIO()) # file not open
        self.assertRaises(TypeError, b.fromfile, 42)
        self.assertRaises(TypeError, b.fromfile, 'bar')

    def test_from_empty_file(self):
        fo = open(self.tmpfname, 'wb')
        fo.close()
        a = bitarray()
        a.fromfile(open(self.tmpfname, 'rb'))
        self.assertEqual(a, bitarray())

    def test_from_large_file(self):
        N = 100000
        fo = open(self.tmpfname, 'wb')
        fo.write(N * 'X')
        fo.close()
        a = bitarray()
        a.fromfile(open(self.tmpfname, 'rb'))
        self.assertEqual(len(a), 8 * N)
        self.assertEqual(a.buffer_info()[1], N)
        # make sure there is no over-allocation
        self.assertEqual(a.buffer_info()[4], N)

    def test_fromfile_Foo(self):
        # fromfile() appends to existing content; bit layout follows
        # the array's endianness.
        fo = open(self.tmpfname, 'wb')
        fo.write('Foo\n')
        fo.close()
        a = bitarray(endian='big')
        a.fromfile(open(self.tmpfname, 'rb'))
        self.assertEqual(a, bitarray('01000110011011110110111100001010'))
        a = bitarray(endian='little')
        a.fromfile(open(self.tmpfname, 'rb'))
        self.assertEqual(a, bitarray('01100010111101101111011001010000'))
        a = bitarray('1', endian='little')
        a.fromfile(open(self.tmpfname, 'rb'))
        self.assertEqual(a, bitarray('101100010111101101111011001010000'))
        for n in xrange(20):
            a = bitarray(n, endian='little')
            a.setall(1)
            a.fromfile(open(self.tmpfname, 'rb'))
            self.assertEqual(a,
                             n*bitarray('1') +
                             bitarray('01100010111101101111011001010000'))

    def test_fromfile_n(self):
        # The optional count argument reads at most n bytes; reading
        # past EOF raises EOFError but keeps what was read.
        a = bitarray()
        a.fromstring('ABCDEFGHIJ')
        fo = open(self.tmpfname, 'wb')
        a.tofile(fo)
        fo.close()
        b = bitarray()
        f = open(self.tmpfname, 'rb')
        b.fromfile(f, 1); self.assertEqual(b.tostring(), 'A')
        f.read(1)
        b = bitarray()
        b.fromfile(f, 2); self.assertEqual(b.tostring(), 'CD')
        b.fromfile(f, 1); self.assertEqual(b.tostring(), 'CDE')
        b.fromfile(f, 0); self.assertEqual(b.tostring(), 'CDE')
        b.fromfile(f); self.assertEqual(b.tostring(), 'CDEFGHIJ')
        b.fromfile(f); self.assertEqual(b.tostring(), 'CDEFGHIJ')
        f.close()
        b = bitarray()
        f = open(self.tmpfname, 'rb')
        f.read(1);
        self.assertRaises(EOFError, b.fromfile, f, 10)
        f.close()
        self.assertEqual(b.tostring(), 'BCDEFGHIJ')
        b = bitarray()
        f = open(self.tmpfname, 'rb')
        b.fromfile(f);
        self.assertEqual(b.tostring(), 'ABCDEFGHIJ')
        self.assertRaises(EOFError, b.fromfile, f, 1)
        f.close()

    def test_tofile(self):
        # tofile() writes whole bytes; partial final bytes are padded
        # with zeros (verified via fill() below).
        a = bitarray()
        f = open(self.tmpfname, 'wb')
        a.tofile(f)
        f.close()
        fi = open(self.tmpfname, 'rb')
        self.assertEqual(fi.read(), '')
        fi.close()
        a = bitarray('01000110011011110110111100001010', endian='big')
        f = open(self.tmpfname, 'wb')
        a.tofile(f)
        f.close()
        fi = open(self.tmpfname, 'rb')
        self.assertEqual(fi.read(), 'Foo\n')
        fi.close()
        for a in self.randombitarrays():
            b = bitarray(a, endian='big')
            fo = open(self.tmpfname, 'wb')
            b.tofile(fo)
            fo.close()
            s = open(self.tmpfname, 'rb').read()
            self.assertEqual(len(s), a.buffer_info()[1])
        for n in xrange(3):
            a.fromstring(n * 'A')
            self.assertRaises(TypeError, a.tofile)
            self.assertRaises(TypeError, a.tofile, StringIO())
            f = open(self.tmpfname, 'wb')
            a.tofile(f)
            f.close()
            # writing to a closed file raises
            self.assertRaises(TypeError, a.tofile, f)
        for n in xrange(20):
            a = n * bitarray('1', endian='little')
            fo = open(self.tmpfname, 'wb')
            a.tofile(fo)
            fo.close()
            s = open(self.tmpfname, 'rb').read()
            self.assertEqual(len(s), a.buffer_info()[1])
            b = a.__copy__()
            b.fill()
            c = bitarray(endian='little')
            c.fromstring(s)
            self.assertEqual(c, b)

tests.append(FileTests)
# ---------------------------------------------------------------------------
class PrefixCodeTests(unittest.TestCase):
def test_encode_check_codedict(self):
    # The code dict must be non-empty and map symbols to non-empty
    # bitarrays; a failed validation leaves the array unchanged.
    a = bitarray()
    self.assertRaises(TypeError, a.encode, 0, '')
    self.assertRaises(ValueError, a.encode, {}, '')
    self.assertRaises(TypeError, a.encode, {'a':42}, '')
    self.assertRaises(ValueError, a.encode, {'a':bitarray()}, '')
    # 42 not iterable
    self.assertRaises(TypeError, a.encode, {'a':bitarray('0')}, 42)
    self.assertEqual(len(a), 0)

def test_encode_string(self):
    a = bitarray()
    d = {'a':bitarray('0')}
    a.encode(d, '')
    self.assertEqual(a, bitarray())
    a.encode(d, 'a')
    self.assertEqual(a, bitarray('0'))
    # the code dict is not mutated by encoding
    self.assertEqual(d, {'a':bitarray('0')})

def test_encode_list(self):
    a = bitarray()
    d = {'a':bitarray('0')}
    a.encode(d, [])
    self.assertEqual(a, bitarray())
    a.encode(d, ['a'])
    self.assertEqual(a, bitarray('0'))
    self.assertEqual(d, {'a':bitarray('0')})

def test_encode_iter(self):
    a = bitarray()
    d = {'a':bitarray('0'), 'b':bitarray('1')}
    a.encode(d, iter('abba'))
    self.assertEqual(a, bitarray('0110'))
    def foo():
        for c in 'bbaabb':
            yield c
    # encode() appends to existing content
    a.encode(d, foo())
    self.assertEqual(a, bitarray('0110110011'))
    self.assertEqual(d, {'a':bitarray('0'), 'b':bitarray('1')})

def test_encode(self):
    # Symbols absent from the code dict raise ValueError.
    d = {'I':bitarray('1'),
         'l':bitarray('01'),
         'a':bitarray('001'),
         'n':bitarray('000')}
    a = bitarray()
    a.encode(d, 'Ilan')
    self.assertEqual(a, bitarray('101001000'))
    a.encode(d, 'a')
    self.assertEqual(a, bitarray('101001000001'))
    self.assertEqual(d, {'I':bitarray('1'), 'l':bitarray('01'),
                         'a':bitarray('001'), 'n':bitarray('000')})
    self.assertRaises(ValueError, a.encode, d, 'arvin')

def test_decode_check_codedict(self):
    a = bitarray()
    self.assertRaises(TypeError, a.decode, 0)
    self.assertRaises(ValueError, a.decode, {})
    # 42 not iterable
    self.assertRaises(TypeError, a.decode, {'a':42})
    self.assertRaises(ValueError, a.decode, {'a':bitarray()})

def test_decode_simple(self):
    # decode() returns the symbol list and leaves both the array and
    # the code dict unchanged.
    d = {'I':bitarray('1'),
         'l':bitarray('01'),
         'a':bitarray('001'),
         'n':bitarray('000')}
    a = bitarray('101001000')
    self.assertEqual(a.decode(d), ['I', 'l', 'a', 'n'])
    self.assertEqual(d, {'I':bitarray('1'), 'l':bitarray('01'),
                         'a':bitarray('001'), 'n':bitarray('000')})
    self.assertEqual(a, bitarray('101001000'))

def test_decode_empty(self):
    d = {'a':bitarray('1')}
    a = bitarray()
    self.assertEqual(a.decode(d), [])
    self.assertEqual(d, {'a':bitarray('1')})

def test_decode_buggybitarray(self):
    # Bits that do not decode to a complete symbol raise ValueError
    # and leave the array unchanged.
    d = {'a':bitarray('0')}
    a = bitarray('1')
    self.assertRaises(ValueError, a.decode, d)
    self.assertEqual(a, bitarray('1'))
    self.assertEqual(d, {'a':bitarray('0')})

def test_decode_buggybitarray2(self):
    d = {'a':bitarray('00'), 'b':bitarray('01')}
    a = bitarray('1')
    self.assertRaises(ValueError, a.decode, d)
    self.assertEqual(a, bitarray('1'))

def test_decode_ambiguous_code(self):
    # Two symbols sharing the same code are rejected even on an
    # empty array.
    d = {'a':bitarray('0'), 'b':bitarray('0'), 'c':bitarray('1')}
    a = bitarray()
    self.assertRaises(ValueError, a.decode, d)

def test_decode_ambiguous2(self):
    d = {'a':bitarray('01'), 'b':bitarray('01'), 'c':bitarray('1')}
    a = bitarray()
    self.assertRaises(ValueError, a.decode, d)

def test_miscitems(self):
    # Symbols may be arbitrary hashable objects, not just characters.
    d = {None :bitarray('00'),
         0    :bitarray('110'),
         1    :bitarray('111'),
         ''   :bitarray('010'),
         2    :bitarray('011')}
    a = bitarray()
    a.encode(d, [None, 0, 1, '', 2])
    self.assertEqual(a, bitarray('00110111010011'))
    self.assertEqual(a.decode(d), [None, 0, 1, '', 2])
def test_real_example(self):
code = {' ' : bitarray('001'),
'.' : bitarray('0101010'),
'a' : bitarray('0110'),
'b' : bitarray('0001100'),
'c' : bitarray('000011'),
'd' : bitarray('01011'),
'e' : bitarray('111'),
'f' : bitarray('010100'),
'g' : bitarray('101000'),
'h' : bitarray('00000'),
'i' : bitarray('1011'),
'j' : bitarray('0111101111'),
'k' : bitarray('00011010'),
'l' : bitarray('01110'),
'm' : bitarray('000111'),
'n' : bitarray('1001'),
'o' : bitarray('1000'),
'p' : bitarray('101001'),
'q' : bitarray('00001001101'),
'r' : bitarray('1101'),
's' : bitarray('1100'),
't' : bitarray('0100'),
'u' : bitarray('000100'),
'v' : bitarray('0111100'),
'w' : bitarray('011111'),
'x' : bitarray('0000100011'),
'y' : bitarray('101010'),
'z' : bitarray('00011011110')}
a = bitarray()
a.encode(code, 'the quick brown fox jumps over the lazy dog.')
self.assertEqual(a, bitarray('01000000011100100001001101000100101100'
'00110001101000100011001101100001111110010010101001000000010001100'
'10111101111000100000111101001110000110000111100111110100101000000'
'0111001011100110000110111101010100010101110001010000101010'))
self.assertEqual(''.join(a.decode(code)),
'the quick brown fox jumps over the lazy dog.')
# Register PrefixCodeTests with the module-level suite list.
tests.append(PrefixCodeTests)

# ---------------------------------------------------------------------------
def pages():
    """Return the current size of this process in pages.

    Reads the first field (total program size) of /proc/<pid>/statm.
    Returns 0 on any platform other than Linux.
    NOTE(review): 'linux2' is the Python 2 value of sys.platform; Python 3
    reports plain 'linux', so the check is Python-2 specific by design.
    """
    if sys.platform != 'linux2':
        return 0
    # Use a context manager so the /proc file handle is closed promptly
    # (the original left the file object open until garbage collection).
    with open('/proc/%i/statm' % os.getpid()) as fo:
        dat = fo.read()
    return int(dat.split()[0])
def check_memory_leaks(verbosity):
    # Run the full test suite in an endless loop, appending the process
    # size in pages (see pages() above) to 'pages.log' after every pass.
    # A steadily growing page count indicates a memory leak in the
    # extension module.  Interrupt with Ctrl-C.
    suite = unittest.TestSuite()
    for cls in tests:
        suite.addTest(unittest.makeSuite(cls))

    # start from a fresh log file on every invocation
    logfile = 'pages.log'
    if os.path.isfile(logfile):
        os.unlink(logfile)

    i = 0
    runner = unittest.TextTestRunner(verbosity=verbosity)
    while True:  # intentionally endless
        print 'Run', i
        r = runner.run(suite)
        # NOTE(review): `i % 1 == 0` is always true, so every iteration is
        # logged; raise the modulus to thin out the log.
        if i % 1 == 0:
            fo = open(logfile, 'a')
            fo.write('%10i %r %10i\n' % (i, r.wasSuccessful(), pages()))
            fo.close()
        i += 1
def run(verbosity, chk_mem_leaks=False):
    """Build a suite from every registered test class and run it once.

    `chk_mem_leaks` is accepted for interface compatibility but is not
    used here (memory-leak looping lives in check_memory_leaks()).
    Returns the unittest result object.
    """
    suite = unittest.TestSuite()
    suite.addTests(unittest.makeSuite(case) for case in tests)
    return unittest.TextTestRunner(verbosity=verbosity).run(suite)
if __name__ == '__main__':
    # Command-line flags (passed as bare words): 'v' for verbose unittest
    # output, 'm' to loop the suite forever while logging memory usage.
    verbosity = 2 if 'v' in sys.argv else 1
    if 'm' in sys.argv:
        check_memory_leaks(verbosity)
    else:
        run(verbosity)
else:
    # Imported as a module: report which bitarray installation is in use.
    from bitarray import __version__
    print 'bitarray is installed in:', os.path.dirname(__file__)
    print 'bitarray version:', __version__
    print sys.version
| |
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. BMC
# Bioinformatics 2010, 11:24. doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
#!/usr/bin/env python
import sys
import os
from collections import defaultdict
from string import strip
import sqlite3
import math
import tarfile
c = None
__all__ = ["NCBITaxa"]
class NCBITaxa(object):
    """
    versionadded: 2.3

    Provides a local transparent connector to the NCBI taxonomy database,
    stored as a sqlite file (by default ~/.etetoolkit/taxa.sqlite, built on
    first use from the NCBI taxdump archive).
    """
    def __init__(self, dbfile=None):
        """Open the taxonomy database at `dbfile` (default:
        ~/.etetoolkit/taxa.sqlite), building it first if necessary."""
        # Fall back to the filesystem root if HOME is not set.
        if not dbfile:
            self.dbfile = os.path.join(os.environ.get('HOME', '/'), '.etetoolkit', 'taxa.sqlite')
        else:
            self.dbfile = dbfile

        # Auto-download/parse the NCBI dump only for the *default* location;
        # a missing explicitly-given dbfile is an error (raised below).
        if dbfile is None and not os.path.exists(self.dbfile):
            print >>sys.stderr, 'NCBI database not present yet (first time used?)'
            self.update_taxonomy_database()

        if not os.path.exists(self.dbfile):
            raise ValueError("Cannot open taxonomy database: %s" %self.dbfile)

        self.db = None
        self._connect()
def update_taxonomy_database(self, taxdump_file=None):
"""Updates the ncbi taxonomy database by downloading and parsing the latest
taxdump.tar.gz file from the NCBI FTP site.
:param None taxdump_file: an alternative location of the taxdump.tax.gz file.
"""
if not taxdump_file:
update_db(self.dbfile)
else:
update_db(self.dbfile, taxdump_file)
    def _connect(self):
        # Open (or re-open) the sqlite connection to the taxonomy db.
        self.db = sqlite3.connect(self.dbfile)
def _translate_merged(self, all_taxids):
conv_all_taxids = set((map(int, all_taxids)))
cmd = 'select taxid_old, taxid_new FROM merged WHERE taxid_old IN (%s)' %','.join(map(str, all_taxids))
result = self.db.execute(cmd)
conversion = {}
for old, new in result.fetchall():
conv_all_taxids.discard(int(old))
conv_all_taxids.add(int(new))
conversion[int(old)] = int(new)
return conv_all_taxids, conversion
    def get_fuzzy_name_translation(self, name, sim=0.9):
        '''
        Given an inexact species name, returns the best match in the NCBI database of taxa names.

        :argument 0.9 sim: Min word similarity to report a match (from 0 to 1).

        :return: taxid, species-name-match, match-score
        '''
        # Requires pysqlite2 plus the bundled Levenshtein sqlite extension
        # shipped next to this module.
        import pysqlite2.dbapi2 as sqlite2
        _db = sqlite2.connect(self.dbfile)
        _db.enable_load_extension(True)
        module_path = os.path.split(os.path.realpath(__file__))[0]
        _db.execute("select load_extension('%s')" % os.path.join(module_path,
                                                                 "SQLite-Levenshtein/levenshtein.sqlext"))
        print "Trying fuzzy search for %s" % name
        # Maximum number of edits allowed for the requested similarity.
        maxdiffs = math.ceil(len(name) * (1-sim))
        # NOTE(review): `name` is interpolated directly into the SQL text,
        # so quote characters in the input would break the query
        # (potential SQL injection) -- consider parameterized queries.
        cmd = 'SELECT taxid, spname, LEVENSHTEIN(spname, "%s") AS sim  FROM species WHERE sim<=%s ORDER BY sim LIMIT 1;' % (name, maxdiffs)
        taxid, spname, score = None, None, len(name)
        result = _db.execute(cmd)
        try:
            taxid, spname, score = result.fetchone()
        except TypeError:
            # No hit in `species` (fetchone() returned None): retry
            # against the synonym table.
            cmd = 'SELECT taxid, spname, LEVENSHTEIN(spname, "%s") AS sim  FROM synonym WHERE sim<=%s ORDER BY sim LIMIT 1;' % (name, maxdiffs)
            result = _db.execute(cmd)
            try:
                taxid, spname, score = result.fetchone()
            except:
                pass
            else:
                taxid = int(taxid)
        else:
            taxid = int(taxid)

        # Normalize the edit distance into a 0..1 similarity score.
        norm_score = 1 - (float(score)/len(name))
        if taxid:
            print "FOUND! %s taxid:%s score:%s (%s)" %(spname, taxid, score, norm_score)

        return taxid, spname, norm_score
def get_rank(self, taxids):
'return a dictionary converting a list of taxids into their corresponding NCBI taxonomy rank'
all_ids = set(taxids)
all_ids.discard(None)
all_ids.discard("")
query = ','.join(['"%s"' %v for v in all_ids])
cmd = "select taxid, rank FROM species WHERE taxid IN (%s);" %query
result = self.db.execute(cmd)
id2rank = {}
for tax, spname in result.fetchall():
id2rank[tax] = spname
return id2rank
def get_lineage(self, taxid):
"""Given a valid taxid number, return its corresponding lineage track as a
hierarchically sorted list of parent taxids.
"""
if not taxid:
return None
result = self.db.execute('SELECT track FROM species WHERE taxid=%s' %taxid)
raw_track = result.fetchone()
if not raw_track:
raw_track = ["1"]
#raise ValueError("%s taxid not found" %taxid)
track = map(int, raw_track[0].split(","))
return list(reversed(track))
    def get_taxid_translator(self, taxids):
        """Given a list of taxids, returns a dictionary with their corresponding
        scientific names.

        Taxids missing from the 'species' table are retried through the
        'merged' table so deprecated ids still resolve.
        """
        all_ids = set(map(int, taxids))
        # NOTE(review): these discards are dead code -- every member has
        # already passed through int() above, so None/"" cannot occur.
        all_ids.discard(None)
        all_ids.discard("")
        query = ','.join(['"%s"' %v for v in all_ids])
        cmd = "select taxid, spname FROM species WHERE taxid IN (%s);" %query
        result = self.db.execute(cmd)
        id2name = {}
        for tax, spname in result.fetchall():
            id2name[tax] = spname

        # any taxid without translation? let's try in the merged table
        if len(all_ids) != len(id2name):
            not_found_taxids = all_ids - set(id2name.keys())
            # Translate deprecated ids to their current ones, then map the
            # names found for the *new* ids back to the ids the caller used.
            taxids, old2new = self._translate_merged(not_found_taxids)
            new2old = dict([(v,k) for k,v in old2new.iteritems()])
            if old2new:
                query = ','.join(['"%s"' %v for v in new2old])
                cmd = "select taxid, spname FROM species WHERE taxid IN (%s);" %query
                result = self.db.execute(cmd)
                for tax, spname in result.fetchall():
                    id2name[new2old[tax]] = spname

        return id2name
def get_name_translator(self, names):
"""
Given a list of taxid scientific names, returns a dictionary translating them into their corresponding taxids.
Exact name match is required for translation.
"""
name2id = {}
name2realname = {}
name2origname = {}
for n in names:
name2origname[n.lower()] = n
names = set(name2origname.keys())
query = ','.join(['"%s"' %n for n in name2origname.iterkeys()])
cmd = 'select spname, taxid from species where spname IN (%s)' %query
result = self.db.execute('select spname, taxid from species where spname IN (%s)' %query)
for sp, taxid in result.fetchall():
oname = name2origname[sp.lower()]
name2id[oname] = taxid
name2realname[oname] = sp
missing = names - set(name2id.keys())
if missing:
query = ','.join(['"%s"' %n for n in missing])
result = self.db.execute('select spname, taxid from synonym where spname IN (%s)' %query)
for sp, taxid in result.fetchall():
oname = name2origname[sp.lower()]
name2id[oname] = taxid
name2realname[oname] = sp
return name2id
def translate_to_names(self, taxids):
"""
Given a list of taxid numbers, returns another list with their corresponding scientific names.
"""
id2name = self.get_taxid_translator(taxids)
names = []
for sp in taxids:
names.append(id2name.get(sp, sp))
return names
def get_topology(self, taxids, intermediate_nodes=False, rank_limit=None, collapse_subspecies=False, annotate=True):
"""Given a list of taxid numbers, return the minimal pruned NCBI taxonomy tree
containing all of them.
:param False intermediate_nodes: If True, single child nodes
representing the complete lineage of leaf nodes are kept. Otherwise, the
tree is pruned to contain the first common ancestor of each group.
:param None rank_limit: If valid NCBI rank name is provided, the tree is
pruned at that given level. For instance, use rank="species" to get rid
of sub-species or strain leaf nodes.
:param False collapse_subspecies: If True, any item under the species
rank will be collapsed into the species upper node.
"""
from ete2 import PhyloTree
sp2track = {}
elem2node = {}
for sp in taxids:
track = []
lineage = self.get_lineage(sp)
id2rank = self.get_rank(lineage)
for elem in lineage:
if elem not in elem2node:
node = elem2node.setdefault(elem, PhyloTree())
node.name = str(elem)
node.taxid = elem
node.add_feature("rank", str(id2rank.get(int(elem), "no rank")))
else:
node = elem2node[elem]
track.append(node)
sp2track[sp] = track
# generate parent child relationships
for sp, track in sp2track.iteritems():
parent = None
for elem in track:
if parent and elem not in parent.children:
parent.add_child(elem)
if rank_limit and elem.rank == rank_limit:
break
parent = elem
root = elem2node[1]
#remove onechild-nodes
if not intermediate_nodes:
for n in root.get_descendants():
if len(n.children) == 1 and int(n.name) not in taxids:
n.delete(prevent_nondicotomic=False)
if collapse_subspecies:
species_nodes = [n for n in t.traverse() if n.rank == "species"
if int(n.name) in all_taxids]
for sp_node in species_nodes:
bellow = sp_node.get_descendants()
if bellow:
# creates a copy of the species node
connector = sp_node.__class__()
for f in sp_node.features:
connector.add_feature(f, getattr(sp_node, f))
connector.name = connector.name + "{species}"
for n in bellow:
n.detach()
n.name = n.name + "{%s}" %n.rank
sp_node.add_child(n)
sp_node.add_child(connector)
sp_node.add_feature("collapse_subspecies", "1")
if len(root.children) == 1:
tree = root.children[0].detach()
else:
tree = root
if annotate:
self.annotate_tree(tree)
return tree
    def annotate_tree(self, t, taxid_attr="name", tax2name=None, tax2track=None, tax2rank=None):
        """Annotate a tree containing taxids as leaf names by adding the 'taxid',
        'sci_name', 'lineage', 'named_lineage' and 'rank' additional attributes.

        :param t: a Tree (or Tree derived) instance.

        :param name taxid_attr: Allows to set a custom node attribute containing
            the taxid number associated to each node (i.e. species in PhyloTree
            instances).

        :param tax2name,tax2track,tax2rank: Use these arguments to provide
            pre-calculated dictionaries providing translation from taxid number and
            names, track lineages and ranks.

        :return: the (tax2name, tax2track, tax2rank) dictionaries actually used,
            so callers can reuse them on subsequent calls.
        """
        #leaves = t.get_leaves()
        # Collect every node whose taxid attribute parses as an int.
        taxids = set()
        for n in t.traverse():
            try:
                tid = int(getattr(n, taxid_attr))
            except (ValueError,AttributeError):
                pass
            else:
                taxids.add(tid)
        merged_conversion = {}
        # Replace deprecated taxids with their current equivalents.
        taxids, merged_conversion = self._translate_merged(taxids)
        # Build any translation table the caller did not pre-compute, or
        # whose coverage of `taxids` is incomplete.
        if not tax2name or taxids - set(map(int, tax2name.keys())):
            #print "Querying for tax names"
            tax2name = self.get_taxid_translator([tid for tid in taxids])
        if not tax2track or taxids - set(map(int, tax2track.keys())):
            #print "Querying for tax lineages"
            tax2track = dict([(tid, self.get_lineage(tid)) for tid in taxids])

        # Names are also needed for every ancestor occurring in any track.
        all_taxid_codes = set([_tax for _lin in tax2track.values() for _tax in _lin])
        extra_tax2name = self.get_taxid_translator(list(all_taxid_codes - set(tax2name.keys())))
        tax2name.update(extra_tax2name)

        if not tax2rank:
            tax2rank = self.get_rank(tax2name.keys())

        n2leaves = t.get_cached_content()
        for n in t.traverse('postorder'):
            try:
                node_taxid = int(getattr(n, taxid_attr))
            except (ValueError, AttributeError):
                node_taxid = None
            n.add_features(taxid = node_taxid)
            if node_taxid:
                if node_taxid in merged_conversion:
                    node_taxid = merged_conversion[node_taxid]
                n.add_features(sci_name = tax2name.get(node_taxid, getattr(n, taxid_attr, 'NA')),
                               lineage = tax2track[node_taxid],
                               rank = tax2rank.get(node_taxid, 'Unknown'),
                               named_lineage = self.translate_to_names(tax2track[node_taxid]))
            elif n.is_leaf():
                # Leaf without a usable taxid: annotate with placeholders.
                n.add_features(sci_name = getattr(n, taxid_attr, 'NA'),
                               lineage = [],
                               rank = 'Unknown',
                               named_lineage = [])
            else:
                # Internal node: infer its taxonomy from the deepest lineage
                # shared by all of its leaves (postorder traversal guarantees
                # the leaves were annotated first).
                lineage = self._common_lineage([lf.lineage for lf in n2leaves[n]])
                ancestor = lineage[-1]
                n.add_features(sci_name = tax2name.get(ancestor, str(ancestor)),
                               taxid = ancestor,
                               lineage = lineage,
                               rank = tax2rank.get(ancestor, 'Unknown'),
                               named_lineage = [tax2name.get(tax, str(tax)) for tax in lineage])

        return tax2name, tax2track, tax2rank
def _common_lineage(self, vectors):
occurrence = defaultdict(int)
pos = defaultdict(set)
for v in vectors:
for i, taxid in enumerate(v):
occurrence[taxid] += 1
pos[taxid].add(i)
common = [taxid for taxid, ocu in occurrence.iteritems() if ocu == len(vectors)]
if not common:
return [""]
else:
sorted_lineage = sorted(common, lambda x, y: cmp(min(pos[x]), min(pos[y])))
return sorted_lineage
# OLD APPROACH:
# visited = defaultdict(int)
# for index, name in [(ei, e) for v in vectors for ei, e in enumerate(v)]:
# visited[(name, index)] += 1
# def _sort(a, b):
# if a[1] > b[1]:
# return 1
# elif a[1] < b[1]:
# return -1
# else:
# if a[0][1] > b[0][1]:
# return 1
# elif a[0][1] < b[0][1]:
# return -1
# return 0
# matches = sorted(visited.items(), _sort)
# if matches:
# best_match = matches[-1]
# else:
# return "", set()
# if best_match[1] != len(vectors):
# return "", set()
# else:
# return best_match[0][0], [m[0][0] for m in matches if m[1] == len(vectors)]
    def get_broken_branches(self, t, taxa_lineages, n2content=None):
        """Returns a list of NCBI lineage names that are not monophyletic in the
        provided tree, as well as the list of affected branches and their size.

        CURRENTLY EXPERIMENTAL

        :param t: annotated tree (see annotate_tree()).
        :param taxa_lineages: mapping from leaf taxid to its lineage track.
        :param n2content: optional pre-computed node -> leaf-content cache.
        """
        if not n2content:
            n2content = t.get_cached_content()

        # Map every taxid appearing in any leaf lineage to the set of leaves
        # carrying it; leaves whose sci_name is "unknown" are kept apart and
        # ignored in the monophyly test below.
        tax2node = defaultdict(set)
        unknown = set()
        for leaf in t.iter_leaves():
            if leaf.sci_name.lower() != "unknown":
                lineage = taxa_lineages[leaf.taxid]
                for index, tax in enumerate(lineage):
                    tax2node[tax].add(leaf)
            else:
                unknown.add(leaf)

        broken_branches = defaultdict(set)
        broken_clades = set()
        for tax, leaves in tax2node.iteritems():
            if len(leaves) > 1:
                common = t.get_common_ancestor(leaves)
            else:
                common = list(leaves)[0]
            # Symmetric difference: the taxon is broken when its common
            # ancestor contains leaves not belonging to it (modulo unknowns).
            if (leaves ^ set(n2content[common])) - unknown:
                broken_branches[common].add(tax)
                broken_clades.add(tax)

        broken_clade_sizes = [len(tax2node[tax]) for tax in broken_clades]
        return broken_branches, broken_clades, broken_clade_sizes

    # Disabled legacy implementation, kept for reference:
    # def annotate_tree_with_taxa(self, t, name2taxa_file, tax2name=None, tax2track=None, attr_name="name"):
    #     if name2taxa_file:
    #         names2taxid = dict([map(strip, line.split("\t"))
    #                             for line in open(name2taxa_file)])
    #     else:
    #         names2taxid = dict([(n.name, getattr(n, attr_name)) for n in t.iter_leaves()])
    #     not_found = 0
    #     for n in t.iter_leaves():
    #         n.add_features(taxid=names2taxid.get(n.name, 0))
    #         n.add_features(species=n.taxid)
    #         if n.taxid == 0:
    #             not_found += 1
    #     if not_found:
    #         print >>sys.stderr, "WARNING: %s nodes where not found within NCBI taxonomy!!" %not_found
    #     return self.annotate_tree(t, tax2name, tax2track, attr_name="taxid")
def load_ncbi_tree_from_dump(tar):
    """Build an ete2 Tree of the complete NCBI taxonomy from an opened
    taxdump tar archive, and collect (taxid, name) synonym pairs.

    :param tar: an open tarfile containing names.dmp and nodes.dmp.
    :return: (tree, synonyms) tuple.
    """
    from ete2 import Tree
    # Download: ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz
    parent2child = {}
    name2node = {}
    node2taxname = {}
    synonyms = set()
    name2rank = {}  # NOTE(review): never populated or read -- dead variable
    print "Loading node names..."
    # names.dmp is pipe-separated: field 0 = taxid, 1 = name, 3 = name class.
    for line in tar.extractfile("names.dmp"):
        fields = map(strip, line.split("|"))
        nodename = fields[0]
        name_type = fields[3].lower()
        taxname = fields[1]
        if name_type == "scientific name":
            node2taxname[nodename] = taxname
        elif name_type in set(["synonym", "equivalent name", "genbank equivalent name",
                               "anamorph", "genbank synonym", "genbank anamorph", "teleomorph"]):
            synonyms.add( (nodename, taxname) )
    print len(node2taxname), "names loaded."
    print len(synonyms), "synonyms loaded."

    print "Loading nodes..."
    # nodes.dmp: field 0 = taxid, 1 = parent taxid, 2 = rank.
    for line in tar.extractfile("nodes.dmp"):
        fields = line.split("|")
        nodename = fields[0].strip()
        parentname = fields[1].strip()
        n = Tree()
        n.name = nodename
        n.taxname = node2taxname[nodename]
        n.rank = fields[2].strip()
        parent2child[nodename] = parentname
        name2node[nodename] = n
    print len(name2node), "nodes loaded."

    print "Linking nodes..."
    for node in name2node:
        if node == "1":
            # taxid 1 is the NCBI root
            t = name2node[node]
        else:
            parent = parent2child[node]
            parent_node = name2node[parent]
            parent_node.add_child(name2node[node])
    print "Tree is loaded."
    return t, synonyms
def generate_table(t):
    """Dump the taxonomy tree `t` to 'taxa.tab': one tab-separated row per
    node with taxid, parent taxid, name, rank and comma-joined track."""
    OUT = open("taxa.tab", "w")
    for j, n in enumerate(t.traverse()):
        # lightweight progress indicator every 1000 nodes
        if j%1000 == 0:
            print "\r",j,"generating entries...",
        temp_node = n
        track = []
        # walk up to the root to build this node's lineage track
        while temp_node:
            track.append(temp_node.name)
            temp_node = temp_node.up
        if n.up:
            print >>OUT, '\t'.join([n.name, n.up.name, n.taxname, n.rank, ','.join(track)])
        else:
            # root row: empty parent column
            print >>OUT, '\t'.join([n.name, "", n.taxname, n.rank, ','.join(track)])
    OUT.close()
def update_db(dbfile, targz_file=None):
    """Rebuild the sqlite taxonomy db at `dbfile` from an NCBI taxdump
    archive, downloading the latest dump when none is supplied.

    Intermediate .tab files (and the downloaded archive) are removed only
    when the upload succeeds.
    """
    if not targz_file:
        import urllib
        print >>sys.stderr, 'Downloading taxdump.tar.gz from NCBI FTP site...'
        urllib.urlretrieve("ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz", "taxdump.tar.gz")
        print >>sys.stderr, 'Done. Parsing...'
        targz_file = "taxdump.tar.gz"
    tar = tarfile.open(targz_file, 'r')
    t, synonyms = load_ncbi_tree_from_dump(tar)
    print "Updating database: %s ..." %dbfile

    # Write the intermediate tab files consumed by upload_data().
    generate_table(t)
    open("syn.tab", "w").write('\n'.join(["%s\t%s" %(v[0],v[1]) for v in synonyms]))
    open("merged.tab", "w").write('\n'.join(['\t'.join(map(strip, line.split('|')[:2])) for line in tar.extractfile("merged.dmp")]))
    try:
        upload_data(dbfile)
    except:
        # re-raise so the temp files survive for debugging
        raise
    else:
        # clean up only on success
        os.system("rm syn.tab merged.tab taxa.tab taxdump.tar.gz")
def upload_data(dbfile):
    """Create the species/synonym/merged tables in `dbfile` and bulk-load
    them from the syn.tab, merged.tab and taxa.tab files written by
    update_db()."""
    print
    print 'Uploading to', dbfile
    basepath = os.path.split(dbfile)[0]
    if basepath and not os.path.exists(basepath):
        os.mkdir(basepath)

    db = sqlite3.connect(dbfile)
    # Recreate the schema from scratch; NOCASE collation makes the name
    # lookups case-insensitive.
    create_cmd = """
    DROP TABLE IF EXISTS species;
    DROP TABLE IF EXISTS synonym;
    DROP TABLE IF EXISTS merged;
    CREATE TABLE species (taxid INT PRIMARY KEY, parent INT, spname VARCHAR(50) COLLATE NOCASE, rank VARCHAR(50), track TEXT);
    CREATE TABLE synonym (taxid INT,spname VARCHAR(50) COLLATE NOCASE, PRIMARY KEY (spname, taxid));
    CREATE TABLE merged (taxid_old INT, taxid_new INT);
    CREATE INDEX spname1 ON species (spname COLLATE NOCASE);
    CREATE INDEX spname2 ON synonym (spname COLLATE NOCASE);
    """
    for cmd in create_cmd.split(';'):
        db.execute(cmd)
    print
    # Inserts below use parameterized queries (good); progress is echoed to
    # stderr every 5000 rows, and each file is committed as one transaction.
    for i, line in enumerate(open("syn.tab")):
        if i%5000 == 0 :
            print >>sys.stderr, '\rInserting synonyms: % 6d' %i,
            sys.stderr.flush()
        taxid, spname = line.strip('\n').split('\t')
        db.execute("INSERT INTO synonym (taxid, spname) VALUES (?, ?);", (taxid, spname))
    print
    db.commit()
    for i, line in enumerate(open("merged.tab")):
        if i%5000 == 0 :
            print >>sys.stderr, '\rInserting taxid merges: % 6d' %i,
            sys.stderr.flush()
        taxid_old, taxid_new = line.strip('\n').split('\t')
        db.execute("INSERT INTO merged (taxid_old, taxid_new) VALUES (?, ?);", (taxid_old, taxid_new))
    print
    db.commit()
    for i, line in enumerate(open("taxa.tab")):
        if i%5000 == 0 :
            print >>sys.stderr, '\rInserting taxids: % 6d' %i,
            sys.stderr.flush()
        taxid, parentid, spname, rank, lineage = line.strip('\n').split('\t')
        db.execute("INSERT INTO species (taxid, parent, spname, rank, track) VALUES (?, ?, ?, ?, ?);", (taxid, parentid, spname, rank, lineage))
    print
    db.commit()
| |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import functools
import logging
import multiprocessing
import traceback
from abc import abstractmethod
from collections import OrderedDict
from Queue import Queue
from concurrent.futures import ThreadPoolExecutor
from twitter.common.collections import maybe_list
from pants.base.exceptions import TaskError
from pants.engine.nodes import Noop, Return, State, Throw
from pants.engine.objects import SerializationError
from pants.engine.processing import StatefulPool
from pants.engine.storage import Cache, Storage
from pants.util.meta import AbstractClass
from pants.util.objects import datatype
try:
import cPickle as pickle
except ImportError:
import pickle
logger = logging.getLogger(__name__)
class InFlightException(Exception):
  # NOTE(review): not raised anywhere in this module; presumably signals a
  # step being scheduled while already in flight -- confirm against callers.
  pass
class StepBatchException(Exception):
  """Raised when the scheduler yields an inconsistent step batch (e.g. an
  empty batch while no work is in progress)."""
  pass
class ExecutionError(Exception):
  """Raised when a product request completes with unexpected Throw or Noop
  states for one or more roots."""
  pass
class Engine(AbstractClass):
  """An engine for running a pants command line."""

  class Result(datatype('Result', ['error', 'root_products'])):
    """Represents the result of a single engine run."""

    @classmethod
    def finished(cls, root_products):
      """Create a success or partial success result from a finished run.

      Runs can either finish with no errors, satisfying all promises, or they can partially finish
      if run in fail-slow mode producing as many products as possible.

      :param root_products: Mapping of root SelectNodes to their State values.
      :rtype: `Engine.Result`
      """
      return cls(error=None, root_products=root_products)

    @classmethod
    def failure(cls, error):
      """Create a failure result.

      A failure result represent a run with a fatal error.  It presents the error but no
      products.

      :param error: The execution error encountered.
      :type error: :class:`pants.base.exceptions.TaskError`
      :rtype: `Engine.Result`
      """
      return cls(error=error, root_products=None)

  def __init__(self, scheduler, storage=None, cache=None, use_cache=True):
    """
    :param scheduler: The local scheduler for creating execution graphs.
    :type scheduler: :class:`pants.engine.scheduler.LocalScheduler`
    :param storage: The storage instance for serializables keyed by their hashes.
    :type storage: :class:`pants.engine.storage.Storage`
    :param cache: The cache instance for storing execution results, by default it uses the same
      Storage instance if not specified.
    :type cache: :class:`pants.engine.storage.Cache`
    :param use_cache: True to enable usage of the cache. The cache incurs a large amount of
      overhead for small tasks, and needs TODO: further improvement.
    :type use_cache: bool
    """
    self._scheduler = scheduler
    self._storage = storage or Storage.create()
    # NOTE(review): the default cache is built from the *argument* `storage`
    # (possibly None), not from self._storage -- confirm this is intended.
    self._cache = cache or Cache.create(storage)
    self._use_cache = use_cache

  def execute(self, execution_request):
    """Executes the requested build.

    :param execution_request: The description of the goals to achieve.
    :type execution_request: :class:`ExecutionRequest`
    :returns: The result of the run.
    :rtype: :class:`Engine.Result`
    """
    try:
      self.reduce(execution_request)
      return self.Result.finished(self._scheduler.root_entries(execution_request))
    except TaskError as e:
      return self.Result.failure(e)

  def product_request(self, product, subjects):
    """Executes a request for a singular product type from the scheduler for one or more subjects
    and yields the products.

    :param class product: A product type for the request.
    :param list subjects: A list of subjects for the request.
    :yields: The requested products.
    :raises: :class:`ExecutionError` on unexpected Throw or Noop root states.
    """
    request = self._scheduler.execution_request([product], subjects)
    result = self.execute(request)
    if result.error:
      raise result.error
    result_items = self._scheduler.root_entries(request).items()

    # State validation.
    unknown_state_types = tuple(
      type(state) for _, state in result_items if type(state) not in (Throw, Return, Noop)
    )
    if unknown_state_types:
      State.raise_unrecognized(unknown_state_types)

    # Throw handling.
    # Surface every root Throw with its full trace in one error message.
    throw_roots = tuple(root for root, state in result_items if type(state) is Throw)
    if throw_roots:
      cumulative_trace = '\n'.join(
        '\n'.join(self._scheduler.product_graph.trace(root)) for root in throw_roots
      )
      stringified_throw_roots = ', '.join(str(x) for x in throw_roots)
      raise ExecutionError('received unexpected Throw state(s) for root(s): {}\n{}'
                           .format(stringified_throw_roots, cumulative_trace))

    # Noop handling.
    noop_roots = tuple(root for root, state in result_items if type(state) is Noop)
    if noop_roots:
      raise ExecutionError('received unexpected Noop state(s) for the following root(s): {}'
                           .format(noop_roots))

    # Return handling.
    returns = tuple(state.value for _, state in result_items if type(state) is Return)
    for return_value in returns:
      for computed_product in maybe_list(return_value, expected_type=product):
        yield computed_product

  def close(self):
    """Shutdown this engine instance, releasing resources it was using."""
    self._storage.close()
    self._cache.close()

  def cache_stats(self):
    """Returns cache stats for the engine."""
    return self._cache.get_stats()

  def _maybe_cache_get(self, node_entry, runnable):
    """If caching is enabled for the given Entry, create a key and perform a lookup.

    The sole purpose of a keyed request is to get a stable cache key, so we can sort
    keyed_request.dependencies by keys as opposed to requiring dep nodes to support compare.

    :returns: A tuple of a key and result, either of which may be None.
    """
    if not self._use_cache or not node_entry.node.is_cacheable:
      return None, None
    return self._cache.get(runnable)

  def _maybe_cache_put(self, key, result):
    # A None key means the lookup was skipped in _maybe_cache_get.
    if key is not None:
      self._cache.put(key, result)

  @abstractmethod
  def reduce(self, execution_request):
    """Reduce the given execution graph returning its root products.

    :param execution_request: The description of the goals to achieve.
    :type execution_request: :class:`ExecutionRequest`
    :returns: The root products promised by the execution graph.
    :rtype: dict of (:class:`Promise`, product)
    """
class LocalSerialEngine(Engine):
  """An engine that runs tasks locally and serially in-process."""

  def _execute_one(self, entry, runnable):
    """Run a single runnable (consulting the cache first); return (entry, state)."""
    key, cached = self._maybe_cache_get(entry, runnable)
    if cached is not None:
      return entry, cached
    try:
      state = Return(runnable.func(*runnable.args))
      self._maybe_cache_put(key, state)
    except Exception as e:
      state = Throw(e)
    return entry, state

  def reduce(self, execution_request):
    """Serially execute each batch the scheduler yields, feeding completed
    (entry, state) pairs back into the scheduling generator."""
    generator = self._scheduler.schedule(execution_request)
    for runnable_batch in generator:
      completed = [self._execute_one(entry, runnable)
                   for entry, runnable in runnable_batch]
      generator.send(completed)
def _try_pickle(obj):
try:
pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
except Exception as e:
# Unfortunately, pickle can raise things other than PickleError instances. For example it
# will raise ValueError when handed a lambda; so we handle the otherwise overly-broad
# `Exception` type here.
raise SerializationError('Failed to pickle {}: {}'.format(obj, e))
class ConcurrentEngine(Engine):
  # Abstract base for engines that execute steps concurrently.  Subclasses
  # must provide _submit_until/_await_one and define self._pool_size.

  def reduce(self, execution_request):
    """The main reduction loop."""
    # 1. Whenever we don't have enough work to saturate the pool, request more.
    # 2. Whenever the pool is not saturated, submit currently pending work.

    # Step instances which have not been submitted yet.
    pending_submission = OrderedDict()
    in_flight = dict()  # Dict from step id to a Promise for Steps that have been submitted.

    def submit_until(completed, n):
      # Submit pending work until only `n` items remain unsubmitted;
      # anything that completed synchronously is appended to `completed`.
      submitted, local_completed = self._submit_until(pending_submission, in_flight, n)
      completed.extend(local_completed)
      return submitted

    def await_one(completed):
      # Block for one in-flight step and record its result.
      completed.append(self._await_one(in_flight))

    generator = self._scheduler.schedule(execution_request)
    for step_batch in generator:
      completed = []
      if not step_batch:
        # A batch should only be empty if all dependency work is currently blocked/running.
        if not in_flight and not pending_submission:
          raise StepBatchException(
            'Scheduler provided an empty batch while no work is in progress!')
      else:
        # Submit and wait for work for as long as we're able to keep the pool saturated.
        pending_submission.update(step_batch)
        while submit_until(completed, self._pool_size) > 0:
          await_one(completed)

      # Await at least one entry per scheduling loop.
      submit_until(completed, 0)
      if in_flight:
        await_one(completed)

      # Indicate which items have completed.
      generator.send(completed)

    # The scheduler is exhausted; all work must have been drained.
    if pending_submission or in_flight:
      raise AssertionError('Engine loop completed with items: {}, {}'.format(pending_submission, in_flight))

  @abstractmethod
  def _submit_until(self, pending_submission, in_flight, n):
    """Submit pending while there's capacity, and more than `n` items in pending_submission.

    Returns a tuple of entries running in the background, and entries that completed immediately.
    """

  @abstractmethod
  def _await_one(self, in_flight):
    """Await one completed step, remove it from in_flight, and return it."""
class ThreadHybridEngine(ConcurrentEngine):
  """An engine that runs locally but allows nodes to be optionally run concurrently.

  The decision to run concurrently or in serial is determined by _is_async_node.
  For IO bound nodes we will run concurrently using threads.
  """

  def __init__(self, scheduler, storage, cache=None, threaded_node_types=tuple(),
               pool_size=None, debug=True):
    """
    :param scheduler: The local scheduler for creating execution graphs.
    :type scheduler: :class:`pants.engine.scheduler.LocalScheduler`
    :param storage: The storage instance for serializables keyed by their hashes.
    :type storage: :class:`pants.engine.storage.Storage`
    :param cache: The cache instance for storing execution results, by default it uses the same
      Storage instance if not specified.
    :type cache: :class:`pants.engine.storage.Cache`
    :param tuple threaded_node_types: Node types that will be processed using the thread pool.
    :param int pool_size: The number of worker threads to use; by default 2 threads per core
      will be used.
    :param bool debug: `True` to turn on pickling error debug mode (slower); True by default.
    """
    super(ThreadHybridEngine, self).__init__(scheduler, storage, cache)
    self._pool_size = pool_size if pool_size and pool_size > 0 else 2 * multiprocessing.cpu_count()
    # Keep track of futures so we can cleanup at the end.
    self._pending = set()
    # Finished futures are queued here by the done-callback and drained by _await_one.
    self._processed_queue = Queue()
    self._async_nodes = threaded_node_types
    self._node_builder = scheduler.node_builder
    self._state = (self._node_builder, storage)
    self._pool = ThreadPoolExecutor(max_workers=self._pool_size)
    self._debug = debug

  def _is_async_node(self, node):
    """Override default behavior and handle specific nodes asynchronously.

    Returns True when `node` is an instance of one of the configured
    `threaded_node_types`.
    """
    return isinstance(node, self._async_nodes)

  def _maybe_cache_step(self, step_request):
    # Return (step_id, cached_result_or_None) for a single request.
    # NOTE(review): this helper appears unused within this class — execution goes
    # through `_maybe_cache_get` instead (presumably inherited); confirm before removing.
    if step_request.node.is_cacheable:
      return step_request.step_id, self._cache.get(step_request)
    else:
      return step_request.step_id, None

  def _execute_step(self, step_entry, runnable):
    """A function to help support local step execution.

    :param step_entry: Entry that the step is for.
    :param runnable: Runnable to execute.
    :return: Tuple of (step_entry, result) where result is a Return on success or a
      Throw wrapping the raised exception.
    """
    # `_maybe_cache_get`/`_maybe_cache_put` are presumably provided by a superclass —
    # not visible in this file; verify they return (key, hit_or_None) / accept (key, result).
    key, result = self._maybe_cache_get(step_entry, runnable)
    if result is None:
      try:
        result = Return(runnable.func(*runnable.args))
        self._maybe_cache_put(key, result)
      except Exception as e:
        # A failing step is a legitimate result, represented as a Throw.
        result = Throw(e)
    return step_entry, result

  def _processed_node_callback(self, finished_future):
    # Runs on the executor's thread when a future completes: queue it for
    # consumption by _await_one and stop tracking it for close()-time cleanup.
    self._processed_queue.put(finished_future)
    self._pending.remove(finished_future)

  def _submit_until(self, pending_submission, in_flight, n):
    """Submit pending while there's capacity, and more than `n` items in pending_submission.

    Async nodes are handed to the thread pool; all other nodes run inline on the
    calling thread and are returned as already-completed entries.
    """
    to_submit = min(len(pending_submission) - n, self._pool_size - len(in_flight))
    submitted = 0
    completed = []
    for _ in range(to_submit):
      # FIFO: take the oldest pending step first.
      step, runnable = pending_submission.popitem(last=False)
      if self._is_async_node(step.node):
        # Run in a future.
        if step in in_flight:
          raise InFlightException('{} is already in_flight!'.format(step))
        future = self._pool.submit(functools.partial(self._execute_step, step, runnable))
        in_flight[step] = future
        self._pending.add(future)
        future.add_done_callback(self._processed_node_callback)
        submitted += 1
      else:
        # Run inline.
        completed.append(self._execute_step(step, runnable))
    return submitted, completed

  def _await_one(self, in_flight):
    """Await one completed step, and remove it from in_flight."""
    if not in_flight:
      raise InFlightException('Awaited an empty pool!')
    # Blocks until the done-callback queues a finished future, then unwraps the
    # (entry, result) tuple produced by _execute_step.
    entry, result = self._processed_queue.get().result()
    if isinstance(result, Exception):
      raise result
    in_flight.pop(entry)
    return entry, result

  def close(self):
    """Cleanup thread pool."""
    # Best-effort cancel of not-yet-started futures before waiting on shutdown.
    for f in self._pending:
      f.cancel()
    self._pool.shutdown()  # Wait for pool to cleanup before we cleanup storage.
    super(ThreadHybridEngine, self).close()
def _execute_step(debug, process_state, step):
  """A picklable top-level function to help support local multiprocessing uses.

  Executes the Step for the given node builder and storage, and returns a tuple of step id and
  result key (or the escaped exception itself). Since step execution is only on cache misses,
  this also saves the result to the cache.

  :param bool debug: If True, eagerly pickle-check results so serialization errors
    surface with a useful traceback.
  :param process_state: Tuple of (storage, cache) as produced by `_process_initializer`.
  :param step: Tuple of (step_id, runnable_key, is_cacheable).
  """
  storage, cache = process_state
  step_id, runnable_key, is_cacheable = step

  def execute():
    try:
      runnable = storage.get_state(runnable_key)
      result = Return(runnable.func(*runnable.args))
      if debug:
        _try_pickle(result)
      result_key = storage.put_state(result)
      if is_cacheable:
        cache.put(runnable_key, result)
    except Exception as e:
      # A failure while running the step is a legitimate result: store it as a Throw.
      result_key = storage.put_state(Throw(e))
    return result_key

  try:
    return step_id, execute()
  except Exception as e:
    # Trap any exception raised by the execution node that bubbles up, and
    # pass this back to our main thread for handling.
    # Logger.warn is a deprecated alias; use warning().
    logger.warning(traceback.format_exc())
    return step_id, e
def _process_initializer(storage):
  """Another picklable top-level function that provides multi-processes' initial states.

  States are returned as a tuple. States are `Closable` so they can be cleaned up once
  processes are done.
  """
  cloned_storage = Storage.clone(storage)
  cache = Cache.create(storage=cloned_storage)
  return (cloned_storage, cache)
class LocalMultiprocessEngine(ConcurrentEngine):
  """An engine that runs tasks locally and in parallel when possible using a process pool.

  This implementation stores all process inputs in Storage and executes cache lookups before
  submitting a task to another process. This use of Storage means that only a Key for the
  Runnable is sent (directly) across process boundaries, and avoids sending the same data across
  process boundaries repeatedly.
  """

  def __init__(self, scheduler, storage=None, cache=None, pool_size=None, debug=True):
    """
    :param scheduler: The local scheduler for creating execution graphs.
    :type scheduler: :class:`pants.engine.scheduler.LocalScheduler`
    :param storage: The storage instance for serializables keyed by their hashes.
    :type storage: :class:`pants.engine.storage.Storage`
    :param cache: The cache instance for storing execution results, by default it uses the same
      Storage instance if not specified.
    :type cache: :class:`pants.engine.storage.Cache`
    :param int pool_size: The number of worker processes to use; by default 2 processes per core will
      be used.
    :param bool debug: `True` to turn on pickling error debug mode (slower); True by default.
    """
    # This is the only place where non in-memory storage is needed, create one if not specified.
    storage = storage or Storage.create(in_memory=False)
    super(LocalMultiprocessEngine, self).__init__(scheduler, storage, cache)
    self._pool_size = pool_size if pool_size and pool_size > 0 else 2 * multiprocessing.cpu_count()
    # Bind the debug flag now; the per-step fields are supplied on each submission.
    execute_step = functools.partial(_execute_step, debug)
    self._processed_queue = Queue()
    self.node_builder = scheduler.node_builder
    process_initializer = functools.partial(_process_initializer, self._storage)
    self._pool = StatefulPool(self._pool_size, process_initializer, execute_step)
    self._debug = debug
    self._pool.start()

  def _submit(self, step_id, runnable_key, is_cacheable):
    # Hand one picklable (step_id, runnable_key, is_cacheable) tuple to the pool.
    entry = (step_id, runnable_key, is_cacheable)
    if self._debug:
      # Surface pickling problems eagerly here rather than inside a worker process.
      _try_pickle(entry)
    self._pool.submit(entry)

  def close(self):
    # Shut down the worker pool; superclass cleanup is handled elsewhere.
    self._pool.close()

  def _submit_until(self, pending_submission, in_flight, n):
    """Submit pending while there's capacity, and more than `n` items pending_submission.

    Cache hits complete immediately and never enter the pool.
    """
    to_submit = min(len(pending_submission) - n, self._pool_size - len(in_flight))
    submitted = 0
    completed = []
    for _ in range(to_submit):
      step, runnable = pending_submission.popitem(last=False)
      if step in in_flight:
        raise InFlightException('{} is already in_flight!'.format(step))
      # We eagerly compute a key for the Runnable, because it allows us to avoid sending the same
      # data across process boundaries repeatedly.
      runnable_key = self._storage.put_state(runnable)
      is_cacheable = self._use_cache and step.node.is_cacheable
      result = self._cache.get_for_key(runnable_key) if is_cacheable else None
      if result is not None:
        # Skip in_flight on cache hit.
        completed.append((step, result))
      else:
        # in_flight is keyed by id(step) — presumably because only the id round-trips
        # through the pool; confirm against StatefulPool's protocol.
        step_id = id(step)
        in_flight[step_id] = step
        self._submit(step_id, runnable_key, is_cacheable)
        submitted += 1
    return submitted, completed

  def _await_one(self, in_flight):
    """Await one completed step, and remove it from in_flight."""
    if not in_flight:
      raise InFlightException('Awaited an empty pool!')
    step_id, result_key = self._pool.await_one_result()
    if isinstance(result_key, Exception):
      # The worker returned the exception itself instead of a result key.
      raise result_key
    if step_id not in in_flight:
      raise InFlightException(
        'Received unexpected work from the Executor: {} vs {}'.format(step_id, in_flight.keys()))
    return in_flight.pop(step_id), self._storage.get_state(result_key)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""File logging handler for tasks."""
import logging
import os
from pathlib import Path
from typing import TYPE_CHECKING, Optional
import requests
from airflow.configuration import AirflowConfigException, conf
from airflow.utils.helpers import parse_template_string
if TYPE_CHECKING:
from airflow.models import TaskInstance
class FileTaskHandler(logging.Handler):
    """
    FileTaskHandler is a python log handler that handles and reads
    task instance logs. It creates and delegates log handling
    to `logging.FileHandler` after receiving task instance context.
    It reads logs from task instance's host machine.

    :param base_log_folder: Base log folder to place logs.
    :param filename_template: template filename string (either a ``str.format``
        template or a Jinja template, auto-detected by ``parse_template_string``)
    """

    def __init__(self, base_log_folder: str, filename_template: str):
        super().__init__()
        # The real FileHandler is created lazily in set_context(); until then
        # emit/flush/close are no-ops.
        self.handler = None  # type: Optional[logging.FileHandler]
        self.local_base = base_log_folder
        # Exactly one of these is non-None, depending on the template style.
        self.filename_template, self.filename_jinja_template = parse_template_string(filename_template)

    def set_context(self, ti: "TaskInstance"):
        """
        Provide task_instance context to airflow task handler.

        Creates the per-task log file and the FileHandler that writes to it.

        :param ti: task instance object
        """
        local_loc = self._init_file(ti)
        self.handler = logging.FileHandler(local_loc, encoding='utf-8')
        if self.formatter:
            self.handler.setFormatter(self.formatter)
        self.handler.setLevel(self.level)

    def emit(self, record):
        # Delegate to the underlying FileHandler once context has been set.
        if self.handler:
            self.handler.emit(record)

    def flush(self):
        if self.handler:
            self.handler.flush()

    def close(self):
        if self.handler:
            self.handler.close()

    def _render_filename(self, ti, try_number):
        # Render the relative log path using either the Jinja or str.format template.
        if self.filename_jinja_template:
            if hasattr(ti, 'task'):
                # A bound task gives us the full template context.
                jinja_context = ti.get_template_context()
                jinja_context['try_number'] = try_number
            else:
                # Minimal context for task instances without an attached task.
                jinja_context = {
                    'ti': ti,
                    'ts': ti.execution_date.isoformat(),
                    'try_number': try_number,
                }
            return self.filename_jinja_template.render(**jinja_context)
        return self.filename_template.format(
            dag_id=ti.dag_id,
            task_id=ti.task_id,
            execution_date=ti.execution_date.isoformat(),
            try_number=try_number,
        )

    def _read_grouped_logs(self):
        # Subclasses (e.g. es_task_handler) return True when logs come back
        # already grouped by host; see read() below.
        return False

    def _read(self, ti, try_number, metadata=None):  # pylint: disable=unused-argument
        """
        Template method that contains custom logic of reading
        logs given the try_number.

        Tries, in order: the local file, the Kubernetes pod log (when running
        under KubernetesExecutor), and finally the worker's log-serving HTTP
        endpoint.

        :param ti: task instance record
        :param try_number: current try_number to read log from
        :param metadata: log metadata,
                         can be used for streaming log reading and auto-tailing.
        :return: log message as a string and metadata.
        """
        # Task instance here might be different from task instance when
        # initializing the handler. Thus explicitly getting log location
        # is needed to get correct log path.
        log_relative_path = self._render_filename(ti, try_number)
        location = os.path.join(self.local_base, log_relative_path)
        log = ""
        if os.path.exists(location):
            try:
                with open(location) as file:
                    log += f"*** Reading local file: {location}\n"
                    log += "".join(file.readlines())
            except Exception as e:  # pylint: disable=broad-except
                log = f"*** Failed to load local log file: {location}\n"
                log += "*** {}\n".format(str(e))
        elif conf.get('core', 'executor') == 'KubernetesExecutor':  # pylint: disable=too-many-nested-blocks
            try:
                from airflow.kubernetes.kube_client import get_kube_client

                kube_client = get_kube_client()
                if len(ti.hostname) >= 63:
                    # Kubernetes takes the pod name and truncates it for the hostname. This truncated hostname
                    # is returned for the fqdn to comply with the 63 character limit imposed by DNS standards
                    # on any label of a FQDN.
                    pod_list = kube_client.list_namespaced_pod(conf.get('kubernetes', 'namespace'))
                    matches = [
                        pod.metadata.name
                        for pod in pod_list.items
                        if pod.metadata.name.startswith(ti.hostname)
                    ]
                    # Only restore the full pod name when the match is unambiguous.
                    if len(matches) == 1:
                        if len(matches[0]) > len(ti.hostname):
                            ti.hostname = matches[0]
                log += '*** Trying to get logs (last 100 lines) from worker pod {} ***\n\n'.format(
                    ti.hostname
                )
                res = kube_client.read_namespaced_pod_log(
                    name=ti.hostname,
                    namespace=conf.get('kubernetes', 'namespace'),
                    container='base',
                    follow=False,
                    tail_lines=100,
                    _preload_content=False,
                )
                for line in res:
                    log += line.decode()
            except Exception as f:  # pylint: disable=broad-except
                log += '*** Unable to fetch logs from worker pod {} ***\n{}\n\n'.format(ti.hostname, str(f))
        else:
            # NOTE(review): os.path.join on a URL template is fragile (would use
            # backslashes on Windows) — works on POSIX workers; confirm if that
            # assumption still holds.
            url = os.path.join("http://{ti.hostname}:{worker_log_server_port}/log", log_relative_path).format(
                ti=ti, worker_log_server_port=conf.get('celery', 'WORKER_LOG_SERVER_PORT')
            )
            log += f"*** Log file does not exist: {location}\n"
            log += f"*** Fetching from: {url}\n"
            try:
                timeout = None  # No timeout
                try:
                    timeout = conf.getint('webserver', 'log_fetch_timeout_sec')
                except (AirflowConfigException, ValueError):
                    pass

                response = requests.get(url, timeout=timeout)
                response.encoding = "utf-8"

                # Check if the resource was properly fetched
                response.raise_for_status()

                log += '\n' + response.text
            except Exception as e:  # pylint: disable=broad-except
                log += "*** Failed to fetch log file from worker. {}\n".format(str(e))

        return log, {'end_of_log': True}

    def read(self, task_instance, try_number=None, metadata=None):
        """
        Read logs of given task instance from local machine.

        :param task_instance: task instance object
        :param try_number: task instance try_number to read logs from. If None
                           it returns all logs separated by try_number
        :param metadata: log metadata,
                         can be used for streaming log reading and auto-tailing.
        :return: a list of listed tuples which order log string by host
        """
        # Task instance increments its try number when it starts to run.
        # So the log for a particular task try will only show up when
        # try number gets incremented in DB, i.e logs produced the time
        # after cli run and before try_number + 1 in DB will not be displayed.
        if try_number is None:
            next_try = task_instance.next_try_number
            try_numbers = list(range(1, next_try))
        elif try_number < 1:
            logs = [
                [('default_host', f'Error fetching the logs. Try number {try_number} is invalid.')],
            ]
            return logs
        else:
            try_numbers = [try_number]

        # Each slot is reassigned below, so sharing the initial {} is harmless.
        logs = [''] * len(try_numbers)
        metadata_array = [{}] * len(try_numbers)
        for i, try_number_element in enumerate(try_numbers):
            log, metadata = self._read(task_instance, try_number_element, metadata)
            # es_task_handler return logs grouped by host. wrap other handler returning log string
            # with default/ empty host so that UI can render the response in the same way
            logs[i] = log if self._read_grouped_logs() else [(task_instance.hostname, log)]
            metadata_array[i] = metadata

        return logs, metadata_array

    def _init_file(self, ti):
        """
        Create log directory and give it correct permissions.

        :param ti: task instance object
        :return: relative log path of the given task instance
        """
        # To handle log writing when tasks are impersonated, the log files need to
        # be writable by the user that runs the Airflow command and the user
        # that is impersonated. This is mainly to handle corner cases with the
        # SubDagOperator. When the SubDagOperator is run, all of the operators
        # run under the impersonated user and create appropriate log files
        # as the impersonated user. However, if the user manually runs tasks
        # of the SubDagOperator through the UI, then the log files are created
        # by the user that runs the Airflow command. For example, the Airflow
        # run command may be run by the `airflow_sudoable` user, but the Airflow
        # tasks may be run by the `airflow` user. If the log files are not
        # writable by both users, then it's possible that re-running a task
        # via the UI (or vice versa) results in a permission error as the task
        # tries to write to a log file created by the other user.
        relative_path = self._render_filename(ti, ti.try_number)
        full_path = os.path.join(self.local_base, relative_path)
        directory = os.path.dirname(full_path)
        # Create the log file and give it group writable permissions
        # TODO(aoen): Make log dirs and logs globally readable for now since the SubDag
        # operator is not compatible with impersonation (e.g. if a Celery executor is used
        # for a SubDag operator and the SubDag operator has a different owner than the
        # parent DAG)
        Path(directory).mkdir(mode=0o777, parents=True, exist_ok=True)
        if not os.path.exists(full_path):
            open(full_path, "a").close()
            # TODO: Investigate using 444 instead of 666.
            os.chmod(full_path, 0o666)

        return full_path
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
from subprocess import check_output, STDOUT
from time import time, strftime, sleep
import RPi.GPIO as io
# DEBUG Mode Settings (currently unused placeholder)
DEBUG = ''

# Hardware bring-up checklist:
# [X]: Setup RTC
# [ ]: Setup LCD
# Sanity Checks
# [ ]: Verify the RTC is present (query the dev)
# [ ]: Verify the LCD is present (query the dev)
# [X]: Verify the printer is present (lpstat -p)
# [X]: Verify the switch is present (at least one input is on)
# [ ]: Verify the button is present (check the light?)

# Assign LCD pins
# 4-6 pins here
# LCD support is not wired up yet; code guarded by HAS_LCD is inactive.
HAS_LCD = False

# Assign button and switch pins (BCM numbering)
btn_pin = 16
sw_pos1_pin = 23
sw_pos2_pin = 24
sw_pos3_pin = 25

# Setup GPIO, pull-down resistors from 3V3 (inputs read False by default,
# True when driven high by the button/switch)
io.setmode(io.BCM)
io.setup(btn_pin, io.IN, pull_up_down=io.PUD_DOWN)
io.setup(sw_pos1_pin, io.IN, pull_up_down=io.PUD_DOWN)
io.setup(sw_pos2_pin, io.IN, pull_up_down=io.PUD_DOWN)
io.setup(sw_pos3_pin, io.IN, pull_up_down=io.PUD_DOWN)
def checkSwitch():
    """Verify the selector switch is connected; exit the program if not.

    With pull-down resistors, a connected switch drives at least one position
    pin HIGH (setPos() below treats a HIGH input as the selected position).
    The previous check was inverted — it set `sw = True` when a pin read LOW,
    which with pull-downs reported "Switch detected" even with no switch wired.
    """
    sw = False
    if io.input(sw_pos1_pin):
        sw = True
    elif io.input(sw_pos2_pin):
        sw = True
    elif io.input(sw_pos3_pin):
        sw = True
    if sw:
        print("\nSwitch detected")
    else:
        print("\nSwitch not detected!")
        if HAS_LCD:
            # NOTE(review): `lcd` is never defined in this script; enabling
            # HAS_LCD would raise NameError here. Wire up the LCD object first.
            lcd.clear()
            lcd.set_color(1, 0, 0)
            lcd.set_cursor(0, 0)
            lcd.message("Switch Not Detected!")
            lcd.set_cursor(1, 0)
            lcd.message("Exiting")
        exit_program()
def setPos():
    """Read the selector switch and return its position (1, 2, or 3).

    Returns the empty string when no known combination of inputs is high.
    Position 1 is signalled by pins 1 and 2 both reading high.
    """
    if io.input(sw_pos1_pin) and io.input(sw_pos2_pin):
        return 1
    if io.input(sw_pos3_pin):
        return 3
    if io.input(sw_pos2_pin):
        return 2
    # print("Pos: %s" % pos)
    return ''
def sw_callback(channel):
    """GPIO edge-detect callback: re-resolve part numbers when the switch moves.

    :param channel: BCM pin number that fired the event (supplied by RPi.GPIO;
        unused here since setPartNumber re-reads all switch pins).
    """
    # When the switch changes, update the part numbers
    print("\nPart Number Changed\n")
    setPartNumber()
def setPartNumber():
    """Map the current switch position to LH/RH part numbers and descriptions.

    Returns a 4-tuple of (lh_pn, lh_pd, rh_pn, rh_pd); all empty strings when
    the switch position is unknown.
    """
    position = setPos()
    part_table = {
        1: ('12345LH', 'LH PART NUMBER 1', '12345RH', 'RH PART NUMBER 1'),
        2: ('23456LH', 'LH PART NUMBER 2', '23456RH', 'RH PART NUMBER 2'),
        3: ('34567LH', 'LH PART NUMBER 3', '34567RH', 'RH PART NUMBER 3'),
    }
    lh_pn, lh_pd, rh_pn, rh_pd = part_table.get(position, ('', '', '', ''))
    print("\nSelected Parts:")
    print(lh_pn, lh_pd)
    print(rh_pn, rh_pd)
    return lh_pn, lh_pd, rh_pn, rh_pd
def setPrinter():
    """Verify both Zebra label printers are configured in CUPS.

    Exits the program when either printer queue is missing.

    :return: tuple of (lh_printer, rh_printer) CUPS queue names
    """
    def _require_printer(queue):
        # `exit 0` keeps grep's non-zero exit (no match) from aborting the shell;
        # an empty result means the queue is not configured.
        found = check_output("lpstat -p | grep {0}; exit 0".format(queue),
                             stderr=STDOUT, shell=True)
        if not len(found) > 0:
            print("{0} Label Printer not found!".format(queue))
            # lcdError("Err: Printer Missing")  # LCD support not wired up yet
            exit_program()
        return queue

    lh_printer = _require_printer('ZT230-LH')
    rh_printer = _require_printer('ZT230-RH')
    print("\nConnected Printers:")
    print(lh_printer, rh_printer)
    return lh_printer, rh_printer
def setSerialNumberTime(pn, curtime=None):
    """Build a serial number and human-readable timestamp for a part.

    The serial number is the last 8 characters of the uppercased hex epoch
    timestamp, followed by 'P' and the part number.

    :param pn: part number string to embed in the serial number
    :param curtime: epoch timestamp to use; defaults to the current time
        (parameter added for testability; existing callers are unaffected)
    :return: tuple of (serial_number, localtime_string)
    """
    if curtime is None:
        curtime = time()
    # hex() already returns a str; keep only the trailing 8 hex digits so the
    # serial stays fixed-width.
    hexstr = hex(int(curtime)).upper()[-8:]
    localtime = strftime('%m/%d/%Y %H:%M:%S')
    sn = hexstr + 'P' + pn
    return sn, localtime
def printLabel():
    """Generate and print one LH and one RH part label on the Zebra printers.

    Builds an EPL2 job per side, writes each to /tmp, and submits them raw
    via `lpr` so CUPS does not re-render the printer commands.
    """
    lh_printer, rh_printer = setPrinter()
    lh_pn, lh_pd, rh_pn, rh_pd = setPartNumber()
    lh_sn, localtime = setSerialNumberTime(lh_pn)
    rh_sn, localtime = setSerialNumberTime(rh_pn)

    # Create LH label
    # TODO: Refactor: DRY?
    # NOTE(review): LH uses density D7 while RH uses D5 — presumably per-printer
    # darkness calibration; confirm before unifying these templates.
    lh_label = """N
q406
D7
S2
A20,20,0,4,1,1,N,"Part-# {pn}"
A20,50,0,2,1,1,N,"{pd}"
B90,75,0,1,1,3,60,N,"{sn}"
A90,140,0,1,1,1,N,"S/N {sn}"
A50,155,0,4,1,1,N,"{lt}"
P1
""".format(pn=lh_pn, pd=lh_pd, sn=lh_sn,
           lt=localtime)
    lh_epl_file = '/tmp/lh_label.epl'
    with open(lh_epl_file, 'w') as f:
        f.write(lh_label)

    # Create RH label
    rh_label = """N
q406
D5
S2
A20,20,0,4,1,1,N,"Part-# {pn}"
A20,50,0,2,1,1,N,"{pd}"
B90,75,0,1,1,3,60,N,"{sn}"
A90,140,0,1,1,1,N,"S/N {sn}"
A50,155,0,4,1,1,N,"{lt}"
P1
""".format(pn=rh_pn, pd=rh_pd, sn=rh_sn,
           lt=localtime)
    rh_epl_file = '/tmp/rh_label.epl'
    with open(rh_epl_file, 'w') as f:
        f.write(rh_label)

    # Print labels
    print("\n##############################")
    print("Printing Part Numbers:")
    print(lh_pn, lh_pd)
    print(rh_pn, rh_pd)
    print(localtime)
    print("##############################\n")
    # `-o raw` sends the EPL bytes straight to the printer queue.
    lh_cmd = "lpr -P " + lh_printer + " -o raw " + lh_epl_file
    rh_cmd = "lpr -P " + rh_printer + " -o raw " + rh_epl_file
    os.system(lh_cmd)
    os.system(rh_cmd)
    try:
        # Give lpr a moment to pick the files up before deleting them.
        sleep(1)
        os.remove(lh_epl_file)
        os.remove(rh_epl_file)
    except OSError:
        pass
def exit_program():
    """Release GPIO resources and terminate the process."""
    print("\nExiting")
    io.cleanup()
    sys.exit()
def main():
    """Run startup checks, register switch handlers, and loop on the print button."""
    # Perform checks on startup
    print("\n##############################")
    print("Starting Up...\n")
    checkSwitch()
    setPrinter()
    setPartNumber()
    print("##############################\n")

    # Create switch events: any edge on a position pin re-resolves part numbers.
    io.add_event_detect(sw_pos1_pin, io.BOTH,
                        callback=sw_callback,
                        bouncetime=100)
    io.add_event_detect(sw_pos2_pin, io.BOTH,
                        callback=sw_callback,
                        bouncetime=100)
    io.add_event_detect(sw_pos3_pin, io.BOTH,
                        callback=sw_callback,
                        bouncetime=100)
    print("Startup Complete")

    while True:
        try:
            print("\nWaiting for Print Button")
            # NOTE(review): waits for a FALLING edge — with PUD_DOWN wiring this
            # fires on button *release*, not press; confirm against the hardware.
            io.wait_for_edge(btn_pin, io.FALLING, bouncetime=300)
            print("Button Pressed")
            printLabel()
        except KeyboardInterrupt:
            # NOTE(review): io.cleanup() runs again inside exit_program();
            # harmless but redundant.
            io.cleanup()
            exit_program()


if __name__ == '__main__':
    main()
| |
#
# CORE
# Copyright (c)2012-2013 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
sdt.py: Scripted Display Tool (SDT3D) helper
'''
from core.constants import *
from core.api import coreapi
from .coreobj import PyCoreNet, PyCoreObj
from core.netns import nodes
import socket
class Sdt(object):
    ''' Helper class for exporting session objects to NRL's SDT3D.
    The connect() method initializes the display, and can be invoked
    when a node position or link has changed.
    '''
    DEFAULT_SDT_PORT = 5000
    # default altitude (in meters) for flyto view
    DEFAULT_ALT = 2500
    # TODO: read in user's nodes.conf here; below are default node types
    # from the GUI
    DEFAULT_SPRITES = [('router', 'router.gif'), ('host', 'host.gif'),
                       ('PC', 'pc.gif'), ('mdr', 'mdr.gif'),
                       ('prouter', 'router_green.gif'), ('xen', 'xen.gif'),
                       ('hub', 'hub.gif'), ('lanswitch', 'lanswitch.gif'),
                       ('wlan', 'wlan.gif'), ('rj45', 'rj45.gif'),
                       ('tunnel', 'tunnel.gif'),
                       ]

    class Bunch:
        ''' Helper class for recording a collection of attributes.
        '''
        def __init__(self, **kwds):
            self.__dict__.update(kwds)

    def __init__(self, session):
        self.session = session
        self.sock = None
        self.connected = False
        # Report only the first connection error; reset on shutdown().
        self.showerror = True
        self.verbose = self.session.getcfgitembool('verbose', False)
        self.address = ("127.0.0.1", self.DEFAULT_SDT_PORT)
        # node information for remote nodes not in session._objs
        # local nodes also appear here since their obj may not exist yet
        self.remotes = {}
        session.broker.handlers += (self.handledistributed, )

    def is_enabled(self):
        ''' Return True when the session's `enablesdt` option is set to '1'.
        '''
        if not hasattr(self.session.options, 'enablesdt'):
            return False
        if self.session.options.enablesdt == '1':
            return True
        return False

    def connect(self, flags=0):
        ''' Connect to the SDT UDP port if enabled.
        '''
        if not self.is_enabled():
            return False
        if self.connected:
            return True
        if self.session.getstate() == coreapi.CORE_EVENT_SHUTDOWN_STATE:
            return False
        if self.showerror:
            self.session.info("connecting to SDT at %s:%s" % self.address)
        if self.sock is None:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                self.sock.connect(self.address)
            except Exception as e:
                self.session.warn("SDT socket connect error: %s" % e)
                return False
        if not self.initialize():
            return False
        self.connected = True
        # refresh all objects in SDT3D when connecting after session start
        # NOTE(review): sendobjs() has no return statement, so this refresh
        # path always returns False on the first call; `connected` is already
        # True so subsequent calls succeed — confirm intended behavior.
        if not flags & coreapi.CORE_API_ADD_FLAG:
            if not self.sendobjs():
                return False
        return True

    def initialize(self):
        ''' Load icon sprites, and fly to the reference point location on
        the virtual globe.
        '''
        if not self.cmd('path "%s/icons/normal"' % CORE_DATA_DIR):
            return False
        # send node type to icon mappings
        for (sprite_type, icon) in self.DEFAULT_SPRITES:
            if not self.cmd('sprite %s image %s' % (sprite_type, icon)):
                return False
        # Fixed: longitude previously shadowed the builtin `int`.
        (lat, lon) = self.session.location.refgeo[:2]
        return self.cmd('flyto %.6f,%.6f,%d' % (lon, lat, self.DEFAULT_ALT))

    def disconnect(self):
        ''' Close the UDP socket and mark the display disconnected.
        '''
        try:
            self.sock.close()
        except Exception:
            # Socket may be None or already closed.
            pass
        self.sock = None
        self.connected = False

    def shutdown(self):
        ''' Invoked from Session.shutdown() and Session.checkshutdown().
        '''
        self.cmd('clear all')
        self.disconnect()
        self.showerror = True

    def cmd(self, cmdstr):
        ''' Send an SDT command over a UDP socket. socket.sendall() is used
        as opposed to socket.sendto() because an exception is raised when
        there is no socket listener.
        '''
        if self.sock is None:
            return False
        try:
            if self.verbose:
                self.session.info("sdt: %s" % cmdstr)
            self.sock.sendall("%s\n" % cmdstr)
            return True
        except Exception as e:
            if self.showerror:
                self.session.warn("SDT connection error: %s" % e)
                self.showerror = False
            self.connected = False
            return False

    def updatenode(self, nodenum, flags, x, y, z,
                   name=None, type=None, icon=None):
        ''' Node is updated from a Node Message or mobility script.
        '''
        if not self.connect():
            return
        if flags & coreapi.CORE_API_DEL_FLAG:
            self.cmd('delete node,%d' % nodenum)
            return
        if x is None or y is None:
            return
        # Fixed: longitude previously shadowed the builtin `int`.
        (lat, lon, alt) = self.session.location.getgeo(x, y, z)
        pos = "pos %.6f,%.6f,%.6f" % (lon, lat, alt)
        if flags & coreapi.CORE_API_ADD_FLAG:
            if icon is not None:
                type = name
                icon = icon.replace("$CORE_DATA_DIR", CORE_DATA_DIR)
                icon = icon.replace("$CORE_CONF_DIR", CORE_CONF_DIR)
                self.cmd('sprite %s image %s' % (type, icon))
            self.cmd('node %d type %s label on,"%s" %s' %
                     (nodenum, type, name, pos))
        else:
            self.cmd('node %d %s' % (nodenum, pos))

    def updatenodegeo(self, nodenum, lat, long, alt):
        ''' Node is updated upon receiving an EMANE Location Event.
        TODO: received Node Message with lat/long/alt.
        '''
        if not self.connect():
            return
        # Fixed: this previously formatted the *builtin* `int` type instead of
        # the `long` longitude parameter, raising a TypeError on every call.
        pos = "pos %.6f,%.6f,%.6f" % (long, lat, alt)
        self.cmd('node %d %s' % (nodenum, pos))

    def updatelink(self, node1num, node2num, flags, wireless=False):
        ''' Link is updated from a Link Message or by a wireless model.
        '''
        if node1num is None or node2num is None:
            return
        if not self.connect():
            return
        if flags & coreapi.CORE_API_DEL_FLAG:
            self.cmd('delete link,%s,%s' % (node1num, node2num))
        elif flags & coreapi.CORE_API_ADD_FLAG:
            if wireless:
                attr = " line green,2"
            else:
                attr = " line red,2"
            self.cmd('link %s,%s%s' % (node1num, node2num, attr))

    def sendobjs(self):
        ''' Session has already started, and the SDT3D GUI later connects.
        Send all node and link objects for display. Otherwise, nodes and
        links will only be drawn when they have been updated (e.g. moved).
        '''
        nets = []
        with self.session._objslock:
            # First pass: draw all nodes and remember the networks.
            for obj in self.session.objs():
                if isinstance(obj, PyCoreNet):
                    nets.append(obj)
                if not isinstance(obj, PyCoreObj):
                    continue
                (x, y, z) = obj.getposition()
                if x is None or y is None:
                    continue
                self.updatenode(obj.objid, coreapi.CORE_API_ADD_FLAG, x, y, z,
                                obj.name, obj.type, obj.icon)
            for nodenum in sorted(self.remotes.keys()):
                r = self.remotes[nodenum]
                (x, y, z) = r.pos
                self.updatenode(nodenum, coreapi.CORE_API_ADD_FLAG, x, y, z,
                                r.name, r.type, r.icon)
            # Second pass: draw links.
            for net in nets:
                # use tolinkmsgs() to handle various types of links
                msgs = net.tolinkmsgs(flags=coreapi.CORE_API_ADD_FLAG)
                for msg in msgs:
                    msghdr = msg[:coreapi.CoreMessage.hdrsiz]
                    flags = coreapi.CoreMessage.unpackhdr(msghdr)[1]
                    m = coreapi.CoreLinkMessage(flags, msghdr,
                                                msg[coreapi.CoreMessage.hdrsiz:])
                    n1num = m.gettlv(coreapi.CORE_TLV_LINK_N1NUMBER)
                    n2num = m.gettlv(coreapi.CORE_TLV_LINK_N2NUMBER)
                    link_msg_type = m.gettlv(coreapi.CORE_TLV_LINK_TYPE)
                    if isinstance(net, nodes.WlanNode) or \
                       isinstance(net, nodes.EmaneNode):
                        # Do not draw the WLAN/EMANE node's own membership link.
                        if (n1num == net.objid):
                            continue
                    wl = (link_msg_type == coreapi.CORE_LINK_WIRELESS)
                    self.updatelink(n1num, n2num, coreapi.CORE_API_ADD_FLAG, wl)
            for n1num in sorted(self.remotes.keys()):
                r = self.remotes[n1num]
                for (n2num, wl) in r.links:
                    self.updatelink(n1num, n2num, coreapi.CORE_API_ADD_FLAG, wl)

    def handledistributed(self, msg):
        ''' Broker handler for processing CORE API messages as they are
        received. This is used to snoop the Node messages and update
        node positions.
        '''
        if msg.msgtype == coreapi.CORE_API_LINK_MSG:
            return self.handlelinkmsg(msg)
        elif msg.msgtype == coreapi.CORE_API_NODE_MSG:
            return self.handlenodemsg(msg)

    def handlenodemsg(self, msg):
        ''' Process a Node Message to add/delete or move a node on
        the SDT display. Node properties are found in session._objs or
        self.remotes for remote nodes (or those not yet instantiated).
        '''
        # for distributed sessions to work properly, the SDT option should be
        # enabled prior to starting the session
        if not self.is_enabled():
            return False
        # node.(objid, type, icon, name) are used.
        nodenum = msg.gettlv(coreapi.CORE_TLV_NODE_NUMBER)
        if not nodenum:
            return
        x = msg.gettlv(coreapi.CORE_TLV_NODE_XPOS)
        y = msg.gettlv(coreapi.CORE_TLV_NODE_YPOS)
        z = None
        name = msg.gettlv(coreapi.CORE_TLV_NODE_NAME)
        nodetype = msg.gettlv(coreapi.CORE_TLV_NODE_TYPE)
        model = msg.gettlv(coreapi.CORE_TLV_NODE_MODEL)
        icon = msg.gettlv(coreapi.CORE_TLV_NODE_ICON)
        net = False
        # Renamed local `type` -> `objtype` to avoid shadowing the builtin.
        if nodetype == coreapi.CORE_NODE_DEF or \
           nodetype == coreapi.CORE_NODE_PHYS or \
           nodetype == coreapi.CORE_NODE_XEN:
            if model is None:
                model = "router"
            objtype = model
        elif nodetype is not None:
            objtype = coreapi.node_class(nodetype).type
            net = True
        else:
            objtype = None

        try:
            node = self.session.obj(nodenum)
        except KeyError:
            node = None
        if node:
            self.updatenode(node.objid, msg.flags, x, y, z,
                            node.name, node.type, node.icon)
        else:
            # Remote (or not-yet-instantiated) node: track it in self.remotes.
            if nodenum in self.remotes:
                remote = self.remotes[nodenum]
                if name is None:
                    name = remote.name
                if objtype is None:
                    objtype = remote.type
                if icon is None:
                    icon = remote.icon
            else:
                remote = self.Bunch(objid=nodenum, type=objtype, icon=icon,
                                    name=name, net=net, links=set())
                self.remotes[nodenum] = remote
            remote.pos = (x, y, z)
            self.updatenode(nodenum, msg.flags, x, y, z, name, objtype, icon)

    def handlelinkmsg(self, msg):
        ''' Process a Link Message to add/remove links on the SDT display.
        Links are recorded in the remotes[nodenum1].links set for updating
        the SDT display at a later time.
        '''
        if not self.is_enabled():
            return False
        nodenum1 = msg.gettlv(coreapi.CORE_TLV_LINK_N1NUMBER)
        nodenum2 = msg.gettlv(coreapi.CORE_TLV_LINK_N2NUMBER)
        link_msg_type = msg.gettlv(coreapi.CORE_TLV_LINK_TYPE)
        # this filters out links to WLAN and EMANE nodes which are not drawn
        if self.wlancheck(nodenum1):
            return
        wl = (link_msg_type == coreapi.CORE_LINK_WIRELESS)
        if nodenum1 in self.remotes:
            r = self.remotes[nodenum1]
            if msg.flags & coreapi.CORE_API_DEL_FLAG:
                if (nodenum2, wl) in r.links:
                    r.links.remove((nodenum2, wl))
            else:
                r.links.add((nodenum2, wl))
        self.updatelink(nodenum1, nodenum2, msg.flags, wireless=wl)

    def wlancheck(self, nodenum):
        ''' Helper returns True if a node number corresponds to a WlanNode
        or EmaneNode.
        '''
        if nodenum in self.remotes:
            node_type = self.remotes[nodenum].type
            if node_type in ("wlan", "emane"):
                return True
        else:
            try:
                n = self.session.obj(nodenum)
            except KeyError:
                return False
            if isinstance(n, (nodes.WlanNode, nodes.EmaneNode)):
                return True
        return False
| |
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import mock
import unittest
from cloudbaseinit import exception as cbinit_exception
class WindowsPhysicalDiskUtilsTests(unittest.TestCase):
    """Tests for ``cloudbaseinit.utils.windows.physical_disk.PhysicalDisk``.

    ``ctypes`` is replaced by a MagicMock in ``sys.modules`` *before* the
    module under test is imported, so these tests run on any platform and
    never touch real Win32 APIs.
    """

    def setUp(self):
        # Patch ctypes first: physical_disk imports it at module level.
        self._ctypes_mock = mock.MagicMock()
        self._module_patcher = mock.patch.dict(
            'sys.modules',
            {'ctypes': self._ctypes_mock})
        self._module_patcher.start()
        self.physical_disk = importlib.import_module(
            "cloudbaseinit.utils.windows.physical_disk")
        self.fake_path = mock.sentinel.fake_path
        self._phys_disk_class = self.physical_disk.PhysicalDisk(
            path=self.fake_path)
        # kernel32 is mocked per-test so call assertions start clean.
        self.physical_disk.kernel32 = mock.MagicMock()

    def tearDown(self):
        self._module_patcher.stop()

    @mock.patch('cloudbaseinit.utils.windows.physical_disk'
                '.PhysicalDisk.close')
    def _test_open(self, mock_close, _handle, exception):
        """Exercise PhysicalDisk.open().

        :param _handle: pre-existing handle; when truthy, open() is
                        expected to close it first.
        :param exception: when truthy, CreateFileW fails and open() must
                          raise CloudbaseInitException.
        """
        self._phys_disk_class._handle = _handle
        if exception:
            # CreateFileW returning INVALID_HANDLE_VALUE signals failure.
            self.physical_disk.kernel32.CreateFileW.return_value = \
                self._phys_disk_class.INVALID_HANDLE_VALUE
            self.assertRaises(cbinit_exception.CloudbaseInitException,
                              self._phys_disk_class.open)
        else:
            self._phys_disk_class.open()
            self.physical_disk.kernel32.CreateFileW.assert_called_once_with(
                self._ctypes_mock.c_wchar_p.return_value,
                self._phys_disk_class.GENERIC_READ,
                self._phys_disk_class.FILE_SHARE_READ,
                0, self._phys_disk_class.OPEN_EXISTING,
                self._phys_disk_class.FILE_ATTRIBUTE_READONLY, 0
            )
            self._ctypes_mock.c_wchar_p.assert_called_once_with(self.fake_path)
            self.assertEqual(
                self.physical_disk.kernel32.CreateFileW.return_value,
                self._phys_disk_class._handle)
        if _handle:
            # A disk that was already open must be closed before reopening.
            mock_close.assert_called_once_with()

    def test_open(self):
        """open() succeeds with no pre-existing handle."""
        self._test_open(_handle=None, exception=None)

    def test_open_exeption(self):
        """open() raises when CreateFileW fails.  (sic: 'exeption')"""
        self._test_open(_handle=None, exception=True)

    def test_open_with_close(self):
        """open() closes a stale handle before attempting to reopen."""
        self._test_open(_handle=mock.sentinel._handle, exception=True)

    def test_close(self):
        """close() releases the Win32 handle and drops cached geometry."""
        self._phys_disk_class._handle = mock.sentinel._handle
        self._phys_disk_class._geom = mock.sentinel._geom
        self._phys_disk_class.close()
        self.physical_disk.kernel32.CloseHandle.assert_called_once_with(
            mock.sentinel._handle)
        self.assertEqual(0, self._phys_disk_class._handle)
        self.assertEqual(None, self._phys_disk_class._geom)

    @mock.patch('cloudbaseinit.utils.windows.physical_disk'
                '.Win32_DiskGeometry')
    def _test_get_geometry(self, mock_Win32_DiskGeometry, _geom, ret_val):
        """Exercise get_geometry() for cached / fresh / error cases.

        :param _geom: pre-cached geometry (returned as-is when set).
        :param ret_val: DeviceIoControl result; falsy means API failure.
        """
        mock_DeviceIoControl = self.physical_disk.kernel32.DeviceIoControl
        expect_byref = [mock.call(mock_Win32_DiskGeometry.return_value),
                        mock.call(
                            self._ctypes_mock.wintypes.DWORD.return_value)]
        self._phys_disk_class._geom = _geom
        self.physical_disk.kernel32.DeviceIoControl.return_value = ret_val
        if not ret_val:
            # DeviceIoControl returning 0 must surface as an exception.
            self.assertRaises(cbinit_exception.CloudbaseInitException,
                              self._phys_disk_class.get_geometry)
        elif _geom:
            # Cached geometry is returned without calling the API again.
            response = self._phys_disk_class.get_geometry()
            self.assertEqual(_geom, response)
        else:
            response = self._phys_disk_class.get_geometry()
            mock_Win32_DiskGeometry.assert_called_once_with()
            self._ctypes_mock.wintypes.DWORD.assert_called_once_with()
            mock_DeviceIoControl.assert_called_once_with(
                self._phys_disk_class._handle,
                self._phys_disk_class.IOCTL_DISK_GET_DRIVE_GEOMETRY, 0, 0,
                self._ctypes_mock.byref.return_value,
                self._ctypes_mock.sizeof.return_value,
                self._ctypes_mock.byref.return_value, 0)
            # byref() must wrap the geometry struct, then the DWORD.
            self.assertEqual(expect_byref,
                             self._ctypes_mock.byref.call_args_list)
            self.assertEqual(mock_Win32_DiskGeometry.return_value,
                             self._phys_disk_class._geom)
            self.assertEqual(self._phys_disk_class._geom, response)

    def test_get_geometry(self):
        """Cached geometry short-circuits the API call."""
        self._test_get_geometry(_geom=mock.sentinel._geom,
                                ret_val=mock.sentinel.ret_val)

    def test_get_geometry_no_geom(self):
        """Geometry is fetched via DeviceIoControl when not cached."""
        self._test_get_geometry(_geom=None,
                                ret_val=mock.sentinel.ret_val)

    def test_get_geometry_no_geom_exception(self):
        """DeviceIoControl failure raises CloudbaseInitException."""
        self._test_get_geometry(_geom=None, ret_val=None)

    def _test_seek(self, exception):
        """Exercise seek(1) on both the success and the failure path."""
        # DWORD is expected to be built for the high (0) then low (1) part.
        expect_DWORD = [mock.call(0), mock.call(1)]
        if exception:
            self.physical_disk.kernel32.SetFilePointer.return_value = \
                self._phys_disk_class.INVALID_SET_FILE_POINTER
            self.assertRaises(cbinit_exception.CloudbaseInitException,
                              self._phys_disk_class.seek, 1)
        else:
            self._phys_disk_class.seek(1)
            self.physical_disk.kernel32.SetFilePointer.assert_called_once_with(
                self._phys_disk_class._handle,
                self._ctypes_mock.wintypes.DWORD.return_value,
                self._ctypes_mock.byref.return_value,
                self._phys_disk_class.FILE_BEGIN)
        # The DWORDs are constructed before SetFilePointer is called, so
        # these hold on both paths.
        self._ctypes_mock.byref.assert_called_once_with(
            self._ctypes_mock.wintypes.DWORD.return_value)
        self.assertEqual(expect_DWORD,
                         self._ctypes_mock.wintypes.DWORD.call_args_list)

    def test_seek(self):
        """seek() forwards to SetFilePointer on success."""
        self._test_seek(exception=False)

    def test_seek_exception(self):
        """seek() raises when SetFilePointer reports failure."""
        self._test_seek(exception=True)

    def _test_read(self, ret_val):
        """Exercise read(); falsy ReadFile result means API failure."""
        bytes_to_read = mock.sentinel.bytes_to_read
        self.physical_disk.kernel32.ReadFile.return_value = ret_val
        if not ret_val:
            self.assertRaises(cbinit_exception.CloudbaseInitException,
                              self._phys_disk_class.read, bytes_to_read)
        else:
            response = self._phys_disk_class.read(bytes_to_read)
            self._ctypes_mock.create_string_buffer.assert_called_once_with(
                bytes_to_read)
            self._ctypes_mock.wintypes.DWORD.assert_called_once_with()
            self.physical_disk.kernel32.ReadFile.assert_called_once_with(
                self._phys_disk_class._handle,
                self._ctypes_mock.create_string_buffer.return_value,
                bytes_to_read, self._ctypes_mock.byref.return_value, 0)
            self._ctypes_mock.byref.assert_called_once_with(
                self._ctypes_mock.wintypes.DWORD.return_value)
            # read() returns (buffer, bytes-actually-read).
            self.assertEqual(
                (self._ctypes_mock.create_string_buffer.return_value,
                 self._ctypes_mock.wintypes.DWORD.return_value.value),
                response)

    def test_read(self):
        """read() returns the buffer and byte count on success."""
        self._test_read(ret_val=mock.sentinel.ret_val)

    def test_read_exception(self):
        """read() raises when ReadFile reports failure."""
        self._test_read(ret_val=None)
| |
import asyncio
import multiprocessing
import os
import socket
import ujson as json
from email.utils import formatdate
from functools import partial
from http import HTTPStatus
import httptools
from jsonschema import Draft4Validator, ErrorTree
__version__ = '3.0.1'
class HTTPException(Exception):
    """Signals that request handling should stop with a given HTTP status.

    ``msg`` (optional) becomes the response body; ``properties`` is carried
    through to the response ``msg`` field by the application dispatcher.
    """

    def __init__(self, status, msg=None, properties=None):
        self.status = status
        self.msg = msg
        self.properties = properties
class Request:
    """Mutable container for the parsed pieces of an incoming request.

    Supports both attribute and item access (``req.url`` / ``req["url"]``).
    """

    def __init__(self):
        # A fresh headers dict per instance; defaults mirror an empty
        # HEAD request for "/".
        defaults = {"headers": {}, "method": "HEAD", "url": "/",
                    "raw": None, "ip": None}
        for attr, value in defaults.items():
            setattr(self, attr, value)

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, key, value):
        setattr(self, key, value)
class Response:
    """Mutable container for the outgoing response.

    ``bytes(resp)`` renders a complete HTTP/1.1 message, computing the
    ``Content-Length`` header from the encoded body as a side effect.
    """

    def __init__(self):
        self.body = ""
        self.status = 200
        self.msg = ""
        self.headers = {
            'Date': formatdate(timeval=None, localtime=False, usegmt=True),
            'Content-Type': 'text/plain'
        }

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __bytes__(self):
        status = HTTPStatus(self.status)
        payload = self.body.encode()
        # Content-Length must be recorded before the headers are rendered.
        self.headers['Content-Length'] = len(payload)
        status_line = f"HTTP/1.1 {status.value} {status.phrase}".encode()
        header_block = "\r\n".join(
            f'{k}: {v}' for k, v in self.headers.items()
        ).encode()
        return b'\r\n'.join([status_line, header_block]) + b'\r\n\r\n' + payload
class Context:
    """Per-request bundle of a Request, a Response and the socket writer.

    Handlers talk to the context alone: attribute reads not found on the
    context fall through to the request (``ctx.method``), while the
    ``headers``/``json``/``body``/``status``/``msg`` properties proxy the
    response (``json`` parses the *response* body).
    """
    def __init__(self):
        self.req = Request()
        self.resp = Response()
        # Assigned by the protocol (transport.write) when a request arrives.
        self.write = None
    def send(self, _):
        # Render the response and push it out; the ignored positional
        # argument lets this be used directly as a task done-callback.
        self.write(bytes(self.resp))
    def check(self, value, status=400, msg='', properties=""):
        """Assert-style helper: abort the request when `value` is falsy."""
        if not value:
            self.abort(status=status, msg=msg, properties=properties)
    def abort(self, status, msg="", properties=""):
        """Stop handling immediately with the given HTTP status."""
        raise HTTPException(status=status, msg=msg, properties=properties)
    def __getattr__(self, item):
        # Only invoked for names missing on the context itself;
        # delegate those to the request object.
        return getattr(self.req, item)
    def __getitem__(self, item):
        return getattr(self, item)
    def __setitem__(self, key, value):
        setattr(self, key, value)
    @property
    def headers(self):
        # Response headers (request headers are reachable via __getattr__
        # only when spelled ctx.req.headers, since 'headers' is defined here).
        return self.resp.headers
    @property
    def json(self):
        return json.loads(self.body)
    @property
    def body(self):
        return self.resp.body
    @body.setter
    def body(self, value):
        self.resp.body = value
    @property
    def status(self):
        return self.resp.status
    @status.setter
    def status(self, value):
        self.resp.status = value
    @property
    def msg(self):
        return self.resp.msg
    @msg.setter
    def msg(self, value):
        self.resp.msg = value
class HTTPProtocol(asyncio.Protocol):
    """asyncio protocol parsing HTTP requests and dispatching them.

    One instance serves one connection; a fresh :class:`Context` is built
    per request (in ``on_url``) and handed to ``handler`` once the message
    is complete.  The ``on_*`` methods are httptools parser callbacks.
    """

    def __init__(self, handler, loop):
        self.parser = None
        self.transport = None
        self.handler = handler
        self.loop = loop
        self.ctx = None

    def connection_made(self, transport):
        self.parser = httptools.HttpRequestParser(self)
        self.transport = transport

    def on_url(self, url):
        # A new request starts: build its context and wire the writer.
        self.ctx = Context()
        self.ctx.write = self.transport.write
        url = httptools.parse_url(url)
        self.ctx.req.path = url.path.decode()
        self.ctx.req.method = self.parser.get_method().decode()

    def on_body(self, body):
        # BUG FIX: Request.raw starts out as None, so the previous
        # `raw += body` raised TypeError on the first body chunk.
        # Start from b"" and accumulate so chunked bodies concatenate.
        if self.ctx.req.raw is None:
            self.ctx.req.raw = b""
        self.ctx.req.raw += body

    def on_header(self, name, value):
        self.ctx.req.headers[name.decode()] = value.decode()

    def on_message_complete(self):
        # Run the (async) application handler; send the response when done.
        task = self.loop.create_task(self.handler(self.ctx))
        task.add_done_callback(self.ctx.send)

    def data_received(self, data):
        self.parser.feed_data(data)

    def connection_lost(self, exc):
        self.transport.close()
class App:
    """Minimal multi-process HTTP application server.

    ``routes`` maps a URL path to a Controller class; ``listen`` forks one
    worker process per CPU (by default), all accepting on a shared socket.
    The instance itself is the per-request coroutine handler (``__call__``).
    """
    def __init__(self):
        self.workers = set()
        self.routes = {}
    def serve(self, sock):
        """Worker entry point: run a fresh event loop serving `sock`."""
        loop = asyncio.new_event_loop()
        server = loop.create_server(partial(HTTPProtocol, loop=loop, handler=self), sock=sock)
        loop.create_task(server)
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            server.close()
            loop.close()
    def listen(self, port=8000, host="127.0.0.1", workers=multiprocessing.cpu_count()):
        """Bind `host`:`port` and fork `workers` serving processes.

        Blocks until interrupted; Ctrl-C terminates and joins workers.
        NOTE(review): the `workers` default is evaluated once at import
        time, not per call — intentional? confirm.
        """
        # uvloop is imported lazily so merely importing this module
        # does not require it.
        import uvloop
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
        pid = os.getpid()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setblocking(False)
        sock.bind((host, port))
        # Child processes must inherit the listening descriptor.
        os.set_inheritable(sock.fileno(), True)
        try:
            print(f'[{pid}] Listening at: http://{host}:{port}')
            print(f'[{pid}] Workers: {workers}')
            for _ in range(workers):
                worker = multiprocessing.Process(target=self.serve, kwargs=dict(sock=sock))
                worker.daemon = True
                worker.start()
                print(f'[{pid}] Starting worker with pid: {worker.pid}')
                self.workers.add(worker)
            for worker in self.workers:
                worker.join()
        except KeyboardInterrupt:
            print('\r', end='\r')
            print(f'[{pid}] Server soft stopping')
            for worker in self.workers:
                worker.terminate()
                worker.join()
            print(f'[{pid}] Server stopped successfully!')
            sock.close()
    async def __call__(self, ctx):
        """Dispatch `ctx` to the controller registered for its path.

        HTTPException (including the 404 raised here for unknown paths)
        is rendered into the response status/body/msg.
        """
        try:
            handler = self.routes.get(ctx.req.path)
            if not handler:
                raise HTTPException(404)
            await handler(ctx).request()
        except HTTPException as e:
            ctx.status = e.status
            # Fall back to the standard reason phrase when no msg given.
            ctx.body = e.msg or HTTPStatus(e.status).phrase
            ctx.msg = e.properties
class Controller:
    """Base request handler dispatching on the HTTP method name.

    Subclasses implement coroutine methods named after lower-cased HTTP
    methods (``get``, ``post``, ...); a request whose method has no
    matching coroutine is answered with 405.
    """

    def __init__(self, ctx):
        self.ctx = ctx

    async def request(self):
        method_name = self.ctx.req.method.lower()
        handler = getattr(self, method_name, None)
        if not handler:
            raise HTTPException(405)
        await handler()
class RESTController(Controller):
    """Controller variant that serializes the response body as JSON."""

    async def request(self):
        # Advertise JSON before dispatching so handlers may override it.
        self.ctx.headers['Content-Type'] = 'application/json'
        await super().request()
        # Whatever the handler left in the body is encoded on the way out.
        self.ctx.body = json.dumps(self.ctx.body)
class Model:
    """Validates payloads against a JSON-Schema (Draft 4) class attribute.

    Subclasses override ``schema``; ``validate`` raises HTTPException(400)
    with the collected errors, or returns the data unchanged.
    """

    schema = {}

    @classmethod
    def validate(cls, data):
        validator = Draft4Validator(cls.schema)
        tree = ErrorTree(validator.iter_errors(data))
        if tree.errors:
            raise HTTPException(400, msg=str(tree.errors))
        return data
| |
# Natural Language Toolkit: TnT Tagger
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Sam Huston <sjh900@gmail.com>
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
'''
Implementation of 'TnT - A Statisical Part of Speech Tagger'
by Thorsten Brants
http://acl.ldc.upenn.edu/A/A00/A00-1031.pdf
'''
from __future__ import print_function, division
from math import log
from operator import itemgetter
from nltk.probability import FreqDist, ConditionalFreqDist
from nltk.tag.api import TaggerI
class TnT(TaggerI):
    '''
    TnT - Statistical POS tagger
    IMPORTANT NOTES:
    * DOES NOT AUTOMATICALLY DEAL WITH UNSEEN WORDS
    - It is possible to provide an untrained POS tagger to
    create tags for unknown words, see __init__ function
    * SHOULD BE USED WITH SENTENCE-DELIMITED INPUT
    - Due to the nature of this tagger, it works best when
    trained over sentence delimited input.
    - However it still produces good results if the training
    data and testing data are separated on all punctuation eg: [,.?!]
    - Input for training is expected to be a list of sentences
    where each sentence is a list of (word, tag) tuples
    - Input for tag function is a single sentence
    Input for tagdata function is a list of sentences
    Output is of a similar form
    * Function provided to process text that is unsegmented
    - Please see basic_sent_chop()
    TnT uses a second order Markov model to produce tags for
    a sequence of input, specifically:
    argmax [Proj(P(t_i|t_i-1,t_i-2)P(w_i|t_i))] P(t_T+1 | t_T)
    IE: the maximum projection of a set of probabilities
    The set of possible tags for a given word is derived
    from the training data. It is the set of all tags
    that exact word has been assigned.
    To speed up and get more precision, we can use log addition
    to instead multiplication, specifically:
    argmax [Sigma(log(P(t_i|t_i-1,t_i-2))+log(P(w_i|t_i)))] +
    log(P(t_T+1|t_T))
    The probability of a tag for a given word is the linear
    interpolation of 3 markov models; a zero-order, first-order,
    and a second order model.
    P(t_i| t_i-1, t_i-2) = l1*P(t_i) + l2*P(t_i| t_i-1) +
    l3*P(t_i| t_i-1, t_i-2)
    A beam search is used to limit the memory usage of the algorithm.
    The degree of the beam can be changed using N in the initialization.
    N represents the maximum number of possible solutions to maintain
    while tagging.
    It is possible to differentiate the tags which are assigned to
    capitalized words. However this does not result in a significant
    gain in the accuracy of the results.
    '''
    def __init__(self, unk=None, Trained=False, N=1000, C=False):
        '''
        Construct a TnT statistical tagger. Tagger must be trained
        before being used to tag input.
        :param unk: instance of a POS tagger, conforms to TaggerI
        :type unk:(TaggerI)
        :param Trained: Indication that the POS tagger is trained or not
        :type Trained: boolean
        :param N: Beam search degree (see above)
        :type N:(int)
        :param C: Capitalization flag
        :type C: boolean
        Initializer, creates frequency distributions to be used
        for tagging
        _lx values represent the portion of the tri/bi/uni taggers
        to be used to calculate the probability
        N value is the number of possible solutions to maintain
        while tagging. A good value for this is 1000
        C is a boolean value which specifies to use or
        not use the Capitalization of the word as additional
        information for tagging.
        NOTE: using capitalization may not increase the accuracy
        of the tagger
        '''
        self._uni = FreqDist()              # unigram tag counts, keyed (tag, C)
        self._bi = ConditionalFreqDist()    # tag bigram counts
        self._tri = ConditionalFreqDist()   # tag trigram counts
        self._wd = ConditionalFreqDist()    # per-word tag counts
        self._eos = ConditionalFreqDist()   # end-of-sentence tag counts
        # interpolation weights, filled in by _compute_lambda() after training
        self._l1 = 0.0
        self._l2 = 0.0
        self._l3 = 0.0
        self._N = N
        self._C = C
        self._T = Trained
        self._unk = unk
        # statistical tools (ignore or delete me)
        self.unknown = 0
        self.known = 0
    def train(self, data):
        '''
        Uses a set of tagged data to train the tagger.
        If an unknown word tagger is specified,
        it is trained on the same data.
        :param data: List of lists of (word, tag) tuples
        :type data: tuple(str)
        '''
        # Ensure that local C flag is initialized before use
        C = False
        if self._unk is not None and self._T == False:
            self._unk.train(data)
        for sent in data:
            # two BOS markers so the first real word has a full trigram history
            history = [('BOS',False), ('BOS',False)]
            for w, t in sent:
                # if capitalization is requested,
                # and the word begins with a capital
                # set local flag C to True
                if self._C and w[0].isupper(): C=True
                self._wd[w][t] += 1
                self._uni[(t,C)] += 1
                self._bi[history[1]][(t,C)] += 1
                self._tri[tuple(history)][(t,C)] += 1
                history.append((t,C))
                history.pop(0)
                # set local flag C to false for the next word
                C = False
            # `t` is the last tag of the sentence here: count it as a
            # sentence-final tag
            self._eos[t]['EOS'] += 1
        # compute lambda values from the trained frequency distributions
        self._compute_lambda()
        #(debugging -- ignore or delete me)
        #print "lambdas"
        #print i, self._l1, i, self._l2, i, self._l3
    def _compute_lambda(self):
        '''
        creates lambda values based upon training data
        NOTE: no need to explicitly reference C,
        it is contained within the tag variable :: tag == (tag,C)
        for each tag trigram (t1, t2, t3)
        depending on the maximum value of
        - f(t1,t2,t3)-1 / f(t1,t2)-1
        - f(t2,t3)-1 / f(t2)-1
        - f(t3)-1 / N-1
        increment l3,l2, or l1 by f(t1,t2,t3)
        ISSUES -- Resolutions:
        if 2 values are equal, increment both lambda values
        by (f(t1,t2,t3) / 2)
        '''
        # temporary lambda variables
        tl1 = 0.0
        tl2 = 0.0
        tl3 = 0.0
        # for each t1,t2 in system
        for history in self._tri.conditions():
            (h1, h2) = history
            # for each t3 given t1,t2 in system
            # (NOTE: tag actually represents (tag,C))
            # However no effect within this function
            for tag in self._tri[history].keys():
                # if there has only been 1 occurrence of this tag in the data
                # then ignore this trigram.
                if self._uni[tag] == 1:
                    continue
                # safe_div provides a safe floating point division
                # it returns -1 if the denominator is 0
                c3 = self._safe_div((self._tri[history][tag]-1), (self._tri[history].N()-1))
                c2 = self._safe_div((self._bi[h2][tag]-1), (self._bi[h2].N()-1))
                c1 = self._safe_div((self._uni[tag]-1), (self._uni.N()-1))
                # if c1 is the maximum value:
                if (c1 > c3) and (c1 > c2):
                    tl1 += self._tri[history][tag]
                # if c2 is the maximum value
                elif (c2 > c3) and (c2 > c1):
                    tl2 += self._tri[history][tag]
                # if c3 is the maximum value
                elif (c3 > c2) and (c3 > c1):
                    tl3 += self._tri[history][tag]
                # if c3, and c2 are equal and larger than c1
                elif (c3 == c2) and (c3 > c1):
                    tl2 += self._tri[history][tag] / 2.0
                    tl3 += self._tri[history][tag] / 2.0
                # if c1, and c2 are equal and larger than c3
                # this might be a dumb thing to do....(not sure yet)
                elif (c2 == c1) and (c1 > c3):
                    tl1 += self._tri[history][tag] / 2.0
                    tl2 += self._tri[history][tag] / 2.0
                # otherwise there might be a problem
                # eg: all values = 0
                else:
                    #print "Problem", c1, c2 ,c3
                    pass
        # Lambda normalisation:
        # ensures that l1+l2+l3 = 1
        self._l1 = tl1 / (tl1+tl2+tl3)
        self._l2 = tl2 / (tl1+tl2+tl3)
        self._l3 = tl3 / (tl1+tl2+tl3)
    def _safe_div(self, v1, v2):
        '''
        Safe floating point division function, does not allow division by 0
        returns -1 if the denominator is 0
        '''
        if v2 == 0:
            return -1
        else:
            return v1 / v2
    def tagdata(self, data):
        '''
        Tags each sentence in a list of sentences
        :param data:list of list of words
        :type data: [[string,],]
        :return: list of list of (word, tag) tuples
        Invokes tag(sent) function for each sentence
        compiles the results into a list of tagged sentences
        each tagged sentence is a list of (word, tag) tuples
        '''
        res = []
        for sent in data:
            res1 = self.tag(sent)
            res.append(res1)
        return res
    def tag(self, data):
        '''
        Tags a single sentence
        :param data: list of words
        :type data: [string,]
        :return: [(word, tag),]
        Calls recursive function '_tagword'
        to produce a list of tags
        Associates the sequence of returned tags
        with the correct words in the input sequence
        returns a list of (word, tag) tuples
        '''
        # start from the two BOS markers with log-probability 0
        current_state = [(['BOS', 'BOS'], 0.0)]
        sent = list(data)
        tags = self._tagword(sent, current_state)
        res = []
        for i in range(len(sent)):
            # unpack and discard the C flags
            # (offset of 2 skips the two BOS markers)
            (t,C) = tags[i+2]
            res.append((sent[i], t))
        return res
    def _tagword(self, sent, current_states):
        '''
        :param sent : List of words remaining in the sentence
        :type sent : [word,]
        :param current_states : List of possible tag combinations for
        the sentence so far, and the log probability
        associated with each tag combination
        :type current_states : [([tag, ], logprob), ]
        Tags the first word in the sentence and
        recursively tags the reminder of sentence
        Uses formula specified above to calculate the probability
        of a particular tag
        '''
        # if this word marks the end of the sentance,
        # return the most probable tag
        if sent == []:
            (h, logp) = current_states[0]
            return h
        # otherwise there are more words to be tagged
        word = sent[0]
        sent = sent[1:]
        new_states = []
        # if the Capitalisation is requested,
        # initalise the flag for this word
        C = False
        if self._C and word[0].isupper(): C=True
        # if word is known
        # compute the set of possible tags
        # and their associated log probabilities
        if word in self._wd.conditions():
            self.known += 1
            for (history, curr_sent_logprob) in current_states:
                logprobs = []
                for t in self._wd[word].keys():
                    # interpolate uni/bi/tri-gram tag probabilities
                    p_uni = self._uni.freq((t,C))
                    p_bi = self._bi[history[-1]].freq((t,C))
                    p_tri = self._tri[tuple(history[-2:])].freq((t,C))
                    p_wd = self._wd[word][t] / self._uni[(t,C)]
                    p = self._l1 *p_uni + self._l2 *p_bi + self._l3 *p_tri
                    p2 = log(p, 2) + log(p_wd, 2)
                    logprobs.append(((t,C), p2))
                # compute the result of appending each tag to this history
                for (tag, logprob) in logprobs:
                    new_states.append((history + [tag],
                                       curr_sent_logprob + logprob))
        # otherwise a new word, set of possible tags is unknown
        else:
            self.unknown += 1
            # since a set of possible tags,
            # and the probability of each specific tag
            # can not be returned from most classifiers:
            # specify that any unknown words are tagged with certainty
            p = 1
            # if no unknown word tagger has been specified
            # then use the tag 'Unk'
            if self._unk is None:
                tag = ('Unk',C)
            # otherwise apply the unknown word tagger
            else :
                [(_w, t)] = list(self._unk.tag([word]))
                tag = (t,C)
            # NOTE: the histories are extended *in place* here (no copies),
            # and the existing state list is reused unchanged.
            for (history, logprob) in current_states:
                history.append(tag)
            new_states = current_states
        # now have computed a set of possible new_states
        # sort states by log prob
        # set is now ordered greatest to least log probability
        new_states.sort(reverse=True, key=itemgetter(1))
        # del everything after N (threshold)
        # this is the beam search cut
        if len(new_states) > self._N:
            new_states = new_states[:self._N]
        # compute the tags for the rest of the sentence
        # return the best list of tags for the sentence
        return self._tagword(sent, new_states)
########################################
# helper function -- basic sentence tokenizer
########################################
def basic_sent_chop(data, raw=True):
    '''
    Basic method for tokenizing input into sentences
    for this tagger:
    :param data: list of tokens (words or (word, tag) tuples)
    :type data: str or tuple(str, str)
    :param raw: boolean flag marking the input data
    as a list of words or a list of tagged words
    :type raw: bool
    :return: list of sentences
    sentences are a list of tokens
    tokens are the same as the input
    Splits the token stream at every occurrence of [,.!?] (the terminator
    is kept at the end of its sentence).  Works for both raw word lists
    and (word, tag) lists; a trailing fragment with no terminator is
    dropped, exactly as before.  This simple chop enhances the
    performance of the TnT tagger; better sentence tokenization will
    further enhance the results.
    '''
    terminators = {',', '.', '?', '!'}
    # pick out the word part of each token once, instead of two loops
    word_of = (lambda tok: tok) if raw else (lambda tok: tok[0])
    sentences = []
    current = []
    for tok in data:
        current.append(tok)
        if word_of(tok) in terminators:
            sentences.append(current)
            current = []
    return sentences
def demo():
    """Train a TnT tagger on a slice of the Brown corpus and print its
    output next to the gold-standard tags, token by token."""
    from nltk.corpus import brown
    gold_sents = list(brown.tagged_sents())
    raw_sents = list(brown.sents())
    # create and train the tagger
    tagger = TnT()
    tagger.train(gold_sents[200:1000])
    # tag some held-out data and show predictions vs. reference
    tagged_data = tagger.tagdata(raw_sents[100:120])
    for offset, sentence in enumerate(tagged_data):
        reference = gold_sents[offset + 100]
        for i, pair in enumerate(sentence):
            print(pair, '--', reference[i])
        print()
def demo2():
    """Compare TnT accuracy with and without capitalization information
    over ten 100-sentence slices of the Treebank corpus."""
    from nltk.corpus import treebank
    data = list(treebank.tagged_sents())
    # one tagger ignoring capitalization, one using it
    plain = TnT(N=1000, C=False)
    capitalised = TnT(N=1000, C=True)
    training = data[1100:]
    plain.train(training)
    capitalised.train(training)
    for i in range(10):
        fold = data[i * 100:((i + 1) * 100)]
        for banner, tagger in (('Capitalization off:', plain),
                               ('Capitalization on:', capitalised)):
            accuracy = tagger.evaluate(fold)
            total = tagger.known + tagger.unknown
            frac_unknown = tagger.unknown / total
            frac_known = tagger.known / total
            # reset the counters before the next evaluation
            tagger.unknown = 0
            tagger.known = 0
            print(banner)
            print('Accuracy:', accuracy)
            print('Percentage known:', frac_known)
            print('Percentage unknown:', frac_unknown)
            print('Accuracy over known words:', (accuracy / frac_known))
def demo3():
    """10-fold cross-validation of TnT on 1000-sentence slices of the
    Treebank (`t`/`d`) and Brown (`s`/`e`) corpora, reporting overall
    accuracy, known-word accuracy and the known-word fraction."""
    from nltk.corpus import treebank, brown
    d = list(treebank.tagged_sents())
    e = list(brown.tagged_sents())
    d = d[:1000]
    e = e[:1000]
    d10 = int(len(d)*0.1)
    e10 = int(len(e)*0.1)
    tknacc = 0
    sknacc = 0
    tallacc = 0
    sallacc = 0
    tknown = 0
    sknown = 0
    for i in range(10):
        t = TnT(N=1000, C=False)
        s = TnT(N=1000, C=False)
        # fold i is held out; everything else is training data
        dtest = d[(i*d10):((i+1)*d10)]
        etest = e[(i*e10):((i+1)*e10)]
        dtrain = d[:(i*d10)] + d[((i+1)*d10):]
        etrain = e[:(i*e10)] + e[((i+1)*e10):]
        t.train(dtrain)
        s.train(etrain)
        tacc = t.evaluate(dtest)
        tp_un = t.unknown / (t.known + t.unknown)
        tp_kn = t.known / (t.known + t.unknown)
        tknown += tp_kn
        t.unknown = 0
        t.known = 0
        sacc = s.evaluate(etest)
        sp_un = s.unknown / (s.known + s.unknown)
        sp_kn = s.known / (s.known + s.unknown)
        sknown += sp_kn
        s.unknown = 0
        s.known = 0
        tknacc += (tacc / tp_kn)
        # BUG FIX: the Brown tagger's known-word accuracy was divided by
        # the *Treebank* known fraction (tp_kn); use the matching sp_kn.
        sknacc += (sacc / sp_kn)
        tallacc += tacc
        sallacc += sacc
        #print i+1, (tacc / tp_kn), i+1, (sacc / sp_kn), i+1, tacc, i+1, sacc
    # Each accumulator is a sum over the 10 folds, so multiplying by 10
    # converts the fold-average into a percentage (10 * sum == 100 * mean).
    # BUG FIX: the labels were swapped — `t` is trained/evaluated on
    # treebank (d) and `s` on brown (e).
    print("treebank: acc over words known:", 10 * tknacc)
    print("        : overall accuracy:", 10 * tallacc)
    print("        : words known:", 10 * tknown)
    print("brown   : acc over words known:", 10 * sknacc)
    print("        : overall accuracy:", 10 * sallacc)
    print("        : words known:", 10 * sknown)
| |
"""Unit test for DPTControlStepCode objects."""
import pytest
from xknx.dpt import (
DPTControlStartStop,
DPTControlStartStopBlinds,
DPTControlStartStopDimming,
DPTControlStepCode,
DPTControlStepwise,
)
from xknx.exceptions import ConversionError
class TestDPTControlStepCode:
    """Test class for DPTControlStepCode objects."""

    def test_to_knx(self):
        """Test serializing values to DPTControlStepCode."""
        for raw in range(16):
            value = {"control": 1 if raw & 0x08 else 0,
                     "step_code": raw & 0x07}
            assert DPTControlStepCode.to_knx(value) == (raw,)

    def test_to_knx_wrong_type(self):
        """Test serializing wrong type to DPTControlStepCode."""
        for not_a_mapping in ("", 0):
            with pytest.raises(ConversionError):
                DPTControlStepCode.to_knx(not_a_mapping)

    def test_to_knx_wrong_keys(self):
        """Test serializing map with missing keys to DPTControlStepCode."""
        for incomplete in ({"control": 0}, {"step_code": 0}):
            with pytest.raises(ConversionError):
                DPTControlStepCode.to_knx(incomplete)

    def test_to_knx_wrong_value_types(self):
        """Test serializing map with keys of invalid type to DPTControlStepCode."""
        for bad_typed in ({"control": ""}, {"step_code": ""}):
            with pytest.raises(ConversionError):
                DPTControlStepCode.to_knx(bad_typed)

    def test_to_knx_wrong_values(self):
        """Test serializing map with keys of invalid values to DPTControlStepCode."""
        for out_of_range in (-1, 0x8):
            with pytest.raises(ConversionError):
                DPTControlStepCode.to_knx(
                    {"control": 0, "step_code": out_of_range}
                )

    def test_from_knx(self):
        """Test parsing DPTControlStepCode types from KNX."""
        for raw in range(16):
            expected = {"control": 1 if raw & 0x08 else 0,
                        "step_code": raw & 0x07}
            assert DPTControlStepCode.from_knx((raw,)) == expected

    def test_from_knx_wrong_value(self):
        """Test parsing invalid DPTControlStepCode type from KNX."""
        with pytest.raises(ConversionError):
            DPTControlStepCode.from_knx((0x1F,))

    def test_unit(self):
        """Test unit_of_measurement function."""
        assert DPTControlStepCode.unit == ""
class TestDPTControlStepwise:
    """Test class for DPTControlStepwise objects."""

    # (percentage value, raw KNX nibble) pairs covering every step code.
    _VALUE_TO_RAW = [
        (1, 0xF), (3, 0xE), (6, 0xD), (12, 0xC), (25, 0xB), (50, 0xA),
        (100, 0x9), (-1, 0x7), (-3, 0x6), (-6, 0x5), (-12, 0x4),
        (-25, 0x3), (-50, 0x2), (-100, 0x1), (0, 0x0),
    ]

    def test_to_knx(self):
        """Test serializing values to DPTControlStepwise."""
        for value, raw in self._VALUE_TO_RAW:
            assert DPTControlStepwise.to_knx(value) == (raw,)

    def test_to_knx_wrong_type(self):
        """Test serializing wrong type to DPTControlStepwise."""
        with pytest.raises(ConversionError):
            DPTControlStepwise.to_knx("")

    def test_from_knx(self):
        """Test parsing DPTControlStepwise types from KNX."""
        for value, raw in self._VALUE_TO_RAW:
            assert DPTControlStepwise.from_knx((raw,)) == value
        # 0x8 ("break"/no step) also decodes to 0.
        assert DPTControlStepwise.from_knx((0x8,)) == 0

    def test_from_knx_wrong_value(self):
        """Test parsing invalid DPTControlStepwise type from KNX."""
        with pytest.raises(ConversionError):
            DPTControlStepwise.from_knx((0x1F,))

    def test_unit(self):
        """Test unit_of_measurement function."""
        assert DPTControlStepwise.unit == "%"
class TestDPTControlStartStop:
    """Test class for DPTControlStartStop objects."""

    def test_mode_to_knx(self):
        """Test serializing dimming commands to KNX."""
        direction = DPTControlStartStopDimming.Direction
        expected = {
            direction.INCREASE: (9,),
            direction.DECREASE: (1,),
            direction.STOP: (0,),
        }
        for mode, raw in expected.items():
            assert DPTControlStartStopDimming.to_knx(mode) == raw

    def test_mode_to_knx_wrong_value(self):
        """Test serializing invalid data type to KNX."""
        with pytest.raises(ConversionError):
            DPTControlStartStopDimming.to_knx(1)

    def test_mode_from_knx(self):
        """Test parsing dimming commands from KNX."""
        direction = DPTControlStartStopDimming.Direction
        for raw in range(16):
            if raw in (0, 8):
                expected_direction = direction.STOP
            elif raw > 8:
                expected_direction = direction.INCREASE
            else:
                expected_direction = direction.DECREASE
            assert DPTControlStartStopDimming.from_knx((raw,)) == expected_direction

    def test_mode_from_knx_wrong_value(self):
        """Test serializing invalid data type to KNX."""
        with pytest.raises(ConversionError):
            DPTControlStartStopDimming.from_knx(1)

    def test_direction_names(self):
        """Test names of Direction Enum."""
        assert str(DPTControlStartStop.Direction.INCREASE) == "Increase"
        assert str(DPTControlStartStop.Direction.DECREASE) == "Decrease"
        assert str(DPTControlStartStop.Direction.STOP) == "Stop"
class TestDPTControlStartStopDimming:
    """Test class for DPTControlStartStopDimming objects."""

    def test_direction_names(self):
        """Test names of Direction Enum."""
        printable = {"INCREASE": "Increase",
                     "DECREASE": "Decrease",
                     "STOP": "Stop"}
        for member, text in printable.items():
            assert str(DPTControlStartStopDimming.Direction[member]) == text

    def test_direction_values(self):
        """Test values of Direction Enum."""
        # Dimming directions must stay value-compatible with the generic enum.
        for member in ("DECREASE", "INCREASE", "STOP"):
            assert (
                DPTControlStartStopDimming.Direction[member].value
                == DPTControlStartStop.Direction[member].value
            )
class TestDPTControlStartStopBlinds:
    """Test class for DPTControlStartStopBlinds objects."""
    def test_direction_names(self):
        """Test names of Direction Enum."""
        assert str(DPTControlStartStopBlinds.Direction.DOWN) == "Down"
        assert str(DPTControlStartStopBlinds.Direction.UP) == "Up"
        assert str(DPTControlStartStopBlinds.Direction.STOP) == "Stop"
    def test_direction_values(self):
        """Test values of Direction Enum."""
        # Blinds reuse the generic start/stop encoding:
        # UP maps to DECREASE, DOWN maps to INCREASE.
        assert (
            DPTControlStartStopBlinds.Direction.UP.value
            == DPTControlStartStop.Direction.DECREASE.value
        )
        assert (
            DPTControlStartStopBlinds.Direction.DOWN.value
            == DPTControlStartStop.Direction.INCREASE.value
        )
        assert (
            DPTControlStartStopBlinds.Direction.STOP.value
            == DPTControlStartStop.Direction.STOP.value
        )
| |
from collections import Counter
from collections.abc import MutableSet as AbcMutableSet
from dataclasses import Field
from enum import Enum
from functools import lru_cache
from typing import Any, Callable, Dict, Optional, Tuple, Type, TypeVar, Union
from attr import Attribute
from attr import has as attrs_has
from attr import resolve_types
from ._compat import (
FrozenSetSubscriptable,
Mapping,
MutableMapping,
MutableSequence,
MutableSet,
Sequence,
Set,
fields,
get_origin,
has,
has_with_generic,
is_annotated,
is_bare,
is_counter,
is_frozenset,
is_generic,
is_generic_attrs,
is_hetero_tuple,
is_literal,
is_mapping,
is_mutable_set,
is_sequence,
is_tuple,
is_union_type,
)
from .disambiguators import create_uniq_field_dis_func
from .dispatch import MultiStrategyDispatch
from .errors import StructureHandlerNotFoundError
from .gen import (
AttributeOverride,
make_dict_structure_fn,
make_dict_unstructure_fn,
make_hetero_tuple_unstructure_fn,
make_iterable_unstructure_fn,
make_mapping_structure_fn,
make_mapping_unstructure_fn,
)
NoneType = type(None)  # Sentinel type used for Optional / union member checks.
T = TypeVar("T")  # The structured (target) type in structure signatures.
V = TypeVar("V")  # NOTE(review): not used in the visible part of this module.
class UnstructureStrategy(Enum):
    """`attrs` classes unstructuring strategies."""
    AS_DICT = "asdict"  # Unstructure attrs instances into mappings.
    AS_TUPLE = "astuple"  # Unstructure attrs instances into tuples.
def _subclass(typ):
"""a shortcut"""
return lambda cls: issubclass(cls, typ)
def is_attrs_union(typ):
    """Is ``typ`` a union made up exclusively of attrs classes?"""
    if not is_union_type(typ):
        return False
    return all(has(get_origin(member) or member) for member in typ.__args__)
def is_attrs_union_or_none(typ):
    """Is ``typ`` a union of attrs classes, possibly including ``None``?"""
    if not is_union_type(typ):
        return False
    for member in typ.__args__:
        if member is not NoneType and not has(get_origin(member) or member):
            return False
    return True
def is_optional(typ):
    """Is ``typ`` exactly ``Optional[X]`` (a two-member union with None)?"""
    if not is_union_type(typ):
        return False
    members = typ.__args__
    return len(members) == 2 and NoneType in members
class Converter(object):
    """Converts between structured and unstructured data.

    Unstructuring turns instances of ``attrs`` classes (and supported
    containers) into primitive equivalents; structuring is the inverse.
    Both directions are driven by ``MultiStrategyDispatch`` registries
    mapping types (or predicates over types) to handler functions, and
    both can be extended through the ``register_*_hook*`` methods.
    """

    __slots__ = (
        "_dis_func_cache",
        "_unstructure_func",
        "_unstructure_attrs",
        "_structure_attrs",
        "_dict_factory",
        "_union_struct_registry",
        "_structure_func",
        "_prefer_attrib_converters",
    )

    def __init__(
        self,
        dict_factory: Callable[[], Any] = dict,
        unstruct_strat: UnstructureStrategy = UnstructureStrategy.AS_DICT,
        prefer_attrib_converters: bool = False,
    ) -> None:
        """Create a converter.

        :param dict_factory: factory for the mappings produced when
            unstructuring ``attrs`` classes as dicts.
        :param unstruct_strat: whether ``attrs`` classes unstructure into
            dicts or tuples.
        :param prefer_attrib_converters: when set, ``attrs`` attribute
            ``converter`` callables take precedence over registered
            structure hooks.
        """
        unstruct_strat = UnstructureStrategy(unstruct_strat)
        self._prefer_attrib_converters = prefer_attrib_converters
        # Create a per-instance cache.
        if unstruct_strat is UnstructureStrategy.AS_DICT:
            self._unstructure_attrs = self.unstructure_attrs_asdict
            self._structure_attrs = self.structure_attrs_fromdict
        else:
            self._unstructure_attrs = self.unstructure_attrs_astuple
            self._structure_attrs = self.structure_attrs_fromtuple
        # Disambiguation functions are expensive to build; cache per union.
        self._dis_func_cache = lru_cache()(self._get_dis_func)
        self._unstructure_func = MultiStrategyDispatch(
            self._unstructure_identity
        )
        self._unstructure_func.register_cls_list(
            [
                (bytes, self._unstructure_identity),
                (str, self._unstructure_identity),
            ]
        )
        # Predicate order matters: earlier entries here are more generic;
        # MultiStrategyDispatch consults them accordingly.
        self._unstructure_func.register_func_list(
            [
                (is_mapping, self._unstructure_mapping),
                (is_sequence, self._unstructure_seq),
                (is_mutable_set, self._unstructure_seq),
                (is_frozenset, self._unstructure_seq),
                (_subclass(Enum), self._unstructure_enum),
                (has, self._unstructure_attrs),
                (is_union_type, self._unstructure_union),
            ]
        )
        # Per-instance register of to-attrs converters.
        # Singledispatch dispatches based on the first argument, so we
        # store the function and switch the arguments in self.loads.
        self._structure_func = MultiStrategyDispatch(self._structure_error)
        self._structure_func.register_func_list(
            [
                (
                    lambda cl: cl is Any or cl is Optional or cl is None,
                    lambda v, _: v,
                ),
                (is_generic_attrs, self._gen_structure_generic, True),
                (is_literal, self._structure_literal),
                (is_sequence, self._structure_list),
                (is_mutable_set, self._structure_set),
                (is_frozenset, self._structure_frozenset),
                (is_tuple, self._structure_tuple),
                (is_mapping, self._structure_dict),
                (
                    is_attrs_union_or_none,
                    self._gen_attrs_union_structure,
                    True,
                ),
                (
                    lambda t: is_union_type(t)
                    and t in self._union_struct_registry,
                    self._structure_union,
                ),
                (is_optional, self._structure_optional),
                (has, self._structure_attrs),
            ]
        )
        # Strings are sequences.
        self._structure_func.register_cls_list(
            [
                (str, self._structure_call),
                (bytes, self._structure_call),
                (int, self._structure_call),
                (float, self._structure_call),
                (Enum, self._structure_call),
            ]
        )
        self._dict_factory = dict_factory
        # Unions are instances now, not classes. We use different registries.
        self._union_struct_registry: Dict[
            Any, Callable[[Any, Type[T]], T]
        ] = {}

    def unstructure(self, obj: Any, unstructure_as=None) -> Any:
        """Convert ``obj`` into its primitive equivalent.

        :param unstructure_as: optionally dispatch on this type instead of
            ``obj.__class__`` (e.g. to unstructure as a base class).
        """
        return self._unstructure_func.dispatch(
            obj.__class__ if unstructure_as is None else unstructure_as
        )(obj)

    @property
    def unstruct_strat(self) -> UnstructureStrategy:
        """The default way of unstructuring ``attrs`` classes."""
        return (
            UnstructureStrategy.AS_DICT
            if self._unstructure_attrs == self.unstructure_attrs_asdict
            else UnstructureStrategy.AS_TUPLE
        )

    def register_unstructure_hook(
        self, cls: Any, func: Callable[[T], Any]
    ) -> None:
        """Register a class-to-primitive converter function for a class.

        The converter function should take an instance of the class and return
        its Python equivalent.
        """
        if attrs_has(cls):
            # Resolve PEP 563 string annotations up front so the hooks
            # see real types.
            resolve_types(cls)
        if is_union_type(cls):
            self._unstructure_func.register_func_list(
                [(lambda t: t == cls, func)]
            )
        else:
            self._unstructure_func.register_cls_list([(cls, func)])

    def register_unstructure_hook_func(
        self, check_func: Callable[[Any], bool], func: Callable[[T], Any]
    ):
        """Register a class-to-primitive converter function for a class, using
        a function to check if it's a match.
        """
        self._unstructure_func.register_func_list([(check_func, func)])

    def register_unstructure_hook_factory(
        self,
        predicate: Callable[[Any], bool],
        factory: Callable[[Any], Callable[[Any], Any]],
    ) -> None:
        """
        Register a hook factory for a given predicate.

        A predicate is a function that, given a type, returns whether the
        factory can produce a hook for that type.

        A factory is a callable that, given a type, produces an unstructuring
        hook for that type. This unstructuring hook will be cached.
        """
        self._unstructure_func.register_func_list([(predicate, factory, True)])

    def register_structure_hook(
        self, cl: Any, func: Callable[[Any, Type[T]], T]
    ):
        """Register a primitive-to-class converter function for a type.

        The converter function should take two arguments:
          * a Python object to be converted,
          * the type to convert to

        and return the instance of the class. The type may seem redundant, but
        is sometimes needed (for example, when dealing with generic classes).
        """
        if attrs_has(cl):
            resolve_types(cl)
        if is_union_type(cl):
            self._union_struct_registry[cl] = func
            # The union predicate registered in __init__ consults the
            # registry we just changed, so cached dispatches are stale.
            self._structure_func.clear_cache()
        else:
            self._structure_func.register_cls_list([(cl, func)])

    def register_structure_hook_func(
        self,
        check_func: Callable[[Type[T]], bool],
        func: Callable[[Any, Type[T]], T],
    ):
        """Register a class-to-primitive converter function for a class, using
        a function to check if it's a match.
        """
        self._structure_func.register_func_list([(check_func, func)])

    def register_structure_hook_factory(
        self,
        predicate: Callable[[Any], bool],
        factory: Callable[[Any], Callable[[Any], Any]],
    ) -> None:
        """
        Register a hook factory for a given predicate.

        A predicate is a function that, given a type, returns whether the
        factory can produce a hook for that type.

        A factory is a callable that, given a type, produces a structuring
        hook for that type. This structuring hook will be cached.
        """
        self._structure_func.register_func_list([(predicate, factory, True)])

    def structure(self, obj: Any, cl: Type[T]) -> T:
        """Convert unstructured Python data structures to structured data."""
        return self._structure_func.dispatch(cl)(obj, cl)

    # Classes to Python primitives.

    def unstructure_attrs_asdict(self, obj) -> Dict[str, Any]:
        """Our version of `attrs.asdict`, so we can call back to us."""
        attrs = fields(obj.__class__)
        dispatch = self._unstructure_func.dispatch
        rv = self._dict_factory()
        for a in attrs:
            name = a.name
            v = getattr(obj, name)
            # Dispatch on the declared attribute type when present,
            # otherwise on the runtime class of the value.
            rv[name] = dispatch(a.type or v.__class__)(v)
        return rv

    def unstructure_attrs_astuple(self, obj) -> Tuple[Any, ...]:
        """Our version of `attrs.astuple`, so we can call back to us."""
        attrs = fields(obj.__class__)
        dispatch = self._unstructure_func.dispatch
        res = list()
        for a in attrs:
            name = a.name
            v = getattr(obj, name)
            res.append(dispatch(a.type or v.__class__)(v))
        return tuple(res)

    def _unstructure_enum(self, obj):
        """Convert an enum to its value."""
        return obj.value

    def _unstructure_identity(self, obj):
        """Just pass it through."""
        return obj

    def _unstructure_seq(self, seq):
        """Convert a sequence to primitive equivalents."""
        # We can reuse the sequence class, so tuples stay tuples.
        dispatch = self._unstructure_func.dispatch
        return seq.__class__(dispatch(e.__class__)(e) for e in seq)

    def _unstructure_mapping(self, mapping):
        """Convert a mapping of attr classes to primitive equivalents."""
        # We can reuse the mapping class, so dicts stay dicts and OrderedDicts
        # stay OrderedDicts.
        dispatch = self._unstructure_func.dispatch
        return mapping.__class__(
            (dispatch(k.__class__)(k), dispatch(v.__class__)(v))
            for k, v in mapping.items()
        )

    def _unstructure_union(self, obj):
        """
        Unstructure an object as a union.

        By default, just unstructures the instance.
        """
        return self._unstructure_func.dispatch(obj.__class__)(obj)

    # Python primitives to classes.

    def _structure_error(self, _, cl):
        """At the bottom of the condition stack, we explode if we can't handle it."""
        msg = (
            "Unsupported type: {0}. Register a structure hook for "
            "it.".format(cl)
        )
        raise StructureHandlerNotFoundError(msg, type_=cl)

    def _gen_structure_generic(self, cl):
        """Create and return a hook for structuring generics."""
        fn = make_dict_structure_fn(
            cl,
            self,
            _cattrs_prefer_attrib_converters=self._prefer_attrib_converters,
        )
        return fn

    def _gen_attrs_union_structure(self, cl):
        """Generate a structuring function for a union of attrs classes (and maybe None)."""
        dis_fn = self._get_dis_func(cl)
        has_none = NoneType in cl.__args__
        if has_none:

            def structure_attrs_union(obj, _):
                if obj is None:
                    return None
                return self.structure(obj, dis_fn(obj))

        else:

            def structure_attrs_union(obj, _):
                return self.structure(obj, dis_fn(obj))

        return structure_attrs_union

    @staticmethod
    def _structure_call(obj, cl):
        """Just call ``cl`` with the given ``obj``.

        This is just an optimization on the ``_structure_default`` case, when
        we know we can skip the ``if`` s. Use for ``str``, ``bytes``, ``enum``,
        etc.
        """
        return cl(obj)

    @staticmethod
    def _structure_literal(val, type):
        """Validate ``val`` against a ``Literal`` type and pass it through.

        Bug fix: the original compared only against ``type.__args__[0]``,
        wrongly rejecting valid members of multi-value literals such as
        ``Literal[1, 2]``.
        """
        if val not in type.__args__:
            raise Exception(f"Literal {type} not equal to {val}")
        return val

    # Attrs classes.

    def structure_attrs_fromtuple(
        self, obj: Tuple[Any, ...], cl: Type[T]
    ) -> T:
        """Load an attrs class from a sequence (tuple)."""
        conv_obj = []  # A list of converter parameters.
        for a, value in zip(fields(cl), obj):  # type: ignore
            # We detect the type by the metadata.
            converted = self._structure_attribute(a, value)
            conv_obj.append(converted)
        return cl(*conv_obj)  # type: ignore

    def _structure_attribute(
        self, a: Union[Attribute, Field], value: Any
    ) -> Any:
        """Handle an individual attrs attribute."""
        type_ = a.type
        attrib_converter = getattr(a, "converter", None)
        if self._prefer_attrib_converters and attrib_converter:
            # An attrib converter is defined on this attribute, and
            # prefer_attrib_converters is set to give these priority over
            # registered structure hooks. So, pass through the raw value,
            # which attrs will flow into the converter.
            return value
        if type_ is None:
            # No type metadata.
            return value
        try:
            return self._structure_func.dispatch(type_)(value, type_)
        except StructureHandlerNotFoundError:
            if attrib_converter:
                # Return the original value and fall back to using an
                # attrib converter.
                return value
            else:
                raise

    def structure_attrs_fromdict(
        self, obj: Mapping[str, Any], cl: Type[T]
    ) -> T:
        """Instantiate an attrs class from a mapping (dict)."""
        # For public use.
        conv_obj = {}  # Start with a fresh dict, to ignore extra keys.
        for a in fields(cl):  # type: ignore
            name = a.name
            try:
                val = obj[name]
            except KeyError:
                continue
            # Private attributes are stored with a leading underscore but
            # passed to __init__ without it.
            if name[0] == "_":
                name = name[1:]
            conv_obj[name] = self._structure_attribute(a, val)
        return cl(**conv_obj)  # type: ignore

    def _structure_list(self, obj, cl):
        """Convert an iterable to a potentially generic list."""
        if is_bare(cl) or cl.__args__[0] is Any:
            return [e for e in obj]
        else:
            elem_type = cl.__args__[0]
            return [
                self._structure_func.dispatch(elem_type)(e, elem_type)
                for e in obj
            ]

    def _structure_set(self, obj, cl):
        """Convert an iterable into a potentially generic set."""
        if is_bare(cl) or cl.__args__[0] is Any:
            return set(obj)
        else:
            elem_type = cl.__args__[0]
            return {
                self._structure_func.dispatch(elem_type)(e, elem_type)
                for e in obj
            }

    def _structure_frozenset(self, obj, cl):
        """Convert an iterable into a potentially generic frozenset."""
        if is_bare(cl) or cl.__args__[0] is Any:
            return frozenset(obj)
        else:
            elem_type = cl.__args__[0]
            dispatch = self._structure_func.dispatch
            return frozenset(dispatch(elem_type)(e, elem_type) for e in obj)

    def _structure_dict(self, obj, cl):
        """Convert a mapping into a potentially generic dict."""
        if is_bare(cl) or cl.__args__ == (Any, Any):
            return dict(obj)
        else:
            key_type, val_type = cl.__args__
            if key_type is Any:
                val_conv = self._structure_func.dispatch(val_type)
                return {k: val_conv(v, val_type) for k, v in obj.items()}
            elif val_type is Any:
                key_conv = self._structure_func.dispatch(key_type)
                return {key_conv(k, key_type): v for k, v in obj.items()}
            else:
                key_conv = self._structure_func.dispatch(key_type)
                val_conv = self._structure_func.dispatch(val_type)
                return {
                    key_conv(k, key_type): val_conv(v, val_type)
                    for k, v in obj.items()
                }

    def _structure_optional(self, obj, union):
        """Structure ``Optional[X]``: None passes through, else structure as X."""
        if obj is None:
            return None
        union_params = union.__args__
        other = (
            union_params[0] if union_params[1] is NoneType else union_params[1]
        )
        # We can't actually have a Union of a Union, so this is safe.
        return self._structure_func.dispatch(other)(obj, other)

    def _structure_union(self, obj, union):
        """Deal with structuring a union."""
        handler = self._union_struct_registry[union]
        return handler(obj, union)

    def _structure_tuple(self, obj, tup: Type[T]):
        """Deal with converting to a tuple."""
        if tup in (Tuple, tuple):
            tup_params = None
        else:
            tup_params = tup.__args__
        has_ellipsis = tup_params and tup_params[-1] is Ellipsis
        if tup_params is None or (has_ellipsis and tup_params[0] is Any):
            # Just a Tuple. (No generic information.)
            return tuple(obj)
        if has_ellipsis:
            # We're dealing with a homogenous tuple, Tuple[int, ...]
            tup_type = tup_params[0]
            conv = self._structure_func.dispatch(tup_type)
            return tuple(conv(e, tup_type) for e in obj)
        else:
            # We're dealing with a heterogenous tuple.
            return tuple(
                self._structure_func.dispatch(t)(e, t)
                for t, e in zip(tup_params, obj)
            )

    @staticmethod
    def _get_dis_func(union):
        # type: (Type) -> Callable[..., Type]
        """Fetch or try creating a disambiguation function for a union."""
        union_types = union.__args__
        if NoneType in union_types:  # type: ignore
            # We support unions of attrs classes and NoneType higher in the
            # logic.
            union_types = tuple(
                e for e in union_types if e is not NoneType  # type: ignore
            )
        if not all(has(get_origin(e) or e) for e in union_types):
            raise StructureHandlerNotFoundError(
                "Only unions of attrs classes supported "
                "currently. Register a loads hook manually.",
                type_=union,
            )
        return create_uniq_field_dis_func(*union_types)
class GenConverter(Converter):
    """A converter which generates specialized un/structuring functions.

    Instead of the generic handlers in :class:`Converter`, this subclass
    generates a dedicated function per type (registered via direct
    dispatch so subclasses get separately generated functions), enabling
    per-type customization such as ``omit_if_default``, extra-key
    checking and collection overrides.
    """

    __slots__ = (
        "omit_if_default",
        "forbid_extra_keys",
        "type_overrides",
        "_unstruct_collection_overrides",
    )

    def __init__(
        self,
        dict_factory: Callable[[], Any] = dict,
        unstruct_strat: UnstructureStrategy = UnstructureStrategy.AS_DICT,
        omit_if_default: bool = False,
        forbid_extra_keys: bool = False,
        type_overrides: Mapping[Type, AttributeOverride] = {},
        unstruct_collection_overrides: Mapping[Type, Callable] = {},
        prefer_attrib_converters: bool = False,
    ):
        """Create the converter.

        :param omit_if_default: omit attributes equal to their default when
            unstructuring as dicts.
        :param forbid_extra_keys: raise when structuring a mapping with
            unknown keys.
        :param type_overrides: per-attribute-type overrides applied to the
            generated functions.
        :param unstruct_collection_overrides: callables to unstructure
            collection types into, keyed by collection type.
        """
        super().__init__(
            dict_factory=dict_factory,
            unstruct_strat=unstruct_strat,
            prefer_attrib_converters=prefer_attrib_converters,
        )
        self.omit_if_default = omit_if_default
        self.forbid_extra_keys = forbid_extra_keys
        self.type_overrides = dict(type_overrides)
        # Bug fix: work on a copy. The original mutated the caller's
        # mapping in place below, which also corrupted the shared `{}`
        # default argument across GenConverter instances.
        co = dict(unstruct_collection_overrides)
        self._unstruct_collection_overrides = co
        # Do a little post-processing magic to make things easier for users.
        # abc.Set overrides, if defined, apply to abc.MutableSets and sets
        if Set in co:
            if MutableSet not in co:
                co[MutableSet] = co[Set]
                co[AbcMutableSet] = co[Set]  # For 3.7/3.8 compatibility.
            if FrozenSetSubscriptable not in co:
                co[FrozenSetSubscriptable] = co[Set]
        # abc.MutableSet overrides, if defined, apply to sets
        if MutableSet in co:
            if set not in co:
                co[set] = co[MutableSet]
            if FrozenSetSubscriptable in co:
                co[frozenset] = co[
                    FrozenSetSubscriptable
                ]  # For 3.7/3.8 compatibility.
        # abc.Sequence overrides, if defined, can apply to MutableSequences,
        # lists and tuples
        if Sequence in co:
            if MutableSequence not in co:
                co[MutableSequence] = co[Sequence]
            if tuple not in co:
                co[tuple] = co[Sequence]
        # abc.MutableSequence overrides, if defined, can apply to lists
        if MutableSequence in co:
            if list not in co:
                co[list] = co[MutableSequence]
        # abc.Mapping overrides, if defined, can apply to MutableMappings
        if Mapping in co:
            if MutableMapping not in co:
                co[MutableMapping] = co[Mapping]
        # abc.MutableMapping overrides, if defined, can apply to dicts
        if MutableMapping in co:
            if dict not in co:
                co[dict] = co[MutableMapping]
        # builtins.dict overrides, if defined, can apply to counters
        if dict in co:
            if Counter not in co:
                co[Counter] = co[dict]
        # NOTE(review): this compares the raw argument; if a plain string
        # such as "asdict" is passed (which the parent accepts), this test
        # won't match -- confirm intended.
        if unstruct_strat is UnstructureStrategy.AS_DICT:
            # Override the attrs handler.
            self.register_unstructure_hook_factory(
                has_with_generic, self.gen_unstructure_attrs_fromdict
            )
            self.register_structure_hook_factory(
                has_with_generic, self.gen_structure_attrs_fromdict
            )
        self.register_unstructure_hook_factory(
            is_annotated, self.gen_unstructure_annotated
        )
        self.register_unstructure_hook_factory(
            is_hetero_tuple, self.gen_unstructure_hetero_tuple
        )
        self.register_unstructure_hook_factory(
            is_sequence, self.gen_unstructure_iterable
        )
        self.register_unstructure_hook_factory(
            is_mapping, self.gen_unstructure_mapping
        )
        self.register_unstructure_hook_factory(
            is_mutable_set,
            lambda cl: self.gen_unstructure_iterable(cl, unstructure_to=set),
        )
        self.register_unstructure_hook_factory(
            is_frozenset,
            lambda cl: self.gen_unstructure_iterable(
                cl, unstructure_to=frozenset
            ),
        )
        self.register_structure_hook_factory(
            is_annotated, self.gen_structure_annotated
        )
        self.register_structure_hook_factory(
            is_mapping, self.gen_structure_mapping
        )
        self.register_structure_hook_factory(
            is_counter, self.gen_structure_counter
        )

    def gen_unstructure_annotated(self, type):
        """Unstructure ``Annotated[X, ...]`` exactly like its origin ``X``."""
        origin = type.__origin__
        h = self._unstructure_func.dispatch(origin)
        return h

    def gen_structure_annotated(self, type):
        """Structure ``Annotated[X, ...]`` exactly like its origin ``X``."""
        origin = type.__origin__
        h = self._structure_func.dispatch(origin)
        return h

    def gen_unstructure_attrs_fromdict(
        self, cl: Type[T]
    ) -> Callable[[T], Dict[str, Any]]:
        """Generate a dict-unstructuring function for an attrs class.

        (Return annotation fixed: this returns the generated hook, not a
        dict.)
        """
        origin = get_origin(cl)
        if origin is not None:
            cl = origin
        attribs = fields(cl)
        if any(isinstance(a.type, str) for a in attribs):
            # PEP 563 annotations - need to be resolved.
            resolve_types(cl)
        attrib_overrides = {
            a.name: self.type_overrides[a.type]
            for a in attribs
            if a.type in self.type_overrides
        }
        h = make_dict_unstructure_fn(
            cl, self, omit_if_default=self.omit_if_default, **attrib_overrides
        )
        return h

    def gen_structure_attrs_fromdict(
        self, cl: Type[T]
    ) -> Callable[[Mapping[str, Any], Any], T]:
        """Generate a dict-structuring function for an attrs class.

        (Return annotation fixed: this returns the generated hook, not an
        instance.)
        """
        attribs = fields(get_origin(cl) if is_generic(cl) else cl)
        if any(isinstance(a.type, str) for a in attribs):
            # PEP 563 annotations - need to be resolved.
            resolve_types(cl)
        attrib_overrides = {
            a.name: self.type_overrides[a.type]
            for a in attribs
            if a.type in self.type_overrides
        }
        h = make_dict_structure_fn(
            cl,
            self,
            _cattrs_forbid_extra_keys=self.forbid_extra_keys,
            _cattrs_prefer_attrib_converters=self._prefer_attrib_converters,
            **attrib_overrides,
        )
        # only direct dispatch so that subclasses get separately generated
        return h

    def gen_unstructure_iterable(self, cl: Any, unstructure_to=None):
        """Generate and register an unstructure hook for an iterable type."""
        unstructure_to = self._unstruct_collection_overrides.get(
            get_origin(cl) or cl, unstructure_to or list
        )
        h = make_iterable_unstructure_fn(
            cl, self, unstructure_to=unstructure_to
        )
        self._unstructure_func.register_cls_list([(cl, h)], direct=True)
        return h

    def gen_unstructure_hetero_tuple(self, cl: Any, unstructure_to=None):
        """Generate and register an unstructure hook for a heterogenous tuple."""
        unstructure_to = self._unstruct_collection_overrides.get(
            get_origin(cl) or cl, unstructure_to or tuple
        )
        h = make_hetero_tuple_unstructure_fn(
            cl, self, unstructure_to=unstructure_to
        )
        self._unstructure_func.register_cls_list([(cl, h)], direct=True)
        return h

    def gen_unstructure_mapping(
        self, cl: Any, unstructure_to=None, key_handler=None
    ):
        """Generate and register an unstructure hook for a mapping type."""
        unstructure_to = self._unstruct_collection_overrides.get(
            get_origin(cl) or cl, unstructure_to or dict
        )
        h = make_mapping_unstructure_fn(
            cl, self, unstructure_to=unstructure_to, key_handler=key_handler
        )
        self._unstructure_func.register_cls_list([(cl, h)], direct=True)
        return h

    def gen_structure_counter(self, cl: Any):
        """Generate and register a structure hook for ``Counter`` types."""
        h = make_mapping_structure_fn(
            cl, self, structure_to=Counter, val_type=int
        )
        self._structure_func.register_cls_list([(cl, h)], direct=True)
        return h

    def gen_structure_mapping(self, cl: Any):
        """Generate and register a structure hook for a mapping type."""
        h = make_mapping_structure_fn(cl, self)
        self._structure_func.register_cls_list([(cl, h)], direct=True)
        return h
| |
#!/usr/bin/python
"""
Program for creating HTML plots
"""
import os
import sys
import json
import time
from readevtlog import *
def write_prefix():
    """Write the static HTML/JS header of the report, up to the opening
    of the generated plot() function body.

    NOTE(review): writes to a global file handle ``f`` which must be
    opened by the caller before any of the write_* helpers run.
    """
    f.write('''
<!doctype html>
<html>
<head>
<title>DNA performance report</title>
<script src="http://code.jquery.com/jquery-latest.min.js"></script>
<script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>
<script src="d3-timeline.js"></script>
<script type="text/javascript">
function plot() {
''')
def make_class_name(name) :
    "Makes a valid CCS class name out of the given string"
    # Keep only alphanumerics, '-' and '_'; drop everything else.
    # I have a feeling this won't work for long...
    pattern = re.compile('[^-_A-Za-z0-9]+')
    return pattern.sub('', name)
def write_timeline_data(logs, conf) :
    "Timeline of execution, data generation"
    # Emits the JavaScript colour setup and the `data` array consumed by
    # d3-timeline. Writes to the global file handle ``f``.
    # NOTE(review): this script uses Python 2 syntax (see the print
    # statement below).
    n = 0  # NOTE(review): unused.
    # Collect all delay types, figure out profile length
    delays = set()
    total_time = 0
    for k in sorted(logs) :
        for e in logs[k].time :
            if conf.get(e.msg,{}).get('ignore', False) :
                continue
            delays.add(e.msg)
            # Profile length = latest start/end timestamp seen.
            if e.t2 != None: total_time = max(e.t2, total_time)
            if e.t1 != None: total_time = max(e.t1, total_time)
    f.write('''
var colorScale = d3.scale.category20().domain({0});'''.format(list(delays)))
    f.write('''
var color; var elems; var i;''')
    # Colour all HTML elements carrying a delay's CSS class to match the
    # timeline colour scale.
    for a in delays:
        f.write('''
color = colorScale("%s");
elems = document.querySelectorAll(".%s");
console.log(elems.length);
for (i = 0; i < elems.length; i++)
elems[i].style.backgroundColor = color;'''
            % (a, make_class_name(a)));
    f.write('''
var data = [''')
    for k in sorted(logs) :
        # Group rows by pid tag so entries of one process stay together.
        def pid_func(e) : return e.tags.get('pid',e.tags2.get('pid',''))
        tt = sorted(logs[k].time, key=pid_func)
        pid = None
        end = 0
        done = set()
        for e in tt :
            # Get configuration, check whether we're supposed to ignore
            # this one.
            if conf.get(e.msg,{}).get('ignore', False) :
                continue
            # Already done?
            if e.msg in done:
                continue
            # Create row; the pid label is only printed on the first row
            # of each process.
            f.write('\n {"label": "%s", type:"%s", times:['
                    % ('' if pid_func(e) == pid else pid_func(e),e.msg))
            pid = pid_func(e)
            # Write entries
            for e2 in tt :
                if e.msg != e2.msg:
                    continue
                # Make sure we have a certain minimum width. This is a hack.
                end = e2.t2
                if end == None or end < e2.t1+total_time/1000:
                    end = e2.t1 + total_time/1000
                # Times are converted from seconds to milliseconds here.
                f.write('''
{"starting_time": %g, "ending_time": %g, "label": "%s", "type": "%s"},'''
                        % (1000*e2.t1, 1000*end, e2.msg if e2.t1 == e.t1 else '', e2.msg))
            f.write('\n ]},')
            done.add(e.msg)
    f.write('''
];''')
    # Figure out a good tick interval
    tickInterval = 1
    for i in [2,5,10,20,30,60,120]:
        if i * 10 > total_time: break
        tickInterval = i
    print "tickInterval= ", tickInterval
    f.write('''
var chart = d3.timeline()
.beginning(-1)
.tickFormat(
{ format: d3.time.format("%%M:%%S"),
tickInterval: %d,
tickTime: d3.time.second,
tickSize: 5 })
.stack().showTimeAxisTick()
.colorProperty('type')
.colors(colorScale)
.margin({left:300, right:20, top:0, bottom:0});
d3.selectAll("#timeline svg").remove();
var svg = d3.select("#timeline").append("svg").attr("width", window.innerWidth-30)
.datum(data).call(chart);''' % (tickInterval))
def write_middle():
    """Close the generated plot() script, hook it to the window load and
    resize events, and write the static CSS for the report body.

    Writes to the global file handle ``f`` (see write_prefix).
    """
    f.write('''
}
window.onload = plot;
window.onresize = plot;
</script>
<style type="text/css">
.axis path,
.axis line {
fill: none;
stroke: black;
shape-rendering: crispEdges;
}
.axis text {
font-family: sans-serif;
font-size: 10px;
}
#timeline text {
font-family: sans-serif;
font-size: 12px;
text-anchor: end;
}
#timeline .timeline-label {
text-anchor: start;
}
table, th, td {
padding: 3px;
}
table {
border-collapse: collapse;
}
td.key {
font-weight: bold;
font-family: serif;
text-align: right;
min-width: 150px;
}
table.program-conf td.val {
font-family: monospace;
}
table.statistics tr.first {
border-top: 1px solid black;
}
table.statistics col.group {
border-left: 1px solid black;
}
table.statistics td {
text-align: center;
min-width: 80px;
}
h4 {
box-shadow: 0 1px 0 rgba(0,0,0,0.1);
}
.hintbased {
background-color: lightgray;
}
.hintokay {
background-color: lightgreen;
}
.hintwarning {
background-color: yellow;
}
.hinterror {
background-color: pink;
}
</style>
</head>
<body>
<div>''')
def write_timeline_body(logs, conf) :
"Timeline of execution, html body"
# Get arguments (we assume they are the same for all nodes - which
# right now they often are)
args = logs[0].get_args()
f.write('''
<h4>Program Configuration</h4>
<table class="program-conf">
<tr><td class="key">Executable:</td><td class="val">{0}</td></tr>
<tr><td class="key">Arguments:</td><td class="val">{1}</td></tr>
</table>'''
.format(args[0], ' '.join(args[1:])))
# Get program environment (see above)
env = logs[0].get_env()
if env.has_key('SLURM_NODEID') :
f.write('''
<h4>SLURM Configuration</h4>
<table class="slurm_conf">
<tr><td class="key">Job:</td><td>{0} {1}, started by {2}</td></tr>
<tr><td class="key">Nodes:</td><td>{3}: {4}</td></tr>
<tr><td class="key">Tasks:</td><td>{5} = {6}</td></tr>
<tr><td class="key">Procs:</td><td>{7}</td></tr>
<tr><td class="key">CPUS:</td><td>{8}</td></tr>
</table>'''
.format(env.get('SLURM_JOB_NAME', '-'),
env.get('SLURM_JOB_UID', '-'),
env.get('SLURM_JOB_USER', '-'),
env.get('SLURM_NNODES', '-'),
env.get('SLURM_JOB_NODELIST', '-'),
env.get('SLURM_NTASKS', '-'),
env.get('SLURM_TASKS_PER_NODE', '-'),
env.get('SLURM_NPROCS', '-'),
env.get('SLURM_JOB_CPUS_PER_NODE', '-')))
# Show timeline
f.write('''
</table>
<h4>Timeline</h4>
<div id="timeline"></div>''')
# Build stats
instances = {}
total_time = {}
total_hints = {}
total_tags = {}
total_tags_time = {}
for k in logs :
t = 0
for e in logs[k].time :
instances[e.msg] = 1 + instances.get(e.msg, 0)
if e.t2 != None :
total_time[e.msg] = (e.t2 - e.t1) + total_time.get(e.msg, 0)
cur_hints = total_hints.get(e.msg, {})
cur_tags = total_tags.get(e.msg, {})
cur_tags_time = total_tags_time.get(e.msg, {})
hint_re = re.compile(r'hint:')
for t in e.tags.iterkeys():
if hint_re.match(t) != None:
cur_hints[t] = int(e.tags[t]) + cur_hints.get(t, 0)
for t in e.tags2.iterkeys():
if hint_re.match(t) == None:
d = e.diff(t)
if d != None: cur_tags[t] = d + cur_tags.get(t, 0)
d = e.diff_t(t)
if d != None: cur_tags_time[t] = d + cur_tags_time.get(t, 0)
total_hints[e.msg] = cur_hints
total_tags[e.msg] = cur_tags
total_tags_time[e.msg] = cur_tags_time
# Make table
f.write('''
</table>
<h4>Statistics</h4>
<table class="statistics">
<colgroup>
<col span="2"/>
<col span="1" class="group"/>
<col span="1" class="group"/>
<col span="4"/>
<col span="1" class="group"/>
<col span="4"/>
</colgroup>
<tr><td></td><th>Instances</th><th>Time</th><th colspan=5>IO</th><th colspan=5>Instructions</th></tr>
<tr><td /><td /><td />
<td /><td>Value</td><td>Expected</td><td>Rate</td><td>Time</td>
<td /><td>Value</td><td>Expected</td><td>Rate</td><td>Time</td></tr>''')
def format_num(rate) :
if rate < 1000 :
return '%d ' % rate
if rate < 1000000 :
return '%.2f k' % (float(rate) / 1000)
if rate < 1000000000 :
return '%.2f M' % (float(rate) / 1000000)
if rate < 1000000000000 :
return '%.2f G' % (float(rate) / 1000000000)
return '%.2f T' % (float(rate) / 1000000000000)
err_threshold = 1.5
warn_threshold = 1.1
class Metric:
"Performance metric"
def __init__(self, name, val, hint, time, unit):
self.name = name; self.val = val; self.hint=hint
self.time = time; self.unit = unit
def valid(self):
return (self.val != None and self.val > 0) or self.hint != None
def error(self):
return max(float(self.val) / self.hint, float(self.hint) / self.val)
def format_name(self):
if self.name == None: return ('', '');
else: return ('', self.name + ':')
def format_val(self):
if self.name == None: return ('', '')
if self.val == None: return ('', '-')
return ('', format_num(self.val) + self.unit)
def format_hint(self):
if self.hint == None: return ('', '');
if self.val == 0 or self.val == None:
return ('hintbased', '[' + format_num(self.hint) + self.unit + ']')
# Check hint discrepancy
if self.error() >= err_threshold:
return ('hinterror', '[' + format_num(self.hint) + self.unit + ']')
elif self.error() >= warn_threshold:
style = 'hintwarning'
else:
style = 'hintokay'
return (style, '[%.1f%%]' % (float(100) * self.val / self.hint));
def format_rate(self):
if self.time == None or self.time == 0: return ('', '')
if self.hint != None and self.hint != 0:
if self.val == 0 or self.val == None: # or self.error() > err_threshold:
return ('hintbased', '[' + format_num(self.hint / self.time) + self.unit + '/s]')
if self.val == None:
return ('', '-')
if self.val == 0:
return ('', '0')
return ('', format_num(self.val / self.time) + self.unit + '/s')
def format_time(self):
if self.time == None: return ('', '')
return ('', 'x %02d:%02d.%03d' % (int(self.time / 60),
int(self.time) % 60,
int(self.time * 1000) % 1000))
# Emit one table section per traced entity `a`: derive aggregate counters,
# build per-category Metric objects, then write them out as HTML rows.
# NOTE(review): Python 2 code (print statements, iterkeys/has_key).
for a in instances.iterkeys() :
    print a, total_hints[a], total_tags[a], total_tags_time[a]
    # Get configuration for this key
    econf = conf.get(a, {})
    if econf.get('ignore', False) :
        continue
    # Derive performance values: synthesize combined op counters from the
    # weighted scalar/SSE/AVX components (weight presumably = lanes per
    # instruction — TODO confirm).
    sumTable = { 'perf:float-ops': { 'perf:scalar-float-ops': 1
                                   , 'perf:sse-float-ops': 4
                                   , 'perf:avx-float-ops': 8
                                   }
               , 'perf:double-ops': { 'perf:scalar-double-ops': 1
                                    , 'perf:sse-double-ops': 2
                                    , 'perf:avx-double-ops': 4
                                    }
               }
    for (sumAttr, weightedVals) in sumTable.items():
        # NOTE(review): `sum` shadows the builtin; left unchanged in this pass.
        sum = 0
        time_sum = 0
        weight_sum = 0
        for (valAttr, weight) in weightedVals.items():
            if total_tags[a].has_key(valAttr):
                sum += weight * total_tags[a][valAttr]
                time_sum += weight * total_tags_time[a][valAttr]
                print total_tags_time[a][valAttr]
                weight_sum += weight
        if time_sum > 0:
            total_tags[a][sumAttr] = sum
            # Reference time for the synthetic counter is the weighted
            # average of the component counters' times.
            total_tags_time[a][sumAttr] = time_sum / weight_sum
    # Put reference values where we can determine them: each byte/op counter
    # listed inherits its reference time from the corresponding *-time tag.
    referenceTable = {
        'cuda:memset-time': ['cuda:memset-bytes'],
        'cuda:memcpy-time-host': ['cuda:memcpy-bytes-host'],
        'cuda:memcpy-time-device': ['cuda:memcpy-bytes-device'],
        'cuda:kernel-time': [ 'cuda:gpu-float-ops'
                            , 'cuda:gpu-float-ops-add'
                            , 'cuda:gpu-float-ops-mul'
                            , 'cuda:gpu-float-ops-fma'
                            , 'cuda:gpu-double-ops'
                            , 'cuda:gpu-double-ops-add'
                            , 'cuda:gpu-double-ops-mul'
                            , 'cuda:gpu-double-ops-fma'
                            , 'cuda:gpu-float-instrs'
                            , 'cuda:gpu-double-instrs'
                            ]
        }
    for (time_attr, val_attrs) in referenceTable.items():
        if total_tags[a].has_key(time_attr):
            for val_attr in val_attrs:
                total_tags_time[a][val_attr] = total_tags[a][time_attr]
    # Calculate metrics
    metrics = {'io':[], 'instr':[] }
    def mk_metric(cat, name, tag, hint_tag, time_factor, unit):
        # Build a Metric for counter `tag` and file it under category `cat`
        # if it has anything to show.
        # Figure out reference time
        if total_tags_time[a].has_key(tag):
            time = float(total_tags_time[a][tag]) / time_factor;
        else:
            time = total_time.get(a,0)
        metric = Metric(name, total_tags[a].get(tag),
                        total_hints[a].get(hint_tag),
                        time, unit);
        if metric.valid():
            metrics.setdefault(cat, []).append(metric)
    # Time divisors; all calls below pass `ns`, so raw reference times are
    # presumably nanoseconds — TODO confirm against the log producer.
    ms = 1000
    us = 1000000
    ns = 1000000000
    mk_metric('io', 'disk read', 'proc:read-bytes', 'hint:read-bytes', ns, 'B');
    mk_metric('io', 'disk write', 'proc:write-bytes', 'hint:write-bytes', ns, 'B');
    mk_metric('io', 'CUDA memset', 'cuda:memset-bytes', None, ns, 'B');
    mk_metric('io', 'CUDA read', 'cuda:memcpy-bytes-host', 'hint:memcpy-bytes-host', ns, 'B');
    mk_metric('io', 'CUDA write', 'cuda:memcpy-bytes-device', 'hint:memcpy-bytes-device', ns, 'B');
    mk_metric('io', 'RAM read', 'perf:mem-read-bytes', 'hint:mem-read-bytes', ns, 'B');
    mk_metric('instr', 'instructions', 'perf:cpu-instructions', None, ns, 'OP');
    mk_metric('instr', 'x87', 'perf:x87-ops', None, ns, 'OP');
    mk_metric('instr', 'float', 'perf:float-ops', 'hint:float-ops', ns, 'OP');
    mk_metric('instr', 'double', 'perf:double-ops', 'hint:double-ops', ns, 'OP');
    mk_metric('instr', 'float (scalar)', 'perf:scalar-float-ops', '', ns, 'OP');
    mk_metric('instr', 'double (scalar)', 'perf:scalar-double-ops', '', ns, 'OP');
    mk_metric('instr', 'float (sse)', 'perf:sse-float-ops', None, ns, 'OP');
    mk_metric('instr', 'double (sse)', 'perf:sse-double-ops', None, ns, 'OP');
    mk_metric('instr', 'float (avx)', 'perf:avx-float-ops', None, ns, 'OP');
    mk_metric('instr', 'double (avx)', 'perf:avx-double-ops', None, ns, 'OP');
    mk_metric('instr', 'float (gpu)', 'cuda:gpu-float-ops', 'hint:gpu-float-ops', ns, 'OP');
    mk_metric('instr', 'instructions (gpu)', 'cuda:gpu-instructions', None, ns, 'OP');
    mk_metric('instr', 'float (gpu add)', 'cuda:gpu-float-ops-add', None, ns, 'OP');
    mk_metric('instr', 'float (gpu mul)', 'cuda:gpu-float-ops-mul', None, ns, 'OP');
    mk_metric('instr', 'float (gpu fma)', 'cuda:gpu-float-ops-fma', None, ns, 'OP');
    mk_metric('instr', 'double (gpu)', 'cuda:gpu-double-ops', 'hint:gpu-double-ops', ns, 'OP');
    mk_metric('instr', 'double (gpu add)', 'cuda:gpu-double-ops-add', None, ns, 'OP');
    mk_metric('instr', 'double (gpu mul)', 'cuda:gpu-double-ops-mul', None, ns, 'OP');
    mk_metric('instr', 'double (gpu fma)', 'cuda:gpu-double-ops-fma', None, ns, 'OP');
    mk_metric('instr', 'float (gpu)?', 'cuda:gpu-float-instrs', None, ns, 'OP');
    mk_metric('instr', 'double (gpu)?', 'cuda:gpu-double-instrs', None, ns, 'OP');
    # Print row(s): one <tr> per metric line; the 'io' and 'instr' columns
    # are padded with an empty default Metric so both lists line up.
    defMetric = Metric(None, None, None, None, '')
    rows = max([1, len(metrics['io']), len(metrics['instr'])])
    for i in range(0, rows):
        time = total_time.get(a, 0)
        ioMetric = instrMetric = defMetric
        if i < len(metrics['io']): ioMetric = metrics['io'][i]
        if i < len(metrics['instr']): instrMetric = metrics['instr'][i]
        row_classes = ''
        # Only the first row of a section carries the key/instances/time cells.
        if i == 0: row_classes += 'first'
        f.write('''
<tr class='%s'>
<td class='key %s'>%s</td><td>%s</td><td>%s</td>
<td class='%s'>%s</td><td class='%s'>%s</td><td class='%s'>%s</td><td class='%s'>%s</td><td class='%s'>%s</td>
<td class='%s'>%s</td><td class='%s'>%s</td><td class='%s'>%s</td><td class='%s'>%s</td><td class='%s'>%s</td>
</tr>'''
                % ((row_classes,
                    make_class_name(a),
                    a if i == 0 else '',
                    '%d' % instances[a] if i == 0 else '',
                    '%02d:%02d.%03d' % (int(time / 60),
                                        int(time) % 60,
                                        int(time * 1000) % 1000)
                    if i == 0 else '') +
                   ioMetric.format_name() +
                   ioMetric.format_val() +
                   ioMetric.format_hint() +
                   ioMetric.format_rate() +
                   ioMetric.format_time() +
                   instrMetric.format_name() +
                   instrMetric.format_val() +
                   instrMetric.format_hint() +
                   instrMetric.format_rate() +
                   instrMetric.format_time()
                   ))
# Close the data table and append the colour legend explaining the css classes
# used by Metric.format_*.
f.write('''
</table>
<h4>Legend</h4>
<table>
<th>Colour<th></td><th>Explanation</th>
<tr><td class='hintokay'>Okay</td><td>Performance as predicted (+-%d%%)</td></tr>
<tr><td class='hintwarning'>Warning</td><td>Medium performance discrepancy (+-%d%%)</td></tr>
<tr><td class='hinterror'>Error</td><td>Large performance discrepancy, assuming data corrupt</td></tr>
<tr><td class='hintbased'>Hint-based</td><td>Hint is used for metric calculation, measurement discarded</td></tr>
</table>
''' % (100*(warn_threshold-1), 100*(err_threshold-1)))
def write_suffix():
    """Write the closing HTML tags, terminating the report document.

    Writes to the module-global output file object `f`.
    """
    f.write('''
</div>
</body>
</html>''')
# Dispatch tables mapping the sub-command name to the functions that emit the
# <script> data section and the <body> section of the report, respectively.
data_commands = {
    "timeline" : write_timeline_data,
    }
body_commands = {
    "timeline" : write_timeline_body,
    }

# Get parameters: <command> <log name> <output file> [<config json>]
cmd = sys.argv[1]
nm = sys.argv[2]
out = sys.argv[3]

# Open input and output files.
logs = read_timelines(nm)
# Module-global on purpose: the write_* helpers all write to `f`.
f = open(out, 'w')

# Load configuration file, if any. The context manager guarantees the file is
# closed even if json.load raises on malformed input.
if len(sys.argv) > 4:
    with open(sys.argv[4], "r") as conf_file:
        conf = json.load(conf_file)
else :
    conf = {}

# Compose HTML file
write_prefix()
data_commands[cmd](logs, conf)
write_middle()
body_commands[cmd](logs, conf)
write_suffix()
f.close()
| |
# Copyright 2014 Rustici Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from datetime import timedelta, datetime
import uuid
import pytz
if __name__ == '__main__':
from test.main import setup_tincan_path
setup_tincan_path()
from tincan import (
Statement,
Agent,
Group,
Verb,
Result,
Context,
Attachment,
SubStatement,
Activity,
StatementRef,
)
class StatementTest(unittest.TestCase):
    """Unit tests for tincan.Statement: construction from kwargs, anonymous
    dicts and typed objects, plus JSON (de)serialization round-trips.
    """

    # Statement properties that default to None when not passed to the
    # constructor; almost every test asserts all-but-one of these are unset.
    _STANDARD_FIELDS = ('id', 'actor', 'verb', 'object',
                        'timestamp', 'stored', 'authority')

    def assertStandardFieldsNone(self, statement, exclude=()):
        """Assert every standard field of `statement` is None except `exclude`."""
        for field in self._STANDARD_FIELDS:
            if field not in exclude:
                self.assertIsNone(getattr(statement, field))

    def test_InitEmpty(self):
        statement = Statement()
        self.assertStandardFieldsNone(statement)

    def test_InitVersion(self):
        statement = Statement(version='test')
        self.assertStandardFieldsNone(statement)
        self.assertEqual(statement.version, 'test')

    def test_InitId(self):
        # String ids are coerced to uuid.UUID.
        statement = Statement(id='016699c6-d600-48a7-96ab-86187498f16f')
        self.assertStandardFieldsNone(statement, exclude=('id',))
        self.assertEqual(statement.id, uuid.UUID('016699c6-d600-48a7-96ab-86187498f16f'))

    def test_InitTimestamp(self):
        # ISO 8601 strings are parsed into timezone-aware datetimes.
        statement = Statement(timestamp="2014-06-23T15:25:00-05:00")
        self.assertStandardFieldsNone(statement, exclude=('timestamp',))
        central = pytz.timezone("US/Central")  # UTC -0500
        dt = central.localize(datetime(2014, 6, 23, 15, 25))
        self.assertEqual(statement.timestamp, dt)

    def test_InitStored(self):
        statement = Statement(stored="2014-06-23T15:25:00-05:00")
        self.assertStandardFieldsNone(statement, exclude=('stored',))
        central = pytz.timezone("US/Central")  # UTC -0500
        dt = central.localize(datetime(2014, 6, 23, 15, 25))
        self.assertEqual(statement.stored, dt)

    def test_InitEmptyActor(self):
        statement = Statement(actor={})
        self.assertStandardFieldsNone(statement, exclude=('actor',))
        self.assertIsInstance(statement.actor, Agent)

    def test_InitEmptyVerb(self):
        statement = Statement(verb={})
        self.assertStandardFieldsNone(statement, exclude=('verb',))
        self.assertIsInstance(statement.verb, Verb)

    def test_InitEmptyObject(self):
        # A bare dict object defaults to Activity.
        statement = Statement(object={})
        self.assertStandardFieldsNone(statement, exclude=('object',))
        self.assertIsInstance(statement.object, Activity)

    def test_InitEmptyAuthority(self):
        statement = Statement(authority={})
        self.assertStandardFieldsNone(statement, exclude=('authority',))
        self.assertIsInstance(statement.authority, Agent)

    def test_InitEmptyResult(self):
        statement = Statement(result={})
        self.assertStandardFieldsNone(statement)
        self.assertIsInstance(statement.result, Result)

    def test_InitEmptyContext(self):
        statement = Statement(context={})
        self.assertStandardFieldsNone(statement)
        self.assertIsInstance(statement.context, Context)

    def test_InitEmptyAttachments(self):
        statement = Statement(attachments=[])
        self.assertStandardFieldsNone(statement)
        self.assertEqual(statement.attachments, [])

    def test_InitAnonAgentActor(self):
        statement = Statement(actor={'name': 'test'})
        self.assertStandardFieldsNone(statement, exclude=('actor',))
        self.agentVerificationHelper(statement.actor)

    def test_InitAnonGroupActor(self):
        statement = Statement(actor={'member': [Agent(name='test')], 'object_type': 'Group'})
        self.assertStandardFieldsNone(statement, exclude=('actor',))
        self.groupVerificationHelper(statement.actor)

    def test_InitAnonVerb(self):
        statement = Statement(verb={'id': 'test'})
        self.assertStandardFieldsNone(statement, exclude=('verb',))
        self.verbVerificationHelper(statement.verb)

    def test_InitAnonObject(self):
        statement = Statement(object={'id': 'test'})
        self.assertStandardFieldsNone(statement, exclude=('object',))
        self.activityVerificationHelper(statement.object)

    def test_InitAnonAgentObject(self):
        statement = Statement(object={'object_type': 'Agent', 'name': 'test'})
        self.assertStandardFieldsNone(statement, exclude=('object',))
        self.agentVerificationHelper(statement.object)

    def test_InitDifferentNamingObject(self):
        # camelCase 'objectType' must be accepted just like snake_case.
        statement = Statement(object={'objectType': 'Agent', 'name': 'test'})
        self.assertStandardFieldsNone(statement, exclude=('object',))
        self.agentVerificationHelper(statement.object)

    def test_InitAnonAuthority(self):
        statement = Statement(authority={'name': 'test'})
        self.assertStandardFieldsNone(statement, exclude=('authority',))
        self.agentVerificationHelper(statement.authority)

    def test_InitAnonResult(self):
        statement = Statement(result={'duration': timedelta(days=7)})
        self.assertStandardFieldsNone(statement)
        self.resultVerificationHelper(statement.result)

    def test_InitAnonContext(self):
        statement = Statement(context={'registration': '016699c6-d600-48a7-96ab-86187498f16f'})
        self.assertStandardFieldsNone(statement)
        self.contextVerificationHelper(statement.context)

    def test_InitAnonAttachments(self):
        statement = Statement(attachments=[{'usage_type': 'test'}])
        self.assertStandardFieldsNone(statement)
        for k in statement.attachments:
            self.attachmentVerificationHelper(k)

    def test_InitAgentActor(self):
        statement = Statement(actor=Agent(name='test'))
        self.assertStandardFieldsNone(statement, exclude=('actor',))
        self.agentVerificationHelper(statement.actor)

    def test_InitGroupActor(self):
        statement = Statement(actor=Group(member=[Agent(name='test')]))
        self.assertStandardFieldsNone(statement, exclude=('actor',))
        self.groupVerificationHelper(statement.actor)

    def test_InitVerb(self):
        statement = Statement(verb=Verb(id='test'))
        self.assertStandardFieldsNone(statement, exclude=('verb',))
        self.verbVerificationHelper(statement.verb)

    def test_InitAgentObject(self):
        statement = Statement(object=Agent(name='test'))
        self.assertStandardFieldsNone(statement, exclude=('object',))
        self.agentVerificationHelper(statement.object)

    def test_InitSubStatementObject(self):
        statement = Statement(object=SubStatement(object_type='SubStatement'))
        self.assertStandardFieldsNone(statement, exclude=('object',))
        self.substatementVerificationHelper(statement.object)

    def test_InitStatementRefObject(self):
        statement = Statement(object=StatementRef(object_type='StatementRef'))
        self.assertStandardFieldsNone(statement, exclude=('object',))
        self.statementrefVerificationHelper(statement.object)

    def test_InitActivityObject(self):
        statement = Statement(object=Activity(id='test'))
        self.assertStandardFieldsNone(statement, exclude=('object',))
        self.activityVerificationHelper(statement.object)

    def test_InitAuthority(self):
        statement = Statement(authority=Agent(name='test'))
        self.assertStandardFieldsNone(statement, exclude=('authority',))
        self.agentVerificationHelper(statement.authority)

    def test_InitResult(self):
        statement = Statement(result=Result(duration=timedelta(days=7)))
        self.assertStandardFieldsNone(statement)
        self.resultVerificationHelper(statement.result)

    def test_InitContext(self):
        statement = Statement(context=Context(registration='016699c6-d600-48a7-96ab-86187498f16f'))
        self.assertStandardFieldsNone(statement)
        self.contextVerificationHelper(statement.context)

    def test_InitAttachments(self):
        statement = Statement(attachments=[Attachment(usage_type='test')])
        self.assertStandardFieldsNone(statement)
        for k in statement.attachments:
            self.attachmentVerificationHelper(k)

    def test_InitUnpack(self):
        obj = {'id': '016699c6-d600-48a7-96ab-86187498f16f', 'actor': {'name': 'test'}, 'verb': {'id': 'test'},
               'object': {'object_type': 'Agent', 'name': 'test'}, 'authority': {'name': 'test'},
               'context': {'registration': '016699c6-d600-48a7-96ab-86187498f16f'},
               'attachments': [{'usage_type': 'test'}]}
        statement = Statement(**obj)
        self.assertEqual(statement.id, uuid.UUID('016699c6-d600-48a7-96ab-86187498f16f'))
        self.agentVerificationHelper(statement.actor)
        self.verbVerificationHelper(statement.verb)
        self.agentVerificationHelper(statement.object)
        self.agentVerificationHelper(statement.authority)
        self.contextVerificationHelper(statement.context)
        for k in statement.attachments:
            self.attachmentVerificationHelper(k)

    def test_FromJSON(self):
        json_str = '{"id":"016699c6-d600-48a7-96ab-86187498f16f", "actor":{"name":"test"}, ' \
                   '"verb":{"id":"test"}, "object":{"object_type":"Agent", "name":"test"}, ' \
                   '"authority":{"name":"test"}, "context":{"registration":"016699c6-d600-48a7-96ab-86187498f16f"}, ' \
                   '"attachments":[{"usage_type":"test"}]}'
        statement = Statement.from_json(json_str)
        self.assertEqual(statement.id, uuid.UUID('016699c6-d600-48a7-96ab-86187498f16f'))
        self.agentVerificationHelper(statement.actor)
        self.verbVerificationHelper(statement.verb)
        self.agentVerificationHelper(statement.object)
        self.agentVerificationHelper(statement.authority)
        self.contextVerificationHelper(statement.context)
        for k in statement.attachments:
            self.attachmentVerificationHelper(k)

    def test_ToJSON(self):
        statement = Statement(
            **{'id': '016699c6-d600-48a7-96ab-86187498f16f', 'actor': {'name': 'test'}, 'verb': {'id': 'test'},
               'object': {'object_type': 'Agent', 'name': 'test'}, 'authority': {'name': 'test'},
               'context': {'registration': '016699c6-d600-48a7-96ab-86187498f16f'},
               'attachments': [{'usage_type': 'test'}]})
        # Compare parsed JSON rather than raw strings so key order is irrelevant.
        self.assertEqual(json.loads(statement.to_json()),
                         json.loads('{"verb": {"id": "test"}, '
                                    '"attachments": [{"usageType": "test"}], '
                                    '"object": {"name": "test", "objectType": "Agent"}, '
                                    '"actor": {"name": "test", "objectType": "Agent"}, '
                                    '"version": "1.0.3", '
                                    '"authority": {"name": "test", "objectType": "Agent"}, '
                                    '"context": {"registration": "016699c6-d600-48a7-96ab-86187498f16f"}, '
                                    '"id": "016699c6-d600-48a7-96ab-86187498f16f"}'))

    def test_ToJSONEmpty(self):
        statement = Statement()
        self.assertEqual(json.loads(statement.to_json()), json.loads('{"version": "1.0.3"}'))

    def test_FromJSONToJSON(self):
        json_str = '{"id":"016699c6-d600-48a7-96ab-86187498f16f", ' \
                   '"actor": {"name":"test"}, ' \
                   '"verb": {"id":"test"}, ' \
                   '"object": {"object_type":"Agent", "name":"test"}, ' \
                   '"authority":{ "name":"test"}, ' \
                   '"context": {"registration":"016699c6-d600-48a7-96ab-86187498f16f"}, ' \
                   '"attachments":[{"usage_type":"test"}]}'
        statement = Statement.from_json(json_str)
        self.assertEqual(statement.id, uuid.UUID('016699c6-d600-48a7-96ab-86187498f16f'))
        self.agentVerificationHelper(statement.actor)
        self.verbVerificationHelper(statement.verb)
        self.agentVerificationHelper(statement.object)
        self.agentVerificationHelper(statement.authority)
        self.contextVerificationHelper(statement.context)
        for k in statement.attachments:
            self.attachmentVerificationHelper(k)
        self.assertEqual(json.loads(statement.to_json()),
                         json.loads('{"verb": {"id": "test"}, '
                                    '"attachments": [{"usageType": "test"}], '
                                    '"object": {"name": "test", "objectType": "Agent"}, '
                                    '"actor": {"name": "test", "objectType": "Agent"}, '
                                    '"version": "1.0.3", '
                                    '"authority": {"name": "test", "objectType": "Agent"}, '
                                    '"context": {"registration": "016699c6-d600-48a7-96ab-86187498f16f"}, '
                                    '"id": "016699c6-d600-48a7-96ab-86187498f16f"}'))

    def test_ExceptionInvalidUUID(self):
        with self.assertRaises(ValueError):
            Statement(id='badtest')

    # --- shared verification helpers ---------------------------------------

    def agentVerificationHelper(self, value):
        """Check that `value` is an Agent named 'test'."""
        self.assertIsInstance(value, Agent)
        self.assertEqual(value.name, 'test')

    def groupVerificationHelper(self, value):
        """Check that `value` is a Group whose members are Agents named 'test'."""
        self.assertIsInstance(value, Group)
        for k in value.member:
            self.assertIsInstance(k, Agent)
            self.assertEqual(k.name, 'test')

    def verbVerificationHelper(self, value):
        self.assertIsInstance(value, Verb)
        self.assertEqual(value.id, 'test')

    def resultVerificationHelper(self, value):
        self.assertIsInstance(value, Result)
        self.assertEqual(value.duration, timedelta(days=7))

    def contextVerificationHelper(self, value):
        self.assertIsInstance(value, Context)
        self.assertEqual(value.registration, uuid.UUID('016699c6-d600-48a7-96ab-86187498f16f'))

    def attachmentVerificationHelper(self, value):
        self.assertIsInstance(value, Attachment)
        self.assertEqual(value.usage_type, 'test')

    def substatementVerificationHelper(self, value):
        self.assertIsInstance(value, SubStatement)
        self.assertEqual(value.object_type, 'SubStatement')

    def statementrefVerificationHelper(self, value):
        self.assertIsInstance(value, StatementRef)
        self.assertEqual(value.object_type, 'StatementRef')

    def activityVerificationHelper(self, value):
        self.assertIsInstance(value, Activity)
        self.assertEqual(value.id, 'test')
if __name__ == '__main__':
    # Build and run the suite explicitly so the runner's verbosity is controlled.
    suite = unittest.TestLoader().loadTestsFromTestCase(StatementTest)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
| |
# Copyright 2015 Conchylicultor. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Main script. See README.md for more information
Use python 3
"""
import argparse # Command line parsing
import configparser # Saving the models parameters
import datetime # Chronometer
import os # Files management
import tensorflow as tf
import numpy as np
import math
from tqdm import tqdm # Progress bar
from tensorflow.python import debug as tf_debug
from chatbot.textdata import TextData
from chatbot.model import Model
class Chatbot:
"""
Main class which launch the training or testing mode
"""
class TestMode:
    """ Simple structure representing the different testing modes
    """
    ALL = 'all'  # Batch mode: answer every sentence from the test file
    INTERACTIVE = 'interactive'  # The user can write his own questions
    DAEMON = 'daemon'  # The chatbot runs on background and can regularly be called to predict something
def __init__(self):
    """Initialize empty default state; the heavy objects (dataset, model,
    session) are only constructed later in main().
    """
    # Model/dataset parameters
    self.args = None  # argparse.Namespace, filled by parseArgs() in main()
    # Task specific object
    self.textData = None  # Dataset
    self.model = None  # Sequence to sequence model
    # Tensorflow utilities for convenience saving/logging
    self.writer = None
    self.saver = None
    self.modelDir = ''  # Where the model is saved
    self.globStep = 0  # Represent the number of iteration for the current model
    # TensorFlow main session (we keep track for the daemon)
    self.sess = None
    # Filename and directories constants
    self.MODEL_DIR_BASE = 'save/model'
    self.MODEL_NAME_BASE = 'model'
    self.MODEL_EXT = '.ckpt'
    self.CONFIG_FILENAME = 'params.ini'
    self.CONFIG_VERSION = '0.4'  # presumably bumped when the saved-parameter format changes — TODO confirm
    self.TEST_IN_NAME = 'data/test/samples.txt'
    self.TEST_OUT_SUFFIX = '_predictions.txt'
    self.SENTENCES_PREFIX = ['Q: ', 'A: ']  # Prefixes used when printing question/answer pairs
@staticmethod
def parseArgs(args):
    """
    Parse the arguments from the given command line

    Args:
        args (list<str>): List of arguments to parse. If None, the default sys.argv will be parsed

    Returns:
        argparse.Namespace: the parsed arguments
    """
    parser = argparse.ArgumentParser()

    # Global options
    globalArgs = parser.add_argument_group('Global options')
    globalArgs.add_argument('--test',
                            nargs='?',
                            choices=[Chatbot.TestMode.ALL, Chatbot.TestMode.INTERACTIVE, Chatbot.TestMode.DAEMON],
                            const=Chatbot.TestMode.ALL, default=None,
                            help='if present, launch the program try to answer all sentences from data/test/ with'
                                 ' the defined model(s), in interactive mode, the user can wrote his own sentences,'
                                 ' use daemon mode to integrate the chatbot in another program')
    globalArgs.add_argument('--createDataset', action='store_true', help='if present, the program will only generate the dataset from the corpus (no training/testing)')
    globalArgs.add_argument('--playDataset', type=int, nargs='?', const=10, default=None, help='if set, the program will randomly play some samples(can be use conjointly with createDataset if this is the only action you want to perform)')
    globalArgs.add_argument('--reset', action='store_true', help='use this if you want to ignore the previous model present on the model directory (Warning: the model will be destroyed with all the folder content)')
    globalArgs.add_argument('--verbose', action='store_true', help='When testing, will plot the outputs at the same time they are computed')
    globalArgs.add_argument('--debug', action='store_true', help='run DeepQA with Tensorflow debug mode. Read TF documentation for more details on this.')
    globalArgs.add_argument('--keepAll', action='store_true', help='If this option is set, all saved model will be kept (Warning: make sure you have enough free disk space or increase saveEvery)')  # TODO: Add an option to delimit the max size
    globalArgs.add_argument('--modelTag', type=str, default=None, help='tag to differentiate which model to store/load')
    globalArgs.add_argument('--rootDir', type=str, default=None, help='folder where to look for the models and data')
    globalArgs.add_argument('--watsonMode', action='store_true', help='Inverse the questions and answer when training (the network try to guess the question)')
    globalArgs.add_argument('--autoEncode', action='store_true', help='Randomly pick the question or the answer and use it both as input and output')
    globalArgs.add_argument('--device', type=str, default=None, help='\'gpu\' or \'cpu\' (Warning: make sure you have enough free RAM), allow to choose on which hardware run the model')
    globalArgs.add_argument('--seed', type=int, default=None, help='random seed for replication')

    # Dataset options
    datasetArgs = parser.add_argument_group('Dataset options')
    datasetArgs.add_argument('--corpus', choices=TextData.corpusChoices(), default=TextData.corpusChoices()[0], help='corpus on which extract the dataset.')
    datasetArgs.add_argument('--datasetTag', type=str, default='', help='add a tag to the dataset (file where to load the vocabulary and the precomputed samples, not the original corpus). Useful to manage multiple versions. Also used to define the file used for the lightweight format.')  # The samples are computed from the corpus if it does not exist already. There are saved in \'data/samples/\'
    datasetArgs.add_argument('--ratioDataset', type=float, default=1.0, help='ratio of dataset used to avoid using the whole dataset')  # Not implemented, useless ?
    datasetArgs.add_argument('--maxLength', type=int, default=10, help='maximum length of the sentence (for input and output), define number of maximum step of the RNN')
    datasetArgs.add_argument('--lightweightFile', type=str, default=None, help='file containing our lightweight-formatted corpus')

    # Network options (Warning: if modifying something here, also make the change on save/loadParams() )
    nnArgs = parser.add_argument_group('Network options', 'architecture related option')
    nnArgs.add_argument('--hiddenSize', type=int, default=256, help='number of hidden units in each RNN cell')
    nnArgs.add_argument('--numLayers', type=int, default=2, help='number of rnn layers')
    nnArgs.add_argument('--embeddingSize', type=int, default=32, help='embedding size of the word representation')
    nnArgs.add_argument('--initEmbeddings', action='store_true', help='if present, the program will initialize the embeddings with pre-trained word2vec vectors')
    nnArgs.add_argument('--softmaxSamples', type=int, default=0, help='Number of samples in the sampled softmax loss function. A value of 0 deactivates sampled softmax')

    # Training options
    trainingArgs = parser.add_argument_group('Training options')
    trainingArgs.add_argument('--numEpochs', type=int, default=30, help='maximum number of epochs to run')
    trainingArgs.add_argument('--saveEvery', type=int, default=1000, help='nb of mini-batch step before creating a model checkpoint')
    trainingArgs.add_argument('--batchSize', type=int, default=10, help='mini-batch size')
    trainingArgs.add_argument('--learningRate', type=float, default=0.001, help='Learning rate')

    return parser.parse_args(args)
def main(self, args=None):
    """
    Launch the training and/or the interactive mode
    Args:
        args (list<str>): command line arguments to parse (None: use sys.argv)
    """
    print('Welcome to DeepQA v0.1 !')
    print()
    print('TensorFlow detected: v{}'.format(tf.__version__))

    # General initialisation
    self.args = self.parseArgs(args)
    if not self.args.rootDir:
        self.args.rootDir = os.getcwd()  # Use the current working directory

    #tf.logging.set_verbosity(tf.logging.INFO) # DEBUG, INFO, WARN (default), ERROR, or FATAL

    self.loadModelParams()  # Update the self.modelDir and self.globStep, for now, not used when loading Model (but need to be called before _getSummaryName)

    # Build/load the dataset (vocabulary, conversations, ...)
    self.textData = TextData(self.args)
    # TODO: Add a mode where we can force the input of the decoder // Try to visualize the predictions for
    # each word of the vocabulary / decoder input
    # TODO: For now, the model are trained for a specific dataset (because of the maxLength which define the
    # vocabulary). Add a compatibility mode which allow to launch a model trained on a different vocabulary (
    # remap the word2id/id2word variables).
    if self.args.createDataset:
        print('Dataset created! Thanks for using this program')
        return  # No need to go further

    # Prepare the model (placed on the requested device, if any)
    with tf.device(self.getDevice()):
        self.model = Model(self.args, self.textData)

    # Saver/summaries
    self.writer = tf.summary.FileWriter(self._getSummaryName())
    self.saver = tf.train.Saver(max_to_keep=200, write_version=tf.train.SaverDef.V1)  # TODO: See GitHub for format name issue (when restoring the model)

    # TODO: Fixed seed (WARNING: If dataset shuffling, make sure to do that after saving the
    # dataset, otherwise, all which cames after the shuffling won't be replicable when
    # reloading the dataset). How to restore the seed after loading ??
    # Also fix seed for random.shuffle (does it works globally for all files ?)

    # Running session
    self.sess = tf.Session(config=tf.ConfigProto(
        allow_soft_placement=True,  # Allows backup device for non GPU-available operations (when forcing GPU)
        log_device_placement=False)  # Too verbose ?
    )  # TODO: Replace all sess by self.sess (not necessary a good idea) ?

    if self.args.debug:
        # Wrap the session inside the TensorFlow CLI debugger
        self.sess = tf_debug.LocalCLIDebugWrapperSession(self.sess)
        self.sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)

    print('Initialize variables...')
    self.sess.run(tf.global_variables_initializer())

    # Reload the model eventually (if it exist.), on testing mode, the models are not loaded here (but in predictTestset)
    if self.args.test != Chatbot.TestMode.ALL:
        self.managePreviousModel(self.sess)

    # Initialize embeddings with pre-trained word2vec vectors
    if self.args.initEmbeddings:
        print("Loading pre-trained embeddings from GoogleNews-vectors-negative300.bin")
        self.loadEmbedding(self.sess)

    # Dispatch on the requested mode
    if self.args.test:
        if self.args.test == Chatbot.TestMode.INTERACTIVE:
            self.mainTestInteractive(self.sess)
        elif self.args.test == Chatbot.TestMode.ALL:
            print('Start predicting...')
            self.predictTestset(self.sess)
            print('All predictions done')
        elif self.args.test == Chatbot.TestMode.DAEMON:
            print('Daemon mode, running in background...')
        else:
            raise RuntimeError('Unknown test mode: {}'.format(self.args.test))  # Should never happen
    else:
        self.mainTrain(self.sess)

    if self.args.test != Chatbot.TestMode.DAEMON:
        # In daemon mode the session must stay open for later predictions
        self.sess.close()
        print("The End! Thanks for using this program")
def mainTrain(self, sess):
    """ Training loop
    Args:
        sess: The current running session
    """
    # Specific training dependent loading
    self.textData.makeLighter(self.args.ratioDataset)  # Limit the number of training samples

    mergedSummaries = tf.summary.merge_all()  # Define the summary operator (Warning: Won't appear on the tensorboard graph)
    if self.globStep == 0:  # Not restoring from previous run
        self.writer.add_graph(sess.graph)  # First time only

    # If restoring a model, restore the progression bar ? and current batch ?

    print('Start training (press Ctrl+C to save and exit)...')

    try:  # If the user exit while training, we still try to save the model
        for e in range(self.args.numEpochs):
            print()
            print("----- Epoch {}/{} ; (lr={}) -----".format(e+1, self.args.numEpochs, self.args.learningRate))

            batches = self.textData.getBatches()

            # TODO: Also update learning parameters eventually

            tic = datetime.datetime.now()
            for nextBatch in tqdm(batches, desc="Training"):
                # Training pass
                ops, feedDict = self.model.step(nextBatch)
                assert len(ops) == 2  # training, loss
                # Run the optimizer and loss ops together with the merged summaries
                _, loss, summary = sess.run(ops + (mergedSummaries,), feedDict)
                self.writer.add_summary(summary, self.globStep)
                self.globStep += 1

                # Output training status every 100 mini-batches
                if self.globStep % 100 == 0:
                    # math.exp overflows for large losses, hence the guard
                    perplexity = math.exp(float(loss)) if loss < 300 else float("inf")
                    tqdm.write("----- Step %d -- Loss %.2f -- Perplexity %.2f" % (self.globStep, loss, perplexity))

                # Checkpoint
                if self.globStep % self.args.saveEvery == 0:
                    self._saveSession(sess)

            toc = datetime.datetime.now()
            print("Epoch finished in {}".format(toc-tic))  # Warning: Will overflow if an epoch takes more than 24 hours, and the output isn't really nicer
    except (KeyboardInterrupt, SystemExit):  # If the user press Ctrl+C while testing progress
        print('Interruption detected, exiting the program...')

    self._saveSession(sess)  # Ultimate saving before complete exit
def predictTestset(self, sess):
    """ Try predicting the sentences from the samples.txt file.
    The sentences are saved on the modelDir under the same name
    Args:
        sess: The current running session
    """
    # Loading the file to predict
    with open(os.path.join(self.args.rootDir, self.TEST_IN_NAME), 'r') as f:
        lines = f.readlines()

    modelList = self._getModelList()
    if not modelList:
        print('Warning: No model found in \'{}\'. Please train a model before trying to predict'.format(self.modelDir))
        return

    # Predicting for each model present in modelDir
    for modelName in sorted(modelList):  # TODO: Natural sorting
        print('Restoring previous model from {}'.format(modelName))
        self.saver.restore(sess, modelName)
        print('Testing...')

        saveName = modelName[:-len(self.MODEL_EXT)] + self.TEST_OUT_SUFFIX  # We remove the model extension and add the prediction suffix
        with open(saveName, 'w') as f:
            nbIgnored = 0  # Counts the sentences the model could not encode
            for line in tqdm(lines, desc='Sentences'):
                question = line[:-1]  # Remove the endl character

                answer = self.singlePredict(question)
                if not answer:
                    nbIgnored += 1
                    continue  # Back to the beginning, try again

                # Format: "<prefix0>question\n<prefix1>answer\n\n"
                predString = '{x[0]}{0}\n{x[1]}{1}\n\n'.format(question, self.textData.sequence2str(answer, clean=True), x=self.SENTENCES_PREFIX)
                if self.args.verbose:
                    tqdm.write(predString)
                f.write(predString)
            print('Prediction finished, {}/{} sentences ignored (too long)'.format(nbIgnored, len(lines)))
def mainTestInteractive(self, sess):
    """ Try predicting the sentences that the user will enter in the console
    Args:
        sess: The current running session
    """
    # TODO: If verbose mode, also show similar sentences from the training set with the same words (include in mainTest also)
    # TODO: Also show the top 10 most likely predictions for each predicted output (when verbose mode)
    # TODO: Log the questions asked for latter re-use (merge with test/samples.txt)

    print('Testing: Launch interactive mode:')
    print('')
    print('Welcome to the interactive mode, here you can ask to Deep Q&A the sentence you want. Don\'t have high '
          'expectation. Type \'exit\' or just press ENTER to quit the program. Have fun.')

    # Read-predict-print loop; empty line or 'exit' terminates
    while True:
        question = input(self.SENTENCES_PREFIX[0])
        if question == '' or question == 'exit':
            break

        questionSeq = []  # Will be contain the question as seen by the encoder
        answer = self.singlePredict(question, questionSeq)
        if not answer:
            print('Warning: sentence too long, sorry. Maybe try a simpler sentence.')
            continue  # Back to the beginning, try again

        print('{}{}'.format(self.SENTENCES_PREFIX[1], self.textData.sequence2str(answer, clean=True)))

        if self.args.verbose:
            # Also show the raw encoder input and the un-cleaned answer for debugging
            print(self.textData.batchSeq2str(questionSeq, clean=True, reverse=True))
            print(self.textData.sequence2str(answer))

        print()
def singlePredict(self, question, questionSeq=None):
    """ Predict the sentence
    Args:
        question (str): the raw input sentence
        questionSeq (List<int>): output argument. If given will contain the input batch sequence
    Return:
        list <int>: the word ids corresponding to the answer, or None if the
        sentence could not be encoded (e.g. too long for the model)
    """
    # Create the input batch
    batch = self.textData.sentence2enco(question)
    if not batch:
        return None  # Encoding failed: signal the caller to skip this sentence
    if questionSeq is not None:  # If the caller want to have the real input
        questionSeq.extend(batch.encoderSeqs)

    # Run the model (ops[0] is the prediction output op)
    ops, feedDict = self.model.step(batch)
    output = self.sess.run(ops[0], feedDict)  # TODO: Summarize the output too (histogram, ...)
    answer = self.textData.deco2sentence(output)

    return answer
def daemonPredict(self, sentence):
    """ Return the answer to a given sentence (same as singlePredict() but with additional cleaning)
    Args:
        sentence (str): the raw input sentence
    Return:
        str: the human readable sentence
    """
    # Predict the word-id sequence first, then render it as a cleaned string
    wordIds = self.singlePredict(sentence)
    return self.textData.sequence2str(wordIds, clean=True)
def daemonClose(self):
    """ Shut the daemon down cleanly by releasing the TensorFlow session. """
    print('Exiting the daemon mode...')
    session = self.sess
    session.close()
    print('Daemon closed.')
def loadEmbedding(self, sess):
    """ Initialize embeddings with pre-trained word2vec vectors
    Will modify the embedding weights of the current loaded model
    Uses the GoogleNews pre-trained values (path hardcoded)
    Args:
        sess: the current running session (variables must already be initialized)
    """
    # Fetch embedding variables from model
    with tf.variable_scope("embedding_rnn_seq2seq/RNN/EmbeddingWrapper", reuse=True):
        em_in = tf.get_variable("embedding")
    with tf.variable_scope("embedding_rnn_seq2seq/embedding_rnn_decoder", reuse=True):
        em_out = tf.get_variable("embedding")

    # Disable training for embeddings (keep the pre-trained values frozen)
    variables = tf.get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES)
    variables.remove(em_in)
    variables.remove(em_out)

    # If restoring a model, the checkpoint already contains the embeddings: we can leave here
    if self.globStep != 0:
        return

    # New model, we load the pre-trained word2vec data and initialize embeddings.
    # Binary .bin layout: a text header "<vocab_size> <vector_size>\n", then per word:
    # the word bytes terminated by b' ', followed by vector_size float32 values.
    with open(os.path.join(self.args.rootDir, 'data/word2vec/GoogleNews-vectors-negative300.bin'), "rb", 0) as f:
        header = f.readline()
        vocab_size, vector_size = map(int, header.split())
        binary_len = np.dtype('float32').itemsize * vector_size
        # Words absent from word2vec keep a small random initialization
        initW = np.random.uniform(-0.25, 0.25, (len(self.textData.word2id), vector_size))
        for line in tqdm(range(vocab_size)):
            word = []
            while True:
                ch = f.read(1)
                if ch == b' ':
                    word = b''.join(word).decode('utf-8')
                    break
                if ch != b'\n':  # Skip record separators
                    word.append(ch)
            if word in self.textData.word2id:
                # np.frombuffer replaces np.fromstring (deprecated for binary data)
                initW[self.textData.word2id[word]] = np.frombuffer(f.read(binary_len), dtype='float32')
            else:
                f.read(binary_len)  # Word not in our vocabulary: skip its vector

    # PCA Decomposition to reduce word2vec dimensionality to the model's embedding size
    if self.args.embeddingSize < vector_size:
        U, s, Vt = np.linalg.svd(initW, full_matrices=False)
        # Bug fix: the singular-value matrix must be real (float32), not complex.
        # With dtype=complex the reduced initW became complex and could not be
        # assigned to the float embedding variables below.
        S = np.zeros((vector_size, vector_size), dtype=np.float32)
        S[:vector_size, :vector_size] = np.diag(s)
        initW = np.dot(U[:, :self.args.embeddingSize], S[:self.args.embeddingSize, :self.args.embeddingSize])

    # Initialize input and output embeddings with the (possibly reduced) vectors
    sess.run(em_in.assign(initW))
    sess.run(em_out.assign(initW))
def managePreviousModel(self, sess):
    """ Restore or reset the model, depending of the parameters
    If the destination directory already contains some file, it will handle the conflict as following:
     * If --reset is set, all present files will be removed (warning: no confirmation is asked) and the training
     restart from scratch (globStep & cie reinitialized)
     * Otherwise, it will depend of the directory content. If the directory contains:
       * No model files (only summary logs): works as a reset (restart from scratch)
       * Other model files, but modelName not found (surely keepAll option changed): raise error, the user should
       decide by himself what to do
       * The right model file (eventually some other): no problem, simply resume the training
    In any case, the directory will exist as it has been created by the summary writer
    Args:
        sess: The current running session
    """
    print('WARNING: ', end='')

    modelName = self._getModelName()

    if os.listdir(self.modelDir):
        if self.args.reset:
            print('Reset: Destroying previous model at {}'.format(self.modelDir))
        # Analysing directory content
        elif os.path.exists(modelName):  # Restore the model
            print('Restoring previous model from {}'.format(modelName))
            self.saver.restore(sess, modelName)  # Will crash when --reset is not activated and the model has not been saved yet
        elif self._getModelList():
            # Model files exist but none matches the expected name: let the user decide
            print('Conflict with previous models.')
            raise RuntimeError('Some models are already present in \'{}\'. You should check them first (or re-try with the keepAll flag)'.format(self.modelDir))
        else:  # No other model to conflict with (probably summary files)
            print('No previous model found, but some files found at {}. Cleaning...'.format(self.modelDir))  # Warning: No confirmation asked
            self.args.reset = True

        if self.args.reset:
            # Wipe every file in the model directory (no confirmation)
            fileList = [os.path.join(self.modelDir, f) for f in os.listdir(self.modelDir)]
            for f in fileList:
                print('Removing {}'.format(f))
                os.remove(f)
    else:
        print('No previous model found, starting from clean directory: {}'.format(self.modelDir))
def _saveSession(self, sess):
    """ Save the model parameters and the variables
    Args:
        sess: the current session
    """
    tqdm.write('Checkpoint reached: saving model (don\'t stop the run)...')
    self.saveModelParams()  # Keep the config file in sync with the checkpoint
    self.saver.save(sess, self._getModelName())  # TODO: Put a limit size (ex: 3GB for the modelDir)
    tqdm.write('Model saved.')
def _getModelList(self):
""" Return the list of the model files inside the model directory
"""
return [os.path.join(self.modelDir, f) for f in os.listdir(self.modelDir) if f.endswith(self.MODEL_EXT)]
def loadModelParams(self):
    """ Load some values associated with the current model, like the current globStep value
    For now, this function does not need to be called before loading the model (no parameters restored). However,
    the modelDir name will be initialized here so it is required to call this function before managePreviousModel(),
    _getModelName() or _getSummaryName()
    Warning: if you modify this function, make sure the changes mirror saveModelParams, also check if the parameters
    should be reset in managePreviousModel
    """
    # Compute the current model path
    self.modelDir = os.path.join(self.args.rootDir, self.MODEL_DIR_BASE)
    if self.args.modelTag:
        self.modelDir += '-' + self.args.modelTag

    # If there is a previous model, restore some parameters
    configName = os.path.join(self.modelDir, self.CONFIG_FILENAME)
    if not self.args.reset and not self.args.createDataset and os.path.exists(configName):
        # Loading
        config = configparser.ConfigParser()
        config.read(configName)

        # Check the version
        currentVersion = config['General'].get('version')
        if currentVersion != self.CONFIG_VERSION:
            raise UserWarning('Present configuration version {0} does not match {1}. You can try manual changes on \'{2}\''.format(currentVersion, self.CONFIG_VERSION, configName))

        # Restoring the parameters
        self.globStep = config['General'].getint('globStep')
        self.args.maxLength = config['General'].getint('maxLength')  # We need to restore the model length because of the textData associated and the vocabulary size (TODO: Compatibility mode between different maxLength)
        self.args.watsonMode = config['General'].getboolean('watsonMode')
        self.args.autoEncode = config['General'].getboolean('autoEncode')
        self.args.corpus = config['General'].get('corpus')
        self.args.datasetTag = config['General'].get('datasetTag', '')
        self.args.hiddenSize = config['Network'].getint('hiddenSize')
        self.args.numLayers = config['Network'].getint('numLayers')
        self.args.embeddingSize = config['Network'].getint('embeddingSize')
        self.args.initEmbeddings = config['Network'].getboolean('initEmbeddings')
        self.args.softmaxSamples = config['Network'].getint('softmaxSamples')

        # No restoring for training params, batch size or other non model dependent parameters

        # Show the restored params
        print()
        print('Warning: Restoring parameters:')
        print('globStep: {}'.format(self.globStep))
        print('maxLength: {}'.format(self.args.maxLength))
        print('watsonMode: {}'.format(self.args.watsonMode))
        print('autoEncode: {}'.format(self.args.autoEncode))
        print('corpus: {}'.format(self.args.corpus))
        print('datasetTag: {}'.format(self.args.datasetTag))
        print('hiddenSize: {}'.format(self.args.hiddenSize))
        print('numLayers: {}'.format(self.args.numLayers))
        print('embeddingSize: {}'.format(self.args.embeddingSize))
        print('initEmbeddings: {}'.format(self.args.initEmbeddings))
        print('softmaxSamples: {}'.format(self.args.softmaxSamples))
        print()

    # For now, not arbitrary independent maxLength between encoder and decoder
    self.args.maxLengthEnco = self.args.maxLength
    self.args.maxLengthDeco = self.args.maxLength + 2

    if self.args.watsonMode:
        # Watson mode swaps the question/answer display prefixes
        self.SENTENCES_PREFIX.reverse()
def saveModelParams(self):
    """ Save the params of the model, like the current globStep value
    Warning: if you modify this function, make sure the changes mirror loadModelParams
    """
    config = configparser.ConfigParser()
    # Model-defining parameters (restored when the model is reloaded)
    config['General'] = {
        'version': self.CONFIG_VERSION,
        'globStep': str(self.globStep),
        'maxLength': str(self.args.maxLength),
        'watsonMode': str(self.args.watsonMode),
        'autoEncode': str(self.args.autoEncode),
        'corpus': str(self.args.corpus),
        'datasetTag': str(self.args.datasetTag),
    }
    config['Network'] = {
        'hiddenSize': str(self.args.hiddenSize),
        'numLayers': str(self.args.numLayers),
        'embeddingSize': str(self.args.embeddingSize),
        'initEmbeddings': str(self.args.initEmbeddings),
        'softmaxSamples': str(self.args.softmaxSamples),
    }
    # Keep track of the learning params (but without restoring them)
    config['Training (won\'t be restored)'] = {
        'learningRate': str(self.args.learningRate),
        'batchSize': str(self.args.batchSize),
    }
    with open(os.path.join(self.modelDir, self.CONFIG_FILENAME), 'w') as configFile:
        config.write(configFile)
def _getSummaryName(self):
    """ Parse the argument to decide were to save the summary, at the same place that the model
    The folder could already contain logs if we restore the training, those will be merged
    Return:
        str: The path and name of the summary
    """
    # Summaries are written alongside the model checkpoints
    return self.modelDir
def _getModelName(self):
""" Parse the argument to decide were to save/load the model
This function is called at each checkpoint and the first time the model is load. If keepAll option is set, the
globStep value will be included in the name.
Return:
str: The path and name were the model need to be saved
"""
modelName = os.path.join(self.modelDir, self.MODEL_NAME_BASE)
if self.args.keepAll: # We do not erase the previously saved model by including the current step on the name
modelName += '-' + str(self.globStep)
return modelName + self.MODEL_EXT
def getDevice(self):
    """ Parse the argument to decide on which device run the model
    Return:
        str: The name of the device on which run the program
    """
    # Known device names map directly to TensorFlow device strings;
    # None (no --device flag) keeps the default placement.
    knownDevices = {'cpu': '/cpu:0', 'gpu': '/gpu:0', None: None}
    if self.args.device in knownDevices:
        return knownDevices[self.args.device]
    print('Warning: Error in the device name: {}, use the default device'.format(self.args.device))
    return None
| |
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import sys
import mock
from neutron_lib import constants
from oslo_config import cfg
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb
from neutron.plugins.ml2.drivers.linuxbridge.agent.common \
import constants as lconst
from neutron.plugins.ml2.drivers.linuxbridge.agent \
import linuxbridge_neutron_agent
from neutron.tests import base
# Addresses used to configure the fake agent under test
LOCAL_IP = '192.168.0.33'
LOCAL_IPV6 = '2001:db8:1::33'
VXLAN_GROUPV6 = 'ff05::/120'
# Fabricated port/device/network identifiers
PORT_1 = 'abcdef01-12ddssdfds-fdsfsd'
DEVICE_1 = 'tapabcdef01-12'
NETWORK_ID = '57653b20-ed5b-4ed0-a31d-06f84e3fd909'
# physnet0 maps to a pre-existing bridge; physnet1 maps to a plain interface
BRIDGE_MAPPING_VALUE = 'br-eth2'
BRIDGE_MAPPINGS = {'physnet0': BRIDGE_MAPPING_VALUE}
INTERFACE_MAPPINGS = {'physnet1': 'eth1'}
# Device returned by the mocked IPWrapper.get_device_by_ip()
FAKE_DEFAULT_DEV = mock.Mock()
FAKE_DEFAULT_DEV.name = 'eth1'
PORT_DATA = {
    "port_id": PORT_1,
    "device": DEVICE_1
}
class FakeIpLinkCommand(object):
    """Minimal stand-in for ip_lib's link command interface."""

    def set_up(self):
        """No-op: pretend the link was brought up."""

    def set_mtu(self, mtu):
        """No-op: pretend the MTU was applied."""
class FakeIpDevice(object):
    """Minimal stand-in for an ip_lib device: exposes `link` and disable_ipv6()."""

    def __init__(self):
        # Mirror the real device API: a `link` attribute with link commands
        self.link = FakeIpLinkCommand()

    def disable_ipv6(self):
        """No-op: pretend IPv6 was disabled on the device."""
def get_linuxbridge_manager(bridge_mappings, interface_mappings):
    """Build a LinuxBridgeManager with external lookups mocked out.

    Patches device-by-IP resolution, device existence checks and VXLAN
    support detection so the manager can be constructed without touching
    the host network stack.
    """
    with mock.patch.object(ip_lib.IPWrapper, 'get_device_by_ip',
                           return_value=FAKE_DEFAULT_DEV),\
            mock.patch.object(ip_lib, 'device_exists', return_value=True),\
            mock.patch.object(linuxbridge_neutron_agent.LinuxBridgeManager,
                              'check_vxlan_support'):
        cfg.CONF.set_override('local_ip', LOCAL_IP, 'VXLAN')
        return linuxbridge_neutron_agent.LinuxBridgeManager(
            bridge_mappings, interface_mappings)
class TestLinuxBridge(base.BaseTestCase):
    """Tests for ensure_physical_in_bridge() dispatching by network type."""

    def setUp(self):
        super(TestLinuxBridge, self).setUp()
        self.linux_bridge = get_linuxbridge_manager(
            BRIDGE_MAPPINGS, INTERFACE_MAPPINGS)

    def test_ensure_physical_in_bridge_invalid(self):
        """An unknown physical network cannot be wired into a bridge."""
        outcome = self.linux_bridge.ensure_physical_in_bridge(
            'network_id', constants.TYPE_VLAN, 'physnetx', 7, 1450)
        self.assertFalse(outcome)

    def test_ensure_physical_in_bridge_flat(self):
        """Flat networks are delegated to ensure_flat_bridge()."""
        with mock.patch.object(self.linux_bridge,
                               'ensure_flat_bridge') as flat_mock:
            self.linux_bridge.ensure_physical_in_bridge(
                'network_id', constants.TYPE_FLAT, 'physnet1', None, 1450)
            self.assertTrue(flat_mock.called)

    def test_ensure_physical_in_bridge_vlan(self):
        """VLAN networks are delegated to ensure_vlan_bridge()."""
        with mock.patch.object(self.linux_bridge,
                               'ensure_vlan_bridge') as vlan_mock:
            self.linux_bridge.ensure_physical_in_bridge(
                'network_id', constants.TYPE_VLAN, 'physnet1', 7, 1450)
            self.assertTrue(vlan_mock.called)

    def test_ensure_physical_in_bridge_vxlan(self):
        """VXLAN networks are delegated to ensure_vxlan_bridge()."""
        self.linux_bridge.vxlan_mode = lconst.VXLAN_UCAST
        with mock.patch.object(self.linux_bridge,
                               'ensure_vxlan_bridge') as vxlan_mock:
            self.linux_bridge.ensure_physical_in_bridge(
                'network_id', 'vxlan', 'physnet1', 7, 1450)
            self.assertTrue(vxlan_mock.called)
class TestLinuxBridgeManager(base.BaseTestCase):
def setUp(self):
    super(TestLinuxBridgeManager, self).setUp()
    # Manager built with all host-level lookups mocked out
    self.lbm = get_linuxbridge_manager(
        BRIDGE_MAPPINGS, INTERFACE_MAPPINGS)

def test_local_ip_validation_with_valid_ip(self):
    """A resolvable local_ip returns the matching device."""
    with mock.patch.object(ip_lib.IPWrapper,
                           'get_device_by_ip',
                           return_value=FAKE_DEFAULT_DEV):
        self.lbm.local_ip = LOCAL_IP
        result = self.lbm.get_local_ip_device()
        self.assertEqual(FAKE_DEFAULT_DEV, result)
def test_local_ip_validation_with_invalid_ip(self):
    """An unresolvable local_ip logs an error and exits with status 1."""
    # Fix: the mock was previously bound to the name `exit`, shadowing the
    # builtin; renamed to mock_exit.
    with mock.patch.object(ip_lib.IPWrapper,
                           'get_device_by_ip',
                           return_value=None),\
            mock.patch.object(sys, 'exit') as mock_exit,\
            mock.patch.object(linuxbridge_neutron_agent.LOG,
                              'error') as log:
        self.lbm.local_ip = LOCAL_IP
        self.lbm.get_local_ip_device()
        self.assertEqual(1, log.call_count)
        mock_exit.assert_called_once_with(1)
def _test_vxlan_group_validation(self, bad_local_ip, bad_vxlan_group):
    """Helper: assert an invalid local_ip/vxlan_group pair logs and exits(1).

    Fix: the sys.exit mock was previously bound to the name `exit`,
    shadowing the builtin; renamed to mock_exit.
    """
    with mock.patch.object(ip_lib.IPWrapper,
                           'get_device_by_ip',
                           return_value=FAKE_DEFAULT_DEV),\
            mock.patch.object(sys, 'exit') as mock_exit,\
            mock.patch.object(linuxbridge_neutron_agent.LOG,
                              'error') as log:
        self.lbm.local_ip = bad_local_ip
        cfg.CONF.set_override('vxlan_group', bad_vxlan_group, 'VXLAN')
        self.lbm.validate_vxlan_group_with_local_ip()
        self.assertEqual(1, log.call_count)
        mock_exit.assert_called_once_with(1)
def test_vxlan_group_validation_with_mismatched_local_ip(self):
    """A v4 local IP with a v6 multicast group must be rejected."""
    self._test_vxlan_group_validation(LOCAL_IP, VXLAN_GROUPV6)

def test_vxlan_group_validation_with_unicast_group(self):
    """A unicast vxlan_group address must be rejected."""
    self._test_vxlan_group_validation(LOCAL_IP, '240.0.0.0')

def test_vxlan_group_validation_with_invalid_cidr(self):
    """A malformed CIDR (missing prefix length) must be rejected."""
    self._test_vxlan_group_validation(LOCAL_IP, '224.0.0.1/')

def test_vxlan_group_validation_with_v6_unicast_group(self):
    """A v6 unicast vxlan_group address must be rejected."""
    self._test_vxlan_group_validation(LOCAL_IPV6, '2001:db8::')
def test_get_existing_bridge_name(self):
    """bridge_mappings resolves a known physnet; unknown keys give None."""
    phy_net = 'physnet0'
    self.assertEqual('br-eth2',
                     self.lbm.bridge_mappings.get(phy_net))

    phy_net = ''
    self.assertIsNone(self.lbm.bridge_mappings.get(phy_net))

def test_get_bridge_name(self):
    """Bridge names are 'brq' + the first 11 chars of the network id."""
    nw_id = "123456789101112"
    self.assertEqual("brq" + nw_id[0:11],
                     self.lbm.get_bridge_name(nw_id))
    nw_id = ""
    self.assertEqual("brq", self.lbm.get_bridge_name(nw_id))

def test_get_subinterface_name_backwards_compatibility(self):
    """Short device names keep the plain '<dev>.<vlan>' form."""
    self.assertEqual("abcdefghijklm.1",
                     self.lbm.get_subinterface_name("abcdefghijklm", "1"))
    self.assertEqual("abcdefghijkl.11",
                     self.lbm.get_subinterface_name("abcdefghijkl", "11"))
    self.assertEqual("abcdefghij.1111",
                     self.lbm.get_subinterface_name("abcdefghij",
                                                    "1111"))
def test_get_subinterface_name_advanced(self):
    """Ensure the same hash is used for long interface names.

    If the generated vlan device name would be too long, make sure that
    everything before the '.' is equal. This might be helpful when
    debugging problems.
    """
    max_device_name = "abcdefghijklmno"
    vlan_dev_name1 = self.lbm.get_subinterface_name(max_device_name, "1")
    vlan_dev_name2 = self.lbm.get_subinterface_name(max_device_name,
                                                    "1111")
    self.assertEqual(vlan_dev_name1.partition(".")[0],
                     vlan_dev_name2.partition(".")[0])

def test_get_tap_device_name(self):
    """Tap names are the tap prefix + the first 11 chars of the port id."""
    if_id = "123456789101112"
    self.assertEqual(constants.TAP_DEVICE_PREFIX + if_id[0:11],
                     self.lbm.get_tap_device_name(if_id))
    if_id = ""
    self.assertEqual(constants.TAP_DEVICE_PREFIX,
                     self.lbm.get_tap_device_name(if_id))

def test_get_vxlan_device_name(self):
    """VNIs up to the max yield 'vxlan-<vni>'; beyond it, None."""
    vn_id = constants.MAX_VXLAN_VNI
    self.assertEqual("vxlan-" + str(vn_id),
                     self.lbm.get_vxlan_device_name(vn_id))
    self.assertIsNone(self.lbm.get_vxlan_device_name(vn_id + 1))
def test_get_vxlan_group(self):
    """The VNI selects an address inside the configured multicast CIDR."""
    cfg.CONF.set_override('vxlan_group', '239.1.2.3/24', 'VXLAN')
    vn_id = constants.MAX_VXLAN_VNI
    self.assertEqual('239.1.2.255', self.lbm.get_vxlan_group(vn_id))
    vn_id = 256
    self.assertEqual('239.1.2.0', self.lbm.get_vxlan_group(vn_id))
    vn_id = 257
    self.assertEqual('239.1.2.1', self.lbm.get_vxlan_group(vn_id))

def test_get_vxlan_group_with_multicast_address(self):
    """multicast_ranges override the group for VNIs inside a range."""
    cfg.CONF.set_override('vxlan_group', '239.1.2.3/32', 'VXLAN')
    cfg.CONF.set_override('multicast_ranges',
                          ('224.0.0.10:300:315',
                           '225.0.0.15:400:600'), 'VXLAN')
    vn_id = 300
    self.assertEqual('224.0.0.10', self.lbm.get_vxlan_group(vn_id))
    vn_id = 500
    self.assertEqual('225.0.0.15', self.lbm.get_vxlan_group(vn_id))
    vn_id = 315  # range bounds are inclusive
    self.assertEqual('224.0.0.10', self.lbm.get_vxlan_group(vn_id))
    vn_id = 4000
    # outside of range should fallback to group
    self.assertEqual('239.1.2.3', self.lbm.get_vxlan_group(vn_id))

def test__is_valid_multicast_range(self):
    """Validate '<mcast_addr>:<start_vni>:<end_vni>' range strings."""
    bad_ranges = ['224.0.0.10:330:315', 'x:100:200', '10.0.0.1:100:200',
                  '224.0.0.10:100', '224.0.0.10:100:200:300']
    for r in bad_ranges:
        self.assertFalse(self.lbm._is_valid_multicast_range(r),
                         'range %s should have been invalid' % r)
    good_ranges = ['224.0.0.10:315:330', '224.0.0.0:315:315']
    for r in good_ranges:
        self.assertTrue(self.lbm._is_valid_multicast_range(r),
                        'range %s should have been valid' % r)
    # v4 ranges are bad when a v6 local_ip is present
    self.lbm.local_ip = '2000::1'
    for r in good_ranges:
        self.assertFalse(self.lbm._is_valid_multicast_range(r),
                         'range %s should have been invalid' % r)

def test__match_multicast_range(self):
    """A VNI maps to the address of the range containing it, else None."""
    cfg.CONF.set_override('multicast_ranges',
                          ('224.0.0.10:300:315',
                           '225.0.0.15:400:600'), 'VXLAN')
    self.assertEqual('224.0.0.10', self.lbm._match_multicast_range(307))
    self.assertEqual('225.0.0.15', self.lbm._match_multicast_range(407))
    self.assertIsNone(self.lbm._match_multicast_range(399))

def test_get_vxlan_group_with_ipv6(self):
    """IPv6 multicast groups are derived from the VNI the same way."""
    cfg.CONF.set_override('local_ip', LOCAL_IPV6, 'VXLAN')
    self.lbm.local_ip = LOCAL_IPV6
    cfg.CONF.set_override('vxlan_group', VXLAN_GROUPV6, 'VXLAN')
    vn_id = constants.MAX_VXLAN_VNI
    self.assertEqual('ff05::ff', self.lbm.get_vxlan_group(vn_id))
    vn_id = 256
    self.assertEqual('ff05::', self.lbm.get_vxlan_group(vn_id))
    vn_id = 257
    self.assertEqual('ff05::1', self.lbm.get_vxlan_group(vn_id))
def test_get_deletable_bridges(self):
    """Only agent-managed 'brq*' bridges not in bridge_mappings are deletable."""
    br_list = ["br-int", "brq1", "brq2", "brq-user"]
    expected = set(br_list[1:3])
    lbm = get_linuxbridge_manager(
        bridge_mappings={"physnet0": "brq-user"}, interface_mappings={})
    with mock.patch.object(
            bridge_lib, 'get_bridge_names', return_value=br_list):
        self.assertEqual(expected, lbm.get_deletable_bridges())

def test_get_tap_devices_count(self):
    """Only tap* interfaces on the bridge are counted."""
    with mock.patch.object(
            bridge_lib.BridgeDevice, 'get_interfaces') as get_ifs_fn:
        get_ifs_fn.return_value = ['tap2101', 'eth0.100', 'vxlan-1000']
        self.assertEqual(1, self.lbm.get_tap_devices_count('br0'))

def test_get_interface_details(self):
    """Returns the (ip list, gateway) pair queried through ip_lib."""
    with mock.patch.object(ip_lib.IpAddrCommand, 'list') as list_fn,\
            mock.patch.object(ip_lib.IpRouteCommand,
                              'get_gateway') as getgw_fn:
        gwdict = dict(gateway='1.1.1.1')
        getgw_fn.return_value = gwdict
        ipdict = dict(cidr='1.1.1.1/24',
                      broadcast='1.1.1.255',
                      scope='global',
                      ip_version=4,
                      dynamic=False)
        list_fn.return_value = ipdict
        ret = self.lbm.get_interface_details("eth0", 4)
        self.assertTrue(list_fn.called)
        self.assertTrue(getgw_fn.called)
        self.assertEqual(ret, (ipdict, gwdict))
def test_ensure_flat_bridge(self):
    """Without an existing bridge, the physical interface is bridged as-is."""
    with mock.patch.object(self.lbm, 'ensure_bridge') as ens:
        self.assertEqual(
            "eth0",
            self.lbm.ensure_flat_bridge("123", None, "eth0"))
        ens.assert_called_once_with("brq123", "eth0")

def test_ensure_flat_bridge_with_existed_brq(self):
    """With a pre-existing bridge, no interface needs to be attached."""
    with mock.patch.object(self.lbm, 'ensure_bridge') as ens:
        ens.return_value = "br-eth2"
        self.assertEqual("br-eth2",
                         self.lbm.ensure_flat_bridge("123",
                                                     "br-eth2",
                                                     None))
        ens.assert_called_with("br-eth2")

def test_ensure_vlan_bridge(self):
    """The vlan subinterface is created and wired into the network bridge."""
    with mock.patch.object(self.lbm, 'ensure_vlan') as ens_vl_fn,\
            mock.patch.object(self.lbm, 'ensure_bridge') as ens:
        ens_vl_fn.return_value = "eth0.1"
        self.assertEqual("eth0.1",
                         self.lbm.ensure_vlan_bridge("123",
                                                     None,
                                                     "eth0",
                                                     "1"))
        ens.assert_called_with("brq123", "eth0.1")

        # Second identical call: the operation must be repeatable
        self.assertEqual("eth0.1",
                         self.lbm.ensure_vlan_bridge("123",
                                                     None,
                                                     "eth0",
                                                     "1"))
        ens.assert_called_with("brq123", "eth0.1")

def test_ensure_vlan_bridge_with_existed_brq(self):
    """With a pre-existing bridge, no vlan device is attached."""
    with mock.patch.object(self.lbm, 'ensure_vlan') as ens_vl_fn,\
            mock.patch.object(self.lbm, 'ensure_bridge') as ens:
        ens_vl_fn.return_value = None
        ens.return_value = "br-eth2"
        self.assertEqual("br-eth2",
                         self.lbm.ensure_vlan_bridge("123",
                                                     "br-eth2",
                                                     None,
                                                     None))
        ens.assert_called_with("br-eth2")

def test_ensure_local_bridge(self):
    """Local networks get a bridge with no physical interface."""
    with mock.patch.object(self.lbm, 'ensure_bridge') as ens_fn:
        self.lbm.ensure_local_bridge("54321", None)
        ens_fn.assert_called_once_with("brq54321")

def test_ensure_local_bridge_with_existed_brq(self):
    """A pre-existing bridge is reused for local networks."""
    with mock.patch.object(self.lbm, 'ensure_bridge') as ens_fn:
        ens_fn.return_value = "br-eth2"
        self.lbm.ensure_local_bridge("54321", 'br-eth2')
        ens_fn.assert_called_once_with("br-eth2")
def test_ensure_vlan(self):
with mock.patch.object(ip_lib, 'device_exists') as de_fn:
de_fn.return_value = True
self.assertEqual("eth0.1", self.lbm.ensure_vlan("eth0", "1"))
de_fn.return_value = False
vlan_dev = FakeIpDevice()
with mock.patch.object(vlan_dev, 'disable_ipv6') as dv6_fn,\
mock.patch.object(self.lbm.ip, 'add_vlan',
return_value=vlan_dev) as add_vlan_fn:
retval = self.lbm.ensure_vlan("eth0", "1")
self.assertEqual("eth0.1", retval)
add_vlan_fn.assert_called_with('eth0.1', 'eth0', '1')
dv6_fn.assert_called_once_with()
def test_ensure_vxlan(self, expected_proxy=False):
    """ensure_vxlan creates the vxlan device, sets its MTU and honors l2pop.

    Also reused (via expected_proxy=True) by the arp-responder variant below.
    """
    physical_mtu = 1500
    seg_id = "12345678"
    self.lbm.local_int = 'eth0'
    self.lbm.vxlan_mode = lconst.VXLAN_MCAST
    with mock.patch.object(ip_lib, 'device_exists') as de_fn:
        # Already present: no creation, name is returned directly.
        de_fn.return_value = True
        self.assertEqual("vxlan-" + seg_id, self.lbm.ensure_vxlan(seg_id))
        de_fn.return_value = False
        vxlan_dev = FakeIpDevice()
        with mock.patch.object(vxlan_dev, 'disable_ipv6') as dv6_fn,\
                mock.patch.object(vxlan_dev.link,
                                  'set_mtu') as set_mtu_fn,\
                mock.patch.object(ip_lib, 'get_device_mtu',
                                  return_value=physical_mtu),\
                mock.patch.object(self.lbm.ip, 'add_vxlan',
                                  return_value=vxlan_dev) as add_vxlan_fn:
            retval = self.lbm.ensure_vxlan(seg_id, mtu=1450)
            self.assertEqual("vxlan-" + seg_id, retval)
            add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id,
                                            group="224.0.0.1",
                                            srcport=(0, 0),
                                            dstport=None,
                                            ttl=None,
                                            dev=self.lbm.local_int)
            dv6_fn.assert_called_once_with()
            set_mtu_fn.assert_called_once_with(1450)
            # With l2_population on, the proxy flag is forwarded.
            cfg.CONF.set_override('l2_population', 'True', 'VXLAN')
            self.assertEqual("vxlan-" + seg_id,
                             self.lbm.ensure_vxlan(seg_id))
            add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id,
                                            group="224.0.0.1",
                                            srcport=(0, 0),
                                            dstport=None,
                                            ttl=None,
                                            dev=self.lbm.local_int,
                                            proxy=expected_proxy)
def test_ensure_vxlan_arp_responder_enabled(self):
    """With arp_responder enabled the vxlan device is created with proxy=True."""
    cfg.CONF.set_override('arp_responder', True, 'VXLAN')
    self.test_ensure_vxlan(expected_proxy=True)
def test_ensure_vxlan_dscp_inherit_set(self):
    """With dscp_inherit the vxlan device is created with tos='inherit'."""
    cfg.CONF.set_override('dscp_inherit', 'True', 'AGENT')
    seg_id = "12345678"
    self.lbm.local_int = 'eth0'
    self.lbm.vxlan_mode = lconst.VXLAN_MCAST
    with mock.patch.object(ip_lib, 'device_exists', return_value=False):
        vxlan_dev = FakeIpDevice()
        with mock.patch.object(vxlan_dev, 'disable_ipv6') as dv6_fn,\
                mock.patch.object(self.lbm.ip, 'add_vxlan',
                                  return_value=vxlan_dev) as add_vxlan_fn:
            self.assertEqual("vxlan-" + seg_id,
                             self.lbm.ensure_vxlan(seg_id))
            add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id,
                                            group="224.0.0.1",
                                            srcport=(0, 0),
                                            dstport=None,
                                            ttl=None,
                                            tos='inherit',
                                            dev=self.lbm.local_int)
            dv6_fn.assert_called_once_with()
def test_ensure_vxlan_mtu_too_big(self):
    """If the requested MTU does not fit the physical MTU, the device is rolled back."""
    seg_id = "12345678"
    physical_mtu = 1500
    # Any mtu value which will be higher than
    # physical_mtu - VXLAN_ENCAP_OVERHEAD should raise NetlinkError
    mtu = 1490
    self.lbm.local_int = 'eth0'
    self.lbm.vxlan_mode = lconst.VXLAN_MCAST
    with mock.patch.object(ip_lib, 'device_exists', return_value=False):
        vxlan_dev = mock.Mock()
        with mock.patch.object(vxlan_dev, 'disable_ipv6') as dv6_fn,\
                mock.patch.object(self.lbm.ip, 'add_vxlan',
                                  return_value=vxlan_dev) as add_vxlan_fn,\
                mock.patch.object(
                    vxlan_dev.link, 'set_mtu',
                    side_effect=ip_lib.InvalidArgument(
                        parameter="MTU", value=mtu)),\
                mock.patch.object(ip_lib, 'get_device_mtu',
                                  return_value=physical_mtu),\
                mock.patch.object(vxlan_dev.link, 'delete') as delete_dev:
            self.assertFalse(
                self.lbm.ensure_vxlan(seg_id, mtu=mtu))
            add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id,
                                            group="224.0.0.1",
                                            srcport=(0, 0),
                                            dstport=None,
                                            ttl=None,
                                            dev=self.lbm.local_int)
            # The half-created device must be deleted, and since setup
            # failed, IPv6 must never have been touched.
            delete_dev.assert_called_once_with()
            dv6_fn.assert_not_called()
def test__update_interface_ip_details(self):
    """_update_interface_ip_details moves addresses and gateway between devices."""
    gwdict = dict(gateway='1.1.1.1',
                  metric=50)
    ipdict = dict(cidr='1.1.1.1/24',
                  broadcast='1.1.1.255',
                  scope='global',
                  ip_version=4,
                  dynamic=False)
    with mock.patch.object(ip_lib.IpAddrCommand, 'add') as add_fn,\
            mock.patch.object(ip_lib.IpAddrCommand, 'delete') as del_fn,\
            mock.patch.object(ip_lib.IpAddrCommand, 'list') as list_fn:
        # 'list' actually returns a dict, but we're only simulating
        # whether the device exists or not
        list_fn.side_effect = [True, False]
        # Address already on the destination: only the source delete runs.
        self.lbm._update_interface_ip_details("br0", "eth0",
                                              [ipdict], None)
        self.assertFalse(add_fn.called)
        self.assertTrue(del_fn.called)
        add_fn.reset_mock()
        del_fn.reset_mock()
        # Address missing on the destination: it is added as well.
        self.lbm._update_interface_ip_details("br0", "eth0",
                                              [ipdict], None)
        self.assertTrue(add_fn.called)
        self.assertTrue(del_fn.called)
    with mock.patch.object(ip_lib.IpRouteCommand,
                           'add_gateway') as addgw_fn,\
            mock.patch.object(ip_lib.IpRouteCommand,
                              'delete_gateway') as delgw_fn:
        # Gateway-only update: it is removed from one device, added to the other.
        self.lbm._update_interface_ip_details("br0", "eth0",
                                              None, gwdict)
        self.assertTrue(addgw_fn.called)
        self.assertTrue(delgw_fn.called)
def test_ensure_bridge(self):
    """ensure_bridge creates/configures the bridge and replugs interfaces."""
    bridge_device = mock.Mock()
    bridge_device_old = mock.Mock()
    with mock.patch.object(ip_lib,
                           'ensure_device_is_ready') as de_fn,\
            mock.patch.object(bridge_lib, "BridgeDevice",
                              return_value=bridge_device) as br_fn,\
            mock.patch.object(self.lbm,
                              'update_interface_ip_details') as upd_fn,\
            mock.patch.object(bridge_lib, 'is_bridged_interface'),\
            mock.patch.object(bridge_lib.BridgeDevice,
                              'get_interface_bridge') as get_if_br_fn:
        de_fn.return_value = False
        br_fn.addbr.return_value = bridge_device
        bridge_device.setfd.return_value = False
        bridge_device.disable_stp.return_value = False
        bridge_device.disable_ipv6.return_value = False
        bridge_device.link.set_up.return_value = False
        self.assertEqual("br0", self.lbm.ensure_bridge("br0", None))

        # Interface not yet on the bridge: IPs are moved and it is plugged.
        bridge_device.owns_interface.return_value = False
        self.lbm.ensure_bridge("br0", "eth0")
        upd_fn.assert_called_with("br0", "eth0")
        bridge_device.owns_interface.assert_called_with("eth0")

        # A failing delif is tolerated while re-plugging.
        de_fn.return_value = True
        bridge_device.delif.side_effect = Exception()
        self.lbm.ensure_bridge("br0", "eth0")
        bridge_device.owns_interface.assert_called_with("eth0")

        # Interface owned by another bridge: it is removed there first.
        de_fn.return_value = True
        bridge_device.owns_interface.return_value = False
        get_if_br_fn.return_value = bridge_device_old
        bridge_device.addif.reset_mock()
        self.lbm.ensure_bridge("br0", "eth0")
        bridge_device_old.delif.assert_called_once_with('eth0')
        bridge_device.addif.assert_called_once_with('eth0')
def test_ensure_physical_in_bridge(self):
    """ensure_physical_in_bridge dispatches per network type (flat/vlan/vxlan)."""
    # Unknown physical network: nothing to plug.
    self.assertFalse(
        self.lbm.ensure_physical_in_bridge("123", constants.TYPE_VLAN,
                                           "phys", "1", 1450)
    )
    with mock.patch.object(self.lbm, "ensure_flat_bridge") as flbr_fn:
        self.assertTrue(
            self.lbm.ensure_physical_in_bridge("123", constants.TYPE_FLAT,
                                               "physnet1", None, 1450)
        )
        self.assertTrue(flbr_fn.called)
    with mock.patch.object(self.lbm, "ensure_vlan_bridge") as vlbr_fn:
        self.assertTrue(
            self.lbm.ensure_physical_in_bridge("123", constants.TYPE_VLAN,
                                               "physnet1", "1", 1450)
        )
        self.assertTrue(vlbr_fn.called)
    with mock.patch.object(self.lbm, "ensure_vxlan_bridge") as vlbr_fn:
        self.lbm.vxlan_mode = lconst.VXLAN_MCAST
        self.assertTrue(
            self.lbm.ensure_physical_in_bridge("123", constants.TYPE_VXLAN,
                                               "physnet1", "1", 1450)
        )
        self.assertTrue(vlbr_fn.called)
def test_ensure_physical_in_bridge_with_existed_brq(self):
    """An unmapped physical network is reported via LOG.error."""
    with mock.patch.object(linuxbridge_neutron_agent.LOG, 'error') as log:
        self.lbm.ensure_physical_in_bridge("123", constants.TYPE_FLAT,
                                           "physnet9", "1", 1450)
        self.assertEqual(1, log.call_count)
@mock.patch.object(ip_lib, "device_exists", return_value=False)
def test_add_tap_interface_with_interface_disappearing(self, exists):
    """A RuntimeError is swallowed when the tap device vanished mid-plug."""
    with mock.patch.object(self.lbm, "_add_tap_interface",
                           side_effect=RuntimeError("No such dev")):
        self.assertFalse(self.lbm.add_tap_interface("123",
                                                    constants.TYPE_VLAN,
                                                    "physnet1", None,
                                                    "tap1", "foo", None))
@mock.patch.object(ip_lib, "device_exists", return_value=True)
def test_add_tap_interface_with_other_error(self, exists):
    """A RuntimeError is re-raised when the tap device still exists."""
    with mock.patch.object(self.lbm, "_add_tap_interface",
                           side_effect=RuntimeError("No more fuel")):
        self.assertRaises(RuntimeError, self.lbm.add_tap_interface, "123",
                          constants.TYPE_VLAN, "physnet1", None, "tap1",
                          "foo", None)
def test_add_tap_interface_owner_compute(self):
    """Compute-owned ports on local networks are plugged successfully."""
    with mock.patch.object(ip_lib, "device_exists"):
        with mock.patch.object(self.lbm, "ensure_local_bridge"):
            self.assertTrue(self.lbm.add_tap_interface(
                "123", constants.TYPE_LOCAL, "physnet1",
                None, "tap1", "compute:1", None))
def _test_add_tap_interface(self, dev_owner_prefix):
    """Exercise add_tap_interface success/failure paths for a device owner prefix."""
    with mock.patch.object(ip_lib, "device_exists") as de_fn:
        # Tap device missing: nothing to plug.
        de_fn.return_value = False
        self.assertFalse(
            self.lbm.add_tap_interface("123", constants.TYPE_VLAN,
                                       "physnet1", "1", "tap1",
                                       dev_owner_prefix, None))
        de_fn.return_value = True
        bridge_device = mock.Mock()
        with mock.patch.object(self.lbm, "ensure_local_bridge") as en_fn,\
                mock.patch.object(bridge_lib, "BridgeDevice",
                                  return_value=bridge_device), \
                mock.patch.object(self.lbm, '_set_tap_mtu') as set_tap, \
                mock.patch.object(bridge_lib.BridgeDevice,
                                  "get_interface_bridge") as get_br:
            # BUGFIX: the original assigned "addif.retun_value" (typo),
            # which only set an unused attribute on the mock instead of
            # configuring addif's result. 0/False means success.
            bridge_device.addif.return_value = False
            get_br.return_value = True
            self.assertTrue(self.lbm.add_tap_interface(
                "123", constants.TYPE_LOCAL, "physnet1", None,
                "tap1", dev_owner_prefix, None))
            en_fn.assert_called_with("123", "brq123")

            # A bridge mapping overrides the derived brq<net-id> name,
            # and the MTU is propagated to the tap device.
            self.lbm.bridge_mappings = {"physnet1": "brq999"}
            self.assertTrue(self.lbm.add_tap_interface(
                "123", constants.TYPE_LOCAL, "physnet1", None,
                "tap1", dev_owner_prefix, 8765))
            set_tap.assert_called_with('tap1', 8765)
            en_fn.assert_called_with("123", "brq999")

            # addif failing (non-zero return) makes the plug fail.
            get_br.return_value = False
            bridge_device.addif.return_value = True
            self.assertFalse(self.lbm.add_tap_interface(
                "123", constants.TYPE_LOCAL, "physnet1",
                None, "tap1", dev_owner_prefix, None))
        with mock.patch.object(self.lbm,
                               "ensure_physical_in_bridge") as ens_fn:
            # Failure to set up the physical bridge propagates as False.
            ens_fn.return_value = False
            self.assertFalse(self.lbm.add_tap_interface(
                "123", constants.TYPE_VLAN, "physnet1", "1",
                "tap1", dev_owner_prefix, None))
def test_add_tap_interface_owner_network(self):
    """Run the tap-plug scenarios for network-owned devices."""
    self._test_add_tap_interface(constants.DEVICE_OWNER_NETWORK_PREFIX)
def test_add_tap_interface_owner_neutron(self):
    """Run the tap-plug scenarios for neutron-owned devices."""
    self._test_add_tap_interface(constants.DEVICE_OWNER_NEUTRON_PREFIX)
def test_plug_interface(self):
    """plug_interface unpacks the segment and delegates to add_tap_interface."""
    segment = amb.NetworkSegment(
        constants.TYPE_VLAN, "physnet-1", "1", 1777)
    with mock.patch.object(self.lbm, "add_tap_interface") as add_tap:
        self.lbm.plug_interface("123", segment, "tap234",
                                constants.DEVICE_OWNER_NETWORK_PREFIX)
        add_tap.assert_called_with("123", constants.TYPE_VLAN, "physnet-1",
                                   "1", "tap234",
                                   constants.DEVICE_OWNER_NETWORK_PREFIX,
                                   1777)
def test_delete_bridge(self):
    """delete_bridge unplugs physical interfaces and deletes vxlan devices."""
    with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\
            mock.patch.object(ip_lib, "IpLinkCommand") as link_cmd,\
            mock.patch.object(bridge_lib.BridgeDevice,
                              "get_interfaces") as getif_fn,\
            mock.patch.object(self.lbm, "remove_interface"),\
            mock.patch.object(self.lbm,
                              "update_interface_ip_details") as updif_fn,\
            mock.patch.object(self.lbm, "delete_interface") as delif_fn:
        # Bridge absent: nothing is enumerated or removed.
        de_fn.return_value = False
        self.lbm.delete_bridge("br0")
        self.assertFalse(getif_fn.called)
        de_fn.return_value = True
        getif_fn.return_value = ["eth0", "eth1", "vxlan-1002"]
        link_cmd.set_down.return_value = False
        self.lbm.delete_bridge("br0")
        # Physical interfaces get their IPs back, vxlan devices are deleted.
        updif_fn.assert_called_with("eth1", "br0")
        delif_fn.assert_called_with("vxlan-1002")
def test_delete_bridge_not_exist(self):
    """A set_down failure is tolerated if the bridge disappeared meanwhile."""
    self.lbm.interface_mappings.update({})
    bridge_device = mock.Mock()
    with mock.patch.object(bridge_lib, "BridgeDevice",
                           return_value=bridge_device):
        # Bridge vanished between the two existence checks: no error.
        bridge_device.exists.side_effect = [True, False]
        bridge_device.get_interfaces.return_value = []
        bridge_device.link.set_down.side_effect = RuntimeError
        self.lbm.delete_bridge("br0")
        self.assertEqual(2, bridge_device.exists.call_count)
        # Bridge still there: the RuntimeError is genuine and propagates.
        bridge_device.exists.side_effect = [True, True]
        self.assertRaises(RuntimeError, self.lbm.delete_bridge, "br0")
def test_delete_bridge_with_ip(self):
    """Interfaces that carried IPs are restored instead of deleted."""
    bridge_device = mock.Mock()
    with mock.patch.object(ip_lib, "device_exists") as de_fn,\
            mock.patch.object(self.lbm, "remove_interface"),\
            mock.patch.object(self.lbm,
                              "update_interface_ip_details") as updif_fn,\
            mock.patch.object(self.lbm,
                              "delete_interface") as del_interface,\
            mock.patch.object(bridge_lib, "BridgeDevice",
                              return_value=bridge_device):
        de_fn.return_value = True
        # updif_fn returning True signals that IP details were moved back.
        updif_fn.return_value = True
        bridge_device.get_interfaces.return_value = ["eth0", "eth1.1"]
        bridge_device.link.set_down.return_value = False
        self.lbm.delete_bridge("br0")
        updif_fn.assert_called_with("eth1.1", "br0")
        self.assertFalse(del_interface.called)
def test_delete_bridge_no_ip(self):
    """Interfaces without IP details are deleted rather than restored."""
    bridge_device = mock.Mock()
    with mock.patch.object(ip_lib, "device_exists") as de_fn,\
            mock.patch.object(self.lbm, "remove_interface"),\
            mock.patch.object(self.lbm,
                              "get_interface_details") as if_det_fn,\
            mock.patch.object(self.lbm,
                              "_update_interface_ip_details") as updif_fn,\
            mock.patch.object(self.lbm,
                              "delete_interface") as del_interface,\
            mock.patch.object(bridge_lib, "BridgeDevice",
                              return_value=bridge_device):
        de_fn.return_value = True
        bridge_device.get_interfaces.return_value = ["eth0", "eth1.1"]
        bridge_device.link.set_down.return_value = False
        # No IPs and no gateway on the bridge.
        if_det_fn.return_value = ([], None)
        self.lbm.delete_bridge("br0")
        del_interface.assert_called_with("eth1.1")
        self.assertFalse(updif_fn.called)
def test_delete_bridge_no_int_mappings(self):
    """delete_bridge works with empty bridge/interface mappings."""
    lbm = get_linuxbridge_manager(
        bridge_mappings={}, interface_mappings={})
    with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\
            mock.patch.object(ip_lib, "IpLinkCommand") as link_cmd,\
            mock.patch.object(bridge_lib.BridgeDevice,
                              "get_interfaces") as getif_fn,\
            mock.patch.object(lbm, "remove_interface"),\
            mock.patch.object(lbm, "delete_interface") as del_interface:
        de_fn.return_value = False
        lbm.delete_bridge("br0")
        self.assertFalse(getif_fn.called)
        de_fn.return_value = True
        getif_fn.return_value = ["vxlan-1002"]
        link_cmd.set_down.return_value = False
        lbm.delete_bridge("br0")
        del_interface.assert_called_with("vxlan-1002")
def test_delete_bridge_with_physical_vlan(self):
    """Mapped physical vlan interfaces survive; only the others are deleted."""
    self.lbm.interface_mappings.update({"physnet2": "eth1.4000"})
    bridge_device = mock.Mock()
    with mock.patch.object(ip_lib, "device_exists") as de_fn,\
            mock.patch.object(self.lbm, "remove_interface"),\
            mock.patch.object(self.lbm,
                              "update_interface_ip_details") as updif_fn,\
            mock.patch.object(self.lbm, "delete_interface") as del_int,\
            mock.patch.object(bridge_lib, "BridgeDevice",
                              return_value=bridge_device):
        de_fn.return_value = True
        bridge_device.get_interfaces.return_value = ["eth1.1", "eth1.4000"]
        updif_fn.return_value = False
        bridge_device.link.set_down.return_value = False
        self.lbm.delete_bridge("br0")
        # eth1.4000 is a mapped physical interface, so only eth1.1 goes.
        del_int.assert_called_once_with("eth1.1")
def test_remove_interface(self):
    """remove_interface succeeds whether or not the bridge owns the interface."""
    with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\
            mock.patch.object(bridge_lib.BridgeDevice,
                              'owns_interface') as owns_fn,\
            mock.patch.object(bridge_lib.BridgeDevice,
                              "delif") as delif_fn:
        # Bridge absent: nothing to do, ownership never checked.
        de_fn.return_value = False
        self.assertFalse(self.lbm.remove_interface("br0", "eth0"))
        self.assertFalse(owns_fn.called)
        de_fn.return_value = True
        owns_fn.return_value = False
        self.assertTrue(self.lbm.remove_interface("br0", "eth0"))
        delif_fn.return_value = False
        self.assertTrue(self.lbm.remove_interface("br0", "eth0"))
def test_remove_interface_not_on_bridge(self):
    """A delif failure is tolerated if the interface already left the bridge."""
    bridge_device = mock.Mock()
    with mock.patch.object(bridge_lib, "BridgeDevice",
                           return_value=bridge_device):
        bridge_device.exists.return_value = True
        bridge_device.delif.side_effect = RuntimeError
        # Interface detached concurrently: the error is swallowed.
        bridge_device.owns_interface.side_effect = [True, False]
        self.lbm.remove_interface("br0", 'tap0')
        self.assertEqual(2, bridge_device.owns_interface.call_count)
        # Interface still attached: the error is genuine and propagates.
        bridge_device.owns_interface.side_effect = [True, True]
        self.assertRaises(RuntimeError,
                          self.lbm.remove_interface, "br0", 'tap0')
def test_delete_interface(self):
    """delete_interface downs and deletes the device only if it exists."""
    with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\
            mock.patch.object(ip_lib.IpLinkCommand,
                              "set_down") as down_fn,\
            mock.patch.object(ip_lib.IpLinkCommand, "delete") as delete_fn:
        de_fn.return_value = False
        self.lbm.delete_interface("eth1.1")
        self.assertFalse(down_fn.called)
        self.assertFalse(delete_fn.called)
        de_fn.return_value = True
        self.lbm.delete_interface("eth1.1")
        self.assertTrue(down_fn.called)
        self.assertTrue(delete_fn.called)
def _check_vxlan_support(self, expected, vxlan_ucast_supported,
                         vxlan_mcast_supported):
    """Assert check_vxlan_support sets the expected mode for the given probes.

    expected == VXLAN_NONE additionally asserts VxlanNetworkUnsupported
    is raised before the mode is recorded.
    """
    with mock.patch.object(self.lbm,
                           'vxlan_ucast_supported',
                           return_value=vxlan_ucast_supported),\
            mock.patch.object(self.lbm,
                              'vxlan_mcast_supported',
                              return_value=vxlan_mcast_supported):
        if expected == lconst.VXLAN_NONE:
            self.assertRaises(exceptions.VxlanNetworkUnsupported,
                              self.lbm.check_vxlan_support)
            self.assertEqual(expected, self.lbm.vxlan_mode)
        else:
            self.lbm.check_vxlan_support()
            self.assertEqual(expected, self.lbm.vxlan_mode)
def test_check_vxlan_support(self):
    """check_vxlan_support prefers ucast over mcast and fails when neither works."""
    self._check_vxlan_support(expected=lconst.VXLAN_UCAST,
                              vxlan_ucast_supported=True,
                              vxlan_mcast_supported=True)
    self._check_vxlan_support(expected=lconst.VXLAN_MCAST,
                              vxlan_ucast_supported=False,
                              vxlan_mcast_supported=True)
    # VXLAN_NONE is only reachable when both probes fail.  The original
    # listed this exact call twice (copy-paste duplication); the
    # duplicate added no coverage and was dropped.
    self._check_vxlan_support(expected=lconst.VXLAN_NONE,
                              vxlan_ucast_supported=False,
                              vxlan_mcast_supported=False)
def _check_vxlan_ucast_supported(
        self, expected, l2_population, iproute_arg_supported, fdb_append):
    """Assert vxlan_ucast_supported() under the given feature/probe results.

    fdb_append=False simulates the 'bridge fdb append' probe command failing.
    """
    cfg.CONF.set_override('l2_population', l2_population, 'VXLAN')
    with mock.patch.object(ip_lib, 'device_exists', return_value=False),\
            mock.patch.object(ip_lib, 'vxlan_in_use', return_value=False),\
            mock.patch.object(self.lbm,
                              'delete_interface',
                              return_value=None),\
            mock.patch.object(self.lbm,
                              'ensure_vxlan',
                              return_value=None),\
            mock.patch.object(
                utils,
                'execute',
                side_effect=None if fdb_append else RuntimeError()),\
            mock.patch.object(ip_lib,
                              'iproute_arg_supported',
                              return_value=iproute_arg_supported):
        self.assertEqual(expected, self.lbm.vxlan_ucast_supported())
def test_vxlan_ucast_supported(self):
    """ucast needs l2_population AND iproute support AND a working fdb append."""
    self._check_vxlan_ucast_supported(
        expected=False,
        l2_population=False, iproute_arg_supported=True, fdb_append=True)
    self._check_vxlan_ucast_supported(
        expected=False,
        l2_population=True, iproute_arg_supported=False, fdb_append=True)
    self._check_vxlan_ucast_supported(
        expected=False,
        l2_population=True, iproute_arg_supported=True, fdb_append=False)
    self._check_vxlan_ucast_supported(
        expected=True,
        l2_population=True, iproute_arg_supported=True, fdb_append=True)
def _check_vxlan_mcast_supported(
        self, expected, vxlan_group, iproute_arg_supported):
    """Assert vxlan_mcast_supported() for a given group setting and iproute probe."""
    cfg.CONF.set_override('vxlan_group', vxlan_group, 'VXLAN')
    with mock.patch.object(
            ip_lib, 'iproute_arg_supported',
            return_value=iproute_arg_supported):
        self.assertEqual(expected, self.lbm.vxlan_mcast_supported())
def test_vxlan_mcast_supported(self):
    """mcast needs a configured vxlan_group AND iproute 'proxy' support."""
    self._check_vxlan_mcast_supported(
        expected=False,
        vxlan_group='',
        iproute_arg_supported=True)
    self._check_vxlan_mcast_supported(
        expected=False,
        vxlan_group='224.0.0.1',
        iproute_arg_supported=False)
    self._check_vxlan_mcast_supported(
        expected=True,
        vxlan_group='224.0.0.1',
        iproute_arg_supported=True)
def _test_ensure_port_admin_state(self, admin_state):
    """ensure_port_admin_state brings the tap up or down to match admin_state."""
    port_id = 'fake_id'
    with mock.patch.object(ip_lib, 'IPDevice') as dev_mock:
        self.lbm.ensure_port_admin_state(port_id, admin_state)
        tap_name = self.lbm.get_tap_device_name(port_id)
        # Exactly one of set_up/set_down is invoked, matching admin_state.
        self.assertEqual(admin_state,
                         dev_mock(tap_name).link.set_up.called)
        self.assertNotEqual(admin_state,
                            dev_mock(tap_name).link.set_down.called)
def test_ensure_port_admin_state_up(self):
    """Admin state up maps to link set_up."""
    self._test_ensure_port_admin_state(True)
def test_ensure_port_admin_state_down(self):
    """Admin state down maps to link set_down."""
    self._test_ensure_port_admin_state(False)
def test_get_agent_id_bridge_mappings(self):
    """The agent id is 'lb' + MAC (colons stripped) of the first mapped bridge."""
    lbm = get_linuxbridge_manager(BRIDGE_MAPPINGS, INTERFACE_MAPPINGS)
    with mock.patch.object(ip_lib,
                           "get_device_mac",
                           return_value='16:63:69:10:a0:59') as mock_gim:
        agent_id = lbm.get_agent_id()
        self.assertEqual("lb16636910a059", agent_id)
        mock_gim.assert_called_with(BRIDGE_MAPPING_VALUE)
def test_get_agent_id_no_bridge_mappings(self):
    """Without bridge mappings, devices are scanned until one has a MAC."""
    devices_mock = [
        mock.MagicMock(),
        mock.MagicMock()
    ]
    # .name can't be passed to MagicMock(), so set it afterwards.
    devices_mock[0].name = "eth1"
    devices_mock[1].name = "eth2"
    bridge_mappings = {}
    lbm = get_linuxbridge_manager(bridge_mappings, INTERFACE_MAPPINGS)
    with mock.patch.object(ip_lib.IPWrapper,
                           'get_devices',
                           return_value=devices_mock), \
            mock.patch.object(
                ip_lib,
                "get_device_mac",
                # eth1 has no MAC, so eth2's is used.
                side_effect=[None, '16:63:69:10:a0:59']) as mock_gim:
        agent_id = lbm.get_agent_id()
        self.assertEqual("lb16636910a059", agent_id)
        mock_gim.assert_has_calls([mock.call("eth1"), mock.call("eth2")])
class TestLinuxBridgeRpcCallbacks(base.BaseTestCase):
    """Tests for the LinuxBridgeRpcCallbacks RPC endpoint handlers."""

    def setUp(self):
        """Build an RPC callbacks object around a fake agent with a vxlan net mapped."""
        super(TestLinuxBridgeRpcCallbacks, self).setUp()

        class FakeLBAgent(object):
            # Minimal stand-in for the real agent: id, manager, port registry.
            def __init__(self):
                self.agent_id = 1
                self.mgr = get_linuxbridge_manager(
                    BRIDGE_MAPPINGS, INTERFACE_MAPPINGS)
                self.mgr.vxlan_mode = lconst.VXLAN_UCAST
                self.network_ports = collections.defaultdict(list)

        self.lb_rpc = linuxbridge_neutron_agent.LinuxBridgeRpcCallbacks(
            object(),
            FakeLBAgent(),
            object()
        )

        # Pre-register a vxlan segment so fdb tests resolve 'net_id' to vxlan-1.
        segment = mock.Mock()
        segment.network_type = 'vxlan'
        segment.segmentation_id = 1
        self.lb_rpc.network_map['net_id'] = segment

        cfg.CONF.set_default('host', 'host')

    def test_network_delete_mapped_net(self):
        """network_delete removes the bridge of a known (mapped) network."""
        mock_net = mock.Mock()
        mock_net.physical_network = None
        self._test_network_delete({NETWORK_ID: mock_net})

    def test_network_delete_unmapped_net(self):
        """network_delete also removes the bridge of an unknown network."""
        self._test_network_delete({})

    def _test_network_delete(self, net_map):
        """Common assertion: delete_bridge is called with the derived bridge name."""
        self.lb_rpc.network_map = net_map

        with mock.patch.object(self.lb_rpc.agent.mgr,
                               "get_bridge_name") as get_br_fn,\
                mock.patch.object(self.lb_rpc.agent.mgr,
                                  "delete_bridge") as del_fn:
            get_br_fn.return_value = "br0"
            self.lb_rpc.network_delete("anycontext", network_id=NETWORK_ID)
            get_br_fn.assert_called_with(NETWORK_ID)
            del_fn.assert_called_with("br0")

    def test_port_update(self):
        """port_update marks the port's tap device as updated."""
        port = {'id': PORT_1}
        self.lb_rpc.port_update(context=None, port=port)
        self.assertEqual(set([DEVICE_1]), self.lb_rpc.updated_devices)

    def test_network_update(self):
        """network_update marks every port on the network as updated."""
        updated_network = {'id': NETWORK_ID}
        self.lb_rpc.agent.network_ports = {
            NETWORK_ID: [PORT_DATA]
        }
        self.lb_rpc.network_update(context=None, network=updated_network)
        self.assertEqual(set([DEVICE_1]), self.lb_rpc.updated_devices)

    def test_network_delete_with_existed_brq(self):
        """network_delete keeps the bridge of a network on a physical net and logs."""
        mock_net = mock.Mock()
        mock_net.physical_network = 'physnet0'
        self.lb_rpc.network_map = {'123': mock_net}

        with mock.patch.object(linuxbridge_neutron_agent.LOG, 'info') as log,\
                mock.patch.object(self.lb_rpc.agent.mgr,
                                  "delete_bridge") as del_fn:
            self.lb_rpc.network_delete("anycontext", network_id="123")
            self.assertEqual(0, del_fn.call_count)
            self.assertEqual(1, log.call_count)

    def test_binding_deactivate(self):
        """binding_deactivate for this host removes the tap from its bridge."""
        with mock.patch.object(self.lb_rpc.agent.mgr,
                               "get_bridge_name") as get_br_fn,\
                mock.patch.object(self.lb_rpc.agent.mgr,
                                  "get_tap_device_name") as get_tap_fn,\
                mock.patch.object(self.lb_rpc.agent.mgr,
                                  "remove_interface") as rem_intf:
            get_br_fn.return_value = "br0"
            get_tap_fn.return_value = "tap456"
            self.lb_rpc.binding_deactivate(mock.ANY, host="host",
                                           network_id="123", port_id="456")
            get_br_fn.assert_called_once_with("123")
            get_tap_fn.assert_called_once_with("456")
            rem_intf.assert_called_once_with("br0", "tap456")

    def test_binding_deactivate_not_for_host(self):
        """binding_deactivate for another host is a no-op."""
        with mock.patch.object(self.lb_rpc.agent.mgr,
                               "get_bridge_name") as get_br_fn,\
                mock.patch.object(self.lb_rpc.agent.mgr,
                                  "get_tap_device_name") as get_tap_fn,\
                mock.patch.object(self.lb_rpc.agent.mgr,
                                  "remove_interface") as rem_intf:
            self.lb_rpc.binding_deactivate(mock.ANY, host="other_host",
                                           network_id="123", port_id="456")
            get_br_fn.assert_not_called()
            get_tap_fn.assert_not_called()
            rem_intf.assert_not_called()

    def test_binding_activate(self):
        """binding_activate for this host queues the tap for (re)processing."""
        with mock.patch.object(self.lb_rpc.agent.mgr,
                               "get_tap_device_name") as get_tap_fun:
            get_tap_fun.return_value = "tap456"
            self.lb_rpc.binding_activate(mock.ANY, host="host", port_id="456")
            self.assertIn("tap456", self.lb_rpc.updated_devices)

    def test_binding_activate_not_for_host(self):
        """binding_activate for another host is a no-op."""
        self.lb_rpc.binding_activate(mock.ANY, host="other-host",
                                     port_id="456")
        self.assertFalse(self.lb_rpc.updated_devices)

    def _test_fdb_add(self, proxy_enabled=False):
        """fdb_add programs bridge fdb entries; with ARP proxy it adds neighbours."""
        fdb_entries = {'net_id':
                       {'ports':
                        {'agent_ip': [constants.FLOODING_ENTRY,
                                      ['port_mac', 'port_ip']]},
                        'network_type': 'vxlan',
                        'segment_id': 1}}

        with mock.patch.object(utils, 'execute',
                               return_value='') as execute_fn, \
                mock.patch.object(ip_lib, 'add_neigh_entry',
                                  return_value='') as add_fn:
            self.lb_rpc.fdb_add(None, fdb_entries)

            expected = [
                mock.call(['bridge', 'fdb', 'show', 'dev', 'vxlan-1'],
                          run_as_root=True),
                mock.call(['bridge', 'fdb', 'add',
                           constants.FLOODING_ENTRY[0],
                           'dev', 'vxlan-1', 'dst', 'agent_ip'],
                          run_as_root=True,
                          check_exit_code=False),
                mock.call(['bridge', 'fdb', 'replace', 'port_mac', 'dev',
                           'vxlan-1', 'dst', 'agent_ip'],
                          run_as_root=True,
                          check_exit_code=False),
            ]
            execute_fn.assert_has_calls(expected)
            if proxy_enabled:
                add_fn.assert_called_with('port_ip', 'port_mac', 'vxlan-1')
            else:
                add_fn.assert_not_called()

    def test_fdb_add(self):
        """fdb_add without ARP responder."""
        self._test_fdb_add(proxy_enabled=False)

    def test_fdb_add_with_arp_responder(self):
        """fdb_add with ARP responder enabled."""
        cfg.CONF.set_override('arp_responder', True, 'VXLAN')
        self._test_fdb_add(proxy_enabled=True)

    def test_fdb_ignore(self):
        """fdb entries for the local IP or unknown networks are ignored."""
        fdb_entries = {'net_id':
                       {'ports':
                        {LOCAL_IP: [constants.FLOODING_ENTRY,
                                    ['port_mac', 'port_ip']]},
                        'network_type': 'vxlan',
                        'segment_id': 1}}

        with mock.patch.object(utils, 'execute',
                               return_value='') as execute_fn:
            self.lb_rpc.fdb_add(None, fdb_entries)
            self.lb_rpc.fdb_remove(None, fdb_entries)

            self.assertFalse(execute_fn.called)

        fdb_entries = {'other_net_id':
                       {'ports':
                        {'192.168.0.67': [constants.FLOODING_ENTRY,
                                          ['port_mac', 'port_ip']]},
                        'network_type': 'vxlan',
                        'segment_id': 1}}

        with mock.patch.object(utils, 'execute',
                               return_value='') as execute_fn:
            self.lb_rpc.fdb_add(None, fdb_entries)
            self.lb_rpc.fdb_remove(None, fdb_entries)

            self.assertFalse(execute_fn.called)

    def _test_fdb_remove(self, proxy_enabled=False):
        """fdb_remove deletes fdb entries; with ARP proxy it removes neighbours."""
        fdb_entries = {'net_id':
                       {'ports':
                        {'agent_ip': [constants.FLOODING_ENTRY,
                                      ['port_mac', 'port_ip']]},
                        'network_type': 'vxlan',
                        'segment_id': 1}}

        with mock.patch.object(utils, 'execute',
                               return_value='') as execute_fn, \
                mock.patch.object(ip_lib, 'delete_neigh_entry',
                                  return_value='') as del_fn:
            self.lb_rpc.fdb_remove(None, fdb_entries)

            expected = [
                mock.call(['bridge', 'fdb', 'delete',
                           constants.FLOODING_ENTRY[0],
                           'dev', 'vxlan-1', 'dst', 'agent_ip'],
                          run_as_root=True,
                          check_exit_code=False),
                mock.call(['bridge', 'fdb', 'delete', 'port_mac',
                           'dev', 'vxlan-1', 'dst', 'agent_ip'],
                          run_as_root=True,
                          check_exit_code=False),
            ]
            execute_fn.assert_has_calls(expected)
            if proxy_enabled:
                del_fn.assert_called_with('port_ip', 'port_mac', 'vxlan-1')
            else:
                del_fn.assert_not_called()

    def test_fdb_remove(self):
        """fdb_remove without ARP responder."""
        self._test_fdb_remove(proxy_enabled=False)

    def test_fdb_remove_with_arp_responder(self):
        """fdb_remove with ARP responder enabled."""
        cfg.CONF.set_override('arp_responder', True, 'VXLAN')
        self._test_fdb_remove(proxy_enabled=True)

    def _test_fdb_update_chg_ip(self, proxy_enabled=False):
        """fdb_update swaps neighbour entries only when the ARP proxy is on."""
        fdb_entries = {'chg_ip':
                       {'net_id':
                        {'agent_ip':
                         {'before': [['port_mac', 'port_ip_1']],
                          'after': [['port_mac', 'port_ip_2']]}}}}

        with mock.patch.object(ip_lib, 'add_neigh_entry',
                               return_value='') as add_fn, \
                mock.patch.object(ip_lib, 'delete_neigh_entry',
                                  return_value='') as del_fn:
            self.lb_rpc.fdb_update(None, fdb_entries)

            if proxy_enabled:
                del_fn.assert_called_with('port_ip_1', 'port_mac', 'vxlan-1')
                add_fn.assert_called_with('port_ip_2', 'port_mac', 'vxlan-1')
            else:
                del_fn.assert_not_called()
                add_fn.assert_not_called()

    def test_fdb_update_chg_ip(self):
        """fdb_update without ARP responder."""
        self._test_fdb_update_chg_ip(proxy_enabled=False)

    def test_fdb_update_chg_ip_with_arp_responder(self):
        """fdb_update with ARP responder enabled."""
        cfg.CONF.set_override('arp_responder', True, 'VXLAN')
        self._test_fdb_update_chg_ip(proxy_enabled=True)

    def test_fdb_update_chg_ip_empty_lists(self):
        """fdb_update tolerates an entry with neither before nor after lists."""
        fdb_entries = {'chg_ip': {'net_id': {'agent_ip': {}}}}
        self.lb_rpc.fdb_update(None, fdb_entries)
| |
import usb.core
import time

# Find the st link
# Locate the ST-Link programmer by its USB vendor/product id; `dev` is
# None if no such device is attached (not checked here).
dev = usb.core.find(idVendor=0x0483, idProduct=0x3748)
# configuration = dev.get_active_configuration()
# print configuration
#
# dev.set_configuration()
# # We send 0xf308, and the chip responds with 20 bytes. The first four and last 12
# # are encrypted with key "best performance", then the 16 encrypted bytes are used
# # as a key to encrypt the firmware before sending it
# byte_ints = [0xf3, 0x08]
# byte_str = "".join(chr(n) for n in byte_ints)
# dev.write(0x02, byte_str)
# response = dev.read(0x81, 100)
# # response = [64, 0, 255, 255, 74, 6, 55, 5, 82, 255, 104, 6, 113, 136, 72, 85, 37, 96, 3, 103]
# print response
# key_bytes = response[:4] + response[8:]
# # print len(key_bytes)
# Now we want to send the re-encrypted firmware, in 1024 byte chunks
# Telling it where to write to, we send 2100400008
# incrementing each block (to 2100440008, 48, 4c, 50 etc)
# (After the first three, it does three starting with 41 in a row (no data transfers) then the same three
# with 21 and a chunk of data. There are some others with 41. My theory is that these are erase ops or something
# - and possibly the initial few are getting rid of some sort of protection? <<< needs more investigation
# It proceeds incrementally with each block of fw data up to 2100b40008, then
# the last block goes to 21003c0008?
# Then we send f3 (DFU command) 01 (firmware chunk up next) XX (count, 02, 03, 04 then back to 02)
# 00, XXXX (checksum - sum of all bytes being sent) 04 (for size of firmware chunk = 0x0400)
# Finally, we send 1024 bytes of the encrypted firmware
# Make the address commands
# Target-address commands, one per 1 KiB firmware chunk: '21' opcode,
# '00', the byte offset (0x40..0xb4 step 4), then the '0008' suffix.
# The final chunk is written back at offset 0x3c.
address_cmds = ['2100%02x0008' % offset for offset in range(0x40, 0xb8, 0x04)]
address_cmds.append('21003c0008')
# print address_cmds
# For now, steal the checksums and actual data from the USB data cap
# DFU "firmware chunk follows" commands captured from the USB trace:
# f3 01 <count 02/03/04> 00 <16-bit checksum of the chunk> 0004 (= 0x0400
# bytes) padded to 16 bytes.  One entry per 1 KiB chunk, in send order.
# NOTE(review): entries 28 and 31 ('...316100...') are identical — looks
# like the corresponding chunks share a checksum; verify against the capture.
fw_write_cmds = [
    'f3010200648d00040000000000000000',
    'f3010300a4a700040000000000000000',
    'f3010400949500040000000000000000',
    'f3010200a2ce00040000000000000000',
    'f301030072ad00040000000000000000',
    'f30104006c7500040000000000000000',
    'f3010200eab500040000000000000000',
    'f3010300eac000040000000000000000',
    'f3010400efc400040000000000000000',
    'f30102005d9d00040000000000000000',
    'f3010300beb400040000000000000000',
    'f30104003e9b00040000000000000000',
    'f30102006ca100040000000000000000',
    'f3010300979400040000000000000000',
    'f3010400116c00040000000000000000',
    'f3010200e79800040000000000000000',
    'f3010300be8d00040000000000000000',
    'f30104001b6a00040000000000000000',
    'f3010200d1a800040000000000000000',
    'f3010300735600040000000000000000',
    'f3010400ea9b00040000000000000000',
    'f3010200928400040000000000000000',
    'f3010300699100040000000000000000',
    'f3010400219600040000000000000000',
    'f3010200b58000040000000000000000',
    'f30103005bb800040000000000000000',
    'f3010400316100040000000000000000',
    'f30102005caa00040000000000000000',
    'f3010300296500040000000000000000',
    'f3010400316100040000000000000000',
    'f301020061f000040000000000000000'
]
# Read the 31 x 1024-byte encrypted firmware chunks carved out of the
# USB capture.  192 bytes of pcap framing sit between consecutive chunks
# and are skipped.  Using a context manager fixes the original's leaked
# file handle (the file was opened and never closed).
fw_chunks = []
bin_file = '/home/jonathan/stm_jig/usb_sniff/bin_chunks.bin'
with open(bin_file, "r") as bin_chunks_concat:
    for i in range(31):
        fw_chunks.append(bin_chunks_concat.read(1024))
        if i != 30:
            bin_chunks_concat.read(192)  # the pcap nonsense inbetween each chunk
#
# print len(address_cmds)
# print len(fw_write_cmds)
def byte_str_to_str(line):
    """Decode a hex string (e.g. 'f301') into the corresponding raw characters."""
    pairs = (line[pos:pos + 2] for pos in range(0, len(line), 2))
    return "".join(chr(int(pair, 16)) for pair in pairs)
def str_to_byte_str(line):
# messes up 00 (puts it to 0)
l = ''
for i in range(len(line)):
l += hex(ord(line[i]))[2:]
if hex(ord(line[i]))[2:] == '0':
l += '0'
return l
# print str_to_byte_str(fw_chunks[0])
# Some setup - not sure what this does!!!
# Replayed setup sequence from the USB capture.  Each entry is
# [hex command, flag]: flag 1 = read one response after sending,
# flag 2 = read a response then pause (stand-in for the encryption
# handshake delay), flag 0 = send only.  Exact meaning of the individual
# commands is unknown — taken verbatim from the sniff.
start_commands = [
    ['f1800000000000000000000000000000', 1],
    ['f5000000000000000000000000000000', 1],
    ['f3080000000000000000000000000000', 2],
    ['f3030000000006000000000000000000', 1],
    ['f3030000000006000000000000000000', 1],
    ['f3010000450105000000000000000000', 0],
    ['4100fc0008', 0],
    ['f3030000000006000000000000000000', 1],
    ['f3030000000006000000000000000000', 1],
    ['f3030000000006000000000000000000', 1],
    ['f3010000180205000000000000000000', 0],
    ['21f0ff0008', 0],
    ['f3030000000006000000000000000000', 1],
    ['f3030000000006000000000000000000', 1],
    ['f3010200f00f10000000000000000000', 0],
    ['5c971a46af77599b8098da64ebe5ff69', 0],
    ['f3030000000006000000000000000000', 1],
    ['f3030000000006000000000000000000', 1],
    ['f3030000000006000000000000000000', 1],
    ['f3030000000006000000000000000000', 1],
    ['f3010000890005000000000000000000', 0],
    ['4100400008', 0],
    ['f3030000000006000000000000000000', 1],
    ['f3030000000006000000000000000000', 1],
    ['f30100008d0005000000000000000000', 0],
    ['4100440008', 0],
    ['f3030000000006000000000000000000', 1],
    ['f3030000000006000000000000000000', 1],
    ['f3010000910005000000000000000000', 0],
    ['4100480008', 0],
    ['f3030000000006000000000000000000', 1],
]
# Replay the setup sequence: write each command to endpoint 0x02 and,
# depending on its flag, read back (and print) a response from 0x81.
for c in start_commands:
    dev.write(0x02, byte_str_to_str(c[0]), 100)
    if c[1] == 1:
        print dev.read(0x81, 100)
    if c[1] == 2:
        print dev.read(0x81, 100)
        time.sleep(0.06) # simulate encryption stuff
# Send the packets
# Main flash loop: for each of the 31 chunks, poll the status command
# (f303...) until the device leaves the busy state (response[4] == 4),
# send the DFU sequence-number command, the target address, the
# checksum/size command, then the 1024-byte encrypted chunk itself.
for i in range(31):
    print i
    dev.write(0x02, byte_str_to_str("f3030000000006000000000000000000"),1000)
    response = dev.read(0x81, 100)
    print response
    dev.write(0x02, byte_str_to_str("f3030000000006000000000000000000"),1000)
    response = dev.read(0x81, 100)
    print response
    # Busy-wait (max 10 polls) while response[4] == 4 — presumably
    # "operation in progress"; TODO confirm against the protocol.
    count = 0
    while response[4] == 4 and count < 10:
        dev.write(0x02, byte_str_to_str("f3030000000006000000000000000000"),1000)
        response = dev.read(0x81, 100)
        count += 1
        time.sleep(0.1)
        print response
    # Sequence-number command: the byte 0x69 + 4*i matches the capture.
    dev.write(0x02, byte_str_to_str("f3010000"+ hex(0x69 + 4*i)[2:] + "0005000000000000000000"))
    print "wrote command: ", ("f3010000"+ hex(0x69 + 4*i)[2:] + "0005000000000000000000")
    add_cmd_str = byte_str_to_str(address_cmds[i])
    dev.write(0x02, add_cmd_str,1000)
    print "Wrote addr:", str_to_byte_str(add_cmd_str)
    dev.write(0x02, byte_str_to_str("f3030000000006000000000000000000"),1000)
    print dev.read(0x81, 100)
    dev.write(0x02, byte_str_to_str("f3030000000006000000000000000000"),1000)
    print dev.read(0x81, 100)
    fw_str = byte_str_to_str(fw_write_cmds[i])
    dev.write(0x02, fw_str, 1000)
    print "Wrote fw command: ", str_to_byte_str(fw_str)
    fw_chunk = fw_chunks[i] # No need to convert
    dev.write(0x02, fw_chunk, 1000)
    print "Wrote firmware chunk", i
# Finally, write the new version number and stuff like that
def wait_for_ready():
	# Poll device status until byte 4 leaves the "busy" value 4, or 10 tries.
	dev.write(0x02, byte_str_to_str("f3030000000006000000000000000000"),1000)
	response = dev.read(0x81, 100)
	print response
	count = 0
	while response[4] == 4 and count < 10:
		dev.write(0x02, byte_str_to_str("f3030000000006000000000000000000"),1000)
		response = dev.read(0x81, 100)
		count += 1
		time.sleep(0.1)
		print response
wait_for_ready()
# Post-flash bookkeeping writes (version/config records captured from the
# vendor updater).  NOTE(review): payload meanings inferred from capture only.
dev.write(0x02, byte_str_to_str("f3010000450105000000000000000000"), 100)
dev.write(0x02, byte_str_to_str("4100fc0008"), 100)
print "Wrote stuff"
wait_for_ready()
dev.write(0x02, byte_str_to_str("f3010000180205000000000000000000"), 100)
dev.write(0x02, byte_str_to_str("21f0ff0008"), 100)
print "Wrote stuff"
wait_for_ready()
dev.write(0x02, byte_str_to_str("f3010200930d10000000000000000000"), 100)
dev.write(0x02, byte_str_to_str("f76af84e192bb6f8ce4e6366cfc05f29"), 100)
print "wrote stuff"
wait_for_ready()
# Get FW and print
dev.write(0x02, byte_str_to_str("f3070000000000000000000000000000"), 100)
dev.write(0x02, byte_str_to_str("f1800000000000000000000000000000"), 100)
print dev.read(0x81, 100)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experiment class collecting information needed for a single training run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import os
import time
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import export_strategy
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
__all__ = ["Experiment"]
class Experiment(object):
"""Experiment is a class containing all information needed to train a model.
After an experiment is created (by passing an Estimator and inputs for
training and evaluation), an Experiment instance knows how to invoke training
and eval loops in a sensible fashion for distributed training.
"""
# TODO(ispir): remove delay_workers_by_global_step and make global step based
# waiting as only behavior.
@deprecated_args(
"2016-10-23",
"local_eval_frequency is deprecated as local_run will be renamed to "
"train_and_evaluate. Use min_eval_frequency and call train_and_evaluate "
"instead. Note, however, that the default for min_eval_frequency is 1, "
"meaning models will be evaluated every time a new checkpoint is "
"available. In contrast, the default for local_eval_frequency is None, "
"resulting in evaluation occurring only after training has completed. "
"min_eval_frequency is ignored when calling the deprecated local_run.",
"local_eval_frequency")
def __init__(self,
estimator,
train_input_fn,
eval_input_fn,
eval_metrics=None,
train_steps=None,
eval_steps=100,
train_monitors=None,
eval_hooks=None,
local_eval_frequency=None,
eval_delay_secs=120,
continuous_eval_throttle_secs=60,
min_eval_frequency=None,
delay_workers_by_global_step=False,
export_strategies=None,
train_steps_per_iteration=None):
"""Constructor for `Experiment`.
Creates an Experiment instance. None of the functions passed to this
constructor are executed at construction time. They are stored and used
when a method is executed which requires it.
Args:
estimator: Object implementing Estimator interface, which could be a
combination of ${tf.contrib.learn.Trainable} and
${tf.contrib.learn.Evaluable} (deprecated), or
${tf.estimator.`Estimator}.
train_input_fn: function, returns features and labels for training.
eval_input_fn: function, returns features and labels for evaluation. If
`eval_steps` is `None`, this should be configured only to produce for a
finite number of batches (generally, 1 epoch over the evaluation data).
eval_metrics: `dict` of string, metric function. If `None`, default set
is used. This should be `None` if the `estimator` is
${tf.estimator.Estimator}. If metrics are provided they will be
*appended* to the default set.
train_steps: Perform this many steps of training. `None`, the default,
means train forever.
eval_steps: `evaluate` runs until input is exhausted (or another exception
is raised), or for `eval_steps` steps, if specified.
train_monitors: A list of monitors to pass to the `Estimator`'s `fit`
function.
eval_hooks: A list of `SessionRunHook` hooks to pass to the
`Estimator`'s `evaluate` function.
local_eval_frequency: (applies only to local_run) Frequency of running
eval in steps. If `None`, runs evaluation only at the end of training.
eval_delay_secs: Start evaluating after waiting for this many seconds.
continuous_eval_throttle_secs: Do not re-evaluate unless the last
evaluation was started at least this many seconds ago for
continuous_eval().
min_eval_frequency: (applies only to train_and_evaluate). the minimum
number of steps between evaluations. Of course, evaluation does not
occur if no new snapshot is available, hence, this is the minimum.
If 0, the evaluation will only happen after training.
If None, defaults to 1, unless model_dir is on GCS, in which case the
default is 1000.
delay_workers_by_global_step: if `True` delays training workers
based on global step instead of time.
export_strategies: Iterable of `ExportStrategy`s, or a single one, or
`None`.
train_steps_per_iteration: (applies only to continuous_train_and_eval).
Perform this many (integer) number of train steps for each
training-evaluation iteration. With a small value, the model will be
evaluated more frequently with more checkpoints saved. If `None`, will
use a default value (which is smaller than `train_steps` if provided).
Raises:
ValueError: if `estimator` does not implement Estimator interface,
or if export_strategies has the wrong type.
"""
if isinstance(estimator, core_estimator.Estimator):
self._core_estimator_used = True
if eval_metrics is not None:
raise ValueError(
"`eval_metrics` must be `None` with `tf.estimator.Estimator`")
else:
self._core_estimator_used = False
if not isinstance(estimator, evaluable.Evaluable):
raise ValueError(
"`estimator` must implement `tf.contrib.learn.Evaluable` "
"or `tf.estimator.Estimator`.")
if not isinstance(estimator, trainable.Trainable):
raise ValueError(
"`estimator` must implement `tf.contrib.learn.Trainable`"
"or `tf.estimator.`Estimator`.")
super(Experiment, self).__init__()
# Immutable fields.
self._estimator = estimator
self._train_input_fn = train_input_fn
self._eval_input_fn = eval_input_fn
self._eval_metrics = eval_metrics
self._train_steps = train_steps
self._eval_steps = eval_steps
self._local_eval_frequency = local_eval_frequency
self._eval_delay_secs = eval_delay_secs
self._continuous_eval_throttle_secs = continuous_eval_throttle_secs
# Using 1 on a non-cached file system requires a lot of overhead to
# read the checkpoint state file. This is particular bad on GCS, so
# we use a different default. This is a temporary band-aid, to be
# fixed holistically later (b/36498507).
default_min_eval_frequency = 1000 if _is_gcs(estimator.model_dir) else 1
self._min_eval_frequency = min_eval_frequency if (
min_eval_frequency is not None) else default_min_eval_frequency
self._delay_workers_by_global_step = delay_workers_by_global_step
self._train_monitors = train_monitors[:] if train_monitors else []
self._eval_hooks = eval_hooks[:] if eval_hooks else []
self._set_export_strategies(export_strategies)
self._train_steps_per_iteration = train_steps_per_iteration
if (self._train_steps_per_iteration is not None and
not isinstance(self._train_steps_per_iteration, int)):
raise ValueError(
"`train_steps_per_iteration` must be an integer.")
  @property
  def estimator(self):
    """Returns the `Estimator` this experiment was constructed with."""
    return self._estimator
  @property
  def eval_metrics(self):
    """Returns the eval metrics dict passed to the constructor (may be None)."""
    return self._eval_metrics
  @property
  def train_steps(self):
    """Returns the number of training steps, or None to train forever."""
    return self._train_steps
  @property
  def eval_steps(self):
    """Returns the number of steps per evaluation run (None = until exhausted)."""
    return self._eval_steps
def _set_export_strategies(self, values): # pylint: disable=missing-docstring
export_strategies = []
if values:
if isinstance(values, export_strategy.ExportStrategy):
export_strategies.append(values)
else:
for value in values:
if not isinstance(value, export_strategy.ExportStrategy):
raise ValueError("`export_strategies` must be an ExportStrategy,"
" an iterable of ExportStrategy, or `None`,"
" found %s." % value)
export_strategies.append(value)
self._export_strategies = tuple(export_strategies)
  def extend_train_hooks(self, additional_hooks):
    """Extends the hooks for training.
    Args:
      additional_hooks: iterable of hooks/monitors appended to the list passed
        to the estimator's train/fit call.
    """
    self._train_monitors.extend(additional_hooks)
def reset_export_strategies(self, new_export_strategies=None):
"""Resets the export strategies with the `new_export_strategies`.
Args:
new_export_strategies: A new list of `ExportStrategy`s, or a single one,
or None.
Returns:
The old export strategies.
"""
old_export_strategies = self._export_strategies
self._set_export_strategies(new_export_strategies)
return old_export_strategies
  def train(self, delay_secs=None):
    """Fit the estimator using the training data.
    Train the estimator for `self._train_steps` steps, after waiting for
    `delay_secs` seconds. If `self._train_steps` is `None`, train forever.
    Args:
      delay_secs: Start training after this many seconds.
    Returns:
      The trained estimator.
    """
    start = time.time()
    # Start the server, if needed. It's important to start the server before
    # we (optionally) sleep for the case where no device_filters are set.
    # Otherwise, the servers will wait to connect to each other before starting
    # to train. We might as well start as soon as we can.
    config = self._estimator.config
    if (config.environment != run_config.Environment.LOCAL and
        config.environment != run_config.Environment.GOOGLE and
        config.cluster_spec and config.master):
      self._start_server()
    extra_hooks = []
    if delay_secs is None:
      # No explicit delay: stagger workers either by global step (hook) or by
      # wall-clock time, keyed off this task's id.
      task_id = self._estimator.config.task_id or 0
      if self._delay_workers_by_global_step:
        # Wait 5500 global steps for the second worker. Each worker waits more
        # then previous one but with a diminishing number of steps.
        extra_hooks.append(
            basic_session_run_hooks.GlobalStepWaiterHook(
                int(8000.0 * math.log(task_id + 1))))
        delay_secs = 0
      else:
        # Wait 5 secs more for each new worker up to 60 secs.
        delay_secs = min(60, task_id * 5)
    if delay_secs > 0:
      elapsed_secs = time.time() - start
      remaining = delay_secs - elapsed_secs
      logging.info("Waiting %d secs before starting training.", remaining)
      # NOTE(review): sleeps the full delay_secs although the log reports the
      # `remaining` time -- confirm whether sleeping `remaining` was intended.
      time.sleep(delay_secs)
    return self._call_train(input_fn=self._train_input_fn,
                            max_steps=self._train_steps,
                            hooks=self._train_monitors + extra_hooks)
def evaluate(self, delay_secs=None):
"""Evaluate on the evaluation data.
Runs evaluation on the evaluation data and returns the result. Runs for
`self._eval_steps` steps, or if it's `None`, then run until input is
exhausted or another exception is raised. Start the evaluation after
`delay_secs` seconds, or if it's `None`, defaults to using
`self._eval_delay_secs` seconds.
Args:
delay_secs: Start evaluating after this many seconds. If `None`, defaults
to using `self._eval_delays_secs`.
Returns:
The result of the `evaluate` call to the `Estimator`.
"""
if delay_secs is None:
delay_secs = self._eval_delay_secs
if delay_secs:
logging.info("Waiting %d secs before starting eval.", delay_secs)
time.sleep(delay_secs)
return self._call_evaluate(input_fn=self._eval_input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name="one_pass",
hooks=self._eval_hooks)
  @deprecated(
      "2016-10-23",
      "local_run will be renamed to train_and_evaluate and the new default "
      "behavior will be to run evaluation every time there is a new "
      "checkpoint.")
  def local_run(self):
    """Runs `train_and_evaluate` using `local_eval_frequency` (deprecated).
    Temporarily substitutes `_local_eval_frequency` for `_min_eval_frequency`
    so the deprecated constructor arg keeps controlling eval cadence; the
    original value is restored when the context exits.
    Returns:
      Whatever `train_and_evaluate()` returns.
    """
    with _new_attr_context(self, "_min_eval_frequency"):
      self._min_eval_frequency = self._local_eval_frequency
      return self.train_and_evaluate()
  # TODO(xiejw): Allow continuous_eval_predicate_fn to be passed via constructor
  # once stopping all jobs is implemented.
  def _continuous_eval(self,
                       input_fn,
                       name,
                       delay_secs,
                       throttle_delay_secs,
                       evaluate_checkpoint_only_once=True,
                       continuous_eval_predicate_fn=None):
    """Run continuous eval.
    Runs infinite eval on the evaluation data set. This function starts
    evaluating after `delay_secs` seconds and then runs no more than one
    evaluation (with `self._eval_steps` steps each time) per
    `throttle_delay_secs`. If `train_steps` is not None, will return after
    global_step reaches `train_steps`.
    Args:
      input_fn: The input to use for this eval.
      name: A string appended to the folder name of evaluation results.
      delay_secs: Start evaluating after this many seconds. If None, defaults to
        self._eval_delay_secs.
      throttle_delay_secs: Do not re-evaluate unless the last evaluation was
        started at least this many seconds ago. If None, defaults to
        self._continuous_eval_throttle_secs.
      evaluate_checkpoint_only_once: Whether to skip evaluation of checkpoints
        that have already been evaluated. Default is `True`.
      continuous_eval_predicate_fn: A predicate function determining whether to
        continue eval after each iteration. `predicate_fn` takes the evaluation
        results as arguments. At the beginning of evaluation, the passed eval
        results will be None so it's expected that the predicate function
        handles that gracefully. When `predicate_fn` is not specified,
        continuous eval will run in an infinite loop (if `train_steps` is None)
        or exit once global step reaches `train_steps`.
    Raises:
      ValueError: if `continuous_eval_predicate_fn` is neither None nor
        callable.
    """
    if (continuous_eval_predicate_fn is not None and
        not callable(continuous_eval_predicate_fn)):
      raise ValueError(
          "`continuous_eval_predicate_fn` must be a callable, or None.")
    if delay_secs is None:
      delay_secs = self._eval_delay_secs
    if throttle_delay_secs is None:
      throttle_delay_secs = self._continuous_eval_throttle_secs
    if delay_secs:
      logging.info("Waiting %f secs before starting eval.", delay_secs)
      time.sleep(delay_secs)
    # previous_path: last checkpoint actually evaluated (for dedup).
    # eval_result: latest eval metrics; {} when no eval could run yet.
    previous_path = None
    eval_result = None
    last_warning_time = 0
    while (not continuous_eval_predicate_fn or
           continuous_eval_predicate_fn(eval_result)):
      # Exit if we have already reached number of steps to train.
      if self._has_training_stopped(eval_result):
        logging.info("Exiting continuous eval, global_step=%s >= "
                     "train_step=%s",
                     eval_result[ops.GraphKeys.GLOBAL_STEP],
                     self._train_steps)
        return
      start = time.time()
      error_msg = None
      latest_path = saver.latest_checkpoint(self._estimator.model_dir)
      if not latest_path:
        error_msg = ("Estimator is not fitted yet. "
                     "Will start an evaluation when a checkpoint is ready.")
      elif evaluate_checkpoint_only_once and latest_path == previous_path:
        error_msg = "No new checkpoint ready for evaluation."
      if error_msg:
        # Print warning message every 10 mins.
        eval_result = {}
        if time.time() - last_warning_time > 600:
          logging.warning(error_msg)
          last_warning_time = time.time()
      else:
        eval_result = self._call_evaluate(input_fn=input_fn,
                                          steps=self._eval_steps,
                                          metrics=self._eval_metrics,
                                          name=name,
                                          checkpoint_path=latest_path,
                                          hooks=self._eval_hooks)
        # Ensure eval result is not None for next round of evaluation.
        if not eval_result:
          eval_result = {}
        self._maybe_export(eval_result, checkpoint_path=latest_path)
        # Clear warning timer and update last evaluated checkpoint
        last_warning_time = 0
        previous_path = latest_path
      # Throttle: make sure at least throttle_delay_secs elapse between the
      # starts of two consecutive evaluation attempts.
      duration = time.time() - start
      if duration < throttle_delay_secs:
        difference = throttle_delay_secs - duration
        logging.info("Waiting %f secs before starting next eval run.",
                     difference)
        time.sleep(difference)
def _has_training_stopped(self, eval_result):
"""Determines whether the training has stopped."""
if not eval_result:
return False
global_step = eval_result.get(ops.GraphKeys.GLOBAL_STEP)
return global_step and self._train_steps and (
global_step >= self._train_steps)
  def continuous_eval(self,
                      delay_secs=None,
                      throttle_delay_secs=None,
                      evaluate_checkpoint_only_once=True,
                      continuous_eval_predicate_fn=None):
    """Continuously evaluates on the eval data; see `_continuous_eval`."""
    self._continuous_eval(
        self._eval_input_fn,
        name="continuous",
        delay_secs=delay_secs,
        throttle_delay_secs=throttle_delay_secs,
        evaluate_checkpoint_only_once=evaluate_checkpoint_only_once,
        continuous_eval_predicate_fn=continuous_eval_predicate_fn)
  def continuous_eval_on_train_data(self,
                                    delay_secs=None,
                                    throttle_delay_secs=None,
                                    continuous_eval_predicate_fn=None):
    """Continuously evaluates on the *training* data; see `_continuous_eval`."""
    self._continuous_eval(
        self._train_input_fn,
        name="continuous_on_train_data",
        delay_secs=delay_secs,
        throttle_delay_secs=throttle_delay_secs,
        continuous_eval_predicate_fn=continuous_eval_predicate_fn)
  def train_and_evaluate(self):
    """Interleaves training and evaluation.
    The frequency of evaluation is controlled by the constructor arg
    `min_eval_frequency`. When this parameter is 0, evaluation happens
    only after training has completed. Note that evaluation cannot happen
    more frequently than checkpoints are taken. If no new snapshots are
    available when evaluation is supposed to occur, then evaluation doesn't
    happen for another `min_eval_frequency` steps (assuming a checkpoint is
    available at that point). Thus, settings `min_eval_frequency` to 1 means
    that the model will be evaluated everytime there is a new checkpoint.
    This is particular useful for a "Master" task in the cloud, whose
    responsibility it is to take checkpoints, evaluate those checkpoints,
    and write out summaries. Participating in training as the supervisor
    allows such a task to accomplish the first and last items, while
    performing evaluation allows for the second.
    Returns:
      The result of the `evaluate` call to the `Estimator` as well as the
      export results using the specified `ExportStrategy`.
    """
    # The directory to which evaluation summaries are written are determined
    # by adding a suffix to 'eval'; that suffix is the 'name' parameter to
    # the various evaluate(...) methods. By setting it to None, we force
    # the directory name to simply be 'eval'.
    eval_dir_suffix = None
    # We set every_n_steps to 1, but evaluation only occurs when a new
    # snapshot is available. If, by the time we finish evaluation
    # there is a new snapshot, then we just evaluate again. Otherwise,
    # we keep training until one becomes available.
    with _new_attr_context(self, "_train_monitors"):
      self._train_monitors = self._train_monitors or []
      # Mid-training eval is implemented via a ValidationMonitor attached to
      # the training loop; it fires at most every _min_eval_frequency steps.
      if self._min_eval_frequency:
        self._train_monitors += [monitors.ValidationMonitor(
            input_fn=self._eval_input_fn, eval_steps=self._eval_steps,
            metrics=self._eval_metrics, every_n_steps=self._min_eval_frequency,
            name=eval_dir_suffix, hooks=self._eval_hooks
        )]
      self.train(delay_secs=0)
    # One final full evaluation after training completes.
    eval_result = self._call_evaluate(input_fn=self._eval_input_fn,
                                      steps=self._eval_steps,
                                      metrics=self._eval_metrics,
                                      name=eval_dir_suffix,
                                      hooks=self._eval_hooks)
    export_results = self._maybe_export(eval_result)
    return eval_result, export_results
  @experimental
  def continuous_train_and_eval(self,
                                continuous_eval_predicate_fn=None):
    """Interleaves training and evaluation.
    The frequency of evaluation is controlled by the `train_steps_per_iteration`
    (via constructor). The model will be first trained for
    `train_steps_per_iteration`, and then be evaluated in turns.
    This method is intended for single machine usage.
    This differs from `train_and_evaluate` as follows:
      1. The procedure will have train and evaluation in turns. The model
      will be trained for a number of steps (usually smaller than `train_steps`
      if provided) and then be evaluated. `train_and_evaluate` will train the
      model for `train_steps` (no small training iterations).
      2. Due to the different approach this schedule takes, it leads to two
      differences in resource control. First, the resources (e.g., memory) used
      by training will be released before evaluation (`train_and_evaluate` takes
      double resources). Second, more checkpoints will be saved as a checkpoint
      is generated at the end of each small training iteration.
    Args:
      continuous_eval_predicate_fn: A predicate function determining whether to
        continue after each iteration. `predicate_fn` takes the evaluation
        results as its arguments. At the beginning of evaluation, the passed
        eval results will be None so it's expected that the predicate function
        handles that gracefully. When `predicate_fn` is not specified, this will
        run in an infinite loop or exit when global_step reaches `train_steps`.
    Returns:
      A tuple of the result of the `evaluate` call to the `Estimator` and the
      export results using the specified `ExportStrategy`.
    Raises:
      ValueError: if `continuous_eval_predicate_fn` is neither None nor
        callable.
    """
    if (continuous_eval_predicate_fn is not None and
        not callable(continuous_eval_predicate_fn)):
      raise ValueError(
          "`continuous_eval_predicate_fn` must be a callable, or None.")
    eval_result = None
    # Set the default value for train_steps_per_iteration, which will be
    # overridden by other settings.
    train_steps_per_iteration = 1000
    if self._train_steps_per_iteration is not None:
      train_steps_per_iteration = self._train_steps_per_iteration
    elif self._train_steps is not None:
      # Default iteration size: a tenth of the total training budget.
      train_steps_per_iteration = int(self._train_steps / 10)
    while (not continuous_eval_predicate_fn or
           continuous_eval_predicate_fn(eval_result)):
      if self._has_training_stopped(eval_result):
        # Exits once max steps of training is satisfied.
        logging.info("Stop training model as max steps reached")
        break
      logging.info("Training model for %s steps", train_steps_per_iteration)
      self._call_train(input_fn=self._train_input_fn,
                       steps=train_steps_per_iteration,
                       hooks=self._train_monitors)
      logging.info("Evaluating model now.")
      eval_result = self._call_evaluate(input_fn=self._eval_input_fn,
                                        steps=self._eval_steps,
                                        metrics=self._eval_metrics,
                                        name="one_pass",
                                        hooks=self._eval_hooks)
    return eval_result, self._maybe_export(eval_result)
def _maybe_export(self, eval_result, checkpoint_path=None):
"""Export the Estimator using export_fn, if defined."""
export_dir_base = os.path.join(
compat.as_bytes(self._estimator.model_dir),
compat.as_bytes("export"))
export_results = []
for strategy in self._export_strategies:
export_results.append(
strategy.export(
self._estimator,
os.path.join(
compat.as_bytes(export_dir_base),
compat.as_bytes(strategy.name)),
checkpoint_path=checkpoint_path,
eval_result=eval_result))
return export_results
  def run_std_server(self):
    """Starts a TensorFlow server and joins the serving thread.
    Typically used for parameter servers.
    Raises:
      ValueError: if not enough information is available in the estimator's
        config to create a server.
    """
    # Blocks forever: join() only returns when the server shuts down.
    self._start_server().join()
  def test(self):
    """Tests training, evaluating and exporting the estimator for a single step.
    Returns:
      The result of the `evaluate` call to the `Estimator`.
    """
    # One train step, one eval step, then a throwaway export -- a smoke test
    # of the whole pipeline rather than a meaningful training run.
    self._call_train(input_fn=self._train_input_fn,
                     steps=1,
                     hooks=self._train_monitors)
    eval_result = self._call_evaluate(input_fn=self._eval_input_fn,
                                      steps=1,
                                      metrics=self._eval_metrics,
                                      name="one_pass")
    _ = self._maybe_export(eval_result)
    return eval_result
def _start_server(self):
"""Creates, starts, and returns a server_lib.Server."""
config = self._estimator.config
if (not config.cluster_spec or not config.task_type or not config.master or
config.task_id is None):
raise ValueError("Could not start server; be sure to specify "
"cluster_spec, task_type, master, and task in "
"RunConfig or set the TF_CONFIG environment variable.")
server = server_lib.Server(
config.cluster_spec,
job_name=config.task_type,
task_index=config.task_id,
config=config.tf_config,
start=False)
server.start()
return server
def _call_train(self, _sentinel=None, # pylint: disable=invalid-name,
input_fn=None, steps=None, hooks=None, max_steps=None):
if _sentinel is not None:
raise ValueError("_call_train should be called with keyword args only")
# Estimator in core cannot work with monitors. We need to convert them
# to hooks. For Estimator in contrib, it is converted internally. So, it is
# safe to convert for both cases.
hooks = monitors.replace_monitors_with_hooks(hooks, self._estimator)
if self._core_estimator_used:
return self._estimator.train(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
hooks=hooks)
else:
return self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=hooks)
def _call_evaluate(self, _sentinel=None, # pylint: disable=invalid-name,
input_fn=None, steps=None, metrics=None, name=None,
checkpoint_path=None, hooks=None):
if _sentinel is not None:
raise ValueError("_call_evaluate should be called with keyword args only")
if self._core_estimator_used:
if metrics is not None:
raise ValueError(
"`eval_metrics` must be `None` with `tf.estimator.Estimator`")
return self._estimator.evaluate(input_fn=input_fn,
steps=steps,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks)
else:
return self._estimator.evaluate(input_fn=input_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks)
@contextlib.contextmanager
def _new_attr_context(obj, attr):
"""Creates a new context in which an object's attribute can be changed.
This creates a context in which an object's attribute can be changed.
Once the context is exited, the attribute reverts to its original value.
Args:
obj: An object whose attribute to restore at the end of the context.
attr: An attribute to remember and restore at the end of the context.
Yields:
Context.
Example:
my_obj.x = 1
with _new_attr_context(my_obj, "x"):
my_obj.x = 2
print(my_obj.x)
print(my_obj.x)
"""
saved = getattr(obj, attr)
try:
yield
finally:
setattr(obj, attr, saved)
def _is_gcs(model_dir):
return model_dir and model_dir.startswith("gs://")
| |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_type, check_variable_and_dtype
from ..fluid.layers.layer_function_generator import templatedoc
from ..static import Variable
from ..framework import VarBase as Tensor
# TODO: define logic functions of a tensor
from ..fluid.layers import is_empty # noqa: F401
from ..fluid.layers import logical_and # noqa: F401
from ..fluid.layers import logical_not # noqa: F401
from ..fluid.layers import logical_or # noqa: F401
from ..fluid.layers import logical_xor # noqa: F401
import paddle
from paddle import _C_ops
from paddle.tensor.creation import full
__all__ = []
def equal_all(x, y, name=None):
    """
    This OP returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise.
    **NOTICE**: The output of this OP has no gradient.
    Args:
        x(Tensor): Tensor, data type is bool, float32, float64, int32, int64.
        y(Tensor): Tensor, data type is bool, float32, float64, int32, int64.
        name(str, optional): The default value is None. Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: output Tensor, data type is bool, value is [False] or [True].
    Examples:
        .. code-block:: python
          import paddle
          x = paddle.to_tensor([1, 2, 3])
          y = paddle.to_tensor([1, 2, 3])
          z = paddle.to_tensor([1, 4, 3])
          result1 = paddle.equal_all(x, y)
          print(result1) # result1 = [True ]
          result2 = paddle.equal_all(x, z)
          print(result2) # result2 = [False ]
    """
    # Dygraph fast path: call the C++ op directly, bypassing graph building.
    if paddle.in_dynamic_mode():
        return _C_ops.equal_all(x, y)
    # Static-graph path.  NOTE: LayerHelper consumes **locals() (x, y, name),
    # so local names here are part of the op's wiring -- do not rename them.
    helper = LayerHelper("equal_all", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    helper.append_op(
        type='equal_all', inputs={'X': [x],
                                  'Y': [y]}, outputs={'Out': [out]})
    return out
@templatedoc()
def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
    """
    ${comment}
    Args:
        x(Tensor): ${input_comment}.
        y(Tensor): ${other_comment}.
        rtol(rtoltype, optional): The relative tolerance. Default: :math:`1e-5` .
        atol(atoltype, optional): The absolute tolerance. Default: :math:`1e-8` .
        equal_nan(equalnantype, optional): ${equal_nan_comment}.
        name (str, optional): Name for the operation. For more information, please
            refer to :ref:`api_guide_Name`. Default: None.
    Returns:
        Tensor: ${out_comment}.
    Raises:
        TypeError: The data type of ``x`` must be one of float32, float64.
        TypeError: The data type of ``y`` must be one of float32, float64.
        TypeError: The type of ``rtol`` must be float.
        TypeError: The type of ``atol`` must be float.
        TypeError: The type of ``equal_nan`` must be bool.
    Examples:
        .. code-block:: python
          import paddle
          x = paddle.to_tensor([10000., 1e-07])
          y = paddle.to_tensor([10000.1, 1e-08])
          result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
                                    equal_nan=False, name="ignore_nan")
          np_result1 = result1.numpy()
          # [False]
          result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
                                    equal_nan=True, name="equal_nan")
          np_result2 = result2.numpy()
          # [False]
          x = paddle.to_tensor([1.0, float('nan')])
          y = paddle.to_tensor([1.0, float('nan')])
          result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
                                    equal_nan=False, name="ignore_nan")
          np_result1 = result1.numpy()
          # [False]
          result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
                                    equal_nan=True, name="equal_nan")
          np_result2 = result2.numpy()
          # [True]
    """
    # Dygraph fast path.  rtol/atol are serialized to strings here, matching
    # the string-typed attributes the op declares (see attrs below).
    if paddle.in_dynamic_mode():
        return _C_ops.allclose(x, y, 'rtol',
                               str(rtol), 'atol',
                               str(atol), 'equal_nan', equal_nan)
    # Static-graph path: validate inputs, then build the op.
    check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose')
    check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose')
    check_type(rtol, 'rtol', float, 'allclose')
    check_type(atol, 'atol', float, 'allclose')
    check_type(equal_nan, 'equal_nan', bool, 'allclose')
    # NOTE: LayerHelper consumes **locals(); keep local names as-is.
    helper = LayerHelper("allclose", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    inputs = {'Input': x, 'Other': y}
    outputs = {'Out': out}
    attrs = {'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan}
    helper.append_op(
        type='allclose', inputs=inputs, outputs=outputs, attrs=attrs)
    return out
@templatedoc()
def equal(x, y, name=None):
    """
    This layer returns the truth value of :math:`x == y` elementwise.
    **NOTICE**: The output of this OP has no gradient.
    Args:
        x(Tensor): Tensor, data type is bool, float32, float64, int32, int64.
        y(Tensor): Tensor, data type is bool, float32, float64, int32, int64.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: output Tensor, it's shape is the same as the input's Tensor,
        and the data type is bool. The result of this op is stop_gradient.
    Examples:
        .. code-block:: python
          import paddle
          x = paddle.to_tensor([1, 2, 3])
          y = paddle.to_tensor([1, 3, 2])
          result1 = paddle.equal(x, y)
          print(result1)  # result1 = [True False False]
    """
    # Unlike equal_all, `y` may be a Python scalar; reject anything else.
    if not isinstance(y, (int, bool, float, Variable)):
        raise TypeError(
            "Type of input args must be float, bool, int or Tensor, but received type {}".
            format(type(y)))
    # Promote a scalar `y` to a 1-element tensor of x's dtype so the op sees
    # two tensors (broadcast handles the shapes).
    if not isinstance(y, Variable):
        y = full(shape=[1], dtype=x.dtype, fill_value=y)
    # Dygraph fast path.
    if paddle.in_dynamic_mode():
        return _C_ops.equal(x, y)
    check_variable_and_dtype(
        x, "x", ["bool", "float32", "float64", "int32", "int64"], "equal")
    check_variable_and_dtype(
        y, "y", ["bool", "float32", "float64", "int32", "int64"], "equal")
    # NOTE: LayerHelper consumes **locals(); keep local names as-is.
    helper = LayerHelper("equal", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    # Comparison results carry no gradient.
    out.stop_gradient = True
    helper.append_op(
        type='equal', inputs={'X': [x],
                              'Y': [y]}, outputs={'Out': [out]})
    return out
@templatedoc()
def greater_equal(x, y, name=None):
    """
    This OP returns the truth value of :math:`x >= y` elementwise, which is equivalent function to the overloaded operator `>=`.
    **NOTICE**: The output of this OP has no gradient.
    Args:
        x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64.
        y(Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64.
        name(str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`.
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([1, 2, 3])
            y = paddle.to_tensor([1, 3, 2])
            result1 = paddle.greater_equal(x, y)
            print(result1) # result1 = [True False True]
    """
    # Dygraph fast path: call the C++ op directly.
    if paddle.in_dynamic_mode():
        return _C_ops.greater_equal(x, y)
    # Static-graph path: validate dtypes, then append a `greater_equal` op.
    check_variable_and_dtype(x, "x",
                             ["bool", "float32", "float64", "int32", "int64"],
                             "greater_equal")
    check_variable_and_dtype(y, "y",
                             ["bool", "float32", "float64", "int32", "int64"],
                             "greater_equal")
    # NOTE: LayerHelper(**locals()) forwards every local variable by name, so
    # no extra locals should be introduced before this call.
    helper = LayerHelper("greater_equal", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    # Comparison results carry no gradient.
    out.stop_gradient = True
    helper.append_op(
        type='greater_equal',
        inputs={'X': [x],
                'Y': [y]},
        outputs={'Out': [out]})
    return out
@templatedoc()
def greater_than(x, y, name=None):
    """
    This OP returns the truth value of :math:`x > y` elementwise, which is equivalent function to the overloaded operator `>`.
    **NOTICE**: The output of this OP has no gradient.
    Args:
        x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64.
        y(Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64.
        name(str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x` .
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([1, 2, 3])
            y = paddle.to_tensor([1, 3, 2])
            result1 = paddle.greater_than(x, y)
            print(result1) # result1 = [False False True]
    """
    # Dygraph fast path: call the C++ op directly.
    if paddle.in_dynamic_mode():
        return _C_ops.greater_than(x, y)
    # Static-graph path: validate dtypes, then append a `greater_than` op.
    check_variable_and_dtype(x, "x",
                             ["bool", "float32", "float64", "int32", "int64"],
                             "greater_than")
    check_variable_and_dtype(y, "y",
                             ["bool", "float32", "float64", "int32", "int64"],
                             "greater_than")
    # NOTE: LayerHelper(**locals()) forwards every local variable by name, so
    # no extra locals should be introduced before this call.
    helper = LayerHelper("greater_than", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    # Comparison results carry no gradient.
    out.stop_gradient = True
    helper.append_op(
        type='greater_than',
        inputs={'X': [x],
                'Y': [y]},
        outputs={'Out': [out]})
    return out
@templatedoc()
def less_equal(x, y, name=None):
    """
    This OP returns the truth value of :math:`x <= y` elementwise, which is equivalent function to the overloaded operator `<=`.
    **NOTICE**: The output of this OP has no gradient.
    Args:
        x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64.
        y(Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64.
        name(str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`.
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([1, 2, 3])
            y = paddle.to_tensor([1, 3, 2])
            result1 = paddle.less_equal(x, y)
            print(result1) # result1 = [True True False]
    """
    # Dygraph fast path: call the C++ op directly.
    if paddle.in_dynamic_mode():
        return _C_ops.less_equal(x, y)
    # Static-graph path: validate dtypes, then append a `less_equal` op.
    check_variable_and_dtype(
        x, "x", ["bool", "float32", "float64", "int32", "int64"], "less_equal")
    check_variable_and_dtype(
        y, "y", ["bool", "float32", "float64", "int32", "int64"], "less_equal")
    # NOTE: LayerHelper(**locals()) forwards every local variable by name, so
    # no extra locals should be introduced before this call.
    helper = LayerHelper("less_equal", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    # Comparison results carry no gradient.
    out.stop_gradient = True
    helper.append_op(
        type='less_equal', inputs={'X': [x],
                                   'Y': [y]}, outputs={'Out': [out]})
    return out
@templatedoc()
def less_than(x, y, name=None):
    """
    This OP returns the truth value of :math:`x < y` elementwise, which is equivalent function to the overloaded operator `<`.
    **NOTICE**: The output of this OP has no gradient.
    Args:
        x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64.
        y(Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64.
        name(str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`.
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([1, 2, 3])
            y = paddle.to_tensor([1, 3, 2])
            result1 = paddle.less_than(x, y)
            print(result1) # result1 = [False True False]
    """
    # Dygraph fast path: call the C++ op directly.
    if paddle.in_dynamic_mode():
        return _C_ops.less_than(x, y)
    # Static-graph path: validate dtypes, then append a `less_than` op.
    check_variable_and_dtype(
        x, "x", ["bool", "float32", "float64", "int32", "int64"], "less_than")
    check_variable_and_dtype(
        y, "y", ["bool", "float32", "float64", "int32", "int64"], "less_than")
    # NOTE: LayerHelper(**locals()) forwards every local variable by name, so
    # no extra locals should be introduced before this call.
    helper = LayerHelper("less_than", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    # Comparison results carry no gradient.
    out.stop_gradient = True
    helper.append_op(
        type='less_than', inputs={'X': [x],
                                  'Y': [y]}, outputs={'Out': [out]})
    return out
@templatedoc()
def not_equal(x, y, name=None):
    """
    This OP returns the truth value of :math:`x != y` elementwise, which is equivalent function to the overloaded operator `!=`.
    **NOTICE**: The output of this OP has no gradient.
    Args:
        x(Tensor): First input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64.
        y(Tensor): Second input to compare which is N-D tensor. The input data type should be bool, float32, float64, int32, int64.
        name(str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`.
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([1, 2, 3])
            y = paddle.to_tensor([1, 3, 2])
            result1 = paddle.not_equal(x, y)
            print(result1) # result1 = [False True True]
    """
    # Dygraph fast path: call the C++ op directly.
    if paddle.in_dynamic_mode():
        return _C_ops.not_equal(x, y)
    # Static-graph path: validate dtypes, then append a `not_equal` op.
    check_variable_and_dtype(
        x, "x", ["bool", "float32", "float64", "int32", "int64"], "not_equal")
    check_variable_and_dtype(
        y, "y", ["bool", "float32", "float64", "int32", "int64"], "not_equal")
    # NOTE: LayerHelper(**locals()) forwards every local variable by name, so
    # no extra locals should be introduced before this call.
    helper = LayerHelper("not_equal", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    # Comparison results carry no gradient.
    out.stop_gradient = True
    helper.append_op(
        type='not_equal', inputs={'X': [x],
                                  'Y': [y]}, outputs={'Out': [out]})
    return out
def is_tensor(x):
    """
    Tests whether the given object is a paddle.Tensor.

    Args:
        x (object): The object to be checked.

    Returns:
        bool: True if ``x`` is a paddle.Tensor, otherwise False.

    Examples:
        .. code-block:: python

            import paddle

            real_tensor = paddle.rand(shape=[2, 3, 5], dtype='float32')
            print(paddle.is_tensor(real_tensor))  # True

            not_a_tensor = [1, 4]
            print(paddle.is_tensor(not_a_tensor))  # False
    """
    return isinstance(x, Tensor)
def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
    """Shared implementation behind the public bitwise_* ops.

    Args:
        op_name (str): Name of the underlying op, e.g. 'bitwise_and'.
        x: First operand tensor.
        y: Second operand tensor, or None for unary ops (bitwise_not).
        out: Optional pre-created output variable.
        name (str, optional): Name for the operation.
        binary_op (bool): True when the op takes both x and y.

    Returns:
        The op's output variable; its dtype matches ``x``.
    """
    # Dygraph fast path: dispatch to the C++ op looked up by name.
    if paddle.in_dynamic_mode():
        op = getattr(_C_ops, op_name)
        if binary_op:
            return op(x, y)
        else:
            return op(x)
    # Static-graph path: only bool and integer dtypes are supported.
    check_variable_and_dtype(
        x, "x", ["bool", "uint8", "int8", "int16", "int32", "int64"], op_name)
    if y is not None:
        check_variable_and_dtype(
            y, "y", ["bool", "uint8", "int8", "int16", "int32", "int64"],
            op_name)
    if out is not None:
        check_type(out, "out", Variable, op_name)
    # NOTE: LayerHelper(**locals()) forwards every local variable by name, so
    # no extra locals should be introduced before this call.
    helper = LayerHelper(op_name, **locals())
    if binary_op:
        # Binary bitwise ops require both operands to share a dtype.
        assert x.dtype == y.dtype
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    if binary_op:
        helper.append_op(
            type=op_name, inputs={"X": x,
                                  "Y": y}, outputs={"Out": out})
    else:
        helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})
    return out
@templatedoc()
def bitwise_and(x, y, out=None, name=None):
    """
    ${comment}
    Args:
        x (Tensor): ${x_comment}
        y (Tensor): ${y_comment}
        out(Tensor): ${out_comment}
    Returns:
        Tensor: ${out_comment}
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([-5, -1, 1])
            y = paddle.to_tensor([4, 2, -3])
            res = paddle.bitwise_and(x, y)
            print(res) # [0, 2, 1]
    """
    # Thin wrapper: validation and op construction live in _bitwise_op.
    return _bitwise_op(
        op_name="bitwise_and", x=x, y=y, name=name, out=out, binary_op=True)
@templatedoc()
def bitwise_or(x, y, out=None, name=None):
    """
    ${comment}
    Args:
        x (Tensor): ${x_comment}
        y (Tensor): ${y_comment}
        out(Tensor): ${out_comment}
    Returns:
        Tensor: ${out_comment}
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([-5, -1, 1])
            y = paddle.to_tensor([4, 2, -3])
            res = paddle.bitwise_or(x, y)
            print(res) # [-1, -1, -3]
    """
    # Thin wrapper: validation and op construction live in _bitwise_op.
    return _bitwise_op(
        op_name="bitwise_or", x=x, y=y, name=name, out=out, binary_op=True)
@templatedoc()
def bitwise_xor(x, y, out=None, name=None):
    """
    ${comment}
    Args:
        x (Tensor): ${x_comment}
        y (Tensor): ${y_comment}
        out(Tensor): ${out_comment}
    Returns:
        Tensor: ${out_comment}
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([-5, -1, 1])
            y = paddle.to_tensor([4, 2, -3])
            res = paddle.bitwise_xor(x, y)
            print(res) # [-1, -3, -4]
    """
    # Thin wrapper: validation and op construction live in _bitwise_op.
    return _bitwise_op(
        op_name="bitwise_xor", x=x, y=y, name=name, out=out, binary_op=True)
@templatedoc()
def bitwise_not(x, out=None, name=None):
    """
    ${comment}
    Args:
        x(Tensor): ${x_comment}
        out(Tensor): ${out_comment}
    Returns:
        Tensor: ${out_comment}
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([-5, -1, 1])
            res = paddle.bitwise_not(x)
            print(res) # [4, 0, -2]
    """
    # Thin wrapper: unary op, so y=None and binary_op=False.
    return _bitwise_op(
        op_name="bitwise_not", x=x, y=None, name=name, out=out, binary_op=False)
@templatedoc()
def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
    """
    ${comment}
    Args:
        x(Tensor): ${input_comment}.
        y(Tensor): ${other_comment}.
        rtol(rtoltype, optional): The relative tolerance. Default: :math:`1e-5` .
        atol(atoltype, optional): The absolute tolerance. Default: :math:`1e-8` .
        equal_nan(equalnantype, optional): ${equal_nan_comment}.
        name (str, optional): Name for the operation. For more information, please
            refer to :ref:`api_guide_Name`. Default: None.
    Returns:
        Tensor: ${out_comment}.
    Raises:
        TypeError: The data type of ``x`` must be one of float32, float64.
        TypeError: The data type of ``y`` must be one of float32, float64.
        TypeError: The type of ``rtol`` must be float.
        TypeError: The type of ``atol`` must be float.
        TypeError: The type of ``equal_nan`` must be bool.
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([10000., 1e-07])
            y = paddle.to_tensor([10000.1, 1e-08])
            result1 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
                                     equal_nan=False, name="ignore_nan")
            np_result1 = result1.numpy()
            # [True, False]
            result2 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
                                     equal_nan=True, name="equal_nan")
            np_result2 = result2.numpy()
            # [True, False]
            x = paddle.to_tensor([1.0, float('nan')])
            y = paddle.to_tensor([1.0, float('nan')])
            result1 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
                                     equal_nan=False, name="ignore_nan")
            np_result1 = result1.numpy()
            # [True, False]
            result2 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
                                     equal_nan=True, name="equal_nan")
            np_result2 = result2.numpy()
            # [True, True]
    """
    # Dygraph fast path. Note rtol/atol are converted with str() here and in
    # the static attrs below — presumably the op attribute is string-typed;
    # TODO(review): confirm against the op definition.
    if paddle.in_dynamic_mode():
        return _C_ops.isclose(x, y, 'rtol',
                              str(rtol), 'atol',
                              str(atol), 'equal_nan', equal_nan)
    # Static-graph path: validate input dtypes and attribute types first.
    check_variable_and_dtype(x, "input", ['float32', 'float64'], 'isclose')
    check_variable_and_dtype(y, "input", ['float32', 'float64'], 'isclose')
    check_type(rtol, 'rtol', float, 'isclose')
    check_type(atol, 'atol', float, 'isclose')
    check_type(equal_nan, 'equal_nan', bool, 'isclose')
    # NOTE: LayerHelper(**locals()) forwards every local variable by name, so
    # no extra locals should be introduced before this call.
    helper = LayerHelper("isclose", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    inputs = {'Input': x, 'Other': y}
    outputs = {'Out': out}
    attrs = {'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan}
    helper.append_op(
        type='isclose', inputs=inputs, outputs=outputs, attrs=attrs)
    return out
| |
import bs
import weakref
def bsGetAPIVersion():
    """Return the BombSquad mod-API version this script targets.

    See bombsquadgame.com/apichanges for the version changelog.
    """
    return 4
def bsGetGames():
    """Return the list of game-activity classes this module provides."""
    return [BoxingOfTheHillGame]
class BoxingOfTheHillGame(bs.TeamGameActivity):
    """King-of-the-hill variant fought with boxing gloves only.

    Each team has a hold-time countdown; keeping at least one live player
    inside the flag region ticks that team's countdown down once per second,
    and the first team to reach zero wins.
    """

    # States for the flag region, tracked by _updateFlagState().
    FLAG_NEW = 0
    FLAG_UNCONTESTED = 1
    FLAG_CONTESTED = 2
    FLAG_HELD = 3

    @classmethod
    def getName(cls):
        return 'Boxing of the Hill'

    @classmethod
    def getDescription(cls, sessionType):
        return 'Secure the flag for a set length of time. Gloves only!'

    @classmethod
    def getScoreInfo(cls):
        return {'scoreName': 'Time Held'}

    @classmethod
    def supportsSessionType(cls, sessionType):
        # Playable in both teams and free-for-all sessions.
        return True if (issubclass(sessionType, bs.TeamsSession)
                        or issubclass(sessionType, bs.FreeForAllSession)) else False

    @classmethod
    def getSupportedMaps(cls, sessionType):
        return bs.getMapsSupportingPlayType("kingOfTheHill")

    @classmethod
    def getSettings(cls, sessionType):
        return [("Hold Time", {'minValue': 10, 'default': 30, 'increment': 10}),
                ("Time Limit", {'choices': [('None', 0), ('1 Minute', 60),
                                            ('2 Minutes', 120), ('5 Minutes', 300),
                                            ('10 Minutes', 600),
                                            ('20 Minutes', 1200)], 'default': 0}),
                ("Respawn Times", {'choices': [('Shorter', 0.25), ('Short', 0.5),
                                               ('Normal', 1.0), ('Long', 2.0),
                                               ('Longer', 4.0)], 'default': 1.0})]

    def __init__(self, settings):
        bs.TeamGameActivity.__init__(self, settings)
        self._scoreBoard = bs.ScoreBoard()
        self._swipSound = bs.getSound("swip")
        self._tickSound = bs.getSound('tick')
        # Spoken countdown for the final ten seconds of a team's hold.
        self._countDownSounds = {10: bs.getSound('announceTen'),
                                 9: bs.getSound('announceNine'),
                                 8: bs.getSound('announceEight'),
                                 7: bs.getSound('announceSeven'),
                                 6: bs.getSound('announceSix'),
                                 5: bs.getSound('announceFive'),
                                 4: bs.getSound('announceFour'),
                                 3: bs.getSound('announceThree'),
                                 2: bs.getSound('announceTwo'),
                                 1: bs.getSound('announceOne')}
        # Players collide with the flag region non-physically; connect and
        # disconnect callbacks keep each player's 'atFlag' counter current.
        self._flagRegionMaterial = bs.Material()
        self._flagRegionMaterial.addActions(
            conditions=("theyHaveMaterial", bs.getSharedObject('playerMaterial')),
            actions=(("modifyPartCollision", "collide", True),
                     ("modifyPartCollision", "physical", False),
                     ("call", "atConnect",
                      bs.Call(self._handlePlayerFlagRegionCollide, 1)),
                     ("call", "atDisconnect",
                      bs.Call(self._handlePlayerFlagRegionCollide, 0))))

    def getInstanceDescription(self):
        return ('Secure the flag for ${ARG1} seconds.', self.settings['Hold Time'])

    def getInstanceScoreBoardDescription(self):
        return ('secure the flag for ${ARG1} seconds', self.settings['Hold Time'])

    def onTransitionIn(self):
        bs.TeamGameActivity.onTransitionIn(self, music='Scary')

    def onTeamJoin(self, team):
        # Each team starts with the full hold-time countdown.
        team.gameData['timeRemaining'] = self.settings["Hold Time"]
        self._updateScoreBoard()

    def onPlayerJoin(self, player):
        bs.TeamGameActivity.onPlayerJoin(self, player)
        # Count of this player's body parts currently inside the flag region.
        player.gameData['atFlag'] = 0

    def onBegin(self):
        bs.TeamGameActivity.onBegin(self)
        self.setupStandardTimeLimit(self.settings['Time Limit'])
        # self.setupStandardPowerupDrops()  # no powerups due to boxing
        self._flagPos = self.getMap().getFlagPosition(None)
        bs.gameTimer(1000, self._tick, repeat=True)
        self._flagState = self.FLAG_NEW
        self.projectFlagStand(self._flagPos)
        self._flag = bs.Flag(position=self._flagPos,
                             touchable=False,
                             color=(1, 1, 1))
        self._flagLight = bs.newNode('light',
                                     attrs={'position': self._flagPos,
                                            'intensity': 0.2,
                                            'heightAttenuated': False,
                                            'radius': 0.4,
                                            'color': (0.2, 0.2, 0.2)})
        # Scoring region surrounding the flag.
        bs.newNode('region',
                   attrs={'position': self._flagPos,
                          'scale': (1.8, 1.8, 1.8),
                          'type': 'sphere',
                          'materials': [self._flagRegionMaterial,
                                        bs.getSharedObject('regionMaterial')]})
        self._updateFlagState()

    def spawnPlayer(self, player):
        spaz = self.spawnPlayerSpaz(player)
        # Boxing rules: punching and pick-up only, no bombs.
        spaz.connectControlsToPlayer(enablePunch=True,
                                     enableBomb=False,
                                     enablePickUp=True)
        spaz.equipBoxingGloves()

    def _tick(self):
        """Per-second game logic: scoring, countdown, and win detection."""
        self._updateFlagState()
        # Give holding players points.
        for player in self.players:
            if player.gameData['atFlag'] > 0:
                self.scoreSet.playerScored(player, 3, screenMessage=False,
                                           display=False)
        # _scoringTeam holds a weakref (or None); dereference before use.
        scoringTeam = None if self._scoringTeam is None else self._scoringTeam()
        if scoringTeam:
            if scoringTeam.gameData['timeRemaining'] > 0:
                bs.playSound(self._tickSound)
            scoringTeam.gameData['timeRemaining'] = max(
                0, scoringTeam.gameData['timeRemaining'] - 1)
            self._updateScoreBoard()
            if scoringTeam.gameData['timeRemaining'] > 0:
                self._flag.setScoreText(
                    str(scoringTeam.gameData['timeRemaining']))
            # Announce numbers we have sounds for.
            try:
                bs.playSound(
                    self._countDownSounds[scoringTeam.gameData['timeRemaining']])
            except Exception:
                pass
            # Winner.
            if scoringTeam.gameData['timeRemaining'] <= 0:
                self.endGame()

    def endGame(self):
        # A team's score is how much of its hold time it managed to burn down.
        results = bs.TeamGameResults()
        for team in self.teams:
            results.setTeamScore(team, self.settings['Hold Time']
                                 - team.gameData['timeRemaining'])
        self.end(results=results, announceDelay=0)

    def _updateFlagState(self):
        """Recompute flag state/colors and the current scoring team."""
        holdingTeams = set(player.getTeam() for player in self.players
                           if player.gameData['atFlag'])
        prevState = self._flagState
        if len(holdingTeams) > 1:
            self._flagState = self.FLAG_CONTESTED
            self._scoringTeam = None
            self._flagLight.color = (0.6, 0.6, 0.1)
            self._flag.node.color = (1.0, 1.0, 0.4)
        elif len(holdingTeams) == 1:
            holdingTeam = list(holdingTeams)[0]
            self._flagState = self.FLAG_HELD
            # Weakref so we never keep a departed team alive.
            self._scoringTeam = weakref.ref(holdingTeam)
            self._flagLight.color = bs.getNormalizedColor(holdingTeam.color)
            self._flag.node.color = holdingTeam.color
        else:
            self._flagState = self.FLAG_UNCONTESTED
            self._scoringTeam = None
            self._flagLight.color = (0.2, 0.2, 0.2)
            self._flag.node.color = (1, 1, 1)
        if self._flagState != prevState:
            bs.playSound(self._swipSound)

    def _handlePlayerFlagRegionCollide(self, colliding):
        flagNode, playerNode = bs.getCollisionInfo("sourceNode", "opposingNode")
        try:
            player = playerNode.getDelegate().getPlayer()
        except Exception:
            return
        # Different parts of us can collide so a single value isn't enough;
        # also don't count it if we're dead (flying heads shouldnt be able
        # to win the game :-)
        if colliding and player.isAlive():
            player.gameData['atFlag'] += 1
        else:
            player.gameData['atFlag'] = max(0, player.gameData['atFlag'] - 1)
        self._updateFlagState()

    def _updateScoreBoard(self):
        for team in self.teams:
            self._scoreBoard.setTeamValue(team, team.gameData['timeRemaining'],
                                          self.settings['Hold Time'],
                                          countdown=True)

    def handleMessage(self, m):
        if isinstance(m, bs.PlayerSpazDeathMessage):
            bs.TeamGameActivity.handleMessage(self, m)  # augment default
            # No longer can count as atFlag once dead.
            player = m.spaz.getPlayer()
            player.gameData['atFlag'] = 0
            self._updateFlagState()
            self.respawnPlayer(player)
        else:
            # FIX: previously any other message type was silently dropped;
            # forward unhandled messages to the base class so default
            # activity behavior still runs.
            bs.TeamGameActivity.handleMessage(self, m)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver
from tensorflow.python.training import training as train
def _get_checkpoint_filename(filepattern):
  """Resolves `filepattern` to a concrete checkpoint filename.

  A directory is resolved to the latest checkpoint it contains (which may be
  None when it holds no checkpoints); anything else is returned unchanged.
  """
  if not gfile.IsDirectory(filepattern):
    return filepattern
  return saver.latest_checkpoint(filepattern)
def load_checkpoint(filepattern):
  """Returns a `CheckpointReader` for the latest checkpoint.

  Args:
    filepattern: Directory with checkpoints file or path to checkpoint.

  Returns:
    `CheckpointReader` object.

  Raises:
    ValueError: if no checkpoint file can be resolved from `filepattern`.
  """
  resolved = _get_checkpoint_filename(filepattern)
  if resolved is None:
    raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
                     "given directory %s" % filepattern)
  return train.NewCheckpointReader(resolved)
def load_variable(checkpoint_dir, name):
  """Returns a Tensor with the contents of the named checkpoint variable.

  Args:
    checkpoint_dir: Directory with checkpoints file or path to checkpoint.
    name: Name of the tensor to return.

  Returns:
    `Tensor` object.
  """
  # TODO(b/29227106): Fix this in the right place and remove this.
  # Graph tensor names carry a ":0" suffix that checkpoint keys don't.
  if name.endswith(":0"):
    name = name[:-2]
  return load_checkpoint(checkpoint_dir).get_tensor(name)
def list_variables(checkpoint_dir):
  """Returns `(name, shape)` pairs for all variables in the latest checkpoint.

  Args:
    checkpoint_dir: Directory with checkpoints file or path to checkpoint.

  Returns:
    List of tuples `(name, shape)`, sorted by variable name.
  """
  variable_map = load_checkpoint(checkpoint_dir).get_variable_to_shape_map()
  return [(name, variable_map[name]) for name in sorted(variable_map)]
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
# TODO(ipolosukhin): Refactor variable_scope module to provide nicer APIs.
def _set_checkpoint_initializer(variable, file_pattern, tensor_name, slice_spec,
                                name="checkpoint_initializer"):
  """Sets variable initializer to assign op from value in checkpoint's tensor.

  Args:
    variable: `Variable` object.
    file_pattern: string, where to load checkpoints from.
    tensor_name: Name of the `Tensor` to load from checkpoint reader.
    slice_spec: Slice specification for loading partitioned variables.
    name: Name of the operation.
  """
  restored = gen_io_ops._restore_slice(
      file_pattern,
      tensor_name,
      slice_spec,
      variable.dtype.base_dtype,
      preferred_shard=-1,
      name=name)
  variable._initializer_op = state_ops.assign(variable, restored)
def _set_variable_or_list_initializer(variable_or_list, file_pattern,
                                      tensor_name):
  """Sets checkpoint initializers for a variable or a list of its slices."""
  if not isinstance(variable_or_list, (list, tuple)):
    _set_checkpoint_initializer(variable_or_list, file_pattern, tensor_name, "")
    return
  # A set of slices: every slice must belong to the same full tensor.
  slice_name = None
  for v in variable_or_list:
    full_name = v._save_slice_info.full_name
    if slice_name is None:
      slice_name = full_name
    elif slice_name != full_name:
      raise ValueError("Slices must all be from the same tensor: %s != %s" %
                       (slice_name, full_name))
    _set_checkpoint_initializer(v, file_pattern, tensor_name,
                                v._save_slice_info.spec)
def init_from_checkpoint(checkpoint_dir, assignment_map):
  """Using assignment map initializes current variables with loaded tensors.

  Note: This overrides default initialization ops of specified variables and
  redefines dtype.

  Assignment map supports next syntax:
    `'scope_name/': 'checkpoint_scope_name/'` - will load all variables in
      current `scope_name` from `checkpoint_scope_name` with matching variable
      names.
    `'scope_name/variable_name': 'checkpoint_scope_name/some_other_variable'` -
      will initialize `scope_name/variable_name` variable
      from `checkpoint_scope_name/some_other_variable`.
    `variable: 'scope_variable_name'` - will initialize given variable with
      variable from the checkpoint.
    `'scope_name/': '/'` - will load all variables in current `scope_name` from
      checkpoint's root (e.g. no scope).

  Supports loading into partitioned variables, which are represented as
  '<variable>/part_<part #>'.

  Example:
  ```python
    # Create variables.
    with tf.variable_scope('test'):
      m = tf.get_variable('my_var')
    with tf.variable_scope('test2'):
      var2 = tf.get_variable('my_var')
    ...
    # Specify which variables to initialize from checkpoint.
    init_from_checkpoint(checkpoint_dir, {
      'test/my_var': 'some_var',
      'test2/': 'some_scope/'})
    ...
    # Or use `Variable` objects to identify what to initialize.
    init_from_checkpoint(checkpoint_dir, {
      var2: 'some_scope/var2',
    })
    ...
    # Initialize variables as usual.
    session.run(tf.get_all_variables())
  ```

  Args:
    checkpoint_dir: Directory with checkpoints file or path to checkpoint.
    assignment_map: Dict, where keys are names of current variables
      (in default graph) and values are names of the variables
      in the checkpoint.

  Raises:
    tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
    ValueError: If missing variables in current graph.
  """
  filepattern = _get_checkpoint_filename(checkpoint_dir)
  reader = load_checkpoint(checkpoint_dir)
  variable_map = reader.get_variable_to_shape_map()
  for current_name, tensor_name in six.iteritems(assignment_map):
    scopes = ""
    var = None
    # Check if this is Variable object.
    if isinstance(current_name, variables.Variable):
      var = current_name
    else:
      var_scope = vs._get_default_variable_store()
      # Check if this is variable in var_store.
      var = var_scope._vars.get(current_name, None)
      # Also check if variable is partitioned as list.
      if var is None:
        if current_name + "/part_0" in var_scope._vars:
          var = []
          i = 0
          while current_name + "/part_%d" % i in var_scope._vars:
            var.append(var_scope._vars[current_name + "/part_%d" % i])
            i += 1
    if var is not None:
      # If 1 to 1 mapping was provided, find variable in the scope.
      if tensor_name not in variable_map:
        raise ValueError("Tensor %s is not found in %s checkpoint" % (
            tensor_name, checkpoint_dir
        ))
      if isinstance(var, variables.Variable):
        # Additional at-call-time checks.
        if not var.get_shape().is_compatible_with(variable_map[tensor_name]):
          raise ValueError(
              "Shape of variable %s (%s) doesn't match with shape of "
              "tensor %s (%s) from checkpoint reader." % (
                  var.name, str(var.get_shape()),
                  tensor_name, str(variable_map[tensor_name])
              ))
      _set_variable_or_list_initializer(var, filepattern, tensor_name)
      logging.info("Initialize variable %s from checkpoint %s with %s" % (
          current_name, checkpoint_dir, tensor_name
      ))
    else:
      # Scope-to-scope mapping: `current_name` is treated as a scope prefix.
      # (`var_scope` is always bound here: `var is None` implies the
      # non-Variable branch above ran.)
      if "/" in current_name:
        scopes = current_name[:current_name.rindex("/")]
        current_name = current_name[current_name.rindex("/") + 1:]
      # NOTE(review): when `current_name` is "/" or has no "/", `scopes`
      # stays "" and the `len(scopes) + 1` slicing below drops the first
      # character of each var_name — verify root-scope mappings behave.
      if not tensor_name.endswith("/"):
        raise ValueError(
            "Assignment map with scope only name (%s) "
            "should map to scope only (%s). "
            "Should be 'scope/': 'other_scope/'." % (
                scopes, tensor_name
            ))
      # If scope to scope mapping was provided, find all variables in the scope.
      for var_name in var_scope._vars:
        if var_name.startswith(scopes):
          # Lookup name with specified prefix and suffix from current variable.
          # If tensor_name given is '/' (root), don't use it for full name.
          if tensor_name != "/":
            full_tensor_name = tensor_name + var_name[len(scopes) + 1:]
          else:
            full_tensor_name = var_name[len(scopes) + 1:]
          if full_tensor_name not in variable_map:
            raise ValueError(
                "Tensor %s (%s in %s) is not found in %s checkpoint" % (
                    full_tensor_name, var_name[len(scopes) + 1:], tensor_name,
                    checkpoint_dir
                ))
          var = var_scope._vars[var_name]
          _set_variable_or_list_initializer(var, filepattern, full_tensor_name)
          logging.info("Initialize variable %s from checkpoint %s with %s" % (
              var_name, checkpoint_dir, tensor_name
          ))
# pylint: enable=protected-access
| |
"""
Tests for a location card.
"""
import pytest
from onirim import card
from onirim import core
from onirim import component
from onirim import agent
from onirim import exception
# Each case is (played card, content before playing, expected content after).

# Playing a sun onto an empty explored row just moves it from the hand.
LOCATION_PLAY_SIMPLE_CASE = (
    card.sun(card.Color.red),
    component.Content(undrawn_cards=[], hand=[card.sun(card.Color.red)]),
    component.Content(undrawn_cards=[], explored=[card.sun(card.Color.red)]),
)

# Third consecutive red location: the red door is pulled from the deck into
# `opened`, while the green door stays undrawn.
LOCATION_PLAY_OBTAIN_DOOR = (
    card.sun(card.Color.red),
    component.Content(
        undrawn_cards=[
            card.door(card.Color.red),
            card.door(card.Color.green),
        ],
        explored=[
            card.sun(card.Color.red),
            card.moon(card.Color.red),
        ],
        hand=[card.sun(card.Color.red)]),
    component.Content(
        undrawn_cards=[card.door(card.Color.green)],
        explored=[
            card.sun(card.Color.red),
            card.moon(card.Color.red),
            card.sun(card.Color.red),
        ],
        hand=[],
        opened=[card.door(card.Color.red)]),
)

# Same as above, but the matching door sits deeper in the deck and a full
# triple was already completed earlier in `explored`.
LOCATION_PLAY_OBTAIN_DOOR_2 = (
    card.sun(card.Color.red),
    component.Content(
        undrawn_cards=[
            card.door(card.Color.green),
            card.door(card.Color.red)
        ],
        explored=[
            card.sun(card.Color.red),
            card.moon(card.Color.red),
            card.key(card.Color.red),
            card.sun(card.Color.red),
            card.moon(card.Color.red),
        ],
        hand=[card.sun(card.Color.red)]),
    component.Content(
        undrawn_cards=[card.door(card.Color.green)],
        explored=[
            card.sun(card.Color.red),
            card.moon(card.Color.red),
            card.key(card.Color.red),
            card.sun(card.Color.red),
            card.moon(card.Color.red),
            card.sun(card.Color.red),
        ],
        hand=[],
        opened=[card.door(card.Color.red)]),
)

# Triple completed but no red door remains in the deck: nothing is opened.
LOCATION_PLAY_OBTAIN_NO_DOOR = (
    card.sun(card.Color.red),
    component.Content(
        undrawn_cards=[
            card.door(card.Color.green),
        ],
        explored=[
            card.sun(card.Color.red),
            card.moon(card.Color.red),
        ],
        hand=[card.sun(card.Color.red)]),
    component.Content(
        undrawn_cards=[card.door(card.Color.green)],
        explored=[
            card.sun(card.Color.red),
            card.moon(card.Color.red),
            card.sun(card.Color.red),
        ],
        hand=[]),
)

# The three most recent explored cards are not all the same color run that
# ends a triple, so no door is obtained.
LOCATION_PLAY_NOT_OBTAIN_DOOR = (
    card.sun(card.Color.red),
    component.Content(
        undrawn_cards=[],
        explored=[
            card.sun(card.Color.red),
            card.moon(card.Color.red),
            card.key(card.Color.red),
        ],
        hand=[card.sun(card.Color.red)]),
    component.Content(
        undrawn_cards=[],
        explored=[
            card.sun(card.Color.red),
            card.moon(card.Color.red),
            card.key(card.Color.red),
            card.sun(card.Color.red),
        ],
        hand=[]),
)

# A green card interrupts the red run, so again no door is obtained.
LOCATION_PLAY_NOT_OBTAIN_DOOR_2 = (
    card.sun(card.Color.red),
    component.Content(
        undrawn_cards=[],
        explored=[
            card.sun(card.Color.red),
            card.moon(card.Color.red),
            card.sun(card.Color.green),
            card.key(card.Color.red),
        ],
        hand=[card.sun(card.Color.red)]),
    component.Content(
        undrawn_cards=[],
        explored=[
            card.sun(card.Color.red),
            card.moon(card.Color.red),
            card.sun(card.Color.green),
            card.key(card.Color.red),
            card.sun(card.Color.red),
        ],
        hand=[]),
)

# All play-action cases fed to test_location_play.
LOCATION_PLAY_CASES = [
    LOCATION_PLAY_SIMPLE_CASE,
    LOCATION_PLAY_OBTAIN_DOOR,
    LOCATION_PLAY_OBTAIN_DOOR_2,
    LOCATION_PLAY_OBTAIN_NO_DOOR,
    LOCATION_PLAY_NOT_OBTAIN_DOOR,
    LOCATION_PLAY_NOT_OBTAIN_DOOR_2,
]
@pytest.mark.parametrize(
    "location_card, content, content_after",
    LOCATION_PLAY_CASES)
def test_location_play(location_card, content, content_after):
    """Playing a location card must mutate content into the expected state."""
    game_core = core.Core(None, agent.Observer(), content)
    location_card.play(game_core)
    assert content == content_after
# Each case is (first card, second card, whether the second play must raise
# ConsecutiveSameKind because both cards share a kind).
LOCATION_PLAY_CONSECUTIVE = [
    (card.sun(card.Color.red), card.sun(card.Color.yellow), True),
    (card.moon(card.Color.blue), card.moon(card.Color.green), True),
    (card.key(card.Color.blue), card.key(card.Color.blue), True),
    (card.key(card.Color.blue), card.sun(card.Color.blue), False),
]
@pytest.mark.parametrize(
    "first_card, second_card, raises",
    LOCATION_PLAY_CONSECUTIVE)
def test_location_play_consecutive(first_card, second_card, raises):
    """Playing two same-kind locations in a row must raise; otherwise not."""
    play_core = core.Core(
        None, agent.Observer(),
        component.Content(undrawn_cards=[], hand=[first_card, second_card]))
    first_card.play(play_core)
    if not raises:
        second_card.play(play_core)
    else:
        with pytest.raises(exception.ConsecutiveSameKind):
            second_card.play(play_core)
class LocationDiscardActor(agent.Actor):
    """Scripted actor used by the key-discard cases below."""
    def key_discard_react(self, content, cards):
        # Discard the card at index 1 and keep the cards at indices
        # [0, 2, 3, 4] (in that order).  NOTE(review): the exact contract of
        # the returned pair is defined by the key card's discard logic --
        # confirm in the card module.
        return 1, [0, 2, 3, 4]
# Cases for test_location_discard: (location_card, content, content_after).
# Discarding a sun simply moves it from the hand to the discarded pile.
SUN_DISCARDED_CASE = (
    card.sun(card.Color.red),
    component.Content(
        undrawn_cards=[],
        hand=[card.sun(card.Color.red)]),
    component.Content(
        undrawn_cards=[],
        discarded=[card.sun(card.Color.red)]),
)
# Discarding a moon behaves like the sun: hand -> discarded.
MOON_DISCARDED_CASE = (
    card.moon(card.Color.blue),
    component.Content(
        undrawn_cards=[],
        hand=[card.moon(card.Color.blue)]),
    component.Content(
        undrawn_cards=[],
        discarded=[card.moon(card.Color.blue)]),
)
# Discarding a key triggers LocationDiscardActor.key_discard_react: of the
# revealed undrawn cards the one at index 1 (the nightmare) is discarded
# alongside the key, and the remaining four suns stay as the undrawn pile.
KEY_DISCARDED_CASE = (
    card.key(card.Color.red),
    component.Content(
        undrawn_cards=[
            card.sun(card.Color.red),
            card.nightmare(),
        ] + [card.sun(card.Color.red)] * 3,
        hand=[card.key(card.Color.red)]),
    component.Content(
        undrawn_cards=[card.sun(card.Color.red)] * 4,
        discarded=[
            card.key(card.Color.red),
            card.nightmare()]),
)
LOCATION_DISCARD_CASES = [
    SUN_DISCARDED_CASE,
    MOON_DISCARDED_CASE,
    KEY_DISCARDED_CASE,
]
@pytest.mark.parametrize(
    "location_card, content, content_after",
    LOCATION_DISCARD_CASES)
def test_location_discard(location_card, content, content_after):
    """Discarding a location card mutates `content` into `content_after`."""
    actor = LocationDiscardActor()
    game_core = core.Core(actor, agent.Observer(), content)
    location_card.discard(game_core)
    assert content == content_after
| |
from django.utils import timezone
from transitions import Machine, MachineError
from api.providers.workflows import Workflows
from framework.auth import Auth
from osf.exceptions import InvalidTransitionError
from osf.models.preprintlog import PreprintLog
from osf.models.action import ReviewAction, NodeRequestAction, PreprintRequestAction
from osf.utils import permissions
from osf.utils.workflows import (
DefaultStates,
DefaultTriggers,
ReviewStates,
SanctionStates,
DEFAULT_TRANSITIONS,
REVIEWABLE_TRANSITIONS,
SANCTION_TRANSITIONS
)
from website.mails import mails
from website.reviews import signals as reviews_signals
from website.settings import DOMAIN, OSF_SUPPORT_EMAIL, OSF_CONTACT_EMAIL
from osf.utils import notifications as notify
class BaseMachine(Machine):
    """Base pytransitions machine that keeps its state on a model attribute.

    Subclasses configure `States`, `Transitions` and `ActionClass`; each
    transition is recorded as an `ActionClass` row via `save_action`.
    """
    # The action row recorded for the transition currently in flight
    # (reset by initialize_machine at the start of every event).
    action = None
    # State the machine was in when the current event started.
    from_state = None
    States = DefaultStates
    Transitions = DEFAULT_TRANSITIONS
    def __init__(self, machineable, state_attr='machine_state'):
        """
        Welcome to the machine, this is our attempt at a state machine. It was written for nodes, preprints etc,
        but sometimes applies to sanctions; it may be applied to anything that wants to have states and transitions.
        The general idea behind this is that we are instantiating the machine object as part of the model and it will
        validate different state changes and transitions ensuring a model will be easy to identify at a certain state.
        Here we are using the pytransitions state machine in conjunction with an "action object" which is used to store
        pre-transition info, mainly the instigator of the transition or a comment about the transition.
        :param machineable: The thing (should probably be a model) that holds the state info.
        :param state_attr: The name of the state attribute, usually `machine_state`
        """
        self.machineable = machineable
        # Name-mangled (double underscore) so subclasses can't clobber it.
        self.__state_attr = state_attr
        self._validate_transitions(self.Transitions)
        super(BaseMachine, self).__init__(
            states=[s.value for s in self.States],
            transitions=self.Transitions,
            initial=self.state,
            send_event=True,
            # Runs before every trigger, regardless of transition validity.
            prepare_event=['initialize_machine'],
            ignore_invalid_triggers=True,
        )
    @property
    def state(self):
        # Proxy reads to the machineable's state attribute.
        return getattr(self.machineable, self.__state_attr)
    @state.setter
    def state(self, value):
        # Proxy writes to the machineable's state attribute.
        setattr(self.machineable, self.__state_attr, value)
    @property
    def ActionClass(self):
        # Subclasses must supply the Django model used to record transitions.
        raise NotImplementedError()
    def _validate_transitions(self, transitions):
        # Every callback named in any transition's 'after' list must exist on
        # the machine, otherwise fail fast at construction time.
        for transition in set(sum([t['after'] for t in transitions], [])):
            if not hasattr(self, transition):
                raise InvalidTransitionError(self, transition)
    def initialize_machine(self, ev):
        # Reset per-event bookkeeping before the transition runs.
        self.action = None
        self.from_state = ev.state
    def save_action(self, ev):
        # Persist an action row describing this transition.
        user = ev.kwargs.get('user')
        self.action = self.ActionClass.objects.create(
            target=self.machineable,
            creator=user,
            trigger=ev.event.name,
            from_state=self.from_state.name,
            to_state=ev.state.name,
            comment=ev.kwargs.get('comment', ''),
            auto=ev.kwargs.get('auto', False),
        )
    def update_last_transitioned(self, ev):
        # Use the action's creation time when available so the model's
        # timestamp matches the recorded action exactly.
        now = self.action.created if self.action is not None else timezone.now()
        self.machineable.date_last_transitioned = now
class ReviewsMachine(BaseMachine):
    """Moderation state machine for reviewables (preprints)."""
    ActionClass = ReviewAction
    States = ReviewStates
    Transitions = REVIEWABLE_TRANSITIONS
    def save_changes(self, ev):
        # Keep the preprint's published flags in sync with its reviews state.
        now = self.action.created if self.action is not None else timezone.now()
        should_publish = self.machineable.in_public_reviews_state
        if self.machineable.is_retracted:
            pass # Do not alter published state
        elif should_publish and not self.machineable.is_published:
            # Validate the preprint before flipping it to published.
            if not (self.machineable.primary_file and self.machineable.primary_file.target == self.machineable):
                raise ValueError('Preprint is not a valid preprint; cannot publish.')
            if not self.machineable.provider:
                raise ValueError('Preprint provider not specified; cannot publish.')
            if not self.machineable.subjects.exists():
                raise ValueError('Preprint must have at least one subject to be published.')
            self.machineable.date_published = now
            self.machineable.is_published = True
            self.machineable.ever_public = True
        elif not should_publish and self.machineable.is_published:
            self.machineable.is_published = False
        self.machineable.save()
    def resubmission_allowed(self, ev):
        # Resubmission only applies to pre-moderation workflows.
        return self.machineable.provider.reviews_workflow == Workflows.PRE_MODERATION.value
    def perform_withdraw(self, ev):
        # Stamp the withdrawal time (matching the action row when present).
        self.machineable.date_withdrawn = self.action.created if self.action is not None else timezone.now()
        self.machineable.withdrawal_justification = ev.kwargs.get('comment', '')
    def notify_submit(self, ev):
        # Notify interested parties and queue a PUBLISHED log entry
        # (save=False: the log is saved with the preprint later).
        user = ev.kwargs.get('user')
        notify.notify_submit(self.machineable, user)
        auth = Auth(user)
        self.machineable.add_log(
            action=PreprintLog.PUBLISHED,
            params={
                'preprint': self.machineable._id
            },
            auth=auth,
            save=False,
        )
    def notify_resubmit(self, ev):
        notify.notify_resubmit(self.machineable, ev.kwargs.get('user'), self.action)
    def notify_accept_reject(self, ev):
        notify.notify_accept_reject(self.machineable, ev.kwargs.get('user'), self.action, self.States)
    def notify_edit_comment(self, ev):
        notify.notify_edit_comment(self.machineable, ev.kwargs.get('user'), self.action)
    def notify_withdraw(self, ev):
        # Email every contributor that the withdrawal was granted; when the
        # withdrawal came from an accepted request, include the requester.
        context = self.get_context()
        context['ever_public'] = self.machineable.ever_public
        try:
            preprint_request_action = PreprintRequestAction.objects.get(target__target__id=self.machineable.id,
                                                                    from_state='pending',
                                                                    to_state='accepted',
                                                                    trigger='accept')
            context['requester'] = preprint_request_action.target.creator
        except PreprintRequestAction.DoesNotExist:
            # If there is no preprint request action, it means the withdrawal is directly initiated by admin/moderator
            context['force_withdrawal'] = True
        for contributor in self.machineable.contributors.all():
            context['contributor'] = contributor
            if context.get('requester', None):
                context['is_requester'] = context['requester'].username == contributor.username
            mails.send_mail(
                contributor.username,
                mails.WITHDRAWAL_REQUEST_GRANTED,
                document_type=self.machineable.provider.preprint_word,
                **context
            )
    def get_context(self):
        # Common template context for provider-branded notification emails.
        return {
            'domain': DOMAIN,
            'reviewable': self.machineable,
            'workflow': self.machineable.provider.reviews_workflow,
            'provider_url': self.machineable.provider.domain or '{domain}preprints/{provider_id}'.format(domain=DOMAIN, provider_id=self.machineable.provider._id),
            'provider_contact_email': self.machineable.provider.email_contact or OSF_CONTACT_EMAIL,
            'provider_support_email': self.machineable.provider.email_support or OSF_SUPPORT_EMAIL,
        }
class NodeRequestMachine(BaseMachine):
    """State machine for node (project) access requests."""
    ActionClass = NodeRequestAction
    def save_changes(self, ev):
        """ Handles contributorship changes and state transitions
        """
        if ev.event.name == DefaultTriggers.EDIT_COMMENT.value and self.action is not None:
            self.machineable.comment = self.action.comment
        self.machineable.save()
        if ev.event.name == DefaultTriggers.ACCEPT.value:
            # Don't add the requester twice if they are already a contributor.
            if not self.machineable.target.is_contributor(self.machineable.creator):
                contributor_permissions = ev.kwargs.get('permissions', permissions.READ)
                self.machineable.target.add_contributor(
                    self.machineable.creator,
                    auth=Auth(ev.kwargs['user']),
                    permissions=contributor_permissions,
                    visible=ev.kwargs.get('visible', True),
                    send_email='{}_request'.format(self.machineable.request_type))
    def resubmission_allowed(self, ev):
        # TODO: [PRODUCT-395]
        return False
    def notify_submit(self, ev):
        """ Notify admins that someone is requesting access
        """
        context = self.get_context()
        context['contributors_url'] = '{}contributors/'.format(self.machineable.target.absolute_url)
        context['project_settings_url'] = '{}settings/'.format(self.machineable.target.absolute_url)
        for admin in self.machineable.target.get_users_with_perm(permissions.ADMIN):
            mails.send_mail(
                admin.username,
                mails.ACCESS_REQUEST_SUBMITTED,
                admin=admin,
                osf_contact_email=OSF_CONTACT_EMAIL,
                **context
            )
    def notify_resubmit(self, ev):
        """ Notify admins that someone is requesting access again
        """
        # TODO: [PRODUCT-395]
        raise NotImplementedError()
    def notify_accept_reject(self, ev):
        """ Notify requester that admins have approved/denied
        """
        if ev.event.name == DefaultTriggers.REJECT.value:
            context = self.get_context()
            mails.send_mail(
                self.machineable.creator.username,
                mails.ACCESS_REQUEST_DENIED,
                osf_contact_email=OSF_CONTACT_EMAIL,
                **context
            )
        else:
            # add_contributor sends approval notification email
            pass
    def notify_edit_comment(self, ev):
        """ Not presently required to notify for this event
        """
        pass
    def get_context(self):
        # Template context shared by the request emails above.
        return {
            'node': self.machineable.target,
            'requester': self.machineable.creator
        }
class PreprintRequestMachine(BaseMachine):
    """State machine for preprint withdrawal requests."""
    ActionClass = PreprintRequestAction
    def save_changes(self, ev):
        """ Handles preprint status changes and state transitions
        """
        if ev.event.name == DefaultTriggers.EDIT_COMMENT.value and self.action is not None:
            self.machineable.comment = self.action.comment
        elif ev.event.name == DefaultTriggers.SUBMIT.value:
            # If the provider is pre-moderated and target has not been through moderation, auto approve withdrawal
            if self.auto_approval_allowed():
                self.machineable.run_accept(user=self.machineable.creator, comment=self.machineable.comment, auto=True)
        elif ev.event.name == DefaultTriggers.ACCEPT.value:
            # If moderator accepts the withdrawal request
            self.machineable.target.run_withdraw(user=self.action.creator, comment=self.action.comment)
        self.machineable.save()
    def auto_approval_allowed(self):
        # Returns True if the provider is pre-moderated and the preprint is never public.
        return self.machineable.target.provider.reviews_workflow == Workflows.PRE_MODERATION.value and not self.machineable.target.ever_public
    def notify_submit(self, ev):
        # Only ping moderators when the request actually needs human review.
        context = self.get_context()
        if not self.auto_approval_allowed():
            reviews_signals.email_withdrawal_requests.send(timestamp=timezone.now(), context=context)
    def notify_accept_reject(self, ev):
        # Only declines generate an email to the requester.
        if ev.event.name == DefaultTriggers.REJECT.value:
            context = self.get_context()
            mails.send_mail(
                self.machineable.creator.username,
                mails.WITHDRAWAL_REQUEST_DECLINED,
                **context
            )
        else:
            pass
    def notify_edit_comment(self, ev):
        """ Not presently required to notify for this event
        """
        pass
    def notify_resubmit(self, ev):
        """ Notify moderators that someone is requesting withdrawal again
            Not presently required to notify for this event
        """
        # TODO
        pass
    def get_context(self):
        # Template context for the withdrawal-request emails above.
        return {
            'reviewable': self.machineable.target,
            'requester': self.machineable.creator,
            'is_request_email': True,
            'document_type': self.machineable.target.provider.preprint_word
        }
class SanctionStateMachine(Machine):
    '''SanctionStateMachine manages state transitions for Sanction objects.

    The valid machine states for a Sanction object are defined in Workflows.SanctionStates.
    The valid transitions between these states are defined in Workflows.SANCTION_TRANSITIONS.

    Subclasses of SanctionStateMachine inherit the 'trigger' functions named in
    the SANCTION_TRANSITIONS dictionary (approve, accept, and reject).
    These trigger functions will, in order,
    1) Call any 'prepare_event' functions defined on the StateMachine (see __init__)
    2) Call Sanction member functions listed in the 'conditions' key of the dictionary
    3) Call Sanction member functions listed in the 'before' key of the dictionary
    4) Update the state field of the Sanction object via the approval_stage setter
    5) Call Sanction member functions listed in the 'after' key of the dictionary
    If any step fails, the whole transition will fail and the Sanction's
    approval_stage will be rolled back.

    SanctionStateMachine also provides some extra functionality to write
    RegistrationActions on events moving in to or out of Moderated machine states
    as well as to convert MachineErrors (which arise on unsupported state change
    requests) into user-presentable errors.
    '''
    def __init__(self):
        super().__init__(
            states=SanctionStates,
            transitions=SANCTION_TRANSITIONS,
            initial=SanctionStates.from_db_name(self.state),
            model_attribute='approval_stage',
            after_state_change='_save_transition',
            send_event=True,
            queued=True,
        )

    @property
    def target_registration(self):
        raise NotImplementedError(
            'SanctionStateMachine subclasses must define a target_registration property'
        )

    @property
    def approval_stage(self):
        raise NotImplementedError(
            'SanctionStateMachine subclasses must define an approval_stage property with a setter.'
        )

    def _process(self, *args, **kwargs):
        '''Wrap superclass _process to handle expected MachineErrors.'''
        try:
            super()._process(*args, **kwargs)
        except MachineError as e:
            # Translate the generic MachineError into a message that explains
            # why the requested transition is not allowed from this stage.
            if self.approval_stage in [SanctionStates.REJECTED, SanctionStates.MODERATOR_REJECTED]:
                error_message = (
                    'This {sanction} has already been rejected and cannot be approved'.format(
                        sanction=self.DISPLAY_NAME))
            elif self.approval_stage in [SanctionStates.APPROVED, SanctionStates.COMPLETED]:
                error_message = (
                    'This {sanction} has all required approvals and cannot be rejected'.format(
                        sanction=self.DISPLAY_NAME))
            else:
                raise e
            raise MachineError(error_message) from e

    def _save_transition(self, event_data):
        """Record the effects of a state transition in the database."""
        self.save()
        new_state = event_data.transition.dest
        # No need to update registration state with no sanction state change
        # (internal transitions have a dest of None).
        if new_state is None:
            return
        user = event_data.kwargs.get('user')
        if user is None and event_data.args:
            # BUGFIX: the user may be passed positionally, e.g. trigger(user).
            # The old guard checked event_data.kwargs here, which raised
            # IndexError when kwargs were present without positional args and
            # missed the user entirely when only positional args were given.
            user = event_data.args[0]
        comment = event_data.kwargs.get('comment', '')
        if new_state == SanctionStates.PENDING_MODERATION.name:
            user = None  # Don't worry about the particular user who gave final approval
        self.target_registration.update_moderation_state(initiated_by=user, comment=comment)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob
from nova.api.openstack.compute.plugins.v3 import keypairs
from nova.api.openstack import wsgi
from nova import db
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import policy
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.objects import test_keypair
# Quota engine; individual tests stub QUOTAS.count to simulate exhaustion.
QUOTAS = quota.QUOTAS
# Fields merged into every fake keypair returned by the db stubs below.
keypair_data = {
    'public_key': 'FAKE_KEY',
    'fingerprint': 'FAKE_FINGERPRINT',
}
def fake_keypair(name):
    """Build a fake keypair dict from the shared fixture plus *name*."""
    keypair = dict(test_keypair.fake_keypair)
    keypair.update(keypair_data)
    keypair['name'] = name
    return keypair
def db_key_pair_get_all_by_user(context, user_id):
    """Stub for db.key_pair_get_all_by_user returning one fake keypair.

    Renamed the first parameter from the misleading `self` to `context`
    for consistency with db_key_pair_destroy; callers invoke positionally.
    """
    return [fake_keypair('FAKE')]
def db_key_pair_create(context, keypair):
    """Stub for db.key_pair_create echoing back a fake keypair.

    Renamed the first parameter from the misleading `self` to `context`
    for consistency with db_key_pair_destroy; callers invoke positionally.
    """
    return fake_keypair(name=keypair['name'])
def db_key_pair_destroy(context, user_id, name):
    """Stub for db.key_pair_destroy; fails when either identifier is missing."""
    if not user_id or not name:
        raise Exception()
def db_key_pair_create_duplicate(context, keypair):
    # Stub for db.key_pair_create that always reports a name collision.
    raise exception.KeyPairExists(key_name=keypair.get('name', ''))
class KeypairsTest(test.TestCase):
    """End-to-end tests for the v3 keypairs API extension.

    Changes from the original: removed a duplicated jsonutils.loads call in
    test_keypair_create_with_non_alphanumeric_name, and replaced deprecated
    assertEquals / assertTrue(x in y) / assertTrue(len(x) > 0) forms with
    assertEqual / assertIn / assertGreater.
    """

    def setUp(self):
        super(KeypairsTest, self).setUp()
        self.Controller = keypairs.Controller()
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)

        self.stubs.Set(db, "key_pair_get_all_by_user",
                       db_key_pair_get_all_by_user)
        self.stubs.Set(db, "key_pair_create",
                       db_key_pair_create)
        self.stubs.Set(db, "key_pair_destroy",
                       db_key_pair_destroy)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Keypairs'])
        self.app = fakes.wsgi_app_v3(init_only=('keypairs', 'servers'))

    def test_keypair_list(self):
        req = webob.Request.blank('/v3/keypairs')
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 200)
        res_dict = jsonutils.loads(res.body)
        response = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE')}]}
        self.assertEqual(res_dict, response)

    def test_keypair_create(self):
        body = {'keypair': {'name': 'create_test'}}
        req = webob.Request.blank('/v3/keypairs')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 201)
        res_dict = jsonutils.loads(res.body)
        self.assertGreater(len(res_dict['keypair']['fingerprint']), 0)
        self.assertGreater(len(res_dict['keypair']['private_key']), 0)

    def test_keypair_create_with_empty_name(self):
        body = {'keypair': {'name': ''}}
        req = webob.Request.blank('/v3/keypairs')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 400)
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(
            'Keypair data is invalid: '
            'Keypair name must be between 1 and 255 characters long',
            res_dict['badRequest']['message'])

    def test_keypair_create_with_name_too_long(self):
        body = {
            'keypair': {
                'name': 'a' * 256
            }
        }
        req = webob.Request.blank('/v3/keypairs')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 400)
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(
            'Keypair data is invalid: '
            'Keypair name must be between 1 and 255 characters long',
            res_dict['badRequest']['message'])

    def test_keypair_create_with_non_alphanumeric_name(self):
        body = {
            'keypair': {
                'name': 'test/keypair'
            }
        }
        req = webob.Request.blank('/v3/keypairs')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 400)
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(
            "Keypair data is invalid: "
            "Keypair name contains unsafe characters",
            res_dict['badRequest']['message'])

    def test_keypair_import(self):
        body = {
            'keypair': {
                'name': 'create_test',
                'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
                              'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
                              'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
                              'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
                              'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
                              'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
                              'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
                              'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
                              'bHkXa6OciiJDvkRzJXzf',
            },
        }
        req = webob.Request.blank('/v3/keypairs')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 201)
        # FIXME(ja): should we check that public_key was sent to create?
        res_dict = jsonutils.loads(res.body)
        self.assertGreater(len(res_dict['keypair']['fingerprint']), 0)
        # Imported keypairs must never echo back a private key.
        self.assertNotIn('private_key', res_dict['keypair'])

    def test_keypair_import_quota_limit(self):
        def fake_quotas_count(self, context, resource, *args, **kwargs):
            return 100

        self.stubs.Set(QUOTAS, "count", fake_quotas_count)
        body = {
            'keypair': {
                'name': 'create_test',
                'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
                              'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
                              'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
                              'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
                              'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
                              'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
                              'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
                              'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
                              'bHkXa6OciiJDvkRzJXzf',
            },
        }
        req = webob.Request.blank('/v3/keypairs')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 413)
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(
            "Quota exceeded, too many key pairs.",
            res_dict['overLimit']['message'])

    def test_keypair_create_quota_limit(self):
        def fake_quotas_count(self, context, resource, *args, **kwargs):
            return 100

        self.stubs.Set(QUOTAS, "count", fake_quotas_count)
        body = {
            'keypair': {
                'name': 'create_test',
            },
        }
        req = webob.Request.blank('/v3/keypairs')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 413)
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(
            "Quota exceeded, too many key pairs.",
            res_dict['overLimit']['message'])

    def test_keypair_create_duplicate(self):
        self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
        body = {'keypair': {'name': 'create_duplicate'}}
        req = webob.Request.blank('/v3/keypairs')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 409)
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(
            "Key pair 'create_duplicate' already exists.",
            res_dict['conflictingRequest']['message'])

    def test_keypair_import_bad_key(self):
        body = {
            'keypair': {
                'name': 'create_test',
                'public_key': 'ssh-what negative',
            },
        }
        req = webob.Request.blank('/v3/keypairs')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 400)
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(
            'Keypair data is invalid: failed to generate fingerprint',
            res_dict['badRequest']['message'])

    def test_keypair_delete(self):
        req = webob.Request.blank('/v3/keypairs/FAKE')
        req.method = 'DELETE'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 202)

    def test_keypair_get_keypair_not_found(self):
        req = webob.Request.blank('/v3/keypairs/DOESNOTEXIST')
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 404)

    def test_keypair_delete_not_found(self):
        def db_key_pair_get_not_found(context, user_id, name):
            raise exception.KeypairNotFound(user_id=user_id, name=name)

        self.stubs.Set(db, "key_pair_get",
                       db_key_pair_get_not_found)
        req = webob.Request.blank('/v3/keypairs/WHAT')
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 404)

    def test_keypair_show(self):
        def _db_key_pair_get(context, user_id, name):
            return dict(test_keypair.fake_keypair,
                        name='foo', public_key='XXX', fingerprint='YYY')

        self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
        req = webob.Request.blank('/v3/keypairs/FAKE')
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(self.app)
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(res.status_int, 200)
        self.assertEqual('foo', res_dict['keypair']['name'])
        self.assertEqual('XXX', res_dict['keypair']['public_key'])
        self.assertEqual('YYY', res_dict['keypair']['fingerprint'])

    def test_keypair_show_not_found(self):
        def _db_key_pair_get(context, user_id, name):
            raise exception.KeypairNotFound(user_id=user_id, name=name)

        self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
        req = webob.Request.blank('/v3/keypairs/FAKE')
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 404)

    def test_show_server(self):
        self.stubs.Set(db, 'instance_get',
                       fakes.fake_instance_get())
        self.stubs.Set(db, 'instance_get_by_uuid',
                       fakes.fake_instance_get())
        req = webob.Request.blank('/v3/servers/1')
        req.headers['Content-Type'] = 'application/json'
        response = req.get_response(self.app)
        self.assertEqual(response.status_int, 200)
        res_dict = jsonutils.loads(response.body)
        self.assertIn('key_name', res_dict['server'])
        self.assertEqual(res_dict['server']['key_name'], '')

    def test_detail_servers(self):
        self.stubs.Set(db, 'instance_get_all_by_filters',
                       fakes.fake_instance_get_all_by_filters())
        self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
        req = fakes.HTTPRequest.blank('/v3/servers/detail')
        res = req.get_response(self.app)
        server_dicts = jsonutils.loads(res.body)['servers']
        self.assertEqual(len(server_dicts), 5)
        for server_dict in server_dicts:
            self.assertIn('key_name', server_dict)
            self.assertEqual(server_dict['key_name'], '')

    def test_keypair_create_with_invalid_keypair_body(self):
        body = {'alpha': {'name': 'create_test'}}
        req = webob.Request.blank('/v3/keypairs')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(self.app)
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(res.status_int, 400)
        self.assertEqual(res_dict['badRequest']['message'],
                         "Invalid request body")
class KeypairPolicyTest(test.TestCase):
    """Policy-enforcement tests for the keypairs controller.

    Uses idiomatic assertIn in place of assertTrue(x in y) so failures
    report the container contents.
    """

    def setUp(self):
        super(KeypairPolicyTest, self).setUp()
        self.KeyPairController = keypairs.KeypairController()

        def _db_key_pair_get(context, user_id, name):
            return dict(test_keypair.fake_keypair,
                        name='foo', public_key='XXX', fingerprint='YYY')

        self.stubs.Set(db, "key_pair_get",
                       _db_key_pair_get)
        self.stubs.Set(db, "key_pair_get_all_by_user",
                       db_key_pair_get_all_by_user)
        self.stubs.Set(db, "key_pair_create",
                       db_key_pair_create)
        self.stubs.Set(db, "key_pair_destroy",
                       db_key_pair_destroy)

    def test_keypair_list_fail_policy(self):
        rules = policy.Rules({'compute_extension:v3:keypairs:index':
                              policy.parse_rule('role:admin')})
        policy.set_rules(rules)
        req = fakes.HTTPRequest.blank('/v3/keypairs')
        self.assertRaises(exception.NotAuthorized,
                          self.KeyPairController.index,
                          req)

    def test_keypair_list_pass_policy(self):
        rules = policy.Rules({'compute_extension:v3:keypairs:index':
                              policy.parse_rule('')})
        policy.set_rules(rules)
        req = fakes.HTTPRequest.blank('/v3/keypairs')
        res = self.KeyPairController.index(req)
        self.assertIn('keypairs', res)

    def test_keypair_show_fail_policy(self):
        rules = policy.Rules({'compute_extension:v3:keypairs:show':
                              policy.parse_rule('role:admin')})
        policy.set_rules(rules)
        req = fakes.HTTPRequest.blank('/v3/keypairs/FAKE')
        self.assertRaises(exception.NotAuthorized,
                          self.KeyPairController.show,
                          req, 'FAKE')

    def test_keypair_show_pass_policy(self):
        rules = policy.Rules({'compute_extension:v3:keypairs:show':
                              policy.parse_rule('')})
        policy.set_rules(rules)
        req = fakes.HTTPRequest.blank('/v3/keypairs/FAKE')
        res = self.KeyPairController.show(req, 'FAKE')
        self.assertIn('keypair', res)

    def test_keypair_create_fail_policy(self):
        rules = policy.Rules({'compute_extension:v3:keypairs:create':
                              policy.parse_rule('role:admin')})
        policy.set_rules(rules)
        req = fakes.HTTPRequest.blank('/v3/keypairs')
        req.method = 'POST'
        self.assertRaises(exception.NotAuthorized,
                          self.KeyPairController.create,
                          req, {})

    def test_keypair_create_pass_policy(self):
        body = {'keypair': {'name': 'create_test'}}
        rules = policy.Rules({'compute_extension:v3:keypairs:create':
                              policy.parse_rule('')})
        policy.set_rules(rules)
        req = fakes.HTTPRequest.blank('/v3/keypairs')
        req.method = 'POST'
        res = self.KeyPairController.create(req, body)
        self.assertIn('keypair', res)

    def test_keypair_delete_fail_policy(self):
        rules = policy.Rules({'compute_extension:v3:keypairs:delete':
                              policy.parse_rule('role:admin')})
        policy.set_rules(rules)
        req = fakes.HTTPRequest.blank('/v3/keypairs/FAKE')
        req.method = 'DELETE'
        self.assertRaises(exception.NotAuthorized,
                          self.KeyPairController.delete,
                          req, 'FAKE')

    def test_keypair_delete_pass_policy(self):
        rules = policy.Rules({'compute_extension:v3:keypairs:delete':
                              policy.parse_rule('')})
        policy.set_rules(rules)
        req = fakes.HTTPRequest.blank('/v3/keypairs/FAKE')
        req.method = 'DELETE'
        res = self.KeyPairController.delete(req, 'FAKE')
        self.assertEqual(res.status_int, 202)
class KeypairsXMLSerializerTest(test.TestCase):
    """XML (de)serialization round-trip tests for the keypairs templates.

    Uses idiomatic assertIn in place of assertTrue(x in y).
    """

    def setUp(self):
        super(KeypairsXMLSerializerTest, self).setUp()
        self.deserializer = wsgi.XMLDeserializer()

    def test_default_serializer(self):
        exemplar = dict(keypair=dict(
                public_key='fake_public_key',
                private_key='fake_private_key',
                fingerprint='fake_fingerprint',
                user_id='fake_user_id',
                name='fake_key_name'))
        serializer = keypairs.KeypairTemplate()
        text = serializer.serialize(exemplar)
        tree = etree.fromstring(text)
        self.assertEqual('keypair', tree.tag)
        for child in tree:
            self.assertIn(child.tag, exemplar['keypair'])
            self.assertEqual(child.text, exemplar['keypair'][child.tag])

    def test_index_serializer(self):
        exemplar = dict(keypairs=[
                dict(keypair=dict(
                        name='key1_name',
                        public_key='key1_key',
                        fingerprint='key1_fingerprint')),
                dict(keypair=dict(
                        name='key2_name',
                        public_key='key2_key',
                        fingerprint='key2_fingerprint'))])
        serializer = keypairs.KeypairsTemplate()
        text = serializer.serialize(exemplar)
        tree = etree.fromstring(text)
        self.assertEqual('keypairs', tree.tag)
        self.assertEqual(len(exemplar['keypairs']), len(tree))
        for idx, keypair in enumerate(tree):
            self.assertEqual('keypair', keypair.tag)
            kp_data = exemplar['keypairs'][idx]['keypair']
            for child in keypair:
                self.assertIn(child.tag, kp_data)
                self.assertEqual(child.text, kp_data[child.tag])

    def test_deserializer(self):
        exemplar = dict(keypair=dict(
                name='key_name',
                public_key='public_key'))
        intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                  '<keypair><name>key_name</name>'
                  '<public_key>public_key</public_key></keypair>')
        result = self.deserializer.deserialize(intext)['body']
        self.assertEqual(result, exemplar)
| |
import functools
import glob
import gzip
import os
import sys
import warnings
import zipfile
from itertools import product
from django.apps import apps
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.core.management.utils import parse_apps_and_model_labels
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connections, router,
transaction,
)
from django.utils.functional import cached_property
# bz2/lzma support is optional -- CPython can be built without these modules,
# so probe for them once and remember the result for the compression-format
# table built in Command.loaddata.
try:
    import bz2
    has_bz2 = True
except ImportError:
    has_bz2 = False
try:
    import lzma
    has_lzma = True
except ImportError:
    has_lzma = False
# Fixture label meaning "read the serialized data from stdin".
READ_STDIN = '-'
class Command(BaseCommand):
help = 'Installs the named fixture(s) in the database.'
missing_args_message = (
"No database fixture specified. Please provide the path of at least "
"one fixture in the command line."
)
    def add_arguments(self, parser):
        """Register loaddata's command-line options on *parser*."""
        parser.add_argument('args', metavar='fixture', nargs='+', help='Fixture labels.')
        parser.add_argument(
            '--database', default=DEFAULT_DB_ALIAS,
            help='Nominates a specific database to load fixtures into. Defaults to the "default" database.',
        )
        parser.add_argument(
            '--app', dest='app_label',
            help='Only look for fixtures in the specified app.',
        )
        parser.add_argument(
            '--ignorenonexistent', '-i', action='store_true', dest='ignore',
            help='Ignores entries in the serialized data for fields that do not '
                 'currently exist on the model.',
        )
        parser.add_argument(
            '-e', '--exclude', action='append', default=[],
            help='An app_label or app_label.ModelName to exclude. Can be used multiple times.',
        )
        parser.add_argument(
            '--format',
            help='Format of serialized data when reading from stdin.',
        )
def handle(self, *fixture_labels, **options):
self.ignore = options['ignore']
self.using = options['database']
self.app_label = options['app_label']
self.verbosity = options['verbosity']
self.excluded_models, self.excluded_apps = parse_apps_and_model_labels(options['exclude'])
self.format = options['format']
with transaction.atomic(using=self.using):
self.loaddata(fixture_labels)
# Close the DB connection -- unless we're still in a transaction. This
# is required as a workaround for an edge case in MySQL: if the same
# connection is used to create tables, load data, and query, the query
# can return incorrect results. See Django #7572, MySQL #37735.
if transaction.get_autocommit(self.using):
connections[self.using].close()
def loaddata(self, fixture_labels):
connection = connections[self.using]
# Keep a count of the installed objects and fixtures
self.fixture_count = 0
self.loaded_object_count = 0
self.fixture_object_count = 0
self.models = set()
self.serialization_formats = serializers.get_public_serializer_formats()
# Forcing binary mode may be revisited after dropping Python 2 support (see #22399)
self.compression_formats = {
None: (open, 'rb'),
'gz': (gzip.GzipFile, 'rb'),
'zip': (SingleZipReader, 'r'),
'stdin': (lambda *args: sys.stdin, None),
}
if has_bz2:
self.compression_formats['bz2'] = (bz2.BZ2File, 'r')
if has_lzma:
self.compression_formats['lzma'] = (lzma.LZMAFile, 'r')
self.compression_formats['xz'] = (lzma.LZMAFile, 'r')
# Django's test suite repeatedly tries to load initial_data fixtures
# from apps that don't have any fixtures. Because disabling constraint
# checks can be expensive on some database (especially MSSQL), bail
# out early if no fixtures are found.
for fixture_label in fixture_labels:
if self.find_fixtures(fixture_label):
break
else:
return
with connection.constraint_checks_disabled():
self.objs_with_deferred_fields = []
for fixture_label in fixture_labels:
self.load_label(fixture_label)
for obj in self.objs_with_deferred_fields:
obj.save_deferred_fields(using=self.using)
# Since we disabled constraint checks, we must manually check for
# any invalid keys that might have been added
table_names = [model._meta.db_table for model in self.models]
try:
connection.check_constraints(table_names=table_names)
except Exception as e:
e.args = ("Problem installing fixtures: %s" % e,)
raise
# If we found even one object in a fixture, we need to reset the
# database sequences.
if self.loaded_object_count > 0:
sequence_sql = connection.ops.sequence_reset_sql(no_style(), self.models)
if sequence_sql:
if self.verbosity >= 2:
self.stdout.write('Resetting sequences')
with connection.cursor() as cursor:
for line in sequence_sql:
cursor.execute(line)
if self.verbosity >= 1:
if self.fixture_object_count == self.loaded_object_count:
self.stdout.write(
"Installed %d object(s) from %d fixture(s)"
% (self.loaded_object_count, self.fixture_count)
)
else:
self.stdout.write(
"Installed %d object(s) (of %d) from %d fixture(s)"
% (self.loaded_object_count, self.fixture_object_count, self.fixture_count)
)
def load_label(self, fixture_label):
"""Load fixtures files for a given label."""
show_progress = self.verbosity >= 3
for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):
_, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))
open_method, mode = self.compression_formats[cmp_fmt]
fixture = open_method(fixture_file, mode)
try:
self.fixture_count += 1
objects_in_fixture = 0
loaded_objects_in_fixture = 0
if self.verbosity >= 2:
self.stdout.write(
"Installing %s fixture '%s' from %s."
% (ser_fmt, fixture_name, humanize(fixture_dir))
)
objects = serializers.deserialize(
ser_fmt, fixture, using=self.using, ignorenonexistent=self.ignore,
handle_forward_references=True,
)
for obj in objects:
objects_in_fixture += 1
if (obj.object._meta.app_config in self.excluded_apps or
type(obj.object) in self.excluded_models):
continue
if router.allow_migrate_model(self.using, obj.object.__class__):
loaded_objects_in_fixture += 1
self.models.add(obj.object.__class__)
try:
obj.save(using=self.using)
if show_progress:
self.stdout.write(
'\rProcessed %i object(s).' % loaded_objects_in_fixture,
ending=''
)
# psycopg2 raises ValueError if data contains NUL chars.
except (DatabaseError, IntegrityError, ValueError) as e:
e.args = ("Could not load %(object_label)s(pk=%(pk)s): %(error_msg)s" % {
'object_label': obj.object._meta.label,
'pk': obj.object.pk,
'error_msg': e,
},)
raise
if obj.deferred_fields:
self.objs_with_deferred_fields.append(obj)
if objects and show_progress:
self.stdout.write() # Add a newline after progress indicator.
self.loaded_object_count += loaded_objects_in_fixture
self.fixture_object_count += objects_in_fixture
except Exception as e:
if not isinstance(e, CommandError):
e.args = ("Problem installing fixture '%s': %s" % (fixture_file, e),)
raise
finally:
fixture.close()
# Warn if the fixture we loaded contains 0 objects.
if objects_in_fixture == 0:
warnings.warn(
"No fixture data found for '%s'. (File format may be "
"invalid.)" % fixture_name,
RuntimeWarning
)
@functools.lru_cache(maxsize=None)
def find_fixtures(self, fixture_label):
"""Find fixture files for a given label."""
if fixture_label == READ_STDIN:
return [(READ_STDIN, None, READ_STDIN)]
fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label)
databases = [self.using, None]
cmp_fmts = list(self.compression_formats) if cmp_fmt is None else [cmp_fmt]
ser_fmts = self.serialization_formats if ser_fmt is None else [ser_fmt]
if self.verbosity >= 2:
self.stdout.write("Loading '%s' fixtures..." % fixture_name)
if os.path.isabs(fixture_name):
fixture_dirs = [os.path.dirname(fixture_name)]
fixture_name = os.path.basename(fixture_name)
else:
fixture_dirs = self.fixture_dirs
if os.path.sep in os.path.normpath(fixture_name):
fixture_dirs = [os.path.join(dir_, os.path.dirname(fixture_name))
for dir_ in fixture_dirs]
fixture_name = os.path.basename(fixture_name)
suffixes = (
'.'.join(ext for ext in combo if ext)
for combo in product(databases, ser_fmts, cmp_fmts)
)
targets = {'.'.join((fixture_name, suffix)) for suffix in suffixes}
fixture_files = []
for fixture_dir in fixture_dirs:
if self.verbosity >= 2:
self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir))
fixture_files_in_dir = []
path = os.path.join(fixture_dir, fixture_name)
for candidate in glob.iglob(glob.escape(path) + '*'):
if os.path.basename(candidate) in targets:
# Save the fixture_dir and fixture_name for future error messages.
fixture_files_in_dir.append((candidate, fixture_dir, fixture_name))
if self.verbosity >= 2 and not fixture_files_in_dir:
self.stdout.write("No fixture '%s' in %s." %
(fixture_name, humanize(fixture_dir)))
# Check kept for backwards-compatibility; it isn't clear why
# duplicates are only allowed in different directories.
if len(fixture_files_in_dir) > 1:
raise CommandError(
"Multiple fixtures named '%s' in %s. Aborting." %
(fixture_name, humanize(fixture_dir)))
fixture_files.extend(fixture_files_in_dir)
if not fixture_files:
raise CommandError("No fixture named '%s' found." % fixture_name)
return fixture_files
@cached_property
def fixture_dirs(self):
"""
Return a list of fixture directories.
The list contains the 'fixtures' subdirectory of each installed
application, if it exists, the directories in FIXTURE_DIRS, and the
current directory.
"""
dirs = []
fixture_dirs = settings.FIXTURE_DIRS
if len(fixture_dirs) != len(set(fixture_dirs)):
raise ImproperlyConfigured("settings.FIXTURE_DIRS contains duplicates.")
for app_config in apps.get_app_configs():
app_label = app_config.label
app_dir = os.path.join(app_config.path, 'fixtures')
if app_dir in fixture_dirs:
raise ImproperlyConfigured(
"'%s' is a default fixture directory for the '%s' app "
"and cannot be listed in settings.FIXTURE_DIRS." % (app_dir, app_label)
)
if self.app_label and app_label != self.app_label:
continue
if os.path.isdir(app_dir):
dirs.append(app_dir)
dirs.extend(fixture_dirs)
dirs.append('')
return [os.path.realpath(d) for d in dirs]
def parse_name(self, fixture_name):
"""
Split fixture name in name, serialization format, compression format.
"""
if fixture_name == READ_STDIN:
if not self.format:
raise CommandError('--format must be specified when reading from stdin.')
return READ_STDIN, self.format, 'stdin'
parts = fixture_name.rsplit('.', 2)
if len(parts) > 1 and parts[-1] in self.compression_formats:
cmp_fmt = parts[-1]
parts = parts[:-1]
else:
cmp_fmt = None
if len(parts) > 1:
if parts[-1] in self.serialization_formats:
ser_fmt = parts[-1]
parts = parts[:-1]
else:
raise CommandError(
"Problem installing fixture '%s': %s is not a known "
"serialization format." % ('.'.join(parts[:-1]), parts[-1]))
else:
ser_fmt = None
name = '.'.join(parts)
return name, ser_fmt, cmp_fmt
class SingleZipReader(zipfile.ZipFile):
    """ZipFile wrapper that only accepts archives containing exactly one file."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if len(self.namelist()) != 1:
            raise ValueError("Zip-compressed fixtures must contain one file.")

    def read(self):
        # __init__ guarantees exactly one member, so unpacking cannot fail.
        (only_member,) = self.namelist()
        return super().read(only_member)
def humanize(dirname):
    """Return the directory name in quotes, or 'absolute path' if empty."""
    if dirname:
        return "'%s'" % dirname
    return 'absolute path'
| |
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
#import mshr
from dolfin import *
import sympy as sy
import numpy as np
import ExactSol
import MatrixOperations as MO
import CheckPetsc4py as CP
from dolfin import __version__
import MaxwellPrecond as MP
import StokesPrecond
import time
def myCCode(A):
    """Convert a sympy expression to C code, spelling pi as ``pi`` not M_PI."""
    code = sy.ccode(A)
    return code.replace('M_PI', 'pi')
def Domain(n):
    # Build an n x n unit-square mesh and tag its boundary facets:
    # left/right walls -> 2, top/bottom walls -> 1.
    # Returns (mesh, boundaries, domains).
    # NOTE(review): later ``mark`` calls overwrite earlier ones where
    # subdomains overlap, so the marking order below is significant — confirm
    # before reordering.
    # mesh = RectangleMesh(0., -1., 2., 1., n, n)
    # mesh = RectangleMesh(0., 0., 1.0, 1.0, n, n)
    mesh = UnitSquareMesh(n, n)
    class Left(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[0], 0.0)
    class Right(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[0], 1.0)
    class Bottom(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[1], 0.0)
    class Top(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[1], 1.0)
    # mesh = RectangleMesh(Point(0., -1.), Point(1*10., 1.), 1*5*n, n)
    # class Left(SubDomain):
    #     def inside(self, x, on_boundary):
    #         return near(x[0], 0.0)
    # class Right(SubDomain):
    #     def inside(self, x, on_boundary):
    #         return near(x[0], 1*10.0)
    # class Bottom(SubDomain):
    #     def inside(self, x, on_boundary):
    #         return near(x[1], -1.)
    # class Top(SubDomain):
    #     def inside(self, x, on_boundary):
    #         return near(x[1], 1.)
    left = Left()
    top = Top()
    right = Right()
    bottom = Bottom()
    # Initialize mesh function for the domain
    domains = CellFunction("size_t", mesh)
    domains.set_all(0)
    # Initialize mesh function for boundary domains
    boundaries = FacetFunction("size_t", mesh)
    boundaries.set_all(0)
    right.mark(boundaries, 2)
    left.mark(boundaries, 2)
    top.mark(boundaries, 1)
    bottom.mark(boundaries, 1)
    return mesh, boundaries, domains
def ExactSolution(mesh, params):
    # Build manufactured exact solutions and matching forcing terms for the
    # MHD system as degree-4 FEniCS Expressions, derived symbolically with
    # sympy.  ``params`` appears to be (kappa, mu_m, nu) — TODO confirm
    # against the caller.
    # NOTE(review): Re is computed but never used in this function.
    Re = 1./params[2]
    Ha = sqrt(params[0]/(params[1]*params[2]))
    G = 10.
    # sympy symbols named so generated C code indexes FEniCS coordinates.
    x = sy.Symbol('x[0]')
    y = sy.Symbol('x[1]')
    # Hartmann-channel-style expressions; NOTE(review): all of b, d, p, u, v,
    # r below are overwritten by the smooth manufactured solution that
    # follows, so this group is effectively dead code.
    b = (G/params[0])*(sy.sinh(y*Ha)/sy.sinh(Ha)-y)
    d = sy.diff(x,x)
    p = -G*x - (G**2)/(2*params[0])*(sy.sinh(y*Ha)/sy.sinh(Ha)-y)**2
    u = (G/(params[2]*Ha*sy.tanh(Ha)))*(1-sy.cosh(y*Ha)/sy.cosh(Ha))
    v = sy.diff(x, y)
    r = sy.diff(x, y)
    # Velocity from a stream function uu, so (u, v) is divergence-free.
    uu = y*x*sy.exp(x+y)
    u = sy.diff(uu, y)
    v = -sy.diff(uu, x)
    p = sy.sin(x)*sy.exp(y)
    # Magnetic field from a potential bb, so (b, d) is divergence-free.
    bb = x*y*sy.cos(x)
    b = sy.diff(bb, y)
    d = -sy.diff(bb, x)
    r = x*sy.sin(2*sy.pi*y)*sy.sin(2*sy.pi*x)
    # b = y
    # d = sy.diff(x, y)
    # r = sy.diff(y, y)
    # Components of the stress-like tensor J (currently unused below).
    J11 = p - params[2]*sy.diff(u, x)
    J12 = - params[2]*sy.diff(u, y)
    J21 = - params[2]*sy.diff(v, x)
    J22 = p - params[2]*sy.diff(v, y)
    # Symbolic pieces of the momentum equation: diffusion, advection,
    # pressure gradient.
    L1 = sy.diff(u, x, x)+sy.diff(u, y, y)
    L2 = sy.diff(v, x, x)+sy.diff(v, y, y)
    A1 = u*sy.diff(u, x)+v*sy.diff(u, y)
    A2 = u*sy.diff(v, x)+v*sy.diff(v, y)
    P1 = sy.diff(p, x)
    P2 = sy.diff(p, y)
    # Maxwell part: curl-curl of (b, d) and Lagrange-multiplier gradient.
    C1 = sy.diff(d, x, y) - sy.diff(b, y, y)
    C2 = sy.diff(b, x, y) - sy.diff(d, x, x)
    # Coupling terms between the fluid and magnetic subproblems.
    NS1 = -d*(sy.diff(d, x) - sy.diff(b, y))
    NS2 = b*(sy.diff(d, x) - sy.diff(b, y))
    R1 = sy.diff(r, x)
    R2 = sy.diff(r, y)
    M1 = sy.diff(u*d-v*b, y)
    M2 = -sy.diff(u*d-v*b, x)
    # Wrap the symbolic expressions as FEniCS Expressions via C-code strings.
    u0 = Expression((myCCode(u), myCCode(v)), degree=4)
    p0 = Expression(myCCode(p), degree=4)
    b0 = Expression((myCCode(b), myCCode(d)), degree=4)
    r0 = Expression(myCCode(r), degree=4)
    print " u = (", str(u).replace('x[0]', 'x').replace('x[1]', 'y'), ", ", str(v).replace('x[0]', 'x').replace('x[1]', 'y'), ")\n"
    print " p = (", str(p).replace('x[0]', 'x').replace('x[1]', 'y'), ")\n"
    print " b = (", str(b).replace('x[0]', 'x').replace('x[1]', 'y'), ", ", str(d).replace('x[0]', 'x').replace('x[1]', 'y'), ")\n"
    print " r = (", str(r).replace('x[0]', 'x').replace('x[1]', 'y'), ")\n"
    Laplacian = Expression((myCCode(L1), myCCode(L2)), degree=4)
    Advection = Expression((myCCode(A1), myCCode(A2)), degree=4)
    gradPres = Expression((myCCode(P1), myCCode(P2)), degree=4)
    NScouple = Expression((myCCode(NS1), myCCode(NS2)), degree=4)
    CurlCurl = Expression((myCCode(C1), myCCode(C2)), degree=4)
    gradLagr = Expression((myCCode(R1), myCCode(R2)), degree=4)
    Mcouple = Expression((myCCode(M1), myCCode(M2)), degree=4)
    # pN = as_matrix(((Expression(myCCode(J11)), Expression(myCCode(J12))), (Expression(myCCode(J21)), Expression(myCCode(J22)))))
    return u0, p0, b0, r0, 1, Laplacian, Advection, gradPres, NScouple, CurlCurl, gradLagr, Mcouple
# Sets up the initial guess for the MHD problem
def Stokes(V, Q, F, u0, pN, params, mesh, boundaries, domains):
    # Solve a Stokes problem with a direct LU solve (PaStiX via PETSc) to
    # produce an initial velocity/pressure guess (u_k, p_k) for the MHD
    # iteration.  The pressure is shifted to have zero mean at the end.
    parameters['reorder_dofs_serial'] = False
    W = FunctionSpace(mesh, MixedElement([V, Q]))
    IS = MO.IndexSet(W)
    (u, p) = TrialFunctions(W)
    (v, q) = TestFunctions(W)
    n = FacetNormal(mesh)
    dx = Measure('dx', domain=mesh, subdomain_data=domains)
    ds = Measure('ds', domain=mesh, subdomain_data=boundaries)
    # Stokes bilinear form: viscous block plus the two divergence blocks.
    a11 = params[2]*inner(grad(v), grad(u))*dx(0)
    a12 = -div(v)*p*dx(0)
    a21 = -div(u)*q*dx(0)
    a = a11+a12+a21
    print F
    L = inner(v, F)*dx(0) #- inner(pN*n,v)*ds(2)
    # Preconditioner form (unused while the direct solver branch is active).
    pp = params[2]*inner(grad(v), grad(u))*dx(0) + (1./params[2])*p*q*dx(0)
    def boundary(x, on_boundary):
        return on_boundary
    # bcu = DirichletBC(W.sub(0), u0, boundaries, 1)
    bcu = DirichletBC(W.sub(0), u0, boundary)
    # bcu = [bcu1, bcu2]
    A, b = assemble_system(a, L, bcu)
    A, b = CP.Assemble(A, b)
    # NOTE(review): C is extracted but never used afterwards.
    C = A.getSubMatrix(IS[1],IS[1])
    u = b.duplicate()
    P, Pb = assemble_system(pp, L, bcu)
    P, Pb = CP.Assemble(P, Pb)
    ksp = PETSc.KSP()
    ksp.create(comm=PETSc.COMM_WORLD)
    pc = ksp.getPC()
    ksp.setType('preonly')
    pc.setType('lu')
    OptDB = PETSc.Options()
    # if __version__ != '1.6.0':
    # Options must be set before setFromOptions() for PETSc to pick them up.
    OptDB['pc_factor_mat_solver_package'] = "pastix"
    OptDB['pc_factor_mat_ordering_type'] = "rcm"
    ksp.setFromOptions()
    ksp.setOperators(A,A)
    # ksp = PETSc.KSP().create()
    # ksp.setTolerances(1e-8)
    # ksp.max_it = 200
    # pc = ksp.getPC()
    # pc.setType(PETSc.PC.Type.PYTHON)
    # ksp.setType('minres')
    # pc.setPythonContext(StokesPrecond.Approx(W, 1))
    # ksp.setOperators(A,P)
    # Scale the RHS to unit norm for the solve, then rescale the solution.
    scale = b.norm()
    b = b/scale
    del A
    start_time = time.time()
    ksp.solve(b,u)
    # Mits +=dodim
    u = u*scale
    print ("{:40}").format("Stokes solve, time: "), " ==> ",("{:4f}").format(time.time() - start_time),("{:9}").format(" Its: "), ("{:4}").format(ksp.its), ("{:9}").format(" time: "), ("{:4}").format(time.strftime('%X %x %Z')[0:5])
    # Split the mixed-space PETSc vector back into FEniCS functions.
    u_k = Function(FunctionSpace(mesh, V))
    p_k = Function(FunctionSpace(mesh, Q))
    u_k.vector()[:] = u.getSubVector(IS[0]).array
    p_k.vector()[:] = u.getSubVector(IS[1]).array
    # Normalize the pressure so it has zero mean over the domain.
    ones = Function(FunctionSpace(mesh, Q))
    ones.vector()[:]=(0*ones.vector().array()+1)
    p_k.vector()[:] += -assemble(p_k*dx(0))/assemble(ones*dx(0))
    return u_k, p_k
def Maxwell(V, Q, F, b0, r0, params, mesh,HiptmairMatrices, Hiptmairtol):
    # Solve the Maxwell subproblem (curl-curl + Lagrange multiplier) with a
    # direct LU solve to produce an initial magnetic-field guess (b_k, r_k).
    parameters['reorder_dofs_serial'] = False
    # NOTE(review): this line is immediately overwritten by the next one and
    # has no effect.
    W = V*Q
    W = FunctionSpace(mesh, MixedElement([V, Q]))
    IS = MO.IndexSet(W)
    (b, r) = TrialFunctions(W)
    (c, s) = TestFunctions(W)
    # Skip the coupling-number factor when params[0] is zero to avoid a
    # degenerate curl-curl block.
    if params[0] == 0.0:
        a11 = params[1]*inner(curl(b), curl(c))*dx
    else:
        a11 = params[1]*params[0]*inner(curl(b), curl(c))*dx
    a21 = inner(b,grad(s))*dx
    a12 = inner(c,grad(r))*dx
    # print F
    L = inner(c, F)*dx
    a = a11+a12+a21
    def boundary(x, on_boundary):
        return on_boundary
    # class b0(Expression):
    #     def __init__(self):
    #         self.p = 1
    #     def eval_cell(self, values, x, ufc_cell):
    #         values[0] = 0.0
    #         values[1] = 1.0
    #     def value_shape(self):
    #         return (2,)
    bcb = DirichletBC(W.sub(0), b0, boundary)
    bcr = DirichletBC(W.sub(1), r0, boundary)
    bc = [bcb, bcr]
    A, b = assemble_system(a, L, bc)
    A, b = CP.Assemble(A, b)
    u = b.duplicate()
    ksp = PETSc.KSP()
    ksp.create(comm=PETSc.COMM_WORLD)
    pc = ksp.getPC()
    ksp.setType('preonly')
    pc.setType('lu')
    OptDB = PETSc.Options()
    # Options must be set before setFromOptions() for PETSc to pick them up.
    OptDB['pc_factor_mat_solver_package'] = "pastix"
    OptDB['pc_factor_mat_ordering_type'] = "rcm"
    ksp.setFromOptions()
    # ksp = PETSc.KSP().create()
    # ksp.setTolerances(1e-8)
    # ksp.max_it = 200
    # pc = ksp.getPC()
    # pc.setType(PETSc.PC.Type.PYTHON)
    # ksp.setType('minres')
    # pc.setPythonContext(MP.Hiptmair(W, HiptmairMatrices[3], HiptmairMatrices[4], HiptmairMatrices[2], HiptmairMatrices[0], HiptmairMatrices[1], HiptmairMatrices[6],Hiptmairtol))
    # Scale the RHS to unit norm for the solve, then rescale the solution.
    scale = b.norm()
    b = b/scale
    ksp.setOperators(A,A)
    del A
    start_time = time.time()
    ksp.solve(b,u)
    print ("{:40}").format("Maxwell solve, time: "), " ==> ",("{:4f}").format(time.time() - start_time),("{:9}").format(" Its: "), ("{:4}").format(ksp.its), ("{:9}").format(" time: "), ("{:4}").format(time.strftime('%X %x %Z')[0:5])
    u = u*scale
    # Split the mixed-space PETSc vector back into FEniCS functions.
    b_k = Function(FunctionSpace(mesh, V))
    r_k = Function(FunctionSpace(mesh, Q))
    b_k.vector()[:] = u.getSubVector(IS[0]).array
    r_k.vector()[:] = u.getSubVector(IS[1]).array
    return b_k, r_k
| |
# coding: utf-8
from datetime import date, datetime
from typing import List, Dict, Type
from openapi_server.models.base_model_ import Model
from openapi_server.models.pipeline_branchesitemlatest_run import PipelineBranchesitemlatestRun
from openapi_server.models.pipeline_branchesitempull_request import PipelineBranchesitempullRequest
from openapi_server import util
class PipelineBranchesitem(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
    Do not edit the class manually.
    """
    def __init__(self, display_name: str=None, estimated_duration_in_millis: int=None, name: str=None, weather_score: int=None, latest_run: PipelineBranchesitemlatestRun=None, organization: str=None, pull_request: PipelineBranchesitempullRequest=None, total_number_of_pull_requests: int=None, _class: str=None):
        """PipelineBranchesitem - a model defined in OpenAPI
        :param display_name: The display_name of this PipelineBranchesitem.
        :param estimated_duration_in_millis: The estimated_duration_in_millis of this PipelineBranchesitem.
        :param name: The name of this PipelineBranchesitem.
        :param weather_score: The weather_score of this PipelineBranchesitem.
        :param latest_run: The latest_run of this PipelineBranchesitem.
        :param organization: The organization of this PipelineBranchesitem.
        :param pull_request: The pull_request of this PipelineBranchesitem.
        :param total_number_of_pull_requests: The total_number_of_pull_requests of this PipelineBranchesitem.
        :param _class: The _class of this PipelineBranchesitem.
        """
        # Attribute name -> declared type; consumed by the Model base class
        # for (de)serialization.
        self.openapi_types = {
            'display_name': str,
            'estimated_duration_in_millis': int,
            'name': str,
            'weather_score': int,
            'latest_run': PipelineBranchesitemlatestRun,
            'organization': str,
            'pull_request': PipelineBranchesitempullRequest,
            'total_number_of_pull_requests': int,
            '_class': str
        }
        # Attribute name -> JSON field name on the wire.
        self.attribute_map = {
            'display_name': 'displayName',
            'estimated_duration_in_millis': 'estimatedDurationInMillis',
            'name': 'name',
            'weather_score': 'weatherScore',
            'latest_run': 'latestRun',
            'organization': 'organization',
            'pull_request': 'pullRequest',
            'total_number_of_pull_requests': 'totalNumberOfPullRequests',
            '_class': '_class'
        }
        self._display_name = display_name
        self._estimated_duration_in_millis = estimated_duration_in_millis
        self._name = name
        self._weather_score = weather_score
        self._latest_run = latest_run
        self._organization = organization
        self._pull_request = pull_request
        self._total_number_of_pull_requests = total_number_of_pull_requests
        # Name mangling makes this _PipelineBranchesitem__class, matching the
        # _class property below.
        self.__class = _class
    @classmethod
    def from_dict(cls, dikt: dict) -> 'PipelineBranchesitem':
        """Returns the dict as a model
        :param dikt: A dict.
        :return: The PipelineBranchesitem of this PipelineBranchesitem.
        """
        return util.deserialize_model(dikt, cls)
    @property
    def display_name(self):
        """Gets the display_name of this PipelineBranchesitem.
        :return: The display_name of this PipelineBranchesitem.
        :rtype: str
        """
        return self._display_name
    @display_name.setter
    def display_name(self, display_name):
        """Sets the display_name of this PipelineBranchesitem.
        :param display_name: The display_name of this PipelineBranchesitem.
        :type display_name: str
        """
        self._display_name = display_name
    @property
    def estimated_duration_in_millis(self):
        """Gets the estimated_duration_in_millis of this PipelineBranchesitem.
        :return: The estimated_duration_in_millis of this PipelineBranchesitem.
        :rtype: int
        """
        return self._estimated_duration_in_millis
    @estimated_duration_in_millis.setter
    def estimated_duration_in_millis(self, estimated_duration_in_millis):
        """Sets the estimated_duration_in_millis of this PipelineBranchesitem.
        :param estimated_duration_in_millis: The estimated_duration_in_millis of this PipelineBranchesitem.
        :type estimated_duration_in_millis: int
        """
        self._estimated_duration_in_millis = estimated_duration_in_millis
    @property
    def name(self):
        """Gets the name of this PipelineBranchesitem.
        :return: The name of this PipelineBranchesitem.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this PipelineBranchesitem.
        :param name: The name of this PipelineBranchesitem.
        :type name: str
        """
        self._name = name
    @property
    def weather_score(self):
        """Gets the weather_score of this PipelineBranchesitem.
        :return: The weather_score of this PipelineBranchesitem.
        :rtype: int
        """
        return self._weather_score
    @weather_score.setter
    def weather_score(self, weather_score):
        """Sets the weather_score of this PipelineBranchesitem.
        :param weather_score: The weather_score of this PipelineBranchesitem.
        :type weather_score: int
        """
        self._weather_score = weather_score
    @property
    def latest_run(self):
        """Gets the latest_run of this PipelineBranchesitem.
        :return: The latest_run of this PipelineBranchesitem.
        :rtype: PipelineBranchesitemlatestRun
        """
        return self._latest_run
    @latest_run.setter
    def latest_run(self, latest_run):
        """Sets the latest_run of this PipelineBranchesitem.
        :param latest_run: The latest_run of this PipelineBranchesitem.
        :type latest_run: PipelineBranchesitemlatestRun
        """
        self._latest_run = latest_run
    @property
    def organization(self):
        """Gets the organization of this PipelineBranchesitem.
        :return: The organization of this PipelineBranchesitem.
        :rtype: str
        """
        return self._organization
    @organization.setter
    def organization(self, organization):
        """Sets the organization of this PipelineBranchesitem.
        :param organization: The organization of this PipelineBranchesitem.
        :type organization: str
        """
        self._organization = organization
    @property
    def pull_request(self):
        """Gets the pull_request of this PipelineBranchesitem.
        :return: The pull_request of this PipelineBranchesitem.
        :rtype: PipelineBranchesitempullRequest
        """
        return self._pull_request
    @pull_request.setter
    def pull_request(self, pull_request):
        """Sets the pull_request of this PipelineBranchesitem.
        :param pull_request: The pull_request of this PipelineBranchesitem.
        :type pull_request: PipelineBranchesitempullRequest
        """
        self._pull_request = pull_request
    @property
    def total_number_of_pull_requests(self):
        """Gets the total_number_of_pull_requests of this PipelineBranchesitem.
        :return: The total_number_of_pull_requests of this PipelineBranchesitem.
        :rtype: int
        """
        return self._total_number_of_pull_requests
    @total_number_of_pull_requests.setter
    def total_number_of_pull_requests(self, total_number_of_pull_requests):
        """Sets the total_number_of_pull_requests of this PipelineBranchesitem.
        :param total_number_of_pull_requests: The total_number_of_pull_requests of this PipelineBranchesitem.
        :type total_number_of_pull_requests: int
        """
        self._total_number_of_pull_requests = total_number_of_pull_requests
    @property
    def _class(self):
        """Gets the _class of this PipelineBranchesitem.
        :return: The _class of this PipelineBranchesitem.
        :rtype: str
        """
        return self.__class
    @_class.setter
    def _class(self, _class):
        """Sets the _class of this PipelineBranchesitem.
        :param _class: The _class of this PipelineBranchesitem.
        :type _class: str
        """
        self.__class = _class
| |
"""
1. Goto https://console.developers.google.com/iam-admin/projects
2. Create new project
3. Goto APIs and register for OAuth2.0 for installed applications
4. Download JSON secret file and move into same directory as this file
"""
# flake8: noqa
from datetime import datetime
import re
from pandas import compat
import numpy as np
from pandas import DataFrame
import pandas as pd
import pandas.io.parsers as psr
import pandas.lib as lib
from pandas.io.date_converters import generic_parser
import pandas.io.auth as auth
from pandas.util.decorators import Appender, Substitution
from apiclient.errors import HttpError
from oauth2client.client import AccessTokenRefreshError
from pandas.compat import zip, u
# GH11038
import warnings
warnings.warn("The pandas.io.ga module is deprecated and will be "
"removed in a future version.",
FutureWarning, stacklevel=2)
TYPE_MAP = {u('INTEGER'): int, u('FLOAT'): float, u('TIME'): int}
NO_CALLBACK = auth.OOB_CALLBACK_URN
DOC_URL = auth.DOC_URL
_QUERY_PARAMS = """metrics : list of str
Un-prefixed metric names (e.g., 'visitors' and not 'ga:visitors')
dimensions : list of str
Un-prefixed dimension variable names
start_date : str/date/datetime
end_date : str/date/datetime, optional, default is None but internally set as today
segment : list of str, optional, default: None
filters : list of str, optional, default: None
start_index : int, default 1
max_results : int, default 10000
If >10000, must specify chunksize or ValueError will be raised"""
_QUERY_DOC = """
Construct a google analytics query using given parameters
Metrics and dimensions do not need the 'ga:' prefix
Parameters
----------
profile_id : str
%s
""" % _QUERY_PARAMS
_GA_READER_DOC = """Given query parameters, return a DataFrame with all the
data or an iterator that returns DataFrames containing chunks of the data
Parameters
----------
%s
sort : bool/list, default True
Sort output by index or list of columns
chunksize : int, optional
If max_results >10000, specifies the number of rows per iteration
index_col : str/list of str/dict, optional, default: None
If unspecified then dimension variables are set as index
parse_dates : bool/list/dict, default: True
keep_date_col : boolean, default: False
date_parser : optional, default: None
na_values : optional, default: None
converters : optional, default: None
dayfirst : bool, default False
Informs date parsing
account_name : str, optional, default: None
account_id : str, optional, default: None
property_name : str, optional, default: None
property_id : str, optional, default: None
profile_name : str, optional, default: None
profile_id : str, optional, default: None
%%(extras)s
Returns
-------
data : DataFrame or DataFrame yielding iterator
""" % _QUERY_PARAMS
_AUTH_PARAMS = """secrets : str, optional
File path to the secrets file
scope : str, optional
Authentication scope
token_file_name : str, optional
Path to token storage
redirect : str, optional
Local host redirect if unspecified
"""
def reset_token_store():
    """
    Delete the default token store, forcing re-authentication on next use.
    """
    auth.reset_default_token_store()
@Substitution(extras=_AUTH_PARAMS)
@Appender(_GA_READER_DOC)
def read_ga(metrics, dimensions, start_date, **kwargs):
    # Split authentication-related keyword arguments off for the reader
    # constructor; everything else is forwarded to the query.
    auth_params = ('secrets', 'scope', 'token_file_name', 'redirect')
    reader_kwds = {name: kwargs.pop(name) for name in auth_params if name in kwargs}
    reader = GAnalytics(**reader_kwds)
    return reader.get_data(metrics=metrics, start_date=start_date,
                           dimensions=dimensions, **kwargs)
class OAuthDataReader(object):
    """
    Abstract class for handling OAuth2 authentication using the Google
    oauth2client library
    """
    def __init__(self, scope, token_file_name, redirect):
        """
        Parameters
        ----------
        scope : str
            Designates the authentication scope
        token_file_name : str
            Location of cache for authenticated tokens
        redirect : str
            Redirect URL
        """
        self.scope = scope
        self.token_store = auth.make_token_store(token_file_name)
        self.redirect_url = redirect
    def authenticate(self, secrets):
        """
        Run the authentication process and return an authorized
        http object
        Parameters
        ----------
        secrets : str
            File name for client secrets
        Notes
        -----
        See google documention for format of secrets file
        %s
        """ % DOC_URL
        # NOTE(review): the trailing ``% DOC_URL`` turns the string above into
        # a discarded expression statement, not a docstring (__doc__ is None).
        flow = self._create_flow(secrets)
        return auth.authenticate(flow, self.token_store)
    def _create_flow(self, secrets):
        """
        Create an authentication flow based on the secrets file
        Parameters
        ----------
        secrets : str
            File name for client secrets
        Notes
        -----
        See google documentation for format of secrets file
        %s
        """ % DOC_URL
        # NOTE(review): same quirk as in ``authenticate`` above — the
        # formatted string is a discarded expression, not a docstring.
        return auth.get_flow(secrets, self.scope, self.redirect_url)
class GDataReader(OAuthDataReader):
"""
Abstract class for reading data from google APIs using OAuth2
Subclasses must implement create_query method
"""
def __init__(self, scope=auth.DEFAULT_SCOPE,
token_file_name=auth.DEFAULT_TOKEN_FILE,
redirect=NO_CALLBACK, secrets=auth.DEFAULT_SECRETS):
super(GDataReader, self).__init__(scope, token_file_name, redirect)
self._service = self._init_service(secrets)
@property
def service(self):
"""The authenticated request service object"""
return self._service
def _init_service(self, secrets):
"""
Build an authenticated google api request service using the given
secrets file
"""
http = self.authenticate(secrets)
return auth.init_service(http)
def get_account(self, name=None, id=None, **kwargs):
""" Retrieve an account that matches the name, id, or some account
attribute specified in **kwargs
Parameters
----------
name : str, optional, default: None
id : str, optional, default: None
"""
accounts = self.service.management().accounts().list().execute()
return _get_match(accounts, name, id, **kwargs)
def get_web_property(self, account_id=None, name=None, id=None, **kwargs):
"""
Retrieve a web property given and account and property name, id, or
custom attribute
Parameters
----------
account_id : str, optional, default: None
name : str, optional, default: None
id : str, optional, default: None
"""
prop_store = self.service.management().webproperties()
kwds = {}
if account_id is not None:
kwds['accountId'] = account_id
prop_for_acct = prop_store.list(**kwds).execute()
return _get_match(prop_for_acct, name, id, **kwargs)
def get_profile(self, account_id=None, web_property_id=None, name=None,
id=None, **kwargs):
"""
Retrieve the right profile for the given account, web property, and
profile attribute (name, id, or arbitrary parameter in kwargs)
Parameters
----------
account_id : str, optional, default: None
web_property_id : str, optional, default: None
name : str, optional, default: None
id : str, optional, default: None
"""
profile_store = self.service.management().profiles()
kwds = {}
if account_id is not None:
kwds['accountId'] = account_id
if web_property_id is not None:
kwds['webPropertyId'] = web_property_id
profiles = profile_store.list(**kwds).execute()
return _get_match(profiles, name, id, **kwargs)
def create_query(self, *args, **kwargs):
raise NotImplementedError()
@Substitution(extras='')
@Appender(_GA_READER_DOC)
def get_data(self, metrics, start_date, end_date=None,
             dimensions=None, segment=None, filters=None, start_index=1,
             max_results=10000, index_col=None, parse_dates=True,
             keep_date_col=False, date_parser=None, na_values=None,
             converters=None, sort=True, dayfirst=False,
             account_name=None, account_id=None, property_name=None,
             property_id=None, profile_name=None, profile_id=None,
             chunksize=None):
    # A single API response is capped at 10,000 rows; anything larger
    # must be paged with chunksize.
    if chunksize is None and max_results > 10000:
        raise ValueError('Google API returns maximum of 10,000 rows, '
                         'please set chunksize')

    # Resolve account -> web property -> profile; queries ultimately run
    # against the profile id.
    account = self.get_account(account_name, account_id)
    web_property = self.get_web_property(account.get('id'), property_name,
                                         property_id)
    profile = self.get_profile(account.get('id'), web_property.get('id'),
                               profile_name, profile_id)

    profile_id = profile.get('id')

    # Default the frame index to the (cleaned) dimension columns.
    if index_col is None and dimensions is not None:
        if isinstance(dimensions, compat.string_types):
            dimensions = [dimensions]
        index_col = _clean_index(list(dimensions), parse_dates)

    def _read(start, result_size):
        # Fetch one page of results starting at `start` and parse it
        # into a DataFrame.
        query = self.create_query(profile_id, metrics, start_date,
                                  end_date=end_date, dimensions=dimensions,
                                  segment=segment, filters=filters,
                                  start_index=start,
                                  max_results=result_size)
        try:
            rs = query.execute()
            rows = rs.get('rows', [])
            col_info = rs.get('columnHeaders', [])
            return self._parse_data(rows, col_info, index_col,
                                    parse_dates=parse_dates,
                                    keep_date_col=keep_date_col,
                                    date_parser=date_parser,
                                    dayfirst=dayfirst,
                                    na_values=na_values,
                                    converters=converters, sort=sort)
        except HttpError as inst:
            raise ValueError('Google API error %s: %s' % (inst.resp.status,
                                                          inst._get_reason()))

    if chunksize is None:
        return _read(start_index, max_results)

    def iterator():
        # Lazily page through [start_index, max_results) in chunksize
        # steps, yielding one DataFrame per page.
        curr_start = start_index
        while curr_start < max_results:
            yield _read(curr_start, chunksize)
            curr_start += chunksize
    return iterator()
def _parse_data(self, rows, col_info, index_col, parse_dates=True,
                keep_date_col=False, date_parser=None, dayfirst=False,
                na_values=None, converters=None, sort=True):
    # TODO use returned column types
    col_names = _get_col_names(col_info)
    # Reuse the CSV-parser machinery to build a DataFrame from the raw
    # row lists returned by the API.
    df = psr._read(rows, dict(index_col=index_col, parse_dates=parse_dates,
                              date_parser=date_parser, dayfirst=dayfirst,
                              na_values=na_values,
                              keep_date_col=keep_date_col,
                              converters=converters,
                              header=None, names=col_names))
    # sort=True sorts by index; a column name (or list/tuple/array of
    # names) sorts by those columns instead; anything else leaves the
    # frame unsorted.
    if isinstance(sort, bool) and sort:
        return df.sort_index()
    elif isinstance(sort, (compat.string_types, list, tuple, np.ndarray)):
        return df.sort_index(by=sort)

    return df
class GAnalytics(GDataReader):
    """Concrete reader issuing queries against the core Analytics API."""

    @Appender(_QUERY_DOC)
    def create_query(self, profile_id, metrics, start_date, end_date=None,
                     dimensions=None, segment=None, filters=None,
                     start_index=None, max_results=10000, **kwargs):
        qry = format_query(profile_id, metrics, start_date, end_date=end_date,
                           dimensions=dimensions, segment=segment,
                           filters=filters, start_index=start_index,
                           max_results=max_results, **kwargs)
        try:
            return self.service.data().ga().get(**qry)
        except TypeError as error:
            # NOTE(review): invalid/unknown query parameters appear to
            # surface as TypeError from the client library -- re-raised
            # as a ValueError with context.
            raise ValueError('Error making query: %s' % error)
def format_query(ids, metrics, start_date, end_date=None, dimensions=None,
                 segment=None, filters=None, sort=None, start_index=None,
                 max_results=10000, **kwargs):
    """Build the keyword dict for a core-API Analytics query.

    Scalar metrics/segment values are normalized into the prefixed,
    comma-joined strings the API expects; dates are rendered YYYY-MM-DD
    (end_date defaults to today). Returns a dict suitable for
    ``service.data().ga().get(**qry)``.

    Raises ValueError if segment is neither a string (dynamic segment or
    segment name) nor an int (segment ID).
    """
    if isinstance(metrics, compat.string_types):
        metrics = [metrics]
    met = ','.join(['ga:%s' % x for x in metrics])

    start_date = pd.to_datetime(start_date).strftime('%Y-%m-%d')
    if end_date is None:
        end_date = datetime.today()
    end_date = pd.to_datetime(end_date).strftime('%Y-%m-%d')

    qry = dict(ids='ga:%s' % str(ids),
               metrics=met,
               start_date=start_date,
               end_date=end_date)
    qry.update(kwargs)

    # Plain loop instead of a side-effect-only list comprehension.
    for arg_name, arg_value in zip(['dimensions', 'filters', 'sort'],
                                   [dimensions, filters, sort]):
        _maybe_add_arg(qry, arg_name, arg_value)

    if isinstance(segment, compat.string_types):
        # Raw string: the original non-raw "\-\_" escapes are invalid
        # escape sequences (DeprecationWarning on modern Pythons).
        if re.match(r"^[a-zA-Z0-9\-\_]+$", segment):
            # A simple token is treated as a stored-segment ID.
            _maybe_add_arg(qry, 'segment', segment, 'gaid:')
        else:
            # Anything else is treated as a dynamic segment expression.
            _maybe_add_arg(qry, 'segment', segment, 'dynamic::ga')
    elif isinstance(segment, int):
        _maybe_add_arg(qry, 'segment', segment, 'gaid:')
    elif segment:
        raise ValueError("segment must be string for dynamic and int ID")

    if start_index is not None:
        qry['start_index'] = str(start_index)
    if max_results is not None:
        qry['max_results'] = str(max_results)
    return qry
def _maybe_add_arg(query, field, data, prefix='ga'):
    """Set ``query[field]`` to a comma-joined, prefixed list (no-op on None)."""
    if data is None:
        return
    if isinstance(data, (compat.string_types, int)):
        data = [data]
    query[field] = ','.join('%s:%s' % (prefix, item) for item in data)
def _get_match(obj_store, name, id, **kwargs):
key, val = None, None
if len(kwargs) > 0:
key = list(kwargs.keys())[0]
val = list(kwargs.values())[0]
if name is None and id is None and key is None:
return obj_store.get('items')[0]
name_ok = lambda item: name is not None and item.get('name') == name
id_ok = lambda item: id is not None and item.get('id') == id
key_ok = lambda item: key is not None and item.get(key) == val
match = None
if obj_store.get('items'):
# TODO look up gapi for faster lookup
for item in obj_store.get('items'):
if name_ok(item) or id_ok(item) or key_ok(item):
return item
def _clean_index(index_dims, parse_dates):
_should_add = lambda lst: pd.Index(lst).isin(index_dims).all()
to_remove = []
to_add = []
if isinstance(parse_dates, (list, tuple, np.ndarray)):
for lst in parse_dates:
if isinstance(lst, (list, tuple, np.ndarray)):
if _should_add(lst):
to_add.append('_'.join(lst))
to_remove.extend(lst)
elif isinstance(parse_dates, dict):
for name, lst in compat.iteritems(parse_dates):
if isinstance(lst, (list, tuple, np.ndarray)):
if _should_add(lst):
to_add.append(name)
to_remove.extend(lst)
index_dims = pd.Index(index_dims)
to_remove = pd.Index(set(to_remove))
to_add = pd.Index(set(to_add))
return index_dims.difference(to_remove).union(to_add)
def _get_col_names(header_info):
return [x['name'][3:] for x in header_info]
def _get_column_types(header_info):
return [(x['name'][3:], x['columnType']) for x in header_info]
def _get_dim_names(header_info):
    """Names (sans 'ga:' prefix) of the DIMENSION columns only."""
    return [entry['name'][3:] for entry in header_info
            if entry['columnType'] == u('DIMENSION')]
def _get_met_names(header_info):
    """Names (sans 'ga:' prefix) of the METRIC columns only."""
    return [entry['name'][3:] for entry in header_info
            if entry['columnType'] == u('METRIC')]
def _get_data_types(header_info):
    """Pairs of (prefix-stripped name, python type) using TYPE_MAP; unknown
    API dataType values fall back to ``object``."""
    return [(entry['name'][3:], TYPE_MAP.get(entry['dataType'], object))
            for entry in header_info]
| |
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
from shade import task_manager
# ---- Keystone: user management ----

class UserList(task_manager.Task):
    def main(self, client):
        return client.keystone_client.users.list()

class UserCreate(task_manager.Task):
    def main(self, client):
        return client.keystone_client.users.create(**self.args)

class UserDelete(task_manager.Task):
    def main(self, client):
        return client.keystone_client.users.delete(**self.args)

class UserUpdate(task_manager.Task):
    def main(self, client):
        return client.keystone_client.users.update(**self.args)
# ---- Nova: flavors ----

class FlavorList(task_manager.Task):
    def main(self, client):
        return client.nova_client.flavors.list(**self.args)

class FlavorCreate(task_manager.Task):
    def main(self, client):
        return client.nova_client.flavors.create(**self.args)

class FlavorDelete(task_manager.Task):
    def main(self, client):
        return client.nova_client.flavors.delete(**self.args)

class FlavorGet(task_manager.Task):
    def main(self, client):
        return client.nova_client.flavors.get(**self.args)
# ---- Nova: servers ----

class ServerList(task_manager.Task):
    def main(self, client):
        return client.nova_client.servers.list(**self.args)

class ServerGet(task_manager.Task):
    def main(self, client):
        return client.nova_client.servers.get(**self.args)

class ServerCreate(task_manager.Task):
    def main(self, client):
        return client.nova_client.servers.create(**self.args)

class ServerDelete(task_manager.Task):
    def main(self, client):
        return client.nova_client.servers.delete(**self.args)

class ServerRebuild(task_manager.Task):
    def main(self, client):
        return client.nova_client.servers.rebuild(**self.args)
# ---- Nova: keypairs and raw endpoint access ----

class KeypairList(task_manager.Task):
    def main(self, client):
        return client.nova_client.keypairs.list()

class KeypairCreate(task_manager.Task):
    def main(self, client):
        return client.nova_client.keypairs.create(**self.args)

class KeypairDelete(task_manager.Task):
    def main(self, client):
        return client.nova_client.keypairs.delete(**self.args)

class NovaUrlGet(task_manager.Task):
    # Raw HTTP GET against the nova endpoint (bypasses resource managers).
    def main(self, client):
        return client.nova_client.client.get(**self.args)
# ---- Neutron: networks and routers ----

class NetworkList(task_manager.Task):
    def main(self, client):
        return client.neutron_client.list_networks()

class NetworkCreate(task_manager.Task):
    def main(self, client):
        return client.neutron_client.create_network(**self.args)

class NetworkDelete(task_manager.Task):
    def main(self, client):
        return client.neutron_client.delete_network(**self.args)

class RouterList(task_manager.Task):
    def main(self, client):
        return client.neutron_client.list_routers()

class RouterCreate(task_manager.Task):
    def main(self, client):
        return client.neutron_client.create_router(**self.args)

class RouterUpdate(task_manager.Task):
    def main(self, client):
        return client.neutron_client.update_router(**self.args)

class RouterDelete(task_manager.Task):
    # Deliberately returns None (delete result is discarded).
    def main(self, client):
        client.neutron_client.delete_router(**self.args)
# ---- Glance / Nova: images ----

class GlanceImageList(task_manager.Task):
    # Materializes a caller-supplied image generator inside the task
    # thread (self.args['image_gen'] is a pre-built paginated generator).
    def main(self, client):
        return [image for image in self.args['image_gen']]

class NovaImageList(task_manager.Task):
    def main(self, client):
        return client.nova_client.images.list()

class ImageSnapshotCreate(task_manager.Task):
    def main(self, client):
        return client.nova_client.servers.create_image(**self.args)

class ImageCreate(task_manager.Task):
    def main(self, client):
        return client.glance_client.images.create(**self.args)

class ImageDelete(task_manager.Task):
    def main(self, client):
        return client.glance_client.images.delete(**self.args)

class ImageTaskCreate(task_manager.Task):
    def main(self, client):
        return client.glance_client.tasks.create(**self.args)

class ImageTaskGet(task_manager.Task):
    def main(self, client):
        return client.glance_client.tasks.get(**self.args)

class ImageUpdate(task_manager.Task):
    # No return value (update result is discarded).
    def main(self, client):
        client.glance_client.images.update(**self.args)

class ImageUpload(task_manager.Task):
    # No return value (upload result is discarded).
    def main(self, client):
        client.glance_client.images.upload(**self.args)
# ---- Cinder / Nova: volumes ----

class VolumeCreate(task_manager.Task):
    def main(self, client):
        return client.cinder_client.volumes.create(**self.args)

class VolumeDelete(task_manager.Task):
    def main(self, client):
        client.cinder_client.volumes.delete(**self.args)

class VolumeList(task_manager.Task):
    def main(self, client):
        return client.cinder_client.volumes.list()

class VolumeDetach(task_manager.Task):
    # Attach/detach go through nova's volume proxy, not cinder directly.
    def main(self, client):
        client.nova_client.volumes.delete_server_volume(**self.args)

class VolumeAttach(task_manager.Task):
    def main(self, client):
        client.nova_client.volumes.create_server_volume(**self.args)
# ---- Security groups: parallel Neutron and Nova implementations ----

class NeutronSecurityGroupList(task_manager.Task):
    def main(self, client):
        return client.neutron_client.list_security_groups()

class NeutronSecurityGroupCreate(task_manager.Task):
    def main(self, client):
        return client.neutron_client.create_security_group(**self.args)

class NeutronSecurityGroupDelete(task_manager.Task):
    def main(self, client):
        return client.neutron_client.delete_security_group(**self.args)

class NeutronSecurityGroupUpdate(task_manager.Task):
    def main(self, client):
        return client.neutron_client.update_security_group(**self.args)

class NeutronSecurityGroupRuleCreate(task_manager.Task):
    def main(self, client):
        return client.neutron_client.create_security_group_rule(**self.args)

class NeutronSecurityGroupRuleDelete(task_manager.Task):
    def main(self, client):
        return client.neutron_client.delete_security_group_rule(**self.args)

class NovaSecurityGroupList(task_manager.Task):
    def main(self, client):
        return client.nova_client.security_groups.list()

class NovaSecurityGroupCreate(task_manager.Task):
    def main(self, client):
        return client.nova_client.security_groups.create(**self.args)

class NovaSecurityGroupDelete(task_manager.Task):
    def main(self, client):
        return client.nova_client.security_groups.delete(**self.args)

class NovaSecurityGroupUpdate(task_manager.Task):
    def main(self, client):
        return client.nova_client.security_groups.update(**self.args)

class NovaSecurityGroupRuleCreate(task_manager.Task):
    def main(self, client):
        return client.nova_client.security_group_rules.create(**self.args)

class NovaSecurityGroupRuleDelete(task_manager.Task):
    def main(self, client):
        return client.nova_client.security_group_rules.delete(**self.args)
# ---- Floating IPs: parallel Neutron and Nova implementations ----

class NeutronFloatingIPList(task_manager.Task):
    def main(self, client):
        return client.neutron_client.list_floatingips(**self.args)

class NovaFloatingIPList(task_manager.Task):
    def main(self, client):
        return client.nova_client.floating_ips.list()

class NeutronFloatingIPCreate(task_manager.Task):
    def main(self, client):
        return client.neutron_client.create_floatingip(**self.args)

class NovaFloatingIPCreate(task_manager.Task):
    def main(self, client):
        return client.nova_client.floating_ips.create(**self.args)

class NeutronFloatingIPDelete(task_manager.Task):
    def main(self, client):
        return client.neutron_client.delete_floatingip(**self.args)

class NovaFloatingIPDelete(task_manager.Task):
    def main(self, client):
        return client.nova_client.floating_ips.delete(**self.args)

class NovaFloatingIPAttach(task_manager.Task):
    def main(self, client):
        return client.nova_client.servers.add_floating_ip(**self.args)

class NovaFloatingIPDetach(task_manager.Task):
    def main(self, client):
        return client.nova_client.servers.remove_floating_ip(**self.args)

class NeutronFloatingIPUpdate(task_manager.Task):
    def main(self, client):
        return client.neutron_client.update_floatingip(**self.args)

class FloatingIPPoolList(task_manager.Task):
    def main(self, client):
        return client.nova_client.floating_ip_pools.list()
# ---- Swift: containers and objects ----

class ContainerGet(task_manager.Task):
    def main(self, client):
        return client.swift_client.head_container(**self.args)

class ContainerCreate(task_manager.Task):
    # No return value (PUT result is discarded).
    def main(self, client):
        client.swift_client.put_container(**self.args)

class ContainerDelete(task_manager.Task):
    def main(self, client):
        client.swift_client.delete_container(**self.args)

class ContainerUpdate(task_manager.Task):
    def main(self, client):
        client.swift_client.post_container(**self.args)

class ObjectCapabilities(task_manager.Task):
    def main(self, client):
        return client.swift_client.get_capabilities(**self.args)

class ObjectDelete(task_manager.Task):
    def main(self, client):
        return client.swift_client.delete_object(**self.args)

class ObjectCreate(task_manager.Task):
    # Uses the high-level SwiftService (swift_service), not swift_client,
    # to get segmented/large-object upload support.
    def main(self, client):
        return client.swift_service.upload(**self.args)

class ObjectUpdate(task_manager.Task):
    def main(self, client):
        client.swift_client.post_object(**self.args)

class ObjectMetadata(task_manager.Task):
    def main(self, client):
        return client.swift_client.head_object(**self.args)
# ---- Neutron: subnets and ports ----

class SubnetCreate(task_manager.Task):
    def main(self, client):
        return client.neutron_client.create_subnet(**self.args)

class SubnetList(task_manager.Task):
    def main(self, client):
        return client.neutron_client.list_subnets()

class SubnetDelete(task_manager.Task):
    # No return value (delete result is discarded).
    def main(self, client):
        client.neutron_client.delete_subnet(**self.args)

class SubnetUpdate(task_manager.Task):
    def main(self, client):
        return client.neutron_client.update_subnet(**self.args)

class PortList(task_manager.Task):
    def main(self, client):
        return client.neutron_client.list_ports(**self.args)

class PortCreate(task_manager.Task):
    def main(self, client):
        return client.neutron_client.create_port(**self.args)

class PortUpdate(task_manager.Task):
    def main(self, client):
        return client.neutron_client.update_port(**self.args)

class PortDelete(task_manager.Task):
    def main(self, client):
        return client.neutron_client.delete_port(**self.args)
# ---- Ironic: bare-metal nodes and ports ----

class MachineCreate(task_manager.Task):
    def main(self, client):
        return client.ironic_client.node.create(**self.args)

class MachineDelete(task_manager.Task):
    def main(self, client):
        return client.ironic_client.node.delete(**self.args)

class MachinePatch(task_manager.Task):
    def main(self, client):
        return client.ironic_client.node.update(**self.args)

class MachinePortGet(task_manager.Task):
    def main(self, client):
        return client.ironic_client.port.get(**self.args)

class MachinePortGetByAddress(task_manager.Task):
    def main(self, client):
        return client.ironic_client.port.get_by_address(**self.args)

class MachinePortCreate(task_manager.Task):
    def main(self, client):
        return client.ironic_client.port.create(**self.args)

class MachinePortDelete(task_manager.Task):
    def main(self, client):
        return client.ironic_client.port.delete(**self.args)

class MachinePortList(task_manager.Task):
    def main(self, client):
        return client.ironic_client.port.list()

class MachineNodeGet(task_manager.Task):
    def main(self, client):
        return client.ironic_client.node.get(**self.args)

class MachineNodeList(task_manager.Task):
    def main(self, client):
        return client.ironic_client.node.list(**self.args)

class MachineNodePortList(task_manager.Task):
    def main(self, client):
        return client.ironic_client.node.list_ports(**self.args)

class MachineNodeUpdate(task_manager.Task):
    def main(self, client):
        return client.ironic_client.node.update(**self.args)

class MachineNodeValidate(task_manager.Task):
    def main(self, client):
        return client.ironic_client.node.validate(**self.args)

class MachineSetMaintenance(task_manager.Task):
    def main(self, client):
        return client.ironic_client.node.set_maintenance(**self.args)

class MachineSetPower(task_manager.Task):
    def main(self, client):
        return client.ironic_client.node.set_power_state(**self.args)

class MachineSetProvision(task_manager.Task):
    def main(self, client):
        return client.ironic_client.node.set_provision_state(**self.args)
# ---- Keystone: service catalog and endpoints ----

class ServiceCreate(task_manager.Task):
    def main(self, client):
        return client.keystone_client.services.create(**self.args)

class ServiceList(task_manager.Task):
    def main(self, client):
        return client.keystone_client.services.list()

class ServiceDelete(task_manager.Task):
    def main(self, client):
        return client.keystone_client.services.delete(**self.args)

class EndpointCreate(task_manager.Task):
    def main(self, client):
        return client.keystone_client.endpoints.create(**self.args)

class EndpointList(task_manager.Task):
    def main(self, client):
        return client.keystone_client.endpoints.list()

class EndpointDelete(task_manager.Task):
    def main(self, client):
        return client.keystone_client.endpoints.delete(**self.args)
# ---- Keystone: identity domains ----
# IdentityDomain and not Domain because Domain is a DNS concept

class IdentityDomainCreate(task_manager.Task):
    def main(self, client):
        return client.keystone_client.domains.create(**self.args)

# IdentityDomain and not Domain because Domain is a DNS concept
class IdentityDomainList(task_manager.Task):
    def main(self, client):
        return client.keystone_client.domains.list()

# IdentityDomain and not Domain because Domain is a DNS concept
class IdentityDomainGet(task_manager.Task):
    def main(self, client):
        return client.keystone_client.domains.get(**self.args)

# IdentityDomain and not Domain because Domain is a DNS concept
class IdentityDomainUpdate(task_manager.Task):
    def main(self, client):
        return client.keystone_client.domains.update(**self.args)

# IdentityDomain and not Domain because Domain is a DNS concept
class IdentityDomainDelete(task_manager.Task):
    def main(self, client):
        return client.keystone_client.domains.delete(**self.args)
# ---- Designate: DNS domains and records ----

class DomainList(task_manager.Task):
    def main(self, client):
        return client.designate_client.domains.list()

class DomainGet(task_manager.Task):
    def main(self, client):
        return client.designate_client.domains.get(**self.args)

class RecordList(task_manager.Task):
    def main(self, client):
        return client.designate_client.records.list(**self.args)

class RecordGet(task_manager.Task):
    def main(self, client):
        return client.designate_client.records.get(**self.args)
| |
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from compose.config.environment import Environment
from compose.config.interpolation import interpolate_environment_variables
from compose.config.interpolation import Interpolator
from compose.config.interpolation import InvalidInterpolation
from compose.config.interpolation import TemplateWithDefaults
from compose.const import COMPOSEFILE_V2_0 as V2_0
from compose.const import COMPOSEFILE_V2_3 as V2_3
from compose.const import COMPOSEFILE_V3_4 as V3_4
@pytest.fixture
def mock_env():
    # Environment covering each scalar conversion path: truthy/falsy
    # boolean spellings, positive/negative ints, a float, and an
    # octal-style file mode.
    return Environment({
        'USER': 'jenny',
        'FOO': 'bar',
        'TRUE': 'True',
        'FALSE': 'OFF',
        'POSINT': '50',
        'NEGINT': '-200',
        'FLOAT': '0.145',
        'MODE': '0600',
    })

@pytest.fixture
def variable_mapping():
    # 'BAR' is set-but-empty -- exercises the ':-' vs '-' default rules.
    return Environment({'FOO': 'first', 'BAR': ''})

@pytest.fixture
def defaults_interpolator(variable_mapping):
    # Plain string-in/string-out interpolation callable.
    return Interpolator(TemplateWithDefaults, variable_mapping).interpolate
def test_interpolate_environment_variables_in_services(mock_env):
    """$VAR and ${VAR} are substituted recursively through service dicts."""
    services = {
        'servicea': {
            'image': 'example:${USER}',
            'volumes': ['$FOO:/target'],
            'logging': {
                'driver': '${FOO}',
                'options': {
                    'user': '$USER',
                }
            }
        }
    }
    expected = {
        'servicea': {
            'image': 'example:jenny',
            'volumes': ['bar:/target'],
            'logging': {
                'driver': 'bar',
                'options': {
                    'user': 'jenny',
                }
            }
        }
    }
    value = interpolate_environment_variables(V2_0, services, 'service', mock_env)
    assert value == expected

def test_interpolate_environment_variables_in_volumes(mock_env):
    """Volume definitions interpolate too; a bare None entry becomes {}."""
    volumes = {
        'data': {
            'driver': '$FOO',
            'driver_opts': {
                'max': 2,
                'user': '${USER}'
            }
        },
        'other': None,
    }
    expected = {
        'data': {
            'driver': 'bar',
            'driver_opts': {
                'max': 2,
                'user': 'jenny'
            }
        },
        'other': {},
    }
    value = interpolate_environment_variables(V2_0, volumes, 'volume', mock_env)
    assert value == expected

def test_interpolate_environment_variables_in_secrets(mock_env):
    """Secret definitions (v3.4) interpolate; a bare None entry becomes {}."""
    secrets = {
        'secretservice': {
            'file': '$FOO',
            'labels': {
                'max': 2,
                'user': '${USER}'
            }
        },
        'other': None,
    }
    expected = {
        'secretservice': {
            'file': 'bar',
            'labels': {
                'max': 2,
                'user': 'jenny'
            }
        },
        'other': {},
    }
    value = interpolate_environment_variables(V3_4, secrets, 'secret', mock_env)
    assert value == expected
def test_interpolate_environment_services_convert_types_v2(mock_env):
    """Interpolated strings are coerced to the schema types (v2.3):
    ints, floats, and the full set of boolean spellings."""
    entry = {
        'service1': {
            'blkio_config': {
                'weight': '${POSINT}',
                'weight_device': [{'file': '/dev/sda1', 'weight': '${POSINT}'}]
            },
            'cpus': '${FLOAT}',
            'cpu_count': '$POSINT',
            'healthcheck': {
                'retries': '${POSINT:-3}',
                'disable': '${FALSE}',
                'command': 'true'
            },
            'mem_swappiness': '${DEFAULT:-127}',
            'oom_score_adj': '${NEGINT}',
            'scale': '${POSINT}',
            'ulimits': {
                'nproc': '${POSINT}',
                'nofile': {
                    'soft': '${POSINT}',
                    'hard': '${DEFAULT:-40000}'
                },
            },
            'privileged': '${TRUE}',
            'read_only': '${DEFAULT:-no}',
            'tty': '${DEFAULT:-N}',
            'stdin_open': '${DEFAULT-on}',
        }
    }
    expected = {
        'service1': {
            'blkio_config': {
                'weight': 50,
                'weight_device': [{'file': '/dev/sda1', 'weight': 50}]
            },
            'cpus': 0.145,
            'cpu_count': 50,
            'healthcheck': {
                'retries': 50,
                'disable': False,
                'command': 'true'
            },
            'mem_swappiness': 127,
            'oom_score_adj': -200,
            'scale': 50,
            'ulimits': {
                'nproc': 50,
                'nofile': {
                    'soft': 50,
                    'hard': 40000
                },
            },
            'privileged': True,
            'read_only': False,
            'tty': False,
            'stdin_open': True,
        }
    }
    value = interpolate_environment_variables(V2_3, entry, 'service', mock_env)
    assert value == expected

def test_interpolate_environment_services_convert_types_v3(mock_env):
    """Same coercion for the v3.4 schema, including deploy sub-sections
    and octal port/secret/config modes."""
    entry = {
        'service1': {
            'healthcheck': {
                'retries': '${POSINT:-3}',
                'disable': '${FALSE}',
                'command': 'true'
            },
            'ulimits': {
                'nproc': '${POSINT}',
                'nofile': {
                    'soft': '${POSINT}',
                    'hard': '${DEFAULT:-40000}'
                },
            },
            'privileged': '${TRUE}',
            'read_only': '${DEFAULT:-no}',
            'tty': '${DEFAULT:-N}',
            'stdin_open': '${DEFAULT-on}',
            'deploy': {
                'update_config': {
                    'parallelism': '${DEFAULT:-2}',
                    'max_failure_ratio': '${FLOAT}',
                },
                'restart_policy': {
                    'max_attempts': '$POSINT',
                },
                'replicas': '${DEFAULT-3}'
            },
            'ports': [{'target': '${POSINT}', 'published': '${DEFAULT:-5000}'}],
            'configs': [{'mode': '${MODE}', 'source': 'config1'}],
            'secrets': [{'mode': '${MODE}', 'source': 'secret1'}],
        }
    }
    expected = {
        'service1': {
            'healthcheck': {
                'retries': 50,
                'disable': False,
                'command': 'true'
            },
            'ulimits': {
                'nproc': 50,
                'nofile': {
                    'soft': 50,
                    'hard': 40000
                },
            },
            'privileged': True,
            'read_only': False,
            'tty': False,
            'stdin_open': True,
            'deploy': {
                'update_config': {
                    'parallelism': 2,
                    'max_failure_ratio': 0.145,
                },
                'restart_policy': {
                    'max_attempts': 50,
                },
                'replicas': 3
            },
            'ports': [{'target': 50, 'published': 5000}],
            'configs': [{'mode': 0o600, 'source': 'config1'}],
            'secrets': [{'mode': 0o600, 'source': 'secret1'}],
        }
    }
    value = interpolate_environment_variables(V3_4, entry, 'service', mock_env)
    assert value == expected
def test_interpolate_environment_network_convert_types(mock_env):
    """Network-level boolean fields are coerced as well."""
    entry = {
        'network1': {
            'external': '${FALSE}',
            'attachable': '${TRUE}',
            'internal': '${DEFAULT:-false}'
        }
    }
    expected = {
        'network1': {
            'external': False,
            'attachable': True,
            'internal': False,
        }
    }
    value = interpolate_environment_variables(V3_4, entry, 'network', mock_env)
    assert value == expected

def test_interpolate_environment_external_resource_convert_types(mock_env):
    """'external' coerces identically across every top-level section kind."""
    entry = {
        'resource1': {
            'external': '${TRUE}',
        }
    }
    expected = {
        'resource1': {
            'external': True,
        }
    }
    value = interpolate_environment_variables(V3_4, entry, 'network', mock_env)
    assert value == expected
    value = interpolate_environment_variables(V3_4, entry, 'volume', mock_env)
    assert value == expected
    value = interpolate_environment_variables(V3_4, entry, 'secret', mock_env)
    assert value == expected
    value = interpolate_environment_variables(V3_4, entry, 'config', mock_env)
    assert value == expected
def test_escaped_interpolation(defaults_interpolator):
    # '$$' escapes interpolation, emitting a literal '$'.
    assert defaults_interpolator('$${foo}') == '${foo}'
def test_invalid_interpolation(defaults_interpolator):
    """Every malformed ${...} construct must raise InvalidInterpolation."""
    malformed = ('${', '$}', '${}', '${ }', '${ foo}', '${foo }', '${foo!}')
    for template in malformed:
        with pytest.raises(InvalidInterpolation):
            defaults_interpolator(template)
def test_interpolate_missing_no_default(defaults_interpolator):
    # Unset and empty variables both collapse to '' when no default is given.
    assert defaults_interpolator("This ${missing} var") == "This var"
    assert defaults_interpolator("This ${BAR} var") == "This var"

def test_interpolate_with_value(defaults_interpolator):
    assert defaults_interpolator("This $FOO var") == "This first var"
    assert defaults_interpolator("This ${FOO} var") == "This first var"

def test_interpolate_missing_with_default(defaults_interpolator):
    assert defaults_interpolator("ok ${missing:-def}") == "ok def"
    assert defaults_interpolator("ok ${missing-def}") == "ok def"
    # Default values may themselves contain ':-' sequences.
    assert defaults_interpolator("ok ${BAR:-/non:-alphanumeric}") == "ok /non:-alphanumeric"

def test_interpolate_with_empty_and_default_value(defaults_interpolator):
    # ':-' substitutes when the variable is set-but-empty; '-' only when unset.
    assert defaults_interpolator("ok ${BAR:-def}") == "ok def"
    assert defaults_interpolator("ok ${BAR-def}") == "ok "
| |
#!/usr/bin/env python2
#
# Copyright (c) 2016 Intel Corporation.
#
# SPDX-License-Identifier: Apache-2.0
#
import sys,re,os
import datetime
# Per-event payload records decoded from the monitor stream.

class ContextSwitch():
    # Context identifier recorded with a context-switch event.
    def __init__(self, context):
        self.context = context

class Interrupt():
    # IRQ number recorded with an interrupt event.
    def __init__(self, irq):
        self.irq = irq

class Sleep():
    # Sleep duration (OS ticks) and the IRQ recorded as the wake cause.
    def __init__(self, duration, irq):
        self.duration = duration
        self.irq = irq

class TaskState():
    # Kind of state change recorded for a microkernel task.
    SWITCH = 0
    RESET_BIT = 1
    SET_BIT = 2

    def __init__(self, task, switch, bits):
        self.task = task        # task object pointer/identifier
        self.switch = switch    # one of SWITCH / RESET_BIT / SET_BIT
        self.bits = bits        # affected state bits (see decodeStateBits)

class PacketCmd():
    # Command packet pointer submitted by a task.
    def __init__(self, task, command_ptr):
        self.task = task
        self.command_ptr = command_ptr

class Kevent():
    # Kernel event object pointer.
    def __init__(self, event_ptr):
        self.event_ptr = event_ptr
class Event():
    # Decoded-event types. Distinct from the on-wire EventType codes
    # below: one SLEEP record is fanned out into a TYPE_SLEEP plus a
    # TYPE_WAKEUP event (see getData).
    TYPE_ERROR = -1
    TYPE_CONTEXTSWITCH = 1
    TYPE_INTERRUPT = 2
    TYPE_SLEEP = 3
    TYPE_WAKEUP = 4
    TYPE_TASK_STATE_CHANGE = 5
    TYPE_COMMAND_PACKET = 6
    TYPE_KEVENT = 7

    def __init__(self, time, event_type, data):
        self.time = time            # event timestamp (tick units; see Params)
        self.etype = event_type     # one of the TYPE_* constants above
        self.data = data            # payload object matching etype

class EventType():
    # On-wire event codes as read from the monitor dump.
    PLATFORM_INFO = 255
    CONTEXTSWITCH = 1
    INTERRUPT = 2
    SLEEP = 3
    TASK_STATE_CHANGE = 4
    COMMAND_PACKET = 5
    KEVENT = 6

class Params():
    # Filled from the PLATFORM_INFO record: tick-to-millisecond ratio.
    TICKS_PER_MSEC = 0
def getData(monitorFile):
    """Parse a raw monitor dump file into a list of Event objects.

    Python 2 only: relies on str.encode('hex') to decode bytes. All
    multi-byte fields are little-endian, hence the [::-1] reversals
    before hex conversion. Returns the events accumulated so far on any
    truncated read (EOF).
    """
    global symbols
    MO_STBIT0 = 0x20000000
    MO_STBIT1 = 0x30000000
    MO_EVENT = 0x40000000
    MO_MASK = 0xFFFFFFF
    EVT_MASK = 0xFFFFFFC

    eventList = []
    count = 0
    with open(monitorFile, 'rb') as f:
        while (1):
            c = f.read(1)
            count += 1
            if (len(c) < 1):
                # EOF reached cleanly.
                return eventList
            code = int(c.encode('hex'), 16)
            # Zero bytes are padding and are skipped silently.
            if (code > 0):
                #print "Count={:x}".format(count-1)
                # 4-byte record header: 2-byte type + 2-byte payload size
                # (in 32-bit words).
                header = c
                cc = f.read(3)
                if (len(cc) < 3):
                    return eventList
                header += cc
                count += 3
                evt_type = int(header[0:2][::-1].encode('hex'), 16)
                size = int(header[2:4][::-1].encode('hex'), 16)
                #print "Event {}({})".format(evt_type, size)
                chunk = f.read(size*4)
                if (len(chunk) < size*4):
                    return eventList
                count += size*4
                if (evt_type == EventType.PLATFORM_INFO):
                    # Platform record carries ticks-per-second; store
                    # as ticks per millisecond.
                    Params.TICKS_PER_MSEC = \
                        int(chunk[0:4][::-1].encode('hex'), 16) / 1000.0
                elif (evt_type == EventType.CONTEXTSWITCH):
                    time = int(chunk[0:4][::-1].encode('hex'), 16)
                    context = int(chunk[4:8][::-1].encode('hex'), 16)
                    eventList.append(Event(time,
                                           Event.TYPE_CONTEXTSWITCH,
                                           ContextSwitch(context)))
                elif (evt_type == EventType.INTERRUPT):
                    time = int(chunk[0:4][::-1].encode('hex'), 16)
                    irq = int(chunk[4:8][::-1].encode('hex'), 16)
                    eventList.append(Event(time,
                                           Event.TYPE_INTERRUPT,
                                           Interrupt(irq)))
                elif (evt_type == EventType.SLEEP):
                    # One wire record produces two decoded events: the
                    # sleep itself and the wakeup that ended it.
                    time = int(chunk[0:4][::-1].encode('hex'), 16)
                    duration = int(chunk[4:8][::-1].encode('hex'), 16)
                    cause = int(chunk[8:12][::-1].encode('hex'), 16)
                    eventList.append(Event(time,
                                           Event.TYPE_SLEEP,
                                           Sleep(duration, cause)))
                    eventList.append(Event(time,
                                           Event.TYPE_WAKEUP,
                                           Sleep(duration, cause)))
                elif (evt_type == EventType.TASK_STATE_CHANGE):
                    time = int(chunk[0:4][::-1].encode('hex'), 16)
                    task = int(chunk[4:8][::-1].encode('hex'), 16)
                    data = int(chunk[8:12][::-1].encode('hex'), 16)
                    # Top nibble of `data` selects the change kind; the
                    # remaining 28 bits are the affected state bits.
                    if data == 0:
                        eventList.append(Event(
                            time,
                            Event.TYPE_TASK_STATE_CHANGE,
                            TaskState(task,
                                      TaskState.SWITCH,
                                      0)))
                    elif (data & 0xF0000000) == MO_STBIT0:
                        eventList.append(Event(
                            time,
                            Event.TYPE_TASK_STATE_CHANGE,
                            TaskState(task,
                                      TaskState.RESET_BIT,
                                      data & MO_MASK )))
                    elif (data & 0xF0000000) == MO_STBIT1:
                        eventList.append(Event(
                            time,
                            Event.TYPE_TASK_STATE_CHANGE,
                            TaskState(task,
                                      TaskState.SET_BIT,
                                      data & MO_MASK )))
                elif (evt_type == EventType.COMMAND_PACKET):
                    time = int(chunk[0:4][::-1].encode('hex'), 16)
                    task = int(chunk[4:8][::-1].encode('hex'), 16)
                    cmd_ptr = int(chunk[8:12][::-1].encode('hex'), 16)
                    eventList.append(Event(time,
                                           Event.TYPE_COMMAND_PACKET,
                                           PacketCmd(task, cmd_ptr)))
                elif (evt_type == EventType.KEVENT):
                    time = int(chunk[0:4][::-1].encode('hex'), 16)
                    event = int(chunk[4:8][::-1].encode('hex'), 16)
                    eventList.append(Event(time,
                                           Event.TYPE_KEVENT,
                                           Kevent(event & EVT_MASK)))
                else:
                    # Unknown type: record it as an error event carrying
                    # the raw type code.
                    eventList.append(Event(0, Event.TYPE_ERROR, evt_type))
def findFollowingTask(events, i):
    """Return the context of the first context-switch event after index i,
    or -1 when none follows."""
    for j in range(i + 1, len(events)):
        if events[j].etype == Event.TYPE_CONTEXTSWITCH:
            return events[j].data.context
    return -1
def decodeStateBits(flags):
    """Render a task-state bitmask as a space-separated list of flag names.

    Bug fix: the original iterated range(0, len(bitValue) - 1), which
    never tested the highest bit ("SENDDATA", bit 27); the full table is
    now scanned. Each matched flag is followed by a single space, as
    before.
    """
    bitValue = [ "STOP", "TERM", "SUSP", "BLCK", "GDBSTOP", "PRIO" , "NA" , "NA", "NA", "NA",
                 "NA", "TIME", "DRIV", "RESV", "EVNT", "ENQU", "DEQU", "SEND", "RECV", "SEMA",
                 "LIST", "LOCK", "ALLOC", "GTBL", "RESV", "RESV", "RECVDATA", "SENDDATA" ]
    parts = []
    for bit, label in enumerate(bitValue):
        if (flags & (1 << bit)):
            parts.append(label + " ")
    return ''.join(parts)
def display(events):
global ftrace_format
global isr
current_task = -1
for i in range(0, len(events) -1):
evt = events[i]
if ftrace_format == 0:
if (evt.etype == Event.TYPE_CONTEXTSWITCH):
fTask = findFollowingTask(events, i)
if (fTask >= 0):
print "{:12} : {:>16} ---> {:<16}".format(
formatTime(evt.time),
getTask(evt.data.context),
getTask(fTask))
elif (evt.etype == Event.TYPE_INTERRUPT):
print "{:12} : IRQ{} handler={}".format(
formatTime(evt.time),
evt.data.irq,
getIsr(evt.data.irq))
elif (evt.etype == Event.TYPE_SLEEP):
print "{:12} : SLEPT {} OS ticks".format(
formatTime(evt.time),
evt.data.duration)
elif (evt.etype == Event.TYPE_WAKEUP):
print "{:12} : WAKEUP IRQ{}".format(
formatTime(evt.time),
evt.data.irq)
elif (evt.etype == Event.TYPE_TASK_STATE_CHANGE):
if (evt.data.task == 0):
task = "main_task"
else:
task = getSymbol(evt.data.task).replace('_k_task_obj_', '')
if (evt.data.switch == TaskState.SWITCH):
print "{:12} : " \
"Task switch to {}".format(formatTime(evt.time),
task)
elif (evt.data.switch == TaskState.SET_BIT):
print "{:12} : " \
"Task bits set ({}) {}".format(
formatTime(evt.time),
task,
decodeStateBits(evt.data.bits))
elif (evt.data.switch == TaskState.RESET_BIT):
print "{:12} : " \
"Task bits reset ({}) {}".format(
formatTime(evt.time),
task,
decodeStateBits(evt.data.bits))
elif (evt.etype == Event.TYPE_COMMAND_PACKET):
if (evt.data.task == 0):
task = "main_task"
else:
task = getSymbol(evt.data.task).replace('_k_task_obj_', '')
print "{:12} : " \
"Command ({}) {}".format(formatTime(evt.time),
task,
getSymbol(evt.data.command_ptr))
elif (evt.etype == Event.TYPE_KEVENT):
print "{:12} : " \
"Event {}".format(
formatTime(evt.time),
getSymbol(evt.data.event_ptr).replace(
'_k_event_obj_',''
)
)
else:
print "ERROR type={:08x}".format(evt.data)
else:
if (evt.etype == Event.TYPE_CONTEXTSWITCH):
ftask_id = findFollowingTask(events, i)
if (ftask_id > 0):
task_id = evt.data.context
task_name = getTask(evt.data.context)
if task_name == "main_task":
task_id = 0
if task_name == "_k_server":
task_id = 1
ftask_name = getTask(ftask_id)
if ftask_name == "main_task":
ftask_id = 0
if ftask_name == "_k_server":
ftask_id = 1
print " {:>16}-{:<8} [000] .... {:12}: sched_switch:" \
" prev_comm={} prev_pid={} prev_prio=0" \
" prev_state=S ==> next_comm={} next_pid={}" \
" next_prio=0".format(task_name,
task_id,
formatTime(evt.time),
task_name,
task_id,
ftask_name,
ftask_id)
current_task = evt.data.context
elif (evt.etype == Event.TYPE_INTERRUPT):
print " {:>16}-{:<8} [000] .... {:12}: irq_handler_entry: irq={}" \
" name={} handler={}".format(getTask(current_task),
current_task,
formatTime(evt.time),
evt.data.irq,
evt.data.irq,
getIsr(evt.data.irq))
print " {:>16}-{:<8} [000] .... {:12}: irq_handler_exit: irq={}" \
" ret=handled".format(getTask(current_task),
current_task,
formatTime(evt.time),
evt.data.irq)
# Rolling state for formatTime(): last printed timestamp (ms) and the
# accumulated offset added each time the 32-bit tick counter wraps.
last_timestamp = 0.0
base_timestamp = 0.0
def formatTime(val):
    """Convert a raw tick count to a formatted millisecond string.

    Detects 32-bit counter wrap-around (time going backwards) and keeps
    the output monotonic by accumulating a wrap offset.
    """
    global last_timestamp
    global base_timestamp
    ticks_per_ms = Params.TICKS_PER_MSEC
    wrap_ms = 0xFFFFFFFF / ticks_per_ms
    val_ms = base_timestamp + (val / ticks_per_ms)
    if val_ms < last_timestamp:
        # counter wrapped: push this and all later timestamps forward
        val_ms += wrap_ms
        base_timestamp += wrap_ms
    last_timestamp = val_ms
    return "{:15.3f}".format(val_ms)
def getSymbol(val):
    """Resolve an address to a symbol name.

    Exact matches come straight from the global ``symbols`` map. Otherwise
    the nearest preceding symbol is used and the result is rendered (and
    cached) as "name+offset". Returns "??<val>" when no symbol precedes
    the address (the original raised KeyError in that case).
    """
    global symbols
    if val in symbols:
        return symbols[val]
    # Find the closest symbol that starts at or below the address.
    mem_key = 0
    for key in symbols:
        if mem_key < key < val:
            mem_key = key
    if mem_key not in symbols:
        # No preceding symbol at all: don't crash, report the raw address.
        return "??{}".format(val)
    symbols[val] = symbols[mem_key] + "+{}".format(val - mem_key)
    return symbols[val]
def getTask(val):
    """Resolve an address to a task name (symbol name minus '_stack')."""
    name = getSymbol(val)
    return name.replace("_stack", "")
def getIsr(val):
    """Return the cleaned-up ISR handler name for IRQ number ``val``,
    or "??<val>" when the IRQ is not in the global ``isr`` table."""
    global isr
    try:
        handler = isr[val]
    except KeyError:
        return "??{}".format(val)
    return handler.replace("$","").replace("_stub","")
def loadSymbols(elfFile):
    """Build an address -> symbol-name map from ``readelf -s`` output."""
    os.system('readelf -s ' + elfFile + ' > symbols.txt')
    prog = re.compile("\s+\S+:\s+([a-fA-F0-9]+)\s+\d+\s+\S+\s+\S+\s+\S+\s+\S+\s+(\S+)")
    objList = {}
    with open('symbols.txt', 'r') as f:
        for line in f:
            match = prog.match(line)
            if not match:
                continue
            address = int(match.group(1), 16)
            name = match.group(2)
            if address not in objList:
                objList[address] = name
            # When several symbols share one address, prefer task / event
            # objects (and the IDT base) over whatever came first.
            elif (("_k_task_obj" in name) or ("_k_event_obj" in name) or
                  ("_idt_base_address" in name)):
                objList[address] = name
    os.system('rm -rf symbols.txt')
    return objList
def getIdtFunc(str1, str2):
    """Decode an IDT gate's two hex-dump words into its handler symbol.

    Format: cc1d0800 008e1000 => 0x00101dcc
    IDT encoding (see _IdtEntCreate function in zephyr): the handler
    address bytes are scattered across both little-endian words.
    """
    addr_hex = str2[6:8] + str2[4:6] + str1[2:4] + str1[0:2]
    addr = int(addr_hex, 16)
    if addr in symbols:
        return symbols[addr]
    return "??{}".format(addr_hex)
def getSection(address, elfFile):
    """Return the name of the ELF section containing ``address``.

    Parses ``readelf -S`` output; returns '' when no section matches.
    """
    os.system('readelf -S ' + elfFile + ' > sections.txt')
    prog = re.compile("\s+\[.+\]\s(\S+)\s+\S+\s+([0-9a-fA-F]+)\s+[0-9a-fA-F]+\s+([0-9a-fA-F]+)")
    found = ''
    with open("sections.txt", 'r') as f:
        for line in f:
            match = prog.match(line)
            if not match:
                continue
            start = int(match.group(2), 16)
            end = start + int(match.group(3), 16)
            if start <= address < end:
                found = match.group(1)
                break
    os.system('rm -rf sections.txt')
    return found
def getIsrTable(elfFile):
    """Extract the interrupt descriptor table (IDT) from the ELF file.

    Hex-dumps the section containing '_idt_base_address' via readelf,
    then decodes up to 256 gate entries into an index -> handler-name map.
    Returns 0 (not a dict) if the IDT address or section cannot be found.
    """
    # First get IDT table address '_idt_base_address' symbol
    idt_address = 0
    for addr,sym in symbols.iteritems():
        if sym == '_idt_base_address':
            idt_address = addr
    if idt_address == 0:
        print "IDT table address not found"
        return 0
    sectionName = getSection(idt_address, elfFile)
    if sectionName == '':
        print "IDT section not found"
        return 0
    # Hex-dump the whole section; each output row is
    # "  0x<addr> <word1> <word2> <word3> <word4>  <ascii>"
    os.system('readelf -x ' + sectionName + ' ' + elfFile + ' > ' + sectionName + '.txt')
    prog = re.compile(
        "\s+0x([0-9a-fA-F]+)\s+([0-9a-fA-F]+)\s+([0-9a-fA-F]+)\s+([0-9a-fA-F]+)" \
        "\s+([0-9a-fA-F]+)\s+\S+")
    symbol_table = {}
    first = 0
    with open(sectionName + '.txt', 'r') as f:
        for line in f:
            match = prog.match(line)
            if match:
                address = int(match.group(1), 16)
                if (first == 0):
                    # remember where the dump starts
                    first = address
                # store each 32-bit word under its own address
                symbol_table[address] = match.group(2)
                symbol_table[address+4] = match.group(3)
                symbol_table[address+8] = match.group(4)
                symbol_table[address+12] = match.group(5)
                address_end = address + 12
    capture_on = 0
    index = 0
    isr = {}
    address = first
    # Walk the section 4 bytes at a time until the IDT base is reached,
    # then decode 16 bytes (two 8-byte gates) per step.
    while address < address_end:
        if not capture_on:
            if (address == idt_address):
                capture_on = 1
        if (capture_on):
            isr[index] = getIdtFunc(symbol_table[address], symbol_table[address+4])
            index += 1
            isr[index] = getIdtFunc(symbol_table[address+8], symbol_table[address+12])
            index += 1
            if (index == 256):
                # the IDT holds at most 256 vectors
                break
            address = address + 16
        else:
            address = address + 4
    os.system('rm -rf ' + sectionName + '.txt')
    return isr
# Global state filled in by Main(): address -> symbol map, IRQ -> handler
# map, and the selected output mode (0 = human readable, 1 = ftrace).
symbols = {}
isr = {}
prevTaskName = ""
prevTaskId = -1
ftrace_format = 0
def Main(argv):
global symbols, isr
global ftrace_format
dumpFile = ""
elfFile = ""
sys.argv.pop(0)
iterator = sys.argv.__iter__()
for arg in iterator:
if arg == "--ftrace":
ftrace_format = 1
elif arg == "-c":
Params.TICKS_PER_MSEC = float(iterator.next()) / 1000.0
else:
if not dumpFile:
dumpFile = arg
elif not elfFile:
elfFile = arg
if not elfFile:
print "profile.py [DUMP FILE] [ELF FILE]"
sys.exit(0)
symbols = loadSymbols(elfFile)
isr = getIsrTable(elfFile)
eventList = getData(dumpFile)
if (Params.TICKS_PER_MSEC == 0):
print "Platform info not found ! Use -c option if decoding dump from JTAG"
sys.exit(0)
display(eventList)
# Script entry point: pass everything after the program name to Main().
if __name__ == "__main__":
    Main(sys.argv[1:])
| |
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
# Integer codes for the learning-rate schedules and penalty types; these
# values are passed straight into the Cython SGD implementation.
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
                       "pa1": 4, "pa2": 5}

PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}

SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""

DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
    """Base class for SGD classification and regression."""

    def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=0.1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 warm_start=False, average=False):
        self.loss = loss
        self.penalty = penalty
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.alpha = alpha
        self.C = C
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.n_iter = n_iter
        self.shuffle = shuffle
        self.random_state = random_state
        self.verbose = verbose
        self.eta0 = eta0
        self.power_t = power_t
        self.warm_start = warm_start
        self.average = average
        # fail fast on inconsistent hyper-parameters
        self._validate_params()

        self.coef_ = None
        if self.average > 0:
            # averaged SGD keeps both the running and the averaged weights
            self.standard_coef_ = None
            self.average_coef_ = None
        # iteration count for learning rate schedule
        # must not be int (e.g. if ``learning_rate=='optimal'``)
        self.t_ = None

    def set_params(self, *args, **kwargs):
        """Set hyper-parameters and re-run validation."""
        super(BaseSGD, self).set_params(*args, **kwargs)
        self._validate_params()
        return self

    @abstractmethod
    def fit(self, X, y):
        """Fit model."""

    def _validate_params(self):
        """Validate input params. """
        if not isinstance(self.shuffle, bool):
            raise ValueError("shuffle must be either True or False")
        if self.n_iter <= 0:
            raise ValueError("n_iter must be > zero")
        if not (0.0 <= self.l1_ratio <= 1.0):
            raise ValueError("l1_ratio must be in [0, 1]")
        if self.alpha < 0.0:
            raise ValueError("alpha must be >= 0")
        if self.learning_rate in ("constant", "invscaling"):
            if self.eta0 <= 0.0:
                raise ValueError("eta0 must be > 0")

        # raises ValueError if not registered
        self._get_penalty_type(self.penalty)
        self._get_learning_rate_type(self.learning_rate)

        if self.loss not in self.loss_functions:
            raise ValueError("The loss %s is not supported. " % self.loss)

    def _get_loss_function(self, loss):
        """Get concrete ``LossFunction`` object for str ``loss``. """
        try:
            loss_ = self.loss_functions[loss]
            loss_class, args = loss_[0], loss_[1:]
            # epsilon-parameterized losses take the runtime epsilon value
            # instead of the registered default
            if loss in ('huber', 'epsilon_insensitive',
                        'squared_epsilon_insensitive'):
                args = (self.epsilon, )
            return loss_class(*args)
        except KeyError:
            raise ValueError("The loss %s is not supported. " % loss)

    def _get_learning_rate_type(self, learning_rate):
        # map schedule name to the integer code used by the Cython layer
        try:
            return LEARNING_RATE_TYPES[learning_rate]
        except KeyError:
            raise ValueError("learning rate %s "
                             "is not supported. " % learning_rate)

    def _get_penalty_type(self, penalty):
        # map penalty name to the integer code used by the Cython layer
        penalty = str(penalty).lower()
        try:
            return PENALTY_TYPES[penalty]
        except KeyError:
            raise ValueError("Penalty %s is not supported. " % penalty)

    def _validate_sample_weight(self, sample_weight, n_samples):
        """Set the sample weight array."""
        if sample_weight is None:
            # uniform sample weights
            sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
        else:
            # user-provided array
            sample_weight = np.asarray(sample_weight, dtype=np.float64,
                                       order="C")
        if sample_weight.shape[0] != n_samples:
            raise ValueError("Shapes of X and sample_weight do not match.")
        return sample_weight

    def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
                                intercept_init=None):
        """Allocate mem for parameters; initialize if provided."""
        if n_classes > 2:
            # allocate coef_ for multi-class
            if coef_init is not None:
                coef_init = np.asarray(coef_init, order="C")
                if coef_init.shape != (n_classes, n_features):
                    raise ValueError("Provided ``coef_`` does not match dataset. ")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros((n_classes, n_features),
                                      dtype=np.float64, order="C")

            # allocate intercept_ for multi-class
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, order="C")
                if intercept_init.shape != (n_classes, ):
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init
            else:
                self.intercept_ = np.zeros(n_classes, dtype=np.float64,
                                           order="C")
        else:
            # allocate coef_ for binary problem
            if coef_init is not None:
                coef_init = np.asarray(coef_init, dtype=np.float64,
                                       order="C")
                coef_init = coef_init.ravel()
                if coef_init.shape != (n_features,):
                    raise ValueError("Provided coef_init does not "
                                     "match dataset.")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros(n_features,
                                      dtype=np.float64,
                                      order="C")

            # allocate intercept_ for binary problem
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, dtype=np.float64)
                if intercept_init.shape != (1,) and intercept_init.shape != ():
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init.reshape(1,)
            else:
                self.intercept_ = np.zeros(1, dtype=np.float64, order="C")

        # initialize average parameters
        if self.average > 0:
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = np.zeros(self.coef_.shape,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
                                               dtype=np.float64,
                                               order="C")
def _make_dataset(X, y_i, sample_weight):
    """Create ``Dataset`` abstraction for sparse and dense inputs.

    This also returns the ``intercept_decay`` which is different
    for sparse datasets.
    """
    if not sp.issparse(X):
        # dense input: full decay factor
        return ArrayDataset(X, y_i, sample_weight), 1.0
    # sparse input: damp intercept updates to avoid oscillation
    dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
    return dataset, SPARSE_INTERCEPT_DECAY
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
               pos_weight, neg_weight, sample_weight):
    """Fit a single binary classifier.

    The i'th class is considered the "positive" class.
    Returns the trained (coef, intercept) pair for that classifier.
    """
    # if average is not true, average_coef, and average_intercept will be
    # unused
    y_i, coef, intercept, average_coef, average_intercept = \
        _prepare_fit_binary(est, y, i)
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
    dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)

    # integer codes consumed by the Cython training routines
    penalty_type = est._get_penalty_type(est.penalty)
    learning_rate_type = est._get_learning_rate_type(learning_rate)

    # XXX should have random_state_!
    random_state = check_random_state(est.random_state)
    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    seed = random_state.randint(0, np.iinfo(np.int32).max)

    if not est.average:
        return plain_sgd(coef, intercept, est.loss_function,
                         penalty_type, alpha, C, est.l1_ratio,
                         dataset, n_iter, int(est.fit_intercept),
                         int(est.verbose), int(est.shuffle), seed,
                         pos_weight, neg_weight,
                         learning_rate_type, est.eta0,
                         est.power_t, est.t_, intercept_decay)

    else:
        standard_coef, standard_intercept, average_coef, \
            average_intercept = average_sgd(coef, intercept, average_coef,
                                            average_intercept,
                                            est.loss_function, penalty_type,
                                            alpha, C, est.l1_ratio, dataset,
                                            n_iter, int(est.fit_intercept),
                                            int(est.verbose), int(est.shuffle),
                                            seed, pos_weight, neg_weight,
                                            learning_rate_type, est.eta0,
                                            est.power_t, est.t_,
                                            intercept_decay,
                                            est.average)

        # store the averaged intercept on the estimator as a side effect
        if len(est.classes_) == 2:
            est.average_intercept_[0] = average_intercept
        else:
            est.average_intercept_[i] = average_intercept

        return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
                                           LinearClassifierMixin)):
    """Abstract base class for SGD classifiers (one-vs-all for multiclass)."""

    # loss name -> (LossFunction class, default constructor args)
    loss_functions = {
        "hinge": (Hinge, 1.0),
        "squared_hinge": (SquaredHinge, 1.0),
        "perceptron": (Hinge, 0.0),
        "log": (Log, ),
        "modified_huber": (ModifiedHuber, ),
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }

    @abstractmethod
    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
                 epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 class_weight=None, warm_start=False, average=False):

        super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
                                                alpha=alpha, l1_ratio=l1_ratio,
                                                fit_intercept=fit_intercept,
                                                n_iter=n_iter, shuffle=shuffle,
                                                verbose=verbose,
                                                epsilon=epsilon,
                                                random_state=random_state,
                                                learning_rate=learning_rate,
                                                eta0=eta0, power_t=power_t,
                                                warm_start=warm_start,
                                                average=average)
        self.class_weight = class_weight
        self.classes_ = None
        self.n_jobs = int(n_jobs)

    def _partial_fit(self, X, y, alpha, C,
                     loss, learning_rate, n_iter,
                     classes, sample_weight,
                     coef_init, intercept_init):
        """Shared incremental-fit implementation used by fit/partial_fit."""
        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
        n_samples, n_features = X.shape

        self._validate_params()
        _check_partial_fit_first_call(self, classes)

        n_classes = self.classes_.shape[0]

        # Allocate datastructures from input arguments
        self._expanded_class_weight = compute_class_weight(self.class_weight,
                                                           self.classes_, y)
        sample_weight = self._validate_sample_weight(sample_weight, n_samples)

        if self.coef_ is None or coef_init is not None:
            self._allocate_parameter_mem(n_classes, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous data %d."
                             % (n_features, self.coef_.shape[-1]))

        self.loss_function = self._get_loss_function(loss)
        if self.t_ is None:
            self.t_ = 1.0

        # delegate to concrete training procedure
        if n_classes > 2:
            self._fit_multiclass(X, y, alpha=alpha, C=C,
                                 learning_rate=learning_rate,
                                 sample_weight=sample_weight, n_iter=n_iter)
        elif n_classes == 2:
            self._fit_binary(X, y, alpha=alpha, C=C,
                             learning_rate=learning_rate,
                             sample_weight=sample_weight, n_iter=n_iter)
        else:
            raise ValueError("The number of class labels must be "
                             "greater than one.")

        return self

    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        """Full (re)fit: resets state then delegates to _partial_fit."""
        if hasattr(self, "classes_"):
            self.classes_ = None

        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
        n_samples, n_features = X.shape

        # labels can be encoded as float, int, or string literals
        # np.unique sorts in asc order; largest class id is positive class
        classes = np.unique(y)

        if self.warm_start and self.coef_ is not None:
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None

        if self.average > 0:
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = None
            self.average_intercept_ = None

        # Clear iteration count for multiple call to fit.
        self.t_ = None

        self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
                          classes, sample_weight, coef_init, intercept_init)

        return self

    def _fit_binary(self, X, y, alpha, C, sample_weight,
                    learning_rate, n_iter):
        """Fit a binary classifier on X and y. """
        coef, intercept = fit_binary(self, 1, X, y, alpha, C,
                                     learning_rate, n_iter,
                                     self._expanded_class_weight[1],
                                     self._expanded_class_weight[0],
                                     sample_weight)

        self.t_ += n_iter * X.shape[0]

        # need to be 2d
        if self.average > 0:
            # averaging only kicks in once enough samples have been seen
            if self.average <= self.t_ - 1:
                self.coef_ = self.average_coef_.reshape(1, -1)
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_.reshape(1, -1)
                self.standard_intercept_ = np.atleast_1d(intercept)
                self.intercept_ = self.standard_intercept_
        else:
            self.coef_ = coef.reshape(1, -1)
            # intercept is a float, need to convert it to an array of length 1
            self.intercept_ = np.atleast_1d(intercept)

    def _fit_multiclass(self, X, y, alpha, C, learning_rate,
                        sample_weight, n_iter):
        """Fit a multi-class classifier by combining binary classifiers

        Each binary classifier predicts one class versus all others. This
        strategy is called OVA: One Versus All.
        """
        # Use joblib to fit OvA in parallel.
        result = Parallel(n_jobs=self.n_jobs, backend="threading",
                          verbose=self.verbose)(
            delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
                                n_iter, self._expanded_class_weight[i], 1.,
                                sample_weight)
            for i in range(len(self.classes_)))

        for i, (_, intercept) in enumerate(result):
            self.intercept_[i] = intercept

        self.t_ += n_iter * X.shape[0]

        if self.average > 0:
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                # NOTE(review): ``intercept`` here is the last loop value
                # from the enumerate above — looks suspicious; confirm
                # against upstream scikit-learn.
                self.standard_intercept_ = np.atleast_1d(intercept)
                self.intercept_ = self.standard_intercept_

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of the training data

        y : numpy array, shape (n_samples,)
            Subset of the target values

        classes : array, shape (n_classes,)
            Classes across all calls to partial_fit.
            Can be obtained by via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.

        Returns
        -------
        self : returns an instance of self.
        """
        if self.class_weight in ['balanced', 'auto']:
            raise ValueError("class_weight '{0}' is not supported for "
                             "partial_fit. In order to use 'balanced' weights, "
                             "use compute_class_weight('{0}', classes, y). "
                             "In place of y you can us a large enough sample "
                             "of the full training set target to properly "
                             "estimate the class frequency distributions. "
                             "Pass the resulting weights as the class_weight "
                             "parameter.".format(self.class_weight))
        return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
                                 learning_rate=self.learning_rate, n_iter=1,
                                 classes=classes, sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)

    def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data

        y : numpy array, shape (n_samples,)
            Target values

        coef_init : array, shape (n_classes, n_features)
            The initial coefficients to warm-start the optimization.

        intercept_init : array, shape (n_classes,)
            The initial intercept to warm-start the optimization.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed. These weights will
            be multiplied with class_weight (passed through the
            contructor) if class_weight is specified

        Returns
        -------
        self : returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init, intercept_init=intercept_init,
                         sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
    # Dispatch on the configured loss; only 'log' and 'modified_huber'
    # support probability estimates.
    if self.loss == "log":
        # Logistic loss: delegate to the shared logistic-regression helper.
        return self._predict_proba_lr(X)

    elif self.loss == "modified_huber":
        binary = (len(self.classes_) == 2)
        scores = self.decision_function(X)

        if binary:
            prob2 = np.ones((scores.shape[0], 2))
            # NOTE: `prob` is a *view* of prob2's second column, so the
            # in-place clip/add/divide below also update prob2 itself.
            prob = prob2[:, 1]
        else:
            prob = scores

        # (clip(score, -1, 1) + 1) / 2 maps each score into [0, 1];
        # np.clip writes its result in place via the `prob` out argument.
        np.clip(scores, -1, 1, prob)
        prob += 1.
        prob /= 2.

        if binary:
            # Second column now holds P(class 1); first column is 1 - P.
            prob2[:, 0] -= prob
            prob = prob2
        else:
            # the above might assign zero to all classes, which doesn't
            # normalize neatly; work around this to produce uniform
            # probabilities
            prob_sum = prob.sum(axis=1)
            all_zero = (prob_sum == 0)
            if np.any(all_zero):
                prob[all_zero, :] = 1
                prob_sum[all_zero] = len(self.classes_)

            # normalize so each row sums to one (one-vs.-rest estimates)
            prob /= prob_sum.reshape((prob.shape[0], -1))

        return prob

    else:
        raise NotImplementedError("predict_(log_)proba only supported when"
                                  " loss='log' or loss='modified_huber' "
                                  "(%r given)" % self.loss)
@property
def predict_log_proba(self):
    """Log of probability estimates.

    This method is only available for log loss and modified Huber loss.

    When loss="modified_huber", probability estimates may be hard zeros
    and ones, so taking the logarithm is not possible.

    See ``predict_proba`` for details.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Returns
    -------
    T : array-like, shape (n_samples, n_classes)
        Returns the log-probability of the sample for each class in the
        model, where classes are ordered as they are in
        `self.classes_`.
    """
    # Same property trick as predict_proba: raise on unsupported losses,
    # otherwise hand back the bound implementation.
    self._check_proba()
    return self._predict_log_proba
def _predict_log_proba(self, X):
    """Elementwise natural log of the ``predict_proba`` output for X."""
    probabilities = self.predict_proba(X)
    return np.log(probabilities)
class BaseSGDRegressor(BaseSGD, RegressorMixin):
    """Base class for linear regression models fitted with (averaged) SGD.

    Concrete subclasses only provide a public constructor; all fitting
    logic (parameter allocation, the Cython SGD loops, averaging) lives
    here.
    """

    # Maps a loss name to (loss class, *default constructor arguments).
    loss_functions = {
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }

    @abstractmethod
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False, average=False):
        # Abstract pass-through constructor: forwards everything to BaseSGD
        # and prevents direct instantiation of this base class.
        super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                               alpha=alpha, l1_ratio=l1_ratio,
                                               fit_intercept=fit_intercept,
                                               n_iter=n_iter, shuffle=shuffle,
                                               verbose=verbose,
                                               epsilon=epsilon,
                                               random_state=random_state,
                                               learning_rate=learning_rate,
                                               eta0=eta0, power_t=power_t,
                                               warm_start=warm_start,
                                               average=average)

    def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
                     n_iter, sample_weight,
                     coef_init, intercept_init):
        # Validate / convert inputs: CSR-compatible, C-ordered float64.
        X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
        y = astype(y, np.float64, copy=False)

        n_samples, n_features = X.shape

        self._validate_params()

        # Allocate datastructures from input arguments
        sample_weight = self._validate_sample_weight(sample_weight, n_samples)

        if self.coef_ is None:
            # First call: allocate coef_/intercept_ (regression => 1 output).
            self._allocate_parameter_mem(1, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous data %d."
                             % (n_features, self.coef_.shape[-1]))

        # Lazily allocate the running-average buffers used by averaged SGD.
        if self.average > 0 and self.average_coef_ is None:
            self.average_coef_ = np.zeros(n_features,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(1,
                                               dtype=np.float64,
                                               order="C")

        self._fit_regressor(X, y, alpha, C, loss, learning_rate,
                            sample_weight, n_iter)

        return self

    def partial_fit(self, X, y, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of training data

        y : numpy array of shape (n_samples,)
            Subset of target values

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.

        Returns
        -------
        self : returns an instance of self.
        """
        # Incremental API: exactly one pass (n_iter=1) over this batch.
        return self._partial_fit(X, y, self.alpha, C=1.0,
                                 loss=self.loss,
                                 learning_rate=self.learning_rate, n_iter=1,
                                 sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)

    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        # warm_start: reuse the previous solution as the starting point
        # unless the caller supplied explicit initial values.
        if self.warm_start and self.coef_ is not None:
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None

        if self.average > 0:
            # Averaged SGD tracks the "standard" (non-averaged) solution
            # separately from the running averages.
            self.standard_intercept_ = self.intercept_
            self.standard_coef_ = self.coef_
            self.average_coef_ = None
            self.average_intercept_ = None

        # Clear iteration count for multiple call to fit.
        self.t_ = None

        return self._partial_fit(X, y, alpha, C, loss, learning_rate,
                                 self.n_iter, sample_weight,
                                 coef_init, intercept_init)

    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data

        y : numpy array, shape (n_samples,)
            Target values

        coef_init : array, shape (n_features,)
            The initial coefficients to warm-start the optimization.

        intercept_init : array, shape (1,)
            The initial intercept to warm-start the optimization.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init,
                         intercept_init=intercept_init,
                         sample_weight=sample_weight)

    @deprecated(" and will be removed in 0.19.")
    def decision_function(self, X):
        """Predict using the linear model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)

        X = check_array(X, accept_sparse='csr')

        # X @ coef_.T + intercept_, flattened to 1-D (single output).
        scores = safe_sparse_dot(X, self.coef_.T,
                                 dense_output=True) + self.intercept_
        return scores.ravel()

    def predict(self, X):
        """Predict using the linear model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        return self.decision_function(X)

    def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
                       sample_weight, n_iter):
        # Wrap the input in the sequential-access dataset used by the
        # Cython SGD routines.
        dataset, intercept_decay = _make_dataset(X, y, sample_weight)

        loss_function = self._get_loss_function(loss)
        penalty_type = self._get_penalty_type(self.penalty)
        learning_rate_type = self._get_learning_rate_type(learning_rate)

        if self.t_ is None:
            self.t_ = 1.0

        random_state = check_random_state(self.random_state)
        # numpy mtrand expects a C long which is a signed 32 bit integer under
        # Windows
        seed = random_state.randint(0, np.iinfo(np.int32).max)

        if self.average > 0:
            self.standard_coef_, self.standard_intercept_, \
                self.average_coef_, self.average_intercept_ =\
                average_sgd(self.standard_coef_,
                            self.standard_intercept_[0],
                            self.average_coef_,
                            self.average_intercept_[0],
                            loss_function,
                            penalty_type,
                            alpha, C,
                            self.l1_ratio,
                            dataset,
                            n_iter,
                            int(self.fit_intercept),
                            int(self.verbose),
                            int(self.shuffle),
                            seed,
                            1.0, 1.0,
                            learning_rate_type,
                            self.eta0, self.power_t, self.t_,
                            intercept_decay, self.average)

            self.average_intercept_ = np.atleast_1d(self.average_intercept_)
            self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
            self.t_ += n_iter * X.shape[0]

            # Expose the averaged solution once enough samples have been
            # seen (self.average is the sample-count threshold); until
            # then keep the plain SGD solution. TODO confirm against the
            # averaged-SGD spec elsewhere in this module.
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.intercept_ = self.standard_intercept_

        else:
            self.coef_, self.intercept_ = \
                plain_sgd(self.coef_,
                          self.intercept_[0],
                          loss_function,
                          penalty_type,
                          alpha, C,
                          self.l1_ratio,
                          dataset,
                          n_iter,
                          int(self.fit_intercept),
                          int(self.verbose),
                          int(self.shuffle),
                          seed,
                          1.0, 1.0,
                          learning_rate_type,
                          self.eta0, self.power_t, self.t_,
                          intercept_decay)

            self.t_ += n_iter * X.shape[0]
            self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
    """Linear model fitted by minimizing a regularized empirical loss with SGD

    SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a decreasing strength schedule (aka learning rate).

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.

    This implementation works with data represented as dense numpy arrays of
    floating point values for the features.

    Parameters
    ----------
    loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
                or 'squared_epsilon_insensitive'
        The loss function to be used. Defaults to 'squared_loss' which refers
        to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
        focus less on getting outliers correct by switching from squared to
        linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
        errors less than epsilon and is linear past that; this is the loss
        function used in SVR. 'squared_epsilon_insensitive' is the same but
        becomes squared loss past a tolerance of epsilon.

    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.

    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001

    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.

    n_iter : int, optional
        The number of passes over the training data (aka epochs). The number
        of iterations is set to 1 if using partial_fit.
        Defaults to 5.

    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    verbose : integer, optional
        The verbosity level.

    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.

    learning_rate : string, optional
        The learning rate:
        constant: eta = eta0
        optimal: eta = 1.0/(alpha * t)
        invscaling: eta = eta0 / pow(t, power_t) [default]

    eta0 : double, optional
        The initial learning rate [default 0.01].

    power_t : double, optional
        The exponent for inverse scaling learning rate [default 0.25].

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
        samples.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        Weights assigned to the features.

    intercept_ : array, shape (1,)
        The intercept term.

    `average_coef_` : array, shape (n_features,)
        Averaged weights assigned to the features.

    `average_intercept_` : array, shape (1,)
        The averaged intercept term.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = linear_model.SGDRegressor()
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
           fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
           loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
           random_state=None, shuffle=True, verbose=0, warm_start=False)

    See also
    --------
    Ridge, ElasticNet, Lasso, SVR

    """
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False, average=False):
        # Concrete pass-through constructor; all fitting logic is in
        # BaseSGDRegressor.
        super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                           alpha=alpha, l1_ratio=l1_ratio,
                                           fit_intercept=fit_intercept,
                                           n_iter=n_iter, shuffle=shuffle,
                                           verbose=verbose,
                                           epsilon=epsilon,
                                           random_state=random_state,
                                           learning_rate=learning_rate,
                                           eta0=eta0, power_t=power_t,
                                           warm_start=warm_start,
                                           average=average)
| |
#!/usr/bin/env python
"""
check_glstring.py
This script does a few sanity checks of a GL String
Checks the following...
- if a locus is found in more than one locus block
e.g., this is good
HLA-A*01:01/HLA-A*01:02+HLA-A*24:02^HLA-B*08:01+HLA-B*44:02^"
e.g., this is bad
HLA-A*01:01/HLA-A*01:02+HLA-A*24:02^HLA-B*08:01+HLA-A*44:02^"
- if any of the following contain more than one locus
genotype lists
genotypes
allele lists
Note: Both genotypes and genotype lists may contain phased loci,
and so these may contain multiple loci
"""
import argparse
import re
def get_loci(glstring):
    """
    Takes GL String and returns a set containing all the loci
    (the part of each allele name before the '*').
    """
    return {allele.split('*')[0] for allele in get_alleles(glstring)}
def get_alleles(glstring):
    """
    Takes a GL String, and returns a set containing all the alleles
    (every token between the GL String delimiters / ~ + | ^).
    """
    return set(re.split(r'[/~+|^]', glstring))
def get_allele_lists(glstring):
    """
    Takes a GL String and returns a list of allele lists it contains
    (segments between ~ + | ^ that still contain a '/' separator).
    """
    return [segment for segment in re.split(r'[~+|^]', glstring)
            if "/" in segment]
def get_genotypes(glstring):
    """
    Take a GL String, and return a list of genotypes
    (segments between | and ^ that contain a '+' separator).
    """
    return [candidate for candidate in re.split(r'[|^]', glstring)
            if "+" in candidate]
def get_genotype_lists(glstring):
    """
    Take a GL String, and return a list of genotype lists
    (locus blocks, split on '^', that contain a '|' separator).
    """
    return [block for block in glstring.split('^') if "|" in block]
def get_locus_blocks(glstring):
    """
    Take a GL String, and return a list of locus blocks
    (the segments separated by '^').
    """
    return re.split(r'\^', glstring)
def get_phased(glstring):
    """
    Take a GL String and return a list of phased alleles
    (segments between + | ^ that contain the phase operator '~').
    """
    # BUG FIX: the original pattern r'[+|^\]' escaped the closing
    # bracket, producing an unterminated character class and therefore a
    # re.error on every call. The class should contain only + | ^.
    phased_list = []
    for phased in re.split(r'[+|^]', glstring):
        if "~" in phased:
            phased_list.append(phased)
    return phased_list
def get_duplicates(setlist):
    """
    Takes a list of sets, and returns a set of items that are found in
    more than one set in the list
    """
    seen = set()
    duplicates = set()
    for current in setlist:
        # Anything already seen in an earlier set is a duplicate.
        duplicates |= seen & current
        seen |= current
    return duplicates
def check_locus_blocks(glstring):
    """
    Takes a GL String and checks to see if any loci are found in
    more than one locus block.

    Returns a tuple containing a list of locus blocks, and set of loci
    found in more than one block
    """
    locusblocks = glstring.split('^')
    duplicates = set()
    if len(locusblocks) > 1:
        per_block_loci = [get_loci(block) for block in locusblocks]
        duplicates = get_duplicates(per_block_loci)
    return locusblocks, duplicates
def check_genotype_lists(glstring):
    """
    Takes a GL String, and checks whether any unphased genotype list
    contains more than one locus. Returns a list of tuples: (genotype
    list, set of loci found in it, status string). The status is
    'Unphased - WARNING' when an unphased genotype list spans multiple
    loci and at least one locus name does not contain "DR"; otherwise it
    is 'OK'. Phased genotype lists (containing '~') are not flagged
    here and always report 'OK'.
    """
    checked_gl = []
    for genotype_list in get_genotype_lists(glstring):
        loci = get_loci(genotype_list)
        # Only unphased lists spanning several loci can be a problem;
        # multi-locus DR haplotype names are tolerated.
        suspicious = (len(loci) > 1 and '~' not in genotype_list and
                      any("DR" not in locus for locus in loci))
        msg = 'Unphased - WARNING' if suspicious else 'OK'
        checked_gl.append((genotype_list, loci, msg))
    return checked_gl
def check_allele_lists(glstring):
    """
    Takes a GL String, and checks to see if there are more than one
    locus in any of the allele lists. A list of tuples is returned. Each
    tuple consists of the allele list, a set of loci found in the allele
    list, and a text string: 'OK' if only one locus is found, or
    'WARNING' if more than one locus is found.
    """
    checked_al = []
    for allele_list in get_allele_lists(glstring):
        loci = get_loci(allele_list)
        msg = 'WARNING' if len(loci) > 1 else 'OK'
        checked_al.append((allele_list, loci, msg))
    return checked_al
def check_genotypes(glstring):
    """
    Takes a GL String, and checks whether any unphased genotype contains
    more than one locus. Returns a list of tuples: (genotype, set of
    loci found in it, status string). The status is 'Unphased - WARNING'
    when an unphased genotype spans multiple loci and at least one locus
    name does not contain "DR"; phased genotypes (containing '~') and
    single-locus genotypes report 'OK'.
    """
    checked_gt = []
    for genotype in get_genotypes(glstring):
        loci = get_loci(genotype)
        # Phased genotypes legitimately mix loci; DR haplotypes are also
        # tolerated, mirroring check_genotype_lists.
        suspicious = (len(loci) > 1 and '~' not in genotype and
                      any("DR" not in locus for locus in loci))
        msg = 'Unphased - WARNING' if suspicious else 'OK'
        checked_gt.append((genotype, loci, msg))
    return checked_gt
def checkedstr(checked):
    """
    Takes a list of checked (item, loci, status) tuples and returns a
    summary string: the '|'-joined loci of the first non-'OK' entry, or
    'OK' when every entry passed (or the list is empty).
    """
    for _, loci, status in checked:
        if status != 'OK':
            return '|'.join(loci)
    return 'OK'
def main():
    """Parse the command line, run every GL String sanity check, and
    print a comma-separated summary: locus-block duplicates, genotype
    lists, genotypes, allele lists (each field is 'OK' or the offending
    loci joined by '|')."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-g", "--glstring",
                        required=True,
                        help="GL String to be checked",
                        type=str)
    args = parser.parse_args()

    # required=True guarantees glstring is present; the guard also skips
    # an empty string.
    if args.glstring:
        gl = args.glstring
        # print("\n", "GL String =", gl, "\n")
        locusblocks, duplicates = check_locus_blocks(gl)
        retstr = ""
        if len(locusblocks) > 1:
            if len(duplicates) == 0:
                retstr = "OK,"
            else:
                # Report loci that appear in more than one locus block.
                retstr = '|'.join(duplicates)+","
        else:
            retstr = "OK,"
        retstr = retstr+checkedstr(check_genotype_lists(gl))+','
        retstr = retstr+checkedstr(check_genotypes(gl))+','
        retstr = retstr+checkedstr(check_allele_lists(gl))
        print(retstr)


if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
###############################################################################
# Name: SystemSettingsDemo.py #
# Purpose: SystemSettings Test and Demo File #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2009 Cody Precord <staff@editra.org> #
# Licence: wxWindows Licence #
###############################################################################
"""
<b>wx.SystemSettings</b>:
<p>Allows the application to ask for details about the system.</p>
<p>This can include settings such as standard colours, fonts, and user interface
element sizes.</p>
"""
__author__ = "Cody Precord <cprecord@editra.org>"
#-----------------------------------------------------------------------------#
# Imports
import os
import sys
import wx
import wx.lib.scrolledpanel as scrolled
#-----------------------------------------------------------------------------#
class TestPanel(wx.Panel):
    """Top-level demo panel: a notebook with one page per settings category."""

    def __init__(self, parent, log):
        wx.Panel.__init__(self, parent)

        # Attributes
        self.log = log
        self._nb = wx.Notebook(self)

        # One scrolled page per demo panel class, in display order.
        pages = ((SysColorPanel, "System Colors"),
                 (SysFontPanel, "System Fonts"),
                 (SysMetricPanel, "System Metrics"),
                 (SysFeaturePanel, "System Features"))
        for ctor, title in pages:
            self._nb.AddPage(ScrolledWrapper(self._nb, ctor, self.log), title)

        # Layout
        self.__DoLayout()

    def __DoLayout(self):
        """Layout the panel: the notebook fills all available space."""
        vsizer = wx.BoxSizer(wx.VERTICAL)
        vsizer.Add(self._nb, 1, wx.EXPAND)
        self.SetSizer(vsizer)
        self.SetAutoLayout(True)
#----------------------------------------------------------------------
class SysPanelBase(wx.Panel):
    """Shared base for the demo pages: owns the value list, sizing, and
    the repaint plumbing; subclasses override OnPaint."""

    def __init__(self, parent, log):
        wx.Panel.__init__(self, parent)

        # Attributes
        self.log = log
        self._vals = []

        ## Event Handlers
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_SCROLLWIN, self.OnScroll)

    def DoGetBestSize(self):
        """Return the best size for this panel: wide enough for the
        longest label plus a value column, one 22px row per value."""
        widths = [self.GetTextExtent(value)[0] for value in self._vals]
        self._maxw = max(widths) if widths else 0
        best_w = self._maxw + 75
        best_h = (len(self._vals) + 1) * 22
        return (best_w, best_h)

    def SetupPaintDC(self, dc):
        """Prepare the paint DC: panel font, cleared white background,
        black pen and text.

        @param dc: paint DC
        """
        dc.SetFont(self.GetFont())
        dc.SetBrush(wx.WHITE_BRUSH)
        dc.Clear()
        dc.DrawRectangle(self.GetClientRect())
        dc.SetPen(wx.BLACK_PEN)
        dc.SetTextForeground(wx.BLACK)

    def OnPaint(self, evt):
        # Base class paints nothing; subclasses draw the real content.
        dc = wx.PaintDC(self)
        evt.Skip()

    def OnSize(self, evt):
        self.Refresh()
        evt.Skip()

    def OnScroll(self, evt):
        self.Refresh()
        evt.Skip()

    def OnErase(self, evt):
        # Swallow erase-background events (reduces flicker when the
        # subclasses use buffered painting).
        pass
#----------------------------------------------------------------------
class SysColorPanel(SysPanelBase):
    """Demo page listing every wx.SYS_COLOUR_* constant with a colour swatch."""

    def __init__(self, parent, log):
        SysPanelBase.__init__(self, parent, log)

        # Attributes:
        self._box = (50, 15)  # Color box dimensions (width, height)
        self._maxw = 0        # Widest label; set by DoGetBestSize
        self._vals = [ color for color in dir(wx)
                       if color.startswith('SYS_COLOUR_') and
                          color != 'SYS_COLOUR_MAX' ]

    def OnPaint(self, evt):
        """Draw one row per constant: the name, then a filled swatch of
        the corresponding system colour."""
        dc = wx.AutoBufferedPaintDCFactory(self)
        self.SetupPaintDC(dc)

        # Removed unused `column`/`row_count` locals from the original;
        # layout is a single column driven by nextx/nexty only.
        nextx = 10
        nexty = 10
        for val in self._vals:
            syscolor = wx.SystemSettings.GetColour(getattr(wx, val))
            dc.SetBrush(wx.Brush(syscolor))

            # Draw label
            dc.DrawText(val, nextx, nexty)

            # Swatch goes in a fixed column right of the widest label
            nextx += self._maxw + 8
            dc.DrawRectangle(nextx, nexty, self._box[0], self._box[1])
            nextx = 10
            nexty += 20
#----------------------------------------------------------------------
class SysFontPanel(SysPanelBase):
    """Demo page listing system font constants with a sample rendered in
    each font."""

    def __init__(self, parent, log):
        SysPanelBase.__init__(self, parent, log)

        # Attributes:
        self._maxw = 0  # Widest label; set by DoGetBestSize
        self._vals = ['SYS_ANSI_FIXED_FONT',
                      'SYS_ANSI_VAR_FONT',
                      'SYS_DEFAULT_GUI_FONT',
                      'SYS_DEVICE_DEFAULT_FONT',
#                      'SYS_ICONTITLE_FONT',
                      'SYS_OEM_FIXED_FONT',
#                      'SYS_SYSTEM_FIXED_FONT',
                      'SYS_SYSTEM_FONT'
                      ]

    def OnPaint(self, evt):
        """Draw one row per constant: the name in the panel font, then
        the font's face name rendered in that system font."""
        dc = wx.AutoBufferedPaintDCFactory(self)
        self.SetupPaintDC(dc)

        # Removed unused `column`/`row_count` locals from the original.
        nextx = 10
        nexty = 10
        for val in self._vals:
            # Reset to the panel font so the label column is uniform
            # (the previous iteration changed the DC font below).
            dc.SetFont(self.GetFont())
            sysfont = wx.SystemSettings.GetFont(getattr(wx, val))

            # Draw label
            dc.DrawText(val, nextx, nexty)

            # Sample column: face name drawn in the system font itself
            nextx += self._maxw + 8
            dc.SetFont(sysfont)
            dc.DrawText(sysfont.GetFaceName(), nextx, nexty)
            nextx = 10
            nexty += 20
#----------------------------------------------------------------------
class SysMetricPanel(SysPanelBase):
    """Demo page listing wx.SYS_* metric constants with their values."""

    def __init__(self, parent, log):
        SysPanelBase.__init__(self, parent, log)

        # Attributes:
        self._maxw = 0  # Widest label; set by DoGetBestSize
        self._vals = ['SYS_BORDER_X', 'SYS_BORDER_Y', 'SYS_CAPTION_Y',
                      'SYS_CURSOR_X', 'SYS_CURSOR_Y', 'SYS_DCLICK_X',
                      'SYS_DCLICK_Y', 'SYS_DRAG_X', 'SYS_DRAG_Y',
                      'SYS_EDGE_X', 'SYS_EDGE_Y', 'SYS_FRAMESIZE_X',
                      'SYS_FRAMESIZE_Y', 'SYS_HSCROLL_ARROW_X',
                      'SYS_HSCROLL_ARROW_Y', 'SYS_HSCROLL_Y', 'SYS_HTHUMB_X',
                      'SYS_ICONSPACING_X', 'SYS_ICONSPACING_Y', 'SYS_ICON_X',
                      'SYS_ICON_Y', 'SYS_MENU_Y', 'SYS_SCREEN_X',
                      'SYS_SCREEN_Y', 'SYS_SMALLICON_X', 'SYS_SMALLICON_Y',
                      'SYS_VSCROLL_ARROW_X', 'SYS_VSCROLL_ARROW_Y',
                      'SYS_VSCROLL_X', 'SYS_VTHUMB_Y', 'SYS_WINDOWMIN_X',
                      'SYS_WINDOWMIN_Y', 'SYS_MOUSE_BUTTONS',
                      'SYS_NETWORK_PRESENT', 'SYS_PENWINDOWS_PRESENT',
                      'SYS_SHOW_SOUNDS', 'SYS_SWAP_BUTTONS']
        self._vals.sort()

    def OnPaint(self, evt):
        """Draw one row per constant: the name, then the metric value."""
        dc = wx.AutoBufferedPaintDCFactory(self)
        self.SetupPaintDC(dc)

        # Removed unused `column`/`row_count` locals from the original.
        nextx = 10
        nexty = 10
        for val in self._vals:
            sysmetric = wx.SystemSettings.GetMetric(getattr(wx, val))

            # Draw label
            dc.DrawText(val, nextx, nexty)

            # Value column right of the widest label
            nextx += self._maxw + 8
            dc.DrawText(repr(sysmetric), nextx, nexty)
            nextx = 10
            nexty += 20
#----------------------------------------------------------------------
class SysFeaturePanel(SysPanelBase):
    """Demo page listing wx.SYS_* feature flags and whether each is present."""

    def __init__(self, parent, log):
        SysPanelBase.__init__(self, parent, log)

        # Attributes:
        self._maxw = 0  # Widest label; set by DoGetBestSize
        self._vals = ['SYS_CAN_DRAW_FRAME_DECORATIONS',
                      'SYS_CAN_ICONIZE_FRAME',
                      'SYS_TABLET_PRESENT' ]

    def OnPaint(self, evt):
        """Draw one row per constant: the name, then the HasFeature result."""
        dc = wx.AutoBufferedPaintDCFactory(self)
        self.SetupPaintDC(dc)

        # Removed unused `column`/`row_count` locals from the original.
        nextx = 10
        nexty = 10
        for val in self._vals:
            sysfeature = wx.SystemSettings.HasFeature(getattr(wx, val))

            # Draw label
            dc.DrawText(val, nextx, nexty)

            # Value column right of the widest label
            nextx += self._maxw + 8
            dc.DrawText(repr(sysfeature), nextx, nexty)
            nextx = 10
            nexty += 20
#----------------------------------------------------------------------
class ScrolledWrapper(scrolled.ScrolledPanel):
    """Hosts a demo panel inside a scrolled container."""

    def __init__(self, parent, ctor, log):
        """Wrap the window created by *ctor* in a scrolled panel."""
        scrolled.ScrolledPanel.__init__(self, parent)

        # Attributes
        self._panel = ctor(self, log)

        # Layout: the wrapped panel fills the scrolled area
        outer = wx.BoxSizer(wx.VERTICAL)
        outer.Add(self._panel, 1, wx.EXPAND)
        self.SetSizer(outer)
        self.SetAutoLayout(True)

        # Setup
        self.SetupScrolling()
#----------------------------------------------------------------------
def runTest(frame, nb, log):
    """Entry point used by the wxPython demo framework: build and return
    the demo panel hosted in notebook *nb*."""
    return TestPanel(nb, log)
class TestLog:
    """Minimal stand-in log target that echoes messages to stdout."""

    def __init__(self):
        # No state needed; the demo only calls write().
        pass

    def write(self, msg):
        """Echo *msg* to stdout (mimics the demo framework's log API)."""
        print(msg)
#----------------------------------------------------------------------
# Text shown in the wxPython demo framework's "Overview" tab.
overview = __doc__

#-----------------------------------------------------------------------------#
if __name__ == '__main__':
    try:
        # Prefer the demo framework's runner when it is importable.
        import sys
        import run
    except ImportError:
        # Standalone fallback: host the panel in a plain frame.
        app = wx.App(False)
        frame = wx.Frame(None, title="SystemSettings Demo", size=(500, 500))
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(TestPanel(frame, TestLog()), 1, wx.EXPAND)
        frame.CreateStatusBar()
        frame.SetSizer(sizer)
        frame.Show()
        app.MainLoop()
    else:
        run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| |
# -*- coding: utf-8 -*-
"""Python client for InfluxDB v0.8."""
import warnings
import json
import socket
import requests
import requests.exceptions
from six.moves import xrange
from six.moves.urllib.parse import urlparse
from influxdb import chunked_json
session = requests.Session()
class InfluxDBClientError(Exception):
    """Raised when an error occurs in the request."""

    def __init__(self, content, code=-1):
        """Initialize an InfluxDBClientError handler.

        The exception message is "<code>: <content>"; both values are
        also kept as attributes for programmatic inspection.
        """
        message = "{0}: {1}".format(code, content)
        super(InfluxDBClientError, self).__init__(message)
        self.content = content
        self.code = code
class InfluxDBClient(object):
"""Define the standard InfluxDBClient for influxdb v0.8.
The ``InfluxDBClient`` object holds information necessary to connect
to InfluxDB. Requests can be made to InfluxDB directly through the client.
:param host: hostname to connect to InfluxDB, defaults to 'localhost'
:type host: string
:param port: port to connect to InfluxDB, defaults to 'localhost'
:type port: int
:param username: user to connect, defaults to 'root'
:type username: string
:param password: password of the user, defaults to 'root'
:type password: string
:param database: database name to connect to, defaults is None
:type database: string
:param ssl: use https instead of http to connect to InfluxDB, defaults is
False
:type ssl: boolean
:param verify_ssl: verify SSL certificates for HTTPS requests, defaults is
False
:type verify_ssl: boolean
:param retries: number of retries your client will try before aborting,
defaults to 3. 0 indicates try until success
:type retries: int
:param timeout: number of seconds Requests will wait for your client to
establish a connection, defaults to None
:type timeout: int
:param use_udp: use UDP to connect to InfluxDB, defaults is False
:type use_udp: int
:param udp_port: UDP port to connect to InfluxDB, defaults is 4444
:type udp_port: int
"""
def __init__(self,
             host='localhost',
             port=8086,
             username='root',
             password='root',
             database=None,
             ssl=False,
             verify_ssl=False,
             timeout=None,
             retries=3,
             use_udp=False,
             udp_port=4444):
    """Construct a new InfluxDBClient object.

    Stores the connection parameters, opens a UDP socket when
    ``use_udp`` is set, and precomputes the base URL and default
    request headers.
    """
    self._host = host
    self._port = port
    self._username = username
    self._password = password
    self._database = database
    self._timeout = timeout
    self._retries = retries
    self._verify_ssl = verify_ssl

    self._use_udp = use_udp
    self._udp_port = udp_port
    if use_udp:
        self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    # `is True` mirrors the documented boolean contract for `ssl`.
    self._scheme = "https" if ssl is True else "http"

    self._baseurl = "{0}://{1}:{2}".format(self._scheme,
                                           self._host,
                                           self._port)

    self._headers = {
        'Content-type': 'application/json',
        'Accept': 'text/plain'}
@staticmethod
def from_dsn(dsn, **kwargs):
    r"""Return an instance of InfluxDBClient from the given data source name.

    Supported schemes are "influxdb", "https+influxdb" and
    "udp+influxdb". Additional keyword arguments are forwarded to the
    InfluxDBClient constructor and override values parsed from the DSN.

    Examples:
        >> cli = InfluxDBClient.from_dsn('influxdb://username:password@\
        ... localhost:8086/databasename', timeout=5)
        >> type(cli)
        <class 'influxdb.client.InfluxDBClient'>
        >> cli = InfluxDBClient.from_dsn('udp+influxdb://username:pass@\
        ... localhost:8086/databasename', timeout=5, udp_port=159)
        >> print('{0._baseurl} - {0.use_udp} {0.udp_port}'.format(cli))
        http://localhost:8086 - True 159

    :param dsn: data source name
    :type dsn: string
    :param **kwargs: additional parameters for InfluxDBClient.
    :type **kwargs: dict
    :note: when using "udp+influxdb" the specified port (if any) will be
        used for the TCP connection; specify the udp port with the
        additional udp_port parameter (cf. examples).
    :raise ValueError: if the provided DSN has any unexpected value.
    """
    conn_params = urlparse(dsn)

    # The scheme is either "influxdb" or "<modifier>+influxdb".
    scheme_parts = conn_params.scheme.split('+')
    if len(scheme_parts) == 1:
        modifier, scheme = None, scheme_parts[0]
    else:
        modifier, scheme = scheme_parts

    if scheme != 'influxdb':
        raise ValueError('Unknown scheme "{0}".'.format(scheme))

    init_args = {}
    if modifier == 'udp':
        init_args['use_udp'] = True
    elif modifier == 'https':
        init_args['ssl'] = True
    elif modifier:
        raise ValueError('Unknown modifier "{0}".'.format(modifier))

    if conn_params.hostname:
        init_args['host'] = conn_params.hostname
    if conn_params.port:
        init_args['port'] = conn_params.port
    if conn_params.username:
        init_args['username'] = conn_params.username
    if conn_params.password:
        init_args['password'] = conn_params.password
    if conn_params.path and len(conn_params.path) > 1:
        # Strip the leading '/' from the URL path to get the database.
        init_args['database'] = conn_params.path[1:]

    # Explicit keyword arguments win over DSN-derived values.
    init_args.update(kwargs)

    return InfluxDBClient(**init_args)
# Change member variables
def switch_database(self, database):
    """Change client database.

    :param database: the new database name to switch to
    :type database: string
    """
    # Only updates local state; no request is made to the server.
    self._database = database
def switch_db(self, database):
    """Change client database.

    DEPRECATED: kept only for backwards compatibility; emits a
    FutureWarning and delegates to :meth:`switch_database`.
    """
    warnings.warn(
        "switch_db is deprecated, and will be removed "
        "in future versions. Please use "
        "``InfluxDBClient.switch_database(database)`` instead.",
        FutureWarning)
    return self.switch_database(database)
def switch_user(self, username, password):
    """Replace the credentials used for subsequent requests.

    :param username: user name to authenticate with from now on
    :type username: string
    :param password: matching password
    :type password: string
    """
    # The two assignments are independent; order does not matter.
    self._password = password
    self._username = username
def request(self, url, method='GET', params=None, data=None,
            expected_response_code=200):
    """Make an HTTP request to the InfluxDB API.

    :param url: endpoint path, appended to the client base URL
    :param method: HTTP verb to use (default ``'GET'``)
    :param params: optional dict of query parameters; the client
        credentials are always added as ``u``/``p``
    :param data: optional request body; non-string bodies are JSON-encoded
    :param expected_response_code: status code treated as success
    :raise InfluxDBClientError: when the server replies with any other
        status code
    :raise requests.exceptions.ConnectionError: when the request cannot be
        delivered within the configured retry budget
    """
    url = "{0}/{1}".format(self._baseurl, url)
    if params is None:
        params = {}
    # The 0.8 API authenticates via query parameters.
    params.update({'u': self._username, 'p': self._password})
    if data is not None and not isinstance(data, str):
        data = json.dumps(data)
    _try = 0
    # Try to send the request more than once by default (see #103)
    while True:
        try:
            response = session.request(
                method=method,
                url=url,
                params=params,
                data=data,
                headers=self._headers,
                verify=self._verify_ssl,
                timeout=self._timeout
            )
            break
        except (requests.exceptions.ConnectionError,
                requests.exceptions.Timeout):
            _try += 1
            # A retry budget of 0 means "never retry".  BUGFIX: the old
            # code let the loop fall through once the budget was
            # exhausted, leaving ``response`` unbound and raising a
            # confusing NameError instead of a connection error.
            if self._retries == 0 or _try >= self._retries:
                raise requests.exceptions.ConnectionError
    if response.status_code == expected_response_code:
        return response
    raise InfluxDBClientError(response.content, response.status_code)
def write(self, data):
    """Provide as convenience for influxdb v0.9.0, this may change."""
    self.request(url="write", method='POST', params=None,
                 data=data, expected_response_code=200)
    return True
# Writing Data
#
# Assuming you have a database named foo_production you can write data
# by doing a POST to /db/foo_production/series?u=some_user&p=some_password
# with a JSON body of points.
def write_points(self, data, time_precision='s', *args, **kwargs):
    """Write to multiple time series names.

    An example data blob is::

        data = [
            {
                "points": [[12]],
                "name": "cpu_load_short",
                "columns": ["value"]
            }
        ]

    :param data: A list of dicts in InfluxDB 0.8.x data format.
    :param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
        or 'u'.
    :param batch_size: [Optional] Value to write the points in batches
        instead of all at one time. Useful for when doing data dumps from
        one database to another or when doing a massive write operation
    :type batch_size: int
    """
    batch_size = kwargs.get('batch_size')
    if batch_size and batch_size > 0:
        for series in data:
            series_name = series.get('name')
            series_columns = series.get('columns')
            points = series.get('points', [])
            # Re-wrap every slice as a single-series payload so each
            # batch carries the same name/columns as the original.
            for start in range(0, len(points), batch_size):
                payload = [{
                    "points": points[start:start + batch_size],
                    "name": series_name,
                    "columns": series_columns
                }]
                self._write_points(data=payload,
                                   time_precision=time_precision)
        return True
    return self._write_points(data=data, time_precision=time_precision)
def write_points_with_precision(self, data, time_precision='s'):
    """Write to multiple time series names.

    DEPRECATED.
    """
    warnings.warn(
        "write_points_with_precision is deprecated, and will be removed in "
        "future versions. Please use "
        "``InfluxDBClient.write_points(time_precision='..')`` instead.",
        FutureWarning)
    return self._write_points(data=data, time_precision=time_precision)
def _write_points(self, data, time_precision):
if time_precision not in ['s', 'm', 'ms', 'u']:
raise Exception(
"Invalid time precision is given. (use 's', 'm', 'ms' or 'u')")
if self._use_udp and time_precision != 's':
raise Exception(
"InfluxDB only supports seconds precision for udp writes"
)
url = "db/{0}/series".format(self._database)
params = {
'time_precision': time_precision
}
if self._use_udp:
self.send_packet(data)
else:
self.request(
url=url,
method='POST',
params=params,
data=data,
expected_response_code=200
)
return True
# One Time Deletes
def delete_points(self, name):
    """Delete an entire series.

    :param name: name of the series to drop
    """
    self.request(
        url="db/{0}/series/{1}".format(self._database, name),
        method='DELETE',
        expected_response_code=204)
    return True
# Regularly Scheduled Deletes
def create_scheduled_delete(self, json_body):
    """Create schedule delete from database.

    2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
    but it is documented in http://influxdb.org/docs/api/http.html.
    See also: src/api/http/api.go:l57

    :param json_body: definition of the delete schedule (currently unused).
    :raise NotImplementedError: always, until the server supports it.
    """
    raise NotImplementedError()
# get list of deletes
# curl http://localhost:8086/db/site_dev/scheduled_deletes
#
# remove a regularly scheduled delete
# curl -X DELETE http://localhost:8086/db/site_dev/scheduled_deletes/:id
def get_list_scheduled_delete(self):
    """Get list of scheduled deletes.

    2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
    but it is documented in http://influxdb.org/docs/api/http.html.
    See also: src/api/http/api.go:l57

    :raise NotImplementedError: always, until the server supports it.
    """
    raise NotImplementedError()
def remove_scheduled_delete(self, delete_id):
    """Remove scheduled delete.

    2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
    but it is documented in http://influxdb.org/docs/api/http.html.
    See also: src/api/http/api.go:l57

    :param delete_id: id of the scheduled delete to remove (unused).
    :raise NotImplementedError: always, until the server supports it.
    """
    raise NotImplementedError()
def query(self, query, time_precision='s', chunked=False):
    """Query data from the influxdb v0.8 database.

    :param query: the query string to run
    :param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
        or 'u'.
    :param chunked: [Optional, default=False] True if the data shall be
        retrieved in chunks, False otherwise.
    """
    return self._query(query,
                       time_precision=time_precision,
                       chunked=chunked)
# Querying Data
#
# GET db/:name/series. It takes five parameters
def _query(self, query, time_precision='s', chunked=False):
if time_precision not in ['s', 'm', 'ms', 'u']:
raise Exception(
"Invalid time precision is given. (use 's', 'm', 'ms' or 'u')")
if chunked is True:
chunked_param = 'true'
else:
chunked_param = 'false'
# Build the URL of the series to query
url = "db/{0}/series".format(self._database)
params = {
'q': query,
'time_precision': time_precision,
'chunked': chunked_param
}
response = self.request(
url=url,
method='GET',
params=params,
expected_response_code=200
)
if chunked:
try:
decoded = chunked_json.loads(response.content.decode())
except UnicodeDecodeError:
decoded = chunked_json.loads(response.content.decode('utf-8'))
return list(decoded)
return response.json()
# Creating and Dropping Databases
#
# ### create a database
# curl -X POST http://localhost:8086/db -d '{"name": "site_development"}'
#
# ### drop a database
# curl -X DELETE http://localhost:8086/db/site_development
def create_database(self, database):
    """Create a database on the InfluxDB server.

    :param database: the name of the database to create
    :type database: string
    :rtype: boolean
    """
    # 201 Created is the only success code for this endpoint.
    self.request(url="db", method='POST',
                 data={'name': database},
                 expected_response_code=201)
    return True
def delete_database(self, database):
    """Drop a database on the InfluxDB server.

    :param database: the name of the database to delete
    :type database: string
    :rtype: boolean
    """
    self.request(url="db/{0}".format(database),
                 method='DELETE',
                 expected_response_code=204)
    return True
# ### get list of databases
# curl -X GET http://localhost:8086/db
def get_list_database(self):
    """Return the list of databases known to the server."""
    return self.request(url="db",
                        method='GET',
                        expected_response_code=200).json()
def get_database_list(self):
    """Get the list of databases.

    DEPRECATED.
    """
    warnings.warn(
        "get_database_list is deprecated, and will be removed in future "
        "versions. Please use "
        "``InfluxDBClient.get_list_database`` instead.",
        FutureWarning)
    return self.get_list_database()
def delete_series(self, series):
    """Drop a series on the InfluxDB server.

    :param series: the name of the series to delete
    :type series: string
    :rtype: boolean
    """
    self.request(
        url="db/{0}/series/{1}".format(self._database, series),
        method='DELETE',
        expected_response_code=204)
    return True
def get_list_series(self):
    """Get a list of all time series in a database."""
    result = self._query('list series')
    # Each point is [sequence_number, series_name]; keep the names.
    return [point[1] for point in result[0]['points']]
def get_list_continuous_queries(self):
    """Get a list of continuous queries."""
    result = self._query('list continuous queries')
    # The query text lives in the third column of every point.
    return [point[2] for point in result[0]['points']]
# Security
# get list of cluster admins
# curl http://localhost:8086/cluster_admins?u=root&p=root
# add cluster admin
# curl -X POST http://localhost:8086/cluster_admins?u=root&p=root \
# -d '{"name": "paul", "password": "i write teh docz"}'
# update cluster admin password
# curl -X POST http://localhost:8086/cluster_admins/paul?u=root&p=root \
# -d '{"password": "new pass"}'
# delete cluster admin
# curl -X DELETE http://localhost:8086/cluster_admins/paul?u=root&p=root
# Database admins, with a database name of site_dev
# get list of database admins
# curl http://localhost:8086/db/site_dev/admins?u=root&p=root
# add database admin
# curl -X POST http://localhost:8086/db/site_dev/admins?u=root&p=root \
# -d '{"name": "paul", "password": "i write teh docz"}'
# update database admin password
# curl -X POST http://localhost:8086/db/site_dev/admins/paul?u=root&p=root\
# -d '{"password": "new pass"}'
# delete database admin
# curl -X DELETE \
# http://localhost:8086/db/site_dev/admins/paul?u=root&p=root
def get_list_cluster_admins(self):
    """Get list of cluster admins."""
    return self.request(url="cluster_admins",
                        method='GET',
                        expected_response_code=200).json()
def add_cluster_admin(self, new_username, new_password):
    """Add cluster admin.

    :param new_username: name of the admin to create
    :param new_password: password for the new admin
    """
    self.request(
        url="cluster_admins",
        method='POST',
        data={'name': new_username, 'password': new_password},
        expected_response_code=200)
    return True
def update_cluster_admin_password(self, username, new_password):
    """Update cluster admin password.

    :param username: admin whose password should change
    :param new_password: the replacement password
    """
    self.request(
        url="cluster_admins/{0}".format(username),
        method='POST',
        data={'password': new_password},
        expected_response_code=200)
    return True
def delete_cluster_admin(self, username):
    """Delete cluster admin.

    :param username: name of the admin to remove
    """
    self.request(
        url="cluster_admins/{0}".format(username),
        method='DELETE',
        expected_response_code=200)
    return True
def set_database_admin(self, username):
    """Grant database-admin rights to *username*."""
    return self.alter_database_admin(username, True)
def unset_database_admin(self, username):
    """Revoke database-admin rights from *username*."""
    return self.alter_database_admin(username, False)
def alter_database_admin(self, username, is_admin):
    """Set or clear the admin flag of a database user.

    :param username: user whose admin flag should change
    :param is_admin: True to grant, False to revoke
    """
    self.request(
        url="db/{0}/users/{1}".format(self._database, username),
        method='POST',
        data={'admin': is_admin},
        expected_response_code=200)
    return True
def get_list_database_admins(self):
    """Get list of database admins.

    2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
    but it is documented in http://influxdb.org/docs/api/http.html.
    See also: src/api/http/api.go:l57

    :raise NotImplementedError: always, until the server supports it.
    """
    raise NotImplementedError()
def add_database_admin(self, new_username, new_password):
    """Add database admin.

    2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
    but it is documented in http://influxdb.org/docs/api/http.html.
    See also: src/api/http/api.go:l57

    :param new_username: name of the admin to create (unused).
    :param new_password: password for the new admin (unused).
    :raise NotImplementedError: always, until the server supports it.
    """
    raise NotImplementedError()
def update_database_admin_password(self, username, new_password):
    """Update database admin password.

    2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
    but it is documented in http://influxdb.org/docs/api/http.html.
    See also: src/api/http/api.go:l57

    :raise NotImplementedError: always, until the server supports it.
    """
    raise NotImplementedError()
def delete_database_admin(self, username):
    """Delete database admin.

    2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
    but it is documented in http://influxdb.org/docs/api/http.html.
    See also: src/api/http/api.go:l57

    :raise NotImplementedError: always, until the server supports it.
    """
    raise NotImplementedError()
###
# Limiting User Access
# Database users
# get list of database users
# curl http://localhost:8086/db/site_dev/users?u=root&p=root
# add database user
# curl -X POST http://localhost:8086/db/site_dev/users?u=root&p=root \
# -d '{"name": "paul", "password": "i write teh docz"}'
# update database user password
# curl -X POST http://localhost:8086/db/site_dev/users/paul?u=root&p=root \
# -d '{"password": "new pass"}'
# delete database user
# curl -X DELETE http://localhost:8086/db/site_dev/users/paul?u=root&p=root
def get_database_users(self):
    """Get list of database users."""
    return self.request(
        url="db/{0}/users".format(self._database),
        method='GET',
        expected_response_code=200).json()
def add_database_user(self, new_username, new_password, permissions=None):
    """Add database user.

    :param new_username: name of the user to create
    :param new_password: password for the new user
    :param permissions: A ``(readFrom, writeTo)`` tuple
    :raise TypeError: if ``permissions`` cannot be unpacked into two items
    """
    payload = {'name': new_username, 'password': new_password}
    if permissions:
        try:
            payload['readFrom'], payload['writeTo'] = permissions
        except (ValueError, TypeError):
            raise TypeError(
                "'permissions' must be (readFrom, writeTo) tuple"
            )
    self.request(
        url="db/{0}/users".format(self._database),
        method='POST',
        data=payload,
        expected_response_code=200)
    return True
def update_database_user_password(self, username, new_password):
    """Update the password of a database user."""
    return self.alter_database_user(username, new_password)
def alter_database_user(self, username, password=None, permissions=None):
    """Alter a database user and/or their permissions.

    :param username: user to alter
    :param password: new password, if it should change
    :param permissions: A ``(readFrom, writeTo)`` tuple
    :raise TypeError: if permissions cannot be read.
    :raise ValueError: if neither password nor permissions provided.
    """
    if not password and not permissions:
        raise ValueError("Nothing to alter for user {0}.".format(username))
    data = {}
    if password:
        data['password'] = password
    if permissions:
        try:
            data['readFrom'], data['writeTo'] = permissions
        except (ValueError, TypeError):
            raise TypeError(
                "'permissions' must be (readFrom, writeTo) tuple"
            )
    self.request(
        url="db/{0}/users/{1}".format(self._database, username),
        method='POST',
        data=data,
        expected_response_code=200
    )
    # Keep the client credentials in sync, but only when the password
    # actually changed.  BUGFIX: previously an unconditional assignment
    # clobbered the stored password with None when only permissions were
    # altered for the current user.
    if username == self._username and password:
        self._password = password
    return True
def delete_database_user(self, username):
    """Delete database user.

    :param username: name of the user to remove
    """
    self.request(
        url="db/{0}/users/{1}".format(self._database, username),
        method='DELETE',
        expected_response_code=200)
    return True
# update the user by POSTing to db/site_dev/users/paul
def update_permission(self, username, json_body):
    """Update read/write permission.

    2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
    but it is documented in http://influxdb.org/docs/api/http.html.
    See also: src/api/http/api.go:l57

    :raise NotImplementedError: always, until the server supports it.
    """
    raise NotImplementedError()
def send_packet(self, packet):
    """Send a UDP packet along the wire."""
    payload = json.dumps(packet).encode('utf-8')
    self.udp_socket.sendto(payload, (self._host, self._udp_port))
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
""" Key value store interface of MXNet for parameter synchronization."""
from array import array
import ctypes
import warnings
from ..ndarray import NDArray
from ..base import _LIB, c_str_array, c_handle_array, c_array, c_array_buf, c_str
from ..base import check_call, string_types
from ..base import KVStoreHandle
from ..profiler import set_kvstore_handle
__all__ = ['create', 'KVStoreBase']
def _ctype_key_value(keys, vals):
    """Convert key/value arguments into ctype arrays.

    Returns a ``(c_keys, c_vals, use_str_keys)`` triple where
    ``use_str_keys`` records whether string (rather than int) keys are in
    use.  For internal use only.
    """
    if isinstance(keys, (tuple, list)):
        # Flatten recursively; every entry must agree on the key type.
        assert(len(keys) == len(vals))
        flat_keys = []
        flat_vals = []
        use_str_keys = None
        for key, val in zip(keys, vals):
            key_part, val_part, str_keys_i = _ctype_key_value(key, val)
            flat_keys += key_part
            flat_vals += val_part
            use_str_keys = str_keys_i if use_str_keys is None else use_str_keys
            assert(use_str_keys == str_keys_i), "inconsistent types of keys detected."
        if use_str_keys:
            c_keys_arr = c_array(ctypes.c_char_p, flat_keys)
        else:
            c_keys_arr = c_array(ctypes.c_int, flat_keys)
        return (c_keys_arr, c_array(ctypes.c_void_p, flat_vals), use_str_keys)

    assert(isinstance(keys, (int,) + string_types)), \
        "unexpected type for keys: " + str(type(keys))
    use_str_keys = isinstance(keys, string_types)
    if isinstance(vals, NDArray):
        # Single key, single array.
        if use_str_keys:
            c_keys = c_str_array([keys])
        else:
            c_keys = c_array_buf(ctypes.c_int, array('i', [keys]))
        return (c_keys, c_handle_array([vals]), use_str_keys)
    # Single key shared by a list of arrays: repeat the key per array.
    for value in vals:
        assert(isinstance(value, NDArray))
    if use_str_keys:
        c_keys = c_str_array([keys] * len(vals))
    else:
        c_keys = c_array_buf(ctypes.c_int, array('i', [keys] * len(vals)))
    return (c_keys, c_handle_array(vals), use_str_keys)
def _ctype_dict(param_dict):
    """Return ctype arrays for the keys and the stringified values of a dict."""
    assert(isinstance(param_dict, dict)), \
        "unexpected type for param_dict: " + str(type(param_dict))
    key_strs = [c_str(k) for k in param_dict.keys()]
    val_strs = [c_str(str(v)) for v in param_dict.values()]
    return (c_array(ctypes.c_char_p, key_strs),
            c_array(ctypes.c_char_p, val_strs))
class KVStoreBase(object):
    """Abstract interface of a key-value store used for data parallel training."""

    def broadcast(self, key, value, out, priority=0):
        """Broadcast the ``value`` NDArray at rank 0 to all ranks and store
        the result in ``out``.

        Parameters
        ----------
        key : str or int
            The key.
        value : NDArray
            The value corresponding to the key to broadcast
        out : NDArray, or list of NDArray
            Values corresponding to the key to store the result
        priority : int, optional
            The priority of the operation.
            Higher priority operations are likely to be executed before other actions.
        """
        raise NotImplementedError()

    def pushpull(self, key, value, out=None, priority=0):
        """Push and pull a single value or a sequence of values in one call.

        This is the coalesced form of separate push and pull operations:
        ``value`` is pushed to the kvstore server for summation with the
        specified keys, and the results are pulled back into ``out``.  When
        ``out`` is omitted the pulled values are written back into ``value``.
        Allreduce-based backends (e.g. horovod) have no notion of a server;
        for those this call performs an allreduce.

        Parameters
        ----------
        key : str or int
            The key.
        value : NDArray, or list of NDArray
            Values corresponding to the keys.
        out: NDArray, or list of NDArray
            Values corresponding to the key.
        priority : int, optional
            The priority of the operation.
            Higher priority operations are likely to be executed before other actions.
        """
        raise NotImplementedError()

    def set_optimizer(self, optimizer):
        """Register an optimizer with the kvstore.

        On a single machine this updates the local optimizer.  When invoked
        from a worker in a multi-machine setup, the optimizer is serialized
        with pickle and sent to all servers; the call returns once every
        server has been updated.

        Parameters
        ----------
        optimizer : KVStoreBase
            The new optimizer for the store
        """
        raise NotImplementedError()

    # Well-known capability name accepted by `is_capable`.
    OPTIMIZER = 'optimizer'

    def is_capable(self, capability):
        """Query whether this KVStore type supports a capability, such as
        optimizer algorithm, gradient compression, sparsity, etc.

        Parameters
        ----------
        capability: str
            The capability to query

        Returns
        -------
        result : bool
            Whether the capability is supported or not.
        """
        raise NotImplementedError()

    def save_optimizer_states(self, fname, dump_optimizer=False):
        """Save the optimizer (updater) state to a file, typically while
        checkpointing the model during training.

        Parameters
        ----------
        fname : str
            Path to the output states file.
        dump_optimizer : bool, default False
            Whether to also save the optimizer itself. This would also save optimizer
            information such as learning rate and weight decay schedules.
        """
        raise NotImplementedError()

    def load_optimizer_states(self, fname):
        """Load the optimizer (updater) state from a file.

        Parameters
        ----------
        fname : str
            Path to input states file.
        """
        raise NotImplementedError()

    @property
    def type(self):
        """The string type of this kvstore backend.

        Returns
        -------
        type : str
            the string type
        """
        raise NotImplementedError()

    @property
    def rank(self):
        """The rank of this worker node.

        Returns
        -------
        rank : int
            The rank of this node, which is in range [0, num_workers())
        """
        raise NotImplementedError()

    @property
    def num_workers(self):
        """The number of worker nodes.

        Returns
        -------
        size :int
            The number of worker nodes.
        """
        raise NotImplementedError()

    # Maps lower-cased class name -> registered KVStore class.
    kv_registry = {}

    @staticmethod
    def register(klass):
        """Register a new KVStore so that `create` can instantiate it later.

        Examples
        --------
        >>> @mx.kvstore.KVStoreBase.register
        ... class MyKVStore(mx.kvstore.KVStoreBase):
        ...     pass
        >>> kv = mx.kv.create('MyKVStore')
        >>> print(type(kv))
        <class '__main__.MyKVStore'>
        """
        assert(isinstance(klass, type))
        name = klass.__name__.lower()
        previous = KVStoreBase.kv_registry.get(name)
        if previous is not None:
            # Re-registration is allowed but loudly announced.
            warnings.warn('WARNING: New kvstore %s.%s is overriding '
                          'existing kvstore %s.%s' %
                          (klass.__module__, klass.__name__,
                           previous.__module__, previous.__name__))
        KVStoreBase.kv_registry[name] = klass
        return klass
@KVStoreBase.register
class TestStore(KVStoreBase):
    """A single-process, in-memory key-value store used for testing."""

    def broadcast(self, key, value, out, priority=0):
        """Copy ``value`` into every destination array in ``out``.

        Parameters
        ----------
        key : str or int
            The key.
        value : NDArray
            The value corresponding to the key to broadcast
        out : NDArray, or list of NDArray
            Values corresponding to the key to store the result
        priority : int, optional
            The priority of the operation.
            Higher priority operations are likely to be executed before other actions.
        """
        targets = out if isinstance(out, list) else [out]
        for target in targets:
            target[:] = value

    def pushpull(self, key, value, out=None, priority=0):
        """Push and pull a single value or a sequence of values.

        ``value`` is summed (there is only one process, so for a list this
        is a local reduction) and the result is written into ``out``, or
        back into ``value`` when ``out`` is omitted.

        Parameters
        ----------
        key : str or int
            The key.
        value : NDArray, or list of NDArray
            Values corresponding to the keys.
        out: NDArray, or list of NDArray
            Values corresponding to the key.
        priority : int, optional
            The priority of the operation.
            Higher priority operations are likely to be executed before other actions.
        """
        # Reduction happens on the context of the first input array.
        ctx = value[0].context
        if isinstance(value, NDArray):
            # A single array needs no reduction; just copy to the outputs.
            if out is None:
                return
            targets = out if isinstance(out, list) else [out]
            for target in targets:
                target[:] = value
            return
        reduced = sum([val.as_in_context(ctx) for val in value])
        if out is None:
            for val in value:
                val[:] = reduced
        else:
            targets = out if isinstance(out, list) else [out]
            for target in targets:
                target[:] = reduced

    @staticmethod
    def is_capable(capability):
        """Query whether this KVStore type supports a capability.

        This store keeps no weights on a server side, so no optimizer
        capability is available and this returns False for it.

        Parameters
        ----------
        capability: str
            The capability to query

        Returns
        -------
        result : bool
            Whether the capability is supported or not.
        """
        if capability.lower() == KVStoreBase.OPTIMIZER:
            return False
        raise ValueError('Unknown capability: {}'.format(capability))

    @property
    def type(self):
        """The string type of this kvstore ('teststore')."""
        return 'teststore'

    @property
    def rank(self):
        """The rank of this worker node; always 0 in a test store."""
        return 0

    @property
    def num_workers(self):
        """The number of worker nodes; always 1 in a test store."""
        return 1

    def set_optimizer(self, optimizer):
        """Register an optimizer with the kvstore.

        Not supported by the test store (no server-side weights).

        Parameters
        ----------
        optimizer : KVStoreBase
            The new optimizer for the store
        """
        raise NotImplementedError()

    def save_optimizer_states(self, fname, dump_optimizer=False):
        """Save the optimizer (updater) state to a file.

        Not supported by the test store.

        Parameters
        ----------
        fname : str
            Path to the output states file.
        dump_optimizer : bool, default False
            Whether to also save the optimizer itself.
        """
        raise NotImplementedError()

    def load_optimizer_states(self, fname):
        """Load the optimizer (updater) state from a file.

        Not supported by the test store.

        Parameters
        ----------
        fname : str
            Path to input states file.
        """
        raise NotImplementedError()
def create(name='local'):
    """Create a new KVStore of the requested type.

    For single machine training, there are two commonly used types:

    ``local``: Copies all gradients to CPU memory and updates weights there.

    ``device``: Aggregates gradients and updates weights on GPUs. With this setting,
    the KVStore also attempts to use GPU peer-to-peer communication,
    potentially accelerating the communication.

    For distributed training, KVStore also supports a number of types:

    ``dist_sync``: Behaves similarly to ``local`` but with one major difference.
    With ``dist_sync``, batch-size now means the batch size used on each machine.
    So if there are ``n`` machines and we use batch size ``b``,
    then ``dist_sync`` behaves like ``local`` with batch size ``n * b``.

    ``dist_device_sync``: Identical to ``dist_sync`` with the difference similar
    to ``device`` vs ``local``.

    ``dist_async``: Performs asynchronous updates.
    The weights are updated whenever gradients are received from any machine.
    No two updates happen on the same weight at the same time. However, the order is not
    guaranteed.

    ``byteps``: Use byteps as broadcast/pushpull backend.
    This kind of kvstore doesn't store weights, thus there won't be optimizer in this kvstore server.
    Byteps doesn't support pure cpu training, so be sure to enable gpu training when using this kvstore.

    Parameters
    ----------
    name : {'local', 'device', 'nccl', 'dist_sync', 'dist_device_sync', 'dist_async', 'horovod', 'byteps'}
        The type of KVStore.

    Returns
    -------
    kv : KVStoreBase
        The created KVStore.
    """
    if not isinstance(name, string_types):
        raise TypeError('name must be a string')
    name = name.lower()
    # Python implementations registered via KVStoreBase.register take
    # precedence over the native (C++) backends.
    if name in KVStoreBase.kv_registry:
        return KVStoreBase.kv_registry[name]()
    # Fall back to the native kvstore implementation.
    handle = KVStoreHandle()
    check_call(_LIB.MXKVStoreCreate(c_str(name), ctypes.byref(handle)))
    from .kvstore import KVStore
    kv = KVStore(handle)
    set_kvstore_handle(kv.handle)
    return kv
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ConnectionMonitorsOperations(object):
"""ConnectionMonitorsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    # Pipeline client used to issue HTTP requests to the service.
    self._client = client
    # Serializer/deserializer for request and response bodies.
    self._serialize = serializer
    self._deserialize = deserializer
    # Service client configuration (e.g. subscription id, polling interval).
    self._config = config
def _create_or_update_initial(
    self,
    resource_group_name,  # type: str
    network_watcher_name,  # type: str
    connection_monitor_name,  # type: str
    parameters,  # type: "_models.ConnectionMonitor"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ConnectionMonitorResult"
    """Send the single PUT request backing ``begin_create_or_update``.

    Auto-generated helper: issues the request once; long-running-operation
    polling is handled by the public ``begin_create_or_update`` method.

    :raises HttpResponseError: if the service replies with a status code
        other than 200 (updated) or 201 (created).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    # Callers may extend/override the status-code -> exception mapping.
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-06-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # Serialize the ConnectionMonitor model as the request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'ConnectionMonitor')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    # Both 200 (update) and 201 (create) carry a ConnectionMonitorResult body.
    if response.status_code == 200:
        deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
    if response.status_code == 201:
        deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        connection_monitor_name,  # type: str
        parameters,  # type: "_models.ConnectionMonitor"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ConnectionMonitorResult"]
        """Create or update a connection monitor.

        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name of the connection monitor.
        :type connection_monitor_name: str
        :param parameters: Parameters that define the operation to create a connection monitor.
        :type parameters: ~azure.mgmt.network.v2018_06_01.models.ConnectionMonitor
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ConnectionMonitorResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO-specific knobs first so they are not forwarded to the HTTP pipeline.
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: send the initial PUT. The ``cls`` lambda makes the
            # initial call return the raw pipeline response so the poller can inspect
            # status codes and headers itself.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                connection_monitor_name=connection_monitor_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These keys were only relevant to the initial request; drop them before polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into the result model.
            deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # ARMPolling may need these to re-format polling URLs returned by the service.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of issuing a new initial request.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
def get(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
"""Gets a connection monitor by name.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionMonitorResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        connection_monitor_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified connection monitor.

        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name of the connection monitor.
        :type connection_monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO-specific knobs first so they are not forwarded to the HTTP pipeline.
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: send the initial DELETE. The ``cls`` lambda makes
            # the initial call return the raw pipeline response for the poller.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                connection_monitor_name=connection_monitor_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These keys were only relevant to the initial request; drop them before polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete produces no body; only honor the caller's ``cls`` hook, if any.
            if cls:
                return cls(pipeline_response, None, {})

        # ARMPolling may need these to re-format polling URLs returned by the service.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of issuing a new initial request.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
def _stop_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
    def begin_stop(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        connection_monitor_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Stops the specified connection monitor.

        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name of the connection monitor.
        :type connection_monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO-specific knobs first so they are not forwarded to the HTTP pipeline.
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: send the initial stop POST. The ``cls`` lambda makes
            # the initial call return the raw pipeline response for the poller.
            raw_result = self._stop_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                connection_monitor_name=connection_monitor_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These keys were only relevant to the initial request; drop them before polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Stop produces no body; only honor the caller's ``cls`` hook, if any.
            if cls:
                return cls(pipeline_response, None, {})

        # ARMPolling may need these to re-format polling URLs returned by the service.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of issuing a new initial request.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'}  # type: ignore
def _start_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
    def begin_start(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        connection_monitor_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Starts the specified connection monitor.

        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name of the connection monitor.
        :type connection_monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO-specific knobs first so they are not forwarded to the HTTP pipeline.
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: send the initial start POST. The ``cls`` lambda makes
            # the initial call return the raw pipeline response for the poller.
            raw_result = self._start_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                connection_monitor_name=connection_monitor_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These keys were only relevant to the initial request; drop them before polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Start produces no body; only honor the caller's ``cls`` hook, if any.
            if cls:
                return cls(pipeline_response, None, {})

        # ARMPolling may need these to re-format polling URLs returned by the service.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of issuing a new initial request.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'}  # type: ignore
def _query_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorQueryResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self._query_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_query_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
    def begin_query(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        connection_monitor_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ConnectionMonitorQueryResult"]
        """Query a snapshot of the most recent connection states.

        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name given to the connection monitor.
        :type connection_monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ConnectionMonitorQueryResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorQueryResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO-specific knobs first so they are not forwarded to the HTTP pipeline.
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorQueryResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: send the initial query POST. The ``cls`` lambda makes
            # the initial call return the raw pipeline response for the poller.
            raw_result = self._query_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                connection_monitor_name=connection_monitor_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These keys were only relevant to the initial request; drop them before polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into the query-result model.
            deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # ARMPolling may need these to re-format polling URLs returned by the service.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of issuing a new initial request.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'}  # type: ignore
    def list(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ConnectionMonitorListResult"]
        """Lists all connection monitors for the specified Network Watcher.

        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ConnectionMonitorListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_06_01.models.ConnectionMonitorListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request: the templated list URL for the first page, or the
            # service-provided ``next_link`` (used verbatim, no extra query parameters)
            # for subsequent pages.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, item iterator).
            # The token is always None here, so paging stops after the first page.
            deserialized = self._deserialize('ConnectionMonitorListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; any non-200 status is mapped/raised as an ARM error.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response

        # ItemPaged drives get_next/extract_data lazily as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'}  # type: ignore
| |
#########################################################################
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#########################################################################
## Markdown has these types of paragraph: heading, text, list item (bullet or numbered),
## codeblock, table, and block quote.
##
## This script fixes up differences in Markdown dialect, between Github-MD and doxia-markdown.
## Specifically, it fixes these problems:
## 1. In Github-MD, bullets and codeblock starts are self-delimiting. In doxia-markdown, they
## must be separated from preceding text or (in the case of codeblocks) bullets, by a blank line.
## Failure to do so causes the bullet or codeblock delimiter to be interpreted as ordinary text,
## and the content gets munched into the preceding paragraph. The codeblock delimiter (```) as text
## gets interpreted as a codephrase delimiter (`) plus a preceding or following empty codephrase (``).
## 2. Github-MD is liberal in regard to what an 'indent' is, allowing 1, 2, 4, or 8 blanks, or
## a tab. We mostly use 2 blanks. Doxia-markdown requires strictly 4 spaces or a tab. Failure
## to adhere to this requirement causes indents to be ignored or misinterpreted, leading again to
## paragraph munching and delimiter ignoring.
## 3. In Doxia-markdown, if you indent below a header or text paragraph, it is interpreted as
## an implicit codeblock start. In Github-MD, we only start codeblocks with the explicit
## codeblock delimiter (```) and sometimes indent below text just for visual emphasis, so the
## doxia-markdown interpretation is unwelcome. Thus, in our rewrite, we disallow indenting below
## text or headers. This may make the text less pretty than the Github-MD presentation, but it
## avoids the incorrect codeblocking.
## 4. In Doxia-markdown, the indent of the end-codeblock delimiter must match that of the
## begin-codeblock delimiter, or it won't be recognized and the codeblock will run on.
## 5. Relative links need to be re-written. '.md' files need to be changed to '.html', and
## as best we can we will re-write named anchors referring to tags autogenerated from headers.
## The problem with generated tags is that Github-MD forces header text to lower-case, and
## replaces blank spaces with hyphens, while doxia-markdown leaves case unchanged, and replaces
## blanks with underscores. Fortunately we seem to have a culture of using link references that
## are typographically the same as the header text, so we have some basis for fixing most links.
## 6. H1 headers don't get named anchors generated, unlike H2 and lower headers. Don't know
## why doxia-markdown has this deficiency, perhaps it assumes H1 will only be used once at the
## beginning of the doc. We will insert an explicit anchor just before the H1 headers, to fix.
##
## So far, we're ignoring tables and block quotes.
##
## This script also manages the re-writing of named files to *.tmp, then mv to replace the original file.
import sys
import os
import inspect
import re
# These are the characters excluded by Markdown from use in auto-generated anchor text for Headings.
EXCLUDED_CHARS_REGEX_GHM = r'[^\w\-]' # all non-alphanumerics except "-" and "_". Whitespace are previously converted.
EXCLUDED_CHARS_REGEX_DOX = r'[^\w\.\-]' # all non-alphanumerics except "-", "_", and ".". Whitespace are previously converted.
def report_error(s) :
    # Report a fatal processing error with file/line context and abort.
    # Uses the awk-style module globals maintained by the main loop:
    # FNR (line number in current file), FILENAME, and inputline.
    # Python 2 print-chevron syntax: writes to stderr.
    print >>sys.stderr, "ERROR: " + s
    print >>sys.stderr, "on line: " + str(FNR) + " in file: " + FILENAME
    print >>sys.stderr, inputline
    exit(1)
def trace(msg) :
    # Emit a debug trace line to stderr when the module-global TRACE flag
    # is truthy.  The caller's function name is recovered via the frame
    # inspection API so call sites don't have to pass it.
    if TRACE :
        print >>sys.stderr, "TRACE: " + inspect.currentframe().f_back.f_code.co_name + " : InputLine " + str(FNR) + " : " + msg
class INDENT_STACK :
    'This class maintains the indent stack during doc parsing.'

    # Each stack entry records one open indent scope as a dict with:
    #   'physical' - column of the indent after tab expansion
    #   'logical'  - logical nesting depth (multiples-of-4 indent unit)
    #   'type'     - paragraph type owning the scope (none/text/bullet/code)

    @staticmethod
    def _entry(physical, logical, kind) :
        return {'physical' : physical, 'logical' : logical, 'type' : kind}

    def __init__(self) :
        self.my_stack = [self._entry(0, 0, 'none')]

    def init_indent(self) :
        # Reset to the single root scope (no indent, no type).
        self.my_stack = [self._entry(0, 0, 'none')]

    def push_indent(self, n, new_type) :
        # A deeper physical indent bumps the logical level only when the
        # enclosing scope is a bullet.  This fixes problem #3.
        depth = self.logical_indent_level()
        if self.current_type() == "bullet" :
            depth += 1
        self.my_stack.append(self._entry(n, depth, new_type))

    def set_current_type(self, new_type) :
        # Re-tag the topmost scope without changing its indents.
        self.my_stack[-1]['type'] = new_type

    def pop_indent(self) :
        # Never pop the root scope; report column 0 instead.
        if len(self.my_stack) < 2 :
            return 0
        return self.my_stack.pop()['physical']

    def current_indent(self) :
        # Physical column of the topmost scope.
        return self.my_stack[-1]['physical']

    def logical_indent_level(self) :
        # Logical depth of the topmost scope.
        return self.my_stack[-1]['logical']

    def current_type(self) :
        # Paragraph type of the topmost scope.
        return self.my_stack[-1]['type']
## End class INDENT_STACK
# Module-level singleton used by the state machine below.
# NOTE(review): 'global' at module top level is a no-op statement; it is
# kept only to document that the name is shared with the functions above.
global indent_stack
indent_stack = INDENT_STACK()  # single instance
def convert_tabs(s) :
    # Column-aware tab expansion (tab stop = 4), courtesy of Python's
    # str.expandtabs.  If this turns out not to work, we would have to go
    # back to erroring on spaces followed by tabs.
    trace("orig length {0}".format(len(s)) )
    tab_count = s.count("\t")
    expanded = s.expandtabs(4)
    trace("after {0} tab substitutions, end length is {1}".format(tab_count, len(expanded)) )
    return expanded
def fix_prefix_blanks(new_type) :
    # Normalize the leading whitespace of the global 'inputline' to exactly
    # 4 spaces per logical indent level, as Doxia requires.  Fixes problem #2.
    # Don't worry about blank lines here, they are filtered out before calling this method.
    # Both uses and maintains the indent stack, which is why we need the new_type passed in.
    global inputline
    prefix_blanks = re.search(r'^[\s]*', inputline)
    if prefix_blanks :
        prefix_blanks = prefix_blanks.group()
        trace("After prefix-blanks match, prefix_blanks is |" + prefix_blanks + "| length is " + str(len(prefix_blanks)) )
        prefix_blanks = convert_tabs(prefix_blanks)
    else :
        # NOTE(review): r'^[\s]*' always matches (possibly empty), so this
        # branch looks unreachable; kept for safety.
        prefix_blanks = ""
    trace("After convert_tabs, prefix_blanks is |" + prefix_blanks + "| length is " + str(len(prefix_blanks)) )
    # prefix_blanks now contains the 'physical' indent of the current paragraph, after tab substitution.
    # The indent of this paragraph may be > or == to the previous paragraph. Those are the easy cases.
    # If the indent is less than previous, is it equal to the indent of the next lower indented object?
    # Or of a lower yet object? Or is it intermediate between two lower objects currently in the stack?
    # The latter case is an anomaly, but there's no enforcement in Github-MD.
    # The following logic is an empirical reverse engineering, that seems adequate so far.
    # It basically says, find a prior level of indent that this is not less than, and then pretend that
    # the objects between it and this object weren't there.
    trace("current logical_indent_level is {0} and current_indent is {1}".format(
        indent_stack.logical_indent_level(), indent_stack.current_indent() ))
    # Pop scopes that are physically deeper than this line's indent.
    while len(prefix_blanks) < indent_stack.current_indent() :
        indent_stack.pop_indent()
    if len(prefix_blanks) > indent_stack.current_indent() :
        indent_stack.push_indent(len(prefix_blanks), new_type)
    else : # len(prefix_blanks) == indent_stack.current_indent()
        indent_stack.set_current_type(new_type)
    trace(("After evaluating this line's prefix-blanks and prev_type, new logical_indent_level() is {0} " +
        "and current_indent is {1}").format(indent_stack.logical_indent_level(), indent_stack.current_indent() ))
    # Now whack off the prefix blanks, and replace with a standardized string of blanks appropriate to
    # the logical indent level.
    trace("Orig line is " + inputline)
    inputline = re.sub(r'^[\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline, 1)
    trace("New line is " + inputline)
def rewrite_relative_links() :
    # Rewrite relative hyperlinks in the global 'inputline' in place (problem #5):
    #   - implicit directory references gain an explicit 'index.html'
    #   - '.md' / 'README.md' targets become '.html' / 'index.html'
    #   - named anchors matching the Github-MD auto-generated form
    #     (lowercased, hyphenated) are converted to the Doxia form
    #     (case preserved, underscores).
    global inputline
    trace("entering with line: " + inputline)
    # Fix up the relative links in inputline. This fixes problem #5.
    num_links = inputline.count("](")
    links = re.findall(r'\[[^\]]+\]\([^)]+\)', inputline)
    num_whole_links = len(links)
    trace("num_links = {0}, num_whole_links = {1}".format(num_links, num_whole_links))
    if (num_links != num_whole_links) :
        # Mismatch means some "](" did not belong to a complete [label](target)
        # on this line.
        if re.search(r'\[[^\][!]*\![\s]*\[', inputline) :
            # Nested link label expressions, with '!'.
            # Special case where a link value is inlined into the link label,
            # as in the first line of the base README.md file. Bail on such lines.
            trace("WARNING: Found nested link label expressions.")
            return
        else :
            report_error("Found link split across multiple lines. We can't process this.")
    for linkitem in links :
        # Split one link into: "[" + optional junk, label, "](", href, ")".
        pieces = re.search(r'(\[[\s`]*)([^\]]*[^\s`\]])([\s`]*\]\([\s]*)([^\s]+)([\s]*\))', linkitem).groups()
        trace("Link: " + linkitem)
        trace("Pieces: " + " ".join( (pieces[0],pieces[1],pieces[2],pieces[3],pieces[4]) ))
        labeltext = pieces[1]
        href = pieces[3]
        trace("Extracted labeltext is: " + labeltext)
        trace("Extracted href is: " + href)
        if re.search(r'^http|\?', href) :
            # Don't rewrite absolute or parameterized URLs; neither is native to this markdown book.
            trace("skipping absolute or parameterized URL")
            continue
        # Rewrite implicit index references to explicit, so the book will work as well
        # with 'file:///' preview as with a real web server.
        # We are only concerned with file path names here, so split at '#' if present.
        num_sharps = href.count("#")
        if (num_sharps >= 2) :
            report_error("Multiple #'s in a single link href.")
        elif (num_sharps == 1) :
            # Implicit index references are directory names, which seldom have a filetype suffix.
            # On the other hand, explicit file references must have filetype, else the browser
            # won't know what to do with it. So if no filetype extension, assume is a directory
            # and add 'index.html'. Skip if this is an intra-document link.
            if not re.search(r'^#|\.[^/#]+#', href) :
                if not href.count("/#") :
                    href = re.sub(r'#', "/#", href, 1)
                href = re.sub(r'/#', "/index.html#", href, 1)
            # Fix up '.md' references.
            href = re.sub(r'^README\.md#', "index.html#", href)
            href = re.sub(r'/README\.md#', "/index.html#", href)
            href = re.sub(r'\.md#', ".html#", href)
        else : # num_sharps == 0
            # Same logic as above, just at $ instead of #.
            if not re.search(r'\.[^/]+$', href) :
                if not href.endswith("/") :
                    href = href + "/"
                href = re.sub(r'/$', "/index.html", href)
            # Fix up '.md' references.
            href = re.sub(r'^README\.md$', "index.html", href)
            href = re.sub(r'/README\.md$', "/index.html", href)
            href = re.sub(r'\.md$', ".html", href)
        trace("After .md fixup, href is: " + href)
        # Re-write named anchors referring to generated tags.
        sharp = href.find("#")
        if (sharp >= 0) :
            named_anchor = href[sharp+1 : ]
            trace('named_anchor = "' + named_anchor + '"')
            trace('labeltext = "' + labeltext + '"')
            # Only rewrite when the anchor matches what Github-MD would have
            # auto-generated from this label; otherwise leave it alone.
            scratch = labeltext.lower() # Github-MD forces all anchors to lowercase
            scratch = re.sub(r'[\s]', "-", scratch) # convert whitespace to "-"
            scratch = re.sub(EXCLUDED_CHARS_REGEX_GHM, "", scratch) # strip non-alphanumerics
            if (scratch == named_anchor) :
                trace("Found a rewritable case")
                scratch = labeltext # Doxia-markdown doesn't change case
                scratch = re.sub(r'[\s]', "_", scratch) # convert whitespace to "_"
                scratch = re.sub(EXCLUDED_CHARS_REGEX_DOX, "", scratch) # strip non-alphanumerics except "."
                href = re.sub("#" + named_anchor, "#" + scratch, href)
                trace("After anchor rewrite, href is: " + href)
        # Now swap out the bad href for the fixed one in inputline.
        if (href != pieces[3]) :
            # Assemble the full link string to prevent similar substrings (to href) in different contexts being substituted.
            scratch = pieces[0] + pieces[1] + pieces[2] + href + pieces[4]
            trace("Fixed link text is: " + scratch)
            trace("linkitem is still: " + linkitem)
            k = inputline.find(linkitem)
            inputline = inputline[ : k] + scratch + inputline[ k + len(linkitem) : ]
            trace("Fixed inputline is: " + inputline)
################################################
# begin state machine
#
# NOTE(review): 'global' at module top level is a no-op statement; kept
# only to document that these names are shared with the helpers above.
global inputline, active_type
# Pool of spaces used to build standardized indents (4 spaces per logical
# level).  Built programmatically instead of as a fixed-width literal so
# deeply nested documents cannot silently run off the end of the pool.
BLANKS = " " * 64
# Set TRACE to 1 to enable debug tracing via trace().
TRACE = 0
# Line number within the current input file (awk-style FNR).
FNR = -1
trace("Starting trace")
# Github uses relative indents, but doxia wants only and exactly multiples of 4.
# To turn the more forgiving into more regular, we must track both logical and actual indents.
indent_stack.init_indent()
# Paragraph type can be none, text, bullet, code, or heading.
# Note 'current_type()' used in managing the logical indent level on the indent stack,
# and 'active_type' used in the pattern recognition state machine, are deliberately different.
active_type = "none"
# Note: order of the below 'if' clauses is critically important for the state machine.
# Don't change the order.
if len(sys.argv) <= 1 :
    report_error("Please provide names of files to be processed, as command line arguments.")
# Main driver: each file is rewritten to FILENAME.tmp and then moved back
# over the original.  The per-line 'if' chain is an ordered state machine;
# do not reorder the clauses.
for FILENAME in sys.argv[1:] :
    infile = open(FILENAME, 'r')
    outfile = open(FILENAME + ".tmp", 'w')
    FNR = 0
    H1_COUNT = 0
    for inputline in infile :
        FNR += 1
        inputline = inputline.rstrip("\n")
        if '](' in inputline :
            # Detect lines with hyperlinks in them, and re-write them if necessary and possible.
            # This is the only fall-through block, and we put it at the very beginning.
            rewrite_relative_links(); # in inputline
            # Fall through for further processing.
        if (active_type == "code") and ("```" not in inputline) :
            trace("in codeblock, regular line")
            # what happens in the codeblock, stays in the codeblock
            # Put this case first (after link detection), so we don't have to test it in all the other cases.
            print >>outfile, inputline
            continue
        if (active_type == "code") and ("```" in inputline) :
            trace("in codeblock, end delimiter line")
            # detect end of codeblock
            # This must be the second case.
            if re.search(r'```[\s]*[^\s]', inputline) :
                # If there's text following the end-``` on the same line, error out and fix it in the source file.
                report_error("Text following codeblock end delimiter (```) on same line.")
            if re.search(r'```.*```', inputline) :
                # If there are two sets of triple-ticks on the same line, that's a problem too.
                report_error("Two sets of codeblock delimiters (```) on same line.")
            active_type = "none"
            # Force the indenting of the end-``` to match the beginning. This fixes problem #4.
            inputline = re.sub(r'^[\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline)
            print >>outfile, inputline
            continue
        if (active_type != "code") and ("```" in inputline) :
            trace("start codeblock, delimiter line")
            # detect start of codeblock
            if re.search(r'[^\s][\s]*```', inputline) :
                # If there's text preceding the begin-``` on the same line, error out and fix it in the source file.
                report_error("Text preceding codeblock start delimiter (```) on same line.")
            if re.search(r'```.*```', inputline) :
                # If there are two sets of triple-ticks on the same line, that's a problem too.
                report_error("Two sets of codeblock delimiters (```) on same line.")
            if active_type == "text" or active_type == "bullet" :
                print >>outfile, "" # Need preceding blank line before codeblock, in doxia.
            active_type = "code"
            fix_prefix_blanks(active_type) # in inputline
            print >>outfile, inputline
            continue
        if re.search(r'^[\s]*$', inputline) :
            trace("blank line")
            # detect blank lines
            active_type = "none"
            print >>outfile, inputline # Perhaps this should be print "" instead?
            continue
        if re.search(r'^[\s]*([*+-]|[\d]+\.)[\s]', inputline) :
            trace("bullet line")
            # detect bullet line (numbered or not)
            if (active_type == "text") :
                print >>outfile, "" # Need preceding blank line between text and bullet, in doxia. This fixes problem #1.
            active_type = "bullet"
            fix_prefix_blanks(active_type); # in inputline
            print >>outfile, inputline
            continue
        if inputline.startswith("#") :
            trace("header line")
            # detects header lines, which are self-delimiting, and cannot have indenting
            # Header line resets the indenting as well as current type
            active_type = "none"
            indent_stack.init_indent()
            if re.search(r'^#[^#]', inputline) :
                # First-level headers ("H1") need explicit anchor inserted (Doxia style). This fixes problem #6.
                anchor_name = re.sub(r' ', "_", inputline[1:].strip())
                anchor_name = re.sub(EXCLUDED_CHARS_REGEX_DOX, "", anchor_name)
                anchor_text = '<a name="' + anchor_name + '"></a>'
                if H1_COUNT == 0 :
                    # Treat the first header differently - put the header after instead of before
                    # This is necessary to preserve document metadata titling in generated html.
                    # However, it means the title itself gets hidden above the top of window, when the link is used.
                    H1_COUNT = 1
                    print >>outfile, inputline
                    print >>outfile, anchor_text
                    print >>outfile, "" # Anchors aren't self-delimiting, so insert a blank line after.
                else :
                    print >>outfile, "" # Anchors aren't self-delimiting, so insert a blank line first.
                    print >>outfile, anchor_text
                    print >>outfile, inputline
            else :
                # H2 or deeper level of header, doxia auto-generates anchor.
                print >>outfile, inputline
            continue
        if re.search(r'^[\s]*#', inputline) :
            trace("header line, bad")
            report_error("Header specification character (#) detected with indenting. This is presumed to be an error, since it will render as text. If intentional, put a period or other printable character before it.")
        ## default action -- last case in state machine switch
        trace("text line")
        # Everything else is text-like, and therefore continues active_type, unless none.
        if (active_type == "none") :
            # Start new text paragraph.
            active_type = "text"
            fix_prefix_blanks(active_type); # in inputline
            print >>outfile, inputline
            continue
        else :
            # This is just a continuation of current text or bullet.
            # Indenting is irrelevant.
            print >>outfile, inputline
            continue
    ## end loop on inputlines
    if (active_type == "code") :
        report_error("Unmatched codeblock delimiter (```) detected.")
    infile.close()
    outfile.close()
    # Atomically (per-file) replace the original with the rewritten version.
    os.rename(FILENAME + ".tmp", FILENAME)
## end loop on FILENAMEs
trace("ending trace")
| |
from __future__ import print_function
import os
import sys
import subprocess as sp
import re
import pprint
import tempfile
import shutil
import argparse
import codecs
from .popenwrapper import Popen
from .compilers import llvmCompilerPathEnv
from .compilers import elfSectionName
from .compilers import darwinSegmentName
from .compilers import darwinSectionName
from .compilers import getHashedPathName
from .filetype import FileType
from .logconfig import logConfig, informUser
# Module logger, configured by the project's logconfig helper.
_logger = logConfig(__name__)
# Decoder turning a hex string into raw bytes (the 'hex_codec' bytes codec).
decode_hex = codecs.getdecoder("hex_codec")
def extraction():
    """Entry point for extract-bc.

    Parses arguments, then dispatches to the platform-specific handler.
    Returns a process exit code (0 on success, 1 on failure).
    """
    (success, pArgs) = extract_bc_args()
    if not success:
        return 1
    platform = sys.platform
    if platform.startswith(('freebsd', 'linux')):
        return process_file_unix(pArgs)
    if platform.startswith('darwin'):
        return process_file_darwin(pArgs)
    #iam: do we work on anything else?
    _logger.error('Unsupported or unrecognized platform: %s', sys.platform)
    return 1
# File extension for bitcode archives produced by llvm-ar.
bitCodeArchiveExtension = 'bca'
# File extension for linked bitcode modules produced by llvm-link.
moduleExtension = 'bc'
# Environmental variable for cross-compilation target.
binutilsTargetPrefixEnv = 'BINUTILS_TARGET_PREFIX'
def getSectionSizeAndOffset(sectionName, filename):
    """Returns the (size, offset) of the named section, both in bytes.

    Runs objdump (optionally prefixed for cross-compilation via the
    BINUTILS_TARGET_PREFIX environment variable) on the provided binary,
    parses its section-header table, and extracts the size and offset of
    the requested section.  Returns None when the section is not found.
    Exits the process when objdump itself fails.
    """
    targetPrefix = os.getenv(binutilsTargetPrefixEnv)
    objdumpBin = f'{targetPrefix}-objdump' if targetPrefix else 'objdump'
    objdumpProc = Popen([objdumpBin, '-h', '-w', filename], stdout=sp.PIPE)
    objdumpOutput = objdumpProc.communicate()[0]
    if objdumpProc.returncode != 0:
        _logger.error('Could not dump %s', filename)
        sys.exit(-1)
    for rawLine in objdumpOutput.splitlines():
        fields = rawLine.decode('utf-8').split()
        # Section rows have at least 8 columns; column 1 is the name,
        # column 2 the size, column 5 the file offset (both hex).
        if len(fields) <= 7 or fields[1] != sectionName:
            continue
        try:
            return (int(fields[2], 16), int(fields[5], 16))
        except ValueError:
            continue
    # The needed section could not be found.
    _logger.warning('Could not find "%s" ELF section in "%s", so skipping this entry.', sectionName, filename)
    return None
def getSectionContent(size, offset, filename):
    """Reads the entire content of an ELF section and returns it as a string.

    The linker pads sections with NUL bytes; since the real payload is
    plain text (and therefore cannot contain NULs), all NULs are stripped
    from the decoded result.  Raises UnicodeDecodeError (after logging and
    dumping the raw bytes) when the section is not valid UTF-8.
    """
    with open(filename, mode='rb') as f:
        f.seek(offset)
        raw = f.read(size)
        try:
            text = raw.decode('utf-8')
        except UnicodeDecodeError:
            _logger.error('Failed to read section containing:')
            print(raw)
            raise
    return text.replace('\0', '')
# otool hexdata pattern.
# Matches one hex-dump line: an optional 8-16 digit address followed by a
# tab, then the hex payload; group(1) captures the payload.  Case-insensitive
# so both upper- and lower-case hex digits are accepted.
otool_hexdata = re.compile(r'^(?:[0-9a-f]{8,16}\t)?([0-9a-f\s]+)$', re.IGNORECASE)
#iam: 04/09/2021
def convert2octects(otooln):
    """Converts one otool output line into a list of two-character octets.

    The otool output format varies between Intel and M1 chips.
    Intel (one byte per whitespace-separated token):
        0000000000000070 2f 55 73 65 72 73 2f 65 32 37 36 35 38 2f 52 65
    M1 (little-endian multi-byte words):
        000000010000c000 6573552f 692f7372 522f6d61 736f7065
    The input string corresponds to the substring after the tab that
    follows the starting address.
    """
    octets = []
    for token in otooln.split():
        if len(token) == 2:
            # Intel style: the token already is a single octet.
            octets.append(token)
        else:
            # M1 style: split the word into byte pairs and reverse them
            # to undo the little-endian ordering.
            pairs = [token[k:k + 2] for k in range(0, len(token), 2)]
            octets.extend(reversed(pairs))
    return octets
def extract_section_darwin(inputFile):
    """Extracts the embedded section as a list of strings, the darwin version.

    Runs otool to dump the (segment, section) pair, then decodes the hex
    dump into the list of embedded bitcode paths.  Returns None on parse
    failure; exits the process when otool itself fails.

    iam: 04/09/2021 Using otool here is starting to be a real pain.
    The output format varies between XCode versions, and also between
    Intel and M1 chips.
    """
    retval = None
    otoolProc = Popen(['otool', '-X', '-s', darwinSegmentName, darwinSectionName, inputFile], stdout=sp.PIPE)
    rawOutput = otoolProc.communicate()[0]
    if otoolProc.returncode != 0:
        _logger.error('otool failed on %s', inputFile)
        sys.exit(-1)
    lines = rawOutput.decode('utf8').splitlines()
    _logger.debug('otool extracted:\n%s\n', lines)
    # iam 03/06/2021: otool versions prior to "llvm-otool(1): Apple Inc.
    # version cctools-977.1" emitted a 'Contents of (...) section' header
    # as the first line of the extraction; newer versions do not, so drop
    # it only when present.
    if lines and lines[0] and lines[0].startswith('Contents'):
        _logger.debug('dropping header: "%s"', lines[0])
        lines = lines[1:]
    try:
        octets = []
        for line in lines:
            match = otool_hexdata.match(line)
            if match is None:
                _logger.debug('otool output:\n\t%s\nDID NOT match expectations.', line)
                continue
            octets.extend(convert2octects(match.group(1)))
        _logger.debug('We parsed this as:\n%s', octets)
        retval = decode_hex(''.join(octets))[0].splitlines()
        # these have become bytes in the "evolution" of python
        retval = [entry.decode('utf8') for entry in retval]
        _logger.debug('decoded:\n%s\n', retval)
        if not retval:
            _logger.error('%s contained no %s segment', inputFile, darwinSegmentName)
    except Exception as e:
        _logger.error('extract_section_darwin: %s', str(e))
    return retval
def extract_section_linux(inputFile):
    """Extracts the embedded section as a list of strings, the *nix version."""
    found = getSectionSizeAndOffset(elfSectionName, inputFile)
    if found is None:
        # No such section in this object; nothing to extract.
        return []
    (sectionSize, sectionOffset) = found
    entries = getSectionContent(sectionSize, sectionOffset, inputFile).split('\n')
    if not entries:
        _logger.error('%s contained no %s. section is empty', inputFile, elfSectionName)
    return entries
def getStorePath(bcPath):
    """Map a bitcode path to its copy in the WLLVM_BC_STORE directory.

    Returns the store path when the store is configured and the hashed
    entry exists as a file; otherwise returns None.
    """
    store = os.getenv('WLLVM_BC_STORE')
    if not store:
        return None
    candidate = os.path.join(store, getHashedPathName(bcPath))
    if os.path.isfile(candidate):
        return candidate
    return None
def getBitcodePath(bcPath):
    """Tries to resolve the whereabouts of the bitcode.

    Returns the given path unchanged when it is falsy or already points
    at an existing file.  Otherwise looks for a copy in the store
    directory named by WLLVM_BC_STORE, falling back to the original path
    when no store copy exists.
    """
    if not bcPath or os.path.isfile(bcPath):
        return bcPath
    stored = getStorePath(bcPath)
    return stored if stored else bcPath
def linkFiles(pArgs, fileNames):
    """Link the resolved bitcode files into pArgs.outputFile via llvm-link.

    Returns the linker's exit code.  Raises when the linker binary cannot
    be launched at all.
    """
    cmd = [pArgs.llvmLinker]
    if pArgs.verboseFlag:
        cmd.append('-v')
    cmd.append(f'-o={pArgs.outputFile}')
    # Resolve each name through the bitcode store and drop empty entries.
    cmd.extend(p for p in map(getBitcodePath, fileNames) if p != '')
    try:
        linkProc = Popen(cmd)
    except OSError as e:
        if e.errno == 2:
            errorMsg = 'Your llvm-link does not seem to be easy to find.\nEither install it or use the -l llvmLinker option.'
        else:
            errorMsg = f'OS error({e.errno}): {e.strerror}'
        _logger.error(errorMsg)
        raise Exception(errorMsg) from e
    exitCode = linkProc.wait()
    _logger.info('%s returned %s', pArgs.llvmLinker, str(exitCode))
    return exitCode
def archiveFiles(pArgs, fileNames):
    # Build an LLVM bitcode archive (via llvm-ar 'rs') from the given
    # bitcode files.  Returns the archiver's exit code (0 on success).
    retCode = 0
    # We do not want full paths in the archive so we need to chdir into each
    # bitcode's folder. Handle this by calling llvm-ar once for all bitcode
    # files in the same directory
    # Map of directory names to list of bitcode files in that directory
    dirToBCMap = {}
    for bitCodeFile in fileNames:
        dirName = os.path.dirname(bitCodeFile)
        basename = os.path.basename(bitCodeFile)
        if dirName in dirToBCMap:
            dirToBCMap[dirName].append(basename)
        else:
            dirToBCMap[dirName] = [basename]
    _logger.debug('Built up directory to bitcode file list map:\n%s', pprint.pformat(dirToBCMap))
    for (dirname, bcList) in dirToBCMap.items():
        _logger.debug('Changing directory to "%s"', dirname)
        # NOTE(review): successive chdir calls never return to the starting
        # directory, so a *relative* dirname (or a relative outputFile)
        # resolves against the previous iteration's directory — presumably
        # the manifest paths are absolute in practice; verify.
        os.chdir(dirname)
        larCmd = [pArgs.llvmArchiver, 'rs', pArgs.outputFile] + bcList
        larProc = Popen(larCmd)
        retCode = larProc.wait()
        if retCode != 0:
            _logger.error('Failed to execute:\n%s', pprint.pformat(larCmd))
            break
    if retCode == 0:
        informUser(f'Generated LLVM bitcode archive {pArgs.outputFile}\n')
    else:
        _logger.error('Failed to generate LLVM bitcode archive')
    return retCode
def extract_from_thin_archive(inputFile):
    """Extracts the member paths from a thin archive via 'ar -t'.

    Returns the list of member paths (as bytes lines), or None when ar fails.
    """
    arCmd = ['ar', '-t', inputFile] #iam: check if this might be os dependent
    arProc = Popen(arCmd, stdout=sp.PIPE)
    listing = arProc.communicate()[0]
    if arProc.returncode != 0:
        _logger.error('ar failed on %s', inputFile)
        return None
    return listing.splitlines()
def handleExecutable(pArgs):
    """Link the bitcode referenced by a single executable/shared/object file.

    Returns 1 when no embedded bitcode paths are found; otherwise the
    llvm-link exit code.
    """
    paths = pArgs.extractor(pArgs.inputFile)
    if not paths:
        return 1
    paths = sorted(paths) if pArgs.sortBitcodeFilesFlag else paths
    if pArgs.manifestFlag:
        writeManifest(f'{pArgs.inputFile}.llvm.manifest', paths)
    if pArgs.outputFile is None:
        # Default output: <input>.bc alongside the input file.
        pArgs.outputFile = f'{pArgs.inputFile}.{moduleExtension}'
    return linkFiles(pArgs, paths)
def handleThinArchive(pArgs):
    """Collect the bitcode paths referenced by each member of a thin archive
    and hand them to buildArchive.  Returns 1 when the archive has no members.
    """
    memberPaths = extract_from_thin_archive(pArgs.inputFile)
    if not memberPaths:
        return 1
    bcFiles = []
    for member in memberPaths:
        _logger.debug('handleThinArchive: processing %s', member)
        for entry in pArgs.extractor(member):
            if entry:
                _logger.debug('\t including %s', entry)
                bcFiles.append(str(entry))
    return buildArchive(pArgs, bcFiles)
#iam: do we want to preserve the order in the archive? if so we need to return both the list and the dict.
def fetchTOC(inputFile):
    """Build a table of contents for an archive: member name -> occurrence count.

    Returns an empty dict when 'ar -t' fails.
    """
    toc = {}
    arCmd = ['ar', '-t', inputFile] #iam: check if this might be os dependent
    arProc = Popen(arCmd, stdout=sp.PIPE)
    listing = arProc.communicate()[0]
    if arProc.returncode != 0:
        _logger.error('ar failed on %s', inputFile)
        return toc
    for member in listing.splitlines():
        toc[member] = toc.get(member, 0) + 1
    return toc
def extractFile(archive, filename, instance):
    """Extract the instance-th copy of filename from the archive ('ar xN').

    Returns True on success, False when ar cannot be launched or exits
    non-zero.
    """
    arCmd = ['ar', 'xN', str(instance), archive, filename] #iam: check if this might be os dependent
    try:
        arProc = Popen(arCmd)
    except Exception as e:
        _logger.error(e)
        return False
    if arProc.wait() != 0:
        errorMsg = f'Failed to execute archiver with command {arCmd}'
        _logger.error(errorMsg)
        return False
    return True
def handleArchiveDarwin(pArgs):
    # Process a (regular) archive on darwin: extract all members into a
    # temporary directory, collect the bitcode paths embedded in matching
    # object files, then build a bitcode archive/module from them.
    originalDir = os.getcwd() # This will be the destination
    pArgs.arCmd.append(pArgs.inputFile)
    # Make temporary directory to extract objects to
    tempDir = ''
    bitCodeFiles = []
    try:
        # NOTE(review): if mkdtemp itself raises, the finally clause calls
        # shutil.rmtree('') — presumably never happens in practice; verify.
        tempDir = tempfile.mkdtemp(suffix='wllvm')
        os.chdir(tempDir)
        # Extract objects from archive
        try:
            arP = Popen(pArgs.arCmd)
        except OSError as e:
            if e.errno == 2:
                errorMsg = 'Your ar does not seem to be easy to find.\n'
            else:
                errorMsg = f'OS error({e.errno}): {e.strerror}'
            _logger.error(errorMsg)
            raise Exception(errorMsg) from e
        arPE = arP.wait()
        if arPE != 0:
            errorMsg = f'Failed to execute archiver with command {pArgs.arCmd}'
            _logger.error(errorMsg)
            raise Exception(errorMsg)
        # NOTE(review): looks like leftover debugging output — consider a
        # meaningful message or removal.
        _logger.debug(2)
        # Iterate over objects and examine their bitcode inserts
        for (root, _, files) in os.walk(tempDir):
            _logger.debug('Exploring "%s"', root)
            for f in files:
                fPath = os.path.join(root, f)
                if FileType.getFileType(fPath) == pArgs.fileType:
                    # Extract bitcode locations from object
                    contents = pArgs.extractor(fPath)
                    for bcFile in contents:
                        if bcFile != '':
                            if not os.path.exists(bcFile):
                                _logger.warning('%s lists bitcode library "%s" but it could not be found', f, bcFile)
                            else:
                                bitCodeFiles.append(bcFile)
                else:
                    _logger.info('Ignoring file "%s" in archive', f)
        _logger.info('Found the following bitcode file names to build bitcode archive:\n%s', pprint.pformat(bitCodeFiles))
    finally:
        # Delete the temporary folder
        _logger.debug('Deleting temporary folder "%s"', tempDir)
        shutil.rmtree(tempDir)
    #write the manifest file if asked for
    if pArgs.manifestFlag:
        writeManifest(f'{pArgs.inputFile}.llvm.manifest', bitCodeFiles)
    # Build bitcode archive
    os.chdir(originalDir)
    return buildArchive(pArgs, bitCodeFiles)
#iam: 5/1/2018
def handleArchiveLinux(pArgs):
    """ handleArchiveLinux processes an archive, and creates either a bitcode archive, or a module, depending on the flags used.

    Archives on Linux are strange beasts. handleArchive processes the archive by:

    1. first creating a table of contents of the archive, which maps file names (in the archive) to the number of
    times a file with that name is stored in the archive.

    2. for each occurrence of a file (name and count) it extracts the section from the object file, and adds the
    bitcode paths to the bitcode list.

    3. it then either links all these bitcode files together using llvm-link, or else it creates a bitcode
    archive using llvm-ar
    """
    inputFile = pArgs.inputFile
    originalDir = os.getcwd() # We want to end up back where we started.
    toc = fetchTOC(inputFile)
    if not toc:
        _logger.warning('No files found, so nothing to be done.')
        return 0
    bitCodeFiles = []
    # NOTE(review): if mkdtemp raises, 'tempDir' is unbound and the finally
    # clause raises NameError — presumably never happens in practice; verify.
    try:
        tempDir = tempfile.mkdtemp(suffix='wllvm')
        os.chdir(tempDir)
        for filename in toc:
            count = toc[filename]
            for i in range(1, count + 1):
                # extract out the ith instance of filename
                if extractFile(inputFile, filename, i):
                    # Extract bitcode locations from object
                    contents = pArgs.extractor(filename)
                    _logger.debug('From instance %s of %s in %s we extracted\n\t%s\n', i, filename, inputFile, contents)
                    if contents:
                        for path in contents:
                            if path:
                                bitCodeFiles.append(path)
                    else:
                        _logger.debug('From instance %s of %s in %s we extracted NOTHING\n', i, filename, inputFile)
    finally:
        # Delete the temporary folder
        _logger.debug('Deleting temporary folder "%s"', tempDir)
        shutil.rmtree(tempDir)
    _logger.debug('From instance %s we extracted\n\t%s\n', inputFile, bitCodeFiles)
    # Build bitcode archive
    os.chdir(originalDir)
    return buildArchive(pArgs, bitCodeFiles)
def buildArchive(pArgs, bitCodeFiles):
    """Produce either a linked bitcode module or a bitcode archive from the
    collected bitcode files, depending on pArgs.bitcodeModuleFlag.
    Returns the exit code of the underlying llvm tool.
    """
    if pArgs.bitcodeModuleFlag:
        _logger.info('Generating LLVM Bitcode module from an archive')
    else:
        _logger.info('Generating LLVM Bitcode archive from an archive')
    if pArgs.sortBitcodeFilesFlag:
        bitCodeFiles = sorted(bitCodeFiles)
    # Write the manifest file if asked for.
    if pArgs.manifestFlag:
        writeManifest(f'{pArgs.inputFile}.llvm.manifest', bitCodeFiles)
    if pArgs.bitcodeModuleFlag:
        if pArgs.outputFile is None:
            # Default output: <input>.bc alongside the input.
            pArgs.outputFile = pArgs.inputFile + '.' + moduleExtension
        informUser(f'Writing output to {pArgs.outputFile}\n')
        return linkFiles(pArgs, bitCodeFiles)
    if pArgs.outputFile is None:
        # Default output: replace a trailing '.a' with '.bca', else append '.bca'.
        base = pArgs.inputFile[:-2] if pArgs.inputFile.endswith('.a') else pArgs.inputFile
        pArgs.outputFile = base + '.' + bitCodeArchiveExtension
    informUser(f'Writing output to {pArgs.outputFile}\n')
    return archiveFiles(pArgs, bitCodeFiles)
def writeManifest(manifestFile, bitCodeFiles):
    """Write one bitcode path per line to the manifest, followed by its
    bitcode-store copy (when one exists)."""
    with open(manifestFile, 'w') as output:
        for bcFile in bitCodeFiles:
            output.write(f'{bcFile}\n')
            stored = getStorePath(bcFile)
            if stored:
                output.write(f'{stored}\n')
    _logger.warning('Manifest written to %s', manifestFile)
class ExtractedArgs:
    """Argument namespace for extract-bc.

    Passed to argparse as the parse namespace; the non-argparse fields are
    filled in later by the platform dispatch code.
    """

    # Fields, all initialised to None:
    #   fileType   -- FileType constant expected for archive members
    #   outputFile -- destination path (None => derived from the input)
    #   inputFile  -- path of the binary being processed
    #   output     -- absolute form of outputFile
    #   extractor  -- platform-specific section-extraction function
    #   arCmd      -- platform-specific 'ar' extraction command
    def __init__(self):
        for field in ('fileType', 'outputFile', 'inputFile',
                      'output', 'extractor', 'arCmd'):
            setattr(self, field, None)
def extract_bc_args():
    """Parse the extract-bc command line.

    Returns (True, pArgs) on success, (False, None) when the input file or
    the output directory does not exist.  Tool locations honour the
    LLVM_COMPILER_PATH, LLVM_LINK_NAME and LLVM_AR_NAME environment
    variables.
    """
    # do we need a path in front?
    llvmToolPrefix = os.getenv(llvmCompilerPathEnv) or ''
    # is our linker called something different?
    llvmLinker = os.path.join(llvmToolPrefix, os.getenv('LLVM_LINK_NAME') or 'llvm-link')
    # is our archiver called something different?
    llvmArchiver = os.path.join(llvmToolPrefix, os.getenv('LLVM_AR_NAME') or 'llvm-ar')
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(dest='inputFile',
                        help='A binary produced by wllvm/wllvm++')
    parser.add_argument('--linker', '-l',
                        dest='llvmLinker',
                        help='The LLVM bitcode linker to use. Default "%(default)s"',
                        default=llvmLinker)
    parser.add_argument('--archiver', '-a',
                        dest='llvmArchiver',
                        help='The LLVM bitcode archiver to use. Default "%(default)s"',
                        default=llvmArchiver)
    parser.add_argument('--verbose', '-v',
                        dest='verboseFlag',
                        help='Call the external procedures in verbose mode.',
                        action="store_true")
    parser.add_argument('--manifest', '-m',
                        dest='manifestFlag',
                        help='Write a manifest file listing all the .bc files used.',
                        action='store_true')
    parser.add_argument('--sort', '-s',
                        dest='sortBitcodeFilesFlag',
                        help='Sort the list of bitcode files (for debugging).',
                        action='store_true')
    parser.add_argument('--bitcode', '-b',
                        dest='bitcodeModuleFlag',
                        help='Extract a bitcode module rather than an archive. ' +
                        'Only useful when extracting from an archive.',
                        action='store_true')
    parser.add_argument('--output', '-o',
                        dest='outputFile',
                        help='The output file. Defaults to a file in the same directory ' +
                        'as the input with the same name as the input but with an ' +
                        'added file extension (.'+ moduleExtension + ' for bitcode '+
                        'modules and .' + bitCodeArchiveExtension +' for bitcode archives)',
                        default=None)
    pArgs = parser.parse_args(namespace=ExtractedArgs())
    # Check the input file exists.
    if not os.path.exists(pArgs.inputFile):
        _logger.error('File "%s" does not exist.', pArgs.inputFile)
        return (False, None)
    pArgs.inputFile = os.path.abspath(pArgs.inputFile)
    # Check the output destination, if one was given.
    if pArgs.outputFile is not None:
        # Resolve to an absolute path and make sure its directory exists.
        resolved = os.path.abspath(pArgs.outputFile)
        if not os.path.exists(os.path.dirname(resolved)):
            _logger.error('Output directory "%s" does not exist.', os.path.dirname(resolved))
            return (False, None)
        pArgs.output = resolved
    return (True, pArgs)
def process_file_unix(pArgs):
    """Dispatch bitcode extraction for an ELF (Linux) input file.

    Configures the platform-specific archiver command, section extractor and
    object-file type on pArgs, then routes to the handler that matches the
    detected file type. Returns the handler's exit status (1 on unusable input).
    """
    kind = FileType.getFileType(pArgs.inputFile)
    _logger.debug('Detected file type is %s', FileType.revMap[kind])
    # Platform-specific pieces consumed by the handlers below.
    pArgs.arCmd = ['ar', 'xv'] if pArgs.verboseFlag else ['ar', 'x']
    pArgs.extractor = extract_section_linux
    pArgs.fileType = FileType.ELF_OBJECT
    if kind in (FileType.ELF_EXECUTABLE, FileType.ELF_SHARED, FileType.ELF_OBJECT):
        _logger.info('Generating LLVM Bitcode module')
        return handleExecutable(pArgs)
    if kind == FileType.ARCHIVE:
        return handleArchiveLinux(pArgs)
    if kind == FileType.THIN_ARCHIVE:
        return handleThinArchive(pArgs)
    _logger.error('File "%s" of type %s cannot be used', pArgs.inputFile, FileType.revMap[kind])
    return 1
def process_file_darwin(pArgs):
    """Dispatch bitcode extraction for a Mach-O (Darwin) input file.

    Mirrors process_file_unix but with the Darwin ar flags, section extractor
    and object type; thin archives are not supported on this platform.
    Returns the handler's exit status (1 on unusable input).
    """
    kind = FileType.getFileType(pArgs.inputFile)
    _logger.debug('Detected file type is %s', FileType.revMap[kind])
    # Platform-specific pieces consumed by the handlers below.
    pArgs.arCmd = ['ar', '-x', '-v'] if pArgs.verboseFlag else ['ar', '-x']
    pArgs.extractor = extract_section_darwin
    pArgs.fileType = FileType.MACH_OBJECT
    if kind in (FileType.MACH_EXECUTABLE, FileType.MACH_SHARED, FileType.MACH_OBJECT):
        _logger.info('Generating LLVM Bitcode module')
        return handleExecutable(pArgs)
    if kind == FileType.ARCHIVE:
        _logger.info('Handling archive')
        return handleArchiveDarwin(pArgs)
    _logger.error('File "%s" of type %s cannot be used', pArgs.inputFile, FileType.revMap[kind])
    return 1
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._backup_resource_vault_configs_operations import build_get_request, build_put_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BackupResourceVaultConfigsOperations:
    """BackupResourceVaultConfigsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.recoveryservicesbackup.activestamp.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE: this file is AutoRest-generated; hand edits are lost on regeneration.
    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace_async
    async def get(
        self,
        vault_name: str,
        resource_group_name: str,
        **kwargs: Any
    ) -> "_models.BackupResourceVaultConfigResource":
        """Fetches resource vault config.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BackupResourceVaultConfigResource, or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.BackupResourceVaultConfigResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BackupResourceVaultConfigResource"]
        # Status codes the caller sees as typed exceptions instead of HttpResponseError.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            template_url=self.get.metadata['url'],
        )
        # Convert the azure.core.rest request into the pipeline-transport form.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # failsafe_deserialize tolerates malformed error bodies (returns None instead of raising).
            error = self._deserialize.failsafe_deserialize(_models.NewErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('BackupResourceVaultConfigResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupconfig/vaultconfig'}  # type: ignore

    @distributed_trace_async
    async def update(
        self,
        vault_name: str,
        resource_group_name: str,
        parameters: "_models.BackupResourceVaultConfigResource",
        **kwargs: Any
    ) -> "_models.BackupResourceVaultConfigResource":
        """Updates vault security config.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present.
        :type resource_group_name: str
        :param parameters: resource config request.
        :type parameters:
         ~azure.mgmt.recoveryservicesbackup.activestamp.models.BackupResourceVaultConfigResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BackupResourceVaultConfigResource, or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.BackupResourceVaultConfigResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BackupResourceVaultConfigResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the model into the JSON request body.
        _json = self._serialize.body(parameters, 'BackupResourceVaultConfigResource')

        request = build_update_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.NewErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('BackupResourceVaultConfigResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupconfig/vaultconfig'}  # type: ignore

    @distributed_trace_async
    async def put(
        self,
        vault_name: str,
        resource_group_name: str,
        parameters: "_models.BackupResourceVaultConfigResource",
        **kwargs: Any
    ) -> "_models.BackupResourceVaultConfigResource":
        """Updates vault security config.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present.
        :type resource_group_name: str
        :param parameters: resource config request.
        :type parameters:
         ~azure.mgmt.recoveryservicesbackup.activestamp.models.BackupResourceVaultConfigResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BackupResourceVaultConfigResource, or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.BackupResourceVaultConfigResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BackupResourceVaultConfigResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'BackupResourceVaultConfigResource')

        request = build_put_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.put.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.NewErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('BackupResourceVaultConfigResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    put.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupconfig/vaultconfig'}  # type: ignore
| |
import datetime
import http.client
from rdr_service import clock
from rdr_service.dao.biobank_specimen_dao import BiobankSpecimen, BiobankSpecimenDao, BiobankSpecimenAttributeDao,\
BiobankAliquotDatasetItemDao, BiobankAliquotDao, BiobankAliquotDatasetDao
from rdr_service.dao.biobank_order_dao import BiobankOrderDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.model import config_utils
from rdr_service.model.participant import Participant
from rdr_service.model.biobank_order import BiobankOrderIdentifier, BiobankOrderedSample, BiobankOrder, BiobankAliquot
from tests.helpers.unittest_base import BaseTestCase
TIME_1 = datetime.datetime(2020, 4, 1)
TIME_2 = datetime.datetime(2020, 4, 2)
class BiobankOrderApiTest(BaseTestCase):
def setUp(self):
    """Create the participant, summary and biobank order every test relies on.

    All inserts share one DAO session so they are committed together before
    any test body runs.
    """
    super().setUp()
    self.dao = BiobankSpecimenDao()
    with self.dao.session() as session:
        self.participant = Participant(participantId=123, biobankId=555)
        self.participant_dao = ParticipantDao()
        self.participant_dao.insert_with_session(session, self.participant)
        self.summary_dao = ParticipantSummaryDao()
        self.bo_dao = BiobankOrderDao()
        # participant_summary() is a BaseTestCase helper building a summary fixture.
        ParticipantSummaryDao().insert_with_session(session, self.participant_summary(self.participant))
        self.bio_order = self.bo_dao.insert_with_session(session,
                                                         self._make_biobank_order(
                                                             participantId=self.participant.participantId
                                                         ))
def _make_biobank_order(self, **kwargs):
    """Makes a new BiobankOrder (same values every time) with valid/complete defaults.

    Kwargs pass through to BiobankOrder constructor, overriding defaults.
    Each (key, default) pair below is applied only when the caller did not
    supply that key, so partial overrides keep the rest of the fixture intact.
    """
    for k, default_value in (
        ("biobankOrderId", "1"),
        ("created", clock.CLOCK.now()),
        ("participantId", self.participant.participantId),
        ("sourceSiteId", 1),
        ("sourceUsername", "fred@pmi-ops.org"),
        ("collectedSiteId", 1),
        ("collectedUsername", "joe@pmi-ops.org"),
        ("processedSiteId", 1),
        ("processedUsername", "sue@pmi-ops.org"),
        ("finalizedSiteId", 2),
        ("finalizedUsername", "bob@pmi-ops.org"),
        ("identifiers", [BiobankOrderIdentifier(system="a", value="c")]),
        (
            "samples",
            [
                BiobankOrderedSample(
                    biobankOrderId="1",
                    test='2SST8',
                    finalized=TIME_2,
                    description="description",
                    processingRequired=True,
                )
            ],
        ),
    ):
        if k not in kwargs:
            kwargs[k] = default_value
    return BiobankOrder(**kwargs)
def put_specimen(self, payload, expected_status=200):
    """PUT *payload* to the single-specimen endpoint and return the parsed response."""
    url = 'Biobank/specimens/{}'.format(payload['rlimsID'])
    return self.send_put(url, request_data=payload, expected_status=expected_status)
def get_minimal_specimen_json(self, rlims_id='sabrina'):
    """Return the smallest payload the specimen endpoint will accept."""
    return dict(
        rlimsID=rlims_id,
        orderID=self.bio_order.biobankOrderId,
        participantID=config_utils.to_client_biobank_id(self.participant.biobankId),
        testcode='test 1234567',
    )
@staticmethod
def is_matching_json(actual_json, expected_json):
    """Return True when every key in expected_json has an equal value in actual_json."""
    return all(actual_json[key] == expected_json[key] for key in expected_json)
def is_matching_dataset(self, actual_dataset, expected_dataset):
    """Return True when actual_dataset satisfies all expectations in expected_dataset.

    NOTE: mutates expected_dataset — the 'datasetItems' key is deleted once
    its items are verified so the remaining keys can be compared flat.
    """
    if 'datasetItems' in expected_dataset:
        for expected_item in expected_dataset['datasetItems']:
            # Each expected item must match at least one actual item.
            if not any(self.is_matching_json(actual_item, expected_item)
                       for actual_item in actual_dataset['datasetItems']):
                return False
        del expected_dataset['datasetItems']
    return BiobankOrderApiTest.is_matching_json(actual_dataset, expected_dataset)
def is_matching_aliquot(self, actual_aliquot, expected_aliquot):
    """Return True when actual_aliquot satisfies all expectations in expected_aliquot.

    Nested 'status', 'disposalStatus', 'datasets' and child 'aliquots'
    structures are verified first and deleted from expected_aliquot
    (mutating the caller's dict) so the remaining keys compare flat.
    """
    if 'status' in expected_aliquot:
        if not self.is_matching_json(actual_aliquot['status'], expected_aliquot['status']):
            return False
        del expected_aliquot['status']
    if 'disposalStatus' in expected_aliquot:
        if not self.is_matching_json(actual_aliquot['disposalStatus'], expected_aliquot['disposalStatus']):
            return False
        del expected_aliquot['disposalStatus']
    if 'datasets' in expected_aliquot:
        for expected_dataset in expected_aliquot['datasets']:
            if not any(self.is_matching_dataset(actual_dataset, expected_dataset)
                       for actual_dataset in actual_aliquot['datasets']):
                return False
        del expected_aliquot['datasets']
    if 'aliquots' in expected_aliquot:
        # Child aliquots fail the test immediately (assert) rather than returning False.
        self.assertCollectionsMatch(actual_aliquot['aliquots'], expected_aliquot['aliquots'],
                                    self.is_matching_aliquot, 'Expected nested aliquots to match')
        del expected_aliquot['aliquots']
    return self.is_matching_json(actual_aliquot, expected_aliquot)
def assertCollectionsMatch(self, actual_list, expected_list, comparator, message):
    """Fail with *message* unless each expected item matches at least one actual item."""
    for expected_item in (expected_list or []):
        matched = any(comparator(actual_item, expected_item) for actual_item in actual_list)
        if not matched:
            self.fail(message)
def assertSpecimenJsonMatches(self, specimen_json, test_json):
    """Assert that every field present in test_json equals the value in specimen_json.

    Only fields present in test_json are compared, so callers may pass partial
    expectations. Nested 'status', 'disposalStatus', 'attributes' and
    'aliquots' structures are compared with their dedicated helpers.
    """
    for top_level_field in ['rlimsID', 'orderID', 'participantID', 'testcode', 'repositoryID', 'studyID',
                            'cohortID', 'sampleType', 'collectionDate', 'confirmationDate']:
        if top_level_field in test_json:
            self.assertEqual(test_json[top_level_field], specimen_json[top_level_field])
    if 'status' in test_json:
        for status_field in ['status', 'freezeThawCount', 'location', 'quantity', 'quantityUnits',
                             'processingCompleteDate', 'deviations']:
            # BUG FIX: previously this checked `status_field in test_json` (the top-level
            # dict), which silently skipped every nested status comparison whose key did
            # not also happen to exist at top level. Check the nested dict, mirroring the
            # disposalStatus branch below.
            if status_field in test_json['status']:
                self.assertEqual(test_json['status'][status_field], specimen_json['status'][status_field])
    if 'disposalStatus' in test_json:
        for disposal_field in ['reason', 'disposalDate']:
            if disposal_field in test_json['disposalStatus']:
                self.assertEqual(test_json['disposalStatus'][disposal_field],
                                 specimen_json['disposalStatus'][disposal_field])
    if 'attributes' in test_json:
        self.assertCollectionsMatch(specimen_json['attributes'], test_json['attributes'], self.is_matching_json,
                                    'Expected attributes to match')
    if 'aliquots' in test_json:
        self.assertCollectionsMatch(specimen_json['aliquots'], test_json['aliquots'], self.is_matching_aliquot,
                                    'Expected aliquots to match')
def get_specimen_from_dao(self, _id=None, rlims_id=None):
    """Load exactly one BiobankSpecimen, by rlims_id when given, else by primary key."""
    with self.dao.session() as session:
        criterion = (BiobankSpecimen.rlimsId == rlims_id) if rlims_id is not None \
            else (BiobankSpecimen.id == _id)
        return session.query(BiobankSpecimen).filter(criterion).one()
def retrieve_specimen_json(self, specimen_id):
    """Fetch the stored specimen by id and render it as client JSON."""
    return self.dao.to_client_json(self.get_specimen_from_dao(_id=specimen_id))
@staticmethod
def get_only_item_from_dao(dao):
    """Return the sole item a DAO holds (the fixtures create exactly one)."""
    items = dao.get_all()
    return items[0]
def test_put_new_specimen_minimal_data(self):
    """A minimal payload round-trips through the single-specimen PUT endpoint."""
    request_json = self.get_minimal_specimen_json()
    rlims_id = request_json['rlimsID']
    response = self.send_put('Biobank/specimens/' + rlims_id, request_data=request_json)
    stored_json = self.retrieve_specimen_json(response['id'])
    self.assertSpecimenJsonMatches(stored_json, request_json)
def test_put_specimen_with_non_existent_participant(self):
    """A correctly prefixed biobank id that is absent from the database is rejected."""
    request_json = self.get_minimal_specimen_json()
    # 123123 was never inserted, so the server-side participant lookup must fail.
    request_json['participantID'] = config_utils.to_client_biobank_id(123123)
    self.put_specimen(request_json, expected_status=400)
def test_nonexistent_order_id(self):
    """An orderID with no matching biobank order is still accepted and stored."""
    request_json = self.get_minimal_specimen_json()
    request_json['orderID'] = 'SOMETHING_MISSING_IN_DATABASE'
    response = self.put_specimen(request_json)
    stored_json = self.retrieve_specimen_json(response['id'])
    self.assertSpecimenJsonMatches(stored_json, request_json)
@staticmethod
def _add_specimen_data_to_payload(payload):
    """Fill *payload* in place with every optional specimen field the API accepts."""
    payload.update({
        'repositoryID': 'repo id',
        'studyID': 'study id',
        'cohortID': 'cohort id',
        'sampleType': 'sample',
        'status': {
            'status': 'Disposed',
            'freezeThawCount': 1,
            'location': 'Greendale',
            'quantity': '1',
            'quantityUnits': 'some units',
            'processingCompleteDate': TIME_2.isoformat(),
            'deviations': 'no deviation'
        },
        'disposalStatus': {
            'reason': 'contaminated',
            'disposalDate': TIME_2.isoformat()
        },
        'attributes': [
            {
                'name': 'attr_one',
                'value': '1'
            },
            {
                'name': 'attr_two',
                'value': 'two'
            }
        ],
        'collectionDate': TIME_1.isoformat(),
        'confirmationDate': TIME_2.isoformat()
    })
def test_put_new_specimen_all_data(self):
    """A fully populated payload round-trips through the PUT endpoint."""
    request_json = self.get_minimal_specimen_json()
    self._add_specimen_data_to_payload(request_json)
    response = self.put_specimen(request_json)
    self.assertSpecimenJsonMatches(self.retrieve_specimen_json(response['id']), request_json)
def test_allow_for_null_collections(self):
    """An explicit null attributes list is tolerated by the single-specimen PUT."""
    request_json = self.get_minimal_specimen_json()
    request_json['attributes'] = None
    response = self.put_specimen(request_json)
    self.assertSpecimenJsonMatches(self.retrieve_specimen_json(response['id']), request_json)
def test_allow_for_null_collections_on_migration(self):
    """An explicit null attributes list is tolerated by the bulk migration PUT."""
    request_json = self.get_minimal_specimen_json()
    request_json['attributes'] = None
    self.send_put('Biobank/specimens', request_data=[request_json])
    stored = self.get_specimen_from_dao(rlims_id=request_json['rlimsID'])
    self.assertSpecimenJsonMatches(self.dao.to_client_json(stored), request_json)
def test_clear_specimen_data(self):
    """Empty-string values in a later PUT clear the previously stored fields."""
    payload = self.get_minimal_specimen_json()
    self._add_specimen_data_to_payload(payload)
    initial_result = self.put_specimen(payload)
    # Re-send the same specimen with every optional field blanked out.
    payload.update({
        'repositoryID': '',
        'studyID': '',
        'cohortID': '',
        'sampleType': '',
        'status': {
            'status': '',
            'freezeThawCount': 1,
            'location': '',
            'quantity': '',
            'quantityUnits': '',
            'processingCompleteDate': '',
            'deviations': ''
        },
        'disposalStatus': {
            'reason': '',
            'disposalDate': ''
        },
        'collectionDate': '',
        'confirmationDate': ''
    })
    self.put_specimen(payload)
    saved_specimen_client_json = self.retrieve_specimen_json(initial_result['id'])

    # Dates are set to set to None when cleared, so those fields are missing when converting specimen to json
    del payload['confirmationDate']
    del payload['collectionDate']
    del payload['disposalStatus']['disposalDate']
    del payload['status']['processingCompleteDate']
    self.assertSpecimenJsonMatches(saved_specimen_client_json, payload)
def test_put_specimen_exists(self):
    """PUTting an rlimsID that already exists updates the stored specimen."""
    request_json = self.get_minimal_specimen_json()
    first_response = self.put_specimen(request_json)
    request_json['testcode'] = 'updated testcode'
    self.put_specimen(request_json)
    self.assertSpecimenJsonMatches(self.retrieve_specimen_json(first_response['id']), request_json)
def test_empty_disposal_status_given(self):
    """An empty disposalStatus object does not block other field updates."""
    request_json = self.get_minimal_specimen_json()
    request_json['testcode'] = 'disposal test'
    request_json['disposalStatus'] = {}
    response = self.put_specimen(request_json)
    stored = self.get_specimen_from_dao(_id=response['id'])
    self.assertEqual('disposal test', stored.testCode)
def test_empty_disposal_status_leaves_fields(self):
    """An empty disposal status must not wipe previously stored disposal fields."""
    request_json = self.get_minimal_specimen_json()
    request_json['disposalStatus'] = {
        'reason': 'yolo',
        'disposalDate': TIME_2.isoformat()
    }
    first_response = self.put_specimen(request_json)
    request_json['disposalStatus'] = {}
    self.put_specimen(request_json)
    stored = self.get_specimen_from_dao(_id=first_response['id'])
    self.assertEqual('yolo', stored.disposalReason)
    self.assertEqual(TIME_2, stored.disposalDate)
def test_optional_args_not_cleared(self):
    """Omitting an optional field on a later PUT leaves its stored value intact."""
    first_request = self.get_minimal_specimen_json()
    first_request['sampleType'] = 'test type'
    first_response = self.put_specimen(first_request)
    # Second request omits the optional sampleType field entirely.
    self.put_specimen(self.get_minimal_specimen_json())
    stored_json = self.retrieve_specimen_json(first_response['id'])
    self.assertSpecimenJsonMatches(stored_json, first_request)
    self.assertEqual(stored_json['sampleType'], 'test type')
def test_add_attribute_to_existing_specimen(self):
    """Attributes can be added to a specimen that already exists."""
    request_json = self.get_minimal_specimen_json()
    first_response = self.put_specimen(request_json)
    request_json['attributes'] = [{'name': 'test', 'value': '123'}]
    self.put_specimen(request_json)
    self.assertSpecimenJsonMatches(self.retrieve_specimen_json(first_response['id']), request_json)
    attribute = self.get_only_item_from_dao(BiobankSpecimenAttributeDao())
    self.assertEqual(attribute.specimen_rlims_id, 'sabrina')
def test_replacing_attributes(self):
    """A new attributes list fully replaces the previously stored one."""
    request_json = self.get_minimal_specimen_json()
    request_json['attributes'] = [
        {'name': 'attr_one', 'value': '1'},
        {'name': 'attr_two', 'value': 'two'},
    ]
    first_response = self.put_specimen(request_json)
    request_json['attributes'] = [{'name': 'test', 'value': '123'}]
    self.put_specimen(request_json)
    self.assertSpecimenJsonMatches(self.retrieve_specimen_json(first_response['id']), request_json)
def test_update_attribute(self):
    """Re-sending an attribute with a new value updates the existing row in place."""
    request_json = self.get_minimal_specimen_json()
    request_json['attributes'] = [{'name': 'attr_one', 'value': '1'}]
    self.put_specimen(request_json)
    attribute_dao = BiobankSpecimenAttributeDao()
    before = self.get_only_item_from_dao(attribute_dao)
    request_json['attributes'] = [{'name': 'attr_one', 'value': '123'}]
    self.put_specimen(request_json)
    after = self.get_only_item_from_dao(attribute_dao)
    # Same row id proves an update, not a delete-and-insert.
    self.assertEqual(before.id, after.id)
    self.assertEqual(after.value, '123')
def test_put_minimal_aliquot_data(self):
    """Aliquots may be created with nothing but an rlimsID."""
    request_json = self.get_minimal_specimen_json()
    request_json['aliquots'] = [{"rlimsID": "aliquot_one"}, {"rlimsID": "second"}]
    response = self.put_specimen(request_json)
    self.assertSpecimenJsonMatches(self.retrieve_specimen_json(response['id']), request_json)
def test_put_simple_aliquot(self):
    """A single aliquot with all scalar and nested status fields round-trips."""
    payload = self.get_minimal_specimen_json()
    payload['aliquots'] = [
        {
            "rlimsID": "other",
            "sampleType": "test",
            "status": {
                "status": "Disposed",
                "freezeThawCount": 3,
                "location": "biobank",
                "quantity": "5",
                "quantityUnits": "tube",
                "processingCompleteDate": TIME_1.isoformat(),
                "deviations": "no deviations"
            },
            "disposalStatus": {
                "reason": "garbage",
                "disposalDate": TIME_2.isoformat()
            },
            "childPlanService": "feed",
            "initialTreatment": "pill",
            "containerTypeID": "tubular",
        }
    ]
    result = self.put_specimen(payload)
    saved_specimen_client_json = self.retrieve_specimen_json(result['id'])
    self.assertSpecimenJsonMatches(saved_specimen_client_json, payload)

    # The stored aliquot must be linked back to its parent specimen.
    aliquot = self.get_only_item_from_dao(BiobankAliquotDao())
    self.assertEqual(aliquot.specimen_rlims_id, 'sabrina')
def test_update_aliquot(self):
    """Re-sending an aliquot with a changed field updates the existing row in place."""
    request_json = self.get_minimal_specimen_json()
    request_json['aliquots'] = [{"rlimsID": "other", "sampleType": "test", "childPlanService": "feed"}]
    self.put_specimen(request_json)
    aliquot_dao = BiobankAliquotDao()
    before = self.get_only_item_from_dao(aliquot_dao)
    request_json['aliquots'][0]['sampleType'] = 'check'
    self.put_specimen(request_json)
    after = self.get_only_item_from_dao(aliquot_dao)
    # Same row id proves an update, not a delete-and-insert.
    self.assertEqual(before.id, after.id)
    self.assertEqual(after.sampleType, 'check')
def test_put_simple_aliquot_dataset(self):
    """Datasets nested under an aliquot are stored and linked back to it."""
    request_json = self.get_minimal_specimen_json()
    request_json['aliquots'] = [{
        'rlimsID': 'other',
        'datasets': [{'rlimsID': 'data_id', 'name': 'test set', 'status': 'nested'}],
    }]
    response = self.put_specimen(request_json)
    self.assertSpecimenJsonMatches(self.retrieve_specimen_json(response['id']), request_json)
    dataset = self.get_only_item_from_dao(BiobankAliquotDatasetDao())
    self.assertEqual(dataset.aliquot_rlims_id, 'other')
def test_update_aliquot_dataset(self):
    """Re-sending a dataset with a changed status updates the existing row in place."""
    payload = self.get_minimal_specimen_json()
    payload['aliquots'] = [
        {
            'rlimsID': 'other',
            'datasets': [
                {
                    'rlimsID': 'first_data_set',
                    'name': 'placeholder',
                    'status': 'nested'
                },
                {
                    'rlimsID': 'data_id',
                    'name': 'test set',
                    'status': 'nested'
                }
            ]
        }
    ]
    self.put_specimen(payload)

    dataset_dao = BiobankAliquotDatasetDao()
    # NOTE: get_only_item_from_dao returns the first row; this relies on the
    # first dataset in the payload being the one compared below.
    initial_dataset = self.get_only_item_from_dao(dataset_dao)

    payload['aliquots'][0]['datasets'][0]['status'] = 'updated'
    self.put_specimen(payload)

    final_dataset = self.get_only_item_from_dao(dataset_dao)
    # Same row id proves an update, not a delete-and-insert.
    self.assertEqual(initial_dataset.id, final_dataset.id)
    self.assertEqual(final_dataset.status, 'updated')
def test_put_simple_aliquot_dataset_items(self):
    """Dataset items nested two levels deep are stored and linked to their dataset."""
    payload = self.get_minimal_specimen_json()
    payload['aliquots'] = [
        {
            'rlimsID': 'other',
            'datasets': [
                {
                    'rlimsID': 'data_id',
                    'datasetItems': [
                        {
                            'paramID': 'param1',
                            'displayValue': 'One',
                            'displayUnits': 'param'
                        }
                    ]
                }
            ]
        }
    ]
    result = self.put_specimen(payload)
    saved_specimen_client_json = self.retrieve_specimen_json(result['id'])
    self.assertSpecimenJsonMatches(saved_specimen_client_json, payload)

    dataset_item = self.get_only_item_from_dao(BiobankAliquotDatasetItemDao())
    self.assertEqual(dataset_item.dataset_rlims_id, 'data_id')
def test_update_aliquot_dataset_item(self):
    """Re-sending a dataset item with a changed field updates the row in place."""
    payload = self.get_minimal_specimen_json()
    payload['aliquots'] = [
        {
            'rlimsID': 'other',
            'datasets': [
                {
                    'rlimsID': 'data_id',
                    'datasetItems': [
                        {
                            'paramID': 'param1',
                            'displayValue': 'One',
                            'displayUnits': 'param'
                        }
                    ]
                }
            ]
        }
    ]
    self.put_specimen(payload)

    dataset_item_dao = BiobankAliquotDatasetItemDao()
    initial_dataset_item = self.get_only_item_from_dao(dataset_item_dao)

    payload['aliquots'][0]['datasets'][0]['datasetItems'][0]['displayUnits'] = 'params'
    self.put_specimen(payload)

    final_dataset_item = self.get_only_item_from_dao(dataset_item_dao)
    # Same row id proves an update, not a delete-and-insert.
    self.assertEqual(initial_dataset_item.id, final_dataset_item.id)
    self.assertEqual(final_dataset_item.displayUnits, 'params')
def test_put_nested_aliquots(self):
    """Three levels of nested aliquots are stored with specimen and parent links."""
    payload = self.get_minimal_specimen_json()
    payload['aliquots'] = [
        {
            'rlimsID': 'grandparent',
            'aliquots': [
                {
                    'rlimsID': 'parent',
                    'aliquots': [
                        {
                            'rlimsID': 'child'
                        }
                    ]
                }
            ]
        }
    ]
    result = self.put_specimen(payload)
    saved_specimen_client_json = self.retrieve_specimen_json(result['id'])
    self.assertSpecimenJsonMatches(saved_specimen_client_json, payload)

    aliquot_dao = BiobankAliquotDao()
    with aliquot_dao.session() as session:
        grand_child_aliquot = session.query(BiobankAliquot).filter(BiobankAliquot.rlimsId == 'child').one()
        # Even the deepest aliquot links back to the root specimen and its direct parent.
        self.assertEqual(grand_child_aliquot.specimen_rlims_id, 'sabrina')
        self.assertEqual(grand_child_aliquot.parent_aliquot_rlims_id, 'parent')
def test_deeply_nested_aliquots(self):
    """Twenty-one levels of aliquot nesting persist with correct parent links."""
    request_json = self.get_minimal_specimen_json()
    current = {'rlimsID': 'root'}
    request_json['aliquots'] = [current]
    # Chain a child under the current deepest aliquot, twenty times.
    for depth in range(20):
        child = {'rlimsID': f'aliquot_descendant_{depth}'}
        current['aliquots'] = [child]
        current = child
    self.put_specimen(request_json)
    with BiobankAliquotDao().session() as session:
        deepest = session.query(BiobankAliquot).filter(
            BiobankAliquot.rlimsId == 'aliquot_descendant_19').one()
        self.assertEqual(deepest.parent_aliquot_rlims_id, 'aliquot_descendant_18')
def test_put_multiple_specimen(self):
    """The bulk migration endpoint accepts a list of specimen payloads."""
    specimens = [self.get_minimal_specimen_json(rlims_id) for rlims_id in ('sabrina', 'salem')]
    specimens[0]['testcode'] = 'migration'
    specimens[1]['testcode'] = 'checking'
    response = self.send_put('Biobank/specimens', request_data=specimens)
    self.assertJsonResponseMatches(response, {
        'summary': {'total_received': 2, 'success_count': 2}
    })
def test_update_multiple_specimen(self):
    """A second bulk PUT updates specimens created by the first batch."""
    specimens = [self.get_minimal_specimen_json(rlims_id)
                 for rlims_id in ('one', 'two', 'three', 'four', 'five')]
    initial_test_code = specimens[0]['testcode']
    response = self.send_put('Biobank/specimens', request_data=specimens)
    self.assertJsonResponseMatches(response, {
        'summary': {'total_received': 5, 'success_count': 5}
    })
    third = self.get_specimen_from_dao(rlims_id='three')
    self.assertEqual(third.testCode, initial_test_code)
    fifth = self.get_specimen_from_dao(rlims_id='five')
    self.assertEqual(fifth.testCode, initial_test_code)
    # Change two of the batch and resend; only those should change.
    specimens[2]['testcode'] = 'third test code'
    specimens[4]['testcode'] = 'checking last too'
    self.send_put('Biobank/specimens', request_data=specimens)
    third = self.get_specimen_from_dao(_id=third.id)
    self.assertEqual(third.testCode, 'third test code')
    fifth = self.get_specimen_from_dao(_id=fifth.id)
    self.assertEqual(fifth.testCode, 'checking last too')
def test_error_missing_fields_specimen_migration(self):
    """Bad specimens in a bulk PUT are reported per item while the rest succeed."""
    specimens = [self.get_minimal_specimen_json(rlims_id) for rlims_id in ['sabrina', 'two', 'salem',
                                                                           'invalid', 'bob', 'missing']]
    del specimens[0]['testcode']
    del specimens[0]['orderID']
    del specimens[1]['rlimsID']
    del specimens[1]['orderID']
    del specimens[2]['testcode']

    # Add aliquot with invalid data to cause an exception in the API code.
    # We need to cause an exception we haven't built code around (rather than something like an error we would
    # raise on missing fields). It should be something we could crash on without having built
    # code to specifically handle the scenario.
    # The goal is to test that the endpoint gracefully handles any errors for individual specimen and continues
    # to process the rest of the request.
    specimens[3]['aliquots'] = [
        {
            'rlimsID': 'matching'
        }, {
            'rlimsID': 'matching'
        }
    ]

    # Check for migration error about missing biobank ids
    specimens[5]['participantID'] = config_utils.to_client_biobank_id(123123)

    result = self.send_put(f"Biobank/specimens", request_data=specimens)
    # Only 'bob' is valid, so exactly one success and one error entry per bad item.
    self.assertJsonResponseMatches(result, {
        'summary': {
            'total_received': 6,
            'success_count': 1
        },
        'errors': [
            {
                'rlimsID': 'sabrina',
                'error': 'Missing fields: orderID, testcode'
            }, {
                'rlimsID': '',
                'error': 'Missing fields: rlimsID, orderID',
            }, {
                'rlimsID': 'salem',
                'error': 'Missing fields: testcode'
            }, {
                # WARNING: read the note above and ensure the error for this specimen is caught by the
                # catch-all for any specimen errors
                'rlimsID': 'invalid',
                'error': 'Unknown error'
            }, {
                'rlimsID': 'missing',
                'error': 'Biobank id Z123123 does not exist'
            }
        ]
    })

    successful_specimen = self.get_specimen_from_dao(rlims_id='bob')
    self.assertIsNotNone(successful_specimen)
def _create_minimal_specimen(self, rlims_id='sabrina'):
    """Create a specimen from the minimal payload and return the API response."""
    payload = self.get_minimal_specimen_json(rlims_id)
    return self.put_specimen(payload)
def test_parent_status_updated_all_fields(self):
    """Every field sent to the specimen status endpoint should be persisted."""
    self._create_minimal_specimen()
    specimen = self.get_specimen_from_dao(rlims_id='sabrina')
    status_payload = {
        'status': 'new',
        'freezeThawCount': 8,
        'location': 'Washington',
        'quantity': '3',
        'quantityUnits': 'some units',
        'processingCompleteDate': TIME_2.isoformat(),
        'deviations': 'no deviation'
    }
    self.send_put('Biobank/specimens/sabrina/status', status_payload)
    reloaded = self.get_specimen_from_dao(_id=specimen.id)
    self.assertEqual('new', reloaded.status)
    self.assertEqual(8, reloaded.freezeThawCount)
    self.assertEqual('Washington', reloaded.location)
    self.assertEqual('3', reloaded.quantity)
    self.assertEqual('some units', reloaded.quantityUnits)
    self.assertEqual(TIME_2, reloaded.processingCompleteDate)
    self.assertEqual('no deviation', reloaded.deviations)
def test_parent_status_updated_required_fields(self):
    """The status endpoint should work when only the required 'status' field is sent."""
    self._create_minimal_specimen()
    specimen = self.get_specimen_from_dao(rlims_id='sabrina')
    self.assertIsNone(specimen.status)
    # Fixed: removed useless f-string prefix (literal has no placeholders, ruff F541).
    self.send_put("Biobank/specimens/sabrina/status", {
        'status': 'updated'
    })
    specimen = self.get_specimen_from_dao(_id=specimen.id)
    self.assertEqual('updated', specimen.status)
def test_parent_status_update_not_found(self):
    """Updating the status of a specimen that was never created should return a 404."""
    # Fixed: removed useless f-string prefix (literal has no placeholders, ruff F541).
    self.send_put("Biobank/specimens/sabrina/status", {
        'status': 'updated'
    }, expected_status=http.client.NOT_FOUND)
def test_parent_disposed_all_fields(self):
    """All disposal fields sent to the disposalStatus endpoint should be saved."""
    self._create_minimal_specimen()
    specimen = self.get_specimen_from_dao(rlims_id='sabrina')
    disposal_payload = {
        'reason': 'contaminated',
        'disposalDate': TIME_2.isoformat()
    }
    self.send_put('Biobank/specimens/sabrina/disposalStatus', disposal_payload)
    reloaded = self.get_specimen_from_dao(_id=specimen.id)
    self.assertEqual('contaminated', reloaded.disposalReason)
    self.assertEqual(TIME_2, reloaded.disposalDate)
def test_parent_disposed_optional_fields_not_cleared(self):
    """A partial disposalStatus update should leave the other disposal fields intact."""
    payload = self.get_minimal_specimen_json()
    payload['disposalStatus'] = {
        'reason': 'contaminated',
        'disposalDate': TIME_2.isoformat()
    }
    self.put_specimen(payload)
    specimen = self.get_specimen_from_dao(rlims_id='sabrina')
    # Fixed: removed useless f-string prefix (literal has no placeholders, ruff F541).
    self.send_put("Biobank/specimens/sabrina/disposalStatus", {
        'disposalDate': TIME_1.isoformat()
    })
    specimen = self.get_specimen_from_dao(_id=specimen.id)
    # Only the date was sent, so only the date should have changed.
    self.assertEqual('contaminated', specimen.disposalReason)
    self.assertEqual(TIME_1, specimen.disposalDate)
def test_parent_disposal_date_not_required(self):
    """The disposalStatus endpoint should accept a payload without a disposalDate."""
    self._create_minimal_specimen()
    specimen = self.get_specimen_from_dao(rlims_id='sabrina')
    self.assertIsNone(specimen.disposalDate)
    # Fixed: removed useless f-string prefix (literal has no placeholders, ruff F541).
    self.send_put("Biobank/specimens/sabrina/disposalStatus", {
        'reason': 'test'
    })
    specimen = self.get_specimen_from_dao(_id=specimen.id)
    self.assertEqual('test', specimen.disposalReason)
def test_disposal_endpoint_sets_status(self):
    """PUTting anything to /disposalStatus should set the specimen status to 'Disposed'."""
    payload = self.get_minimal_specimen_json()
    payload['status'] = {'status': 'In Circulation'}
    self.put_specimen(payload)
    # Fixed: removed useless f-string prefix (literal has no placeholders, ruff F541).
    self.send_put("Biobank/specimens/sabrina/disposalStatus", {
        'reason': 'test'
    })
    specimen = self.get_specimen_from_dao(rlims_id='sabrina')
    self.assertEqual('Disposed', specimen.status)
def test_status_endpoint_clears_disposal(self):
    """PUTting anything to /status should clear the specimen's disposal fields."""
    payload = self.get_minimal_specimen_json()
    payload['disposalStatus'] = {
        'reason': 'mistake',
        'disposalDate': TIME_2.isoformat()
    }
    self.put_specimen(payload)
    # Fixed: removed useless f-string prefix (literal has no placeholders, ruff F541).
    self.send_put("Biobank/specimens/sabrina/status", {
        'status': 'updated'
    })
    specimen = self.get_specimen_from_dao(rlims_id='sabrina')
    self.assertEqual('', specimen.disposalReason)
    # Fixed: assertIsNone is the idiomatic form of assertEqual(None, ...).
    self.assertIsNone(specimen.disposalDate)
def test_disposal_sets_status(self):
    """Providing disposed fields on primary endpoint should set status"""
    # Start with a specimen that has an explicit, non-disposed status.
    specimen_json = self.get_minimal_specimen_json()
    specimen_json['status'] = {'status': 'In Circulation'}
    self.put_specimen(specimen_json)
    # Resubmit with disposal information instead of a status section.
    del specimen_json['status']
    specimen_json['disposalStatus'] = {
        'reason': 'mistake',
        'disposalDate': TIME_2.isoformat()
    }
    self.put_specimen(specimen_json)
    # Disposing the specimen should have flipped its status.
    stored = self.get_specimen_from_dao(rlims_id='sabrina')
    self.assertEqual('Disposed', stored.status)
def test_status_update_clears_disposal(self):
    """Setting status on primary endpoint should clear disposal fields"""
    # Initialize specimen
    payload = self.get_minimal_specimen_json()
    payload['disposalStatus'] = {
        'reason': 'mistake',
        'disposalDate': TIME_2.isoformat()
    }
    self.put_specimen(payload)
    # Set a new status
    payload['status'] = {'status': 'In Circulation'}
    del payload['disposalStatus']
    self.put_specimen(payload)
    # Check that setting a status clears the disposal fields
    specimen = self.get_specimen_from_dao(rlims_id='sabrina')
    self.assertEqual('', specimen.disposalReason)
    # Fixed: assertIsNone is the idiomatic form of assertEqual(None, ...).
    self.assertIsNone(specimen.disposalDate)
def test_empty_disposal_leaves_status(self):
    """Providing empty disposed fields on primary endpoint should not set status"""
    # Store a specimen whose disposal section is present but entirely blank.
    specimen_json = self.get_minimal_specimen_json()
    specimen_json['status'] = {'status': 'In Circulation'}
    specimen_json['disposalStatus'] = {
        'reason': '',
        'disposalDate': ''
    }
    self.put_specimen(specimen_json)
    # The blank disposal section should not have forced a 'Disposed' status.
    stored = self.get_specimen_from_dao(rlims_id='sabrina')
    self.assertEqual('In Circulation', stored.status)
def test_disposed_status_update_leaves_disposal(self):
    """Setting status to 'Disposed' (maybe redundant) shouldn't clear disposal fields"""
    # Initialize specimen
    payload = self.get_minimal_specimen_json()
    payload['disposalStatus'] = {
        'reason': 'mistake',
        'disposalDate': TIME_2.isoformat()
    }
    self.put_specimen(payload)
    # Resend status
    # NOTE(review): 'status' is a bare string here, while the other tests nest it
    # as {'status': ...} — presumably intentional for this scenario; confirm.
    payload['status'] = 'Disposed'
    del payload['disposalStatus']
    self.put_specimen(payload)
    # Check that re-sending a 'Disposed' status leaves the disposal fields intact
    # (the original comment here incorrectly said the fields get cleared).
    specimen = self.get_specimen_from_dao(rlims_id='sabrina')
    self.assertEqual('mistake', specimen.disposalReason)
    self.assertEqual(TIME_2, specimen.disposalDate)
def test_parent_disposed_not_found(self):
    """Updating a specimen that was never created should return a 404."""
    # Fixed: removed useless f-string prefix (literal has no placeholders, ruff F541).
    # NOTE(review): despite the test name, this hits the /status endpoint with a
    # disposal field — presumably any update suffices for a not-found check; confirm.
    self.send_put("Biobank/specimens/sabrina/status", {
        'disposalDate': TIME_1.isoformat()
    }, expected_status=http.client.NOT_FOUND)
def test_parent_attribute_created(self):
    """PUTting to the attributes endpoint should add an attribute to the specimen."""
    created = self._create_minimal_specimen()
    specimen_json = self.retrieve_specimen_json(created['id'])
    self.assertIsNone(specimen_json['attributes'])
    self.send_put('Biobank/specimens/sabrina/attributes/attr1', {
        'value': 'test attribute'
    })
    specimen_json = self.retrieve_specimen_json(specimen_json['id'])
    first_attribute = specimen_json['attributes'][0]
    self.assertEqual('attr1', first_attribute['name'])
    self.assertEqual('test attribute', first_attribute['value'])
def test_parent_attribute_update(self):
    """PUTting to an existing attribute name should replace its value."""
    specimen_json = self.get_minimal_specimen_json()
    specimen_json['attributes'] = [
        {'name': 'attr_one', 'value': '1'},
        {'name': 'attr_two', 'value': 'two'},
    ]
    created = self.put_specimen(specimen_json)
    self.send_put('Biobank/specimens/sabrina/attributes/attr_one', {
        'value': 'updated'
    })
    stored = self.retrieve_specimen_json(created['id'])
    self.assertEqual('updated', stored['attributes'][0]['value'])
def test_attribute_deletion(self):
    """DELETEing one attribute should remove it and leave the others untouched."""
    specimen_json = self.get_minimal_specimen_json()
    specimen_json['attributes'] = [
        {'name': 'attr_one', 'value': '1'},
        {'name': 'attr_two', 'value': 'two'},
    ]
    created = self.put_specimen(specimen_json)
    self.send_delete('Biobank/specimens/sabrina/attributes/attr_one')
    stored = self.retrieve_specimen_json(created['id'])
    self.assertEqual(1, len(stored['attributes']), 'Should only have one attribute after the delete request')
    remaining_attribute = stored['attributes'][0]
    self.assertEqual('attr_two', remaining_attribute['name'])
def test_parent_attribute_not_found(self):
    """Updating an attribute of a specimen that was never created should return a 404."""
    # Fixed: removed useless f-string prefix (literal has no placeholders, ruff F541).
    # NOTE(review): the payload uses 'disposalDate' rather than 'value' — presumably
    # any body suffices for a not-found check; confirm.
    self.send_put("Biobank/specimens/sabrina/attributes/attr1", {
        'disposalDate': TIME_1.isoformat()
    }, expected_status=http.client.NOT_FOUND)
def test_parent_aliquot_created(self):
    """PUTting to the aliquots endpoint should add an aliquot to the specimen."""
    created = self._create_minimal_specimen()
    specimen_json = self.retrieve_specimen_json(created['id'])
    self.assertIsNone(specimen_json['aliquots'])
    self.send_put('Biobank/specimens/sabrina/aliquots/first', {
        'sampleType': 'first sample',
        'containerTypeID': 'tube'
    })
    specimen_json = self.retrieve_specimen_json(specimen_json['id'])
    new_aliquot = specimen_json['aliquots'][0]
    self.assertEqual('first sample', new_aliquot['sampleType'])
    self.assertEqual('tube', new_aliquot['containerTypeID'])
def test_parent_aliquot_update(self):
    """PUTting to an existing aliquot should update only the fields provided."""
    specimen_json = self.get_minimal_specimen_json()
    specimen_json['aliquots'] = [{
        'rlimsID': 'salem',
        'sampleType': 'first sample',
        'containerTypeID': 'tube'
    }]
    created = self.put_specimen(specimen_json)
    self.send_put('Biobank/specimens/sabrina/aliquots/salem', {
        'sampleType': 'updated'
    })
    stored = self.retrieve_specimen_json(created['id'])
    stored_aliquot = stored['aliquots'][0]
    # The field that was sent changed; the one that wasn't is untouched.
    self.assertEqual('updated', stored_aliquot['sampleType'])
    self.assertEqual('tube', stored_aliquot['containerTypeID'])
def test_simplified_aliquot_nesting(self):
    """The aliquots endpoint should allow for defining a new aliquot as a child of an existing aliquot"""
    generic_aliquot_data = {
        'sampleType': 'first sample',
        'containerTypeID': 'tube'
    }
    # Create a parent aliquot to test with
    specimen = self.data_generator.create_database_biobank_specimen()
    parent_rlims_id = 'parent_aliquot'
    self.send_put(f'Biobank/specimens/{specimen.rlimsId}/aliquots/{parent_rlims_id}', generic_aliquot_data)
    # Create another aliquot that is nested within the first aliquot (specimen -> aliquot -> aliquot)
    child_rlims_id = 'child_aliquot'
    self.send_put(f'Biobank/specimens/{parent_rlims_id}/aliquots/{child_rlims_id}', generic_aliquot_data)
    # Verify that the aliquot was successfully created and has the correct nesting structure
    aliquot = self.session.query(BiobankAliquot).filter(BiobankAliquot.rlimsId == child_rlims_id).one()
    self.assertEqual(parent_rlims_id, aliquot.parent_aliquot_rlims_id)
    self.assertEqual(specimen.rlimsId, aliquot.specimen_rlims_id)
    # Make an update to the aliquot and make sure the API modifies the existing aliquot
    updated_sample_type = 'new updated sample type'
    self.send_put(f'Biobank/specimens/{parent_rlims_id}/aliquots/{child_rlims_id}', {
        'sampleType': updated_sample_type
    })
    # NOTE(review): re-reading through a fresh dao session (rather than self.session
    # used above) — presumably to see the committed update instead of a stale
    # identity-map object; confirm.
    with self.dao.session() as session:
        aliquot = session.query(BiobankAliquot).filter(BiobankAliquot.id == aliquot.id).one()
        self.assertEqual(updated_sample_type, aliquot.sampleType)
def test_aliquot_nesting_not_found_error(self):
    """Return 404 if trying to PUT an aliquot as a child of another that doesn't exist"""
    aliquot_payload = {
        'sampleType': 'first sample',
        'containerTypeID': 'tube'
    }
    # The parent ID was never created, so the API should reject the request.
    self.send_put(
        'Biobank/specimens/does-not-exist/aliquots/new-aliquot',
        aliquot_payload,
        expected_status=404
    )
def _create_minimal_specimen_with_aliquot(self, rlims_id='sabrina', aliquot_rlims_id='salem'):
    """Store a minimal specimen that contains a single, minimal aliquot."""
    specimen_json = self.get_minimal_specimen_json(rlims_id)
    specimen_json['aliquots'] = [{'rlimsID': aliquot_rlims_id}]
    return self.put_specimen(specimen_json)
def test_aliquot_status_updated_all_fields(self):
    """Every field sent to the aliquot status endpoint should come back on the aliquot."""
    created = self._create_minimal_specimen_with_aliquot()
    expected_status = {
        'status': 'new',
        'freezeThawCount': 8,
        'location': 'Washington',
        'quantity': '3',
        'quantityUnits': 'some units',
        'processingCompleteDate': TIME_2.isoformat(),
        'deviations': 'no deviation'
    }
    self.send_put('Biobank/aliquots/salem/status', dict(expected_status))
    specimen_json = self.retrieve_specimen_json(created['id'])
    aliquot_status = specimen_json['aliquots'][0]['status']
    # The stored status should echo back exactly what was sent.
    for field_name, expected_value in expected_status.items():
        self.assertEqual(expected_value, aliquot_status[field_name])
def test_aliquot_status_updated_required_fields(self):
    """The aliquot status endpoint should work with just the required 'status' field."""
    created = self._create_minimal_specimen_with_aliquot()
    specimen_json = self.retrieve_specimen_json(created['id'])
    self.assertIsNone(specimen_json['aliquots'][0]['status']['status'])
    self.send_put('Biobank/aliquots/salem/status', {
        'status': 'updated'
    })
    specimen_json = self.retrieve_specimen_json(specimen_json['id'])
    self.assertEqual('updated', specimen_json['aliquots'][0]['status']['status'])
def test_aliquot_disposed_all_fields(self):
    """Disposal fields sent to the aliquot disposalStatus endpoint should be saved."""
    created = self._create_minimal_specimen_with_aliquot()
    disposal_payload = {
        'reason': 'contaminated',
        'disposalDate': TIME_2.isoformat()
    }
    self.send_put('Biobank/aliquots/salem/disposalStatus', disposal_payload)
    specimen_json = self.retrieve_specimen_json(created['id'])
    stored_disposal = specimen_json['aliquots'][0]['disposalStatus']
    self.assertEqual('contaminated', stored_disposal['reason'])
    self.assertEqual(TIME_2.isoformat(), stored_disposal['disposalDate'])
def test_aliquot_dataset_created(self):
    """PUTting a dataset should attach it (and its items) to the aliquot."""
    created = self._create_minimal_specimen_with_aliquot()
    specimen_json = self.retrieve_specimen_json(created['id'])
    self.assertIsNone(specimen_json['aliquots'][0]['datasets'])
    dataset_payload = {
        'status': 'created',
        'datasetItems': [{
            'paramID': 'param1',
            'displayValue': 'One',
            'displayUnits': 'param'
        }]
    }
    self.send_put('Biobank/aliquots/salem/datasets/data1', dataset_payload)
    specimen_json = self.retrieve_specimen_json(specimen_json['id'])
    stored_dataset = specimen_json['aliquots'][0]['datasets'][0]
    self.assertEqual('created', stored_dataset['status'])
    self.assertEqual('One', stored_dataset['datasetItems'][0]['displayValue'])
def test_aliquot_dataset_update(self):
    """PUTting to an existing dataset rlimsID should update that dataset in place."""
    payload = self.get_minimal_specimen_json()
    payload['aliquots'] = [
        {
            'rlimsID': 'salem',
            'datasets': [
                {
                    'rlimsID': 'data_one',
                    'datasetItems': [
                        {
                            'paramID': 'param_one'
                        }
                    ]
                },
                {
                    'rlimsID': 'data_two',
                    'datasetItems': [
                        {
                            'paramID': 'param_one'
                        }
                    ]
                }
            ]
        }
    ]
    result = self.put_specimen(payload)
    # Update the second dataset through the dataset-specific endpoint.
    self.send_put('Biobank/aliquots/salem/datasets/data_two', {
        'rlimsID': 'data_two',
        'status': 'updated',
        'datasetItems': [
            {
                'paramID': 'param_one',
                'displayValue': 'foobar'
            }
        ]
    })
    specimen = self.retrieve_specimen_json(result['id'])
    # Index 1 is 'data_two'; 'data_one' at index 0 should be untouched.
    dataset = specimen['aliquots'][0]['datasets'][1]
    self.assertEqual('updated', dataset['status'])
    self.assertEqual('foobar', dataset['datasetItems'][0]['displayValue'])
| |
"""Input handling and transformation machinery.
The first class in this module, :class:`InputSplitter`, is designed to tell when
input from a line-oriented frontend is complete and should be executed, and when
the user should be prompted for another line of code instead. The name 'input
splitter' is largely for historical reasons.
A companion, :class:`IPythonInputSplitter`, provides the same functionality but
with full support for the extended IPython syntax (magics, system calls, etc).
The code to actually do these transformations is in :mod:`IPython.core.inputtransformer`.
:class:`IPythonInputSplitter` feeds the raw code to the transformers in order
and stores the results.
For more details, see the class docstrings below.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import ast
import codeop
import io
import re
import sys
import tokenize
import warnings
from IPython.utils.py3compat import cast_unicode
from IPython.core.inputtransformer import (leading_indent,
classic_prompt,
ipy_prompt,
cellmagic,
assemble_logical_lines,
help_end,
escaped_commands,
assign_from_magic,
assign_from_system,
assemble_python_lines,
)
# These are available in this module for backwards compatibility.
from IPython.core.inputtransformer import (ESC_SHELL, ESC_SH_CAP, ESC_HELP,
ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,
ESC_QUOTE, ESC_QUOTE2, ESC_PAREN, ESC_SEQUENCES)
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
# FIXME: These are general-purpose utilities that later can be moved to the
# general ward. Kept here for now because we're being very strict about test
# coverage with this code, and this lets us ensure that we keep 100% coverage
# while developing.
# Compiled regexps for autoindent management.  All patterns are raw strings so
# backslashes reach the regex engine intact; the original comment_line_re used a
# non-raw string with invalid escape sequences ('\s', '\#'), which raises
# DeprecationWarning/SyntaxWarning on modern Python.
dedent_re = re.compile('|'.join([
    r'^\s+raise(\s.*)?$', # raise statement (+ space + other stuff, maybe)
    r'^\s+raise\([^\)]*\).*$', # wacky raise with immediate open paren
    r'^\s+return(\s.*)?$', # normal return (+ space + other stuff, maybe)
    r'^\s+return\([^\)]*\).*$', # wacky return with immediate open paren
    r'^\s+pass\s*$', # pass (optionally followed by trailing spaces)
    r'^\s+break\s*$', # break (optionally followed by trailing spaces)
    r'^\s+continue\s*$', # continue (optionally followed by trailing spaces)
]))
# Initial run of whitespace (spaces/tabs, not newlines) at the start of a line.
ini_spaces_re = re.compile(r'^([ \t\r\f\v]+)')
# regexp to match pure comment lines so we don't accidentally insert 'if 1:'
# before pure comments
comment_line_re = re.compile(r'^\s*\#')
def num_ini_spaces(s):
    """Return the number of initial spaces in a string.
    Note that tabs are counted as a single space. For now, we do *not* support
    mixing of tabs and spaces in the user's input.
    Parameters
    ----------
    s : string
    Returns
    -------
    n : int
    """
    match = ini_spaces_re.match(s)
    return match.end() if match else 0
# Fake token types for partial_tokenize: placed after N_TOKENS so they can
# never collide with a real token type from the tokenize module.
INCOMPLETE_STRING = tokenize.N_TOKENS
IN_MULTILINE_STATEMENT = tokenize.N_TOKENS + 1
# The 2 classes below have the same API as TokenInfo, but don't try to look up
# a token type name that they won't find.
class IncompleteString:
    """TokenInfo-like stand-in for a string literal left open at end of input."""
    type = exact_type = INCOMPLETE_STRING

    def __init__(self, s, start, end, line):
        # Same attribute set as tokenize.TokenInfo, minus the type-name lookup.
        (self.s, self.start, self.end, self.line) = (s, start, end, line)
class InMultilineStatement:
    """TokenInfo-like stand-in for a statement still open at end of input."""
    type = exact_type = IN_MULTILINE_STATEMENT

    def __init__(self, pos, line):
        # Zero-width marker token: start and end are the same position.
        self.s = ''
        self.start, self.end = pos, pos
        self.line = line
def partial_tokens(s):
    """Iterate over tokens from a possibly-incomplete string of code.
    This adds two special token types: INCOMPLETE_STRING and
    IN_MULTILINE_STATEMENT. These can only occur as the last token yielded, and
    represent the two main ways for code to be incomplete.
    """
    readline = io.StringIO(s).readline
    # Seed 'token' with a dummy NEWLINE so it is defined even when
    # generate_tokens raises before producing anything.
    token = tokenize.TokenInfo(tokenize.NEWLINE, '', (1, 0), (1, 0), '')
    try:
        for token in tokenize.generate_tokens(readline):
            yield token
    except tokenize.TokenError as e:
        # catch EOF error
        lines = s.splitlines(keepends=True)
        end = len(lines), len(lines[-1])
        if 'multi-line string' in e.args[0]:
            # Unterminated string: everything from where the last real token
            # ended up to the end of input is the incomplete literal.
            l, c = start = token.end
            s = lines[l-1][c:] + ''.join(lines[l:])
            yield IncompleteString(s, start, end, lines[-1])
        elif 'multi-line statement' in e.args[0]:
            # Unclosed bracket or backslash continuation.
            yield InMultilineStatement(end, lines[-1])
        else:
            raise
def find_next_indent(code):
    """Find the number of spaces for the next line of indentation"""
    tokens = list(partial_tokens(code))
    if tokens[-1].type == tokenize.ENDMARKER:
        tokens.pop()
    if not tokens:
        return 0
    # Drop trailing tokens that carry no indentation information.
    # NOTE(review): assumes at least one other token remains; input consisting
    # solely of such tokens would raise IndexError here — confirm callers
    # never pass that.
    while (tokens[-1].type in {tokenize.DEDENT, tokenize.NEWLINE, tokenize.COMMENT}):
        tokens.pop()
    if tokens[-1].type == INCOMPLETE_STRING:
        # Inside a multiline string
        return 0
    # Find the indents used before
    prev_indents = [0]
    def _add_indent(n):
        # Record only changes in indent level, not repeats.
        if n != prev_indents[-1]:
            prev_indents.append(n)
    tokiter = iter(tokens)
    for tok in tokiter:
        if tok.type in {tokenize.INDENT, tokenize.DEDENT}:
            _add_indent(tok.end[1])
        elif (tok.type == tokenize.NL):
            # Continuation line inside a statement: use the next token's column.
            try:
                _add_indent(next(tokiter).start[1])
            except StopIteration:
                break
    last_indent = prev_indents.pop()
    # If we've just opened a multiline statement (e.g. 'a = ['), indent more
    if tokens[-1].type == IN_MULTILINE_STATEMENT:
        if tokens[-2].exact_type in {tokenize.LPAR, tokenize.LSQB, tokenize.LBRACE}:
            return last_indent + 4
        return last_indent
    if tokens[-1].exact_type == tokenize.COLON:
        # Line ends with colon - indent
        return last_indent + 4
    if last_indent:
        # Examine the last line for dedent cues - statements like return or
        # raise which normally end a block of code.
        last_line_starts = 0
        for i, tok in enumerate(tokens):
            if tok.type == tokenize.NEWLINE:
                last_line_starts = i + 1
        last_line_tokens = tokens[last_line_starts:]
        names = [t.string for t in last_line_tokens if t.type == tokenize.NAME]
        if names and names[0] in {'raise', 'return', 'pass', 'break', 'continue'}:
            # Find the most recent indentation less than the current level
            for indent in reversed(prev_indents):
                if indent < last_indent:
                    return indent
    return last_indent
def last_blank(src):
    """Determine if the input source ends in a blank.
    A blank is either a newline or a line consisting of whitespace.
    Parameters
    ----------
    src : string
      A single or multiline string.
    """
    if not src:
        return False
    final_line = src.splitlines()[-1]
    return final_line == '' or final_line.isspace()
# Regexps used by last_two_blanks(); see that function for the '###' trick.
last_two_blanks_re = re.compile(r'\n\s*\n\s*$', re.MULTILINE)
last_two_blanks_re2 = re.compile(r'.+\n\s*\n\s+$', re.MULTILINE)
def last_two_blanks(src):
    """Determine if the input source ends in two blanks.
    A blank is either a newline or a line consisting of whitespace.
    Parameters
    ----------
    src : string
      A single or multiline string.
    """
    if not src:
        return False
    # The logic here is tricky: a single regexp over the whole source didn't
    # pass all the tests, so instead we grab the last two lines and prepend
    # '###\n' as a stand-in for whatever came before, then analyze that small
    # probe string with the two regexps above.  If anyone changes this logic,
    # make sure to validate the whole test suite first!
    tail_lines = src.splitlines()[-2:]
    probe = '\n'.join(['###\n'] + tail_lines)
    if last_two_blanks_re.match(probe):
        return True
    return bool(last_two_blanks_re2.match(probe))
def remove_comments(src):
    """Remove all comments from input source.
    Note: comments are NOT recognized inside of strings!
    Parameters
    ----------
    src : string
      A single or multiline input string.
    Returns
    -------
    String with all Python comments removed.
    """
    # Strip from each '#' to the end of its line; '.' never crosses newlines.
    comment_pattern = '#.*'
    return re.sub(comment_pattern, '', src)
def get_input_encoding():
    """Return the default standard input encoding.
    If sys.stdin has no encoding, 'ascii' is returned."""
    # Some environments (e.g. piped stdin) leave sys.stdin.encoding unset or
    # None; always hand back a usable codec name.
    encoding = getattr(sys.stdin, 'encoding', None)
    return 'ascii' if encoding is None else encoding
#-----------------------------------------------------------------------------
# Classes and functions for normal Python syntax handling
#-----------------------------------------------------------------------------
class InputSplitter(object):
    r"""An object that can accumulate lines of Python source before execution.
    This object is designed to be fed python source line-by-line, using
    :meth:`push`. It will return on each push whether the currently pushed
    code could be executed already. In addition, it provides a method called
    :meth:`push_accepts_more` that can be used to query whether more input
    can be pushed into a single interactive block.
    This is a simple example of how an interactive terminal-based client can use
    this tool::
        isp = InputSplitter()
        while isp.push_accepts_more():
            indent = ' '*isp.indent_spaces
            prompt = '>>> ' + indent
            line = indent + input(prompt)
            isp.push(line)
        print('Input source was:\n', isp.source_reset())
    """
    # Number of spaces of indentation computed from input that has been pushed
    # so far. This is the attributes callers should query to get the current
    # indentation level, in order to provide auto-indent facilities.
    indent_spaces = 0
    # String, indicating the default input encoding. It is computed by default
    # at initialization time via get_input_encoding(), but it can be reset by a
    # client with specific knowledge of the encoding.
    encoding = ''
    # String where the current full source input is stored, properly encoded.
    # Reading this attribute is the normal way of querying the currently pushed
    # source code, that has been properly encoded.
    source = ''
    # Code object corresponding to the current source. It is automatically
    # synced to the source, so it can be queried at any time to obtain the code
    # object; it will be None if the source doesn't compile to valid Python.
    code = None
    # Private attributes
    # List with lines of input accumulated so far
    _buffer = None
    # Command compiler
    _compile = None
    # Mark when input has changed indentation all the way back to flush-left
    _full_dedent = False
    # Boolean indicating whether the current block is complete
    _is_complete = None
    # Boolean indicating whether the current block has an unrecoverable syntax error
    _is_invalid = False
    def __init__(self):
        """Create a new InputSplitter instance.
        """
        self._buffer = []
        self._compile = codeop.CommandCompiler()
        self.encoding = get_input_encoding()
    def reset(self):
        """Reset the input buffer and associated state."""
        self.indent_spaces = 0
        self._buffer[:] = []
        self.source = ''
        self.code = None
        self._is_complete = False
        self._is_invalid = False
        self._full_dedent = False
    def source_reset(self):
        """Return the input source and perform a full reset.
        """
        out = self.source
        self.reset()
        return out
    def check_complete(self, source):
        """Return whether a block of code is ready to execute, or should be continued
        This is a non-stateful API, and will reset the state of this InputSplitter.
        Parameters
        ----------
        source : string
          Python input code, which can be multiline.
        Returns
        -------
        status : str
          One of 'complete', 'incomplete', or 'invalid' if source is not a
          prefix of valid code.
        indent_spaces : int or None
          The number of spaces by which to indent the next line of code. If
          status is not 'incomplete', this is None.
        """
        self.reset()
        try:
            self.push(source)
        except SyntaxError:
            # Transformers in IPythonInputSplitter can raise SyntaxError,
            # which push() will not catch.
            return 'invalid', None
        else:
            if self._is_invalid:
                return 'invalid', None
            elif self.push_accepts_more():
                return 'incomplete', self.indent_spaces
            else:
                return 'complete', None
        finally:
            self.reset()
    def push(self, lines):
        """Push one or more lines of input.
        This stores the given lines and returns a status code indicating
        whether the code forms a complete Python block or not.
        Any exceptions generated in compilation are swallowed, but if an
        exception was produced, the method returns True.
        Parameters
        ----------
        lines : string
          One or more lines of Python input.
        Returns
        -------
        is_complete : boolean
          True if the current input source (the result of the current input
          plus prior inputs) forms a complete Python execution block. Note that
          this value is also stored as a private attribute (``_is_complete``), so it
          can be queried at any time.
        """
        self._store(lines)
        source = self.source
        # Before calling _compile(), reset the code object to None so that if an
        # exception is raised in compilation, we don't mislead by having
        # inconsistent code/source attributes.
        self.code, self._is_complete = None, None
        self._is_invalid = False
        # Honor termination lines properly: a trailing backslash means the
        # statement explicitly continues, so don't even try to compile.
        if source.endswith('\\\n'):
            return False
        self._update_indent()
        try:
            with warnings.catch_warnings():
                # Treat SyntaxWarning like a hard error so it is caught below.
                warnings.simplefilter('error', SyntaxWarning)
                self.code = self._compile(source, symbol="exec")
        # Invalid syntax can produce any of a number of different errors from
        # inside the compiler, so we have to catch them all. Syntax errors
        # immediately produce a 'ready' block, so the invalid Python can be
        # sent to the kernel for evaluation with possible ipython
        # special-syntax conversion.
        except (SyntaxError, OverflowError, ValueError, TypeError,
                MemoryError, SyntaxWarning):
            self._is_complete = True
            self._is_invalid = True
        else:
            # Compilation didn't produce any exceptions (though it may not have
            # given a complete code object)
            self._is_complete = self.code is not None
        return self._is_complete
    def push_accepts_more(self):
        """Return whether a block of interactive input can accept more input.
        This method is meant to be used by line-oriented frontends, who need to
        guess whether a block is complete or not based solely on prior and
        current input lines. The InputSplitter considers it has a complete
        interactive block and will not accept more input when either:
        * A SyntaxError is raised
        * The code is complete and consists of a single line or a single
          non-compound statement
        * The code is complete and has a blank line at the end
        If the current input produces a syntax error, this method immediately
        returns False but does *not* raise the syntax error exception, as
        typically clients will want to send invalid syntax to an execution
        backend which might convert the invalid syntax into valid Python via
        one of the dynamic IPython mechanisms.
        """
        # With incomplete input, unconditionally accept more
        # A syntax error also sets _is_complete to True - see push()
        if not self._is_complete:
            #print("Not complete") # debug
            return True
        # The user can make any (complete) input execute by leaving a blank line
        last_line = self.source.splitlines()[-1]
        if (not last_line) or last_line.isspace():
            #print("Blank line") # debug
            return False
        # If there's just a single line or AST node, and we're flush left, as is
        # the case after a simple statement such as 'a=1', we want to execute it
        # straight away.
        if self.indent_spaces==0:
            if len(self.source.splitlines()) <= 1:
                return False
            try:
                code_ast = ast.parse(u''.join(self._buffer))
            except Exception:
                #print("Can't parse AST") # debug
                return False
            else:
                if len(code_ast.body) == 1 and \
                                    not hasattr(code_ast.body[0], 'body'):
                    #print("Simple statement") # debug
                    return False
        # General fallback - accept more code
        return True
    def _update_indent(self):
        # self.source always has a trailing newline
        self.indent_spaces = find_next_indent(self.source[:-1])
        self._full_dedent = (self.indent_spaces == 0)
    def _store(self, lines, buffer=None, store='source'):
        """Store one or more lines of input.
        If input lines are not newline-terminated, a newline is automatically
        appended."""
        if buffer is None:
            buffer = self._buffer
        if lines.endswith('\n'):
            buffer.append(lines)
        else:
            buffer.append(lines+'\n')
        # Keep the named attribute (default: self.source) in sync with buffer.
        setattr(self, store, self._set_source(buffer))
    def _set_source(self, buffer):
        return u''.join(buffer)
class IPythonInputSplitter(InputSplitter):
"""An input splitter that recognizes all of IPython's special syntax."""
# String with raw, untransformed input.
source_raw = ''
# Flag to track when a transformer has stored input that it hasn't given
# back yet.
transformer_accumulating = False
# Flag to track when assemble_python_lines has stored input that it hasn't
# given back yet.
within_python_line = False
# Private attributes
# List with lines of raw input accumulated so far.
_buffer_raw = None
def __init__(self, line_input_checker=True, physical_line_transforms=None,
logical_line_transforms=None, python_line_transforms=None):
super(IPythonInputSplitter, self).__init__()
self._buffer_raw = []
self._validate = True
if physical_line_transforms is not None:
self.physical_line_transforms = physical_line_transforms
else:
self.physical_line_transforms = [
leading_indent(),
classic_prompt(),
ipy_prompt(),
cellmagic(end_on_blank_line=line_input_checker),
]
self.assemble_logical_lines = assemble_logical_lines()
if logical_line_transforms is not None:
self.logical_line_transforms = logical_line_transforms
else:
self.logical_line_transforms = [
help_end(),
escaped_commands(),
assign_from_magic(),
assign_from_system(),
]
self.assemble_python_lines = assemble_python_lines()
if python_line_transforms is not None:
self.python_line_transforms = python_line_transforms
else:
# We don't use any of these at present
self.python_line_transforms = []
@property
def transforms(self):
"Quick access to all transformers."
return self.physical_line_transforms + \
[self.assemble_logical_lines] + self.logical_line_transforms + \
[self.assemble_python_lines] + self.python_line_transforms
@property
def transforms_in_use(self):
"""Transformers, excluding logical line transformers if we're in a
Python line."""
t = self.physical_line_transforms[:]
if not self.within_python_line:
t += [self.assemble_logical_lines] + self.logical_line_transforms
return t + [self.assemble_python_lines] + self.python_line_transforms
def reset(self):
"""Reset the input buffer and associated state."""
super(IPythonInputSplitter, self).reset()
self._buffer_raw[:] = []
self.source_raw = ''
self.transformer_accumulating = False
self.within_python_line = False
for t in self.transforms:
try:
t.reset()
except SyntaxError:
# Nothing that calls reset() expects to handle transformer
# errors
pass
    def flush_transformers(self):
        """Flush any input still held inside the active transformers.

        The transformers are chained lazily: an (initially empty) iterable
        is threaded through each transform in order, and each transform's
        reset() releases whatever it was still accumulating.  The combined
        output, if any, is appended to the source buffer via _store().
        """
        def _flush(transform, outs):
            """yield transformed lines

            always strings, never None

            transform: the current transform
            outs: an iterable of previously transformed inputs.
                Each may be multiline, which will be passed
                one line at a time to transform.
            """
            for out in outs:
                for line in out.splitlines():
                    # push one line at a time
                    tmp = transform.push(line)
                    if tmp is not None:
                        yield tmp

            # reset the transform
            tmp = transform.reset()
            if tmp is not None:
                yield tmp

        out = []
        # Build the generator chain; nothing executes until list() below.
        for t in self.transforms_in_use:
            out = _flush(t, out)
        out = list(out)
        if out:
            self._store('\n'.join(out))
def raw_reset(self):
"""Return raw input only and perform a full reset.
"""
out = self.source_raw
self.reset()
return out
def source_reset(self):
try:
self.flush_transformers()
return self.source
finally:
self.reset()
def push_accepts_more(self):
if self.transformer_accumulating:
return True
else:
return super(IPythonInputSplitter, self).push_accepts_more()
def transform_cell(self, cell):
"""Process and translate a cell of input.
"""
self.reset()
try:
self.push(cell)
self.flush_transformers()
return self.source
finally:
self.reset()
    def push(self, lines):
        """Push one or more lines of IPython input.

        This stores the given lines and returns a status code indicating
        whether the code forms a complete Python block or not, after
        processing all input lines for special IPython syntax.

        Any exceptions generated in compilation are swallowed, but if an
        exception was produced, the method returns True.

        Parameters
        ----------
        lines : string
            One or more lines of Python input.

        Returns
        -------
        is_complete : boolean
            True if the current input source (the result of the current
            input plus prior inputs) forms a complete Python execution
            block. Note that this value is also stored as a private
            attribute (_is_complete), so it can be queried at any time.
        """
        # We must ensure all input is pure unicode
        lines = cast_unicode(lines, self.encoding)
        # ''.splitlines() --> [], but we need to push the empty line to transformers
        lines_list = lines.splitlines()
        if not lines_list:
            lines_list = ['']
        # Store raw source before applying any transformations to it. Note
        # that this must be done *after* the reset() call that would otherwise
        # flush the buffer.
        self._store(lines, self._buffer_raw, 'source_raw')
        # The status of the final line pushed is the status of the whole
        # input.
        for line in lines_list:
            out = self.push_line(line)
        return out
def push_line(self, line):
buf = self._buffer
def _accumulating(dbg):
#print(dbg)
self.transformer_accumulating = True
return False
for transformer in self.physical_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
if not self.within_python_line:
line = self.assemble_logical_lines.push(line)
if line is None:
return _accumulating('acc logical line')
for transformer in self.logical_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
line = self.assemble_python_lines.push(line)
if line is None:
self.within_python_line = True
return _accumulating('acc python line')
else:
self.within_python_line = False
for transformer in self.python_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
#print("transformers clear") #debug
self.transformer_accumulating = False
return super(IPythonInputSplitter, self).push(line)
| |
from pycparser import c_parser,c_ast
import StringIO
import types
def astFromString(s):
    """Parse the C source in *s* and return its pycparser AST."""
    return c_parser.CParser().parse(s)
# C translation unit exercising the declaration forms parseTypeDecl must
# handle: named and anonymous structs, plain/pointer/array variables,
# forward struct references, a function pointer and a function definition.
# The tests below index into the AST's externals (ext[0]..ext[11]).
t1 = """
struct Foo {
int x;
int y;
};
struct Foo2 {
int moo;
} bar;
struct {
int x;
} barbar;
int x;
int * y;
struct Foo x;
struct {
int x;
} * x;
struct Foobar {
int x;
} * x;
int x[10];
struct unqualified * x;
struct unqualified {
int x;
};
int (*foo)(int,int);
int foo(){
};
"""
def test_typeParsing():
    """Parse every declaration in t1 and check the resulting type objects.

    Idiom fixes: ``== None``/``!= None`` replaced with ``is None`` /
    ``is not None``; redundant parentheses around assert dropped.
    """
    typeTable = types.TypeTable()
    ast = astFromString(t1)
    #ast.show()
    # ext[0]: definition of "struct Foo"; also registered in the table.
    parsed = types.parseTypeDecl(typeTable, ast.ext[0])
    for t in [parsed, typeTable.lookupType('Foo', isStructType=True)]:
        assert type(t) == types.Struct
        assert t.name == 'Foo'
        assert type(t.getMember('x')) == types.Int
        assert type(t.getMember('y')) == types.Int
        assert t.getMemberOffset('x') == 0
    # ext[1]: "struct Foo2 { ... } bar;" - definition plus a variable.
    parsed = types.parseTypeDecl(typeTable, ast.ext[1])
    for t in [parsed, typeTable.lookupType('Foo2', isStructType=True)]:
        assert type(t) == types.Struct
        assert t.name == 'Foo2'
        assert type(t.getMember('moo')) == types.Int
        assert t.getMemberOffset('moo') == 0
    # ext[2]: anonymous struct variable "barbar".
    t = types.parseTypeDecl(typeTable, ast.ext[2])
    assert type(t) == types.Struct
    assert t.name is None
    assert type(t.getMember('x')) == types.Int
    assert t.getMemberOffset('x') == 0
    # ext[3]: plain "int x;".
    parsed = types.parseTypeDecl(typeTable, ast.ext[3])
    assert type(parsed) == types.Int
    #pointer to int
    parsed = types.parseTypeDecl(typeTable, ast.ext[4])
    assert type(parsed) == types.Pointer
    assert type(parsed.type) == types.Int
    # ext[5]: variable of the previously defined "struct Foo".
    parsed = types.parseTypeDecl(typeTable, ast.ext[5])
    for t in [parsed, typeTable.lookupType('Foo', isStructType=True)]:
        assert type(t) == types.Struct
        assert t.name == 'Foo'
        assert type(t.getMember('x')) == types.Int
        assert type(t.getMember('y')) == types.Int
        assert t.getMemberOffset('x') == 0
    # ext[6]: pointer to an anonymous struct.
    parsed = types.parseTypeDecl(typeTable, ast.ext[6])
    assert type(parsed) == types.Pointer
    assert type(parsed.type) == types.Struct
    assert parsed.type.name is None
    # ext[7]: pointer to a struct defined inline; definition registered.
    parsed = types.parseTypeDecl(typeTable, ast.ext[7])
    assert type(parsed) == types.Pointer
    assert type(parsed.type) == types.Struct
    assert parsed.type.name == "Foobar"
    assert typeTable.lookupType('Foobar', isStructType=True) is not None
    # ext[8]: fixed-size array of int.
    parsed = types.parseTypeDecl(typeTable, ast.ext[8])
    assert type(parsed) == types.Array
    assert type(parsed.type) == types.Int
    assert parsed.length == 10
    # ext[9]: pointer to a struct only defined later.
    parsed = types.parseTypeDecl(typeTable, ast.ext[9])
    assert type(parsed) == types.Pointer
    # ext[10]: the definition completing that forward reference.
    parsed = types.parseTypeDecl(typeTable, ast.ext[10])
    assert type(parsed) == types.Struct
    assert parsed.name == "unqualified"
    # ext[11]: pointer to a function int(int, int).
    parsed = types.parseTypeDecl(typeTable, ast.ext[11])
    assert type(parsed) == types.Pointer
    assert type(parsed.type) == types.Function
    assert type(parsed.type.rettype) == types.Int
    assert type(parsed.type.args[0]) == types.Int
    assert len(parsed.type.args) == 2
# Pairs of declarations (0/1, 2/3, 4/5) that SHOULD strictly match:
# identical pointer types, identical function-pointer types, and a
# struct definition matched against a variable of that struct.
t2 = """
char * foo;
char * bar;
void (*foo) ();
void (*foo)(void);
struct Foo {
int x;
int y;
};
struct Foo bar;
"""
def test_typeMatching1():
    """Each consecutive pair of declarations in t2 must strictly match."""
    typeTable = types.TypeTable()
    ast = astFromString(t2)
    for pair in range(3):
        left = types.parseTypeDecl(typeTable, ast.ext[2 * pair])
        right = types.parseTypeDecl(typeTable, ast.ext[2 * pair + 1])
        assert left.strictTypeMatch(right)
# Pairs of declarations (0/1, 2/3, 4/5) that must NOT strictly match:
# differing pointee types, differing argument lists, and a struct
# matched against a variable of a *different* struct tag.
t3 = """
char * foo;
int * bar;
void (*foo) (int);
void (*foo)(void);
struct Foo {
int x;
int y;
};
struct Bar bar;
"""
def test_typeMatching2():
    """Each consecutive pair of declarations in t3 must fail to match.

    Idiom fix: ``== False`` replaced with ``not`` (strictTypeMatch is
    expected to return a boolean).
    """
    typeTable = types.TypeTable()
    ast = astFromString(t3)
    for i in range(3):
        a, b = i * 2, i * 2 + 1
        typea = types.parseTypeDecl(typeTable, ast.ext[a])
        typeb = types.parseTypeDecl(typeTable, ast.ext[b])
        assert not typea.strictTypeMatch(typeb)
# Integer declarations covering the signed/unsigned and
# short/int/long spelling variations...
t4 = """
char x;
unsigned char x;
int short x;
short int x;
unsigned short int x;
short x;
unsigned short x;
unsigned x;
int x;
unsigned int x;
long int x;
unsigned long int x;
long x;
unsigned long x;
long unsigned x;
"""
# ...and the type each one should parse to: entry i of t4solutions
# corresponds to declaration i of t4.  Note these constructors run at
# import time.
t4solutions = [
    types.Char(signed=True),
    types.Char(signed=False),
    types.ShortInt(signed=True),
    types.ShortInt(signed=True),
    types.ShortInt(signed=False),
    types.ShortInt(signed=True),
    types.ShortInt(signed=False),
    types.Int(signed=False),
    types.Int(signed=True),
    types.Int(signed=False),
    types.LongInt(signed=True),
    types.LongInt(signed=False),
    types.LongInt(signed=True),
    types.LongInt(signed=False),
    types.LongInt(signed=False),
]
def test_IntTypeParsing():
    """Each declaration in t4 must strictly match its expected type."""
    typeTable = types.TypeTable()
    ast = astFromString(t4)
    # Fixtures and solutions must stay in lockstep.
    assert(len(t4solutions) == len(ast.ext))
    for i,ideal in enumerate(t4solutions):
        parsed = types.parseTypeDecl(typeTable,ast.ext[i])
        # Python 2 print statements: diagnostic output on failure.
        print i
        print ideal
        print parsed
        matches = ideal.strictTypeMatch(parsed)
        assert(matches == True)
| |
from __future__ import absolute_import, unicode_literals
from future.builtins import int, open
import os
try:
from urllib.parse import urljoin, urlparse
except ImportError:
from urlparse import urljoin, urlparse
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.admin.options import ModelAdmin
from django.contrib.staticfiles import finders
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db.models import get_model
from django.http import (HttpResponse, HttpResponseServerError,
HttpResponseNotFound)
from django.shortcuts import redirect
from django.template import RequestContext
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import requires_csrf_token
from mezzanine.conf import settings
from mezzanine.core.forms import get_edit_form
from mezzanine.core.models import Displayable, SitePermission
from mezzanine.utils.cache import add_cache_bypass
from mezzanine.utils.views import is_editable, paginate, render, set_cookie
from mezzanine.utils.sites import has_site_permission
from mezzanine.utils.urls import next_url
def set_device(request, device=""):
    """
    Sets a device name in a cookie when a user explicitly wants to go
    to the site for a particular device (eg mobile).
    """
    one_year = 60 * 60 * 24 * 365
    destination = add_cache_bypass(next_url(request) or "/")
    response = redirect(destination)
    set_cookie(response, "mezzanine-device", device, one_year)
    return response
@staff_member_required
def set_site(request):
    """
    Put the selected site ID into the session - posted to from
    the "Select site" drop-down in the header of the admin. The
    site ID is then used in favour of the current request's
    domain in ``mezzanine.core.managers.CurrentSiteManager``.
    """
    site_id = int(request.GET["site_id"])
    if not request.user.is_superuser:
        # Non-superusers may only switch to sites they've been granted
        # explicit permission for.
        try:
            SitePermission.objects.get(user=request.user, sites=site_id)
        except SitePermission.DoesNotExist:
            raise PermissionDenied
    request.session["site_id"] = site_id
    admin_url = reverse("admin:index")
    # Renamed from ``next``, which shadowed the builtin.
    next_page = next_url(request) or admin_url
    # Don't redirect to a change view for an object that won't exist
    # on the selected site - go to its list view instead.
    if next_page.startswith(admin_url):
        parts = next_page.split("/")
        if len(parts) > 4 and parts[4].isdigit():
            next_page = "/".join(parts[:4])
    return redirect(next_page)
def direct_to_template(request, template, extra_context=None, **kwargs):
    """
    Replacement for Django's ``direct_to_template`` that uses
    ``TemplateResponse`` via ``mezzanine.utils.views.render``.
    """
    context = extra_context or {}
    context["params"] = kwargs
    # Resolve lazily-supplied (callable) context values before rendering.
    for key in list(context):
        if callable(context[key]):
            context[key] = context[key]()
    return render(request, template, context)
@staff_member_required
def edit(request):
    """
    Process the inline editing form.

    Expects POST data naming the model ("app", "model"), the object
    ("id") and the editable field names ("fields").  Returns an empty
    response body on success, or an error message otherwise.
    """
    model = get_model(request.POST["app"], request.POST["model"])
    obj = model.objects.get(id=request.POST["id"])
    form = get_edit_form(obj, request.POST["fields"], data=request.POST,
                         files=request.FILES)
    # Permission is checked before validation so unauthorised users get
    # "Permission denied" rather than field errors.
    if not (is_editable(obj, request) and has_site_permission(request.user)):
        response = _("Permission denied")
    elif form.is_valid():
        form.save()
        # Record the change in the admin log, mirroring what the admin
        # change view would do.
        model_admin = ModelAdmin(model, admin.site)
        message = model_admin.construct_change_message(request, form, None)
        model_admin.log_change(request, obj, message)
        response = ""
    else:
        # First error message of the first invalid field.
        response = list(form.errors.values())[0][0]
    return HttpResponse(response)
def search(request, template="search_results.html"):
    """
    Display search results. Takes an optional "contenttype" GET parameter
    in the form "app-name.ModelName" to limit search results to a single model.
    """
    settings.use_editable()
    query = request.GET.get("q", "")
    page = request.GET.get("page", 1)
    per_page = settings.SEARCH_PER_PAGE
    max_paging_links = settings.MAX_PAGING_LINKS
    try:
        # A missing or invalid "type" parameter ends up raising TypeError
        # here - either from the get_model() call itself or from
        # issubclass(None, ...) - which selects the "Everything" branch.
        search_model = get_model(*request.GET.get("type", "").split(".", 1))
        if not issubclass(search_model, Displayable):
            raise TypeError
    except TypeError:
        search_model = Displayable
        search_type = _("Everything")
    else:
        search_type = search_model._meta.verbose_name_plural.capitalize()
    results = search_model.objects.search(query, for_user=request.user)
    paginated = paginate(results, page, per_page, max_paging_links)
    context = {"query": query, "results": paginated,
               "search_type": search_type}
    return render(request, template, context)
@staff_member_required
def static_proxy(request):
    """
    Serves TinyMCE plugins inside the inline popups and the uploadify
    SWF, as these are normally static files, and will break with
    cross-domain JavaScript errors if ``STATIC_URL`` is an external
    host. URL for the file is passed in via querystring in the inline
    popup plugin template, and we then attempt to pull out the relative
    path to the file, so that we can serve it locally via Django.
    """
    # Reduce scheme-full URLs to protocol-relative ("//host/...") form so
    # the prefix comparisons below work for both http and https.
    normalize = lambda u: ("//" + u.split("://")[-1]) if "://" in u else u
    url = normalize(request.GET["u"])
    host = "//" + request.get_host()
    static_url = normalize(settings.STATIC_URL)
    # Strip the first matching prefix to get a path relative to the
    # static root.  NOTE(review): ``url`` comes straight from the query
    # string; finders.find() restricts lookups to the static dirs, but
    # confirm it rejects "../"-style traversal.
    for prefix in (host, static_url, "/"):
        if url.startswith(prefix):
            url = url.replace(prefix, "", 1)
    response = ""
    content_type = ""
    path = finders.find(url)
    if path:
        if isinstance(path, (list, tuple)):
            path = path[0]
        if url.endswith(".htm"):
            # Inject <base href="{{ STATIC_URL }}"> into TinyMCE
            # plugins, since the path static files in these won't be
            # on the same domain.
            static_url = settings.STATIC_URL + os.path.split(url)[0] + "/"
            if not urlparse(static_url).scheme:
                static_url = urljoin(host, static_url)
            base_tag = "<base href='%s'>" % static_url
            content_type = "text/html"
            with open(path, "r") as f:
                response = f.read().replace("<head>", "<head>" + base_tag)
        else:
            content_type = "application/octet-stream"
            with open(path, "rb") as f:
                response = f.read()
    return HttpResponse(response, content_type=content_type)
def displayable_links_js(request, template_name="admin/displayable_links.js"):
    """
    Renders a list of url/title pairs for all ``Displayable`` subclass
    instances into JavaScript that's used to populate a list of links
    in TinyMCE.
    """
    links = []
    if "mezzanine.pages" in settings.INSTALLED_APPS:
        from mezzanine.pages.models import Page
        is_page = lambda obj: isinstance(obj, Page)
    else:
        is_page = lambda obj: False
    # For each item's title, we use its model's verbose_name, but in the
    # case of Page subclasses, we just use "Page", and then sort the items
    # by whether they're a Page subclass or not, then by their URL.
    for url, obj in Displayable.objects.url_map(for_user=request.user).items():
        # NOTE(review): "titles" (plural) with a fallback to .title looks
        # deliberate, but confirm against the Displayable API that it
        # isn't a typo.
        title = getattr(obj, "titles", obj.title)
        real = hasattr(obj, "id")
        page = is_page(obj)
        if real:
            verbose_name = _("Page") if page else obj._meta.verbose_name
            title = "%s: %s" % (verbose_name, title)
        links.append((not page and real, url, title))
    # Sort on the (flag, url, title) tuples, then drop the sort flag.
    context = {"links": [link[1:] for link in sorted(links)]}
    content_type = "text/javascript"
    return render(request, template_name, context, content_type=content_type)
@requires_csrf_token
def page_not_found(request, template_name="errors/404.html"):
    """
    Mimics Django's 404 handler but with a different template path.
    """
    template = get_template(template_name)
    context = RequestContext(request, {
        "STATIC_URL": settings.STATIC_URL,
        "request_path": request.path,
    })
    return HttpResponseNotFound(template.render(context))
@requires_csrf_token
def server_error(request, template_name="errors/500.html"):
    """
    Mimics Django's error handler but adds ``STATIC_URL`` to the
    context.
    """
    template = get_template(template_name)
    context = RequestContext(request, {"STATIC_URL": settings.STATIC_URL})
    return HttpResponseServerError(template.render(context))
| |
import enum
import sys
import unittest
from enum import Enum, IntEnum, unique, EnumMeta
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
# Running interpreter version as a float, e.g. 2.7 or 3.4, used to gate
# version-specific tests below.
# NOTE(review): float('%s.%s') collapses 3.10 into 3.1, so the version
# gates below would misfire on Python >= 3.10 - confirm before running
# this suite there.
pyver = float('%s.%s' % sys.version_info[:2])

# Compatibility shims for very old interpreters.
try:
    any
except NameError:
    # any() first appeared in Python 2.5.
    def any(iterable):
        for element in iterable:
            if element:
                return True
        return False

try:
    unicode
except NameError:
    # Python 3: str is already unicode.
    unicode = str

try:
    from collections import OrderedDict
except ImportError:
    # Python < 2.7; ordered-members tests are gated on pyver.
    OrderedDict = None
# for pickle tests
# Each fixture is built inside try/except so that a failure to create
# the enum is stored in its place and re-raised by the test that uses
# it, rather than aborting the whole module at import time.
try:
    class Stooges(Enum):
        LARRY = 1
        CURLY = 2
        MOE = 3
except Exception:
    Stooges = sys.exc_info()[1]

try:
    class IntStooges(int, Enum):
        LARRY = 1
        CURLY = 2
        MOE = 3
except Exception:
    IntStooges = sys.exc_info()[1]

try:
    class FloatStooges(float, Enum):
        LARRY = 1.39
        CURLY = 2.72
        MOE = 3.142596
except Exception:
    FloatStooges = sys.exc_info()[1]

# for pickle test and subclass tests
try:
    class StrEnum(str, Enum):
        'accepts only string values'
    class Name(StrEnum):
        BDFL = 'Guido van Rossum'
        FLUFL = 'Barry Warsaw'
except Exception:
    Name = sys.exc_info()[1]

# Functional-API fixtures: with an explicit module, without one, and
# with a qualname (used by pickle protocol 4+ lookups).
try:
    Question = Enum('Question', 'who what when where why', module=__name__)
except Exception:
    Question = sys.exc_info()[1]

try:
    Answer = Enum('Answer', 'him this then there because')
except Exception:
    Answer = sys.exc_info()[1]

try:
    Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception:
    Theory = sys.exc_info()[1]

# for doctests
try:
    class Fruit(Enum):
        tomato = 1
        banana = 2
        cherry = 3
except Exception:
    pass
def test_pickle_dump_load(assertion, source, target=None,
protocol=(0, HIGHEST_PROTOCOL)):
start, stop = protocol
failures = []
for protocol in range(start, stop+1):
try:
if target is None:
assertion(loads(dumps(source, protocol=protocol)) is source)
else:
assertion(loads(dumps(source, protocol=protocol)), target)
except Exception:
exc, tb = sys.exc_info()[1:]
failures.append('%2d: %s' %(protocol, exc))
if failures:
raise ValueError('Failed with protocols: %s' % ', '.join(failures))
def test_pickle_exception(assertion, exception, obj,
protocol=(0, HIGHEST_PROTOCOL)):
start, stop = protocol
failures = []
for protocol in range(start, stop+1):
try:
assertion(exception, dumps, obj, protocol=protocol)
except Exception:
exc = sys.exc_info()[1]
failures.append('%d: %s %s' % (protocol, exc.__class__.__name__, exc))
if failures:
raise ValueError('Failed with protocols: %s' % ', '.join(failures))
class TestHelpers(unittest.TestCase):
    """Tests for the module-private helpers _is_descriptor, _is_sunder
    and _is_dunder."""

    def test_is_descriptor(self):
        class Victim:
            pass
        # Any one of the three descriptor methods makes an object a
        # descriptor.
        for method in ('__get__', '__set__', '__delete__'):
            candidate = Victim()
            self.assertFalse(enum._is_descriptor(candidate))
            setattr(candidate, method, 1)
            self.assertTrue(enum._is_descriptor(candidate))

    def test_is_sunder(self):
        sunders = ('_a_', '_aa_')
        non_sunders = ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__',
                       '__a_', '_', '__', '___', '____', '_____',)
        for name in sunders:
            self.assertTrue(enum._is_sunder(name))
        for name in non_sunders:
            self.assertFalse(enum._is_sunder(name))

    def test_is_dunder(self):
        dunders = ('__a__', '__aa__')
        non_dunders = ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__',
                       '__a_', '_', '__', '___', '____', '_____',)
        for name in dunders:
            self.assertTrue(enum._is_dunder(name))
        for name in non_dunders:
            self.assertFalse(enum._is_dunder(name))
class TestEnum(unittest.TestCase):
    def setUp(self):
        """Create a fresh set of enum fixtures for each test."""
        # Plain Enum.
        class Season(Enum):
            SPRING = 1
            SUMMER = 2
            AUTUMN = 3
            WINTER = 4
        self.Season = Season

        # float mix-in; TAU shows values may reference earlier members'
        # values within the class body.
        class Konstants(float, Enum):
            E = 2.7182818
            PI = 3.1415926
            TAU = 2 * PI
        self.Konstants = Konstants

        class Grades(IntEnum):
            A = 5
            B = 4
            C = 3
            D = 2
            F = 0
        self.Grades = Grades

        # str mix-in.
        class Directional(str, Enum):
            EAST = 'east'
            WEST = 'west'
            NORTH = 'north'
            SOUTH = 'south'
        self.Directional = Directional

        from datetime import date
        # Tuple values are passed through to the date() constructor.
        class Holiday(date, Enum):
            NEW_YEAR = 2013, 1, 1
            IDES_OF_MARCH = 2013, 3, 15
        self.Holiday = Holiday
    # These tests are only defined when the interpreter honours a custom
    # __dir__; the class-level `if` simply omits them otherwise.
    if pyver >= 2.6:     # cannot specify custom `dir` on this version
        def test_dir_on_class(self):
            # dir() on the class exposes the member names plus a fixed
            # set of dunders.
            Season = self.Season
            self.assertEqual(
                set(dir(Season)),
                set(['__class__', '__doc__', '__members__', '__module__',
                    'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
                )

        def test_dir_on_item(self):
            # dir() on a member exposes name/value but not the siblings.
            Season = self.Season
            self.assertEqual(
                set(dir(Season.WINTER)),
                set(['__class__', '__doc__', '__module__', 'name', 'value']),
                )
if pyver >= 2.7: # OrderedDict first available here
def test_members_is_ordereddict_if_ordered(self):
class Ordered(Enum):
__order__ = 'first second third'
first = 'bippity'
second = 'boppity'
third = 'boo'
self.assertTrue(type(Ordered.__members__) is OrderedDict)
def test_members_is_ordereddict_if_not_ordered(self):
class Unordered(Enum):
this = 'that'
these = 'those'
self.assertTrue(type(Unordered.__members__) is OrderedDict)
if pyver >= 3.0: # all objects are ordered in Python 2.x
def test_members_is_always_ordered(self):
class AlwaysOrdered(Enum):
first = 1
second = 2
third = 3
self.assertTrue(type(AlwaysOrdered.__members__) is OrderedDict)
    def test_comparisons(self):
        """Members compare by identity/equality only; ordering raises."""
        # NOTE: bad_compare is deliberately redefined below; each
        # definition is exercised by the assertRaises right after it.
        # This first one closes over the local ``Season`` assigned on the
        # next line, which is fine because it only runs afterwards.
        def bad_compare():
            Season.SPRING > 4
        Season = self.Season
        self.assertNotEqual(Season.SPRING, 1)
        self.assertRaises(TypeError, bad_compare)

        class Part(Enum):
            SPRING = 1
            CLIP = 2
            BARREL = 3
        # Members of different enums are never equal, even with equal
        # values and names.
        self.assertNotEqual(Season.SPRING, Part.SPRING)

        def bad_compare():
            Season.SPRING < Part.CLIP
        self.assertRaises(TypeError, bad_compare)
def test_enum_in_enum_out(self):
Season = self.Season
self.assertTrue(Season(Season.WINTER) is Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split()):
i += 1
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertTrue(e in Season)
self.assertTrue(type(e) is Season)
self.assertTrue(isinstance(e, Season))
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.%s: %s>' % (season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
def set_name(obj, new_value):
obj.name = new_value
def set_value(obj, new_value):
obj.value = new_value
self.assertRaises(AttributeError, set_name, Season.SPRING, 'invierno', )
self.assertRaises(AttributeError, set_value, Season.SPRING, 2)
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
self.assertRaises(AttributeError, delattr, Season, 'SPRING')
self.assertRaises(AttributeError, delattr, Season, 'DRY')
self.assertRaises(AttributeError, delattr, Season.SPRING, 'name')
def test_invalid_names(self):
def create_bad_class_1():
class Wrong(Enum):
mro = 9
def create_bad_class_2():
class Wrong(Enum):
_reserved_ = 3
self.assertRaises(ValueError, create_bad_class_1)
self.assertRaises(ValueError, create_bad_class_2)
def test_contains(self):
Season = self.Season
self.assertTrue(Season.AUTUMN in Season)
self.assertTrue(3 not in Season)
val = Season(3)
self.assertTrue(val in Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertTrue(OtherEnum.two not in Season)
if pyver >= 2.6: # when `format` came into being
def test_format_enum(self):
Season = self.Season
self.assertEqual('{0}'.format(Season.SPRING),
'{0}'.format(str(Season.SPRING)))
self.assertEqual( '{0:}'.format(Season.SPRING),
'{0:}'.format(str(Season.SPRING)))
self.assertEqual('{0:20}'.format(Season.SPRING),
'{0:20}'.format(str(Season.SPRING)))
self.assertEqual('{0:^20}'.format(Season.SPRING),
'{0:^20}'.format(str(Season.SPRING)))
self.assertEqual('{0:>20}'.format(Season.SPRING),
'{0:>20}'.format(str(Season.SPRING)))
self.assertEqual('{0:<20}'.format(Season.SPRING),
'{0:<20}'.format(str(Season.SPRING)))
def test_format_enum_custom(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual('{0}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{0}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{0:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{0}', Konstants.TAU)
self.assertFormatIsValue('{0:}', Konstants.TAU)
self.assertFormatIsValue('{0:20}', Konstants.TAU)
self.assertFormatIsValue('{0:^20}', Konstants.TAU)
self.assertFormatIsValue('{0:>20}', Konstants.TAU)
self.assertFormatIsValue('{0:<20}', Konstants.TAU)
self.assertFormatIsValue('{0:n}', Konstants.TAU)
self.assertFormatIsValue('{0:5.2}', Konstants.TAU)
self.assertFormatIsValue('{0:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{0}', Grades.C)
self.assertFormatIsValue('{0:}', Grades.C)
self.assertFormatIsValue('{0:20}', Grades.C)
self.assertFormatIsValue('{0:^20}', Grades.C)
self.assertFormatIsValue('{0:>20}', Grades.C)
self.assertFormatIsValue('{0:<20}', Grades.C)
self.assertFormatIsValue('{0:+}', Grades.C)
self.assertFormatIsValue('{0:08X}', Grades.C)
self.assertFormatIsValue('{0:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{0}', Directional.WEST)
self.assertFormatIsValue('{0:}', Directional.WEST)
self.assertFormatIsValue('{0:20}', Directional.WEST)
self.assertFormatIsValue('{0:^20}', Directional.WEST)
self.assertFormatIsValue('{0:>20}', Directional.WEST)
self.assertFormatIsValue('{0:<20}', Directional.WEST)
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_enum_duplicates(self):
__order__ = "SPRING SUMMER AUTUMN WINTER"
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertTrue(Season.FALL is Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertTrue(Season(3) is Season.AUTUMN)
self.assertTrue(Season(1) is Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
set([k for k,v in Season.__members__.items() if v.name != k]),
set(['FALL', 'ANOTHER_SPRING']),
)
if pyver >= 3.0:
cls = vars()
result = {'Enum':Enum}
exec("""def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3""",
result)
cls['test_duplicate_name'] = result['test_duplicate_name']
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertTrue(type(Huh.name) is Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited(self):
class StrEnum(str, Enum):
pass
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target):
i += 1
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertTrue(e in WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertTrue(type(e) is WeekDay)
self.assertTrue(isinstance(e, int))
self.assertTrue(isinstance(e, Enum))
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
__order__ = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertTrue(WeekDay.TEUSDAY is WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertTrue, Stooges.CURLY)
test_pickle_dump_load(self.assertTrue, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertTrue, IntStooges.CURLY)
test_pickle_dump_load(self.assertTrue, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertTrue, FloatStooges.CURLY)
test_pickle_dump_load(self.assertTrue, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertTrue, Answer.him)
test_pickle_dump_load(self.assertTrue, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertTrue, Question.who)
test_pickle_dump_load(self.assertTrue, Question)
if pyver >= 3.4:
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_exception(
self.assertRaises, PicklingError, self.NestedEnum.twigs,
protocol=(0, 3))
test_pickle_dump_load(self.assertTrue, self.NestedEnum.twigs,
protocol=(4, HIGHEST_PROTOCOL))
    def test_exploding_pickle(self):
        """An enum made unpicklable must fail to pickle at every protocol."""
        BadPickle = Enum('BadPickle', 'dill sweet bread-n-butter')
        # Sabotage pickling by lying about where the class lives.
        enum._make_class_unpicklable(BadPickle)
        # Pickle-by-name still needs the class reachable at module scope.
        globals()['BadPickle'] = BadPickle
        test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
        test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertTrue(Period(2) is Period.noon)
self.assertTrue(getattr(Period, 'night') is Period.night)
self.assertTrue(Period['morning'] is Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__hash__'))
def test_iteration_order(self):
class Season(Enum):
__order__ = 'SUMMER WINTER AUTUMN SPRING'
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_programatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
dict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
if pyver < 3.0:
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
def test_programatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split()):
i += 1
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertTrue(e in SummerMonth)
self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_unicode(self):
        """Functional API accepts a unicode member string (Python 2 compat)."""
        SummerMonth = Enum('SummerMonth', unicode('june july august'))
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
            [SummerMonth.june, SummerMonth.july, SummerMonth.august],
            lst,
            )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_unicode_list(self):
        """Functional API accepts a list of unicode names (Python 2 compat)."""
        SummerMonth = Enum('SummerMonth', [unicode('june'), unicode('july'), unicode('august')])
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
            [SummerMonth.june, SummerMonth.july, SummerMonth.august],
            lst,
            )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_unicode_iterable(self):
        """Functional API accepts unicode (name, value) pairs (Python 2 compat)."""
        SummerMonth = Enum(
            'SummerMonth',
            ((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3))
            )
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
            [SummerMonth.june, SummerMonth.july, SummerMonth.august],
            lst,
            )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_from_unicode_dict(self):
        """Functional API accepts a unicode-keyed mapping (Python 2 compat)."""
        SummerMonth = Enum(
            'SummerMonth',
            dict(((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3)))
            )
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        if pyver < 3.0:
            self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_unicode_type(self):
        """Functional API with a unicode string and type=int (Python 2 compat)."""
        SummerMonth = Enum('SummerMonth', unicode('june july august'), type=int)
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
            [SummerMonth.june, SummerMonth.july, SummerMonth.august],
            lst,
            )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_unicode_type_from_subclass(self):
        """IntEnum functional API with a unicode string (Python 2 compat)."""
        SummerMonth = IntEnum('SummerMonth', unicode('june july august'))
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
            [SummerMonth.june, SummerMonth.july, SummerMonth.august],
            lst,
            )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertTrue(Name.BDFL, Name('Guido van Rossum'))
self.assertTrue(Name.BDFL is getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertTrue, Name.BDFL)
def test_extending(self):
def bad_extension():
class Color(Enum):
red = 1
green = 2
blue = 3
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertRaises(TypeError, bad_extension)
    def test_exclude_methods(self):
        """Callables defined in an Enum body stay methods, not members."""
        class whatever(Enum):
            this = 'that'
            these = 'those'
            def really(self):
                return 'no, not %s' % self.value
        self.assertFalse(type(whatever.really) is whatever)
        self.assertEqual(whatever.this.really(), 'no, not that')
    def test_wrong_inheritance_order(self):
        """The mix-in data type must come *before* Enum in the bases list."""
        def wrong_inherit():
            class Wrong(Enum, str):
                NotHere = 'error before this point'
        self.assertRaises(TypeError, wrong_inherit)
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
    def test_introspection(self):
        """_member_type_ reflects the mix-in data type (object when none)."""
        class Number(IntEnum):
            one = 100
            two = 200
        self.assertTrue(Number.one._member_type_ is int)
        self.assertTrue(Number._member_type_ is int)
        class String(str, Enum):
            yarn = 'soft'
            rope = 'rough'
            wire = 'hard'
        self.assertTrue(String.yarn._member_type_ is str)
        self.assertTrue(String._member_type_ is str)
        class Plain(Enum):
            vanilla = 'white'
            one = 1
        self.assertTrue(Plain.vanilla._member_type_ is object)
        self.assertTrue(Plain._member_type_ is object)
    def test_wrong_enum_in_call(self):
        """Looking up a member of one Enum by another Enum's member fails."""
        class Monochrome(Enum):
            black = 0
            white = 1
        class Gender(Enum):
            male = 0
            female = 1
        self.assertRaises(ValueError, Monochrome, Gender.male)
    def test_wrong_enum_in_mixed_call(self):
        """A plain-Enum member is not a valid value for an IntEnum lookup."""
        class Monochrome(IntEnum):
            black = 0
            white = 1
        class Gender(Enum):
            male = 0
            female = 1
        self.assertRaises(ValueError, Monochrome, Gender.male)
    def test_mixed_enum_in_call_1(self):
        """An IntEnum member works as a value for another IntEnum's lookup."""
        class Monochrome(IntEnum):
            black = 0
            white = 1
        class Gender(IntEnum):
            male = 0
            female = 1
        self.assertTrue(Monochrome(Gender.female) is Monochrome.white)
    def test_mixed_enum_in_call_2(self):
        """An IntEnum member works as a value for a plain Enum's lookup."""
        class Monochrome(Enum):
            black = 0
            white = 1
        class Gender(IntEnum):
            male = 0
            female = 1
        self.assertTrue(Monochrome(Gender.male) is Monochrome.black)
    def test_flufl_enum(self):
        """Methods defined on an empty Enum base are inherited by subclasses."""
        class Fluflnum(Enum):
            def __int__(self):
                return int(self.value)
        class MailManOptions(Fluflnum):
            option1 = 1
            option2 = 2
            option3 = 3
        self.assertEqual(int(MailManOptions.option1), 1)
    def test_no_such_enum_member(self):
        """Bad value lookup raises ValueError; bad name lookup raises KeyError."""
        class Color(Enum):
            red = 1
            green = 2
            blue = 3
        self.assertRaises(ValueError, Color, 4)
        self.assertRaises(KeyError, Color.__getitem__, 'chartreuse')
    def test_new_repr(self):
        """A __repr__ defined in the Enum body overrides the default repr."""
        class Color(Enum):
            red = 1
            green = 2
            blue = 3
            def __repr__(self):
                return "don't you just love shades of %s?" % self.name
        self.assertEqual(
                repr(Color.blue),
                "don't you just love shades of blue?",
                )
    def test_inherited_repr(self):
        """__repr__ from an Enum base applies to mixed-in subclasses."""
        class MyEnum(Enum):
            def __repr__(self):
                return "My name is %s." % self.name
        class MyIntEnum(int, MyEnum):
            this = 1
            that = 2
            theother = 3
        self.assertEqual(repr(MyIntEnum.that), "My name is that.")
    def test_multiple_mixin_mro(self):
        """A custom EnumMeta may rewrite the class dict before class creation."""
        class auto_enum(EnumMeta):
            def __new__(metacls, cls, bases, classdict):
                original_dict = classdict
                classdict = enum._EnumDict()
                for k, v in original_dict.items():
                    classdict[k] = v
                temp = type(classdict)()
                names = set(classdict._member_names)
                i = 0
                for k in classdict._member_names:
                    v = classdict[k]
                    if v == ():
                        # members defined as an empty tuple are auto-numbered
                        v = i
                    else:
                        i = v
                    i += 1
                    temp[k] = v
                # carry over everything that is not a member (methods, dunders)
                for k, v in classdict.items():
                    if k not in names:
                        temp[k] = v
                return super(auto_enum, metacls).__new__(
                        metacls, cls, bases, temp)
        AutoNumberedEnum = auto_enum('AutoNumberedEnum', (Enum,), {})
        AutoIntEnum = auto_enum('AutoIntEnum', (IntEnum,), {})
        class TestAutoNumber(AutoNumberedEnum):
            a = ()
            b = 3
            c = ()
        class TestAutoInt(AutoIntEnum):
            a = ()
            b = 3
            c = ()
    def test_subclasses_with_getnewargs(self):
        """A mix-in type that pickles via __getnewargs__ works with Enum."""
        class NamedInt(int):
            __qualname__ = 'NamedInt'  # needed for pickle protocol 4
            def __new__(cls, *args):
                _args = args
                if len(args) < 1:
                    raise TypeError("name and value must be specified")
                name, args = args[0], args[1:]
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            def __getnewargs__(self):
                return self._args
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "%s(%r, %s)" % (type(self).__name__,
                                       self.__name__,
                                       int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '(%s + %s)' % (self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'  # needed for pickle protocol 4
            x = ('the-x', 1)
            y = ('the-y', 2)
        self.assertTrue(NEI.__new__ is Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # pickle needs module-level names to resolve against
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        test_pickle_dump_load(self.assertTrue, NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertTrue, NEI.y)
    if pyver >= 3.4:
        def test_subclasses_with_getnewargs_ex(self):
            """Like test_subclasses_with_getnewargs, but via __getnewargs_ex__
            (pickle protocol 4+ only, hence the version gate)."""
            class NamedInt(int):
                __qualname__ = 'NamedInt'  # needed for pickle protocol 4
                def __new__(cls, *args):
                    _args = args
                    if len(args) < 2:
                        raise TypeError("name and value must be specified")
                    name, args = args[0], args[1:]
                    self = int.__new__(cls, *args)
                    self._intname = name
                    self._args = _args
                    return self
                def __getnewargs_ex__(self):
                    return self._args, {}
                @property
                def __name__(self):
                    return self._intname
                def __repr__(self):
                    # repr() is updated to include the name and type info
                    return "{}({!r}, {})".format(type(self).__name__,
                                                 self.__name__,
                                                 int.__repr__(self))
                def __str__(self):
                    # str() is unchanged, even if it relies on the repr() fallback
                    base = int
                    base_str = base.__str__
                    if base_str.__objclass__ is object:
                        return base.__repr__(self)
                    return base_str(self)
                # for simplicity, we only define one operator that
                # propagates expressions
                def __add__(self, other):
                    temp = int(self) + int( other)
                    if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                        return NamedInt(
                            '({0} + {1})'.format(self.__name__, other.__name__),
                            temp )
                    else:
                        return temp
            class NEI(NamedInt, Enum):
                __qualname__ = 'NEI'  # needed for pickle protocol 4
                x = ('the-x', 1)
                y = ('the-y', 2)
            self.assertIs(NEI.__new__, Enum.__new__)
            self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
            # pickle needs module-level names to resolve against
            globals()['NamedInt'] = NamedInt
            globals()['NEI'] = NEI
            NI5 = NamedInt('test', 5)
            self.assertEqual(NI5, 5)
            test_pickle_dump_load(self.assertEqual, NI5, 5, protocol=(4, HIGHEST_PROTOCOL))
            self.assertEqual(NEI.y.value, 2)
            test_pickle_dump_load(self.assertTrue, NEI.y, protocol=(4, HIGHEST_PROTOCOL))
    def test_subclasses_with_reduce(self):
        """A mix-in type that pickles via __reduce__ works with Enum."""
        class NamedInt(int):
            __qualname__ = 'NamedInt'  # needed for pickle protocol 4
            def __new__(cls, *args):
                _args = args
                if len(args) < 1:
                    raise TypeError("name and value must be specified")
                name, args = args[0], args[1:]
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            def __reduce__(self):
                return self.__class__, self._args
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "%s(%r, %s)" % (type(self).__name__,
                                       self.__name__,
                                       int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '(%s + %s)' % (self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'  # needed for pickle protocol 4
            x = ('the-x', 1)
            y = ('the-y', 2)
        self.assertTrue(NEI.__new__ is Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # pickle needs module-level names to resolve against
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        test_pickle_dump_load(self.assertEqual, NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertTrue, NEI.y)
    def test_subclasses_with_reduce_ex(self):
        """A mix-in type that pickles via __reduce_ex__ works with Enum."""
        class NamedInt(int):
            __qualname__ = 'NamedInt'  # needed for pickle protocol 4
            def __new__(cls, *args):
                _args = args
                if len(args) < 1:
                    raise TypeError("name and value must be specified")
                name, args = args[0], args[1:]
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            def __reduce_ex__(self, proto):
                return self.__class__, self._args
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "%s(%r, %s)" % (type(self).__name__,
                                       self.__name__,
                                       int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '(%s + %s)' % (self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'  # needed for pickle protocol 4
            x = ('the-x', 1)
            y = ('the-y', 2)
        self.assertTrue(NEI.__new__ is Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # pickle needs module-level names to resolve against
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        test_pickle_dump_load(self.assertEqual, NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertTrue, NEI.y)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, args = args[0], args[1:]
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "%s(%r, %s)" % (type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'(%s + %s)' % (self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertTrue(NEI.__new__ is Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
def test_subclasses_without_direct_pickle_support_using_name(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, args = args[0], args[1:]
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "%s(%r, %s)" % (type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'(%s + %s)' % (self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertTrue(NEI.__new__ is Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertTrue, NEI.y)
test_pickle_dump_load(self.assertTrue, NEI)
    def test_tuple_subclass(self):
        """Tuple-mixin members are real tuples and pickle round-trip."""
        class SomeTuple(tuple, Enum):
            __qualname__ = 'SomeTuple'
            first = (1, 'for the money')
            second = (2, 'for the show')
            third = (3, 'for the music')
        self.assertTrue(type(SomeTuple.first) is SomeTuple)
        self.assertTrue(isinstance(SomeTuple.second, tuple))
        self.assertEqual(SomeTuple.third, (3, 'for the music'))
        # pickle needs a module-level name to resolve against
        globals()['SomeTuple'] = SomeTuple
        test_pickle_dump_load(self.assertTrue, SomeTuple.first)
    def test_duplicate_values_give_unique_enum_items(self):
        """A custom __new__ can auto-number members so no two alias each other."""
        class AutoNumber(Enum):
            __order__ = 'enum_m enum_d enum_y'
            enum_m = ()
            enum_d = ()
            enum_y = ()
            def __new__(cls):
                # next value is 1 + the number of members created so far
                value = len(cls.__members__) + 1
                obj = object.__new__(cls)
                obj._value_ = value
                return obj
            def __int__(self):
                return int(self._value_)
        self.assertEqual(int(AutoNumber.enum_d), 2)
        self.assertEqual(AutoNumber.enum_y.value, 3)
        self.assertTrue(AutoNumber(1) is AutoNumber.enum_m)
        self.assertEqual(
            list(AutoNumber),
            [AutoNumber.enum_m, AutoNumber.enum_d, AutoNumber.enum_y],
            )
    def test_inherited_new_from_enhanced_enum(self):
        """__new__ defined on a plain-Enum base is inherited by subclasses."""
        class AutoNumber2(Enum):
            def __new__(cls):
                value = len(cls.__members__) + 1
                obj = object.__new__(cls)
                obj._value_ = value
                return obj
            def __int__(self):
                return int(self._value_)
        class Color(AutoNumber2):
            __order__ = 'red green blue'
            red = ()
            green = ()
            blue = ()
        self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3))
        self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
        if pyver >= 3.0:
            self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber3(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber3):
red = ()
green = ()
blue = ()
self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3))
Color.red
Color.green
Color.blue
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
__order__ = 'A B C D F'
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertEqual(list(Grade), [Grade.A, Grade.B, Grade.C, Grade.D, Grade.F])
self.assertTrue(Grade.A > Grade.B)
self.assertTrue(Grade.F <= Grade.C)
self.assertTrue(Grade.D < Grade.A)
self.assertTrue(Grade.B >= Grade.B)
def test_extending2(self):
def bad_extension():
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertRaises(TypeError, bad_extension)
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
    def test_no_duplicates(self):
        """A subclass __init__ can veto aliases at class-definition time."""
        def bad_duplicates():
            class UniqueEnum(Enum):
                def __init__(self, *args):
                    cls = self.__class__
                    # an earlier member already owning this value is an alias
                    if any(self.value == e.value for e in cls):
                        a = self.name
                        e = cls(self.value).name
                        raise ValueError(
                                "aliases not allowed in UniqueEnum:  %r --> %r"
                                % (a, e)
                                )
            class Color(UniqueEnum):
                red = 1
                green = 2
                blue = 3
            # deliberately shadows the clean Color above; 'grene' aliases 'green'
            class Color(UniqueEnum):
                red = 1
                green = 2
                blue = 3
                grene = 2
        self.assertRaises(ValueError, bad_duplicates)
    def test_reversed(self):
        """reversed() walks members opposite to definition order."""
        self.assertEqual(
            list(reversed(self.Season)),
            [self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
             self.Season.SPRING]
            )
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m3 kg-1 s-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
    def test_nonhash_value(self):
        """Member values need not be hashable (here: lists)."""
        class AutoNumberInAList(Enum):
            def __new__(cls):
                value = [len(cls.__members__) + 1]
                obj = object.__new__(cls)
                obj._value_ = value
                return obj
        class ColorInAList(AutoNumberInAList):
            __order__ = 'red green blue'
            red = ()
            green = ()
            blue = ()
        self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
        self.assertEqual(ColorInAList.red.value, [1])
        self.assertEqual(ColorInAList([1]), ColorInAList.red)
    def test_conflicting_types_resolved_in_new(self):
        """A custom __new__ reconciles an int mix-in with extra tuple data."""
        class LabelledIntEnum(int, Enum):
            def __new__(cls, *args):
                value, label = args
                obj = int.__new__(cls, value)
                obj.label = label
                obj._value_ = value
                return obj
        class LabelledList(LabelledIntEnum):
            unprocessed = (1, "Unprocessed")
            payment_complete = (2, "Payment Complete")
        self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
        self.assertEqual(LabelledList.unprocessed, 1)
        self.assertEqual(LabelledList(1), LabelledList.unprocessed)
class TestUnique(unittest.TestCase):
    """Tests for the unique() alias detector.

    2.4 doesn't allow class decorators, use function syntax.
    """
    def test_unique_clean(self):
        """unique() passes silently when no values are aliased."""
        class Clean(Enum):
            one = 1
            two = 'dos'
            tres = 4.0
        unique(Clean)
        class Cleaner(IntEnum):
            single = 1
            double = 2
            triple = 3
        unique(Cleaner)
    def test_unique_dirty(self):
        """unique() raises ValueError naming every alias pair."""
        try:
            class Dirty(Enum):
                __order__ = 'one two tres'
                one = 1
                two = 'dos'
                tres = 1
            unique(Dirty)
        except ValueError:
            # sys.exc_info keeps 2.4/2.5 compat ('except ... as' is 2.6+)
            exc = sys.exc_info()[1]
            message = exc.args[0]
            self.assertTrue('tres -> one' in message)
        try:
            class Dirtier(IntEnum):
                __order__ = 'single double triple turkey'
                single = 1
                double = 1
                triple = 3
                turkey = 3
            unique(Dirtier)
        except ValueError:
            exc = sys.exc_info()[1]
            message = exc.args[0]
            self.assertTrue('double -> single' in message)
            self.assertTrue('turkey -> triple' in message)
class TestMe(unittest.TestCase):
    # Placeholder TestCase; no tests defined (yet).
    pass
if __name__ == '__main__':
    # Run the full test suite when executed as a script.
    unittest.main()
| |
# -*- coding: utf-8 -*-
"""
babel.messages.mofile
~~~~~~~~~~~~~~~~~~~~~
Writing of files in the ``gettext`` MO (machine object) format.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import array
import struct
from babel.messages.catalog import Catalog, Message
from babel._compat import range_type
LE_MAGIC = 0x950412de  # MO-file magic number as read on a little-endian file
BE_MAGIC = 0xde120495  # the same magic as read on a big-endian file
def read_mo(fileobj):
    """Read a binary MO file from the given file-like object and return a
    corresponding `Catalog` object.

    :param fileobj: the file-like object to read the MO file from
    :return: a `Catalog` populated with the file's messages and MIME headers
    :note: The implementation of this function is heavily based on the
        ``GNUTranslations._parse`` method of the ``gettext`` module in the
        standard library.
    """
    catalog = Catalog()
    headers = {}
    filename = getattr(fileobj, 'name', '')
    buf = fileobj.read()
    buflen = len(buf)
    unpack = struct.unpack
    # Parse the .mo file header, which consists of 5 little endian 32
    # bit words.
    magic = unpack('<I', buf[:4])[0]  # Are we big endian or little endian?
    if magic == LE_MAGIC:
        version, msgcount, origidx, transidx = unpack('<4I', buf[4:20])
        ii = '<II'
    elif magic == BE_MAGIC:
        version, msgcount, origidx, transidx = unpack('>4I', buf[4:20])
        ii = '>II'
    else:
        raise IOError(0, 'Bad magic number', filename)
    # Now put all messages from the .mo file buffer into the catalog
    # dictionary
    for i in range_type(0, msgcount):
        # each index entry is two uint32s: string length, then file offset
        mlen, moff = unpack(ii, buf[origidx:origidx + 8])
        mend = moff + mlen
        tlen, toff = unpack(ii, buf[transidx:transidx + 8])
        tend = toff + tlen
        if mend < buflen and tend < buflen:
            msg = buf[moff:mend]
            tmsg = buf[toff:tend]
        else:
            raise IOError(0, 'File is corrupt', filename)
        # See if we're looking at GNU .mo conventions for metadata
        if mlen == 0:
            # Catalog description: the empty msgid's translation carries
            # RFC822-style headers
            lastkey = key = None
            for item in tmsg.splitlines():
                item = item.strip()
                if not item:
                    continue
                if b':' in item:
                    key, value = item.split(b':', 1)
                    lastkey = key = key.strip().lower()
                    headers[key] = value.strip()
                elif lastkey:
                    # continuation line of the previous header
                    headers[lastkey] += b'\n' + item
        if b'\x04' in msg:  # context
            ctxt, msg = msg.split(b'\x04')
        else:
            ctxt = None
        if b'\x00' in msg:  # plural forms
            msg = msg.split(b'\x00')
            tmsg = tmsg.split(b'\x00')
            if catalog.charset:
                msg = [x.decode(catalog.charset) for x in msg]
                tmsg = [x.decode(catalog.charset) for x in tmsg]
        else:
            if catalog.charset:
                msg = msg.decode(catalog.charset)
                tmsg = tmsg.decode(catalog.charset)
        catalog[msg] = Message(msg, tmsg, context=ctxt)
        # advance to next entry in the seek tables
        origidx += 8
        transidx += 8
    catalog.mime_headers = headers.items()
    return catalog
def write_mo(fileobj, catalog, use_fuzzy=False):
    """Write a catalog to the specified file-like object using the GNU MO file
    format.
    >>> from babel.messages import Catalog
    >>> from gettext import GNUTranslations
    >>> from StringIO import StringIO
    >>> catalog = Catalog(locale='en_US')
    >>> catalog.add('foo', 'Voh')
    <Message ...>
    >>> catalog.add((u'bar', u'baz'), (u'Bahr', u'Batz'))
    <Message ...>
    >>> catalog.add('fuz', 'Futz', flags=['fuzzy'])
    <Message ...>
    >>> catalog.add('Fizz', '')
    <Message ...>
    >>> catalog.add(('Fuzz', 'Fuzzes'), ('', ''))
    <Message ...>
    >>> buf = StringIO()
    >>> write_mo(buf, catalog)
    >>> buf.seek(0)
    >>> translations = GNUTranslations(fp=buf)
    >>> translations.ugettext('foo')
    u'Voh'
    >>> translations.ungettext('bar', 'baz', 1)
    u'Bahr'
    >>> translations.ungettext('bar', 'baz', 2)
    u'Batz'
    >>> translations.ugettext('fuz')
    u'fuz'
    >>> translations.ugettext('Fizz')
    u'Fizz'
    >>> translations.ugettext('Fuzz')
    u'Fuzz'
    >>> translations.ugettext('Fuzzes')
    u'Fuzzes'
    :param fileobj: the file-like object to write to
    :param catalog: the `Catalog` instance
    :param use_fuzzy: whether translations marked as "fuzzy" should be included
                      in the output
    """
    messages = list(catalog)
    if not use_fuzzy:
        # always keep the header message (index 0), even if fuzzy
        messages[1:] = [m for m in messages[1:] if not m.fuzzy]
    messages.sort()
    ids = strs = b''
    offsets = []
    for message in messages:
        # For each string, we need size and file offset.  Each string is NUL
        # terminated; the NUL does not count into the size.
        if message.pluralizable:
            msgid = b'\x00'.join([
                msgid.encode(catalog.charset) for msgid in message.id
            ])
            msgstrs = []
            for idx, string in enumerate(message.string):
                if not string:
                    # fall back to the singular/plural msgid for empty strings
                    msgstrs.append(message.id[min(int(idx), 1)])
                else:
                    msgstrs.append(string)
            msgstr = b'\x00'.join([
                msgstr.encode(catalog.charset) for msgstr in msgstrs
            ])
        else:
            msgid = message.id.encode(catalog.charset)
            if not message.string:
                msgstr = message.id.encode(catalog.charset)
            else:
                msgstr = message.string.encode(catalog.charset)
        if message.context:
            # a context is joined to the msgid with an EOT (0x04) byte
            msgid = b'\x04'.join([message.context.encode(catalog.charset),
                                  msgid])
        offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
        ids += msgid + b'\x00'
        strs += msgstr + b'\x00'
    # The header is 7 32-bit unsigned integers. We don't use hash tables, so
    # the keys start right after the index tables.
    keystart = 7 * 4 + 16 * len(messages)
    valuestart = keystart + len(ids)
    # The string table first has the list of keys, then the list of values.
    # Each entry has first the size of the string, then the file offset.
    koffsets = []
    voffsets = []
    for o1, l1, o2, l2 in offsets:
        koffsets += [l1, o1 + keystart]
        voffsets += [l2, o2 + valuestart]
    offsets = koffsets + voffsets
    table = array.array("i", offsets)
    # array.tostring() was removed in Python 3.9; tobytes() is the exact
    # replacement (available since 3.2).  Keep a fallback for Python 2,
    # which only has tostring().
    if hasattr(table, 'tobytes'):
        table_data = table.tobytes()
    else:  # pragma: no cover -- Python 2
        table_data = table.tostring()
    fileobj.write(struct.pack('Iiiiiii',
                              LE_MAGIC,                   # magic
                              0,                          # version
                              len(messages),              # number of entries
                              7 * 4,                      # start of key index
                              7 * 4 + len(messages) * 8,  # start of value index
                              0, 0                        # size and offset of hash table
                              ) + table_data + ids + strs)
| |
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
from __future__ import print_function
from rez.utils.execution import create_forwarding_script
from rez.exceptions import SuiteError, ResolvedContextError
from rez.resolved_context import ResolvedContext
from rez.utils.data_utils import cached_property
from rez.utils.formatting import columnise, PackageRequest
from rez.utils.colorize import warning, critical, Printer, alias as alias_col
from rez.vendor import yaml
from rez.vendor.yaml.error import YAMLError
from rez.utils.yaml import dump_yaml
from rez.vendor.six import six
from collections import defaultdict
import os
import os.path
import shutil
import sys
# Py2/3 compatibility: a single string base type (`str` on py3) used for
# isinstance checks in this module (see `Suite.find_contexts`).
basestring = six.string_types[0]
class Suite(object):
    """A collection of contexts.
    A suite is a collection of contexts. A suite stores its contexts in a
    single directory, and creates wrapper scripts for each tool in each context,
    which it stores into a single bin directory. When a tool is invoked, it
    executes the actual tool in its associated context. When you add a suite's
    bin directory to PATH, you have access to all these tools, which will
    automatically run in correctly configured environments.
    Tool clashes can occur when a tool of the same name is present in more than
    one context. When a context is added to a suite, or prefixed/suffixed, that
    context's tools override tools from other contexts.
    There are several ways to avoid tool name clashes:
    - Hide a tool. This removes it from the suite even if it does not clash;
    - Prefix/suffix a context. When you do this, all the tools in the context
      have the prefix/suffix applied;
    - Explicitly alias a tool using the `alias_tool` method. This takes
      precedence over context prefix/suffixing.
    """
    def __init__(self):
        """Create a suite."""
        # Directory the suite was loaded from (set by `load`), or None for an
        # in-memory suite.
        self.load_path = None
        # Maps context name -> context data dict (see `add_context` for keys).
        self.contexts = {}
        # Monotonically increasing counter; contexts with a higher priority
        # win tool-name clashes (see `_next_priority` / `bump_context`).
        self.next_priority = 1
        # Lazily-computed tool caches; None means "needs recomputing" (see
        # `_flush_tools` / `_update_tools`).
        self.tools = None
        self.tool_conflicts = None
        self.hidden_tools = None
    @property
    def context_names(self):
        """Get the names of the contexts in the suite.
        Returns:
            List of strings.
        """
        return list(self.contexts.keys())
    @cached_property
    def tools_path(self):
        """Get the path that should be added to $PATH to expose this suite's
        tools.
        Returns:
            Absolute path as a string, or None if this suite was not loaded
            from disk.
        """
        return os.path.join(self.load_path, "bin") if self.load_path else None
    def activation_shell_code(self, shell=None):
        """Get shell code that should be run to activate this suite.
        Args:
            shell (str): Shell type (from rez.shells), or None for the
                default shell.
        Returns:
            Shell code as a string.
        """
        from rez.shells import create_shell
        from rez.rex import RexExecutor
        executor = RexExecutor(interpreter=create_shell(shell),
                               parent_variables=["PATH"],
                               shebang=False)
        executor.env.PATH.append(self.tools_path)
        return executor.get_output().strip()
    def __str__(self):
        return "%s(%s)" % (self.__class__.__name__, " ".join(self.context_names))
    def context(self, name):
        """Get a context.
        Args:
            name (str): Name the context is stored under.
        Returns:
            `ResolvedContext` object.
        """
        data = self._context(name)
        context = data.get("context")
        if context:
            return context
        # Context object not in memory yet - this suite must have been loaded
        # from disk, so lazily load the context from its .rxt file.
        assert self.load_path
        context_path = os.path.join(self.load_path, "contexts", "%s.rxt" % name)
        context = ResolvedContext.load(context_path)
        data["context"] = context
        data["loaded"] = True
        return context
    def add_context(self, name, context, prefix_char=None):
        """Add a context to the suite.
        Args:
            name (str): Name to store the context under.
            context (ResolvedContext): Context to add.
            prefix_char (str): Passed through to each of this context's tool
                wrapper scripts (see `save`).
        """
        if name in self.contexts:
            raise SuiteError("Context already in suite: %r" % name)
        if not context.success:
            raise SuiteError("Context is not resolved: %r" % name)
        # A copy is stored so later mutation of `context` by the caller does
        # not affect the suite.
        self.contexts[name] = dict(name=name,
                                   context=context.copy(),
                                   tool_aliases={},
                                   hidden_tools=set(),
                                   priority=self._next_priority,
                                   prefix_char=prefix_char)
        self._flush_tools()
    def find_contexts(self, in_request=None, in_resolve=None):
        """Find contexts in the suite based on search criteria.
        Args:
            in_request (str): Match contexts that contain the given package in
                their request.
            in_resolve (str or `Requirement`): Match contexts that contain the
                given package in their resolve. You can also supply a conflict
                requirement - '!foo' will match any contexts whose resolve does
                not contain any version of package 'foo'.
        Returns:
            List of context names that match the search criteria.
        """
        names = self.context_names
        if in_request:
            def _in_request(name):
                context = self.context(name)
                packages = set(x.name for x in context.requested_packages(True))
                return (in_request in packages)
            names = [x for x in names if _in_request(x)]
        if in_resolve:
            if isinstance(in_resolve, basestring):
                in_resolve = PackageRequest(in_resolve)
            def _in_resolve(name):
                context = self.context(name)
                variant = context.get_resolved_package(in_resolve.name)
                if variant:
                    overlap = (variant.version in in_resolve.range)
                    # Matches when the resolved version overlaps a normal
                    # request, or does NOT overlap a conflict ('!') request.
                    return (
                        (in_resolve.conflict and not overlap)
                        or (overlap and not in_resolve.conflict)
                    )
                else:
                    # Package absent from resolve: only a conflict request
                    # matches.
                    return in_resolve.conflict
            names = [x for x in names if _in_resolve(x)]
        return names
    def remove_context(self, name):
        """Remove a context from the suite.
        Args:
            name (str): Name of the context to remove.
        """
        self._context(name)
        del self.contexts[name]
        self._flush_tools()
    def set_context_prefix(self, name, prefix):
        """Set a context's prefix.
        This will be applied to all wrappers for the tools in this context. For
        example, a tool called 'foo' would appear as '<prefix>foo' in the
        suite's bin path.
        Args:
            name (str): Name of the context to prefix.
            prefix (str): Prefix to apply to tools.
        """
        data = self._context(name)
        data["prefix"] = prefix
        self._flush_tools()
    def remove_context_prefix(self, name):
        """Remove a context's prefix.
        Args:
            name (str): Name of the context to de-prefix.
        """
        self.set_context_prefix(name, "")
    def set_context_suffix(self, name, suffix):
        """Set a context's suffix.
        This will be applied to all wrappers for the tools in this context. For
        example, a tool called 'foo' would appear as 'foo<suffix>' in the
        suite's bin path.
        Args:
            name (str): Name of the context to suffix.
            suffix (str): Suffix to apply to tools.
        """
        data = self._context(name)
        data["suffix"] = suffix
        self._flush_tools()
    def remove_context_suffix(self, name):
        """Remove a context's suffix.
        Args:
            name (str): Name of the context to de-suffix.
        """
        self.set_context_suffix(name, "")
    def bump_context(self, name):
        """Causes the context's tools to take priority over all others."""
        data = self._context(name)
        data["priority"] = self._next_priority
        self._flush_tools()
    def hide_tool(self, context_name, tool_name):
        """Hide a tool so that it is not exposed in the suite.
        Args:
            context_name (str): Context containing the tool.
            tool_name (str): Name of tool to hide.
        """
        data = self._context(context_name)
        hidden_tools = data["hidden_tools"]
        if tool_name not in hidden_tools:
            self._validate_tool(context_name, tool_name)
            hidden_tools.add(tool_name)
            self._flush_tools()
    def unhide_tool(self, context_name, tool_name):
        """Unhide a tool so that it may be exposed in a suite.
        Note that unhiding a tool doesn't guarantee it can be seen - a tool of
        the same name from a different context may be overriding it.
        Args:
            context_name (str): Context containing the tool.
            tool_name (str): Name of tool to unhide.
        """
        data = self._context(context_name)
        hidden_tools = data["hidden_tools"]
        if tool_name in hidden_tools:
            hidden_tools.remove(tool_name)
            self._flush_tools()
    def alias_tool(self, context_name, tool_name, tool_alias):
        """Register an alias for a specific tool.
        Note that a tool alias takes precedence over a context prefix/suffix.
        Args:
            context_name (str): Context containing the tool.
            tool_name (str): Name of tool to alias.
            tool_alias (str): Alias to give the tool.
        """
        data = self._context(context_name)
        aliases = data["tool_aliases"]
        if tool_name in aliases:
            raise SuiteError("Tool %r in context %r is already aliased to %r"
                             % (tool_name, context_name, aliases[tool_name]))
        self._validate_tool(context_name, tool_name)
        aliases[tool_name] = tool_alias
        self._flush_tools()
    def unalias_tool(self, context_name, tool_name):
        """Deregister an alias for a specific tool.
        Args:
            context_name (str): Context containing the tool.
            tool_name (str): Name of tool to unalias.
        """
        data = self._context(context_name)
        aliases = data["tool_aliases"]
        if tool_name in aliases:
            del aliases[tool_name]
            self._flush_tools()
    def get_tools(self):
        """Get the tools exposed by this suite.
        Returns:
            A dict, keyed by aliased tool name, with dict entries:
            - tool_name (str): The original, non-aliased name of the tool;
            - tool_alias (str): Aliased tool name (same as key);
            - context_name (str): Name of the context containing the tool;
            - variant (`Variant` or set): Variant providing the tool. If the
              tool is in conflict within the context (more than one package has
              a tool of the same name), this will be a set of Variants.
        """
        self._update_tools()
        return self.tools
    def get_tool_filepath(self, tool_alias):
        """Given a visible tool alias, return the full path to the executable.
        Args:
            tool_alias (str): Tool alias to search for.
        Returns:
            (str): Filepath of executable, or None if the tool is not in the
            suite. May also return None because this suite has not been saved
            to disk, so a filepath hasn't yet been established.
        """
        tools_dict = self.get_tools()
        if tool_alias in tools_dict:
            if self.tools_path is None:
                return None
            else:
                return os.path.join(self.tools_path, tool_alias)
        else:
            return None
    def get_tool_context(self, tool_alias):
        """Given a visible tool alias, return the name of the context it
        belongs to.
        Args:
            tool_alias (str): Tool alias to search for.
        Returns:
            (str): Name of the context that exposes a visible instance of this
            tool alias, or None if the alias is not available.
        """
        tools_dict = self.get_tools()
        data = tools_dict.get(tool_alias)
        if data:
            return data["context_name"]
        return None
    def get_hidden_tools(self):
        """Get the tools hidden in this suite.
        Hidden tools are those that have been explicitly hidden via `hide_tool`.
        Returns:
            A list of dicts, where each dict contains:
            - tool_name (str): The original, non-aliased name of the tool;
            - tool_alias (str): Aliased tool name (same as key);
            - context_name (str): Name of the context containing the tool;
            - variant (`Variant`): Variant providing the tool.
        """
        self._update_tools()
        return self.hidden_tools
    def get_conflicting_aliases(self):
        """Get a list of tool aliases that have one or more conflicts.
        Returns:
            List of strings.
        """
        self._update_tools()
        return list(self.tool_conflicts.keys())
    def get_alias_conflicts(self, tool_alias):
        """Get a list of conflicts on the given tool alias.
        Args:
            tool_alias (str): Alias to check for conflicts.
        Returns: None if the alias has no conflicts, or a list of dicts, where
        each dict contains:
            - tool_name (str): The original, non-aliased name of the tool;
            - tool_alias (str): Aliased tool name (same as key);
            - context_name (str): Name of the context containing the tool;
            - variant (`Variant`): Variant providing the tool.
        """
        self._update_tools()
        return self.tool_conflicts.get(tool_alias)
    def validate(self):
        """Validate the suite.
        Raises:
            SuiteError: If any of the suite's contexts fail validation.
        """
        for context_name in self.context_names:
            context = self.context(context_name)
            try:
                context.validate()
            except ResolvedContextError as e:
                raise SuiteError("Error in context %r: %s"
                                 % (context_name, str(e)))
    def to_dict(self):
        """Serialize the suite to a dict.
        Runtime-only keys ('context', 'loaded') are stripped from each
        context's data so the result is safe to dump to yaml.
        """
        contexts_ = {}
        for k, data in self.contexts.items():
            data_ = data.copy()
            if "context" in data_:
                del data_["context"]
            if "loaded" in data_:
                del data_["loaded"]
            contexts_[k] = data_
        return dict(contexts=contexts_)
    @classmethod
    def from_dict(cls, d):
        """Reconstruct a suite from a dict created by `to_dict`."""
        # __new__ is used so __init__'s defaults don't clobber loaded data.
        s = Suite.__new__(Suite)
        s.load_path = None
        s.tools = None
        s.tool_conflicts = None
        # NOTE(review): unlike __init__, `hidden_tools` is not initialized
        # here; it is set lazily by `_update_tools`/`_flush_tools` - confirm
        # nothing reads it before then.
        s.contexts = d["contexts"]
        if s.contexts:
            # Continue the priority counter past the highest loaded priority.
            s.next_priority = max(x["priority"]
                                  for x in s.contexts.values()) + 1
        else:
            s.next_priority = 1
        return s
    def save(self, path, verbose=False):
        """Save the suite to disk.
        Args:
            path (str): Path to save the suite to. If a suite is already saved
                at `path`, then it will be overwritten. Otherwise, if `path`
                exists, an error is raised.
            verbose (bool): If True, print progress messages to stdout.
        """
        path = os.path.realpath(path)
        if os.path.exists(path):
            if self.load_path and self.load_path == path:
                if verbose:
                    print("saving over previous suite...")
                for context_name in self.context_names:
                    self.context(context_name)  # load before dir deleted
                shutil.rmtree(path)
            else:
                raise SuiteError("Cannot save, path exists: %r" % path)
        contexts_path = os.path.join(path, "contexts")
        os.makedirs(contexts_path)
        # write suite data
        data = self.to_dict()
        filepath = os.path.join(path, "suite.yaml")
        with open(filepath, "w") as f:
            f.write(dump_yaml(data))
        # write contexts
        for context_name in self.context_names:
            context = self.context(context_name)
            context._set_parent_suite(path, context_name)
            filepath = self._context_path(context_name, path)
            if verbose:
                print("writing %r..." % filepath)
            context.save(filepath)
        # create alias wrappers - one executable script per visible tool,
        # each forwarding to _FWD__invoke_suite_tool_alias below.
        tools_path = os.path.join(path, "bin")
        os.makedirs(tools_path)
        if verbose:
            print("creating alias wrappers in %r..." % tools_path)
        tools = self.get_tools()
        for tool_alias, d in tools.items():
            tool_name = d["tool_name"]
            context_name = d["context_name"]
            data = self._context(context_name)
            prefix_char = data.get("prefix_char")
            if verbose:
                print("creating %r -> %r (%s context)..."
                      % (tool_alias, tool_name, context_name))
            filepath = os.path.join(tools_path, tool_alias)
            create_forwarding_script(filepath,
                                     module="suite",
                                     func_name="_FWD__invoke_suite_tool_alias",
                                     context_name=context_name,
                                     tool_name=tool_name,
                                     prefix_char=prefix_char)
    @classmethod
    def load(cls, path):
        """Load a suite from disk.
        Args:
            path (str): Directory a suite was previously saved to.
        Returns:
            `Suite` object.
        """
        if not os.path.exists(path):
            # opening the missing path produces a standard IOError with a
            # meaningful message.
            open(path)  # raise IOError
        filepath = os.path.join(path, "suite.yaml")
        if not os.path.isfile(filepath):
            raise SuiteError("Not a suite: %r" % path)
        try:
            with open(filepath) as f:
                data = yaml.load(f.read(), Loader=yaml.FullLoader)
        except YAMLError as e:
            raise SuiteError("Failed loading suite: %s" % str(e))
        s = cls.from_dict(data)
        s.load_path = os.path.realpath(path)
        return s
    @classmethod
    def visible_suite_paths(cls, paths=None):
        """Get a list of paths to suites that are visible on $PATH.
        Returns:
            List of str.
        """
        suite_paths = []
        if paths is None:
            paths = os.getenv("PATH", "").split(os.pathsep)
        for path in paths:
            if path and os.path.isdir(path):
                # a suite's 'bin' dir is what appears on PATH; the suite root
                # (containing suite.yaml) is its parent.
                path_ = os.path.dirname(path)
                filepath = os.path.join(path_, "suite.yaml")
                if os.path.isfile(filepath):
                    suite_paths.append(path_)
        return suite_paths
    @classmethod
    def load_visible_suites(cls, paths=None):
        """Get a list of suites whose bin paths are visible on $PATH.
        Returns:
            List of `Suite` objects.
        """
        suite_paths = cls.visible_suite_paths(paths)
        suites = [cls.load(x) for x in suite_paths]
        return suites
    def print_info(self, buf=sys.stdout, verbose=False):
        """Prints a message summarising the contents of the suite.
        Args:
            buf (file-like): Stream to print to.
            verbose (bool): If True, print a per-context table rather than
                just the context names.
        """
        _pr = Printer(buf)
        if not self.contexts:
            _pr("Suite is empty.")
            return
        context_names = sorted(self.contexts.keys())
        _pr("Suite contains %d contexts:" % len(context_names))
        if not verbose:
            _pr(' '.join(context_names))
            return
        tools = self.get_tools().values()
        context_tools = defaultdict(set)
        context_variants = defaultdict(set)
        for entry in tools:
            context_name = entry["context_name"]
            context_tools[context_name].add(entry["tool_name"])
            context_variants[context_name].add(str(entry["variant"]))
        _pr()
        rows = [["NAME", "VISIBLE TOOLS", "PATH"],
                ["----", "-------------", "----"]]
        for context_name in context_names:
            context_path = self._context_path(context_name) or '-'
            ntools = len(context_tools.get(context_name, []))
            if ntools:
                nvariants = len(context_variants[context_name])
                short_desc = "%d tools from %d packages" % (ntools, nvariants)
            else:
                short_desc = "no tools"
            rows.append((context_name, short_desc, context_path))
        _pr("\n".join(columnise(rows)))
    def print_tools(self, buf=sys.stdout, verbose=False, context_name=None):
        """Print table of tools available in the suite.
        Args:
            buf (file-like): Stream to print to.
            verbose (bool): If True, also include hidden and conflicting
                (not-visible) tools in the table.
            context_name (str): If provided, only print the tools from this
                context.
        """
        def _get_row(entry):
            # Build one table row for a tool entry; also returns the color
            # function to print the row with (or None for plain).
            context_name_ = entry["context_name"]
            tool_alias = entry["tool_alias"]
            tool_name = entry["tool_name"]
            properties = []
            col = None
            variant = entry["variant"]
            if isinstance(variant, set):
                # a set of variants means an intra-context tool clash
                properties.append("(in conflict)")
                col = critical
                if verbose:
                    package = ", ".join(x.qualified_package_name for x in variant)
                else:
                    v = next(iter(variant))
                    package = "%s (+%d more)" % (v.qualified_package_name,
                                                 len(variant) - 1)
            else:
                package = variant.qualified_package_name
            if tool_name == tool_alias:
                tool_name = "-"
            else:
                properties.append("(aliased)")
                if col is None:
                    col = alias_col
            msg = " ".join(properties)
            row = [tool_alias, tool_name, package, context_name_, msg]
            return row, col
        if context_name:
            self._context(context_name)  # check context exists
            context_names = [context_name]
        else:
            context_names = sorted(self.contexts.keys())
        rows = [["TOOL", "ALIASING", "PACKAGE", "CONTEXT", ""],
                ["----", "--------", "-------", "-------", ""]]
        colors = [None, None]
        entries_dict = defaultdict(list)
        for d in self.get_tools().values():
            entries_dict[d["context_name"]].append(d)
        if verbose:
            # add hidden entries
            for d in self.hidden_tools:
                d_ = d.copy()
                d_["hidden"] = True
                entries_dict[d["context_name"]].append(d_)
            # add conflicting tools
            for docs in self.tool_conflicts.values():
                for d in docs:
                    d_ = d.copy()
                    d_["conflicting"] = True
                    entries_dict[d["context_name"]].append(d_)
        for i, context_name in enumerate(context_names):
            entries = entries_dict.get(context_name, [])
            if entries:
                if i:
                    # blank separator row between contexts
                    rows.append(('', '', '', '', ''))
                    colors.append(None)
                entries = sorted(entries, key=lambda x: x["tool_alias"].lower())
                for entry in entries:
                    row, col = _get_row(entry)
                    if "hidden" in entry:
                        row[-1] = "(hidden)"
                        rows.append(row)
                        colors.append(warning)
                    elif "conflicting" in entry:
                        row[-1] = "(not visible)"
                        rows.append(row)
                        colors.append(warning)
                    else:
                        rows.append(row)
                        colors.append(col)
        _pr = Printer(buf)
        if rows:
            for col, line in zip(colors, columnise(rows)):
                _pr(line, col)
        else:
            _pr("No tools available.")
    def _context(self, name):
        # Return the raw context data dict, raising if `name` is unknown.
        data = self.contexts.get(name)
        if not data:
            raise SuiteError("No such context: %r" % name)
        return data
    def _context_path(self, name, suite_path=None):
        # Path of the context's .rxt file within the suite dir, or None if
        # the suite has no on-disk location.
        suite_path = suite_path or self.load_path
        if not suite_path:
            return None
        filepath = os.path.join(suite_path, "contexts", "%s.rxt" % name)
        return filepath
    def _sorted_contexts(self):
        # Context data dicts, lowest priority first.
        return sorted(self.contexts.values(), key=lambda x: x["priority"])
    @property
    def _next_priority(self):
        # Post-incrementing priority counter.
        p = self.next_priority
        self.next_priority += 1
        return p
    def _flush_tools(self):
        # Invalidate the tool caches; recomputed on next `_update_tools`.
        self.tools = None
        self.tool_conflicts = None
        self.hidden_tools = None
    def _validate_tool(self, context_name, tool_name):
        # Raise SuiteError unless `tool_name` is provided by a package that
        # was directly requested in the named context.
        context = self.context(context_name)
        context_tools = context.get_tools(request_only=True)
        for _, tool_names in context_tools.values():
            if tool_name in tool_names:
                return
        raise SuiteError("No such tool %r in context %r"
                         % (tool_name, context_name))
    def _update_tools(self):
        # Rebuild self.tools / self.hidden_tools / self.tool_conflicts.
        # No-op if the caches are already current.
        if self.tools is not None:
            return
        self.tools = {}
        self.hidden_tools = []
        self.tool_conflicts = defaultdict(list)
        # Iterate highest-priority context first; the first entry stored for
        # an alias wins, later (lower-priority) ones become conflicts.
        for data in reversed(self._sorted_contexts()):
            context_name = data["name"]
            tool_aliases = data["tool_aliases"]
            hidden_tools = data["hidden_tools"]
            prefix = data.get("prefix", "")
            suffix = data.get("suffix", "")
            context = self.context(context_name)
            context_tools = context.get_tools(request_only=True)
            for variant, tool_names in context_tools.values():
                for tool_name in tool_names:
                    # explicit alias takes precedence over prefix/suffix
                    alias = tool_aliases.get(tool_name)
                    if alias is None:
                        alias = "%s%s%s" % (prefix, tool_name, suffix)
                    entry = dict(tool_name=tool_name,
                                 tool_alias=alias,
                                 context_name=context_name,
                                 variant=variant)
                    if tool_name in hidden_tools:
                        self.hidden_tools.append(entry)
                        continue
                    existing_entry = self.tools.get(alias)
                    if existing_entry:
                        if existing_entry.get("context_name") == context_name:
                            # the same tool is provided in the same context by
                            # more than one package.
                            existing_variant = existing_entry["variant"]
                            if isinstance(existing_variant, set):
                                existing_variant.add(variant)
                            else:
                                existing_entry["variant"] = set([existing_variant,
                                                                 variant])
                        else:
                            # clash with a higher-priority context's tool
                            self.tool_conflicts[alias].append(entry)
                    else:
                        self.tools[alias] = entry
def _FWD__invoke_suite_tool_alias(context_name, tool_name, prefix_char=None,
                                  _script=None, _cli_args=None):
    """Entry point invoked by the wrapper scripts created in `Suite.save`.

    Loads the tool's context from the suite on disk, runs the tool via a
    `Wrapper`, and exits the process with the tool's return code.

    Args:
        context_name (str): Name of the context containing the tool.
        tool_name (str): Name of the tool to run.
        prefix_char (str): Passed through to the wrapper.
        _script (str): Path of the invoking wrapper script (injected by the
            forwarding mechanism); the suite root is derived from it.
        _cli_args (list): Command-line args to pass to the tool.
    """
    # wrapper lives at <suite>/bin/<alias>, so the suite root is 2 levels up
    suite_path = os.path.dirname(os.path.dirname(_script))
    path = os.path.join(suite_path, "contexts", "%s.rxt" % context_name)
    context = ResolvedContext.load(path)
    from rez.wrapper import Wrapper
    # __new__ bypasses Wrapper.__init__; _init is called directly with the
    # already-loaded context.
    w = Wrapper.__new__(Wrapper)
    w._init(suite_path, context_name, context, tool_name, prefix_char)
    retcode = w.run(*(_cli_args or []))
    sys.exit(retcode)
| |
# -*- coding: utf-8 -*-
"""Display formatters.
Inheritance diagram:
.. inheritance-diagram:: IPython.core.formatters
:parts: 3
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import abc
import inspect
import json
import sys
import traceback
import warnings
from IPython.external.decorator import decorator
from IPython.config.configurable import Configurable
from IPython.core.getipython import get_ipython
from IPython.lib import pretty
from IPython.utils.traitlets import (
Bool, Dict, Integer, Unicode, CUnicode, ObjectName, List,
ForwardDeclaredInstance,
)
from IPython.utils.py3compat import (
with_metaclass, string_types, unicode_type,
)
#-----------------------------------------------------------------------------
# The main DisplayFormatter class
#-----------------------------------------------------------------------------
def _safe_get_formatter_method(obj, name):
    """Return ``obj``'s formatter method ``name``, or None if unavailable.

    Classes never expose formatter methods (only instances do), and proxy
    objects that pretend to implement *every* attribute are rejected via a
    canary attribute no real object should define.
    """
    # repr/formatter methods only make sense on instances, not classes
    if inspect.isclass(obj):
        return None
    candidate = pretty._safe_getattr(obj, name, None)
    if not callable(candidate):
        return None
    # A proxy that "has" the canary attribute claims to have everything;
    # don't trust any formatter method it advertises.
    canary = pretty._safe_getattr(
        obj, '_ipython_canary_method_should_not_exist_', None)
    if callable(canary):
        return None
    return candidate
class DisplayFormatter(Configurable):
    """Compute display data for objects by dispatching to per-MIME formatters."""
    # When set to true only the default plain text formatter will be used.
    plain_text_only = Bool(False, config=True)
    # traitlets change handler - fired when `plain_text_only` is assigned.
    def _plain_text_only_changed(self, name, old, new):
        warnings.warn("""DisplayFormatter.plain_text_only is deprecated.
        Use DisplayFormatter.active_types = ['text/plain']
        for the same effect.
        """, DeprecationWarning)
        if new:
            self.active_types = ['text/plain']
        else:
            self.active_types = self.format_types
    active_types = List(Unicode, config=True,
        help="""List of currently active mime-types to display.
        You can use this to set a white-list for formats to display.
        Most users will not need to change this value.
        """)
    # traitlets default - all known format types are active initially.
    def _active_types_default(self):
        return self.format_types
    # traitlets change handler - enable/disable formatters to match the
    # new active set.
    def _active_types_changed(self, name, old, new):
        for key, formatter in self.formatters.items():
            if key in new:
                formatter.enabled = True
            else:
                formatter.enabled = False
    # Formatter given first chance at every object (see `format`); declared
    # forward because IPythonDisplayFormatter is defined later in this file.
    ipython_display_formatter = ForwardDeclaredInstance('FormatterABC')
    def _ipython_display_formatter_default(self):
        return IPythonDisplayFormatter(parent=self)
    # A dict of formatter whose keys are format types (MIME types) and whose
    # values are subclasses of BaseFormatter.
    formatters = Dict()
    def _formatters_default(self):
        """Activate the default formatters."""
        formatter_classes = [
            PlainTextFormatter,
            HTMLFormatter,
            MarkdownFormatter,
            SVGFormatter,
            PNGFormatter,
            PDFFormatter,
            JPEGFormatter,
            LatexFormatter,
            JSONFormatter,
            JavascriptFormatter
        ]
        d = {}
        for cls in formatter_classes:
            f = cls(parent=self)
            d[f.format_type] = f
        return d
    def format(self, obj, include=None, exclude=None):
        """Return a format data dict for an object.
        By default all format types will be computed.
        The following MIME types are currently implemented:
        * text/plain
        * text/html
        * text/markdown
        * text/latex
        * application/json
        * application/javascript
        * application/pdf
        * image/png
        * image/jpeg
        * image/svg+xml
        Parameters
        ----------
        obj : object
            The Python object whose format data will be computed.
        include : list or tuple, optional
            A list of format type strings (MIME types) to include in the
            format data dict. If this is set *only* the format types included
            in this list will be computed.
        exclude : list or tuple, optional
            A list of format type string (MIME types) to exclude in the format
            data dict. If this is set all format types will be computed,
            except for those included in this argument.
        Returns
        -------
        (format_dict, metadata_dict) : tuple of two dicts
            format_dict is a dictionary of key/value pairs, one of each format that was
            generated for the object. The keys are the format types, which
            will usually be MIME type strings and the values and JSON'able
            data structure containing the raw data for the representation in
            that format.
            metadata_dict is a dictionary of metadata about each mime-type output.
            Its keys will be a strict subset of the keys in format_dict.
        """
        format_dict = {}
        md_dict = {}
        if self.ipython_display_formatter(obj):
            # object handled itself, don't proceed
            return {}, {}
        for format_type, formatter in self.formatters.items():
            if include and format_type not in include:
                continue
            if exclude and format_type in exclude:
                continue
            md = None
            try:
                data = formatter(obj)
            except:
                # FIXME: log the exception
                raise
            # formatters can return raw data or (data, metadata)
            if isinstance(data, tuple) and len(data) == 2:
                data, md = data
            if data is not None:
                format_dict[format_type] = data
                if md is not None:
                    md_dict[format_type] = md
        return format_dict, md_dict
    @property
    def format_types(self):
        """Return the format types (MIME types) of the active formatters."""
        return list(self.formatters.keys())
#-----------------------------------------------------------------------------
# Formatters for specific format types (text, html, svg, etc.)
#-----------------------------------------------------------------------------
def _safe_repr(obj):
"""Try to return a repr of an object
always returns a string, at least.
"""
try:
return repr(obj)
except Exception as e:
return "un-repr-able object (%r)" % e
class FormatterWarning(UserWarning):
    """Warning emitted when a formatter misbehaves (e.g. bad return type)."""
@decorator
def catch_format_error(method, self, *args, **kwargs):
    """Run a format call, showing a traceback instead of raising on failure.

    NotImplementedError is treated as "no representation" and silently
    ignored; other exceptions are displayed (via IPython when available)
    and None is returned. Successful results are validated through
    ``self._check_return``.
    """
    try:
        result = method(self, *args, **kwargs)
    except NotImplementedError:
        # don't warn on NotImplementedErrors
        return None
    except Exception:
        exc_info = sys.exc_info()
        ip = get_ipython()
        if ip is None:
            traceback.print_exception(*exc_info)
        else:
            ip.showtraceback(exc_info)
        return None
    return self._check_return(result, args[0])
class FormatterABC(with_metaclass(abc.ABCMeta, object)):
    """Abstract base class for Formatters.

    A formatter is a callable class responsible for computing the raw
    format data of one format type (MIME type) for an object. For example,
    an HTML formatter has format type ``text/html`` and, when called,
    returns the HTML representation of the object.
    """
    # MIME type of the data this formatter produces.
    format_type = 'text/plain'
    # Is this formatter currently enabled?
    enabled = True
    @abc.abstractmethod
    def __call__(self, obj):
        """Return a JSON'able representation of the object.

        If the object cannot be formatted by this formatter,
        warn and return None.
        """
        return repr(obj)
def _mod_name_key(typ):
"""Return a (__module__, __name__) tuple for a type.
Used as key in Formatter.deferred_printers.
"""
module = getattr(typ, '__module__', None)
name = getattr(typ, '__name__', None)
return (module, name)
def _get_type(obj):
"""Return the type of an instance (old and new-style)"""
return getattr(obj, '__class__', None) or type(obj)
# Sentinel default for BaseFormatter.pop, distinguishing "no default
# supplied" (raise KeyError) from an explicit ``default=None``.
_raise_key_error = object()
class BaseFormatter(Configurable):
    """A base formatter class that is configurable.
    This formatter should usually be used as the base class of all formatters.
    It is a traited :class:`Configurable` class and includes an extensible
    API for users to determine how their objects are formatted. The following
    logic is used to find a function to format an given object.
    1. The object is introspected to see if it has a method with the name
       :attr:`print_method`. If is does, that object is passed to that method
       for formatting.
    2. If no print method is found, three internal dictionaries are consulted
       to find print method: :attr:`singleton_printers`, :attr:`type_printers`
       and :attr:`deferred_printers`.
    Users should use these dictionaries to register functions that will be
    used to compute the format data for their objects (if those objects don't
    have the special print methods). The easiest way of using these
    dictionaries is through the :meth:`for_type` and :meth:`for_type_by_name`
    methods.
    If no function/callable is found to compute the format data, ``None`` is
    returned and this format type is not used.
    """
    # MIME type of the data this formatter produces.
    format_type = Unicode('text/plain')
    # Type(s) a formatter call is allowed to return (checked in _check_return).
    _return_type = string_types
    # Disabled formatters return None without computing anything (see __call__).
    enabled = Bool(True, config=True)
    # Name of the special method looked up on objects (e.g. '__repr__').
    print_method = ObjectName('__repr__')
    # The singleton printers.
    # Maps the IDs of the builtin singleton objects to the format functions.
    singleton_printers = Dict(config=True)
    # The type-specific printers.
    # Map type objects to the format functions.
    type_printers = Dict(config=True)
    # The deferred-import type-specific printers.
    # Map (modulename, classname) pairs to the format functions.
    deferred_printers = Dict(config=True)
@catch_format_error
def __call__(self, obj):
"""Compute the format for an object."""
if self.enabled:
# lookup registered printer
try:
printer = self.lookup(obj)
except KeyError:
pass
else:
return printer(obj)
# Finally look for special method names
method = _safe_get_formatter_method(obj, self.print_method)
if method is not None:
return method()
return None
else:
return None
def __contains__(self, typ):
"""map in to lookup_by_type"""
try:
self.lookup_by_type(typ)
except KeyError:
return False
else:
return True
def _check_return(self, r, obj):
"""Check that a return value is appropriate
Return the value if so, None otherwise, warning if invalid.
"""
if r is None or isinstance(r, self._return_type) or \
(isinstance(r, tuple) and r and isinstance(r[0], self._return_type)):
return r
else:
warnings.warn(
"%s formatter returned invalid type %s (expected %s) for object: %s" % \
(self.format_type, type(r), self._return_type, _safe_repr(obj)),
FormatterWarning
)
def lookup(self, obj):
"""Look up the formatter for a given instance.
Parameters
----------
obj : object instance
Returns
-------
f : callable
The registered formatting callable for the type.
Raises
------
KeyError if the type has not been registered.
"""
# look for singleton first
obj_id = id(obj)
if obj_id in self.singleton_printers:
return self.singleton_printers[obj_id]
# then lookup by type
return self.lookup_by_type(_get_type(obj))
    def lookup_by_type(self, typ):
        """Look up the registered formatter for a type.
        Parameters
        ----------
        typ : type or '__module__.__name__' string for a type
        Returns
        -------
        f : callable
            The registered formatting callable for the type.
        Raises
        ------
        KeyError if the type has not been registered.
        """
        if isinstance(typ, string_types):
            typ_key = tuple(typ.rsplit('.',1))
            if typ_key not in self.deferred_printers:
                # We may have it cached in the type map. We will have to
                # iterate over all of the types to check.
                for cls in self.type_printers:
                    if _mod_name_key(cls) == typ_key:
                        return self.type_printers[cls]
            else:
                return self.deferred_printers[typ_key]
        else:
            # Walk the MRO so a printer registered for a base class applies.
            for cls in pretty._get_mro(typ):
                # NOTE: _in_deferred_types has a side effect - per its
                # docstring, a successful match moves the printer into
                # type_printers, so the subsequent indexing succeeds.
                if cls in self.type_printers or self._in_deferred_types(cls):
                    return self.type_printers[cls]
        # If we have reached here, the lookup failed.
        raise KeyError("No registered printer for {0!r}".format(typ))
def for_type(self, typ, func=None):
"""Add a format function for a given type.
Parameters
-----------
typ : type or '__module__.__name__' string for a type
The class of the object that will be formatted using `func`.
func : callable
A callable for computing the format data.
`func` will be called with the object to be formatted,
and will return the raw data in this formatter's format.
Subclasses may use a different call signature for the
`func` argument.
If `func` is None or not specified, there will be no change,
only returning the current value.
Returns
-------
oldfunc : callable
The currently registered callable.
If you are registering a new formatter,
this will be the previous value (to enable restoring later).
"""
# if string given, interpret as 'pkg.module.class_name'
if isinstance(typ, string_types):
type_module, type_name = typ.rsplit('.', 1)
return self.for_type_by_name(type_module, type_name, func)
try:
oldfunc = self.lookup_by_type(typ)
except KeyError:
oldfunc = None
if func is not None:
self.type_printers[typ] = func
return oldfunc
def for_type_by_name(self, type_module, type_name, func=None):
"""Add a format function for a type specified by the full dotted
module and name of the type, rather than the type of the object.
Parameters
----------
type_module : str
The full dotted name of the module the type is defined in, like
``numpy``.
type_name : str
The name of the type (the class name), like ``dtype``
func : callable
A callable for computing the format data.
`func` will be called with the object to be formatted,
and will return the raw data in this formatter's format.
Subclasses may use a different call signature for the
`func` argument.
If `func` is None or unspecified, there will be no change,
only returning the current value.
Returns
-------
oldfunc : callable
The currently registered callable.
If you are registering a new formatter,
this will be the previous value (to enable restoring later).
"""
key = (type_module, type_name)
try:
oldfunc = self.lookup_by_type("%s.%s" % key)
except KeyError:
oldfunc = None
if func is not None:
self.deferred_printers[key] = func
return oldfunc
    def pop(self, typ, default=_raise_key_error):
        """Pop a formatter for the given type.

        Parameters
        ----------
        typ : type or '__module__.__name__' string for a type
        default : object
            value to be returned if no formatter is registered for typ.
            Defaults to a private sentinel so that an explicit ``None``
            default can be distinguished from "no default given".

        Returns
        -------
        obj : object
            The last registered object for the type.

        Raises
        ------
        KeyError if the type is not registered and default is not specified.
        """
        if isinstance(typ, string_types):
            typ_key = tuple(typ.rsplit('.',1))
            if typ_key not in self.deferred_printers:
                # We may have it cached in the type map. We will have to
                # iterate over all of the types to check.
                for cls in self.type_printers:
                    if _mod_name_key(cls) == typ_key:
                        old = self.type_printers.pop(cls)
                        break
                else:
                    # for/else: loop finished without a match.
                    old = default
            else:
                old = self.deferred_printers.pop(typ_key)
        else:
            # Actual type object: check the live registry first, then the
            # deferred one keyed by (module, name).
            if typ in self.type_printers:
                old = self.type_printers.pop(typ)
            else:
                old = self.deferred_printers.pop(_mod_name_key(typ), default)
        # Only raise if the caller did not supply their own default.
        if old is _raise_key_error:
            raise KeyError("No registered value for {0!r}".format(typ))
        return old
def _in_deferred_types(self, cls):
"""
Check if the given class is specified in the deferred type registry.
Successful matches will be moved to the regular type registry for future use.
"""
mod = getattr(cls, '__module__', None)
name = getattr(cls, '__name__', None)
key = (mod, name)
if key in self.deferred_printers:
# Move the printer over to the regular registry.
printer = self.deferred_printers.pop(key)
self.type_printers[cls] = printer
return True
return False
class PlainTextFormatter(BaseFormatter):
    """The default pretty-printer.

    This uses :mod:`IPython.lib.pretty` to compute the format data of
    the object. If the object cannot be pretty printed, :func:`repr` is used.
    See the documentation of :mod:`IPython.lib.pretty` for details on
    how to write pretty printers. Here is a simple example::

        def dtype_pprinter(obj, p, cycle):
            if cycle:
                return p.text('dtype(...)')
            if hasattr(obj, 'fields'):
                if obj.fields is None:
                    p.text(repr(obj))
                else:
                    p.begin_group(7, 'dtype([')
                    for i, field in enumerate(obj.descr):
                        if i > 0:
                            p.text(',')
                            p.breakable()
                        p.pretty(field)
                    p.end_group(7, '])')
    """
    # The format type of data returned.
    format_type = Unicode('text/plain')
    # This subclass ignores this attribute as it always needs to return
    # something (repr at minimum), so it cannot be disabled.
    enabled = Bool(True, config=False)
    max_seq_length = Integer(pretty.MAX_SEQ_LENGTH, config=True,
        help="""Truncate large collections (lists, dicts, tuples, sets) to this size.
        Set to 0 to disable truncation.
        """
    )
    # Look for a _repr_pretty_ method to use for pretty printing.
    print_method = ObjectName('_repr_pretty_')
    # Whether to pretty-print or fall back to plain repr().
    pprint = Bool(True, config=True)
    # Whether to be verbose or not.
    verbose = Bool(False, config=True)
    # The maximum width before the pretty-printer breaks lines.
    max_width = Integer(79, config=True)
    # The newline character.
    newline = Unicode('\n', config=True)
    # format-string for pprinting floats
    float_format = Unicode('%r')
    # setter for float precision, either int or direct format-string
    float_precision = CUnicode('', config=True)
    def _float_precision_changed(self, name, old, new):
        """float_precision changed, set float_format accordingly.

        float_precision can be set by int or str.
        This will set float_format, after interpreting input.
        If numpy has been imported, numpy print precision will also be set.

        An integer `n` sets format to '%.nf'; otherwise, the format is set
        directly. An empty string returns to defaults (repr for float,
        precision 8 for numpy).

        This parameter can be set via the '%precision' magic.
        """
        if '%' in new:
            # got explicit format string
            fmt = new
            try:
                # validate the format string against a sample float
                fmt%3.14159
            except Exception:
                raise ValueError("Precision must be int or format string, not %r"%new)
        elif new:
            # otherwise, should be an int
            try:
                i = int(new)
                assert i >= 0
            except ValueError:
                raise ValueError("Precision must be int or format string, not %r"%new)
            except AssertionError:
                raise ValueError("int precision must be non-negative, not %r"%i)
            fmt = '%%.%if'%i
            if 'numpy' in sys.modules:
                # set numpy precision if it has been imported
                import numpy
                numpy.set_printoptions(precision=i)
        else:
            # default back to repr
            fmt = '%r'
            if 'numpy' in sys.modules:
                import numpy
                # numpy default is 8
                numpy.set_printoptions(precision=8)
        self.float_format = fmt
    # Use the default pretty printers from IPython.lib.pretty.
    def _singleton_printers_default(self):
        # Copy so per-instance registration doesn't mutate the shared map.
        return pretty._singleton_pprinters.copy()
    def _type_printers_default(self):
        d = pretty._type_pprinters.copy()
        # Floats are special-cased so float_format / %precision take effect.
        d[float] = lambda obj,p,cycle: p.text(self.float_format%obj)
        return d
    def _deferred_printers_default(self):
        return pretty._deferred_type_pprinters.copy()
    #### FormatterABC interface ####
    @catch_format_error
    def __call__(self, obj):
        """Compute the pretty representation of the object."""
        if not self.pprint:
            return repr(obj)
        else:
            # handle str and unicode on Python 2
            # io.StringIO only accepts unicode,
            # cStringIO doesn't handle unicode on py2,
            # StringIO allows str, unicode but only ascii str
            stream = pretty.CUnicodeIO()
            printer = pretty.RepresentationPrinter(stream, self.verbose,
                self.max_width, self.newline,
                max_seq_length=self.max_seq_length,
                singleton_pprinters=self.singleton_printers,
                type_pprinters=self.type_printers,
                deferred_pprinters=self.deferred_printers)
            printer.pretty(obj)
            printer.flush()
            return stream.getvalue()
class HTMLFormatter(BaseFormatter):
    """Formatter producing HTML representations.

    Objects can participate either by defining a ``_repr_html_`` method or
    by registering a handler through :meth:`for_type` /
    :meth:`for_type_by_name`.

    Handlers must return an HTML fragment suitable for injection into an
    existing DOM -- no surrounding ``<html>`` or ``<body>`` tags.
    """
    print_method = ObjectName('_repr_html_')
    format_type = Unicode('text/html')
class MarkdownFormatter(BaseFormatter):
    """Formatter producing Markdown representations.

    Objects can participate either by defining a ``_repr_markdown_`` method
    or by registering a handler through :meth:`for_type` /
    :meth:`for_type_by_name`.

    Handlers must return valid Markdown.
    """
    print_method = ObjectName('_repr_markdown_')
    format_type = Unicode('text/markdown')
class SVGFormatter(BaseFormatter):
    """Formatter producing SVG representations.

    Objects can participate either by defining a ``_repr_svg_`` method or
    by registering a handler through :meth:`for_type` /
    :meth:`for_type_by_name`.

    Handlers must return valid SVG enclosed in ``<svg>`` tags, suitable for
    injection into an existing DOM -- no surrounding ``<html>`` or
    ``<body>`` tags.
    """
    print_method = ObjectName('_repr_svg_')
    format_type = Unicode('image/svg+xml')
class PNGFormatter(BaseFormatter):
    """Formatter producing PNG representations.

    Objects can participate either by defining a ``_repr_png_`` method or
    by registering a handler through :meth:`for_type` /
    :meth:`for_type_by_name`.

    Handlers must return raw PNG data, *not* base64 encoded.
    """
    print_method = ObjectName('_repr_png_')
    format_type = Unicode('image/png')
    _return_type = (bytes, unicode_type)
class JPEGFormatter(BaseFormatter):
    """Formatter producing JPEG representations.

    Objects can participate either by defining a ``_repr_jpeg_`` method or
    by registering a handler through :meth:`for_type` /
    :meth:`for_type_by_name`.

    Handlers must return raw JPEG data, *not* base64 encoded.
    """
    print_method = ObjectName('_repr_jpeg_')
    format_type = Unicode('image/jpeg')
    _return_type = (bytes, unicode_type)
class LatexFormatter(BaseFormatter):
    """Formatter producing LaTeX representations.

    Objects can participate either by defining a ``_repr_latex_`` method or
    by registering a handler through :meth:`for_type` /
    :meth:`for_type_by_name`.

    Handlers must return a valid LaTeX equation enclosed in ``$``, ``$$``,
    or another LaTeX equation environment.
    """
    print_method = ObjectName('_repr_latex_')
    format_type = Unicode('text/latex')
class JSONFormatter(BaseFormatter):
    """Formatter producing JSONable representations.

    Objects can participate either by defining a ``_repr_json_`` method or
    by registering a handler through :meth:`for_type` /
    :meth:`for_type_by_name`.

    Handlers must return a JSONable list or dict. JSON scalars (None,
    number, string) are not allowed, only dict or list containers.
    """
    format_type = Unicode('application/json')
    _return_type = (list, dict)
    print_method = ObjectName('_repr_json_')

    def _check_return(self, r, obj):
        """Validate a handler's return value.

        Returns the value if acceptable, None otherwise (with a warning).
        Strings are accepted for backward compatibility with the
        deprecated IPython < 3 JSON-as-string form and parsed into
        containers.
        """
        if r is None:
            return
        metadata = None
        if isinstance(r, tuple):
            # unpack (data, metadata) so type checks see the data alone
            r, metadata = r
        if isinstance(r, string_types):
            warnings.warn("JSON expects JSONable list/dict containers, not JSON strings",
                FormatterWarning)
            r = json.loads(r)
        if metadata is not None:
            # reassemble the (data, metadata) pair
            r = (r, metadata)
        return super(JSONFormatter, self)._check_return(r, obj)
class JavascriptFormatter(BaseFormatter):
    """Formatter producing Javascript representations.

    Objects can participate either by defining a ``_repr_javascript_``
    method or by registering a handler through :meth:`for_type` /
    :meth:`for_type_by_name`.

    Handlers must return valid Javascript code *without* enclosing
    ``<script>`` tags.
    """
    print_method = ObjectName('_repr_javascript_')
    format_type = Unicode('application/javascript')
class PDFFormatter(BaseFormatter):
    """Formatter producing PDF representations.

    Objects can participate either by defining a ``_repr_pdf_`` method or
    by registering a handler through :meth:`for_type` /
    :meth:`for_type_by_name`.

    Handlers must return raw PDF data, *not* base64 encoded.
    """
    print_method = ObjectName('_repr_pdf_')
    format_type = Unicode('application/pdf')
    _return_type = (bytes, unicode_type)
class IPythonDisplayFormatter(BaseFormatter):
    """Formatter for objects that know how to display themselves.

    Objects participate by defining an ``_ipython_display_`` method, or via
    :meth:`for_type` / :meth:`for_type_by_name`. Unlike mime-type
    formatters, the hook returns nothing; it performs the appropriate
    display calls itself.

    This formatter has the highest priority: when it fires, no other
    display formatter is called.
    """
    print_method = ObjectName('_ipython_display_')
    _return_type = (type(None), bool)

    @catch_format_error
    def __call__(self, obj):
        """Display *obj* in place; return True if a handler fired."""
        if not self.enabled:
            return
        # Registered printers take precedence over the special method.
        try:
            printer = self.lookup(obj)
        except KeyError:
            pass
        else:
            printer(obj)
            return True
        # Fall back to the special method name.
        method = _safe_get_formatter_method(obj, self.print_method)
        if method is not None:
            method()
            return True
# Register every concrete formatter as a virtual subclass of FormatterABC.
for _formatter_cls in (
    BaseFormatter,
    PlainTextFormatter,
    HTMLFormatter,
    MarkdownFormatter,
    SVGFormatter,
    PNGFormatter,
    PDFFormatter,
    JPEGFormatter,
    LatexFormatter,
    JSONFormatter,
    JavascriptFormatter,
    IPythonDisplayFormatter,
):
    FormatterABC.register(_formatter_cls)
del _formatter_cls
def format_display_data(obj, include=None, exclude=None):
    """Return a format data dict for an object.

    By default all format types will be computed.

    The following MIME types are currently implemented:

    * text/plain
    * text/html
    * text/markdown
    * text/latex
    * application/json
    * application/javascript
    * application/pdf
    * image/png
    * image/jpeg
    * image/svg+xml

    Parameters
    ----------
    obj : object
        The Python object whose format data will be computed.
    include : list or tuple, optional
        A list of format type strings (MIME types) to include in the
        format data dict. If this is set *only* the format types included
        in this list will be computed.
    exclude : list or tuple, optional
        A list of format type strings (MIME types) to exclude in the
        format data dict. If this is set all format types will be
        computed, except for those included in this argument.

    Returns
    -------
    format_dict : dict
        A dictionary of key/value pairs, one for each format that was
        generated for the object. The keys are the format types, which
        will usually be MIME type strings, and the values are the
        JSON'able data structures containing the raw data for the
        representation in that format.
    """
    from IPython.core.interactiveshell import InteractiveShell
    # Bug fix: the computed format dict was previously discarded; the
    # docstring promises it is returned to the caller.
    return InteractiveShell.instance().display_formatter.format(
        obj,
        include,
        exclude
    )
| |
import sys
import unittest
from django.conf import settings
from django.contrib.admindocs import utils, views
from django.contrib.admindocs.views import get_return_data_type, simplify_regex
from django.contrib.sites.models import Site
from django.db import models
from django.db.models import fields
from django.test import SimpleTestCase, modify_settings, override_settings
from django.test.utils import captured_stderr
from django.urls import reverse
from .models import Company, Person
from .tests import AdminDocsTestCase, TestDataMixin
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
class AdminDocViewTests(TestDataMixin, AdminDocsTestCase):
    """Smoke tests for each admindocs page, exercised as a logged-in superuser."""
    def setUp(self):
        self.client.force_login(self.superuser)
    def test_index(self):
        response = self.client.get(reverse('django-admindocs-docroot'))
        self.assertContains(response, '<h1>Documentation</h1>', html=True)
        self.assertContains(response, '<h1 id="site-name"><a href="/admin/">Django administration</a></h1>')
        # Anonymous users are redirected to the login screen instead.
        self.client.logout()
        response = self.client.get(reverse('django-admindocs-docroot'), follow=True)
        # Should display the login screen
        self.assertContains(response, '<input type="hidden" name="next" value="/admindocs/">', html=True)
    def test_bookmarklets(self):
        response = self.client.get(reverse('django-admindocs-bookmarklets'))
        self.assertContains(response, '/admindocs/views/')
    def test_templatetag_index(self):
        response = self.client.get(reverse('django-admindocs-tags'))
        self.assertContains(response, '<h3 id="built_in-extends">extends</h3>', html=True)
    def test_templatefilter_index(self):
        response = self.client.get(reverse('django-admindocs-filters'))
        self.assertContains(response, '<h3 id="built_in-first">first</h3>', html=True)
    def test_view_index(self):
        response = self.client.get(reverse('django-admindocs-views-index'))
        self.assertContains(
            response,
            '<h3><a href="/admindocs/views/django.contrib.admindocs.views.BaseAdminDocsView/">/admindocs/</a></h3>',
            html=True
        )
        self.assertContains(response, 'Views by namespace test')
        self.assertContains(response, 'Name: <code>test:func</code>.')
        # Callable-object views (not functions/methods) are listed too.
        self.assertContains(
            response,
            '<h3><a href="/admindocs/views/admin_docs.views.XViewCallableObject/">'
            '/xview/callable_object_without_xview/</a></h3>',
            html=True,
        )
    def test_view_index_with_method(self):
        """
        Views that are methods are listed correctly.
        """
        response = self.client.get(reverse('django-admindocs-views-index'))
        self.assertContains(
            response,
            '<h3><a href="/admindocs/views/django.contrib.admin.sites.AdminSite.index/">/admin/</a></h3>',
            html=True
        )
    def test_view_detail(self):
        url = reverse('django-admindocs-views-detail', args=['django.contrib.admindocs.views.BaseAdminDocsView'])
        response = self.client.get(url)
        # View docstring
        self.assertContains(response, 'Base view for admindocs views.')
    @override_settings(ROOT_URLCONF='admin_docs.namespace_urls')
    def test_namespaced_view_detail(self):
        url = reverse('django-admindocs-views-detail', args=['admin_docs.views.XViewClass'])
        response = self.client.get(url)
        self.assertContains(response, '<h1>admin_docs.views.XViewClass</h1>')
    def test_view_detail_illegal_import(self):
        # The view detail page must not import arbitrary modules as a side
        # effect of rendering.
        url = reverse('django-admindocs-views-detail', args=['urlpatterns_reverse.nonimported_module.view'])
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
        self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
    def test_view_detail_as_method(self):
        """
        Views that are methods can be displayed.
        """
        url = reverse('django-admindocs-views-detail', args=['django.contrib.admin.sites.AdminSite.index'])
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
    def test_model_index(self):
        response = self.client.get(reverse('django-admindocs-models-index'))
        self.assertContains(
            response,
            '<h2 id="app-auth">Authentication and Authorization (django.contrib.auth)</h2>',
            html=True
        )
    def test_template_detail(self):
        response = self.client.get(reverse('django-admindocs-templates', args=['admin_doc/template_detail.html']))
        self.assertContains(response, '<h1>Template: <q>admin_doc/template_detail.html</q></h1>', html=True)
    def test_missing_docutils(self):
        # Simulate docutils being unavailable; restore the flag afterwards.
        utils.docutils_is_available = False
        try:
            response = self.client.get(reverse('django-admindocs-docroot'))
            self.assertContains(
                response,
                '<h3>The admin documentation system requires Python\'s '
                '<a href="http://docutils.sf.net/">docutils</a> library.</h3>',
                html=True
            )
            self.assertContains(response, '<h1 id="site-name"><a href="/admin/">Django administration</a></h1>')
        finally:
            utils.docutils_is_available = True
    @modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
    @override_settings(SITE_ID=None)    # will restore SITE_ID after the test
    def test_no_sites_framework(self):
        """
        Without the sites framework, should not access SITE_ID or Site
        objects. Deleting settings is fine here as UserSettingsHolder is used.
        """
        Site.objects.all().delete()
        del settings.SITE_ID
        response = self.client.get(reverse('django-admindocs-views-index'))
        self.assertContains(response, 'View documentation')
@override_settings(TEMPLATES=[{
    'NAME': 'ONE',
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
}, {
    'NAME': 'TWO',
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
}])
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
class AdminDocViewWithMultipleEngines(AdminDocViewTests):
    """Re-run AdminDocViewTests with two template engines configured."""
    def test_templatefilter_index(self):
        # Overridden because non-trivial TEMPLATES settings aren't supported
        # but the page shouldn't crash (#24125).
        response = self.client.get(reverse('django-admindocs-filters'))
        self.assertContains(response, '<title>Template filters</title>', html=True)
    def test_templatetag_index(self):
        # Overridden because non-trivial TEMPLATES settings aren't supported
        # but the page shouldn't crash (#24125).
        response = self.client.get(reverse('django-admindocs-tags'))
        self.assertContains(response, '<title>Template tags</title>', html=True)
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
class TestModelDetailView(TestDataMixin, AdminDocsTestCase):
    """Tests for the admindocs model detail page (admin_docs.Person)."""
    def setUp(self):
        self.client.force_login(self.superuser)
        # Capture docutils warnings emitted while rendering the page so the
        # directive-disabled tests can inspect them.
        with captured_stderr() as self.docutils_stderr:
            self.response = self.client.get(reverse('django-admindocs-models-detail', args=['admin_docs', 'Person']))
    def test_method_excludes(self):
        """
        Methods that begin with strings defined in
        ``django.contrib.admindocs.views.MODEL_METHODS_EXCLUDE``
        shouldn't be displayed in the admin docs.
        """
        self.assertContains(self.response, "<td>get_full_name</td>")
        self.assertNotContains(self.response, "<td>_get_full_name</td>")
        self.assertNotContains(self.response, "<td>add_image</td>")
        self.assertNotContains(self.response, "<td>delete_image</td>")
        self.assertNotContains(self.response, "<td>set_status</td>")
        self.assertNotContains(self.response, "<td>save_changes</td>")
    def test_methods_with_arguments(self):
        """
        Methods that take arguments should also be displayed.
        """
        self.assertContains(self.response, "<h3>Methods with arguments</h3>")
        self.assertContains(self.response, "<td>rename_company</td>")
        self.assertContains(self.response, "<td>dummy_function</td>")
        self.assertContains(self.response, "<td>suffix_company_name</td>")
    def test_methods_with_arguments_display_arguments(self):
        """
        Methods with arguments should have their arguments displayed.
        """
        self.assertContains(self.response, "<td>new_name</td>")
    def test_methods_with_arguments_display_arguments_default_value(self):
        """
        Methods with keyword arguments should have their arguments displayed.
        """
        # Bug fix: the original literal mixed quote characters and was a
        # SyntaxError. The rendered page HTML-escapes the default value's
        # quotes, so assert against the escaped form.
        self.assertContains(self.response, '<td>suffix=&#39;ltd&#39;</td>')
    def test_methods_with_multiple_arguments_display_arguments(self):
        """
        Methods with multiple arguments should have all their arguments
        displayed, but omitting 'self'.
        """
        self.assertContains(self.response, "<td>baz, rox, *some_args, **some_kwargs</td>")
    def test_instance_of_property_methods_are_displayed(self):
        """Model properties are displayed as fields."""
        self.assertContains(self.response, '<td>a_property</td>')
    def test_method_data_types(self):
        company = Company.objects.create(name="Django")
        person = Person.objects.create(first_name="Human", last_name="User", company=company)
        self.assertEqual(get_return_data_type(person.get_status_count.__name__), 'Integer')
        self.assertEqual(get_return_data_type(person.get_groups_list.__name__), 'List')
    def test_descriptions_render_correctly(self):
        """
        The ``description`` field should render correctly for each field type.
        """
        # help text in fields
        self.assertContains(self.response, "<td>first name - The person's first name</td>")
        self.assertContains(self.response, "<td>last name - The person's last name</td>")
        # method docstrings
        self.assertContains(self.response, "<p>Get the full name of the person</p>")
        link = '<a class="reference external" href="/admindocs/models/%s/">%s</a>'
        markup = '<p>the related %s object</p>'
        company_markup = markup % (link % ("admin_docs.company", "admin_docs.Company"))
        # foreign keys
        self.assertContains(self.response, company_markup)
        # foreign keys with help text
        self.assertContains(self.response, "%s\n - place of work" % company_markup)
        # many to many fields
        self.assertContains(
            self.response,
            "number of related %s objects" % (link % ("admin_docs.group", "admin_docs.Group"))
        )
        self.assertContains(
            self.response,
            "all related %s objects" % (link % ("admin_docs.group", "admin_docs.Group"))
        )
        # "raw" and "include" directives are disabled
        self.assertContains(self.response, '<p>"raw" directive disabled.</p>')
        self.assertContains(self.response, '.. raw:: html\n    :file: admin_docs/evilfile.txt')
        self.assertContains(self.response, '<p>"include" directive disabled.</p>')
        self.assertContains(self.response, '.. include:: admin_docs/evilfile.txt')
        # docutils also reports the disabled directives on stderr
        out = self.docutils_stderr.getvalue()
        self.assertIn('"raw" directive disabled', out)
        self.assertIn('"include" directive disabled', out)
    def test_model_with_many_to_one(self):
        link = '<a class="reference external" href="/admindocs/models/%s/">%s</a>'
        response = self.client.get(
            reverse('django-admindocs-models-detail', args=['admin_docs', 'company'])
        )
        self.assertContains(
            response,
            "number of related %s objects" % (link % ("admin_docs.person", "admin_docs.Person"))
        )
        self.assertContains(
            response,
            "all related %s objects" % (link % ("admin_docs.person", "admin_docs.Person"))
        )
    def test_model_with_no_backward_relations_render_only_relevant_fields(self):
        """
        A model with ``related_name`` of `+` shouldn't show backward
        relationship links.
        """
        response = self.client.get(reverse('django-admindocs-models-detail', args=['admin_docs', 'family']))
        fields = response.context_data.get('fields')
        self.assertEqual(len(fields), 2)
    def test_model_docstring_renders_correctly(self):
        summary = (
            '<h2 class="subhead"><p>Stores information about a person, related to <a class="reference external" '
            'href="/admindocs/models/myapp.company/">myapp.Company</a>.</p></h2>'
        )
        subheading = '<p><strong>Notes</strong></p>'
        body = '<p>Use <tt class="docutils literal">save_changes()</tt> when saving this object.</p>'
        model_body = (
            '<dl class="docutils"><dt><tt class="'
            'docutils literal">company</tt></dt><dd>Field storing <a class="'
            'reference external" href="/admindocs/models/myapp.company/">'
            'myapp.Company</a> where the person works.</dd></dl>'
        )
        self.assertContains(self.response, 'DESCRIPTION')
        self.assertContains(self.response, summary, html=True)
        self.assertContains(self.response, subheading, html=True)
        self.assertContains(self.response, body, html=True)
        self.assertContains(self.response, model_body, html=True)
    def test_model_detail_title(self):
        self.assertContains(self.response, '<h1>admin_docs.Person</h1>', html=True)
    def test_app_not_found(self):
        response = self.client.get(reverse('django-admindocs-models-detail', args=['doesnotexist', 'Person']))
        self.assertEqual(response.context['exception'], "App 'doesnotexist' not found")
        self.assertEqual(response.status_code, 404)
    def test_model_not_found(self):
        response = self.client.get(reverse('django-admindocs-models-detail', args=['admin_docs', 'doesnotexist']))
        self.assertEqual(response.context['exception'], "Model 'doesnotexist' not found in app 'admin_docs'")
        self.assertEqual(response.status_code, 404)
# Field with an explicit ``description`` attribute, which admindocs'
# get_readable_field_data_type() should surface verbatim.
class CustomField(models.Field):
    description = "A custom field type"
# Field without a ``description``; admindocs should fall back to
# "Field of type: DescriptionLackingField".
class DescriptionLackingField(models.Field):
    pass
class TestFieldType(unittest.TestCase):
    """Tests for views.get_readable_field_data_type()."""
    def test_field_name(self):
        # A non-field object must raise rather than produce a bogus label.
        with self.assertRaises(AttributeError):
            views.get_readable_field_data_type("NotAField")
    def test_builtin_fields(self):
        self.assertEqual(
            views.get_readable_field_data_type(fields.BooleanField()),
            'Boolean (Either True or False)'
        )
    def test_custom_fields(self):
        # A custom ``description`` attribute wins; otherwise the generic
        # "Field of type: <ClassName>" fallback is used.
        self.assertEqual(views.get_readable_field_data_type(CustomField()), 'A custom field type')
        self.assertEqual(
            views.get_readable_field_data_type(DescriptionLackingField()),
            'Field of type: DescriptionLackingField'
        )
class AdminDocViewFunctionsTests(SimpleTestCase):
    """Tests for helper functions used by the admindocs views."""
    def test_simplify_regex(self):
        # (regex pattern, expected human-readable URL) pairs: named groups
        # become <name>, unnamed groups become <var>.
        tests = (
            (r'^a', '/a'),
            (r'^(?P<a>\w+)/b/(?P<c>\w+)/$', '/<a>/b/<c>/'),
            (r'^(?P<a>\w+)/b/(?P<c>\w+)$', '/<a>/b/<c>'),
            (r'^(?P<a>\w+)/b/(?P<c>\w+)', '/<a>/b/<c>'),
            (r'^(?P<a>\w+)/b/(\w+)$', '/<a>/b/<var>'),
            (r'^(?P<a>\w+)/b/(\w+)', '/<a>/b/<var>'),
            (r'^(?P<a>\w+)/b/((x|y)\w+)$', '/<a>/b/<var>'),
            (r'^(?P<a>\w+)/b/((x|y)\w+)', '/<a>/b/<var>'),
            (r'^(?P<a>(x|y))/b/(?P<c>\w+)$', '/<a>/b/<c>'),
            (r'^(?P<a>(x|y))/b/(?P<c>\w+)', '/<a>/b/<c>'),
            (r'^(?P<a>(x|y))/b/(?P<c>\w+)ab', '/<a>/b/<c>ab'),
            (r'^(?P<a>(x|y)(\(|\)))/b/(?P<c>\w+)ab', '/<a>/b/<c>ab'),
            (r'^a/?$', '/a/'),
        )
        for pattern, output in tests:
            with self.subTest(pattern=pattern):
                self.assertEqual(simplify_regex(pattern), output)
| |
import csv
from .api_test_base import ApiTestBase
class TestAPIOrgDetailsViews(ApiTestBase):
def test_api_view_org_details_total(self):
url = self.api_prefix
url += "/org_details?format=csv"
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(len(rows), 2)
self.assertEqual(rows[0]["date"], "2015-01-01")
self.assertEqual(float(rows[0]["total_list_size"]), 1260)
self.assertEqual(rows[0]["astro_pu_cost"], "705.5")
self.assertEqual(rows[0]["astro_pu_items"], "955.5")
self.assertEqual(rows[0]["star_pu.oral_antibacterials_item"], "95.5")
def test_api_view_org_details_all_ccgs(self):
url = self.api_prefix
url += "/org_details?format=csv&org_type=ccg"
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(len(rows), 4)
self.assertEqual(rows[1]["row_id"], "03V")
self.assertEqual(rows[1]["row_name"], "NHS Corby")
self.assertEqual(rows[1]["date"], "2015-01-01")
self.assertEqual(rows[1]["astro_pu_cost"], "363.3")
self.assertEqual(rows[1]["astro_pu_items"], "453.3")
self.assertEqual(rows[1]["star_pu.oral_antibacterials_item"], "45.3")
self.assertEqual(float(rows[1]["total_list_size"]), 648)
def test_api_view_org_details_all_ccgs_with_keys(self):
url = self.api_prefix
url += "/org_details?format=csv&org_type=ccg&keys=total_list_size"
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(len(rows), 4)
self.assertEqual(rows[1]["row_id"], "03V")
self.assertEqual(rows[1]["row_name"], "NHS Corby")
self.assertEqual(rows[1]["date"], "2015-01-01")
self.assertEqual(rows[1].get("astro_pu_cost"), None)
self.assertEqual(float(rows[1]["total_list_size"]), 648)
def test_api_view_org_details_all_ccgs_with_nothing_key(self):
url = self.api_prefix
url += "/org_details?format=csv&org_type=ccg&keys=nothing"
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
list(csv.DictReader(response.content.decode("utf8").splitlines()))
def test_api_view_org_details_all_ccgs_with_unpermitted_key(self):
url = self.api_prefix
url += "/org_details?format=csv&org_type=ccg&keys=borg"
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 400)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(rows[0]["detail"], "borg is not a valid key")
def test_api_view_org_details_all_ccgs_with_json_key(self):
url = self.api_prefix
url += (
"/org_details?format=csv&org_type=ccg"
"&keys=star_pu.oral_antibacterials_item,total_list_size"
)
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(rows[1]["row_id"], "03V")
self.assertEqual(rows[1]["row_name"], "NHS Corby")
self.assertEqual(rows[1]["date"], "2015-01-01")
self.assertEqual(rows[1]["star_pu.oral_antibacterials_item"], "45.3")
def test_api_view_org_details_one_ccg(self):
url = self.api_prefix
url += "/org_details?format=csv&org_type=ccg&org=03V"
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(len(rows), 2)
self.assertEqual(rows[0]["row_id"], "03V")
self.assertEqual(rows[0]["row_name"], "NHS Corby")
self.assertEqual(rows[0]["date"], "2015-01-01")
self.assertEqual(rows[0]["astro_pu_cost"], "363.3")
self.assertEqual(rows[0]["astro_pu_items"], "453.3")
self.assertEqual(rows[0]["star_pu.oral_antibacterials_item"], "45.3")
self.assertEqual(float(rows[0]["total_list_size"]), 648)
def test_api_view_org_details_all_practices(self):
url = self.api_prefix
url += "/org_details?format=csv&org_type=practice"
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(len(rows), 10) # 5 practices, 2 months
self.assertEqual(rows[0]["row_id"], "B82018")
self.assertEqual(rows[0]["row_name"], "ESCRICK SURGERY")
self.assertEqual(rows[0]["date"], "2015-01-01")
self.assertEqual(rows[0]["total_list_size"], "324")
self.assertEqual(rows[0]["astro_pu_cost"], "181.1")
self.assertEqual(rows[0]["astro_pu_items"], "271.1")
self.assertEqual(rows[0]["star_pu.oral_antibacterials_item"], "27.1")
def test_api_view_org_details_ccg_code_to_practices(self):
# Practice K83622 moved from 03Q to 03V so we check that it is only
# included in the results for 03V and not 03Q.
url = self.api_prefix
url += "/org_details?format=csv&org_type=practice&org=03V"
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(len(rows), 6) # 3 practices, 2 months
self.assertIn("K83622", [row["row_id"] for row in rows])
self.assertEqual(rows[0]["row_id"], "K83059")
self.assertEqual(rows[0]["row_name"], "DR KHALID & PARTNERS")
self.assertEqual(rows[0]["date"], "2015-01-01")
self.assertEqual(rows[0]["total_list_size"], "216")
self.assertEqual(rows[0]["astro_pu_cost"], "121.1")
self.assertEqual(rows[0]["astro_pu_items"], "151.1")
self.assertEqual(rows[0]["star_pu.oral_antibacterials_item"], "15.1")
url = self.api_prefix
url += "/org_details?format=csv&org_type=practice&org=03Q"
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(len(rows), 4) # 2 practices, 2 months
self.assertNotIn("K83622", [row["row_id"] for row in rows])
def test_api_view_org_details_one_practice(self):
url = self.api_prefix
url += "/org_details?format=csv&org_type=practice&org=N84014"
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(len(rows), 2) # 2 months
self.assertEqual(rows[0]["row_id"], "N84014")
self.assertEqual(rows[0]["row_name"], "AINSDALE VILLAGE SURGERY")
self.assertEqual(rows[0]["date"], "2015-01-01")
self.assertEqual(rows[0]["total_list_size"], "288")
self.assertEqual(rows[0]["astro_pu_cost"], "161.1")
self.assertEqual(rows[0]["astro_pu_items"], "231.1")
self.assertEqual(rows[0]["star_pu.oral_antibacterials_item"], "23.1")
def test_api_view_org_details_one_practice_with_json_key(self):
url = self.api_prefix
url += (
"/org_details?format=csv&org_type=practice&org=N84014"
"&keys=star_pu.oral_antibacterials_item,total_list_size"
)
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(len(rows), 2) # 2 months
self.assertEqual(rows[0]["row_id"], "N84014")
self.assertEqual(rows[0]["row_name"], "AINSDALE VILLAGE SURGERY")
self.assertEqual(rows[0]["date"], "2015-01-01")
self.assertEqual(rows[0]["total_list_size"], "288")
self.assertEqual(rows[0]["star_pu.oral_antibacterials_item"], "23.1")
self.assertEqual(rows[0].get("astro_pu_cost"), None)
def test_api_view_org_details_all_nhs_with_json_key(self):
url = self.api_prefix
url += (
"/org_details?format=csv"
"&keys=star_pu.oral_antibacterials_item,total_list_size"
)
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(len(rows), 2)
self.assertEqual(rows[0]["date"], "2015-01-01")
self.assertEqual(float(rows[0]["total_list_size"]), 1260)
self.assertEqual(rows[0]["star_pu.oral_antibacterials_item"], "95.5")
self.assertEqual(rows[0].get("astro_pu_cost"), None)
def test_api_view_org_details_one_stp(self):
url = self.api_prefix
url += "/org_details?format=csv&org_type=stp&org=E54000020"
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(len(rows), 2)
self.assertEqual(rows[0]["row_id"], "E54000020")
self.assertEqual(rows[0]["row_name"], "Northamptonshire")
self.assertEqual(rows[0]["date"], "2015-01-01")
self.assertEqual(rows[0]["astro_pu_cost"], "363.3")
self.assertEqual(rows[0]["astro_pu_items"], "453.3")
self.assertEqual(rows[0]["star_pu.oral_antibacterials_item"], "45.3")
self.assertEqual(float(rows[0]["total_list_size"]), 648)
def test_api_view_org_details_one_regional_team(self):
url = self.api_prefix
url += "/org_details?format=csv&org_type=regional_team&org=Y55"
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(len(rows), 2)
self.assertEqual(rows[0]["row_id"], "Y55")
self.assertEqual(
rows[0]["row_name"], "MIDLANDS AND EAST OF ENGLAND COMMISSIONING REGION"
)
self.assertEqual(rows[0]["date"], "2015-01-01")
self.assertEqual(rows[0]["astro_pu_cost"], "363.3")
self.assertEqual(rows[0]["astro_pu_items"], "453.3")
self.assertEqual(rows[0]["star_pu.oral_antibacterials_item"], "45.3")
self.assertEqual(float(rows[0]["total_list_size"]), 648)
def test_api_view_org_details_one_pcn(self):
url = self.api_prefix
url += "/org_details?format=csv&org_type=pcn&org=PCN0001"
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
rows = list(csv.DictReader(response.content.decode("utf8").splitlines()))
self.assertEqual(len(rows), 2)
self.assertEqual(rows[0]["row_id"], "PCN0001")
self.assertEqual(rows[0]["row_name"], "Transformational Sustainability")
self.assertEqual(rows[0]["date"], "2015-01-01")
self.assertEqual(rows[0]["astro_pu_cost"], "262.2")
self.assertEqual(rows[0]["astro_pu_items"], "342.2")
self.assertEqual(rows[0]["star_pu.oral_antibacterials_item"], "34.2")
self.assertEqual(float(rows[0]["total_list_size"]), 468)
| |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
clean_html,
int_or_none,
parse_iso8601,
unescapeHTML,
)
class BlipTVIE(InfoExtractor):
    """Extractor for individual blip.tv videos.

    Handles three URL shapes: regular video pages and RSS flash URLs (both
    carrying a numeric ``id``), plus ``play``/``api.swf`` URLs carrying an
    opaque ``lookup_id`` which is first resolved to a numeric id via the
    redirect served at http://blip.tv/play/<lookup_id>.

    BUG FIX: the "Weekly Manga Recap" test description below previously used
    a single-quoted literal containing unescaped apostrophes ("he's", "it's"),
    which is a SyntaxError; it is now a double-quoted literal.
    """
    _VALID_URL = r'https?://(?:\w+\.)?blip\.tv/(?:(?:.+-|rss/flash/)(?P<id>\d+)|((?:play/|api\.swf#)(?P<lookup_id>[\da-zA-Z+_]+)))'

    _TESTS = [
        {
            'url': 'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
            'md5': 'c6934ad0b6acf2bd920720ec888eb812',
            'info_dict': {
                'id': '5779306',
                'ext': 'mov',
                'title': 'CBR EXCLUSIVE: "Gotham City Imposters" Bats VS Jokerz Short 3',
                'description': 'md5:9bc31f227219cde65e47eeec8d2dc596',
                'timestamp': 1323138843,
                'upload_date': '20111206',
                'uploader': 'cbr',
                'uploader_id': '679425',
                'duration': 81,
            }
        },
        {
            # https://github.com/rg3/youtube-dl/pull/2274
            'note': 'Video with subtitles',
            'url': 'http://blip.tv/play/h6Uag5OEVgI.html',
            'md5': '309f9d25b820b086ca163ffac8031806',
            'info_dict': {
                'id': '6586561',
                'ext': 'mp4',
                'title': 'Red vs. Blue Season 11 Episode 1',
                'description': 'One-Zero-One',
                'timestamp': 1371261608,
                'upload_date': '20130615',
                'uploader': 'redvsblue',
                'uploader_id': '792887',
                'duration': 279,
            }
        },
        {
            # https://bugzilla.redhat.com/show_bug.cgi?id=967465
            'url': 'http://a.blip.tv/api.swf#h6Uag5KbVwI',
            'md5': '314e87b1ebe7a48fcbfdd51b791ce5a6',
            'info_dict': {
                'id': '6573122',
                'ext': 'mov',
                'upload_date': '20130520',
                'description': 'Two hapless space marines argue over what to do when they realize they have an astronomically huge problem on their hands.',
                'title': 'Red vs. Blue Season 11 Trailer',
                'timestamp': 1369029609,
                'uploader': 'redvsblue',
                'uploader_id': '792887',
            }
        },
        {
            'url': 'http://blip.tv/play/gbk766dkj4Yn',
            'md5': 'fe0a33f022d49399a241e84a8ea8b8e3',
            'info_dict': {
                'id': '1749452',
                'ext': 'mp4',
                'upload_date': '20090208',
                'description': 'Witness the first appearance of the Nostalgia Critic character, as Doug reviews the movie Transformers.',
                'title': 'Nostalgia Critic: Transformers',
                'timestamp': 1234068723,
                'uploader': 'NostalgiaCritic',
                'uploader_id': '246467',
            }
        },
        {
            # https://github.com/rg3/youtube-dl/pull/4404
            'note': 'Audio only',
            'url': 'http://blip.tv/hilarios-productions/weekly-manga-recap-kingdom-7119982',
            'md5': '76c0a56f24e769ceaab21fbb6416a351',
            'info_dict': {
                'id': '7103299',
                'ext': 'flv',
                'title': 'Weekly Manga Recap: Kingdom',
                'description': "And then Shin breaks the enemy line, and he's all like HWAH! And then he slices a guy and it's all like FWASHING! And... it's really hard to describe the best parts of this series without breaking down into sound effects, okay?",
                'timestamp': 1417660321,
                'upload_date': '20141204',
                'uploader': 'The Rollo T',
                'uploader_id': '407429',
                'duration': 7251,
                'vcodec': 'none',
            }
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        lookup_id = mobj.group('lookup_id')

        # See https://github.com/rg3/youtube-dl/issues/857 and
        # https://github.com/rg3/youtube-dl/issues/4197
        if lookup_id:
            # Resolve the opaque lookup id: the play URL redirects to a URL
            # whose 'file' query argument is a canonical video URL.
            urlh = self._request_webpage(
                'http://blip.tv/play/%s' % lookup_id, lookup_id, 'Resolving lookup id')
            url = compat_urlparse.urlparse(urlh.geturl())
            qs = compat_urlparse.parse_qs(url.query)
            mobj = re.match(self._VALID_URL, qs['file'][0])

        video_id = mobj.group('id')

        rss = self._download_xml('http://blip.tv/rss/flash/%s' % video_id, video_id, 'Downloading video RSS')

        # Helpers building namespaced tag names for the RSS document.
        def blip(s):
            return '{http://blip.tv/dtd/blip/1.0}%s' % s

        def media(s):
            return '{http://search.yahoo.com/mrss/}%s' % s

        def itunes(s):
            return '{http://www.itunes.com/dtds/podcast-1.0.dtd}%s' % s

        item = rss.find('channel/item')

        # Prefer the blip-reported item id over the one parsed from the URL.
        video_id = item.find(blip('item_id')).text
        title = item.find('./title').text
        description = clean_html(compat_str(item.find(blip('puredescription')).text))
        timestamp = parse_iso8601(item.find(blip('datestamp')).text)
        uploader = item.find(blip('user')).text
        uploader_id = item.find(blip('userid')).text
        # int_or_none tolerates missing/non-numeric runtime text (was int()).
        duration = int_or_none(item.find(blip('runtime')).text)
        media_thumbnail = item.find(media('thumbnail'))
        thumbnail = media_thumbnail.get('url') if media_thumbnail is not None else item.find(itunes('image')).text
        categories = [category.text for category in item.findall('category')]

        formats = []
        subtitles_urls = {}

        media_group = item.find(media('group'))
        for media_content in media_group.findall(media('content')):
            url = media_content.get('url')
            role = media_content.get(blip('role'))
            # Each media URL must be resolved through blip's flashvars
            # endpoint to obtain the real (direct) URL.
            msg = self._download_webpage(
                url + '?showplayer=20140425131715&referrer=http://blip.tv&mask=7&skin=flashvars&view=url',
                video_id, 'Resolving URL for %s' % role)
            real_url = compat_urlparse.parse_qs(msg.strip())['message'][0]
            media_type = media_content.get('type')
            if media_type == 'text/srt' or url.endswith('.srt'):
                LANGS = {
                    'english': 'en',
                }
                # Role names look like '<something>-<language>'.
                lang = role.rpartition('-')[-1].strip().lower()
                langcode = LANGS.get(lang, lang)
                subtitles_urls[langcode] = url
            elif media_type.startswith('video/'):
                formats.append({
                    'url': real_url,
                    'format_id': role,
                    'format_note': media_type,
                    'vcodec': media_content.get(blip('vcodec')) or 'none',
                    'acodec': media_content.get(blip('acodec')),
                    'filesize': media_content.get('filesize'),
                    'width': int_or_none(media_content.get('width')),
                    'height': int_or_none(media_content.get('height')),
                })
        self._check_formats(formats, video_id)
        self._sort_formats(formats)

        subtitles = self.extract_subtitles(video_id, subtitles_urls)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'duration': duration,
            'thumbnail': thumbnail,
            'categories': categories,
            'formats': formats,
            'subtitles': subtitles,
        }

    def _get_subtitles(self, video_id, subtitles_urls):
        subtitles = {}
        for lang, url in subtitles_urls.items():
            # For some weird reason, blip.tv serves a video instead of subtitles
            # when we request with a common UA
            req = compat_urllib_request.Request(url)
            req.add_header('User-Agent', 'youtube-dl')
            subtitles[lang] = [{
                # The extension is 'srt' but it's actually an 'ass' file
                'ext': 'ass',
                'data': self._download_webpage(req, None, note=False),
            }]
        return subtitles
class BlipTVUserIE(InfoExtractor):
    """Extractor for the complete list of a blip.tv user's videos."""
    _VALID_URL = r'(?:(?:https?://(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$'
    _PAGE_SIZE = 12
    IE_NAME = 'blip.tv:user'
    _TEST = {
        'url': 'http://blip.tv/actone',
        'info_dict': {
            'id': 'actone',
            'title': 'Act One: The Series',
        },
        'playlist_count': 5,
    }

    def _real_extract(self, url):
        username = re.match(self._VALID_URL, url).group(1)

        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'

        page = self._download_webpage(url, username, 'Downloading user page')
        users_id = re.search(r'data-users-id="([^"]+)"', page).group(1)
        page_base = page_base % users_id
        title = self._og_search_title(page)

        # Download video ids using BlipTV Ajax calls. Result size per
        # query is limited (currently to 12 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.
        video_ids = []
        pagenum = 1
        while True:
            page_url = page_base + "&page=" + str(pagenum)
            page = self._download_webpage(
                page_url, username, 'Downloading video ids from page %d' % pagenum)

            # Extract video identifiers, de-duplicating within the page.
            # NB: membership is (deliberately, as before) tested against the
            # already-unescaped entries.
            ids_in_page = []
            for match in re.finditer(r'href="/([^"]+)"', page):
                raw_id = match.group(1)
                if raw_id not in ids_in_page:
                    ids_in_page.append(unescapeHTML(raw_id))
            video_ids.extend(ids_in_page)

            # A page shorter than PAGE_SIZE must be the last one; stop
            # querying once we see it.
            if len(ids_in_page) < self._PAGE_SIZE:
                break
            pagenum += 1

        entries = [
            self.url_result('http://blip.tv/%s' % video_id, 'BlipTV')
            for video_id in video_ids
        ]
        return self.playlist_result(
            entries, playlist_title=title, playlist_id=username)
| |
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import hashlib
import binascii
from . import backend
from ._asn1 import (
armor,
Certificate as Asn1Certificate,
DHParameters,
EncryptedPrivateKeyInfo,
Null,
OrderedDict,
Pbkdf2Salt,
PrivateKeyInfo,
PublicKeyInfo,
)
from ._asymmetric import _unwrap_private_key_info
from ._errors import pretty_message
from ._types import type_name, str_cls
from .kdf import pbkdf2, pbkdf2_iteration_calculator
from .symmetric import aes_cbc_pkcs7_encrypt
from .util import rand_bytes
_backend = backend()
if _backend == 'mac':
from ._mac.asymmetric import (
Certificate,
dsa_sign,
dsa_verify,
ecdsa_sign,
ecdsa_verify,
generate_pair,
generate_dh_parameters,
load_certificate,
load_pkcs12,
load_private_key,
load_public_key,
PrivateKey,
PublicKey,
rsa_pkcs1v15_sign,
rsa_pkcs1v15_verify,
rsa_pss_sign,
rsa_pss_verify,
rsa_pkcs1v15_encrypt,
rsa_pkcs1v15_decrypt,
rsa_oaep_encrypt,
rsa_oaep_decrypt,
)
elif _backend == 'win' or _backend == 'winlegacy':
from ._win.asymmetric import (
Certificate,
dsa_sign,
dsa_verify,
ecdsa_sign,
ecdsa_verify,
generate_pair,
generate_dh_parameters,
load_certificate,
load_pkcs12,
load_private_key,
load_public_key,
PrivateKey,
PublicKey,
rsa_pkcs1v15_sign,
rsa_pkcs1v15_verify,
rsa_pss_sign,
rsa_pss_verify,
rsa_pkcs1v15_encrypt,
rsa_pkcs1v15_decrypt,
rsa_oaep_encrypt,
rsa_oaep_decrypt,
)
else:
from ._openssl.asymmetric import (
Certificate,
dsa_sign,
dsa_verify,
ecdsa_sign,
ecdsa_verify,
generate_pair,
generate_dh_parameters,
load_certificate,
load_pkcs12,
load_private_key,
load_public_key,
PrivateKey,
PublicKey,
rsa_pkcs1v15_sign,
rsa_pkcs1v15_verify,
rsa_pss_sign,
rsa_pss_verify,
rsa_pkcs1v15_encrypt,
rsa_pkcs1v15_decrypt,
rsa_oaep_encrypt,
rsa_oaep_decrypt,
)
# Public API of this module: the backend-specific primitives re-exported
# above plus the dump_* serialization helpers defined below.
__all__ = [
    'Certificate',
    'dsa_sign',
    'dsa_verify',
    'dump_certificate',
    'dump_dh_parameters',
    'dump_openssl_private_key',
    'dump_private_key',
    'dump_public_key',
    'ecdsa_sign',
    'ecdsa_verify',
    'generate_pair',
    'generate_dh_parameters',
    'load_certificate',
    'load_pkcs12',
    'load_private_key',
    'load_public_key',
    'PrivateKey',
    'PublicKey',
    'rsa_oaep_decrypt',
    'rsa_oaep_encrypt',
    'rsa_pkcs1v15_decrypt',
    'rsa_pkcs1v15_encrypt',
    'rsa_pkcs1v15_sign',
    'rsa_pkcs1v15_verify',
    'rsa_pss_sign',
    'rsa_pss_verify',
]
def dump_dh_parameters(dh_parameters, encoding='pem'):
    """
    Serializes an asn1crypto.algos.DHParameters object into a byte string

    :param dh_parameters:
        An asn1crypto.algos.DHParameters object

    :param encoding:
        A unicode string of "pem" or "der"

    :return:
        A byte string of the encoded DH parameters
    """

    if encoding not in {'pem', 'der'}:
        raise ValueError(pretty_message(
            '''
            encoding must be one of "pem", "der", not %s
            ''',
            repr(encoding)
        ))

    if not isinstance(dh_parameters, DHParameters):
        raise TypeError(pretty_message(
            '''
            dh_parameters must be an instance of asn1crypto.algos.DHParameters,
            not %s
            ''',
            type_name(dh_parameters)
        ))

    der_bytes = dh_parameters.dump()
    if encoding == 'der':
        return der_bytes
    # PEM output wraps the DER in a "DH PARAMETERS" armor block
    return armor('DH PARAMETERS', der_bytes)
def dump_public_key(public_key, encoding='pem'):
    """
    Serializes a public key object into a byte string

    :param public_key:
        An oscrypto.asymmetric.PublicKey or asn1crypto.keys.PublicKeyInfo object

    :param encoding:
        A unicode string of "pem" or "der"

    :return:
        A byte string of the encoded public key
    """

    if encoding not in {'pem', 'der'}:
        raise ValueError(pretty_message(
            '''
            encoding must be one of "pem", "der", not %s
            ''',
            repr(encoding)
        ))

    is_oscrypto = isinstance(public_key, PublicKey)
    if not is_oscrypto and not isinstance(public_key, PublicKeyInfo):
        raise TypeError(pretty_message(
            '''
            public_key must be an instance of oscrypto.asymmetric.PublicKey or
            asn1crypto.keys.PublicKeyInfo, not %s
            ''',
            type_name(public_key)
        ))

    if is_oscrypto:
        # Unwrap to the underlying asn1crypto PublicKeyInfo object
        public_key = public_key.asn1

    der_bytes = public_key.dump()
    if encoding == 'der':
        return der_bytes
    return armor('PUBLIC KEY', der_bytes)
def dump_certificate(certificate, encoding='pem'):
    """
    Serializes a certificate object into a byte string

    :param certificate:
        An oscrypto.asymmetric.Certificate or asn1crypto.x509.Certificate object

    :param encoding:
        A unicode string of "pem" or "der"

    :return:
        A byte string of the encoded certificate
    """

    if encoding not in {'pem', 'der'}:
        raise ValueError(pretty_message(
            '''
            encoding must be one of "pem", "der", not %s
            ''',
            repr(encoding)
        ))

    is_oscrypto = isinstance(certificate, Certificate)
    if not is_oscrypto and not isinstance(certificate, Asn1Certificate):
        raise TypeError(pretty_message(
            '''
            certificate must be an instance of oscrypto.asymmetric.Certificate
            or asn1crypto.x509.Certificate, not %s
            ''',
            type_name(certificate)
        ))

    if is_oscrypto:
        # Unwrap to the underlying asn1crypto x509.Certificate object
        certificate = certificate.asn1

    der_bytes = certificate.dump()
    if encoding == 'der':
        return der_bytes
    return armor('CERTIFICATE', der_bytes)
def dump_private_key(private_key, passphrase, encoding='pem', target_ms=200):
    """
    Serializes a private key object into a byte string of the PKCS#8 format

    :param private_key:
        An oscrypto.asymmetric.PrivateKey or asn1crypto.keys.PrivateKeyInfo
        object

    :param passphrase:
        A unicode string of the passphrase to encrypt the private key with.
        A passphrase of None will result in no encryption. A blank string will
        result in a ValueError to help ensure that the lack of passphrase is
        intentional.

    :param encoding:
        A unicode string of "pem" or "der"

    :param target_ms:
        Use PBKDF2 with the number of iterations that takes about this many
        milliseconds on the current machine.

    :raises:
        ValueError - when a blank string is provided for the passphrase

    :return:
        A byte string of the encoded and encrypted public key
    """

    if encoding not in set(['pem', 'der']):
        raise ValueError(pretty_message(
            '''
            encoding must be one of "pem", "der", not %s
            ''',
            repr(encoding)
        ))

    if passphrase is not None:
        if not isinstance(passphrase, str_cls):
            raise TypeError(pretty_message(
                '''
                passphrase must be a unicode string, not %s
                ''',
                type_name(passphrase)
            ))
        # A blank passphrase is rejected rather than treated as "no
        # encryption" so the caller must opt out explicitly with None
        if passphrase == '':
            raise ValueError(pretty_message(
                '''
                passphrase may not be a blank string - pass None to disable
                encryption
                '''
            ))

    is_oscrypto = isinstance(private_key, PrivateKey)
    if not isinstance(private_key, PrivateKeyInfo) and not is_oscrypto:
        raise TypeError(pretty_message(
            '''
            private_key must be an instance of oscrypto.asymmetric.PrivateKey
            or asn1crypto.keys.PrivateKeyInfo, not %s
            ''',
            type_name(private_key)
        ))

    if is_oscrypto:
        # Unwrap to the underlying asn1crypto PrivateKeyInfo object
        private_key = private_key.asn1

    output = private_key.dump()

    if passphrase is not None:
        # Encrypt using PKCS#5 v2.0 (PBES2): AES-256-CBC with a key derived
        # via PBKDF2-HMAC-SHA256; the salt is the same length as the key
        cipher = 'aes256_cbc'
        key_length = 32
        kdf_hmac = 'sha256'
        kdf_salt = rand_bytes(key_length)
        # Calibrate the iteration count to the current machine's speed
        iterations = pbkdf2_iteration_calculator(kdf_hmac, key_length, target_ms=target_ms, quiet=True)
        # Need a bare minimum of 10,000 iterations for PBKDF2 as of 2015
        if iterations < 10000:
            iterations = 10000
        passphrase_bytes = passphrase.encode('utf-8')
        key = pbkdf2(kdf_hmac, passphrase_bytes, kdf_salt, iterations, key_length)
        iv, ciphertext = aes_cbc_pkcs7_encrypt(key, output, None)
        # Wrap the ciphertext plus all KDF/cipher parameters in an ASN.1
        # EncryptedPrivateKeyInfo structure (RFC 5208)
        output = EncryptedPrivateKeyInfo({
            'encryption_algorithm': {
                'algorithm': 'pbes2',
                'parameters': {
                    'key_derivation_func': {
                        'algorithm': 'pbkdf2',
                        'parameters': {
                            'salt': Pbkdf2Salt(
                                name='specified',
                                value=kdf_salt
                            ),
                            'iteration_count': iterations,
                            'prf': {
                                'algorithm': kdf_hmac,
                                'parameters': Null()
                            }
                        }
                    },
                    'encryption_scheme': {
                        'algorithm': cipher,
                        'parameters': iv
                    }
                }
            },
            'encrypted_data': ciphertext
        }).dump()

    if encoding == 'pem':
        # The PEM label distinguishes plaintext from encrypted PKCS#8
        if passphrase is None:
            object_type = 'PRIVATE KEY'
        else:
            object_type = 'ENCRYPTED PRIVATE KEY'
        output = armor(object_type, output)

    return output
def dump_openssl_private_key(private_key, passphrase):
    """
    Serializes a private key object into a byte string of the PEM formats used
    by OpenSSL. The format chosen will depend on the type of private key - RSA,
    DSA or EC.

    Do not use this method unless you really must interact with a system that
    does not support PKCS#8 private keys. The encryption provided by PKCS#8 is
    far superior to the OpenSSL formats. This is due to the fact that the
    OpenSSL formats don't stretch the passphrase, making it very easy to
    brute-force.

    :param private_key:
        An oscrypto.asymmetric.PrivateKey or asn1crypto.keys.PrivateKeyInfo
        object

    :param passphrase:
        A unicode string of the passphrase to encrypt the private key with.
        A passphrase of None will result in no encryption. A blank string will
        result in a ValueError to help ensure that the lack of passphrase is
        intentional.

    :raises:
        ValueError - when a blank string is provided for the passphrase, or
        the private key algorithm is not RSA, DSA or EC

    :return:
        A byte string of the encoded and encrypted public key
    """

    if passphrase is not None:
        if not isinstance(passphrase, str_cls):
            raise TypeError(pretty_message(
                '''
                passphrase must be a unicode string, not %s
                ''',
                type_name(passphrase)
            ))
        if passphrase == '':
            raise ValueError(pretty_message(
                '''
                passphrase may not be a blank string - pass None to disable
                encryption
                '''
            ))

    is_oscrypto = isinstance(private_key, PrivateKey)
    if not isinstance(private_key, PrivateKeyInfo) and not is_oscrypto:
        raise TypeError(pretty_message(
            '''
            private_key must be an instance of oscrypto.asymmetric.PrivateKey or
            asn1crypto.keys.PrivateKeyInfo, not %s
            ''',
            type_name(private_key)
        ))

    if is_oscrypto:
        # Unwrap to the underlying asn1crypto PrivateKeyInfo object
        private_key = private_key.asn1

    output = _unwrap_private_key_info(private_key).dump()

    headers = None
    if passphrase is not None:
        # Legacy OpenSSL PEM encryption: AES-128-CBC with a key derived by
        # the (weak, unstretched) EVP_BytesToKey MD5 scheme. The IV doubles
        # as the KDF salt (first 8 bytes) and is recorded in the PEM headers.
        iv = rand_bytes(16)

        headers = OrderedDict()
        headers['Proc-Type'] = '4,ENCRYPTED'
        headers['DEK-Info'] = 'AES-128-CBC,%s' % binascii.hexlify(iv).decode('ascii')

        key_length = 16
        passphrase_bytes = passphrase.encode('utf-8')

        key = hashlib.md5(passphrase_bytes + iv[0:8]).digest()
        while key_length > len(key):
            key += hashlib.md5(key + passphrase_bytes + iv[0:8]).digest()
        key = key[0:key_length]

        iv, output = aes_cbc_pkcs7_encrypt(key, output, iv)

    if private_key.algorithm == 'ec':
        object_type = 'EC PRIVATE KEY'
    elif private_key.algorithm == 'rsa':
        object_type = 'RSA PRIVATE KEY'
    elif private_key.algorithm == 'dsa':
        object_type = 'DSA PRIVATE KEY'
    else:
        # BUG FIX: previously an unrecognized algorithm left object_type
        # unbound, producing an UnboundLocalError instead of a clear error
        raise ValueError(pretty_message(
            '''
            private_key must be an RSA, DSA or EC key, not %s
            ''',
            repr(private_key.algorithm)
        ))

    return armor(object_type, output, headers=headers)
| |
# -*- coding: utf-8 -*-
from contextlib import contextmanager
import inspect
from itertools import chain
from django.conf import settings
from django.utils.decorators import method_decorator
from django.utils.termcolors import colorize
from sekizai.helpers import validate_template
from cms import constants
from cms.models import AliasPluginModel
from cms.utils.conf import get_cms_setting
from cms.utils.compat import DJANGO_1_8
from cms.utils.compat.dj import is_installed
from cms.utils.conf import DEPRECATED_CMS_SETTINGS
SUCCESS = 1
WARNING = 2
ERROR = 3
SKIPPED = 4
CHECKERS = []
class FileOutputWrapper(object):
    """
    Wraps two file-like objects (that support at the very least the 'write'
    method) into an API to be used by the check function further down in
    this module.

    The following properties are public (and required) by alternative implementations:

        errors: integer count of errors encountered
        successes: integer count of successes encountered
        warnings: integer count of warnings encountered
        skips: integer count of skips encountered
        successful: Whether the checks were successful (no errors)

    They must also provide these methods:

        write_line(message=''): writes a message to stdout
        write_stderr_line(message=''): writes a message to stderr
        success(message): reports and registers a successful check
        error(message): reports and registers an error
        warn(message); reports and registers a warning
        skip(message): reports and registers a skipped check
        section(title): A context manager that starts a new section. For the
            Section API see FileSectionWrapper
    """
    def __init__(self, stdout, stderr):
        # stdout/stderr only need to support a ``write`` method.
        self.stdout = stdout
        self.stderr = stderr
        # Class used to report checks inside a section; subclasses may swap it.
        self.section_wrapper = FileSectionWrapper
        # Running tallies of check outcomes.
        self.errors = 0
        self.successes = 0
        self.warnings = 0
        self.skips = 0

    def colorize(self, msg, opts=(), **kwargs):
        # Thin hook over django.utils.termcolors.colorize so subclasses can
        # disable or replace terminal coloring.
        return colorize(msg, opts=opts, **kwargs)

    def write_line(self, message=''):
        self.write(u'%s\n' % message)

    def write(self, message):
        self.stdout.write(message)

    def write_stderr_line(self, message=''):
        self.write_stderr(u'%s\n' % message)

    def write_stderr(self, message):
        self.stderr.write(message)

    def success(self, message):
        # Successes go to stdout.
        self.successes += 1
        self.write_line(u'%s %s' % (message, self.colorize('[OK]', fg='green', opts=['bold'])))

    def error(self, message):
        # Errors and warnings go to stderr.
        self.errors += 1
        self.write_stderr_line(u'%s %s' % (message, self.colorize('[ERROR]', fg='red', opts=['bold'])))

    def warn(self, message):
        self.warnings += 1
        self.write_stderr_line(u'%s %s' % (message, self.colorize('[WARNING]', fg='yellow', opts=['bold'])))

    def skip(self, message):
        # Skips are informational and go to stdout.
        self.skips += 1
        self.write_line(u'%s %s' % (message, self.colorize('[SKIP]', fg='blue', opts=['bold'])))

    @method_decorator(contextmanager)
    def section(self, title):
        # Context manager yielding a section wrapper. Prints a title header,
        # runs the checker body, and merges the section's tallies into this
        # wrapper's counts on normal exit. If the checker raises, the
        # section's counts are NOT merged; a generic error is reported and
        # the exception is re-raised.
        self.write_line(self.colorize(title, opts=['bold']))
        self.write_line(self.colorize('=' * len(title), opts=['bold']))
        self.write_line()
        wrapper = self.section_wrapper(self)
        try:
            yield wrapper
        except:
            self.error('Checker failed, see traceback')
            raise
        self.errors += wrapper.errors
        self.successes += wrapper.successes
        self.warnings += wrapper.warnings
        self.skips += wrapper.skips
        self.write_line('')

    @property
    def successful(self):
        # A run counts as successful iff no errors were registered
        # (warnings and skips do not affect this).
        return not self.errors
class FileSectionWrapper(FileOutputWrapper):
    """
    Used from FileOutputWrapper to report checks in a section.

    If you want to provide your own output class, you may want to subclass
    this class for the section reporting too. If you want to use your own,
    you must defined at least the same API as FileOutputWrapper, as well
    as these four additional methods:

        finish_success(message): End the section (successfully)
        finish_error(message): End the section with errors
        finish_warning(message): End this section with a warning
        finish_skip(message): End this (skipped) section
    """
    def __init__(self, wrapper):
        super(FileSectionWrapper, self).__init__(wrapper.stdout, wrapper.stderr)
        # Keep a reference to the parent wrapper so finish_* can report the
        # section's final status at top level (unindented).
        self.wrapper = wrapper

    def write_line(self, message=''):
        # Section output is indented under the section title.
        self.write(u'  - %s\n' % message)

    def write_stderr_line(self, message=''):
        self.write_stderr(u'  - %s\n' % message)

    def finish_success(self, message):
        self.wrapper.write_line()
        self.wrapper.success(message)

    def finish_error(self, message):
        self.wrapper.write_line()
        self.wrapper.error(message)

    def finish_warning(self, message):
        self.wrapper.write_line()
        # BUG FIX: FileOutputWrapper's method is ``warn``; calling the
        # nonexistent ``warning`` raised AttributeError.
        self.wrapper.warn(message)

    def finish_skip(self, message):
        # BUG FIX: was ``write_lin`` (typo) which raised AttributeError.
        self.wrapper.write_line()
        self.wrapper.skip(message)
def define_check(func):
    """
    Helper decorator to register a check function.
    """
    # Registered checkers are later executed in definition order by the
    # check runner; the function itself is returned unmodified.
    CHECKERS.append(func)
    return func
@define_check
def check_sekizai(output):
    """Verify django-sekizai is installed and wired into the CMS templates."""
    with output.section("Sekizai") as section:
        has_sekizai = is_installed('sekizai')
        if has_sekizai:
            section.success("Sekizai is installed")
        else:
            section.error("Sekizai is not installed, could not find 'sekizai' in INSTALLED_APPS")

        # Collect the context processors configured across all template engines.
        configured_processors = []
        for template_conf in settings.TEMPLATES:
            configured_processors.extend(template_conf['OPTIONS'].get('context_processors', []))
        if 'sekizai.context_processors.sekizai' in configured_processors:
            section.success("Sekizai template context processor is installed")
        else:
            section.error("Sekizai template context processor is not installed, could not find "
                          "'sekizai.context_processors.sekizai' in TEMPLATES option context_processors")

        if not has_sekizai:
            # sekizai is not installed.
            # we can't reliable check templates
            # because template loading won't work
            return

        for template_name, _ in get_cms_setting('TEMPLATES'):
            if template_name == constants.TEMPLATE_INHERITANCE_MAGIC:
                continue
            if validate_template(template_name, ['js', 'css']):
                section.success("Sekizai namespaces 'js' and 'css' found in %r" % template_name)
            else:
                section.error("Sekizai namespaces 'js' and 'css' not found in %r" % template_name)

        if section.successful:
            section.finish_success("Sekizai configuration okay")
        else:
            section.finish_error("Sekizai configuration has errors")
@define_check
def check_i18n(output):
    """Check language-related settings for common misconfigurations."""
    with output.section("Internationalization") as section:
        if isinstance(getattr(settings, 'CMS_LANGUAGES', {}), dict):
            section.success("New style CMS_LANGUAGES")
        else:
            section.warn("Old style (tuple based) CMS_LANGUAGES, please switch to the new (dictionary based) style")
        if getattr(settings, 'LANGUAGE_CODE', '').find('_') > -1:
            section.warn("LANGUAGE_CODE must contain a valid language code, not a locale (e.g.: 'en-us' instead of "
                         "'en_US'): '%s' provided" % getattr(settings, 'LANGUAGE_CODE', ''))
        for lang in getattr(settings, 'LANGUAGES', ()):
            if lang[0].find('_') > -1:
                section.warn("LANGUAGES must contain valid language codes, not locales (e.g.: 'en-us' instead of "
                             "'en_US'): '%s' provided" % lang[0])
        # An int hashes to itself, so this is a cheap "is SITE_ID an int"
        # test that also works for int subclasses.
        if settings.SITE_ID == hash(settings.SITE_ID):
            for site, items in get_cms_setting('LANGUAGES').items():
                # Non-int keys (e.g. 'default') carry configuration, not
                # per-site language lists; skip them.
                # FIX: use isinstance instead of the ``type(site) == int``
                # anti-pattern.
                if isinstance(site, int):
                    for lang in items:
                        if lang['code'].find('_') > -1:
                            section.warn("CMS_LANGUAGES entries must contain valid language codes, not locales (e.g.: "
                                         "'en-us' instead of 'en_US'): '%s' provided" % lang['code'])
        else:
            section.error("SITE_ID must be an integer, not %r" % settings.SITE_ID)
@define_check
def check_middlewares(output):
    """Check that every middleware django CMS requires is configured."""
    with output.section("Middlewares") as section:
        required_middlewares = (
            'django.contrib.sessions.middleware.SessionMiddleware',
            'django.middleware.csrf.CsrfViewMiddleware',
            'django.contrib.auth.middleware.AuthenticationMiddleware',
            'django.contrib.messages.middleware.MessageMiddleware',
            'django.middleware.locale.LocaleMiddleware',
            'django.middleware.common.CommonMiddleware',
            'cms.middleware.user.CurrentUserMiddleware',
            'cms.middleware.page.CurrentPageMiddleware',
            'cms.middleware.toolbar.ToolbarMiddleware',
            'cms.middleware.language.LanguageCookieMiddleware',
        )
        # Django 1.10+ uses MIDDLEWARE; fall back to the legacy
        # MIDDLEWARE_CLASSES setting when it is absent/empty.
        if getattr(settings, 'MIDDLEWARE', None):
            middlewares = settings.MIDDLEWARE
            setting_name = 'MIDDLEWARE'
        else:
            middlewares = settings.MIDDLEWARE_CLASSES
            setting_name = 'MIDDLEWARE_CLASSES'
        for middleware in required_middlewares:
            if middleware not in middlewares:
                # FIX: name the setting actually in use; the message used to
                # always say MIDDLEWARE_CLASSES, which is misleading on
                # projects using the new-style MIDDLEWARE setting.
                section.error("%s middleware must be in %s" % (middleware, setting_name))
@define_check
def check_context_processors(output):
    """Ensure the CMS settings context processor is configured in TEMPLATES."""
    with output.section("Context processors") as section:
        # Gather processors configured across all template engines.
        configured = []
        for template_conf in settings.TEMPLATES:
            configured.extend(template_conf['OPTIONS'].get('context_processors', []))
        for required in ('cms.context_processors.cms_settings',):
            if required not in configured:
                section.error("%s context processor must be in TEMPLATES option context_processors" % required)
@define_check
def check_deprecated_settings(output):
    """Warn about project settings that have been deprecated or replaced."""
    with output.section("Deprecated settings") as section:
        found = False
        for deprecated, new_setting in DEPRECATED_CMS_SETTINGS.items():
            if not hasattr(settings, deprecated):
                continue
            if new_setting:
                # BUG FIX: the format string has two placeholders but only
                # ``new_setting`` was supplied, raising "not enough arguments
                # for format string" whenever a replaced setting was found.
                message = "Deprecated setting %s found. This setting has been replaced by %s" % (deprecated, new_setting)
                section.warn(message)
            else:
                message = "Deprecated setting %s found. This setting is no longer in use and can be removed" % deprecated
                section.warn(message)
            found = True
        if not found:
            section.skip("No deprecated settings found")
@define_check
def check_plugin_instances(output):
    """Check that plugin instances in the database match the installed plugins."""
    # Imported lazily so the check framework can load without the subcommand.
    from cms.management.commands.subcommands.list import plugin_report
    with output.section("Plugin instances") as section:
        # get the report
        report = plugin_report()
        section.success("Plugin instances of %s types found in the database" % len(report))
        # loop over plugin types in the report
        for plugin_type in report:
            # warn about those that are not installed
            if not plugin_type["model"]:
                section.error("%s has instances but is no longer installed" % plugin_type["type"] )
            # warn about those that have unsaved instances
            if plugin_type["unsaved_instances"]:
                section.error(
                    "%s has %s unsaved instances" % (plugin_type["type"], len(plugin_type["unsaved_instances"])))
        if section.successful:
            section.finish_success("The plugins in your database are in good order")
        else:
            section.finish_error("There are potentially serious problems with the plugins in your database. \nEven if "
                                 "your site works, you should run the 'manage.py cms list plugins' \ncommand and then "
                                 "the 'manage.py cms delete-orphaned-plugins' command. \nThis will alter your "
                                 "database; read the documentation before using it.")
@define_check
def check_copy_relations(output):
    """Warn about plugin/extension models that have relations but define no
    ``copy_relations`` method (relations would be lost on copy/publish)."""
    from cms.plugin_pool import plugin_pool
    from cms.extensions import extension_pool
    from cms.extensions.models import BaseExtension
    from cms.models.pluginmodel import CMSPlugin
    # Render "<module>.<ClassName>" for display.
    c_to_s = lambda klass: '%s.%s' % (klass.__module__, klass.__name__)
    def get_class(method_name, model):
        # Walk the MRO and return the first class that defines `method_name`
        # in its own __dict__ (i.e. not via inheritance), or None.
        for cls in inspect.getmro(model):
            if method_name in cls.__dict__:
                return cls
        return None
    with output.section('Presence of "copy_relations"') as section:
        plugin_pool.discover_plugins()
        for plugin in plugin_pool.plugins.values():
            plugin_class = plugin.model
            if get_class('copy_relations', plugin_class) is not CMSPlugin or plugin_class is CMSPlugin:
                # this class defines a ``copy_relations`` method, nothing more
                # to do
                continue
            for rel in plugin_class._meta.many_to_many:
                section.warn('%s has a many-to-many relation to %s,\n    but no "copy_relations" method defined.' % (
                    c_to_s(plugin_class),
                    c_to_s(rel.model),
                ))
            for rel in plugin_class._get_related_objects():
                # Relations to CMSPlugin itself, to subclasses of the plugin's
                # own model, and to the alias plugin are handled by the core.
                if rel.model != CMSPlugin and not issubclass(rel.model, plugin.model) and rel.model != AliasPluginModel:
                    section.warn('%s has a foreign key from %s,\n    but no "copy_relations" method defined.' % (
                        c_to_s(plugin_class),
                        c_to_s(rel.model),
                    ))
        for extension in chain(extension_pool.page_extensions, extension_pool.title_extensions):
            if get_class('copy_relations', extension) is not BaseExtension:
                # OK, looks like there is a 'copy_relations' defined in the
                # extension... move along...
                continue
            for rel in extension._meta.many_to_many:
                # Django 1.8 exposes the target model via rel.related.model;
                # later versions via rel.remote_field.model.
                if DJANGO_1_8:
                    section.warn('%s has a many-to-many relation to %s,\n    '
                                 'but no "copy_relations" method defined.' % (
                                     c_to_s(extension),
                                     c_to_s(rel.related.model),
                                 ))
                else:
                    section.warn('%s has a many-to-many relation to %s,\n    '
                                 'but no "copy_relations" method defined.' % (
                                     c_to_s(extension),
                                     c_to_s(rel.remote_field.model),
                                 ))
            for rel in extension._get_related_objects():
                if rel.model != extension:
                    section.warn('%s has a foreign key from %s,\n    but no "copy_relations" method defined.' % (
                        c_to_s(extension),
                        c_to_s(rel.model),
                    ))
        if not section.warnings:
            section.finish_success('All plugins and page/title extensions have "copy_relations" method if needed.')
        else:
            # NOTE(review): this branch reports a problem but still calls
            # finish_success -- presumably deliberate so warnings don't fail
            # the overall check; confirm against the check framework.
            section.finish_success('Some plugins or page/title extensions do not define a "copy_relations" method.\n'
                                   'This might lead to data loss when publishing or copying plugins/extensions.\n'
                                   'See https://django-cms.readthedocs.io/en/latest/extending_cms/custom_plugins.html#handling-relations or '  # noqa
                                   'https://django-cms.readthedocs.io/en/latest/extending_cms/extending_page_title.html#handling-relations.')  # noqa
def check(output):
    """
    Checks the configuration/environment of this django CMS installation.

    'output' should be an object that provides the same API as FileOutputWrapper.

    Returns whether the configuration/environment are okay (has no errors)
    """
    title = "Checking django CMS installation"
    border = '*' * len(title)
    # Banner: starred border, title, starred border.
    for banner_line in (border, title, border):
        output.write_line(output.colorize(banner_line, opts=['bold']))
    output.write_line()
    # Run every registered checker; each prints its own section.
    for checker in CHECKERS:
        checker(output)
        output.write_line()
    with output.section("OVERALL RESULTS"):
        if output.errors:
            output.write_stderr_line(output.colorize("%s errors!" % output.errors, opts=['bold'], fg='red'))
        if output.warnings:
            output.write_stderr_line(output.colorize("%s warnings!" % output.warnings, opts=['bold'], fg='yellow'))
        if output.skips:
            output.write_line(output.colorize("%s checks skipped!" % output.skips, opts=['bold'], fg='blue'))
        output.write_line(output.colorize("%s checks successful!" % output.successes, opts=['bold'], fg='green'))
        output.write_line()
        if output.errors:
            output.write_stderr_line(output.colorize('Please check the errors above', opts=['bold'], fg='red'))
        elif output.warnings:
            output.write_stderr_line(output.colorize('Installation okay, but please check warnings above',
                                                     opts=['bold'], fg='yellow'))
        else:
            output.write_line(output.colorize('Installation okay', opts=['bold'], fg='green'))
    return output.successful
| |
#!/usr/bin/env python
#
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# a simple command line OF-CONFIG client
#
# a usage example:
# % PYTHONPATH=. ./bin/of_config_cli \
# --peers=sw1=localhost:1830:username:password
# (Cmd) raw_get sw1
import ryu.contrib
from ryu import cfg
import cmd
import sys
import lxml.etree as ET
from ryu.lib import of_config
from ryu.lib.of_config import capable_switch
from ncclient.operations.rpc import RPCError
import ryu.lib.of_config.classes as ofc
CONF = cfg.CONF
CONF.register_cli_opts([
cfg.ListOpt('peers', default=[], help='list of peers')
])
class Peer(capable_switch.OFCapableSwitch):
    """A named NETCONF session to one OF-Config capable switch."""

    def __init__(self, name, host, port, username, password):
        self._name = name
        # Accept any host key: fingerprint verification is intentionally
        # skipped for this simple CLI tool.
        super(Peer, self).__init__(
            host=host, port=port, username=username, password=password,
            unknown_host_cb=lambda host, fingerprint: True)
# Registry of connected switches, keyed by the user-assigned peer name.
peers = {}


def add_peer(name, host, port, username, password):
    # Connect to the switch and register the session under `name`.
    peers[name] = Peer(name, host, port, username, password)
def et_tostring_pp(tree):
    """Serialize `tree`, pretty-printed when the serializer supports it.

    ``pretty_print`` is an lxml-only keyword; a plain ElementTree serializer
    raises TypeError for it, in which case the compact form is returned.
    """
    pp_kwargs = {'pretty_print': True}
    try:
        return ET.tostring(tree, **pp_kwargs)
    except TypeError:
        return ET.tostring(tree)
def validate(tree):
    """Validate `tree` against the OF-Config 1.1.1 XML schema; print errors."""
    schema = ET.XMLSchema(file=of_config.OF_CONFIG_1_1_1_XSD)
    # Calling the schema object validates the tree (lxml API); on failure the
    # collected error log is printed, but validation is advisory only.
    if not schema(tree):
        print schema.error_log
class Cmd(cmd.Cmd):
    def __init__(self, *args, **kwargs):
        # Tracks whether a command is currently being dispatched; see onecmd().
        self._in_onecmd = False
        cmd.Cmd.__init__(self, *args, **kwargs)
    def _request(self, line, f):
        """Parse `line` as "<peer> [args...]", look the peer up, run f(peer, args).

        RPC errors and dropped connections are reported, not propagated.
        """
        args = line.split()
        try:
            peer = args[0]
        except:
            print "argument error"
            return
        try:
            p = peers[peer]
        except KeyError:
            print "unknown peer", peer
            return
        try:
            f(p, args[1:])
        except RPCError, e:
            print "RPC Error", e
        except EOFError:
            print "disconnected"
    def _complete_peer(self, text, line, _begidx, _endidx):
        # Only complete the first argument (the peer name).  The appended 'x'
        # makes a trailing space count as the start of a new word.
        if len((line + 'x').split()) >= 3:
            return []
        return [name for name in peers if name.startswith(text)]
    def do_list_cap(self, line):
        """list_cap <peer>
        """
        def f(p, args):
            # Print the NETCONF capabilities advertised by the peer.
            for i in p.netconf.server_capabilities:
                print i
        self._request(line, f)
    def do_raw_get(self, line):
        """raw_get <peer>
        """
        def f(p, args):
            result = p.raw_get()
            tree = ET.fromstring(result)
            # Report schema violations, then pretty-print the reply regardless.
            validate(tree)
            print et_tostring_pp(tree)
        self._request(line, f)
    def do_raw_get_config(self, line):
        """raw_get_config <peer> <source>
        """
        def f(p, args):
            try:
                source = args[0]
            except:
                print "argument error"
                return
            result = p.raw_get_config(source)
            tree = ET.fromstring(result)
            # Report schema violations, then pretty-print the reply regardless.
            validate(tree)
            print et_tostring_pp(tree)
        self._request(line, f)
    def do_get(self, line):
        """get <peer>
        eg. get sw1
        """
        def f(p, args):
            # OF-Config GET: state and configuration as parsed classes.
            print p.get()
        self._request(line, f)
    def do_commit(self, line):
        """commit <peer>
        eg. commit sw1
        """
        def f(p, args):
            # Commit the candidate configuration.
            print p.commit()
        self._request(line, f)
    def do_discard(self, line):
        """discard <peer>
        eg. discard sw1
        """
        def f(p, args):
            # Discard uncommitted changes in the candidate configuration.
            print p.discard_changes()
        self._request(line, f)
    def do_get_config(self, line):
        """get_config <peer> <source>
        eg. get_config sw1 startup
        """
        def f(p, args):
            try:
                source = args[0]
            except:
                print "argument error"
                return
            print p.get_config(source)
        self._request(line, f)
    def do_delete_config(self, line):
        """delete_config <peer> <source>
        eg. delete_config sw1 startup
        """
        def f(p, args):
            try:
                source = args[0]
            except:
                print "argument error"
                return
            print p.delete_config(source)
        self._request(line, f)
    def do_copy_config(self, line):
        """copy_config <peer> <source> <target>
        eg. copy_config sw1 running startup
        """
        def f(p, args):
            try:
                source, target = args
            except:
                print "argument error"
                return
            print p.copy_config(source, target)
        self._request(line, f)
    def do_list_port(self, line):
        """list_port <peer>
        """
        def f(p, args):
            o = p.get()
            # NOTE: the loop variable shadows the peer `p`; harmless because
            # the peer is not used after this point.
            for p in o.resources.port:
                print p.resource_id, p.name, p.number
        self._request(line, f)
_port_settings = [
'admin-state',
'no-forward',
'no-packet-in',
'no-receive',
]
    def do_get_port_config(self, line):
        """get_port_config <peer> <source> <port>
        eg. get_port_config sw1 running LogicalSwitch7-Port2
        """
        def f(p, args):
            try:
                source, port = args
            except:
                print "argument error"
                return
            o = p.get_config(source)
            for p in o.resources.port:
                if p.resource_id != port:
                    continue
                print p.resource_id
                conf = p.configuration
                # Only print the configuration leafs that are present.
                for k in self._port_settings:
                    try:
                        v = getattr(conf, k)
                    except AttributeError:
                        continue
                    print k, v
        self._request(line, f)
    def do_set_port_config(self, line):
        """set_port_config <peer> <target> <port> <key> <value>
        eg. set_port_config sw1 running LogicalSwitch7-Port2 admin-state down
        eg. set_port_config sw1 running LogicalSwitch7-Port2 no-forward false
        """
        def f(p, args):
            try:
                target, port, key, value = args
            except:
                print "argument error"
                print args
                return
            # get switch id
            o = p.get()
            capable_switch_id = o.id
            # Build a minimal capable-switch document containing only the one
            # port leaf being changed; an unknown key raises TypeError.
            try:
                capable_switch = ofc.OFCapableSwitchType(
                    id=capable_switch_id,
                    resources=ofc.OFCapableSwitchResourcesType(
                        port=[
                            ofc.OFPortType(
                                resource_id=port,
                                configuration=ofc.OFPortConfigurationType(
                                    **{key: value}))
                        ]
                    )
                )
            except TypeError:
                print "argument error"
                return
            try:
                p.edit_config(target, capable_switch)
            except Exception, e:
                print e
        self._request(line, f)
    def do_list_queue(self, line):
        """list_queue <peer>
        """
        def f(p, args):
            o = p.get()
            # `queue` may be absent/empty; guard before iterating.
            if o.resources.queue:
                for q in o.resources.queue:
                    print q.resource_id, q.port
        self._request(line, f)
_queue_settings = [
'max-rate',
'min-rate',
'experimenter',
]
    def do_get_queue_config(self, line):
        """get_queue_config <peer> <source> <queue>
        eg. get_queue_config sw1 running LogicalSwitch7-Port1-Queue922
        """
        def f(p, args):
            try:
                source, queue = args
            except:
                print "argument error"
                return
            o = p.get_config(source)
            for q in o.resources.queue:
                if q.resource_id != queue:
                    continue
                print q.resource_id
                conf = q.properties
                # Only print the property leafs that are present.
                for k in self._queue_settings:
                    try:
                        v = getattr(conf, k)
                    except AttributeError:
                        continue
                    print k, v
        self._request(line, f)
    def do_set_queue_config(self, line):
        """set_queue_config <peer> <target> <queue> <key> <value>
        eg. set_queue_config sw1 running LogicalSwitch7-Port1-Queue922 \
max-rate 100
        """
        def f(p, args):
            try:
                target, queue, key, value = args
            except:
                print "argument error"
                print args
                return
            # get switch id
            o = p.get()
            capable_switch_id = o.id
            # Build a minimal document with only the queue property to change;
            # an unknown key raises TypeError.
            try:
                capable_switch = ofc.OFCapableSwitchType(
                    id=capable_switch_id,
                    resources=ofc.OFCapableSwitchResourcesType(
                        queue=[
                            ofc.OFQueueType(
                                resource_id=queue,
                                properties=ofc.OFQueuePropertiesType(
                                    **{key: value})),
                        ]
                    )
                )
            except TypeError:
                print "argument error"
                return
            try:
                p.edit_config(target, capable_switch)
            except Exception, e:
                print e
        self._request(line, f)
    def do_add_queue(self, line):
        """add_queue <peer> <target> <logical-switch> <queue>
        eg. add_queue sw1 running LogicalSwitch7 NameOfNewQueue
        """
        def f(p, args):
            try:
                target, lsw, queue = args
            except:
                print "argument error"
                print args
                return
            # get switch id
            o = p.get()
            capable_switch_id = o.id
            # Declare the new queue resource and attach it to the given
            # logical switch in a single edit-config document.
            try:
                capable_switch = ofc.OFCapableSwitchType(
                    id=capable_switch_id,
                    resources=ofc.OFCapableSwitchResourcesType(
                        queue=[
                            ofc.OFQueueType(resource_id=queue)
                        ]
                    ),
                    logical_switches=ofc.OFCapableSwitchLogicalSwitchesType(
                        switch=[ofc.OFLogicalSwitchType(
                            id=lsw,
                            resources=ofc.OFLogicalSwitchResourcesType(
                                queue=[queue])
                        )]
                    )
                )
            except TypeError:
                print "argument error"
                return
            try:
                p.edit_config(target, capable_switch)
            except Exception, e:
                print e
        self._request(line, f)
    def do_list_logical_switch(self, line):
        """list_logical_switch <peer>
        """
        def f(p, args):
            o = p.get()
            # One line per logical switch: id and datapath id.
            for s in o.logical_switches.switch:
                print s.id, s.datapath_id
        self._request(line, f)
    def do_show_logical_switch(self, line):
        """show_logical_switch <peer> <logical switch>
        """
        def f(p, args):
            try:
                (lsw,) = args
            except:
                print "argument error"
                return
            o = p.get()
            for s in o.logical_switches.switch:
                if s.id != lsw:
                    continue
                print s.id
                print 'datapath-id', s.datapath_id
                if s.resources.queue:
                    print 'queues:'
                    for q in s.resources.queue:
                        print '\t', q
                if s.resources.port:
                    print 'ports:'
                    # NOTE: loop variable shadows the peer `p` (unused after).
                    for p in s.resources.port:
                        print '\t', p
        self._request(line, f)
_lsw_settings = [
'lost-connection-behavior',
]
    def do_get_logical_switch_config(self, line):
        """get_logical_switch_config <peer> <source> <logical switch>
        """
        def f(p, args):
            try:
                source, lsw = args
            except:
                print "argument error"
                return
            o = p.get_config(source)
            for l in o.logical_switches.switch:
                if l.id != lsw:
                    continue
                print l.id
                # Only print the leafs that are present.
                for k in self._lsw_settings:
                    try:
                        v = getattr(l, k)
                    except AttributeError:
                        continue
                    print k, v
        self._request(line, f)
    def do_set_logical_switch_config(self, line):
        """set_logical_switch_config <peer> <target> <logical switch> <key> <value>
        eg. set_logical_switch_config sw1 running LogicalSwitch7 \
lost-connection-behavior failStandaloneMode
        """
        def f(p, args):
            try:
                target, lsw, key, value = args
            except:
                print "argument error"
                return
            # get switch id
            o = p.get_config(target)
            capable_switch_id = o.id
            # Build a minimal document setting the one logical-switch leaf;
            # an unknown key raises TypeError.
            try:
                capable_switch = ofc.OFCapableSwitchType(
                    id=capable_switch_id,
                    logical_switches=ofc.OFCapableSwitchLogicalSwitchesType(
                        switch=[ofc.OFLogicalSwitchType(
                            id=lsw,
                            **{key: value}
                        )]
                    )
                )
            except TypeError:
                print "argument error"
                return
            try:
                p.edit_config(target, capable_switch)
            except Exception, e:
                print e
        self._request(line, f)
completedefault = _complete_peer
    def complete_EOF(self, _text, _line, _begidx, _endidx):
        # Nothing sensible to complete for EOF.
        return []
    def do_EOF(self, _line):
        # Ctrl-D terminates the CLI.
        sys.exit(0)
    def onecmd(self, string):
        # Flag that a command is being dispatched; the flag is reset even when
        # the command raises, thanks to the finally block.
        self._in_onecmd = True
        try:
            return cmd.Cmd.onecmd(self, string)
        finally:
            self._in_onecmd = False
def main(args=None, prog=None):
    """Parse --peers=<name>=<host>:<port>:<user>:<password>, connect, run CLI."""
    CONF(args=args, prog=prog,
         project='of-config-cli', version='of-config-cli')
    for p_str in CONF.peers:
        name, addr = p_str.split('=')
        # rsplit keeps any extra ':' with the host part -- TODO confirm the
        # intended host format.
        host, port, username, password = addr.rsplit(':', 3)
        add_peer(name, host, port, username, password)
    Cmd().cmdloop()


if __name__ == "__main__":
    main()
| |
import numpy
import pandas
from palm.base.data_predictor import DataPredictor
from palm.likelihood_prediction import LikelihoodPrediction
from palm.forward_calculator import ForwardCalculator
from palm.linalg import ScipyMatrixExponential, ScipyMatrixExponential2, DiagonalExpm, vector_product
from palm.probability_vector import VectorTrajectory, ProbabilityVector
from palm.rate_matrix import RateMatrixTrajectory
from palm.util import ALMOST_ZERO
class ForwardPredictor(DataPredictor):
"""
Computes the log likelihood of a trajectory using the Forward algorithm.
Attributes
----------
forward_calculator : ForwardCalculator
The calculator handles the linear algebra required to compute
the likelihood.
diag_forward_calculator : ForwardCalculator
A calculator for diagonal rate matrices. Useful for single dark
state models, in which the matrix of dark-to-dark transitions
is diagonal.
prediction_factory : class
A class that makes `Prediction` objects.
vector_trajectory : VectorTrajectory
Each intermediate step of the calculation results in a vector.
This data structure saves the intermediate vectors if `archive_matrices`
is True.
rate_matrix_trajectory : RateMatrixTrajectory
Data structure that saves the rate matrix at each intermediate step
of the calculation if `archive_matrices` is True.
scaling_factor_set : ScalingFactorSet
Probability vector is scaled at each step of the calculation
to prevent numerical underflow and the resulting scaling factors are
saved in this data structure.
Parameters
----------
expm_calculator : MatrixExponential
An object with a `compute_matrix_exp` method.
always_rebuild_rate_matrix : bool
Whether to rebuild rate matrix for every trajectory segment.
archive_matrices : bool, optional
Whether to save the intermediate results of the calculation for
later plotting, debugging, etc.
"""
def __init__(self, expm_calculator, always_rebuild_rate_matrix,
archive_matrices=False, diagonal_dark=False,
noisy=False):
super(ForwardPredictor, self).__init__()
self.always_rebuild_rate_matrix = always_rebuild_rate_matrix
self.archive_matrices = archive_matrices
self.diagonal_dark = diagonal_dark
diag_expm = DiagonalExpm()
self.forward_calculator = ForwardCalculator(expm_calculator)
self.diag_forward_calculator = ForwardCalculator(diag_expm)
self.prediction_factory = LikelihoodPrediction
self.vector_trajectory = None
self.rate_matrix_trajectory = None
self.scaling_factor_set = None
self.noisy = noisy
def predict_data(self, model, trajectory):
self.scaling_factor_set = self.compute_forward_vectors(
model, trajectory)
likelihood = 1./(self.scaling_factor_set.compute_product())
if likelihood < ALMOST_ZERO:
likelihood = ALMOST_ZERO
log_likelihood = numpy.log10(likelihood)
return self.prediction_factory(log_likelihood)
    def compute_forward_vectors(self, model, trajectory):
        """
        Computes forward vector for each trajectory segment, starting from
        the first segment and working forward toward the last segment.

        Parameters
        ----------
        model : BlinkModel
        trajectory : Trajectory

        Returns
        -------
        scaling_factor_set : ScalingFactorSet
            The per-step normalization factors; their product is the
            reciprocal of the trajectory likelihood.
        """
        if self.archive_matrices:
            self.rate_matrix_trajectory = RateMatrixTrajectory()
            self.vector_trajectory = VectorTrajectory(model.state_id_collection)
        else:
            pass
        # initialize probability vector
        scaling_factor_set = ScalingFactorSet(self.noisy)
        rate_matrix_organizer = RateMatrixOrganizer(model)
        rate_matrix_organizer.build_rate_matrix(time=0.0)
        init_prob = model.get_initial_probability_vector()
        # Normalize the vector; the factor is recorded for likelihood recovery.
        scaling_factor_set.scale_vector(init_prob)
        if self.archive_matrices:
            self.vector_trajectory.add_vector(0.0, init_prob)
        prev_alpha = init_prob
        # loop through trajectory segments, compute likelihood for each segment
        for segment_number, segment in enumerate(trajectory):
            if self.noisy:
                print 'segment %d' % segment_number
            # get current segment class and duration
            cumulative_time = trajectory.get_cumulative_time(segment_number)
            segment_duration = segment.get_duration()
            start_class = segment.get_class()
            # get next segment class (if there is a next one)
            next_segment = trajectory.get_segment(segment_number + 1)
            if next_segment:
                end_class = next_segment.get_class()
            else:
                end_class = None
            # update the rate matrix to reflect changes to
            # kinetic rates that vary with time.
            if self.always_rebuild_rate_matrix:
                rate_matrix_organizer.build_rate_matrix(time=cumulative_time)
            # skip updating the rate matrix. we should only do this when none of the rates vary with time.
            else:
                pass
            rate_matrix_aa = rate_matrix_organizer.get_submatrix(
                start_class, start_class)
            # rate_matrix_ab is None for the last segment (end_class is None).
            rate_matrix_ab = rate_matrix_organizer.get_submatrix(
                start_class, end_class)
            if self.archive_matrices:
                self.rate_matrix_trajectory.add_matrix(
                    rate_matrix_organizer.rate_matrix)
            else:
                pass
            alpha = self._compute_alpha( rate_matrix_aa, rate_matrix_ab,
                                         segment_number, segment_duration,
                                         start_class, end_class,
                                         prev_alpha)
            # scale probability vector to avoid numerical underflow
            scaled_alpha = scaling_factor_set.scale_vector(alpha)
            if scaled_alpha.is_finite() and scaled_alpha.is_positive():
                pass
            else:
                # Numerical failure: dump diagnostics (and the archived
                # vectors, when available) before aborting.
                print "Likelihood calculation failure"
                print rate_matrix_aa
                print rate_matrix_ab
                print scaled_alpha
                if self.archive_matrices:
                    df = self.vector_trajectory.convert_to_df()
                    output_csv = 'archived_vecs_from_crash.csv'
                    df.to_csv(output_csv, index=False)
                    print "Wrote", output_csv
                raise RuntimeError
            # store handle to current alpha vector for next iteration
            prev_alpha = scaled_alpha
            if self.archive_matrices:
                self.vector_trajectory.add_vector(cumulative_time, scaled_alpha)
        # end for loop
        # Fold in the final-state probabilities to get the total likelihood.
        final_prob_vec = model.get_final_probability_vector()
        total_alpha_scalar = vector_product(prev_alpha, final_prob_vec,
                                            do_alignment=True)
        total_alpha_vec = ProbabilityVector()
        total_alpha_vec.series = pandas.Series([total_alpha_scalar,])
        scaled_total_alpha = scaling_factor_set.scale_vector(total_alpha_vec)
        if self.archive_matrices:
            self.vector_trajectory.add_vector(trajectory.get_end_time(),
                                              scaled_total_alpha)
        return scaling_factor_set
def _compute_alpha(self, rate_matrix_aa, rate_matrix_ab, segment_number,
segment_duration, start_class, end_class, prev_alpha):
if self.diagonal_dark and start_class == 'dark':
alpha = self.diag_forward_calculator.compute_forward_vector(
prev_alpha, rate_matrix_aa, rate_matrix_ab,
segment_duration)
else:
alpha = self.forward_calculator.compute_forward_vector(
prev_alpha, rate_matrix_aa, rate_matrix_ab,
segment_duration)
return alpha
class ScalingFactorSet(object):
    """Accumulates the per-step factors used to normalize forward vectors.

    The product of all factors equals the reciprocal of the trajectory
    likelihood.
    """
    def __init__(self, noisy):
        self.factor_list = []
        # When True, print each scaled vector and its factor (debug aid).
        self.noisy = noisy
    def __len__(self):
        return len(self.factor_list)
    def __str__(self):
        return str(self.factor_list)
    def get_factor_set(self):
        return self.factor_list
    def append(self, factor):
        self.factor_list.append(factor)
    def compute_product(self):
        # NOTE(review): numpy.prod of many large factors can overflow to inf
        # for long trajectories -- confirm against expected trajectory sizes.
        scaling_factor_array = numpy.array(self.factor_list)
        return numpy.prod(scaling_factor_array)
    def scale_vector(self, vector):
        """Scale `vector` in place so it sums to one; record the factor used."""
        vector_sum = vector.sum_vector()
        if vector_sum < ALMOST_ZERO:
            # Guard against division by (near) zero.
            this_scaling_factor = 1./ALMOST_ZERO
        else:
            this_scaling_factor = 1./vector_sum
        vector.scale_vector(this_scaling_factor)
        self.append(this_scaling_factor)
        if self.noisy:
            print 'vector'
            print vector
            print 'scaling_factor:', this_scaling_factor
        return vector
class RateMatrixOrganizer(object):
    """
    Helper class for building rate matrices.

    Parameters
    ----------
    model : AggregatedKineticModel
        The model from which to build the rate matrix.
    """
    def __init__(self, model):
        super(RateMatrixOrganizer, self).__init__()
        self.model = model
        # Most recently built matrix; None until build_rate_matrix() runs.
        self.rate_matrix = None

    def build_rate_matrix(self, time):
        """Build and cache the model's rate matrix at `time`."""
        self.rate_matrix = self.model.build_rate_matrix(time=time)

    def get_submatrix(self, start_class, end_class):
        """Return the cached matrix's submatrix for start_class -> end_class
        transitions, or None when either class is missing (falsy)."""
        if not (start_class and end_class):
            return None
        return self.model.get_submatrix(
            self.rate_matrix, start_class, end_class)
| |
import numpy as np
from .._common import lhs, messages, optimizer, selection_async, selection_sync
from .._helpers import OptimizeResult, register
from ._constraints import _constraints_map
from ._strategy import _strategy_map
__all__ = [
"minimize",
]
def minimize(
    fun,
    bounds,
    x0=None,
    args=(),
    maxiter=100,
    popsize=10,
    mutation=0.5,
    recombination=0.9,
    strategy="best1bin",
    seed=None,
    xtol=1.0e-8,
    ftol=1.0e-8,
    constraints=None,
    updating="immediate",
    workers=1,
    backend=None,
    return_all=False,
    callback=None,
):
    """
    Minimize an objective function using Differential Evolution (DE).

    Parameters
    ----------
    fun : callable
        The objective function to be minimized. Must be in the form ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array and args is a tuple of any additional fixed parameters needed to completely specify the function.
    bounds : array_like
        Bounds for variables. ``(min, max)`` pairs for each element in ``x``, defining the finite lower and upper bounds for the optimizing argument of ``fun``. It is required to have ``len(bounds) == len(x)``. ``len(bounds)`` is used to determine the number of parameters in ``x``.
    x0 : array_like or None, optional, default None
        Initial population. Array of real elements with shape (``popsize``, ``ndim``), where ``ndim`` is the number of independent variables. If ``x0`` is not specified, the population is initialized using Latin Hypercube sampling.
    args : tuple, optional, default None
        Extra arguments passed to the objective function.
    maxiter : int, optional, default 100
        The maximum number of generations over which the entire population is evolved.
    popsize : int, optional, default 10
        Total population size.
    mutation : scalar, optional, default 0.5
        The mutation constant. In the literature this is also known as differential weight, being denoted by F. It should be in the range [0, 2]. Increasing the mutation constant increases the search radius, but will slow down convergence.
    recombination : scalar, optional, default 0.9
        The recombination constant, should be in the range [0, 1]. In the literature this is also known as the crossover probability, being denoted by CR. Increasing this value allows a larger number of mutants to progress into the next generation, but at the risk of population stability.
    strategy : str, optional, default 'best1bin'
        The differential evolution strategy to use. Should be one of:

        - 'rand1bin'
        - 'rand2bin'
        - 'best1bin'
        - 'best2bin'

    seed : int or None, optional, default None
        Seed for random number generator.
    xtol : scalar, optional, default 1.0e-8
        Solution tolerance for termination.
    ftol : scalar, optional, default 1.0e-8
        Objective function value tolerance for termination.
    constraints : str or None, optional, default None
        Constraints definition:

        - None: no constraint
        - 'Random': infeasible solutions are resampled in the feasible space defined by `bounds`

    updating : str {'immediate', 'deferred'}, optional, default 'immediate'
        If ``'immediate'``, the best solution vector is continuously updated within a single generation. This can lead to faster convergence as candidate solutions can take advantage of continuous improvements in the best solution. With ``'deferred'``, the best solution vector is updated once per generation. Only ``'deferred'`` is compatible with parallelization, and is overridden when ``workers`` is not ``0`` or ``1`` or ``backend == 'mpi'``.
    workers : int, optional, default 1
        The population is subdivided into workers sections and evaluated in parallel (uses :class:`joblib.Parallel`). Supply -1 to use all available CPU cores.
    backend : str {'loky', 'threading', 'mpi'} or None, optional, default None
        Parallel backend to use when ``workers`` is not ``0`` or ``1``:

        - 'loky': disable threading
        - 'threading': enable threading
        - 'mpi': use MPI (uses :mod:`mpi4py`)

    return_all : bool, optional, default False
        Set to True to return an array with shape (``nit``, ``popsize``, ``ndim``) of all the solutions at each iteration.
    callback : callable or None, optional, default None
        Called after each iteration. It is a callable with the signature ``callback(X, OptimizeResult state)``, where ``X`` is the current population and ``state`` is a partial :class:`stochopy.optimize.OptimizeResult` object with the same fields as the ones from the return (except ``"success"``, ``"status"`` and ``"message"``).

    Returns
    -------
    :class:`stochopy.optimize.OptimizeResult`
        The optimization result represented as a :class:`stochopy.optimize.OptimizeResult`. Important attributes are:

        - ``x``: the solution array
        - ``fun``: the solution function value
        - ``success``: a Boolean flag indicating if the optimizer exited successfully
        - ``message``: a string which describes the cause of the termination

    References
    ----------
    .. [1] R. Storn and K. Price, *Differential Evolution - A Simple and Efficient Heuristic for global Optimization over Continuous Spaces*, Journal of Global Optimization, 1997, 11(4): 341-359

    """
    # Cost function.  Improvement over the original: every validation error
    # now carries an explanatory message instead of a bare exception.
    if not callable(fun):
        raise TypeError("fun must be callable")

    # Dimensionality and search space
    if np.ndim(bounds) != 2:
        raise ValueError("bounds must be a 2-D array-like of (min, max) pairs")

    # Initial guess x0
    if x0 is not None:
        if np.ndim(x0) != 2 or np.shape(x0)[1] != len(bounds):
            raise ValueError("x0 must have shape (popsize, ndim) with ndim == len(bounds)")

    # Population size
    if popsize < 2:
        raise ValueError("popsize must be at least 2")
    if x0 is not None and len(x0) != popsize:
        raise ValueError("x0 and popsize are inconsistent: len(x0) must equal popsize")

    # DE parameters
    if not 0.0 <= mutation <= 2.0:
        raise ValueError("mutation (F) must be in the range [0, 2]")
    if not 0.0 <= recombination <= 1.0:
        raise ValueError("recombination (CR) must be in the range [0, 1]")
    if updating not in {"immediate", "deferred"}:
        raise ValueError("updating must be 'immediate' or 'deferred'")
    F = mutation
    CR = recombination
    mut = _strategy_map[strategy]

    # Synchronize: parallel evaluation and MPI require deferred updating.
    sync = updating == "deferred"
    sync = sync or workers not in {0, 1}
    sync = sync or backend == "mpi"

    # Seed
    if seed is not None:
        np.random.seed(seed)

    # Callback
    if callback is not None and not callable(callback):
        raise ValueError("callback must be callable")

    # Run in serial or parallel
    optargs = (
        bounds,
        x0,
        maxiter,
        popsize,
        F,
        CR,
        mut,
        constraints,
        xtol,
        ftol,
        return_all,
        callback,
    )
    res = de(fun, args, sync, workers, backend, *optargs)

    return res
@optimizer
def de(
    fun,
    args,
    sync,
    workers,
    backend,
    bounds,
    x0,
    maxiter,
    popsize,
    F,
    CR,
    mut,
    constraints,
    xtol,
    ftol,
    return_all,
    callback,
):
    """Optimize with DE.

    Core iteration loop shared by the synchronous and asynchronous updating
    schemes; see `minimize` for parameter semantics.  `fun` arrives wrapped
    by the `optimizer` decorator so it evaluates a whole population at once.
    """
    ndim = len(bounds)
    lower, upper = np.transpose(bounds)

    # Constraints
    cons = _constraints_map[constraints](lower, upper)

    # Iteration
    de_iter = de_sync if sync else de_async

    # Initial population (LHS when no x0 is given)
    X = x0 if x0 is not None else lhs(popsize, ndim, bounds)
    U = np.empty((popsize, ndim))

    # Evaluate initial population
    pfit = fun(X)
    pbestfit = pfit.copy()

    # Initial best solution
    gbidx = np.argmin(pbestfit)
    gfit = pbestfit[gbidx]
    gbest = X[gbidx].copy()

    # Initialize arrays
    if return_all:
        xall = np.empty((maxiter, popsize, ndim))
        funall = np.empty((maxiter, popsize))
        xall[0] = X.copy()
        funall[0] = pfit.copy()

    # First iteration for callback
    if callback is not None:
        res = OptimizeResult(x=gbest, fun=gfit, nfev=popsize, nit=1)
        if return_all:
            res.update({"xall": xall[:1], "funall": funall[:1]})

        callback(X, res)

    # Iterate until one of the termination criterion is satisfied
    it = 1
    converged = False
    while not converged:
        it += 1

        # Fresh crossover randoms for the whole population each generation.
        r1 = np.random.rand(popsize, ndim)
        X, gbest, pbestfit, gfit, pfit, status = de_iter(
            it,
            X,
            U,
            gbest,
            pbestfit,
            gfit,
            pfit,
            F,
            CR,
            r1,
            maxiter,
            xtol,
            ftol,
            fun,
            mut,
            cons,
        )

        if return_all:
            xall[it - 1] = X.copy()
            funall[it - 1] = pbestfit.copy()

        # A non-None status means a termination criterion fired.
        converged = status is not None

        if callback is not None:
            res = OptimizeResult(x=gbest, fun=gfit, nfev=it * popsize, nit=it)
            if return_all:
                res.update({"xall": xall[:it], "funall": funall[:it]})

            callback(X, res)

    # NOTE(review): status semantics (>= 0 success, < 0 failure) come from
    # the shared selection helpers -- confirm against `messages` keys.
    res = OptimizeResult(
        x=gbest,
        success=status >= 0,
        status=status,
        message=messages[status],
        fun=gfit,
        nfev=it * popsize,
        nit=it,
    )
    if return_all:
        res.update({"xall": xall[:it], "funall": funall[:it]})

    return res
def delete_shuffle_sync(popsize):
    """Shuffled candidate indices for every population member (synchronous).

    Column ``j`` of the result is a random permutation of all population
    indices except ``j`` itself, i.e. the stacked asynchronous shuffles.
    """
    columns = [delete_shuffle_async(j, popsize) for j in range(popsize)]
    return np.stack(columns, axis=1)
def delete_shuffle_async(i, popsize):
    """Random permutation of the population indices, excluding member ``i``."""
    others = np.delete(np.arange(popsize), i)
    return np.random.permutation(others)
def de_sync(
    it,
    X,
    U,
    gbest,
    pbestfit,
    gfit,
    pfit,
    F,
    CR,
    r1,
    maxiter,
    xtol,
    ftol,
    fun,
    mut,
    cons,
):
    """Synchronous DE: one whole-population mutation/recombination/selection step."""
    popsize, ndim = X.shape

    # Mutation: build donor vectors from shuffled population indices.
    V = mut(delete_shuffle_sync(popsize), F, X, gbest)

    # Recombination: each member inherits at least one donor component
    # (at one randomly forced dimension per member).
    mask = np.zeros_like(r1, dtype=bool)
    forced = np.random.randint(ndim, size=popsize)
    mask[np.arange(popsize), forced] = True
    U[:] = cons(np.where(np.logical_or(mask, r1 <= CR), V, X))

    # Selection: keep improvements and update the global best.
    gbest, gfit, pfit, status = selection_sync(
        it, U, gbest, X, pbestfit, maxiter, xtol, ftol, fun
    )

    return X, gbest, pbestfit, gfit, pfit, status
def de_async(
    it,
    X,
    U,
    gbest,
    pbestfit,
    gfit,
    pfit,
    F,
    CR,
    r1,
    maxiter,
    xtol,
    ftol,
    fun,
    mut,
    cons,
):
    """Asynchronous DE.

    One generation in which each individual is mutated, recombined and
    selected immediately, one at a time, so later individuals in the loop
    can already exploit earlier replacements (presumably via in-place
    updates performed by ``selection_async`` -- confirm against its
    definition).
    """
    popsize, ndim = X.shape
    for i in range(popsize):
        # Mutation: donor built from shuffled indices excluding i
        V = mut(delete_shuffle_async(i, popsize), F, X, gbest)
        # Recombination: force at least one component to come from the donor
        mask = np.zeros(ndim, dtype=bool)
        irand = np.random.randint(ndim)
        mask[irand] = True
        U[i] = cons(np.where(np.logical_or(mask, r1[i] <= CR), V, X[i]))
        # Selection: immediately decide between trial U[i] and parent X[i]
        gbest, gfit, pfit[i], status = selection_async(
            it, U, gbest, gfit, X, pbestfit, maxiter, xtol, ftol, fun, i
        )
    # Stop if maximum iteration is reached
    if status is None and it >= maxiter:
        status = -1
    return X, gbest, pbestfit, gfit, pfit, status
register("de", minimize)
| |
# $Id: core.py 7466 2012-06-25 14:56:51Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Calling the ``publish_*`` convenience functions (or instantiating a
`Publisher` object) with component names will result in default
behavior. For custom behavior (setting component options), create
custom component objects first, and pass *them* to
``publish_*``/`Publisher`. See `The Docutils Publisher`_.
.. _The Docutils Publisher: http://docutils.sf.net/docs/api/publisher.html
"""
__docformat__ = 'reStructuredText'
import sys
import pprint
from docutils import __version__, __version_details__, SettingsSpec
from docutils import frontend, io, utils, readers, writers
from docutils.frontend import OptionParser
from docutils.transforms import Transformer
from docutils.utils.error_reporting import ErrorOutput, ErrorString
import docutils.readers.doctree
class Publisher:

    """
    A facade encapsulating the high-level logic of a Docutils system.
    """

    def __init__(self, reader=None, parser=None, writer=None,
                 source=None, source_class=io.FileInput,
                 destination=None, destination_class=io.FileOutput,
                 settings=None):
        """
        Initial setup.  If any of `reader`, `parser`, or `writer` are not
        specified, the corresponding ``set_...`` method should be called with
        a component name (`set_reader` sets the parser as well).
        """

        self.document = None
        """The document tree (`docutils.nodes` objects)."""

        self.reader = reader
        """A `docutils.readers.Reader` instance."""

        self.parser = parser
        """A `docutils.parsers.Parser` instance."""

        self.writer = writer
        """A `docutils.writers.Writer` instance."""

        # Catch a common caller mistake early: component *names* passed
        # where component *instances* are expected.
        for component in 'reader', 'parser', 'writer':
            assert not isinstance(getattr(self, component), str), (
                'passed string "%s" as "%s" parameter; pass an instance, '
                'or use the "%s_name" parameter instead (in '
                'docutils.core.publish_* convenience functions).'
                % (getattr(self, component), component, component))

        self.source = source
        """The source of input data, a `docutils.io.Input` instance."""

        self.source_class = source_class
        """The class for dynamically created source objects."""

        self.destination = destination
        """The destination for docutils output, a `docutils.io.Output`
        instance."""

        self.destination_class = destination_class
        """The class for dynamically created destination objects."""

        self.settings = settings
        """An object containing Docutils settings as instance attributes.
        Set by `self.process_command_line()` or `self.get_settings()`."""

        self._stderr = ErrorOutput()

    def set_reader(self, reader_name, parser, parser_name):
        """Set `self.reader` by name (also adopts the reader's parser)."""
        reader_class = readers.get_reader_class(reader_name)
        self.reader = reader_class(parser, parser_name)
        self.parser = self.reader.parser

    def set_writer(self, writer_name):
        """Set `self.writer` by name."""
        writer_class = writers.get_writer_class(writer_name)
        self.writer = writer_class()

    def set_components(self, reader_name, parser_name, writer_name):
        """Instantiate (by name) any component not already set."""
        if self.reader is None:
            self.set_reader(reader_name, self.parser, parser_name)
        if self.parser is None:
            if self.reader.parser is None:
                self.reader.set_parser(parser_name)
            self.parser = self.reader.parser
        if self.writer is None:
            self.set_writer(writer_name)

    def setup_option_parser(self, usage=None, description=None,
                            settings_spec=None, config_section=None,
                            **defaults):
        """Return an `OptionParser` fed by all components' settings specs."""
        if config_section:
            if not settings_spec:
                settings_spec = SettingsSpec()
            settings_spec.config_section = config_section
            parts = config_section.split()
            if len(parts) > 1 and parts[-1] == 'application':
                settings_spec.config_section_dependencies = ['applications']
        #@@@ Add self.source & self.destination to components in future?
        option_parser = OptionParser(
            components=(self.parser, self.reader, self.writer, settings_spec),
            defaults=defaults, read_config_files=True,
            usage=usage, description=description)
        return option_parser

    def get_settings(self, usage=None, description=None,
                     settings_spec=None, config_section=None, **defaults):
        """
        Set and return default settings (overrides in `defaults` dict).

        Set components first (`self.set_reader` & `self.set_writer`).
        Explicitly setting `self.settings` disables command line option
        processing from `self.publish()`.
        """
        option_parser = self.setup_option_parser(
            usage, description, settings_spec, config_section, **defaults)
        self.settings = option_parser.get_default_values()
        return self.settings

    def process_programmatic_settings(self, settings_spec,
                                      settings_overrides,
                                      config_section):
        """Compute `self.settings` for programmatic (non-CLI) use."""
        if self.settings is None:
            defaults = (settings_overrides or {}).copy()
            # Propagate exceptions by default when used programmatically:
            defaults.setdefault('traceback', True)
            self.get_settings(settings_spec=settings_spec,
                              config_section=config_section,
                              **defaults)

    def process_command_line(self, argv=None, usage=None, description=None,
                             settings_spec=None, config_section=None,
                             **defaults):
        """
        Pass an empty list to `argv` to avoid reading `sys.argv` (the
        default).

        Set components first (`self.set_reader` & `self.set_writer`).
        """
        option_parser = self.setup_option_parser(
            usage, description, settings_spec, config_section, **defaults)
        if argv is None:
            argv = sys.argv[1:]
            # converting to Unicode (Python 3 does this automatically):
            if sys.version_info < (3, 0):
                # TODO: make this failsafe and reversible?
                argv_encoding = (frontend.locale_encoding or 'ascii')
                argv = [a.decode(argv_encoding) for a in argv]
        self.settings = option_parser.parse_args(argv)

    def set_io(self, source_path=None, destination_path=None):
        """Create source and destination objects if not already supplied."""
        if self.source is None:
            self.set_source(source_path=source_path)
        if self.destination is None:
            self.set_destination(destination_path=destination_path)

    def set_source(self, source=None, source_path=None):
        """Create `self.source` from `source`/`source_path` and settings."""
        if source_path is None:
            source_path = self.settings._source
        else:
            self.settings._source = source_path
        # NOTE: a previous revision wrapped this call in
        # ``try/except TypeError`` with an *identical* call in the handler
        # (a leftover fallback for a removed keyword argument).  The
        # duplicate could never succeed where the first call failed, so the
        # dead branch has been removed.
        self.source = self.source_class(
            source=source, source_path=source_path,
            encoding=self.settings.input_encoding)

    def set_destination(self, destination=None, destination_path=None):
        """Create `self.destination` from the given arguments and settings."""
        if destination_path is None:
            destination_path = self.settings._destination
        else:
            self.settings._destination = destination_path
        self.destination = self.destination_class(
            destination=destination, destination_path=destination_path,
            encoding=self.settings.output_encoding,
            error_handler=self.settings.output_encoding_error_handler)

    def apply_transforms(self):
        """Populate the document's transformer from all components, then run."""
        self.document.transformer.populate_from_components(
            (self.source, self.reader, self.reader.parser, self.writer,
             self.destination))
        self.document.transformer.apply_transforms()

    def publish(self, argv=None, usage=None, description=None,
                settings_spec=None, settings_overrides=None,
                config_section=None, enable_exit_status=False):
        """
        Process command line options and arguments (if `self.settings` not
        already set), run `self.reader` and then `self.writer`.  Return
        `self.writer`'s output.
        """
        do_exit = None  # renamed from ``exit`` to avoid shadowing the builtin
        try:
            if self.settings is None:
                self.process_command_line(
                    argv, usage, description, settings_spec, config_section,
                    **(settings_overrides or {}))
            self.set_io()
            self.document = self.reader.read(self.source, self.parser,
                                             self.settings)
            self.apply_transforms()
            output = self.writer.write(self.document, self.destination)
            self.writer.assemble_parts()
        except SystemExit as error:
            do_exit = 1
            exit_status = error.code
        except Exception as error:
            if not self.settings:       # exception too early to report nicely
                raise
            if self.settings.traceback:  # Propagate exceptions?
                self.debugging_dumps()
                raise
            self.report_Exception(error)
            do_exit = True
            exit_status = 1
        self.debugging_dumps()
        if (enable_exit_status and self.document
            and (self.document.reporter.max_level
                 >= self.settings.exit_status_level)):
            sys.exit(self.document.reporter.max_level + 10)
        elif do_exit:
            sys.exit(exit_status)
        return output

    def debugging_dumps(self):
        """Dump settings/internals/transforms/pseudo-XML per debug settings."""
        if not self.document:
            return
        if self.settings.dump_settings:
            print('\n::: Runtime settings:', file=self._stderr)
            print(pprint.pformat(self.settings.__dict__), file=self._stderr)
        if self.settings.dump_internals:
            print('\n::: Document internals:', file=self._stderr)
            print(pprint.pformat(self.document.__dict__), file=self._stderr)
        if self.settings.dump_transforms:
            print('\n::: Transforms applied:', file=self._stderr)
            print((' (priority, transform class, '
                   'pending node details, keyword args)'), file=self._stderr)
            print(pprint.pformat(
                [(priority, '%s.%s' % (xclass.__module__, xclass.__name__),
                  pending and pending.details, kwargs)
                 for priority, xclass, pending, kwargs
                 in self.document.transformer.applied]), file=self._stderr)
        if self.settings.dump_pseudo_xml:
            print('\n::: Pseudo-XML:', file=self._stderr)
            print(self.document.pformat().encode(
                'raw_unicode_escape'), file=self._stderr)

    def report_Exception(self, error):
        """Print a user-oriented report for `error` on stderr."""
        if isinstance(error, utils.SystemMessage):
            self.report_SystemMessage(error)
        elif isinstance(error, UnicodeEncodeError):
            self.report_UnicodeError(error)
        elif isinstance(error, io.InputError):
            self._stderr.write('Unable to open source file for reading:\n'
                               '  %s\n' % ErrorString(error))
        elif isinstance(error, io.OutputError):
            self._stderr.write(
                'Unable to open destination file for writing:\n'
                '  %s\n' % ErrorString(error))
        else:
            print('%s' % ErrorString(error), file=self._stderr)
            print(("""\
Exiting due to error.  Use "--traceback" to diagnose.
Please report errors to <docutils-users@lists.sf.net>.
Include "--traceback" output, Docutils version (%s [%s]),
Python version (%s), your OS type & version, and the
command line used.""" % (__version__, __version_details__,
                         sys.version.split()[0])), file=self._stderr)

    def report_SystemMessage(self, error):
        """Report a docutils system message that aborted processing."""
        print(('Exiting due to level-%s (%s) system message.'
               % (error.level,
                  utils.Reporter.levels[error.level])), file=self._stderr)

    def report_UnicodeError(self, error):
        """Report an output-encoding failure with remediation advice."""
        data = error.object[error.start:error.end]
        self._stderr.write(
            '%s\n'
            '\n'
            'The specified output encoding (%s) cannot\n'
            'handle all of the output.\n'
            'Try setting "--output-encoding-error-handler" to\n'
            '\n'
            '* "xmlcharrefreplace" (for HTML & XML output);\n'
            '  the output will contain "%s" and should be usable.\n'
            '* "backslashreplace" (for other output formats);\n'
            '  look for "%s" in the output.\n'
            '* "replace"; look for "?" in the output.\n'
            '\n'
            '"--output-encoding-error-handler" is currently set to "%s".\n'
            '\n'
            'Exiting due to error.  Use "--traceback" to diagnose.\n'
            'If the advice above doesn\'t eliminate the error,\n'
            'please report it to <docutils-users@lists.sf.net>.\n'
            'Include "--traceback" output, Docutils version (%s),\n'
            'Python version (%s), your OS type & version, and the\n'
            'command line used.\n'
            % (ErrorString(error),
               self.settings.output_encoding,
               data.encode('ascii', 'xmlcharrefreplace'),
               data.encode('ascii', 'backslashreplace'),
               self.settings.output_encoding_error_handler,
               __version__, sys.version.split()[0]))
# Shared defaults for the command-line convenience functions below.
default_usage = '%prog [options] [<source> [<destination>]]'
default_description = ('Reads from <source> (default is stdin) and writes to '
                       '<destination> (default is stdout). See '
                       '<http://docutils.sf.net/docs/user/config.html> for '
                       'the full reference.')
def publish_cmdline(reader=None, reader_name='standalone',
                    parser=None, parser_name='restructuredtext',
                    writer=None, writer_name='pseudoxml',
                    settings=None, settings_spec=None,
                    settings_overrides=None, config_section=None,
                    enable_exit_status=True, argv=None,
                    usage=default_usage, description=default_description):
    """
    Run a `Publisher` with file I/O taken from the command line, returning
    the encoded string output as well.

    Parameters: see `publish_programmatically` for the remainder.

    - `argv`: Command-line argument list used instead of ``sys.argv[1:]``.
    - `usage`: Usage string printed when command-line parsing fails.
    - `description`: Program description shown for the "--help" option
      (along with command-line option descriptions).
    """
    publisher = Publisher(reader, parser, writer, settings=settings)
    publisher.set_components(reader_name, parser_name, writer_name)
    return publisher.publish(
        argv, usage, description, settings_spec, settings_overrides,
        config_section=config_section, enable_exit_status=enable_exit_status)
def publish_file(source=None, source_path=None,
                 destination=None, destination_path=None,
                 reader=None, reader_name='standalone',
                 parser=None, parser_name='restructuredtext',
                 writer=None, writer_name='pseudoxml',
                 settings=None, settings_spec=None, settings_overrides=None,
                 config_section=None, enable_exit_status=False):
    """
    Run a `Publisher` programmatically with file-like I/O and return the
    encoded string output as well.

    Parameters: see `publish_programmatically`.
    """
    encoded_output, _publisher = publish_programmatically(
        source_class=io.FileInput, source=source, source_path=source_path,
        destination_class=io.FileOutput,
        destination=destination, destination_path=destination_path,
        reader=reader, reader_name=reader_name,
        parser=parser, parser_name=parser_name,
        writer=writer, writer_name=writer_name,
        settings=settings, settings_spec=settings_spec,
        settings_overrides=settings_overrides,
        config_section=config_section,
        enable_exit_status=enable_exit_status)
    return encoded_output
def publish_string(source, source_path=None, destination_path=None,
                   reader=None, reader_name='standalone',
                   parser=None, parser_name='restructuredtext',
                   writer=None, writer_name='pseudoxml',
                   settings=None, settings_spec=None,
                   settings_overrides=None, config_section=None,
                   enable_exit_status=False):
    """
    Run a `Publisher` programmatically with string I/O and return the
    encoded string or Unicode string output.

    For encoded string output, set the 'output_encoding' setting to the
    desired encoding; set it to 'unicode' for unencoded Unicode output::

        publish_string(..., settings_overrides={'output_encoding': 'unicode'})

    Similarly for Unicode string input (`source`)::

        publish_string(..., settings_overrides={'input_encoding': 'unicode'})

    Parameters: see `publish_programmatically`.
    """
    encoded_output, _publisher = publish_programmatically(
        source_class=io.StringInput, source=source, source_path=source_path,
        destination_class=io.StringOutput,
        destination=None, destination_path=destination_path,
        reader=reader, reader_name=reader_name,
        parser=parser, parser_name=parser_name,
        writer=writer, writer_name=writer_name,
        settings=settings, settings_spec=settings_spec,
        settings_overrides=settings_overrides,
        config_section=config_section,
        enable_exit_status=enable_exit_status)
    return encoded_output
def publish_parts(source, source_path=None, source_class=io.StringInput,
                  destination_path=None,
                  reader=None, reader_name='standalone',
                  parser=None, parser_name='restructuredtext',
                  writer=None, writer_name='pseudoxml',
                  settings=None, settings_spec=None,
                  settings_overrides=None, config_section=None,
                  enable_exit_status=False):
    """
    Run a `Publisher` and return a dictionary of document parts.

    Dictionary keys are part names; values are Unicode strings (encoding is
    up to the client).  For programmatic use with string I/O.

    For encoded string input, set the 'input_encoding' setting to the
    encoding used; set it to 'unicode' for unencoded Unicode input::

        publish_parts(..., settings_overrides={'input_encoding': 'unicode'})

    Parameters: see `publish_programmatically`.
    """
    _output, publisher = publish_programmatically(
        source=source, source_path=source_path, source_class=source_class,
        destination_class=io.StringOutput,
        destination=None, destination_path=destination_path,
        reader=reader, reader_name=reader_name,
        parser=parser, parser_name=parser_name,
        writer=writer, writer_name=writer_name,
        settings=settings, settings_spec=settings_spec,
        settings_overrides=settings_overrides,
        config_section=config_section,
        enable_exit_status=enable_exit_status)
    return publisher.writer.parts
def publish_doctree(source, source_path=None,
                    source_class=io.StringInput,
                    reader=None, reader_name='standalone',
                    parser=None, parser_name='restructuredtext',
                    settings=None, settings_spec=None,
                    settings_overrides=None, config_section=None,
                    enable_exit_status=False):
    """
    Run a `Publisher` with string I/O and return the document tree.

    For encoded string input, set the 'input_encoding' setting to the
    encoding used; set it to 'unicode' for unencoded Unicode input::

        publish_doctree(..., settings_overrides={'input_encoding': 'unicode'})

    Parameters: see `publish_programmatically`.
    """
    # The writer is a no-op ('null'): only the parsed document is wanted.
    publisher = Publisher(reader=reader, parser=parser, writer=None,
                          settings=settings,
                          source_class=source_class,
                          destination_class=io.NullOutput)
    publisher.set_components(reader_name, parser_name, 'null')
    publisher.process_programmatic_settings(
        settings_spec, settings_overrides, config_section)
    publisher.set_source(source, source_path)
    publisher.set_destination(None, None)
    publisher.publish(enable_exit_status=enable_exit_status)
    return publisher.document
def publish_from_doctree(document, destination_path=None,
                         writer=None, writer_name='pseudoxml',
                         settings=None, settings_spec=None,
                         settings_overrides=None, config_section=None,
                         enable_exit_status=False):
    """
    Run a `Publisher` to render an existing document tree; return the
    encoded string output.

    Note that ``document.settings`` is overridden; to keep the settings of
    the original `document`, pass ``settings=document.settings``.  New
    ``document.transformer`` and ``document.reporter`` objects are
    generated as well.

    For encoded string output, set the 'output_encoding' setting to the
    desired encoding; set it to 'unicode' for unencoded Unicode output::

        publish_from_doctree(
            ..., settings_overrides={'output_encoding': 'unicode'})

    Parameters: `document` is a `docutils.nodes.document` object, an
    existing document tree.  Other parameters: see
    `publish_programmatically`.
    """
    # The doctree reader re-uses the already-parsed tree; no parsing occurs.
    doctree_reader = docutils.readers.doctree.Reader(parser_name='null')
    publisher = Publisher(doctree_reader, None, writer,
                          source=io.DocTreeInput(document),
                          destination_class=io.StringOutput,
                          settings=settings)
    if not writer and writer_name:
        publisher.set_writer(writer_name)
    publisher.process_programmatic_settings(
        settings_spec, settings_overrides, config_section)
    publisher.set_destination(None, destination_path)
    return publisher.publish(enable_exit_status=enable_exit_status)
def publish_cmdline_to_binary(reader=None, reader_name='standalone',
                              parser=None, parser_name='restructuredtext',
                              writer=None, writer_name='pseudoxml',
                              settings=None, settings_spec=None,
                              settings_overrides=None, config_section=None,
                              enable_exit_status=True, argv=None,
                              usage=default_usage,
                              description=default_description,
                              destination=None,
                              destination_class=io.BinaryFileOutput
                              ):
    """
    Run a `Publisher` with command-line file I/O, writing binary output;
    return the encoded string output as well.

    Identical to `publish_cmdline` except that `io.BinaryFileOutput` is
    used instead of `io.FileOutput`.

    Parameters: see `publish_programmatically` for the remainder.

    - `argv`: Command-line argument list used instead of ``sys.argv[1:]``.
    - `usage`: Usage string printed when command-line parsing fails.
    - `description`: Program description shown for the "--help" option
      (along with command-line option descriptions).
    """
    publisher = Publisher(reader, parser, writer, settings=settings,
                          destination_class=destination_class)
    publisher.set_components(reader_name, parser_name, writer_name)
    return publisher.publish(
        argv, usage, description, settings_spec, settings_overrides,
        config_section=config_section, enable_exit_status=enable_exit_status)
def publish_programmatically(source_class, source, source_path,
                             destination_class, destination, destination_path,
                             reader, reader_name,
                             parser, parser_name,
                             writer, writer_name,
                             settings, settings_spec,
                             settings_overrides, config_section,
                             enable_exit_status):
    """
    Run a fully-customized `Publisher`; return the encoded string output
    and the `Publisher` object.

    Applications should not normally call this function directly; if it
    does seem necessary, please write to the Docutils-develop mailing list
    <http://docutils.sf.net/docs/user/mailing-lists.html#docutils-develop>.

    Parameters:

    * `source_class` **required**: Class for dynamically created source
      objects; typically `io.FileInput` or `io.StringInput`.
    * `source`: With `io.FileInput`, a file-like object (with 'read' and
      'close' methods) or ``None`` (`source_path` is opened; `sys.stdin`
      if neither is supplied).  With `io.StringInput` **required**: the
      input string — encoded 8-bit (set 'input_encoding' accordingly) or
      Unicode (set 'input_encoding' to 'unicode').
    * `source_path`: With `io.FileInput`, path to the input file, opened
      if no `source` supplied.  With `io.StringInput`, optional path used
      only for diagnostic output.
    * `destination_class` **required**: Class for dynamically created
      destination objects; typically `io.FileOutput` or `io.StringOutput`.
    * `destination`: With `io.FileOutput`, a file-like object (with
      'write' and 'close' methods) or ``None`` (`destination_path` is
      opened; `sys.stdout` if neither is supplied).  With
      `io.StringOutput`: not used; pass ``None``.
    * `destination_path`: With `io.FileOutput`, path to the output file,
      opened if no `destination` supplied.  With `io.StringOutput`,
      optional path used for determining relative paths (stylesheets,
      source links, etc.).
    * `reader` / `reader_name`: A `docutils.readers.Reader` object, or the
      name/alias of the Reader class to instantiate when none is supplied.
    * `parser` / `parser_name`: A `docutils.parsers.Parser` object, or the
      name/alias of the Parser class to instantiate when none is supplied.
    * `writer` / `writer_name`: A `docutils.writers.Writer` object, or the
      name/alias of the Writer class to instantiate when none is supplied.
    * `settings`: A runtime settings (`docutils.frontend.Values`) object —
      the end result of `SettingsSpec`, config-file, and option
      processing.  If passed, it is assumed complete and no further
      setting/config/option processing is done.
    * `settings_spec`: A `docutils.SettingsSpec` subclass or object
      providing extra application-specific settings definitions (the
      application acts as another component).  Used only if no `settings`
      specified.
    * `settings_overrides`: A dict of application-specific setting
      defaults overriding component defaults.  Used only if no `settings`
      specified.
    * `config_section`: Name of this application's configuration-file
      section; overrides `settings_spec`'s ``config_section``.  Used only
      if no `settings` specified.
    * `enable_exit_status`: Boolean; enable exit status at end of
      processing?
    """
    publisher = Publisher(reader, parser, writer, settings=settings,
                          source_class=source_class,
                          destination_class=destination_class)
    publisher.set_components(reader_name, parser_name, writer_name)
    publisher.process_programmatic_settings(
        settings_spec, settings_overrides, config_section)
    publisher.set_source(source, source_path)
    publisher.set_destination(destination, destination_path)
    encoded_output = publisher.publish(enable_exit_status=enable_exit_status)
    return encoded_output, publisher
| |
# Copyright (c) 2013 Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from blazarnova.scheduler.filters import blazar_filter
from nova import objects
from nova import test
from nova.tests.unit.scheduler import fakes
from nova.virt import fake
from oslo_config import cfg
FLAVOR_EXTRA_SPEC = "aggregate_instance_extra_specs:reservation"
class BlazarFilterTestCase(test.TestCase):
"""Filter test case.
This test case provides tests for the schedule filters available
on Blazar.
"""
def setUp(self):
    """Create the filter under test, a fake host, and a base request spec.

    The attributes built here (``self.f``, ``self.host``, ``self.spec_obj``)
    are shared fixtures mutated by the individual test methods.
    """
    super(BlazarFilterTestCase, self).setUp()
    # Let's have at hand a brand new blazar filter
    self.f = blazar_filter.BlazarFilter()
    # A fake host state
    self.host = fakes.FakeHostState('host1', 'node1', {})
    # A fake instance (which has a reservation id 'r-fakeres')
    fake.FakeInstance('instance1', 'Running', '123')
    # And a base spec_obj: project 'fakepj', no hints, empty extra specs
    self.spec_obj = objects.RequestSpec(
        project_id='fakepj',
        scheduler_hints={},
        flavor=objects.Flavor(flavorid='flavor-id1', extra_specs={})
    )
def test_blazar_filter_no_pool_available_requested(self):
    """A host in no aggregate must fail when a reservation is requested."""
    # Host belongs to no pool at all
    self.host.aggregates = []
    # But the request asks for the 'r-fakeres' pool
    self.spec_obj.scheduler_hints = {'reservation': ['r-fakeres']}
    # Run the filter: the host must be rejected
    self.host.passes = self.f.host_passes(self.host, self.spec_obj)
    self.assertFalse(self.host.passes)
def test_blazar_filter_no_pool_available_not_requested(self):
    """A host in no aggregate passes when no reservation is requested."""
    # Host belongs to no pool at all
    self.host.aggregates = []
    # No scheduler hints: the spec from setUp() requests no pool.
    # Run the filter: the host must be accepted
    self.host.passes = self.f.host_passes(self.host, self.spec_obj)
    self.assertTrue(self.host.passes)
def test_blazar_filter_host_in_freepool_and_none_requested(self):
    """A host parked in the freepool is rejected for regular requests."""
    freepool = cfg.CONF['blazar:physical:host'].aggregate_freepool_name
    self.host.aggregates = [
        objects.Aggregate(
            name=freepool,
            metadata={'availability_zone': 'unknown',
                      self.spec_obj.project_id: True})]
    # No reservation requested (spec as built in setUp())
    # Run the filter: freepool hosts are reserved capacity, so reject
    self.host.passes = self.f.host_passes(self.host, self.spec_obj)
    self.assertFalse(self.host.passes)
def test_blazar_filter_host_in_pool_none_requested(self):
    """A reserved host is rejected when no reservation is requested."""
    az_prefix = cfg.CONF['blazar:physical:host'].blazar_az_prefix
    # Host belongs to the 'r-fakeres' reservation pool
    self.host.aggregates = [
        objects.Aggregate(
            name='r-fakeres',
            metadata={'availability_zone': az_prefix + 'XX',
                      self.spec_obj.project_id: True})]
    # The spec from setUp() requests no pool
    # Run the filter: reserved hosts are off-limits without a hint
    self.host.passes = self.f.host_passes(self.host, self.spec_obj)
    self.assertFalse(self.host.passes)
def test_blazar_filter_host_in_another_pool(self):
    """A host in a different pool than the requested one is rejected."""
    # Host belongs to some other pool, not 'r-fakeres'
    self.host.aggregates = [
        objects.Aggregate(
            name='not_the_r-fakeres_pool',
            metadata={'availability_zone': 'unknown',
                      self.spec_obj.project_id: True})]
    # The request asks for the 'r-fakeres' pool
    self.spec_obj.scheduler_hints = {'reservation': ['r-fakeres']}
    # Run the filter: pool names do not match, so reject
    self.host.passes = self.f.host_passes(self.host, self.spec_obj)
    self.assertFalse(self.host.passes)
def test_blazar_filter_host_not_auth_in_current_tenant(self):
    """Reject when the pool name differs and the tenant is not authorized."""
    # Pool name is deliberately NOT 'r-fakeres', and the project flag
    # in the aggregate metadata is False (tenant not authorized)
    self.host.aggregates = [
        objects.Aggregate(
            name='r-fackers',
            metadata={'availability_zone': 'unknown',
                      self.spec_obj.project_id: False})]
    # The request asks for the 'r-fakeres' pool
    self.spec_obj.scheduler_hints = {'reservation': ['r-fakeres']}
    # Run the filter: the host must be rejected
    self.host.passes = self.f.host_passes(self.host, self.spec_obj)
    self.assertFalse(self.host.passes)
def test_blazar_filter_host_auth_in_current_tenant(self):
    """Pass when the requested pool matches and the tenant is authorized."""
    az_prefix = cfg.CONF['blazar:physical:host'].blazar_az_prefix
    # Host is in the 'r-fakeres' pool with the project flag set to True
    self.host.aggregates = [
        objects.Aggregate(
            name='r-fakeres',
            metadata={'availability_zone': az_prefix,
                      self.spec_obj.project_id: True})]
    # The request asks for the 'r-fakeres' pool
    self.spec_obj.scheduler_hints = {'reservation': ['r-fakeres']}
    # Run the filter: the host must be accepted
    self.host.passes = self.f.host_passes(self.host, self.spec_obj)
    self.assertTrue(self.host.passes)
def test_blazar_filter_host_authorized_by_owner(self):
    """Pass when the blazar owner key matches the current project id."""
    conf = cfg.CONF['blazar:physical:host']
    # Owner key maps to the current project even though the per-project
    # flag is False
    self.host.aggregates = [
        objects.Aggregate(
            name='r-fakeres',
            metadata={'availability_zone': conf.blazar_az_prefix,
                      conf.blazar_owner: self.spec_obj.project_id,
                      self.spec_obj.project_id: False})]
    # The request asks for the 'r-fakeres' pool
    self.spec_obj.scheduler_hints = {'reservation': ['r-fakeres']}
    # Run the filter: ownership authorizes the host
    self.host.passes = self.f.host_passes(self.host, self.spec_obj)
    self.assertTrue(self.host.passes)
def test_blazar_filter_host_not_authorized_by_owner(self):
    """Reject when the blazar owner is a different project id."""
    conf = cfg.CONF['blazar:physical:host']
    # Owner key maps to some other project; no per-project flag present
    self.host.aggregates = [
        objects.Aggregate(
            name='r-fakeres',
            metadata={'availability_zone': conf.blazar_az_prefix,
                      conf.blazar_owner: 'another_project_id'})]
    # The request asks for the 'r-fakeres' pool
    self.spec_obj.scheduler_hints = {'reservation': ['r-fakeres']}
    # Run the filter: the host must be rejected
    self.host.passes = self.f.host_passes(self.host, self.spec_obj)
    self.assertFalse(self.host.passes)
def test_blazar_filter_host_not_in_requested_pools(self):
    """Reject a freepool host when a specific reservation is requested."""
    freepool = cfg.CONF['blazar:physical:host'].aggregate_freepool_name
    self.host.aggregates = [
        objects.Aggregate(
            name=freepool,
            metadata={'availability_zone': 'unknown'})]
    # The request asks for the 'r-fakeres' pool
    self.spec_obj.scheduler_hints = {'reservation': ['r-fakeres']}
    # Run the filter: freepool is not the requested pool, so reject
    self.host.passes = self.f.host_passes(self.host, self.spec_obj)
    self.assertFalse(self.host.passes)
def test_blazar_filter_unicode_requested_pool(self):
    """Unicode pool names match unicode reservation hints."""
    az_prefix = cfg.CONF['blazar:physical:host'].blazar_az_prefix
    # Host pool name is given as a unicode literal
    self.host.aggregates = [
        objects.Aggregate(
            name=u'r-fakeres',
            metadata={'availability_zone': az_prefix,
                      self.spec_obj.project_id: True})]
    # The hint requests the same pool name, also as unicode
    self.spec_obj.scheduler_hints = {'reservation': [u'r-fakeres']}
    # Run the filter: the host must be accepted
    self.host.passes = self.f.host_passes(self.host, self.spec_obj)
    self.assertTrue(self.host.passes)
def test_instance_reservation_requested(self):
# A host is not in any aggregate
self.host.aggregates = []
# And instance-reservation-id1 is requested by an instance
self.spec_obj.flavor.extra_specs = {
FLAVOR_EXTRA_SPEC: 'instance-reservation-id1'}
self.spec_obj.flavor.flavorid = 'instance-reservation-id1'
self.host.passes = self.f.host_passes(self.host, self.spec_obj)
self.assertTrue(self.host.passes)
def test_blazar_filter_host_in_freepool_for_preemptibles(self):
# Given preemptibles are allowed
cfg.CONF.set_override('allow_preemptibles', True,
group='blazar:physical:host')
self.addCleanup(cfg.CONF.clear_override, 'allow_preemptibles',
group='blazar:physical:host')
# Given the host is in the free pool
self.host.aggregates = [
objects.Aggregate(
name='freepool',
metadata={'availability_zone': ''})]
# And the instance is launched with a flavor marked as preemptible
self.spec_obj.flavor.extra_specs = {'blazar:preemptible': 'true'}
# When the host goes through the filter
self.host.passes = self.f.host_passes(self.host, self.spec_obj)
# Then the host shall pass
self.assertTrue(self.host.passes)
def test_blazar_filter_host_in_preemptibles(self):
# Given preemptibles are allowed and dedicated aggregate is used
cfg.CONF.set_override('allow_preemptibles', True,
group='blazar:physical:host')
self.addCleanup(cfg.CONF.clear_override, 'allow_preemptibles',
group='blazar:physical:host')
cfg.CONF.set_override('preemptible_aggregate', 'preemptibles',
group='blazar:physical:host')
self.addCleanup(cfg.CONF.clear_override, 'preemptible_aggregate',
group='blazar:physical:host')
# Given the host is in the preemptibles aggregate
self.host.aggregates = [
objects.Aggregate(
name='preemptibles',
metadata={'availability_zone': ''})]
# And the instance is launched with a flavor marked as preemptible
self.spec_obj.flavor.extra_specs = {'blazar:preemptible': 'true'}
# When the host goes through the filter
self.host.passes = self.f.host_passes(self.host, self.spec_obj)
# Then the host shall pass
self.assertTrue(self.host.passes)
def test_blazar_filter_host_not_in_preemptibles(self):
# Given preemptibles are allowed and dedicated aggregate is used
cfg.CONF.set_override('allow_preemptibles', True,
group='blazar:physical:host')
self.addCleanup(cfg.CONF.clear_override, 'allow_preemptibles',
group='blazar:physical:host')
cfg.CONF.set_override('preemptible_aggregate', 'preemptibles',
group='blazar:physical:host')
self.addCleanup(cfg.CONF.clear_override, 'preemptible_aggregate',
group='blazar:physical:host')
# Given the host is in the free pool
self.host.aggregates = [
objects.Aggregate(
name=cfg.CONF['blazar:physical:host'].aggregate_freepool_name,
metadata={'availability_zone': 'unknown',
self.spec_obj.project_id: True})]
# And the instance is launched with a flavor marked as preemptible
self.spec_obj.flavor.extra_specs = {'blazar:preemptible': 'true'}
# When the host goes through the filter
self.host.passes = self.f.host_passes(self.host, self.spec_obj)
# Then the host shall NOT pass
self.assertFalse(self.host.passes)
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from xml.etree import ElementTree
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class JunitTestsIntegrationTest(PantsRunIntegrationTest):
  """Integration tests that drive `pants test` over the example and
  testprojects JUnit targets, asserting on generated report files,
  coverage artifacts and console output.
  """
  def _assert_junit_output_exists_for_class(self, workdir, classname):
    """Assert junit wrote both the stdout and stderr capture files for classname."""
    self.assertTrue(os.path.exists(
      os.path.join(workdir, 'test', 'junit', '{}.out.txt'.format(classname))))
    self.assertTrue(os.path.exists(
      os.path.join(workdir, 'test', 'junit', '{}.err.txt'.format(classname))))
  def _assert_junit_output(self, workdir):
    """Assert output files exist for the two example test classes (java + scala)."""
    self._assert_junit_output_exists_for_class(workdir, 'org.pantsbuild.example.hello.greet.GreetingTest')
    self._assert_junit_output_exists_for_class(workdir, 'org.pantsbuild.example.hello.welcome.WelSpec')
  def test_junit_test_custom_interpreter(self):
    """Run the example java and scala tests with explicit interpreter constraints."""
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      pants_run = self.run_pants_with_workdir([
          'test',
          'examples/tests/java/org/pantsbuild/example/hello/greet',
          'examples/tests/scala/org/pantsbuild/example/hello/welcome',
          '--interpreter=CPython>=2.6,<3',
          '--interpreter=CPython>=3.3'],
          workdir)
      self.assert_success(pants_run)
      self._assert_junit_output(workdir)
  def test_junit_test(self):
    """A scala test target with no tests should make the run fail."""
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      pants_run = self.run_pants_with_workdir([
          'test',
          'testprojects/tests/scala/org/pantsbuild/testproject/empty'],
          workdir)
      self.assert_failure(pants_run)
  def test_junit_test_with_test_option_with_relpath(self):
    """--test-junit-test accepts a repo-relative source file path."""
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      pants_run = self.run_pants_with_workdir([
          'test',
          '--test-junit-test=examples/tests/java/org/pantsbuild/example/hello/greet/GreetingTest.java',
          'examples/tests/java/org/pantsbuild/example/hello/greet',
          'examples/tests/scala/org/pantsbuild/example/hello/welcome'],
          workdir)
      self.assert_success(pants_run)
      self._assert_junit_output_exists_for_class(workdir, 'org.pantsbuild.example.hello.greet.GreetingTest')
  def test_junit_test_with_test_option_with_dot_slash_relpath(self):
    """--test-junit-test also accepts a './'-prefixed relative path."""
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      pants_run = self.run_pants_with_workdir([
          'test',
          '--test-junit-test=./examples/tests/java/org/pantsbuild/example/hello/greet/GreetingTest.java',
          'examples/tests/java/org/pantsbuild/example/hello/greet',
          'examples/tests/scala/org/pantsbuild/example/hello/welcome'],
          workdir)
      self.assert_success(pants_run)
      self._assert_junit_output_exists_for_class(workdir, 'org.pantsbuild.example.hello.greet.GreetingTest')
  def test_junit_test_with_test_option_with_classname(self):
    """--test-junit-test accepts a fully qualified class name."""
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      pants_run = self.run_pants_with_workdir([
          'test',
          '--test-junit-test=org.pantsbuild.example.hello.greet.GreetingTest',
          'examples/tests/java/org/pantsbuild/example/hello/greet',
          'examples/tests/scala/org/pantsbuild/example/hello/welcome'],
          workdir)
      self.assert_success(pants_run)
      self._assert_junit_output_exists_for_class(workdir, 'org.pantsbuild.example.hello.greet.GreetingTest')
  def test_junit_test_with_emma(self):
    """Run tests under emma coverage and verify the generated reports."""
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      pants_run = self.run_pants_with_workdir([
          'test',
          'examples/tests/java//org/pantsbuild/example/hello/greet',
          'examples/tests/scala/org/pantsbuild/example/hello/welcome',
          '--interpreter=CPython>=2.6,<3',
          '--interpreter=CPython>=3.3',
          '--test-junit-coverage-processor=emma',
          '--test-junit-coverage',
          '--test-junit-coverage-jvm-options=-Xmx1g',
          '--test-junit-coverage-jvm-options=-XX:MaxPermSize=256m'],
          workdir)
      self.assert_success(pants_run)
      self._assert_junit_output(workdir)
      # TODO(Eric Ayers): Why does emma puts coverage.xml in a different directory from cobertura?
      self.assertTrue(os.path.exists(
        os.path.join(workdir, 'test', 'junit', 'coverage', 'coverage.xml')))
      self.assertTrue(os.path.exists(
        os.path.join(workdir, 'test', 'junit', 'coverage', 'html', 'index.html')))
      # Look for emma report in stdout_data:
      # 23:20:21 00:02 [emma-report][EMMA v2.1.5320 (stable) report, generated Mon Oct 13 ...
      self.assertIn('[emma-report]', pants_run.stdout_data)
      # See if the two test classes ended up generating data in the coverage report.
      lines = pants_run.stdout_data.split('\n')
      in_package_report = False
      package_report = ""
      # Accumulate every line from the package-breakdown header onwards.
      for line in lines:
        if 'COVERAGE BREAKDOWN BY PACKAGE:' in line:
          in_package_report = True
        if in_package_report:
          package_report += line
      self.assertIn('org.pantsbuild.example.hello.welcome', package_report)
      self.assertIn('org.pantsbuild.example.hello.greet', package_report)
  def test_junit_test_with_coberta(self):
    """Run tests under cobertura coverage and verify html + xml artifacts."""
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      pants_run = self.run_pants_with_workdir([
          'test',
          'examples/tests/java//org/pantsbuild/example/hello/greet',
          'examples/tests/scala/org/pantsbuild/example/hello/welcome',
          '--interpreter=CPython>=2.6,<3',
          '--interpreter=CPython>=3.3',
          '--test-junit-coverage-processor=cobertura',
          '--test-junit-coverage',
          '--test-junit-coverage-jvm-options=-Xmx1g',
          '--test-junit-coverage-jvm-options=-XX:MaxPermSize=256m'],
          workdir)
      self.assert_success(pants_run)
      self._assert_junit_output(workdir)
      self.assertTrue(os.path.exists(
        os.path.join(workdir, 'test', 'junit', 'coverage', 'html', 'index.html')))
      xmlf = os.path.join(workdir, 'test', 'junit', 'coverage', 'xml', 'coverage.xml')
      self.assertTrue(os.path.exists(xmlf))
      # Sanity-check the XML content: at least one line must record a hit.
      hits = ElementTree.parse(xmlf).findall("packages/package/classes/class/lines/line")
      if all(i.attrib['hits'] == "0" for i in hits):
        self.fail("no nonzero hits found in the generated coverage.xml")
  def test_junit_test_requiring_cwd_fails_without_option_specified(self):
    """The cwd-sensitive test fails when no --test-junit-cwd is given."""
    pants_run = self.run_pants([
        'test',
        'testprojects/tests/java/org/pantsbuild/testproject/cwdexample',
        '--interpreter=CPython>=2.6,<3',
        '--interpreter=CPython>=3.3',
        '--jvm-test-junit-options=-Dcwd.test.enabled=true'])
    self.assert_failure(pants_run)
  def test_junit_test_requiring_cwd_passes_with_option_with_value_specified(self):
    """The cwd-sensitive test passes when pointed at the right directory."""
    pants_run = self.run_pants([
        'test',
        'testprojects/tests/java/org/pantsbuild/testproject/cwdexample',
        '--interpreter=CPython>=2.6,<3',
        '--interpreter=CPython>=3.3',
        '--jvm-test-junit-options=-Dcwd.test.enabled=true',
        '--test-junit-cwd=testprojects/src/java/org/pantsbuild/testproject/cwdexample/subdir'])
    self.assert_success(pants_run)
  def test_junit_test_requiring_cwd_fails_with_option_with_no_value_specified(self):
    """Omitting the cwd value still leaves the cwd-sensitive test failing."""
    pants_run = self.run_pants([
        'test',
        'testprojects/tests/java/org/pantsbuild/testproject/cwdexample',
        '--interpreter=CPython>=2.6,<3',
        '--interpreter=CPython>=3.3',
        '--jvm-test-junit-options=-Dcwd.test.enabled=true'])
    self.assert_failure(pants_run)
  def test_junit_test_suppress_output_flag(self):
    """--no-suppress-output forwards the tests' stdout to the console."""
    pants_run = self.run_pants([
        'test.junit',
        '--no-suppress-output',
        'testprojects/tests/java/org/pantsbuild/testproject/dummies:passing_target'])
    self.assertIn('Hello from test1!', pants_run.stdout_data)
    self.assertIn('Hello from test2!', pants_run.stdout_data)
  def test_junit_test_target_cwd(self):
    """A target that declares its own working directory runs successfully."""
    pants_run = self.run_pants([
        'test',
        'testprojects/tests/java/org/pantsbuild/testproject/workdirs/onedir',
    ])
    self.assert_success(pants_run)
  def test_junit_test_annotation_processor(self):
    """Annotation-processor-dependent tests pass with isolated compile strategy."""
    pants_run = self.run_pants([
        'test',
        '--compile-java-strategy=isolated',
        'testprojects/tests/java/org/pantsbuild/testproject/annotation',
    ])
    self.assert_success(pants_run)
  def test_junit_test_duplicate_resources(self):
    """Colliding resources across targets must not break the junit run."""
    pants_run = self.run_pants([
        'test',
        'testprojects/maven_layout/junit_resource_collision',
    ])
    self.assert_success(pants_run)
  def test_junit_test_target_cwd_overrides_option(self):
    """A target-level cwd declaration wins over the --test-junit-cwd option."""
    pants_run = self.run_pants([
        'test',
        'testprojects/tests/java/org/pantsbuild/testproject/workdirs/onedir',
        '--test-junit-cwd=testprojects/tests/java/org/pantsbuild/testproject/dummies'
    ])
    self.assert_success(pants_run)
| |
#from __pyjamas__ import debugger
# --------------------------------------------------------------------
# public interface
# flags
# Flag constants mirroring CPython's `re` module.  Each flag has a
# one-letter and a long alias; values are bit-flags so they can be OR'ed.
# LOCALE, UNICODE and VERBOSE are accepted here but rejected by _compile().
I = IGNORECASE = 1 # ignore case
L = LOCALE = 2 # assume current 8-bit locale
U = UNICODE = 4 # assume unicode locale
M = MULTILINE = 8 # make anchors look for newline
S = DOTALL = 16 # make dot match newline
X = VERBOSE = 32 # ignore whitespace and comments
def match(pattern, string, flags=0):
    """Try to apply the pattern at the start of the string.

    Returns a match object, or None if no match was found.
    """
    return compile(pattern, flags).match(string)
def search(pattern, string, flags=0):
    """Scan through string looking for a match to the pattern.

    Returns a match object, or None if no match was found.
    """
    return compile(pattern, flags).search(string)
def sub(pattern, repl, string, count=0):
    """Replace the leftmost non-overlapping occurrences of the pattern.

    *repl* may be a string or a callable; a callable is passed the match
    object and must return the replacement string to use.
    """
    return compile(pattern, 0).sub(repl, string, count)
def subn(pattern, repl, string, count=0):
    """Return a 2-tuple ``(new_string, number_of_substitutions)``.

    ``new_string`` is *string* with the leftmost non-overlapping
    occurrences of the pattern replaced by *repl*, which may be a string
    or a callable taking the match object.
    """
    return compile(pattern, 0).subn(repl, string, count)
def split(pattern, string, maxsplit=0):
    """Split *string* by occurrences of the pattern and return the pieces."""
    return compile(pattern, 0).split(string, maxsplit)
def findall(pattern, string, flags=0):
    """Return a list of all non-overlapping matches in the string.

    If one or more groups are present in the pattern, a list of groups is
    returned (a list of tuples when there is more than one group).
    Empty matches are included in the result.
    """
    return compile(pattern, flags).findall(string)
def finditer(pattern, string, flags=0):
    """Return an iterator over all non-overlapping matches in the string.

    Empty matches are included in the result.
    """
    return compile(pattern, flags).finditer(string)
def compile(pattern, flags=0):
    """Compile *pattern* into a reusable :class:`SRE_Pattern` object."""
    compiled = _compile(pattern, flags)
    return SRE_Pattern(pattern, flags, compiled)
def purge():
    """Clear the compiled-pattern and replacement caches."""
    _cache.clear()
    _cache_repl.clear()
def template(pattern, flags=0):
    """Compile a template pattern -- not supported by this port."""
    raise NotImplementedError("re.template")
# Lookup table of the characters escape() leaves untouched: ASCII letters
# and digits.  A dict is used as a set for O(1) membership tests.
# BUG FIX: the original literal ended in '...01234567890', duplicating the
# trailing '0' (harmless for membership but clearly a typo).
_alphanum = {}
for c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789':
    _alphanum[c] = 1
del c
def escape(pattern):
    """Escape all non-alphanumeric characters in pattern.

    ASCII letters and digits pass through unchanged, a NUL byte becomes
    the literal escape ``\\000``, and any other character is prefixed
    with a single backslash.
    """
    table = _alphanum
    escaped = []
    for ch in pattern:
        if ch in table:
            escaped.append(ch)
        elif ch == "\000":
            escaped.append("\\000")
        else:
            escaped.append("\\" + ch)
    # Joining on an empty slice of the input preserves its string type.
    return pattern[:0].join(escaped)
# --------------------------------------------------------------------
# internals
from __pyjamas__ import JS, debugger
# Native JS regex that recognizes inline flag groups such as "(?im)".
__inline_flags_re__ = JS(r"""new RegExp("[(][?][iLmsux]+[)]")""")
# Compiled-pattern and replacement caches; size is bounded by _MAXCACHE
# (on overflow the whole cache is cleared, see _compile()).
_cache = {}
_cache_repl = {}
_MAXCACHE = 100
def _compile(pat, flags=0):
    # Translate a Python-style pattern into a cached triple of JS RegExp
    # objects:
    #   [0] plain regex used by SRE_Pattern.match()
    #   [1] wrapped regex "([\s\S]*?)(pat)[\s\S]*" used by search()
    #   [2] global ("g") regex used by findall()
    cachekey = (pat, flags)
    p = _cache.get(cachekey)
    if p is not None:
        return p
    flgs = ""
    # Fold inline flag groups such as "(?im)" into `flags`, stripping them
    # from the pattern text.
    while True:
        m = __inline_flags_re__.Exec(pat)
        if JS("@{{m}} === null"):
            m = None
            break
        # NOTE(review): a JS RegExp object is handed to str.replace() here;
        # this relies on pyjamas string semantics, not CPython's -- confirm.
        pat = pat.replace(__inline_flags_re__, "")
        for m in list(m):
            if JS("@{{m}} === null"):
                continue
            for c in str(m):
                if c in ['(','?',')']:
                    pass
                elif c == 'i':
                    flags |= IGNORECASE
                elif c == 'L':
                    flags |= LOCALE
                elif c == 'm':
                    flags |= MULTILINE
                elif c == 's':
                    flags |= DOTALL
                elif c == 'u':
                    flags |= UNICODE
                elif c == 'x':
                    flags |= VERBOSE
    if flags:
        # Flags that cannot be emulated with JS regexes are rejected.
        if flags & LOCALE:
            raise NotImplementedError("L/LOCALE flag is not implemented")
        if flags & UNICODE:
            raise NotImplementedError("U/UNICODE flag is not implemented")
        if flags & VERBOSE:
            raise NotImplementedError("X/VERBOSE flag is not implemented")
        if flags & DOTALL:
            # Replace the '.' with '[\s\S]' iff the dot is not within []
            p = ''
            brack = -1      # index of an open '[' class, or -1
            backslash = -2  # index of the last backslash seen
            for i, c in enumerate(pat):
                if backslash != i - 1:
                    if brack < 0:
                        if c == '[':
                            brack = i
                        elif c == '.':
                            c = r'[\s\S]'
                        elif c == '\\':
                            backslash = i
                    else:
                        if c == ']' and brack != i-1:
                            brack = -1
                p += c
            pat = p
        # IGNORECASE and MULTILINE map directly onto JS regex flags.
        if flags & IGNORECASE:
            flgs += 'i'
        if flags & MULTILINE:
            flgs += 'm'
    # search() relies on group 1 being the text before the match and group
    # 2 being the match itself.
    spat = r"([\s\S]*?)(" + pat + r")[\s\S]*"
    p = JS(r"""new RegExp(@{{pat}}, @{{flgs}})"""), JS(r"""new RegExp(@{{spat}}, @{{flgs}})"""), JS(r"""new RegExp(@{{pat}}, "g"+@{{flgs}})""")
    if len(_cache) >= _MAXCACHE:
        # Crude bound: wipe everything rather than evicting one entry.
        _cache.clear()
    _cache[cachekey] = p
    return p
class SRE_Match:
    """Result of a successful match, mirroring CPython's match object.

    Only numeric group indices are supported (no named groups), and
    positional start/end information is tracked for group 0 only.
    """
    def __init__(self, re, string, pos, endpos, groups, start, lastindex, lastgroup):
        # groups[0] is the whole matched text; groups[1:] are the subgroups.
        self._groups = groups
        self._start = start
        self._end = start + len(groups[0])
        self.re = re
        self.string = string
        self.pos = pos
        self.endpos = endpos
        self.lastindex = lastindex
        self.lastgroup = lastgroup
    def start(self, group=0):
        # Returns the index of the start of the substring matched by group;
        # only group 0 (the whole match) is supported here.
        if group != 0:
            raise NotImplementedError("group argument not supported")
        return self._start
    def end(self, group=0):
        # Returns the index of the end of the substring matched by group;
        # only group 0 (the whole match) is supported here.
        if group != 0:
            raise NotImplementedError("group argument not supported")
        return self._end
    def span(self, group=0):
        # Returns the 2-tuple (m.start(group), m.end(group)).
        if group != 0:
            raise NotImplementedError("group argument not supported")
        return self.start(group), self.end(group)
    def expand(self, template):
        # Backslash substitution / group references -- not implemented.
        raise NotImplementedError('expand')
    def groups(self, default=None):
        # Returns a tuple of all subgroups; *default* replaces groups that
        # did not participate in the match.
        return tuple([x if x is not None else default for x in self._groups[1:]])
    def groupdict(self, default=None):
        # Named subgroups are not supported by this implementation.
        raise NotImplementedError('groupdict')
    def group(self, *args):
        # Returns one or more subgroups of the match by numeric index; with
        # no arguments, returns the whole match (group 0).
        if len(args) == 0:
            args = (0,)
        grouplist = []
        for group in args:
            grouplist.append(self._groups[group])
        if len(grouplist) == 1:
            return grouplist[0]
        else:
            return tuple(grouplist)
    def __copy__(self):
        # BUG FIX: both copy hooks were missing ``self`` (and ``memo``) and
        # used Python-2-only ``raise T, msg`` syntax; copying remains
        # deliberately unsupported.
        raise TypeError("cannot copy this pattern object")
    def __deepcopy__(self, memo):
        raise TypeError("cannot copy this pattern object")
class SRE_Pattern:
    """Compiled regular expression backed by three native JS RegExp
    objects: a plain one for match(), a wrapped one for search() and a
    global one for findall() (see _compile()).
    """
    def __init__(self, pat, flags, code):
        self.pat = pat
        self.flags = flags
        # `code` is the 3-tuple produced by _compile().
        self.match_code = code[0]
        self.search_code = code[1]
        self.findall_code = code[2]
    def match(self, string, pos=0, endpos=None):
        # If zero or more characters at the beginning of string match this
        # regular expression, return a corresponding MatchObject instance. Return
        # None if the string does not match the pattern.
        if not endpos is None:
            string = string[:endpos]
        else:
            endpos = len(string)
        if pos == 0:
            groups = self.match_code.Exec(string)
            if JS("@{{groups}} === null"):
                return None
            # Convert the JS result array to a Python list, mapping JS
            # nulls (unmatched groups) to None.
            _groups = []
            for i in list(groups):
                if JS("@{{i}} === null"):
                    _groups.append(None)
                else:
                    _groups.append(str(i))
            groups = _groups
        elif pos >= len(string):
            return None
        else:
            # Strictly, we shouldn't use string[pos:]
            # The '^' pattern character should match at the real beginning of
            # the string and at positions just after a newline, but not
            # necessarily at the index where the search is to start.
            # Maybe, we should raise an error if there's a '^' in pat (not in [])
            groups = self.match_code.Exec(string[pos:])
            if JS("@{{groups}} === null"):
                return None
            # Reject matches that do not start exactly at `pos`.
            if groups.index != 0:
                return None
            _groups = []
            for i in list(groups):
                if JS("@{{i}} === null"):
                    _groups.append(None)
                else:
                    _groups.append(str(i))
            groups = _groups
        return SRE_Match(self, string, pos, endpos, groups, pos, None, None)
    def search(self, string, pos=0, endpos=None):
        # Scan through string looking for a location where this regular
        # expression produces a match, and return a corresponding MatchObject
        # instance. Return None if no position in the string matches the
        # pattern.
        if not endpos is None:
            string = string[:endpos]
        if pos == 0:
            groups = self.search_code.Exec(string)
            if JS("@{{groups}} === null"):
                return None
            _groups = []
            for i in list(groups):
                if JS("@{{i}} === null"):
                    _groups.append(None)
                else:
                    _groups.append(str(i))
            groups = _groups
        elif pos >= len(string):
            return None
        else:
            # Strictly, we shouldn't use string[pos:]
            groups = self.search_code.Exec(string[pos:])
            if JS("@{{groups}} === null"):
                return None
            _groups = []
            for i in list(groups):
                if JS("@{{i}} === null"):
                    _groups.append(None)
                else:
                    _groups.append(str(i))
            groups = _groups
        # The search pattern wraps the original as "([\s\S]*?)(pat)...":
        # groups[1] is the leading text, groups[2:] the real groups.
        return SRE_Match(self, string, pos, endpos, groups[2:], pos + len(groups[1]),None, None)
    def findall(self, string, pos=0, endpos=None):
        # Return a list of all non-overlapping matches of pattern in string.
        if not endpos is None:
            string = string[:endpos]
        all = []
        while True:
            m = self.search(string, pos)
            if m is None:
                break
            span = m.span()
            all.append(string[span[0]:span[1]])
            pos = span[1]
        return all
        # The native-global-regex variant below is intentionally disabled
        # (unreachable after the return above).
        # Next line bugs in FF2
        return list(string[pos:].match(self.findall_code))
    def sub(self, repl, string, count=0):
        # Return the string obtained by replacing the leftmost non-overlapping
        # occurrences of pattern in string by the replacement repl.
        return self.subn(repl, string, count)[0]
    def subn(self, repl, string, count=0):
        # Return the tuple (new_string, number_of_subs_made) found by replacing
        # the leftmost non-overlapping occurrences of pattern with the replacement
        # repl.
        res = ''
        n = 0
        subst = repl
        pos = 0
        # count == 0 means "replace all"; otherwise stop after `count`
        # substitutions (the final one breaks before decrementing to 0).
        while count >= 0:
            m = self.search(string, pos)
            if m is None:
                break
            span = m.span()
            if callable(repl):
                subst = repl(m)
            res += string[pos:span[0]]
            res += subst
            pos = span[1]
            n += 1
            if count:
                if count == 1:
                    break
                count -= 1
        return res + string[pos:], n
    def split(self, string, maxsplit=0):
        # Split string by the occurrences of pattern.
        # NOTE(review): `maxsplit` is never decremented, so the limit is
        # effectively ignored and splitting always runs to exhaustion.
        splitted = []
        pos = 0
        while maxsplit >= 0:
            m = self.search(string, pos)
            if m is None:
                break
            span = m.span()
            splitted.append(string[pos:span[0]])
            pos = span[1]
        if pos < len(string):
            splitted.append(string[pos:])
        return splitted
    def finditer(self, string, pos=0, endpos=None):
        # Return a list of all non-overlapping matches of pattern in string.
        # NOTE(review): this yields the matched substrings from findall(),
        # not match objects as CPython's finditer does.
        return self.findall(string, pos, endpos).__iter__()
    def scanner(self, string, start=0, end=None):
        # Not supported by this implementation.
        raise NotImplementedError('scanner')
    def __copy__(self):
        # Copying compiled patterns is deliberately unsupported.
        raise TypeError, "cannot copy this pattern object"
    def __deepcopy__(self):
        raise TypeError, "cannot copy this pattern object"
| |
"""(disabled by default) support for testing pytest and pytest plugins."""
from __future__ import absolute_import, division, print_function
import codecs
import gc
import os
import platform
import re
import subprocess
import six
import sys
import time
import traceback
from fnmatch import fnmatch
from weakref import WeakKeyDictionary
from _pytest.capture import MultiCapture, SysCapture
from _pytest._code import Source
import py
import pytest
from _pytest.main import Session, EXIT_INTERRUPTED, EXIT_OK
from _pytest.assertion.rewrite import AssertionRewritingHook
from _pytest.compat import Path
from _pytest.compat import safe_str
# Files that PAM/SSSD may open while details about the current user are
# looked up; the lsof leak checker must not report these as leaked.
IGNORE_PAM = [ # filenames added when obtaining details about the current user
    u"/var/lib/sss/mc/passwd"
]
def pytest_addoption(parser):
    """Register pytester's command line flags and ini settings."""
    parser.addoption(
        "--lsof",
        action="store_true",
        dest="lsof",
        default=False,
        help="run FD checks if lsof is available",
    )
    runpytest_help = (
        "run pytest sub runs in tests using an 'inprocess' "
        "or 'subprocess' (python -m main) method"
    )
    parser.addoption(
        "--runpytest",
        default="inprocess",
        dest="runpytest",
        choices=("inprocess", "subprocess"),
        help=runpytest_help,
    )
    parser.addini(
        "pytester_example_dir", help="directory to take the pytester example files from"
    )
def pytest_configure(config):
    """Install the lsof FD leak checker when requested and supported."""
    if not config.getvalue("lsof"):
        return
    checker = LsofFdLeakChecker()
    if checker.matching_platform():
        config.pluginmanager.register(checker)
class LsofFdLeakChecker(object):
    """Plugin that runs ``lsof`` around each test to detect leaked FDs."""
    def get_open_files(self):
        # Snapshot of (fd, filename) pairs currently open by this process.
        out = self._exec_lsof()
        open_files = self._parse_lsof_output(out)
        return open_files
    def _exec_lsof(self):
        pid = os.getpid()
        # -F f/n with NUL terminators gives machine-parsable fd/name fields.
        return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
    def _parse_lsof_output(self, out):
        def isopen(line):
            # Keep only real file-descriptor records, skipping memory
            # mappings, program text, cwd and deleted entries.
            return line.startswith("f") and (
                "deleted" not in line
                and "mem" not in line
                and "txt" not in line
                and "cwd" not in line
            )
        open_files = []
        for line in out.split("\n"):
            if isopen(line):
                # Fields are NUL-separated; strip the one-letter field tag.
                fields = line.split("\0")
                fd = fields[0][1:]
                filename = fields[1][1:]
                if filename in IGNORE_PAM:
                    continue
                if filename.startswith("/"):
                    open_files.append((fd, filename))
        return open_files
    def matching_platform(self):
        # Only activate when a working `lsof` binary is available.
        try:
            py.process.cmdexec("lsof -v")
        except (py.process.cmdexec.Error, UnicodeDecodeError):
            # cmdexec may raise UnicodeDecodeError on Windows systems with
            # locale other than English:
            # https://bitbucket.org/pytest-dev/py/issues/66
            return False
        else:
            return True
    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
    def pytest_runtest_protocol(self, item):
        # Compare the FD table before and after the test and warn about
        # descriptors that appeared and were never closed.
        lines1 = self.get_open_files()
        yield
        if hasattr(sys, "pypy_version_info"):
            # PyPy closes files lazily; force a collection first.
            gc.collect()
        lines2 = self.get_open_files()
        new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
        leaked_files = [t for t in lines2 if t[0] in new_fds]
        if leaked_files:
            error = []
            error.append("***** %s FD leakage detected" % len(leaked_files))
            error.extend([str(f) for f in leaked_files])
            error.append("*** Before:")
            error.extend([str(f) for f in lines1])
            error.append("*** After:")
            error.extend([str(f) for f in lines2])
            error.append(error[0])
            error.append("*** function %s:%s: %s " % item.location)
            error.append("See issue #2366")
            item.warn(pytest.PytestWarning("\n".join(error)))
# XXX copied from execnet's conftest.py - needs to be merged
# Conventional Windows install locations, used as a fallback by the
# `anypython` fixture when an interpreter is not found on PATH.
winpymap = {
    "python2.7": r"C:\Python27\python.exe",
    "python3.4": r"C:\Python34\python.exe",
    "python3.5": r"C:\Python35\python.exe",
    "python3.6": r"C:\Python36\python.exe",
}
def getexecutable(name, cache={}):
    """Return a ``py.path.local`` for the interpreter *name*, or None.

    Results are memoized in the (intentionally) mutable default *cache*
    so each interpreter is probed with ``--version`` at most once per
    process.  Jython builds other than a working 2.5.x and interpreters
    whose version probe exits non-zero (e.g. pyenv shims returning 127)
    are treated as unavailable.
    """
    try:
        return cache[name]
    except KeyError:
        executable = py.path.local.sysfind(name)
        if executable:
            # BUG FIX: `subprocess` is already imported at module level;
            # the previous redundant function-local import is removed.
            popen = subprocess.Popen(
                [str(executable), "--version"],
                universal_newlines=True,
                stderr=subprocess.PIPE,
            )
            out, err = popen.communicate()
            if name == "jython":
                if not err or "2.5" not in err:
                    executable = None
                if "2.5.2" in err:
                    executable = None  # http://bugs.jython.org/issue1790
            elif popen.returncode != 0:
                # handle pyenv's 127
                executable = None
        cache[name] = executable
        return executable
@pytest.fixture(params=["python2.7", "python3.4", "pypy", "pypy3"])
def anypython(request):
    """Return a usable interpreter path for each parametrized python name,
    skipping the test when none can be found."""
    name = request.param
    executable = getexecutable(name)
    if executable is not None:
        return executable
    # On Windows, fall back to the conventional install locations.
    if sys.platform == "win32":
        fallback = winpymap.get(name, None)
        if fallback:
            fallback = py.path.local(fallback)
            if fallback.check():
                return fallback
    pytest.skip("no suitable %s found" % (name,))
# used at least by pytest-xdist plugin
@pytest.fixture
def _pytest(request):
    """Return a :class:`PytestArg` helper.

    Its ``gethookrecorder(hook)`` method returns a :class:`HookRecorder`
    instance which helps to make assertions about called hooks.
    """
    return PytestArg(request)
class PytestArg(object):
    """Small helper giving tests access to hook-call recording."""

    def __init__(self, request):
        self.request = request

    def gethookrecorder(self, hook):
        """Attach a HookRecorder to *hook*; recording stops with the test."""
        recorder = HookRecorder(hook._pm)
        self.request.addfinalizer(recorder.finish_recording)
        return recorder
def get_public_names(values):
    """Return only the names from *values* without a leading underscore."""
    public = []
    for name in values:
        if name[0] != "_":
            public.append(name)
    return public
class ParsedCall(object):
    """A recorded hook call: its name plus the keyword arguments it got.

    Each hook argument is exposed as an attribute for easy assertions.
    """

    def __init__(self, name, kwargs):
        self.__dict__.update(kwargs)
        self._name = name

    def __repr__(self):
        shown = self.__dict__.copy()
        del shown["_name"]
        return "<ParsedCall %r(**%r)>" % (self._name, shown)
class HookRecorder(object):
    """Record all hooks called in a plugin manager.
    This wraps all the hook calls in the plugin manager, recording each call
    before propagating the normal calls.
    """
    def __init__(self, pluginmanager):
        self._pluginmanager = pluginmanager
        # Chronological list of ParsedCall objects.
        self.calls = []
        def before(hook_name, hook_impls, kwargs):
            self.calls.append(ParsedCall(hook_name, kwargs))
        def after(outcome, hook_name, hook_impls, kwargs):
            pass
        # add_hookcall_monitoring returns the undo callable used by
        # finish_recording().
        self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
    def finish_recording(self):
        # Stop intercepting hook calls.
        self._undo_wrapping()
    def getcalls(self, names):
        # *names* may be a space-separated string or an iterable of names.
        if isinstance(names, str):
            names = names.split()
        return [call for call in self.calls if call._name in names]
    def assert_contains(self, entries):
        # Assert that the (hook_name, check_expression) pairs in *entries*
        # appear, in order, within the recorded calls.  Each check is
        # eval'ed with the caller's locals plus the call's attributes.
        __tracebackhide__ = True
        i = 0
        entries = list(entries)
        backlocals = sys._getframe(1).f_locals
        while entries:
            name, check = entries.pop(0)
            for ind, call in enumerate(self.calls[i:]):
                if call._name == name:
                    print("NAMEMATCH", name, call)
                    if eval(check, backlocals, call.__dict__):
                        print("CHECKERMATCH", repr(check), "->", call)
                    else:
                        # Name matched but the check failed: keep scanning.
                        print("NOCHECKERMATCH", repr(check), "-", call)
                        continue
                    i += ind + 1
                    break
                print("NONAMEMATCH", name, "with", call)
            else:
                # Exhausted the calls without satisfying this entry.
                pytest.fail("could not find %r check %r" % (name, check))
    def popcall(self, name):
        # Remove and return the first recorded call named *name*; fail the
        # test if no such call was recorded.
        __tracebackhide__ = True
        for i, call in enumerate(self.calls):
            if call._name == name:
                del self.calls[i]
                return call
        lines = ["could not find call %r, in:" % (name,)]
        lines.extend([" %s" % x for x in self.calls])
        pytest.fail("\n".join(lines))
    def getcall(self, name):
        # Like getcalls() but asserts there is exactly one match.
        values = self.getcalls(name)
        assert len(values) == 1, (name, values)
        return values[0]
    # functionality for test reports
    def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
        # Extract the `report` argument from each matching recorded call.
        return [x.report for x in self.getcalls(names)]
    def matchreport(
        self,
        inamepart="",
        names="pytest_runtest_logreport pytest_collectreport",
        when=None,
    ):
        """return a testreport whose dotted import path matches"""
        values = []
        for rep in self.getreports(names=names):
            try:
                if not when and rep.when != "call" and rep.passed:
                    # setup/teardown passing reports - let's ignore those
                    continue
            except AttributeError:
                # Collect reports have no `when` attribute.
                pass
            if when and getattr(rep, "when", None) != when:
                continue
            if not inamepart or inamepart in rep.nodeid.split("::"):
                values.append(rep)
        if not values:
            raise ValueError(
                "could not find test report matching %r: "
                "no test reports at all!" % (inamepart,)
            )
        if len(values) > 1:
            raise ValueError(
                "found 2 or more testreports matching %r: %s" % (inamepart, values)
            )
        return values[0]
    def getfailures(self, names="pytest_runtest_logreport pytest_collectreport"):
        return [rep for rep in self.getreports(names) if rep.failed]
    def getfailedcollections(self):
        return self.getfailures("pytest_collectreport")
    def listoutcomes(self):
        # Partition the recorded reports into (passed, skipped, failed);
        # only "call"-phase reports count as passed.
        passed = []
        skipped = []
        failed = []
        for rep in self.getreports("pytest_collectreport pytest_runtest_logreport"):
            if rep.passed:
                if getattr(rep, "when", None) == "call":
                    passed.append(rep)
            elif rep.skipped:
                skipped.append(rep)
            elif rep.failed:
                failed.append(rep)
        return passed, skipped, failed
    def countoutcomes(self):
        return [len(x) for x in self.listoutcomes()]
    def assertoutcome(self, passed=0, skipped=0, failed=0):
        realpassed, realskipped, realfailed = self.listoutcomes()
        assert passed == len(realpassed)
        assert skipped == len(realskipped)
        assert failed == len(realfailed)
    def clear(self):
        # Drop all recorded calls in place.
        self.calls[:] = []
@pytest.fixture
def linecomp(request):
    """Provide a fresh LineComp instance for line-matching assertions."""
    return LineComp()
@pytest.fixture(name="LineMatcher")
def LineMatcher_fixture(request):
    """Expose the LineMatcher class itself as a fixture."""
    return LineMatcher
@pytest.fixture
def testdir(request, tmpdir_factory):
    """Provide a Testdir instance bound to the requesting test."""
    return Testdir(request, tmpdir_factory)
# Parses "<count> <outcome>" pairs (e.g. "3 passed", "1 xfailed") out of
# pytest's terminal summary line.
rex_outcome = re.compile(r"(\d+) ([\w-]+)")
class RunResult(object):
    """The result of running a command.
    Attributes:
    :ret: the return value
    :outlines: list of lines captured from stdout
    :errlines: list of lines captures from stderr
    :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
       reconstruct stdout or the commonly used ``stdout.fnmatch_lines()``
       method
    :stderr: :py:class:`LineMatcher` of stderr
    :duration: duration in seconds
    """

    def __init__(self, ret, outlines, errlines, duration):
        self.ret = ret
        self.outlines = outlines
        self.errlines = errlines
        self.stdout = LineMatcher(outlines)
        self.stderr = LineMatcher(errlines)
        self.duration = duration

    def parseoutcomes(self):
        """Parse the terminal summary line into an outcome->count dict."""
        # The summary ("== 2 passed, 1 skipped in 0.5 seconds ==") sits
        # near the end of the output, so scan the lines backwards.
        for line in reversed(self.outlines):
            if "seconds" not in line:
                continue
            outcomes = rex_outcome.findall(line)
            if outcomes:
                return {category: int(num) for num, category in outcomes}
        raise ValueError("Pytest terminal report not found")

    def assert_outcomes(
        self, passed=0, skipped=0, failed=0, error=0, xpassed=0, xfailed=0
    ):
        """Assert that the specified outcomes appear with the respective
        numbers (0 means it didn't occur) in the text output from a test run.
        """
        parsed = self.parseoutcomes()
        expected = {
            "passed": passed,
            "skipped": skipped,
            "failed": failed,
            "error": error,
            "xpassed": xpassed,
            "xfailed": xfailed,
        }
        obtained = {key: parsed.get(key, 0) for key in expected}
        assert obtained == expected
class CwdSnapshot(object):
    """Snapshot of the process working directory.

    ``restore()`` chdirs back to wherever the process was when the
    snapshot was taken.
    """

    def __init__(self):
        self.__original_cwd = os.getcwd()

    def restore(self):
        os.chdir(self.__original_cwd)
class SysModulesSnapshot(object):
    """Snapshot of ``sys.modules`` that can be rolled back.

    :param preserve: optional predicate ``name -> bool``; modules for which
        it returns true keep their *current* (post-snapshot) object on
        restore instead of being rolled back.
    """

    def __init__(self, preserve=None):
        self.__preserve = preserve
        self.__snapshot = dict(sys.modules)

    def restore(self):
        if self.__preserve:
            preserved = {
                name: module
                for name, module in sys.modules.items()
                if self.__preserve(name)
            }
            self.__snapshot.update(preserved)
        sys.modules.clear()
        sys.modules.update(self.__snapshot)
class SysPathsSnapshot(object):
    """Snapshot of ``sys.path`` and ``sys.meta_path``, restored in place."""

    def __init__(self):
        self.__path = list(sys.path)
        self.__meta_path = list(sys.meta_path)

    def restore(self):
        sys.path[:] = self.__path
        sys.meta_path[:] = self.__meta_path
class Testdir(object):
"""Temporary test directory with tools to test/run pytest itself.
This is based on the ``tmpdir`` fixture but provides a number of methods
which aid with testing pytest itself. Unless :py:meth:`chdir` is used all
methods will use :py:attr:`tmpdir` as their current working directory.
Attributes:
:tmpdir: The :py:class:`py.path.local` instance of the temporary directory.
:plugins: A list of plugins to use with :py:meth:`parseconfig` and
:py:meth:`runpytest`. Initially this is an empty list but plugins can
be added to the list. The type of items to add to the list depends on
the method using them so refer to them for details.
"""
def __init__(self, request, tmpdir_factory):
self.request = request
self._mod_collections = WeakKeyDictionary()
name = request.function.__name__
self.tmpdir = tmpdir_factory.mktemp(name, numbered=True)
self.plugins = []
self._cwd_snapshot = CwdSnapshot()
self._sys_path_snapshot = SysPathsSnapshot()
self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
self.chdir()
self.request.addfinalizer(self.finalize)
method = self.request.config.getoption("--runpytest")
if method == "inprocess":
self._runpytest_method = self.runpytest_inprocess
elif method == "subprocess":
self._runpytest_method = self.runpytest_subprocess
def __repr__(self):
return "<Testdir %r>" % (self.tmpdir,)
def finalize(self):
"""Clean up global state artifacts.
Some methods modify the global interpreter state and this tries to
clean this up. It does not remove the temporary directory however so
it can be looked at after the test run has finished.
"""
self._sys_modules_snapshot.restore()
self._sys_path_snapshot.restore()
self._cwd_snapshot.restore()
def __take_sys_modules_snapshot(self):
# some zope modules used by twisted-related tests keep internal state
# and can't be deleted; we had some trouble in the past with
# `zope.interface` for example
def preserve_module(name):
return name.startswith("zope")
return SysModulesSnapshot(preserve=preserve_module)
def make_hook_recorder(self, pluginmanager):
"""Create a new :py:class:`HookRecorder` for a PluginManager."""
pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
self.request.addfinalizer(reprec.finish_recording)
return reprec
def chdir(self):
"""Cd into the temporary directory.
This is done automatically upon instantiation.
"""
self.tmpdir.chdir()
def _makefile(self, ext, args, kwargs, encoding="utf-8"):
items = list(kwargs.items())
def to_text(s):
return s.decode(encoding) if isinstance(s, bytes) else six.text_type(s)
if args:
source = u"\n".join(to_text(x) for x in args)
basename = self.request.function.__name__
items.insert(0, (basename, source))
ret = None
for basename, value in items:
p = self.tmpdir.join(basename).new(ext=ext)
p.dirpath().ensure_dir()
source = Source(value)
source = u"\n".join(to_text(line) for line in source.lines)
p.write(source.strip().encode(encoding), "wb")
if ret is None:
ret = p
return ret
def makefile(self, ext, *args, **kwargs):
r"""Create new file(s) in the testdir.
:param str ext: The extension the file(s) should use, including the dot, e.g. `.py`.
:param list[str] args: All args will be treated as strings and joined using newlines.
The result will be written as contents to the file. The name of the
file will be based on the test function requesting this fixture.
:param kwargs: Each keyword is the name of a file, while the value of it will
be written as contents of the file.
Examples:
.. code-block:: python
testdir.makefile(".txt", "line1", "line2")
testdir.makefile(".ini", pytest="[pytest]\naddopts=-rs\n")
"""
return self._makefile(ext, args, kwargs)
def makeconftest(self, source):
    """Write a conftest.py file with 'source' as contents."""
    return self.makepyfile(conftest=source)
def makeini(self, source):
"""Write a tox.ini file with 'source' as contents."""
return self.makefile(".ini", tox=source)
def getinicfg(self, source):
"""Return the pytest section from the tox.ini config file."""
p = self.makeini(source)
return py.iniconfig.IniConfig(p)["pytest"]
def makepyfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .py extension."""
return self._makefile(".py", args, kwargs)
def maketxtfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .txt extension."""
return self._makefile(".txt", args, kwargs)
def syspathinsert(self, path=None):
"""Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
This is undone automatically when this object dies at the end of each
test.
"""
if path is None:
path = self.tmpdir
sys.path.insert(0, str(path))
# a call to syspathinsert() usually means that the caller wants to
# import some dynamically created files, thus with python3 we
# invalidate its import caches
self._possibly_invalidate_import_caches()
def _possibly_invalidate_import_caches(self):
    """Invalidate the import system's caches where supported (py33+)."""
    try:
        import importlib
    except ImportError:
        return
    invalidate = getattr(importlib, "invalidate_caches", None)
    if invalidate is not None:
        invalidate()
def mkdir(self, name):
"""Create a new (sub)directory."""
return self.tmpdir.mkdir(name)
def mkpydir(self, name):
"""Create a new python package.
This creates a (sub)directory with an empty ``__init__.py`` file so it
gets recognised as a python package.
"""
p = self.mkdir(name)
p.ensure("__init__.py")
return p
def copy_example(self, name=None):
    """Copy a pre-made example file or directory into the test dir.

    Examples live under the ``pytester_example_dir`` ini option, optionally
    narrowed by ``pytester_example_path`` markers on the requesting node.
    If *name* is None, an example matching the requesting test function's
    name is looked up (directory first, then ``<name>.py``).

    Returns the destination path; this is the tmpdir itself when a whole
    directory's contents were copied.
    """
    import warnings
    from _pytest.warning_types import PYTESTER_COPY_EXAMPLE

    warnings.warn(PYTESTER_COPY_EXAMPLE, stacklevel=2)
    example_dir = self.request.config.getini("pytester_example_dir")
    if example_dir is None:
        raise ValueError("pytester_example_dir is unset, can't copy examples")
    example_dir = self.request.config.rootdir.join(example_dir)
    for extra_element in self.request.node.iter_markers("pytester_example_path"):
        assert extra_element.args
        example_dir = example_dir.join(*extra_element.args)
    if name is None:
        func_name = self.request.function.__name__
        maybe_dir = example_dir / func_name
        maybe_file = example_dir / (func_name + ".py")
        if maybe_dir.isdir():
            example_path = maybe_dir
        elif maybe_file.isfile():
            example_path = maybe_file
        else:
            # BUG FIX: this previously read ``self.request.confg`` (typo),
            # which raised AttributeError instead of the intended LookupError.
            raise LookupError(
                "{} cant be found as module or package in {}".format(
                    func_name, example_dir.bestrelpath(self.request.config.rootdir)
                )
            )
    else:
        example_path = example_dir.join(name)
    if example_path.isdir() and not example_path.join("__init__.py").isfile():
        # A plain directory of files (not a package): copy its contents
        # directly into the tmpdir.
        example_path.copy(self.tmpdir)
        return self.tmpdir
    elif example_path.isfile():
        result = self.tmpdir.join(example_path.basename)
        example_path.copy(result)
        return result
    else:
        raise LookupError(
            'example "{}" is not found as a file or directory'.format(example_path)
        )
Session = Session
def getnode(self, config, arg):
"""Return the collection node of a file.
:param config: :py:class:`_pytest.config.Config` instance, see
:py:meth:`parseconfig` and :py:meth:`parseconfigure` to create the
configuration
:param arg: a :py:class:`py.path.local` instance of the file
"""
session = Session(config)
assert "::" not in str(arg)
p = py.path.local(arg)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([str(p)], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def getpathnode(self, path):
"""Return the collection node of a file.
This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to
create the (configured) pytest Config instance.
:param path: a :py:class:`py.path.local` instance of the file
"""
config = self.parseconfigure(path)
session = Session(config)
x = session.fspath.bestrelpath(path)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([x], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def genitems(self, colitems):
"""Generate all test items from a collection node.
This recurses into the collection node and returns a list of all the
test items contained within.
"""
session = colitems[0].session
result = []
for colitem in colitems:
result.extend(session.genitems(colitem))
return result
def runitem(self, source):
"""Run the "test_func" Item.
The calling test instance (class containing the test method) must
provide a ``.getrunner()`` method which should return a runner which
can run the test protocol for a single item, e.g.
:py:func:`_pytest.runner.runtestprotocol`.
"""
# used from runner functional tests
item = self.getitem(source)
# the test class where we are called from wants to provide the runner
testclassinstance = self.request.instance
runner = testclassinstance.getrunner()
return runner(item)
def inline_runsource(self, source, *cmdlineargs):
"""Run a test module in process using ``pytest.main()``.
This run writes "source" into a temporary file and runs
``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance
for the result.
:param source: the source code of the test module
:param cmdlineargs: any extra command line arguments to use
:return: :py:class:`HookRecorder` instance of the result
"""
p = self.makepyfile(source)
values = list(cmdlineargs) + [p]
return self.inline_run(*values)
def inline_genitems(self, *args):
"""Run ``pytest.main(['--collectonly'])`` in-process.
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself like :py:meth:`inline_run`, but returns a
tuple of the collected items and a :py:class:`HookRecorder` instance.
"""
rec = self.inline_run("--collect-only", *args)
items = [x.item for x in rec.getcalls("pytest_itemcollected")]
return items, rec
def inline_run(self, *args, **kwargs):
"""Run ``pytest.main()`` in-process, returning a HookRecorder.
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself. This means it can return a
:py:class:`HookRecorder` instance which gives more detailed results
from that run than can be done by matching stdout/stderr from
:py:meth:`runpytest`.
:param args: command line arguments to pass to :py:func:`pytest.main`
:param plugin: (keyword-only) extra plugin instances the
``pytest.main()`` instance should use
:return: a :py:class:`HookRecorder` instance
"""
finalizers = []
try:
# When running pytest inline any plugins active in the main test
# process are already imported. So this disables the warning which
# will trigger to say they can no longer be rewritten, which is
# fine as they have already been rewritten.
orig_warn = AssertionRewritingHook._warn_already_imported
def revert_warn_already_imported():
AssertionRewritingHook._warn_already_imported = orig_warn
finalizers.append(revert_warn_already_imported)
AssertionRewritingHook._warn_already_imported = lambda *a: None
# Any sys.module or sys.path changes done while running pytest
# inline should be reverted after the test run completes to avoid
# clashing with later inline tests run within the same pytest test,
# e.g. just because they use matching test module names.
finalizers.append(self.__take_sys_modules_snapshot().restore)
finalizers.append(SysPathsSnapshot().restore)
# Important note:
# - our tests should not leave any other references/registrations
# laying around other than possibly loaded test modules
# referenced from sys.modules, as nothing will clean those up
# automatically
rec = []
class Collect(object):
def pytest_configure(x, config):
rec.append(self.make_hook_recorder(config.pluginmanager))
plugins = kwargs.get("plugins") or []
plugins.append(Collect())
ret = pytest.main(list(args), plugins=plugins)
if len(rec) == 1:
reprec = rec.pop()
else:
class reprec(object):
pass
reprec.ret = ret
# typically we reraise keyboard interrupts from the child run
# because it's our user requesting interruption of the testing
if ret == EXIT_INTERRUPTED and not kwargs.get("no_reraise_ctrlc"):
calls = reprec.getcalls("pytest_keyboard_interrupt")
if calls and calls[-1].excinfo.type == KeyboardInterrupt:
raise KeyboardInterrupt()
return reprec
finally:
for finalizer in finalizers:
finalizer()
def runpytest_inprocess(self, *args, **kwargs):
"""Return result of running pytest in-process, providing a similar
interface to what self.runpytest() provides.
"""
if kwargs.get("syspathinsert"):
self.syspathinsert()
now = time.time()
capture = MultiCapture(Capture=SysCapture)
capture.start_capturing()
try:
try:
reprec = self.inline_run(*args, **kwargs)
except SystemExit as e:
class reprec(object):
ret = e.args[0]
except Exception:
traceback.print_exc()
class reprec(object):
ret = 3
finally:
out, err = capture.readouterr()
capture.stop_capturing()
sys.stdout.write(out)
sys.stderr.write(err)
res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), time.time() - now)
res.reprec = reprec
return res
def runpytest(self, *args, **kwargs):
"""Run pytest inline or in a subprocess, depending on the command line
option "--runpytest" and return a :py:class:`RunResult`.
"""
args = self._ensure_basetemp(args)
return self._runpytest_method(*args, **kwargs)
def _ensure_basetemp(self, args):
    """Return *args* as a list, appending a ``--basetemp`` option if absent."""
    args = list(args)
    if not any(safe_str(arg).startswith("--basetemp") for arg in args):
        args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp"))
    return args
def parseconfig(self, *args):
"""Return a new pytest Config instance from given commandline args.
This invokes the pytest bootstrapping code in _pytest.config to create
a new :py:class:`_pytest.core.PluginManager` and call the
pytest_cmdline_parse hook to create a new
:py:class:`_pytest.config.Config` instance.
If :py:attr:`plugins` has been populated they should be plugin modules
to be registered with the PluginManager.
"""
args = self._ensure_basetemp(args)
import _pytest.config
config = _pytest.config._prepareconfig(args, self.plugins)
# we don't know what the test will do with this half-setup config
# object and thus we make sure it gets unconfigured properly in any
# case (otherwise capturing could still be active, for example)
self.request.addfinalizer(config._ensure_unconfigure)
return config
def parseconfigure(self, *args):
"""Return a new pytest configured Config instance.
This returns a new :py:class:`_pytest.config.Config` instance like
:py:meth:`parseconfig`, but also calls the pytest_configure hook.
"""
config = self.parseconfig(*args)
config._do_configure()
self.request.addfinalizer(config._ensure_unconfigure)
return config
def getitem(self, source, funcname="test_func"):
"""Return the test item for a test function.
This writes the source to a python file and runs pytest's collection on
the resulting module, returning the test item for the requested
function name.
:param source: the module source
:param funcname: the name of the test function for which to return a
test item
"""
items = self.getitems(source)
for item in items:
if item.name == funcname:
return item
assert 0, "%r item not found in module:\n%s\nitems: %s" % (
funcname,
source,
items,
)
def getitems(self, source):
"""Return all test items collected from the module.
This writes the source to a python file and runs pytest's collection on
the resulting module, returning all test items contained within.
"""
modcol = self.getmodulecol(source)
return self.genitems([modcol])
def getmodulecol(self, source, configargs=(), withinit=False):
"""Return the module collection node for ``source``.
This writes ``source`` to a file using :py:meth:`makepyfile` and then
runs the pytest collection on it, returning the collection node for the
test module.
:param source: the source code of the module to collect
:param configargs: any extra arguments to pass to
:py:meth:`parseconfigure`
:param withinit: whether to also write an ``__init__.py`` file to the
same directory to ensure it is a package
"""
if isinstance(source, Path):
path = self.tmpdir.join(str(source))
assert not withinit, "not supported for paths"
else:
kw = {self.request.function.__name__: Source(source).strip()}
path = self.makepyfile(**kw)
if withinit:
self.makepyfile(__init__="#")
self.config = config = self.parseconfigure(path, *configargs)
return self.getnode(config, path)
def collect_by_name(self, modcol, name):
"""Return the collection node for name from the module collection.
This will search a module collection node for a collection node
matching the given name.
:param modcol: a module collection node; see :py:meth:`getmodulecol`
:param name: the name of the node to return
"""
if modcol not in self._mod_collections:
self._mod_collections[modcol] = list(modcol.collect())
for colitem in self._mod_collections[modcol]:
if colitem.name == name:
return colitem
def popen(self, cmdargs, stdout, stderr, **kw):
"""Invoke subprocess.Popen.
This calls subprocess.Popen making sure the current working directory
is in the PYTHONPATH.
You probably want to use :py:meth:`run` instead.
"""
env = os.environ.copy()
env["PYTHONPATH"] = os.pathsep.join(
filter(None, [os.getcwd(), env.get("PYTHONPATH", "")])
)
kw["env"] = env
popen = subprocess.Popen(
cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw
)
popen.stdin.close()
return popen
def run(self, *cmdargs):
"""Run a command with arguments.
Run a process using subprocess.Popen saving the stdout and stderr.
Returns a :py:class:`RunResult`.
"""
cmdargs = [
str(arg) if isinstance(arg, py.path.local) else arg for arg in cmdargs
]
p1 = self.tmpdir.join("stdout")
p2 = self.tmpdir.join("stderr")
print("running:", *cmdargs)
print(" in:", py.path.local())
f1 = codecs.open(str(p1), "w", encoding="utf8")
f2 = codecs.open(str(p2), "w", encoding="utf8")
try:
now = time.time()
popen = self.popen(
cmdargs, stdout=f1, stderr=f2, close_fds=(sys.platform != "win32")
)
ret = popen.wait()
finally:
f1.close()
f2.close()
f1 = codecs.open(str(p1), "r", encoding="utf8")
f2 = codecs.open(str(p2), "r", encoding="utf8")
try:
out = f1.read().splitlines()
err = f2.read().splitlines()
finally:
f1.close()
f2.close()
self._dump_lines(out, sys.stdout)
self._dump_lines(err, sys.stderr)
return RunResult(ret, out, err, time.time() - now)
def _dump_lines(self, lines, fp):
try:
for line in lines:
print(line, file=fp)
except UnicodeEncodeError:
print("couldn't print to %s because of encoding" % (fp,))
def _getpytestargs(self):
return sys.executable, "-mpytest"
def runpython(self, script):
"""Run a python script using sys.executable as interpreter.
Returns a :py:class:`RunResult`.
"""
return self.run(sys.executable, script)
def runpython_c(self, command):
"""Run python -c "command", return a :py:class:`RunResult`."""
return self.run(sys.executable, "-c", command)
def runpytest_subprocess(self, *args, **kwargs):
"""Run pytest as a subprocess with given arguments.
Any plugins added to the :py:attr:`plugins` list will added using the
``-p`` command line option. Additionally ``--basetemp`` is used put
any temporary files and directories in a numbered directory prefixed
with "runpytest-" so they do not conflict with the normal numbered
pytest location for temporary files and directories.
Returns a :py:class:`RunResult`.
"""
p = py.path.local.make_numbered_dir(
prefix="runpytest-", keep=None, rootdir=self.tmpdir
)
args = ("--basetemp=%s" % p,) + args
plugins = [x for x in self.plugins if isinstance(x, str)]
if plugins:
args = ("-p", plugins[0]) + args
args = self._getpytestargs() + args
return self.run(*args)
def spawn_pytest(self, string, expect_timeout=10.0):
"""Run pytest using pexpect.
This makes sure to use the right pytest and sets up the temporary
directory locations.
The pexpect child is returned.
"""
basetemp = self.tmpdir.mkdir("temp-pexpect")
invoke = " ".join(map(str, self._getpytestargs()))
cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
return self.spawn(cmd, expect_timeout=expect_timeout)
def spawn(self, cmd, expect_timeout=10.0):
"""Run a command using pexpect.
The pexpect child is returned.
"""
pexpect = pytest.importorskip("pexpect", "3.0")
if hasattr(sys, "pypy_version_info") and "64" in platform.machine():
pytest.skip("pypy-64 bit not supported")
if sys.platform.startswith("freebsd"):
pytest.xfail("pexpect does not work reliably on freebsd")
logfile = self.tmpdir.join("spawn.out").open("wb")
child = pexpect.spawn(cmd, logfile=logfile)
self.request.addfinalizer(logfile.close)
child.timeout = expect_timeout
return child
def getdecoded(out):
    """Decode *out* as UTF-8; on failure return a safe diagnostic string."""
    try:
        decoded = out.decode("utf-8")
    except UnicodeDecodeError:
        return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
            py.io.saferepr(out),
        )
    return decoded
class LineComp(object):
def __init__(self):
self.stringio = py.io.TextIO()
def assert_contains_lines(self, lines2):
"""Assert that lines2 are contained (linearly) in lines1.
Return a list of extralines found.
"""
__tracebackhide__ = True
val = self.stringio.getvalue()
self.stringio.truncate(0)
self.stringio.seek(0)
lines1 = val.split("\n")
return LineMatcher(lines1).fnmatch_lines(lines2)
class LineMatcher(object):
"""Flexible matching of text.
This is a convenience class to test large texts like the output of
commands.
The constructor takes a list of lines without their trailing newlines, i.e.
``text.splitlines()``.
"""
def __init__(self, lines):
self.lines = lines
self._log_output = []
def str(self):
"""Return the entire original text."""
return "\n".join(self.lines)
def _getlines(self, lines2):
if isinstance(lines2, str):
lines2 = Source(lines2)
if isinstance(lines2, Source):
lines2 = lines2.strip().lines
return lines2
def fnmatch_lines_random(self, lines2):
"""Check lines exist in the output using in any order.
Lines are checked using ``fnmatch.fnmatch``. The argument is a list of
lines which have to occur in the output, in any order.
"""
self._match_lines_random(lines2, fnmatch)
def re_match_lines_random(self, lines2):
"""Check lines exist in the output using ``re.match``, in any order.
The argument is a list of lines which have to occur in the output, in
any order.
"""
self._match_lines_random(lines2, lambda name, pat: re.match(pat, name))
def _match_lines_random(self, lines2, match_func):
    """Check lines exist in the output, in any order.

    The argument is a list of lines which have to occur in the output, in
    any order. Each line can contain glob wildcards.
    """
    lines2 = self._getlines(lines2)
    for line in lines2:
        for x in self.lines:
            if line == x or match_func(x, line):
                self._log("matched: ", repr(line))
                break
        else:
            # no captured line satisfied this pattern: record it and fail
            self._log("line %r not found in output" % line)
            raise ValueError(self._log_text)
def get_lines_after(self, fnline):
"""Return all lines following the given line in the text.
The given line can contain glob wildcards.
"""
for i, line in enumerate(self.lines):
if fnline == line or fnmatch(line, fnline):
return self.lines[i + 1 :]
raise ValueError("line %r not found in output" % fnline)
def _log(self, *args):
self._log_output.append(" ".join((str(x) for x in args)))
@property
def _log_text(self):
return "\n".join(self._log_output)
def fnmatch_lines(self, lines2):
"""Search captured text for matching lines using ``fnmatch.fnmatch``.
The argument is a list of lines which have to match and can use glob
wildcards. If they do not match a pytest.fail() is called. The
matches and non-matches are also printed on stdout.
"""
self._match_lines(lines2, fnmatch, "fnmatch")
def re_match_lines(self, lines2):
"""Search captured text for matching lines using ``re.match``.
The argument is a list of lines which have to match using ``re.match``.
If they do not match a pytest.fail() is called.
The matches and non-matches are also printed on stdout.
"""
self._match_lines(lines2, lambda name, pat: re.match(pat, name), "re.match")
def _match_lines(self, lines2, match_func, match_nickname):
"""Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
:param list[str] lines2: list of string patterns to match. The actual
format depends on ``match_func``
:param match_func: a callable ``match_func(line, pattern)`` where line
is the captured line from stdout/stderr and pattern is the matching
pattern
:param str match_nickname: the nickname for the match function that
will be logged to stdout when a match occurs
"""
lines2 = self._getlines(lines2)
lines1 = self.lines[:]
nextline = None
extralines = []
__tracebackhide__ = True
for line in lines2:
nomatchprinted = False
while lines1:
nextline = lines1.pop(0)
if line == nextline:
self._log("exact match:", repr(line))
break
elif match_func(nextline, line):
self._log("%s:" % match_nickname, repr(line))
self._log(" with:", repr(nextline))
break
else:
if not nomatchprinted:
self._log("nomatch:", repr(line))
nomatchprinted = True
self._log(" and:", repr(nextline))
extralines.append(nextline)
else:
self._log("remains unmatched: %r" % (line,))
pytest.fail(self._log_text)
| |
#!/usr/bin/env python
from collections import deque
import numpy as np
import pytest
from landlab import FieldError, HexModelGrid, RasterModelGrid
from landlab.components import (
FlowAccumulator,
FlowDirectorDINF,
FlowDirectorSteepest,
LakeMapperBarnes,
)
from landlab.utils import StablePriorityQueue
"""
These tests test specific aspects of LakeMapperBarnes not picked up in the
various docstrings.
"""
def test_route_to_multiple_error_raised_init():
    """LakeMapperBarnes must refuse to init on a route-to-multiple (MFD) grid."""
    mg = RasterModelGrid((10, 10))
    z = mg.add_zeros("topographic__elevation", at="node")
    z += mg.x_of_node + mg.y_of_node
    fa = FlowAccumulator(mg, flow_director="MFD")
    fa.run_one_step()
    with pytest.raises(NotImplementedError):
        LakeMapperBarnes(mg)
def test_bad_init_method1():
    """An unrecognised fill method name raises ValueError."""
    rmg = RasterModelGrid((5, 5), xy_spacing=2.0)
    rmg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(rmg, flow_director="D8")
    with pytest.raises(ValueError):
        LakeMapperBarnes(rmg, method="Nope")
def test_bad_init_method2():
    """Method names are case-sensitive: lowercase 'd8' is rejected."""
    rmg = RasterModelGrid((5, 5), xy_spacing=2.0)
    rmg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(rmg, flow_director="D8")
    with pytest.raises(ValueError):
        LakeMapperBarnes(rmg, method="d8")
def test_bad_init_gridmethod():
    """The D8 method is invalid on a hex grid and raises ValueError."""
    hmg = HexModelGrid((30, 29), spacing=3.0)
    hmg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(hmg, flow_director="Steepest")
    with pytest.raises(ValueError):
        LakeMapperBarnes(hmg, method="D8")
def test_closed_up_grid():
    """A grid whose perimeter is entirely closed cannot be lake-filled."""
    mg = RasterModelGrid((5, 5))
    for edge in ("left", "right", "top", "bottom"):
        mg.status_at_node[mg.nodes_at_edge(edge)] = mg.BC_NODE_IS_CLOSED
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(mg, flow_director="D8")
    with pytest.raises(ValueError):
        LakeMapperBarnes(mg)
def test_neighbor_shaping_no_fldir():
    """Requesting flow redirection without flow fields present raises FieldError.

    Note no FlowAccumulator is created here, so the flow-director fields
    the component needs are absent.
    """
    mg = RasterModelGrid((5, 5))
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    with pytest.raises(FieldError):
        LakeMapperBarnes(mg, method="D8", redirect_flow_steepest_descent=True)
def test_neighbor_shaping_no_creation():
    """Without redirection, the neighbor arrays are never built."""
    mg = RasterModelGrid((5, 5))
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(mg)
    lmb = LakeMapperBarnes(mg, method="D8", redirect_flow_steepest_descent=False)
    with pytest.raises(AttributeError):
        lmb._neighbor_arrays
def test_neighbor_shaping_D8():
    """D8 redirection builds two neighbor/link array pairs (orthogonal + diagonal)."""
    mg = RasterModelGrid((5, 5))
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(mg)
    lmb = LakeMapperBarnes(mg, method="D8", redirect_flow_steepest_descent=True)
    for arr in (lmb._neighbor_arrays, lmb._link_arrays):
        assert len(arr) == 2
        assert arr[0].shape == (25, 4)
        assert arr[1].shape == (25, 4)
    assert len(lmb._neighbor_lengths) == mg.number_of_d8
def test_neighbor_shaping_D4():
    """Steepest (D4) redirection builds a single orthogonal neighbor array."""
    mg = RasterModelGrid((5, 5))
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(mg)
    lmb = LakeMapperBarnes(mg, method="Steepest", redirect_flow_steepest_descent=True)
    for arr in (lmb._neighbor_arrays, lmb._link_arrays):
        assert len(arr) == 1
        assert arr[0].shape == (25, 4)
    assert len(lmb._neighbor_lengths) == mg.number_of_links
def test_neighbor_shaping_hex():
    """On a hex grid, the neighbor array has six columns (six neighbors/node)."""
    hmg = HexModelGrid((6, 5), spacing=1.0)
    hmg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(hmg)
    lmb = LakeMapperBarnes(hmg, redirect_flow_steepest_descent=True)
    for arr in (lmb._neighbor_arrays, lmb._link_arrays):
        assert len(arr) == 1
        assert arr[0].shape == (hmg.number_of_nodes, 6)
    assert len(lmb._neighbor_lengths) == hmg.number_of_links
def test_accum_wo_reroute():
    """Reaccumulating flow without also redirecting it is inconsistent -> ValueError."""
    mg = RasterModelGrid((5, 5))
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(mg)
    with pytest.raises(ValueError):
        LakeMapperBarnes(
            mg,
            method="Steepest",
            redirect_flow_steepest_descent=False,
            reaccumulate_flow=True,
        )
def test_redirect_no_lakes():
    """Redirecting flow requires lake tracking; track_lakes=False -> ValueError."""
    mg = RasterModelGrid((5, 5))
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(mg)
    with pytest.raises(ValueError):
        LakeMapperBarnes(
            mg, method="D8", track_lakes=False, redirect_flow_steepest_descent=True
        )
def test_route_to_many():
    """A DINF (route-to-many) receiver field blocks redirection -> NotImplementedError."""
    mg = RasterModelGrid((5, 5))
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    fd = FlowDirectorDINF(mg, "topographic__elevation")
    _ = FlowAccumulator(mg)
    fd.run_one_step()
    # DINF produces two receivers per node, hence the (n_nodes, 2) shape.
    assert mg.at_node["flow__receiver_node"].shape == (mg.number_of_nodes, 2)
    with pytest.raises(NotImplementedError):
        LakeMapperBarnes(mg, method="D8", redirect_flow_steepest_descent=True)
def test_permitted_overfill():
    """Slant-filling tolerates tiny float overfill without raising.

    Drives ``_fill_one_node_to_slant`` directly over a one-row channel
    until the queues are exhausted (signalled by KeyError).
    """
    mg = RasterModelGrid((3, 7))
    for edge in ("top", "right", "bottom"):
        mg.status_at_node[mg.nodes_at_edge(edge)] = mg.BC_NODE_IS_CLOSED
    z = mg.add_zeros("topographic__elevation", at="node", dtype=float)
    z.reshape(mg.shape)[1, 1:-1] = [1.0, 0.2, 0.1, 1.0000000000000004, 1.5]
    _ = FlowAccumulator(mg)
    lmb = LakeMapperBarnes(mg, method="Steepest")
    lmb._closed = mg.zeros("node", dtype=bool)
    lmb._closed[mg.status_at_node == mg.BC_NODE_IS_CLOSED] = True
    # Named "openq" (matching test_no_reroute) rather than "open", which
    # previously shadowed the builtin open().
    openq = StablePriorityQueue()
    edges = np.array([7])
    for edgenode in edges:
        openq.add_task(edgenode, priority=z[edgenode])
    lmb._closed[edges] = True
    while True:
        try:
            lmb._fill_one_node_to_slant(
                z, mg.adjacent_nodes_at_node, lmb._pit, openq, lmb._closed, True
            )
        except KeyError:
            break
def test_no_reroute():
    """_redirect_flowdirs keeps correct receivers for two one-node lakes."""
    mg = RasterModelGrid((5, 5), xy_spacing=2.0)
    z = mg.add_zeros("topographic__elevation", at="node", dtype=float)
    # Two depressions: lake at node 6 draining via 1, lake at 17 via 18/19.
    z[1] = -1.0
    z[6] = -2.0
    z[19] = -2.0
    z[18] = -1.0
    z[17] = -3.0
    fd = FlowDirectorSteepest(mg)
    fa = FlowAccumulator(mg)
    lmb = LakeMapperBarnes(
        mg,
        method="Steepest",
        fill_flat=True,
        redirect_flow_steepest_descent=True,
        track_lakes=True,
    )
    openq = StablePriorityQueue()
    # Map each lake outlet node to the deque of nodes inside its lake.
    lake_dict = {1: deque([6]), 18: deque([17])}
    fd.run_one_step()  # fill the director fields
    fa.run_one_step()  # get a drainage_area
    orig_surf = lmb._track_original_surface()
    lmb._redirect_flowdirs(orig_surf, lake_dict, openq)
    assert mg.at_node["flow__receiver_node"][6] == 1
    assert mg.at_node["flow__receiver_node"][17] == 18
    assert mg.at_node["flow__receiver_node"][18] == 19
| |
# Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved. Contact: Pasi Eronen <pasi.eronen@nokia.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Nokia Corporation and/or its
# subsidiary(-ies) nor the names of its contributors may be used
# to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
from django import template
from django.core.urlresolvers import reverse as urlreverse
from django.conf import settings
from django.db.models import Q
from django.utils.safestring import mark_safe
from ietf.ietfauth.utils import user_is_person, has_role
from ietf.doc.models import BallotDocEvent, BallotPositionDocEvent, IESG_BALLOT_ACTIVE_STATES, IESG_SUBSTATE_TAGS
register = template.Library()
def render_ballot_icon(user, doc):
    """Render the IESG ballot icon for ``doc`` as an HTML ``<a>``-wrapped
    table of position cells, or an empty string when the document is not
    in an active ballot state or has no active ballot."""
    if not doc:
        return ""

    # FIXME: temporary backwards-compatibility hack
    from ietf.doc.models import Document
    if not isinstance(doc, Document):
        doc = doc._draft

    # Only documents currently under IESG evaluation get an icon.
    if doc.type_id == "draft":
        if doc.get_state_slug("draft-iesg") not in IESG_BALLOT_ACTIVE_STATES:
            return ""
    elif doc.type_id == "charter":
        if doc.get_state_slug() not in ("intrev", "iesgrev"):
            return ""
    elif doc.type_id == "conflrev":
        if doc.get_state_slug() not in ("iesgeval","defer"):
            return ""
    elif doc.type_id == "statchg":
        if doc.get_state_slug() not in ("iesgeval","defer"):
            return ""

    ballot = doc.active_ballot()
    if not ballot:
        return ""

    def sort_key(item):
        # Blocking positions first, then non-blocking, then "no record".
        _, position = item
        if not position:
            return (2, 0)
        group = 0 if position.pos.blocking else 1
        return (group, position.pos.order)

    positions = sorted(doc.active_ballot().active_ad_positions().items(),
                       key=sort_key)

    edit_position_url = ""
    if has_role(user, "Area Director"):
        edit_position_url = urlreverse('ietf.idrfc.views_ballot.edit_position', kwargs=dict(name=doc.name, ballot_id=ballot.pk))

    title = "IESG positions (click to show more%s)" % (", right-click to edit position" if edit_position_url else "")

    # One <td> per AD, five cells per table row.
    cells = []
    for i, (ad, position) in enumerate(positions):
        if i > 0 and i % 5 == 0:
            cells.append("</tr>")
            cells.append("<tr>")
        css_class = "position-%s" % (position.pos.slug if position else "norecord")
        if user_is_person(user, ad):
            css_class += " my"
        cells.append('<td class="%s" />' % css_class)

    html = ['<a href="%s" data-popup="%s" data-edit="%s" title="%s" class="ballot-icon"><table>' % (
        urlreverse("doc_ballot", kwargs=dict(name=doc.name, ballot_id=ballot.pk)),
        urlreverse("ietf.doc.views_doc.ballot_popup", kwargs=dict(name=doc.name, ballot_id=ballot.pk)),
        edit_position_url,
        title
    )]
    html.append("<tr>")
    html.extend(cells)
    html.append("</tr>")
    html.append("</table></a>")
    return "".join(html)
class BallotIconNode(template.Node):
    """Template node for the ``ballot_icon`` tag.

    Stores the name of the template variable holding the document and,
    at render time, resolves it and delegates to render_ballot_icon().
    """

    def __init__(self, doc_var):
        self.doc_var = doc_var

    def render(self, context):
        """Resolve the document variable from the context and render."""
        document = template.resolve_variable(self.doc_var, context)
        return render_ballot_icon(context.get("user"), document)
def do_ballot_icon(parser, token):
    """Compile function for the ``{% ballot_icon doc %}`` template tag.

    Expects exactly one argument (the name of the template variable that
    holds the document) after the tag name; anything else raises a
    TemplateSyntaxError.  NOTE: uses the Python 2 ``raise Class, value``
    statement syntax, so this module is Python 2 only.
    """
    try:
        tag_name, doc_name = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError, "%r tag requires exactly two arguments" % token.contents.split()[0]
    return BallotIconNode(doc_name)
# register the tag with this module's template library
register.tag('ballot_icon', do_ballot_icon)
@register.filter
def my_position(doc, user):
    """Template filter: the user's most recent ballot position name on
    ``doc`` ("No Record" when none exists); None for non-ADs."""
    if not has_role(user, "Area Director"):
        return None
    # FIXME: temporary backwards-compatibility hack
    from ietf.doc.models import Document
    if not isinstance(doc, Document):
        doc = doc._draft
    ballot = doc.active_ballot()
    if not ballot:
        return "No Record"
    latest = doc.latest_event(BallotPositionDocEvent,
                              type="changed_ballot_position",
                              ad__user=user, ballot=ballot)
    if latest:
        return latest.pos.name
    return "No Record"
@register.filter()
def state_age_colored(doc):
    """Template filter: render how long a draft has been in its current
    IESG state, colour coded against per-state goals.  Returns "" for
    non-drafts and for states where the age is not interesting."""
    # FIXME: temporary backwards-compatibility hack
    from ietf.doc.models import Document
    if not isinstance(doc, Document):
        doc = doc._draft

    if doc.type_id != 'draft':
        return ""
    if doc.get_state_slug() not in ["active", "rfc"]:
        # Don't show anything for expired/withdrawn/replaced drafts
        return ""
    iesg_state = doc.get_state_slug('draft-iesg')
    if not iesg_state:
        return ""
    if iesg_state in ["dead", "watching", "pub"]:
        return ""

    # Find when the document entered its current state by matching the
    # free-text descriptions of historical state-change events.
    try:
        state_date = doc.docevent_set.filter(
            Q(desc__istartswith="Draft Added by ")|
            Q(desc__istartswith="Draft Added in state ")|
            Q(desc__istartswith="Draft added in state ")|
            Q(desc__istartswith="State changed to ")|
            Q(desc__istartswith="State Changes to ")|
            Q(desc__istartswith="Sub state has been changed to ")|
            Q(desc__istartswith="State has been changed to ")|
            Q(desc__istartswith="IESG has approved and state has been changed to")|
            Q(desc__istartswith="IESG process started in state")
            ).order_by('-time')[0].time.date()
    except IndexError:
        state_date = datetime.date(1990, 1, 1)
    days_in_state = (datetime.date.today() - state_date).days

    # loosely based on
    # http://trac.tools.ietf.org/group/iesg/trac/wiki/PublishPath
    # NOTE: the 'need-rev' tag check sits in the middle of the chain on
    # purpose -- it only applies to states not matched above it.
    if iesg_state == "lc":
        goal1, goal2 = 30, 30
    elif iesg_state == "rfcqueue":
        goal1, goal2 = 60, 120
    elif iesg_state in ["lc-req", "ann"]:
        goal1, goal2 = 4, 7
    elif 'need-rev' in [x.slug for x in doc.tags.all()]:
        goal1, goal2 = 14, 28
    elif iesg_state == "pub-req":
        goal1, goal2 = 7, 14
    elif iesg_state == "ad-eval":
        goal1, goal2 = 14, 28
    else:
        goal1, goal2 = 14, 28

    if days_in_state > goal2:
        class_name = "ietf-small ietf-highlight-r"
    elif days_in_state > goal1:
        class_name = "ietf-small ietf-highlight-y"
    else:
        class_name = "ietf-small"

    title = ' title="Goal is <%d days"' % (goal1,) if days_in_state > goal1 else ''

    return mark_safe('<span class="%s"%s>(for %d day%s)</span>' % (
        class_name, title, days_in_state, 's' if days_in_state != 1 else ''))
| |
"""
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Author: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import array2d, as_float_array, check_random_state
__all__ = ['fastica', 'FastICA']
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W
Parameters
----------
w: array of shape(n), to be orthogonalized
W: array of shape(p, n), null space definition
j: int < p
caveats
-------
assumes that W is orthogonal
w changed in place
"""
w -= np.dot(np.dot(w, W[:j].T), W[:j])
return w
def _sym_decorrelation(W):
""" Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
K = np.dot(W, W.T)
s, u = linalg.eigh(K)
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
W = np.dot(np.dot(np.dot(u, np.diag(1.0 / np.sqrt(s))), u.T), W)
return W
def _ica_def(X, tol, g, gprime, fun_args, max_iter, w_init):
    """Deflationary FastICA: extract the components one at a time.

    Used internally by FastICA.  ``g`` is the nonlinearity used in the
    neg-entropy approximation; it either returns a (g(x), g'(x)) tuple,
    or the (deprecated) separate ``gprime`` supplies the derivative.
    """
    n_components = w_init.shape[0]
    W = np.zeros((n_components, n_components), dtype=float)

    for comp in range(n_components):
        # Start from the normalized initial guess for this component.
        w = w_init[comp, :].copy()
        w /= np.sqrt(np.sum(w ** 2))

        lim = tol + 1  # guarantees at least one pass through the loop
        n_iter = 0
        while lim > tol and n_iter < max_iter - 1:
            wtx = np.dot(w.T, X)
            res = g(wtx, fun_args)
            if isinstance(res, tuple):
                gwtx, g_wtx = res
            else:
                if not callable(gprime):
                    raise ValueError('The function supplied does not return a '
                                     'tuple. Therefore fun_prime has to be a '
                                     'function, not %s' % str(type(gprime)))
                warnings.warn("Passing g and gprime separately is deprecated "
                              "and will be removed in 0.14.",
                              DeprecationWarning, stacklevel=2)
                gwtx = res
                g_wtx = gprime(wtx, fun_args)

            # Fixed-point update followed by Gram-Schmidt decorrelation
            # against the already-extracted rows, then renormalization.
            w_new = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
            _gs_decorrelation(w_new, W, comp)
            w_new /= np.sqrt(np.sum(w_new ** 2))

            # Convergence: |<w_new, w>| approaches 1 up to a sign flip.
            lim = np.abs(np.abs(np.sum(w_new * w)) - 1)
            w = w_new
            n_iter += 1

        W[comp, :] = w
    return W
def _ica_par(X, tol, g, gprime, fun_args, max_iter, w_init):
    """Parallel FastICA: update all components simultaneously.

    Used internally by FastICA -- main loop.
    """
    n, p = X.shape
    W = _sym_decorrelation(w_init)

    lim = tol + 1  # guarantees at least one pass through the loop
    n_iter = 0
    while lim > tol and n_iter < max_iter - 1:
        wtx = np.dot(W, X)
        res = g(wtx, fun_args)
        if isinstance(res, tuple):
            gwtx, g_wtx = res
        else:
            if not callable(gprime):
                raise ValueError('The function supplied does not return a '
                                 'tuple. Therefore fun_prime has to be a '
                                 'function, not %s' % str(type(gprime)))
            warnings.warn("Passing g and gprime separately is deprecated "
                          "and will be removed in 0.14.",
                          DeprecationWarning, stacklevel=2)
            gwtx = res
            g_wtx = gprime(wtx, fun_args)

        # Fixed-point update for all rows at once, then symmetric
        # decorrelation to keep the rows orthonormal.
        W_new = (np.dot(gwtx, X.T) / float(p)
                 - np.dot(np.diag(g_wtx.mean(axis=1)), W))
        W_new = _sym_decorrelation(W_new)

        # Convergence: the diagonal of W_new W^T approaches +/-1.
        lim = max(abs(abs(np.diag(np.dot(W_new, W.T))) - 1))
        W = W_new
        n_iter += 1
    return W
def fastica(X, n_components=None, algorithm="parallel", whiten=True,
            fun="logcosh", fun_prime='', fun_args=None, max_iter=200,
            tol=1e-04, w_init=None, random_state=None):
    """Perform Fast Independent Component Analysis.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    n_components : int, optional
        Number of components to extract. If None no dimension reduction
        is performed.
    algorithm : {'parallel', 'deflation'}, optional
        Apply a parallel or deflational FASTICA algorithm.
    whiten : boolean, optional
        If True perform an initial whitening of the data.
        If False, the data is assumed to have already been
        preprocessed: it should be centered, normed and white.
        Otherwise you will get incorrect results.
        In this case the parameter n_components will be ignored.
    fun : string or function, optional. Default: 'logcosh'
        The functional form of the G function used in the
        approximation to neg-entropy. Could be either 'logcosh', 'exp',
        or 'cube'.
        You can also provide your own function. It should return a tuple
        containing the value of the function, and of its derivative, in
        the point. Example::

            def my_g(x):
                return x ** 3, 3 * x ** 2

        Supplying the derivative through the `fun_prime` attribute is
        still supported, but deprecated.
    fun_prime : empty string ('') or function, optional, deprecated.
        See fun.
    fun_args : dictionary, optional
        Arguments to send to the functional form.
        If None (default) and fun='logcosh', the effective value is
        {'alpha' : 1.0}.
    max_iter : int, optional
        Maximum number of iterations to perform.
    tol : float, optional
        A positive scalar giving the tolerance at which the
        un-mixing matrix is considered to have converged.
    w_init : (n_components, n_components) array, optional
        Initial un-mixing array of dimension (n.comp, n.comp).
        If None (default) then an array of normal r.v.'s is used.
    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    Returns
    -------
    K : (n_components, p) array or None.
        If whiten is 'True', K is the pre-whitening matrix that projects
        data onto the first n.comp principal components. If whiten is
        'False', K is 'None'.
    W : (n_components, n_components) array
        Estimated un-mixing matrix.
        The mixing matrix can be obtained by::

            w = np.dot(W, K.T)
            A = w.T * (w * w.T).I

    S : (n_components, n) array
        Estimated source matrix.

    Raises
    ------
    ValueError
        If ``alpha`` is outside [1, 2], ``fun`` is not a known string or
        callable, ``w_init`` has the wrong shape, or ``algorithm`` is
        unknown.

    Notes
    -----
    The data matrix X is considered to be a linear combination of
    non-Gaussian (independent) components i.e. X = AS where columns of S
    contain the independent components and A is a linear mixing
    matrix. In short ICA attempts to `un-mix' the data by estimating an
    un-mixing matrix W where ``S = W K X.``

    This implementation was originally made for data of shape
    [n_features, n_samples]. Now the input is transposed
    before the algorithm is applied. This makes it slightly
    faster for Fortran-ordered input.

    Implemented using FastICA:
    `A. Hyvarinen and E. Oja, Independent Component Analysis:
    Algorithms and Applications, Neural Networks, 13(4-5), 2000,
    pp. 411-430`
    """
    random_state = check_random_state(random_state)

    # Fixed: `fun_args` previously defaulted to a shared mutable dict
    # ({}); use a None sentinel and create a fresh dict per call.
    if fun_args is None:
        fun_args = {}

    # make interface compatible with other decompositions
    X = array2d(X).T

    alpha = fun_args.get('alpha', 1.0)
    if (alpha < 1) or (alpha > 2):
        raise ValueError("alpha must be in [1,2]")

    gprime = None
    if isinstance(fun, str):
        # Some standard nonlinear functions
        # XXX: these should be optimized, as they can be a bottleneck.
        if fun == 'logcosh':
            def g(x, fun_args):
                alpha = fun_args.get('alpha', 1.0)
                gx = np.tanh(alpha * x)
                g_x = alpha * (1 - gx ** 2)
                return gx, g_x
        elif fun == 'exp':
            def g(x, fun_args):
                exp = np.exp(-(x ** 2) / 2)
                gx = x * exp
                g_x = (1 - x ** 2) * exp
                return gx, g_x
        elif fun == 'cube':
            def g(x, fun_args):
                return x ** 3, 3 * x ** 2
        else:
            raise ValueError('fun argument should be one of logcosh, exp or'
                             ' cube')
    elif callable(fun):
        def g(x, fun_args):
            return fun(x, **fun_args)
        if callable(fun_prime):
            # deprecated path: derivative supplied separately
            def gprime(x, fun_args):
                return fun_prime(x, **fun_args)
    else:
        raise ValueError('fun argument should be either a string '
                         '(one of logcosh, exp or cube) or a function')

    n, p = X.shape

    if not whiten and n_components is not None:
        n_components = None
        warnings.warn('Ignoring n_components with whiten=False.')

    if n_components is None:
        n_components = min(n, p)
    if (n_components > min(n, p)):
        n_components = min(n, p)
        print("n_components is too large: it will be set to %s" % n_components)

    if whiten:
        # Centering the columns (ie the variables)
        X = X - X.mean(axis=-1)[:, np.newaxis]

        # Whitening and preprocessing by PCA
        u, d, _ = linalg.svd(X, full_matrices=False)
        del _
        K = (u / d).T[:n_components]  # see (6.33) p.140
        del u, d
        X1 = np.dot(K, X)
        # see (13.6) p.267 Here X1 is white and data
        # in X has been projected onto a subspace by PCA
        X1 *= np.sqrt(p)
    else:
        # X must be casted to floats to avoid typing issues with numpy
        # 2.0 and the line below
        X1 = as_float_array(X, copy=True)

    if w_init is None:
        w_init = random_state.normal(size=(n_components, n_components))
    else:
        w_init = np.asarray(w_init)
        if w_init.shape != (n_components, n_components):
            raise ValueError("w_init has invalid shape -- should be %(shape)s"
                             % {'shape': (n_components, n_components)})

    kwargs = {'tol': tol,
              'g': g,
              'gprime': gprime,
              'fun_args': fun_args,
              'max_iter': max_iter,
              'w_init': w_init}

    if algorithm == 'parallel':
        W = _ica_par(X1, **kwargs)
    elif algorithm == 'deflation':
        W = _ica_def(X1, **kwargs)
    else:
        raise ValueError('Invalid algorithm: must be either `parallel` or' +
                         ' `deflation`.')
    del X1

    if whiten:
        S = np.dot(np.dot(W, K), X)
        return K, W, S.T
    else:
        S = np.dot(W, X)
        return None, W, S.T
class FastICA(BaseEstimator, TransformerMixin):
    """FastICA: a fast algorithm for Independent Component Analysis.

    Parameters
    ----------
    n_components : int, optional
        Number of components to use. If none is passed, all are used.
    algorithm : {'parallel', 'deflation'}
        Apply parallel or deflational algorithm for FastICA.
    whiten : boolean, optional
        If whiten is false, the data is already considered to be
        whitened, and no whitening is performed.
    fun : string or function, optional. Default: 'logcosh'
        The functional form of the G function used in the approximation
        to neg-entropy: 'logcosh', 'exp', 'cube', or a callable that
        returns a tuple of the function value and its derivative at the
        point, e.g.::

            def my_g(x):
                return x ** 3, 3 * x ** 2

        Supplying the derivative through the `fun_prime` attribute is
        still supported, but deprecated.
    fun_prime : empty string ('') or function, optional, deprecated.
        See fun.
    fun_args : dictionary, optional
        Arguments to send to the functional form. If empty and if
        fun='logcosh', fun_args will take value {'alpha' : 1.0}.
    max_iter : int, optional
        Maximum number of iterations during fit.
    tol : float, optional
        Tolerance on update at each iteration.
    w_init : None or an (n_components, n_components) ndarray
        The mixing matrix to be used to initialize the algorithm.
    random_state : int or RandomState
        Pseudo number generator state used for random sampling.

    Attributes
    ----------
    `components_` : 2D array, [n_components, n_features]
        The unmixing matrix.
    `sources_` : 2D array, [n_samples, n_components]
        The estimated latent sources of the data.

    Notes
    -----
    Implementation based on
    `A. Hyvarinen and E. Oja, Independent Component Analysis:
    Algorithms and Applications, Neural Networks, 13(4-5), 2000,
    pp. 411-430`
    """

    def __init__(self, n_components=None, algorithm='parallel', whiten=True,
                 fun='logcosh', fun_prime='', fun_args=None, max_iter=200,
                 tol=1e-4, w_init=None, random_state=None):
        super(FastICA, self).__init__()
        self.n_components = n_components
        self.algorithm = algorithm
        self.whiten = whiten
        self.fun = fun
        self.fun_prime = fun_prime
        self.fun_args = fun_args
        self.max_iter = max_iter
        self.tol = tol
        self.w_init = w_init
        self.random_state = random_state

    def fit(self, X, y=None):
        """Estimate the unmixing matrix from X and store the sources."""
        fun_args = self.fun_args if self.fun_args is not None else {}
        whitening_, unmixing_, sources_ = fastica(
            X, self.n_components, self.algorithm, self.whiten, self.fun,
            self.fun_prime, fun_args, self.max_iter, self.tol, self.w_init,
            random_state=self.random_state)
        # Fold the whitening step into components_ so that transform()
        # is a single matrix product.
        if self.whiten:
            self.components_ = np.dot(unmixing_, whitening_)
        else:
            self.components_ = unmixing_
        self.sources_ = sources_
        return self

    def transform(self, X, y=None):
        """Apply un-mixing matrix "W" to X to recover the sources.

        S = X * W.T
        """
        data = array2d(X)
        return np.dot(data, self.components_.T)

    def get_mixing_matrix(self):
        """Compute the mixing matrix (pseudo-inverse of components_)."""
        return linalg.pinv(self.components_)
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Erkan Ozgur Yilmaz
#
# This module is part of oyProjectManager and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import os
import logging
logger = logging.getLogger(__name__)
class Config(object):
"""Config abstraction
Idea is coming from Sphinx config.
Holds system wide configuration variables. See
`configuring oyProjectManager`_ for more detail.
.. _configuring oyProjectManager: ../configure.html
"""
default_config_values = dict(
database_url="sqlite:///$OYPROJECTMANAGER_PATH/project_manager.db",
status_list=[
'WTS',
'WIP',
'REV',
'APP',
'CMP'
],
status_list_long_names=[
'Waiting To Start',
'Work In Progress',
'For Review',
'Approved',
'Completed'
],
status_bg_colors=[
(192, 80, 77), #WTS
(255, 192, 0), #WIP
( 89, 141, 213), #REV
(155, 187, 89), #APP
(155, 187, 89), #CMP
],
status_fg_colors=[
(255, 255, 255), #WTS
( 0, 0, 0), #WIP
( 0, 0, 0), #REV
( 0, 0, 0), #APP
( 0, 0, 0), #CMP
],
sequence_format="%h%p%t %R",
shot_number_prefix="SH",
shot_number_padding=3,
rev_number_prefix="r",
rev_number_padding=2,
ver_number_prefix="v",
ver_number_padding=3,
default_fps=25,
default_asset_type_name="Generic",
default_take_name="Main",
users_data=[{"name": "Administrator", "initials": "adm"}],
# just use one repository for now
repository_env_key="REPO",
repository={
"name": "Default",
"windows_path": "~/Projects",
"linux_path": "~/Projects",
"osx_path": "~/Projects"
},
file_size_format="%.2f MB",
time_format='%d.%m.%Y %H:%M',
environments=[
{
"name": "Maya",
"extensions": ["ma", "mb"]
},
{
"name": "Houdini",
"extensions": ["hip"]
},
{
"name": "Nuke",
"extensions": ["nk"],
},
{
"name": "Photoshop",
"extensions": ["psd", "pdd"],
"export_extensions": ["tif", "tga", "bmp", "jpg", "iff"],
},
{
"name": "3DEqualizer",
"extensions": ["3te"]
},
{
"name": "Fusion",
"extensions": ["comp"]
}
],
resolution_presets={
"PC Video": [640, 480, 1.0],
"NTSC": [720, 486, 0.91],
"NTSC 16:9": [720, 486, 1.21],
"PAL": [720, 576, 1.067],
"PAL 16:9": [720, 576, 1.46],
"HD 720": [1280, 720, 1.0],
"HD 1080": [1920, 1080, 1.0],
"1K Super 35": [1024, 778, 1.0],
"2K Super 35": [2048, 1556, 1.0],
"4K Super 35": [4096, 3112, 1.0],
"A4 Portrait": [2480, 3508, 1.0],
"A4 Landscape": [3508, 2480, 1.0],
"A3 Portrait": [3508, 4960, 1.0],
"A3 Landscape": [4960, 3508, 1.0],
"A2 Portrait": [4960, 7016, 1.0],
"A2 Landscape": [7016, 4960, 1.0],
"50x70cm Poster Portrait": [5905, 8268, 1.0],
"50x70cm Poster Landscape": [8268, 5905, 1.0],
"70x100cm Poster Portrait": [8268, 11810, 1.0],
"70x100cm Poster Landscape": [11810, 8268, 1.0],
"1k Square": [1024, 1024, 1.0],
"2k Square": [2048, 2048, 1.0],
"3k Square": [3072, 3072, 1.0],
"4k Square": [4096, 4096, 1.0],
},
default_resolution_preset="HD 1080",
project_structure="""{% for sequence in project.sequences %}
{% set seq_path = project.full_path + '/Sequences/' + sequence.code %}
{{seq_path}}/Edit/Offline
{{seq_path}}/Edit/Sound
{{seq_path}}/References/Artworks
{{seq_path}}/References/Text/Scenario
{{seq_path}}/References/Text/Brief
{{seq_path}}/References/Photos_Images
{{seq_path}}/References/Videos
{{seq_path}}/References/Others
{{seq_path}}/References/Storyboard
{% for shot in sequence.shots %}
{{seq_path}}/Shots/{{shot.code}}
{{seq_path}}/Shots/{{shot.code}}/Plate
{{seq_path}}/Shots/{{shot.code}}/Reference
{{seq_path}}/Shots/{{shot.code}}/Texture
{% endfor %}
{% endfor %}
{% for asset in project.assets%}
{% set asset_path = project.full_path + '/Assets/' + asset.type + '/' + asset.code %}
{{asset_path}}/Texture
{{asset_path}}/Reference
{% endfor %}
""",
asset_thumbnail_path="{{project.code}}/Assets/{{asset.type}}/{{asset.code}}/Thumbnail",
asset_thumbnail_filename="{{asset.code}}_thumbnail.{{extension}}",
shot_thumbnail_path="{{project.code}}/Sequences/{{sequence.code}}/Shots/{{shot.code}}/Thumbnail",
shot_thumbnail_filename="{{shot.code}}_thumbnail.{{extension}}",
thumbnail_format="jpg",
thumbnail_quality=70,
thumbnail_size=[320, 180],
version_types=[
{
"name": "Animation",
"code": "Anim",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Shot"
},
{
"name": "Camera",
"code": "Cam",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Shot"
},
{
"name": "Composition",
"code": "Comp",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}/v{{'%03d'|format(version.version_number)}}",
"extra_folders": "{{version.path}}/Elements",
"environments": ["Nuke", "Fusion"],
"type_for": "Shot"
},
# {
# "name": "Edit",
# "code": "Edit",
# "path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
# "filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
# "output_path": "{{version._path}}/Output/{{version.take_name}}",
# "extra_folders": "",
# "environments": ["Nuke", "Fusion"],
# "type_for": "Shot"
# },
{
"name": "FX",
"code": "FX",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": """{{version.path}}/anim
{{version.path}}/cache
{{version.path}}/exports""",
"environments": ["Maya", "Houdini"],
"type_for": "Shot"
},
{
"name": "Model",
"code": "Model",
"path": "{{project.code}}/Assets/{{asset.type}}/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Asset"
},
{
"name": "Other",
"code": "Other",
"path": "{{project.code}}/Assets/{{asset.type}}/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini", "Nuke", "Fusion",
"Photoshop"],
"type_for": "Asset"
},
{
"name": "Previs",
"code": "Previs",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Shot"
},
{
"name": "Lighting",
"code": "Lighting",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Shot"
},
{
"name": "Rig",
"code": "Rig",
"path": "{{project.code}}/Assets/{{asset.type}}/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Asset"
},
{
"name": "Roto",
"code": "Roto",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Nuke", "Fusion"],
"type_for": "Shot"
},
{
"name": "Layout",
"code": "Layout",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Shot"
},
{
"name": "Matte",
"code": "Matte",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Photoshop"],
"type_for": "Shot"
},
{
"name": "Texture",
"code": "Texture",
"path": "{{project.code}}/Assets/{{asset.type}}/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Photoshop", "Nuke", "Fusion"],
"type_for": "Asset",
},
{
"name": "Illustration",
"code": "Illust",
"path": "{{project.code}}/Assets/{{asset.type}}/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Photoshop"],
"type_for": "Asset"
},
{
"name": "Look Development",
"code": "LookDev",
"path": "{{project.code}}/Assets/{{asset.type}}/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["Maya", "Houdini"],
"type_for": "Asset"
},
{
"name": "Match Move",
"code": "MM",
"path": "{{project.code}}/Sequences/{{sequence.code}}/Shots/{{version.base_name}}/{{type.code}}",
"filename": "{{version.base_name}}_{{version.take_name}}_{{type.code}}_v{{'%03d'|format(version.version_number)}}{{version.extension}}",
"output_path": "{{version._path}}/Output/{{version.take_name}}",
"extra_folders": "",
"environments": ["3DEqualizer"],
"type_for": "Shot"
}
],
maya_workspace_file_content="""workspace -fr "3dPaintTextures" ".mayaFiles/sourceimages/3dPaintTextures/";
workspace -fr "Adobe(R) Illustrator(R)" ".mayaFiles/data/";
workspace -fr "aliasWire" ".mayaFiles/data/";
workspace -fr "animImport" ".mayaFiles/data/";
workspace -fr "animExport" ".mayaFiles/data/";
workspace -fr "audio" ".mayaFiles/sound/";
workspace -fr "autoSave" ".mayaFiles/autosave/";
workspace -fr "clips" ".mayaFiles/clips/";
workspace -fr "DAE_FBX" ".mayaFiles/data/";
workspace -fr "DAE_FBX export" ".mayaFiles/data/";
workspace -fr "depth" ".mayaFiles/renderData/depth/";
workspace -fr "diskCache" ".mayaFiles/cache/";
workspace -fr "DXF" ".mayaFiles/data/";
workspace -fr "DXF export" ".mayaFiles/data/";
workspace -fr "DXF_FBX" ".mayaFiles/data/";
workspace -fr "DXF_FBX export" ".mayaFiles/data/";
workspace -fr "eps" ".mayaFiles/data/";
workspace -fr "EPS" ".mayaFiles/data/";
workspace -fr "FBX" ".mayaFiles/data/";
workspace -fr "FBX export" ".mayaFiles/data/";
workspace -fr "fluidCache" ".mayaFiles/cache/fluid/";
workspace -fr "furAttrMap" ".mayaFiles/renderData/fur/furAttrMap/";
workspace -fr "furEqualMap" ".mayaFiles/renderData/fur/furEqualMap/";
workspace -fr "furFiles" ".mayaFiles/renderData/fur/furFiles/";
workspace -fr "furImages" ".mayaFiles/renderData/fur/furImages/";
workspace -fr "furShadowMap" ".mayaFiles/renderData/fur/furShadowMap/";
workspace -fr "IGES" ".mayaFiles/data/";
workspace -fr "IGESexport" ".mayaFiles/data/";
workspace -fr "illustrator" ".mayaFiles/data/";
workspace -fr "image" ".mayaFiles/images/";
workspace -fr "images" ".mayaFiles/images/";
workspace -fr "iprImages" ".mayaFiles/renderData/iprImages/";
workspace -fr "lights" ".mayaFiles/renderData/shaders/";
workspace -fr "mayaAscii" ".mayaFiles/scenes/";
workspace -fr "mayaBinary" ".mayaFiles/scenes/";
workspace -fr "mel" ".mayaFiles/scripts/";
workspace -fr "mentalray" ".mayaFiles/renderData/mentalray/";
workspace -fr "mentalRay" ".mayaFiles/renderData/mentalray";
workspace -fr "move" ".mayaFiles/data/";
workspace -fr "movie" ".mayaFiles/movies/";
workspace -fr "OBJ" ".mayaFiles/data/";
workspace -fr "OBJexport" ".mayaFiles/data/";
workspace -fr "offlineEdit" ".mayaFiles/scenes/edits/";
workspace -fr "particles" ".mayaFiles/particles/";
workspace -fr "renderData" ".mayaFiles/renderData/";
workspace -fr "renderScenes" ".mayaFiles/scenes/";
workspace -fr "RIB" ".mayaFiles/data/";
workspace -fr "RIBexport" ".mayaFiles/data/";
workspace -fr "scene" ".mayaFiles/scenes/";
workspace -fr "scripts" ".mayaFiles/scripts/";
workspace -fr "shaders" ".mayaFiles/renderData/shaders/";
workspace -fr "sound" ".mayaFiles/sound/";
workspace -fr "sourceImages" ".mayaFiles/sourceimages/";
workspace -fr "templates" ".mayaFiles/assets/";
workspace -fr "textures" ".mayaFiles/images/";
workspace -fr "translatorData" ".mayaFiles/data/";
"""
)
def __init__(self):
self.config_values = Config.default_config_values.copy()
self.user_config = {}
# the priority order is
# oyProjectManager.config
# config.py under .oyrc directory
# config.py under $OYPROJECTMANAGER_PATH
self._parse_settings()
def _parse_settings(self):
# for now just use $OYPROJECTMANAGER_PATH
ENV_KEY = "OYPROJECTMANAGER_PATH"
# try to get the environment variable
if not os.environ.has_key(ENV_KEY):
# don't do anything
logger.debug("no environment key found for user settings")
else:
logger.debug("environment key found")
resolved_path = os.path.expanduser(
os.path.join(
os.environ[ENV_KEY],
"config.py"
)
)
# using `while` is not safe to expand variables
# do the expansion for 5 times which is complex enough
# and I don't (hopefully) expect anybody to use
# more than 5 level deep environment variables
resolved_path = os.path.expandvars(
os.path.expandvars(
os.path.expandvars(
os.path.expandvars(
resolved_path
)
)
)
)
try:
try:
logger.debug("importing user config")
execfile(resolved_path, self.user_config)
except SyntaxError, err:
raise RuntimeError("There is a syntax error in your "
"configuration file: " + str(err))
# append the data to the current settings
logger.debug("updating system config")
for key in self.user_config:
if key in self.config_values:
self.config_values[key] = self.user_config[key]
except IOError:
logger.warning("The $OYPROJETMANAGER_PATH:" + resolved_path + \
" doesn't exists! skipping user config")
def __getattr__(self, name):
return self.config_values[name]
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
return setattr(self, name, value)
def __delitem__(self, name):
delattr(self, name)
def __contains__(self, name):
return name in self.config_values
@property
def last_user_id(self):
"""returns the last user id
It is not very much related with the config.py and user settings, but
it seems the most appropriate place is this one to get information from
individual users.
This should work fairly fast, because it uses the local filesystem not
the network thus the fileserver.
"""
# TODO: This should be replaced with beaker.session
file_name = 'last_user_id'
file_path = os.path.expanduser("~/.oypmrc/")
file_full_path = os.path.join(file_path, file_name)
last_user_id = None
try:
last_user_file = open(file_full_path)
except IOError:
pass
else:
last_user_id = int(last_user_file.readline().strip())
last_user_file.close()
return last_user_id
@last_user_id.setter
def last_user_id(self, user_id):
"""sets the user id for the last user
"""
if not isinstance(user_id, int):
raise RuntimeWarning("user_id for last_user_id should be an int")
file_name = 'last_user_id'
file_path = os.path.expanduser("~/.oypmrc/")
file_full_path = os.path.join(file_path, file_name)
logger.debug("saving user id to %s" % file_full_path)
# create the folder first
try:
os.makedirs(file_path)
except OSError:
# already created
pass
try:
last_user_file = open(file_full_path, 'w')
except IOError as e:
pass
else:
last_user_file.write(str(user_id))
last_user_file.close()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Transform operators."""
from . import _make
from ..expr import TupleWrapper, const
def cast(data, dtype):
    """Convert the elements of a tensor to the given data type.

    Parameters
    ----------
    data : relay.Expr
        The tensor whose elements are converted.

    dtype : str
        Name of the target data type.

    Returns
    -------
    result : relay.Expr
        The casted result.
    """
    from .. import _make as relay_make
    return relay_make.cast(data, dtype)
def cast_like(data, dtype_like):
    """Convert the elements of a tensor to the data type of another tensor.

    Parameters
    ----------
    data : relay.Expr
        The tensor whose elements are converted.

    dtype_like : relay.Expr
        The tensor whose data type is the cast target.

    Returns
    -------
    result : relay.Expr
        The casted result.
    """
    from .. import _make as relay_make
    return relay_make.cast_like(data, dtype_like)
def reinterpret(data, dtype):
    """Reinterpret the raw bits of a tensor as the given data type.

    Parameters
    ----------
    data : relay.Expr
        The tensor to reinterpret.

    dtype : str
        Name of the target data type.

    Returns
    -------
    result : relay.Expr
        The reinterpreted result.
    """
    from .. import _make as relay_make
    return relay_make.reinterpret(data, dtype)
def expand_dims(data, axis, num_newaxis=1):
    """Insert ``num_newaxis`` new axes at the position given by ``axis``.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    axis : int
        Position at which the new axes are inserted. Should lie in range
        `[-data.ndim - 1, data.ndim]`. If `axis < 0`, it is the first axis
        inserted; if `axis >= 0`, it is the last axis inserted in Python's
        negative indexing.

    num_newaxis : int
        Number of axes to insert. Should be >= 0.

    Returns
    -------
    result : relay.Expr
        The reshaped result.
    """
    result = _make.expand_dims(data, axis, num_newaxis)
    return result
def transpose(data, axes=None):
    """Permute the dimensions of an array.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    axes : None or List[int]
        The target axis order; the order is reversed when not given.

    Returns
    -------
    result : relay.Expr
        The transposed result.
    """
    # normalize any iterable of axes into a plain list for the FFI call
    axis_list = None if axes is None else list(axes)
    return _make.transpose(data, axis_list)
def squeeze(data, axis=None):
    """Remove axes of length one from the array.

    Parameters
    ----------
    data : tvm.relay.Expr
        The input data to the operator.

    axis : None or List[int]
        The set of axes to remove. When ``axis`` is None every axis of
        dimension 1 is removed. Specifying an axis whose dimension is not
        1 is an error.

    Returns
    -------
    result : tvm.relay.Expr
        The squeezed result.
    """
    squeezed = _make.squeeze(data, axis)
    return squeezed
def reshape(data, newshape):
    """Reshape the input array.

    To give the user more convenience without doing manual shape
    inference, some dimensions of the shape can take special values from
    the set {0, -1, -2, -3, -4}. The significance of each is explained
    below:

    - ``0`` copy this dimension from the input to the output shape.

        Example::

        - data.shape = (2,3,4), newshape = (4,0,2), result.shape = (4,3,2)
        - data.shape = (2,3,4), newshape = (2,0,0), result.shape = (2,3,4)

    - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions
    keeping the size of the new array same as that of the input array.
    At most one dimension of shape can be -1.

        Example::

        - data.shape = (2,3,4), newshape = (6,1,-1), result.shape = (6,1,4)
        - data.shape = (2,3,4), newshape = (3,-1,8), result.shape = (3,1,8)
        - data.shape = (2,3,4), newshape = (-1,), result.shape = (24,)

    - ``-2`` copy all/remainder of the input dimensions to the output shape.

        Example::

        - data.shape = (2,3,4), newshape = (-2,), result.shape = (2,3,4)
        - data.shape = (2,3,4), newshape = (2,-2), result.shape = (2,3,4)
        - data.shape = (2,3,4), newshape = (-2,1,1), result.shape = (2,3,4,1,1)

    - ``-3`` use the product of two consecutive dimensions of the input shape
    as the output dimension.

        Example::

        - data.shape = (2,3,4), newshape = (-3,4), result.shape = (6,4)
        - data.shape = (2,3,4,5), newshape = (-3,-3), result.shape = (6,20)
        - data.shape = (2,3,4), newshape = (0,-3), result.shape = (2,12)
        - data.shape = (2,3,4), newshape = (-3,-2), result.shape = (6,4)

    - ``-4`` split one dimension of the input into two dimensions passed subsequent
    to -4 in shape (can contain -1).

        Example::

        - data.shape = (2,3,4), newshape = (-4,1,2,-2), result.shape = (1,2,3,4)
        - data.shape = (2,3,4), newshape = (2,-4,-1,3,-2), result.shape = (2,1,3,4)

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    newshape : Union[int, Tuple[int], List[int]]
        The new shape. Should be compatible with the original shape.

    Returns
    -------
    result : relay.Expr
        The reshaped result.
    """
    # a bare int means a single-dimension target shape
    shape_list = [newshape] if isinstance(newshape, int) else list(newshape)
    return _make.reshape(data, shape_list)
def reshape_like(data, shape_like):
    """Reshape the input array to the shape of another array.

    For an input array with shape ``(d1, d2, ..., dk)``, `reshape_like`
    reshapes the input into an output array with the same shape as the
    second input array.

    .. note::
        Sizes for both arrays should be compatible.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    shape_like : tuple of int
        The new shape. Should be compatible with the original shape.

    Returns
    -------
    ret : relay.Expr
        The computed result.
    """
    reshaped = _make.reshape_like(data, shape_like)
    return reshaped
def take(data, indices, axis=None, mode="clip"):
    """Take elements from an array along an axis.

    Parameters
    ----------
    data : relay.Expr
        The source array.

    indices : relay.Expr
        The indices of the values to extract.

    axis : int, optional
        The axis over which to select values. By default,
        the flattened input array is used.

    mode : str, optional
        Specifies how out-of-bound indices will behave [clip, wrap, fast].
        clip: clip to the range (default).
        wrap: wrap around the indices.
        fast: no clip or wrap around (user must make sure indices are in-bound).

    Returns
    -------
    ret : relay.Expr
        The computed result.
    """
    return _make.take(data, indices, axis, mode)
def full(fill_value, shape=(), dtype=""):
    """Create an array of the given shape filled with a scalar value.

    Parameters
    ----------
    fill_value : relay.Expr
        The value to fill with. Must be a scalar.

    shape : tuple of int
        The shape of the target.

    dtype : data type, optional (defaults to data type of the fill value)
        The data type of the target.

    Returns
    -------
    result : relay.Expr
        The resulting tensor.
    """
    filled = _make.full(fill_value, shape, dtype)
    return filled
def full_like(data, fill_value):
    """Create an array filled with a scalar, matching the shape and type
    of the input array.

    Parameters
    ----------
    data : relay.Expr
        The input tensor providing shape and type.

    fill_value : relay.Expr
        The scalar value to fill with.

    Returns
    -------
    result : relay.Expr
        The resulting tensor.
    """
    filled = _make.full_like(data, fill_value)
    return filled
def arange(start, stop=None, step=None, dtype="float32"):
    """Return evenly spaced values within a given interval.

    .. note::
        Similar to ``numpy.arange``, when only one argument is given, it is
        used as `stop` instead of `start` while `start` takes default value 0.

        Warning: Undefined behavior when dtype is incompatible with
        start/stop/step. It could lead to different results compared to
        numpy, MXNet, pytorch, etc.

    Parameters
    ----------
    start : tvm.Expr, optional
        Start of interval. The interval includes this value. The default
        start value is 0.

    stop : tvm.Expr
        Stop of interval. The interval does not include this value.

    step : tvm.Expr, optional
        Spacing between values. The default step size is 1.

    dtype : str, optional
        The target data type.

    Returns
    -------
    result : relay.Expr
        The resulting tensor.

    Examples
    --------
    .. code-block:: python

        relay.arange(5) = [0, 1, 2, 3, 4]
        relay.arange(1, 5) = [1, 2, 3, 4]
        relay.arange(1, 5, 1.5) = [1, 2.5, 4]
    """
    if step is None:
        step = const(1, dtype)
    if stop is None:
        # single-argument form: the given value is the stop, start is 0
        stop, start = start, const(0, dtype=dtype)
    return _make.arange(start, stop, step, dtype)
def repeat(data, repeats, axis):
    """Repeats elements of an array.

    By default, repeat flattens the input array into 1-D and then repeats the elements.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    repeats : int
        The number of repetitions for each element.

    axis : int
        The axis along which to repeat values. The negative numbers are interpreted
        counting from the backward. By default, use the flattened input array, and
        return a flat output array.

    Returns
    -------
    ret : relay.Expr
        The computed result.

    Examples
    --------
    .. code-block:: python

        x = [[1, 2], [3, 4]]
        relay.repeat(x, repeats=2) = [1., 1., 2., 2., 3., 3., 4., 4.]
        relay.repeat(x, repeats=2, axis=1) = [[1., 1., 2., 2.],
                                              [3., 3., 4., 4.]]
    """
    return _make.repeat(data, repeats, axis)
def tile(data, reps):
    """Repeat the whole array multiple times.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    reps : tuple of int
        The number of times to repeat the tensor data.

    .. note::
        Each dim size of reps must be a positive integer. If reps has
        length d, the result will have dimension of max(d, data.ndim);
        If data.ndim < d, data is promoted to be d-dimensional by
        prepending new axes. If data.ndim >= d, reps is promoted to
        a.ndim by pre-pending 1's to it.

    Returns
    -------
    ret : relay.Expr
        The computed result.

    Examples
    --------
    .. code-block:: python

        x = [[1, 2], [3, 4]]
        relay.tile(x, reps=(2,3)) = [[1., 2., 1., 2., 1., 2.],
                                     [3., 4., 3., 4., 3., 4.],
                                     [1., 2., 1., 2., 1., 2.],
                                     [3., 4., 3., 4., 3., 4.]]
        relay.tile(x, reps=(2,)) = [[1., 2., 1., 2.],
                                    [3., 4., 3., 4.]]
    """
    tiled = _make.tile(data, reps)
    return tiled
def reverse(data, axis):
    """Reverses the order of elements along given axis while preserving array shape.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    axis : int
        The axis along which to reverse elements.

    Returns
    -------
    ret : relay.Expr
        The computed result.

    Examples
    --------
    .. code-block:: python

        x = [[1., 2.], [3., 4.]]
        relay.reverse(x, axis=0) = [[3., 4.], [1., 2.]]
        relay.reverse(x, axis=1) = [[2., 1.], [4., 3.]]
    """
    return _make.reverse(data, axis)
def where(condition, x, y):
    """Selecting elements from either x or y depending on the value of the
    condition.

    .. note::
        The shape of condition, x, and y needs to be the same.

    Parameters
    ----------
    condition : relay.Expr
        The condition array. The n-th element in `y` is selected when the n-th
        value in the `condition` array is zero. Otherwise, the corresponding
        element from `x` will be picked.

    x : relay.Expr
        The first array to be selected.

    y : relay.Expr
        The second array to be selected.

    Returns
    -------
    result : relay.Expr
        The selected array.

    Examples
    --------
    .. code-block:: python

        x = [[1, 2], [3, 4]]
        y = [[5, 6], [7, 8]]
        condition = [[0, 1], [-1, 0]]
        relay.where(condition, x, y) = [[5, 2], [3, 8]]

        condition = [1, 0]
        relay.where(condition, x, y) = [[1, 2], [7, 8]]
    """
    return _make.where(condition, x, y)
def broadcast_to(data, shape):
    """Broadcast the input tensor to the provided shape, keeping its type.

    Parameters
    ----------
    data : relay.Expr
        The input tensor.

    shape : shape
        The shape to broadcast to.

    Returns
    -------
    result : relay.Expr
        The resulting tensor.
    """
    broadcasted = _make.broadcast_to(data, shape)
    return broadcasted
def broadcast_to_like(data, broadcast_type):
    """Broadcast the input tensor to the shape of another tensor.

    Parameters
    ----------
    data : relay.Expr
        The input tensor.

    broadcast_type : relay.Expr
        The tensor providing the type to broadcast to.

    Returns
    -------
    result : relay.Expr
        The resulting tensor.
    """
    broadcasted = _make.broadcast_to_like(data, broadcast_type)
    return broadcasted
def collapse_sum_like(data, collapse_type):
    """Sum-reduce the input tensor down to the type of another tensor.

    Parameters
    ----------
    data : relay.Expr
        The input tensor.

    collapse_type : relay.Expr
        The tensor providing the type to collapse to.

    Returns
    -------
    result : relay.Expr
        The resulting tensor.
    """
    collapsed = _make.collapse_sum_like(data, collapse_type)
    return collapsed
def split(data, indices_or_sections, axis=0):
    """Split the input tensor along an axis by sections or indices.

    If indices_or_sections is an integer, the input will be divided equally
    along the given axis. If such a split is not possible, an error is
    raised.

    If indices_or_sections is a tuple of sorted integers, the entries
    indicate where along the axis the array is split.

    Parameters
    ----------
    data : relay.Expr
        The source array.

    indices_or_sections : int or tuple of int
        Indices or sections to split into. Accepts an int or a tuple.

    axis : int, optional
        The axis over which to split.

    Returns
    -------
    ret : relay.Tuple([relay.Expr, relay.Expr])
        The computed result.
    """
    # an int asks for that many equal sections; a tuple of k cut points
    # produces k + 1 pieces
    if isinstance(indices_or_sections, int):
        num_outputs = indices_or_sections
    else:
        num_outputs = len(indices_or_sections) + 1
    split_expr = _make.split(data, indices_or_sections, axis)
    return TupleWrapper(split_expr, num_outputs)
def strided_slice(data, begin, end, strides=None):
    """Strided slice of an array.

    Parameters
    ----------
    data : relay.Expr
        The source array to be sliced.

    begin : list of int
        The indices to begin with in the slicing.

    end : list of int
        Indices indicating end of the slice.

    strides : list of int, optional
        Specifies the stride values; a negative stride reverses the input
        tensor in that particular axis.

    Returns
    -------
    ret : relay.Expr
        The computed result.
    """
    # None strides collapse to an empty list for the FFI call
    stride_list = list(strides) if strides else []
    return _make.strided_slice(data, list(begin), list(end), stride_list)
def slice_like(data, shape_like, axes=None):
    """Slice the first input to the corresponding sizes of the second input.

    For an input array with shape ``(d1, d2, ..., dk)``, `slice_like`
    slices the input array to the corresponding size of the second array.
    By default it slices on all axes.

    Parameters
    ----------
    data : tvm.relay.Expr
        The source array.

    shape_like : tvm.relay.Expr
        The array providing the target sizes.

    axes : Optional[Tuple[int]]
        List of axes on which the input data will be sliced according to
        the corresponding size of the second input. By default slices on
        all axes. Negative axes mean counting in reverse.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    sliced = _make.slice_like(data, shape_like, axes)
    return sliced
def layout_transform(data, src_layout, dst_layout):
    """Transform the layout of a tensor.

    Parameters
    ----------
    data : relay.Expr
        The source tensor to be transformed.

    src_layout : str
        The source layout (e.g. NCHW).

    dst_layout : str
        The destination layout (e.g. NCHW16c).

    Returns
    -------
    ret : relay.Expr
        The transformed tensor.
    """
    transformed = _make.layout_transform(data, src_layout, dst_layout)
    return transformed
def reverse_reshape(data, newshape):
    """Reshapes the input array where the special values are inferred from
    right to left.

    The special values have the same semantics as :py:class:`tvm.relay.reshape`.
    The difference is that special values are inferred from right to left. It
    can be explained in the example below::

    - data.shape = (10,5,4), newshape = (-1,0), reshape results in (40,5)
    - data.shape = (10,5,4), newshape = (-1,0), reverse_reshape results in (50,4)

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    newshape : Union[int, Tuple[int], List[int]]
        The new shape. Should be compatible with the original shape.

    Returns
    -------
    result : relay.Expr
        The reshaped result.
    """
    # a bare int means a single-dimension target shape
    if isinstance(newshape, int):
        newshape = [newshape]
    return _make._contrib_reverse_reshape(data, list(newshape))
def gather_nd(data, indices):
    """Gather elements or slices from ``data`` into a tensor whose shape is
    defined by ``indices``.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    indices : relay.Expr
        The shape of the output tensor.

    Returns
    -------
    ret : relay.Expr
        The computed result.

    Examples
    --------
    .. code-block:: python

        data = [[0, 1], [2, 3]]
        indices = [[1, 1, 0], [0, 1, 0]]
        relay.gather_nd(data, indices) = [2, 3, 0]

        data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
        indices = [[0, 1], [1, 0]]
        relay.gather_nd(data, indices) = [[3, 4], [5, 6]]
    """
    gathered = _make.gather_nd(data, indices)
    return gathered
def sequence_mask(data, valid_length, mask_value=0, axis=0):
    """Set all elements outside the expected length of each sequence to a
    constant value.

    This function takes an n-dimensional input array of the form
    [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...] and
    returns an array of the same shape.

    Parameters
    ----------
    data : relay.Expr
        The input data.

    valid_length : relay.Expr
        The expected (valid) length of each sequence in the tensor.

    mask_value : float
        The masking value.

    axis : int
        The axis of the length dimension.

    Returns
    -------
    ret : relay.Expr
        The computed result.

    Examples
    --------
    .. code-block:: python

        x = [[[  1.,   2.,   3.], [  4.,   5.,   6.]],
             [[  7.,   8.,   9.], [ 10.,  11.,  12.]],
             [[ 13.,  14.,  15.], [ 16.,  17.,  18.]]]

        relay.sequence_mask(x, valid_length=[1, 1]) =
             [[[  1.,   2.,   3.], [  4.,   5.,   6.]],
              [[  0.,   0.,   0.], [  0.,   0.,   0.]],
              [[  0.,   0.,   0.], [  0.,   0.,   0.]]]

        relay.sequence_mask(x, valid_length=[2, 3], mask_value=0.1) =
             [[[  1.,   2.,   3.], [  4.,   5.,   6.]],
              [[  7.,   8.,   9.], [ 10.,  11.,  12.]],
              [[  0.1,  0.1,  0.1], [ 16.,  17.,  18.]]]
    """
    masked = _make.sequence_mask(data, valid_length, mask_value, axis)
    return masked
def one_hot(indices, on_value, off_value, depth, axis, dtype):
    """Return a one-hot tensor where the locations represented by indices
    take value ``on_value`` and all other locations take value
    ``off_value``.

    The final dimension is
    <indices outer dimensions> x depth x <indices inner dimensions>.

    Parameters
    ----------
    indices : relay.Expr
        Locations to set to ``on_value``.

    on_value : relay.Expr
        Value to fill at ``indices``.

    off_value : relay.Expr
        Value to fill at all other positions besides ``indices``.

    depth : int
        Depth of the one-hot dimension.

    axis : int
        Axis to fill.

    dtype : str
        Data type of the output tensor.

    Returns
    -------
    ret : relay.Expr
        The one-hot tensor.

    Examples
    --------
    .. code-block:: python

        indices = [0, 1, 2]

        relay.one_hot(indices, 3) =
            [[1, 0, 0],
             [0, 1, 0],
             [0, 0, 1]]
    """
    encoded = _make.one_hot(indices, on_value, off_value, depth, axis, dtype)
    return encoded
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.