# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Wishart distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_full
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class _WishartOperatorPD(distribution.Distribution):
"""The matrix Wishart distribution on positive definite matrices.
  This distribution is defined by a scalar number of degrees of freedom `df`
  and an instance of `OperatorPDBase`, which provides matrix-free access to
  the symmetric positive definite operator that defines the scale matrix.
#### Mathematical details.
The PDF of this distribution is,
```
f(X) = det(X)^(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / B(scale, df)
```
where `df >= k` denotes the degrees of freedom, `scale` is a symmetric, pd,
`k x k` matrix, and the normalizing constant `B(scale, df)` is given by:
```
B(scale, df) = 2^(0.5 df k) |det(scale)|^(0.5 df) Gamma_k(0.5 df)
```
where `Gamma_k` is the multivariate Gamma function.
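  As a quick cross-check, the normalizing constant can be computed directly
  with NumPy/SciPy. A minimal sketch (illustration only, not part of this
  class; assumes `scipy` is available):

  ```python
  import numpy as np
  from scipy.special import multigammaln

  def log_normalizer(scale, df):
    k = scale.shape[-1]
    _, logdet = np.linalg.slogdet(scale)
    return (0.5 * df * k * np.log(2.) + 0.5 * df * logdet +
            multigammaln(0.5 * df, k))
  ```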
#### Examples
See `WishartFull`, `WishartCholesky` for examples of initializing and using
this class.
"""
def __init__(self,
df,
scale_operator_pd,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name=None):
"""Construct Wishart distributions.
Args:
df: `float` or `double` tensor, the degrees of freedom of the
distribution(s). `df` must be greater than or equal to `k`.
scale_operator_pd: `float` or `double` instance of `OperatorPDBase`.
      cholesky_input_output_matrices: `Boolean`. Any function whose input or
        output is a matrix assumes the input is Cholesky-factored and returns
        a Cholesky-factored matrix. For example, `log_pdf` takes a Cholesky
        input and `sample_n` returns a Cholesky factor when
        `cholesky_input_output_matrices=True`.
validate_args: `Boolean`, default `False`. Whether to validate input with
asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If True, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: if scale is not floating-type
TypeError: if scale.dtype != df.dtype
ValueError: if df < k, where scale operator event shape is `(k, k)`
"""
parameters = locals()
parameters.pop("self")
self._cholesky_input_output_matrices = cholesky_input_output_matrices
with ops.name_scope(name) as ns:
with ops.name_scope("init", values=[df, scale_operator_pd]):
if not scale_operator_pd.dtype.is_floating:
raise TypeError(
"scale_operator_pd.dtype=%s is not a floating-point type" %
scale_operator_pd.dtype)
self._scale_operator_pd = scale_operator_pd
self._df = ops.convert_to_tensor(
df, dtype=scale_operator_pd.dtype, name="df")
contrib_tensor_util.assert_same_float_dtype(
(self._df, self._scale_operator_pd))
if (self._scale_operator_pd.get_shape().ndims is None or
self._scale_operator_pd.get_shape()[-1].value is None):
self._dimension = math_ops.cast(
self._scale_operator_pd.vector_space_dimension(),
dtype=self._scale_operator_pd.dtype, name="dimension")
else:
self._dimension = ops.convert_to_tensor(
self._scale_operator_pd.get_shape()[-1].value,
dtype=self._scale_operator_pd.dtype, name="dimension")
df_val = tensor_util.constant_value(self._df)
dim_val = tensor_util.constant_value(self._dimension)
if df_val is not None and dim_val is not None:
df_val = np.asarray(df_val)
if not df_val.shape: df_val = (df_val,)
if any(df_val < dim_val):
raise ValueError(
"Degrees of freedom (df = %s) cannot be less than dimension of "
"scale matrix (scale.dimension = %s)"
% (df_val, dim_val))
elif validate_args:
          assertions = check_ops.assert_less_equal(
              self._dimension, self._df,
              message=("Degrees of freedom (df = %s) cannot be less than "
                       "dimension of scale matrix (scale.dimension = %s)" %
                       (self._df, self._dimension)))
self._df = control_flow_ops.with_dependencies([assertions], self._df)
super(_WishartOperatorPD, self).__init__(
dtype=self._scale_operator_pd.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
is_continuous=True,
is_reparameterized=True,
parameters=parameters,
graph_parents=([self._df, self._dimension] +
self._scale_operator_pd.inputs),
name=ns)
@property
def df(self):
"""Wishart distribution degree(s) of freedom."""
return self._df
def scale(self):
"""Wishart distribution scale matrix."""
if self._cholesky_input_output_matrices:
return self.scale_operator_pd.sqrt_to_dense()
else:
return self.scale_operator_pd.to_dense()
@property
def scale_operator_pd(self):
"""Wishart distribution scale matrix as an OperatorPD."""
return self._scale_operator_pd
@property
def cholesky_input_output_matrices(self):
"""Boolean indicating if `Tensor` input/outputs are Cholesky factorized."""
return self._cholesky_input_output_matrices
@property
def dimension(self):
"""Dimension of underlying vector space. The `p` in `R^(p*p)`."""
return self._dimension
def _event_shape(self):
s = self.scale_operator_pd.shape()
return array_ops.slice(s, array_ops.shape(s) - 2, [2])
def _get_event_shape(self):
return self.scale_operator_pd.get_shape()[-2:]
def _batch_shape(self):
return self.scale_operator_pd.batch_shape()
def _get_batch_shape(self):
return self.scale_operator_pd.get_batch_shape()
def _sample_n(self, n, seed):
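    # Sample via the Bartlett decomposition: draw a lower-triangular matrix
    # with standard normal strictly-lower entries and chi-distributed
    # diagonal entries (sqrt of the Gamma draws below), then left-multiply by
    # a square root of `scale`.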
batch_shape = self.batch_shape()
event_shape = self.event_shape()
batch_ndims = array_ops.shape(batch_shape)[0]
ndims = batch_ndims + 3 # sample_ndims=1, event_ndims=2
shape = array_ops.concat(0, ((n,), batch_shape, event_shape))
# Complexity: O(nbk^2)
x = random_ops.random_normal(shape=shape,
mean=0.,
stddev=1.,
dtype=self.dtype,
seed=seed)
# Complexity: O(nbk)
# This parametrization is equivalent to Chi2, i.e.,
# ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
g = random_ops.random_gamma(shape=(n,),
alpha=self._multi_gamma_sequence(
0.5 * self.df, self.dimension),
beta=0.5,
dtype=self.dtype,
seed=distribution_util.gen_new_seed(
seed, "wishart"))
# Complexity: O(nbk^2)
x = array_ops.matrix_band_part(x, -1, 0) # Tri-lower.
# Complexity: O(nbk)
x = array_ops.matrix_set_diag(x, math_ops.sqrt(g))
# Make batch-op ready.
# Complexity: O(nbk^2)
perm = array_ops.concat(0, (math_ops.range(1, ndims), (0,)))
x = array_ops.transpose(x, perm)
shape = array_ops.concat(0, (batch_shape, (event_shape[0], -1)))
x = array_ops.reshape(x, shape)
# Complexity: O(nbM) where M is the complexity of the operator solving a
# vector system. E.g., for OperatorPDDiag, each matmul is O(k^2), so
# this complexity is O(nbk^2). For OperatorPDCholesky, each matmul is
# O(k^3) so this step has complexity O(nbk^3).
x = self.scale_operator_pd.sqrt_matmul(x)
# Undo make batch-op ready.
# Complexity: O(nbk^2)
shape = array_ops.concat(0, (batch_shape, event_shape, (n,)))
x = array_ops.reshape(x, shape)
perm = array_ops.concat(0, ((ndims-1,), math_ops.range(0, ndims-1)))
x = array_ops.transpose(x, perm)
if not self.cholesky_input_output_matrices:
# Complexity: O(nbk^3)
x = math_ops.batch_matmul(x, x, adj_y=True)
return x
def _log_prob(self, x):
if self.cholesky_input_output_matrices:
x_sqrt = x
else:
# Complexity: O(nbk^3)
x_sqrt = linalg_ops.cholesky(x)
batch_shape = self.batch_shape()
event_shape = self.event_shape()
ndims = array_ops.rank(x_sqrt)
# sample_ndims = ndims - batch_ndims - event_ndims
sample_ndims = ndims - array_ops.shape(batch_shape)[0] - 2
sample_shape = array_ops.slice(
array_ops.shape(x_sqrt), [0], [sample_ndims])
# We need to be able to pre-multiply each matrix by its corresponding
# batch scale matrix. Since a Distribution Tensor supports multiple
# samples per batch, this means we need to reshape the input matrix `x`
# so that the first b dimensions are batch dimensions and the last two
# are of shape [dimension, dimensions*number_of_samples]. Doing these
# gymnastics allows us to do a batch_solve.
#
# After we're done with sqrt_solve (the batch operation) we need to undo
# this reshaping so what we're left with is a Tensor partitionable by
# sample, batch, event dimensions.
# Complexity: O(nbk^2) since transpose must access every element.
scale_sqrt_inv_x_sqrt = x_sqrt
perm = array_ops.concat(0, (math_ops.range(sample_ndims, ndims),
math_ops.range(0, sample_ndims)))
scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
shape = array_ops.concat(
0, (batch_shape,
(math_ops.cast(self.dimension, dtype=dtypes.int32), -1)))
scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
# Complexity: O(nbM*k) where M is the complexity of the operator solving
# a vector system. E.g., for OperatorPDDiag, each solve is O(k), so
# this complexity is O(nbk^2). For OperatorPDCholesky, each solve is
# O(k^2) so this step has complexity O(nbk^3).
scale_sqrt_inv_x_sqrt = self.scale_operator_pd.sqrt_solve(
scale_sqrt_inv_x_sqrt)
# Undo make batch-op ready.
# Complexity: O(nbk^2)
shape = array_ops.concat(0, (batch_shape, event_shape, sample_shape))
scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
perm = array_ops.concat(0, (math_ops.range(ndims - sample_ndims, ndims),
math_ops.range(0, ndims - sample_ndims)))
scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
# Write V = SS', X = LL'. Then:
# tr[inv(V) X] = tr[inv(S)' inv(S) L L']
# = tr[inv(S) L L' inv(S)']
# = tr[(inv(S) L) (inv(S) L)']
# = sum_{ik} (inv(S) L)_{ik}^2
# The second equality follows from the cyclic permutation property.
# Complexity: O(nbk^2)
trace_scale_inv_x = math_ops.reduce_sum(
math_ops.square(scale_sqrt_inv_x_sqrt),
reduction_indices=[-2, -1])
# Complexity: O(nbk)
half_log_det_x = math_ops.reduce_sum(
math_ops.log(array_ops.matrix_diag_part(x_sqrt)),
reduction_indices=[-1])
# Complexity: O(nbk^2)
log_prob = ((self.df - self.dimension - 1.) * half_log_det_x -
0.5 * trace_scale_inv_x -
self.log_normalizing_constant())
# Set shape hints.
# Try to merge what we know from the input then what we know from the
# parameters of this distribution.
if x.get_shape().ndims is not None:
log_prob.set_shape(x.get_shape()[:-2])
if (log_prob.get_shape().ndims is not None and
self.get_batch_shape().ndims is not None and
self.get_batch_shape().ndims > 0):
log_prob.get_shape()[-self.get_batch_shape().ndims:].merge_with(
self.get_batch_shape())
return log_prob
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _entropy(self):
half_dp1 = 0.5 * self.dimension + 0.5
half_df = 0.5 * self.df
return (self.dimension * (half_df + half_dp1 * math.log(2.)) +
half_dp1 * self.scale_operator_pd.log_det() +
self._multi_lgamma(half_df, self.dimension) +
(half_dp1 - half_df) * self._multi_digamma(half_df, self.dimension))
def _mean(self):
if self.cholesky_input_output_matrices:
return math_ops.sqrt(self.df) * self.scale_operator_pd.sqrt_to_dense()
return self.df * self.scale_operator_pd.to_dense()
def _variance(self):
x = math_ops.sqrt(self.df) * self.scale_operator_pd.to_dense()
d = array_ops.expand_dims(array_ops.matrix_diag_part(x), -1)
v = math_ops.square(x) + math_ops.batch_matmul(d, d, adj_y=True)
if self.cholesky_input_output_matrices:
return linalg_ops.cholesky(v)
return v
def _std(self):
if self.cholesky_input_output_matrices:
      raise ValueError(
          "Computing std. dev. when cholesky_input_output_matrices=True "
          "does not make sense.")
return linalg_ops.cholesky(self.variance())
def _mode(self):
s = self.df - self.dimension - 1.
s = math_ops.select(
math_ops.less(s, 0.),
constant_op.constant(float("NaN"), dtype=self.dtype, name="nan"),
s)
if self.cholesky_input_output_matrices:
return math_ops.sqrt(s) * self.scale_operator_pd.sqrt_to_dense()
return s * self.scale_operator_pd.to_dense()
def mean_log_det(self, name="mean_log_det"):
"""Computes E[log(det(X))] under this Wishart distribution."""
with self._name_scope(name):
return (self._multi_digamma(0.5 * self.df, self.dimension) +
self.dimension * math.log(2.) +
self.scale_operator_pd.log_det())
def log_normalizing_constant(self, name="log_normalizing_constant"):
"""Computes the log normalizing constant, log(Z)."""
with self._name_scope(name):
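      # log(Z) = 0.5 df log|det(scale)| + 0.5 df k log(2)
      #          + log(Gamma_k(0.5 df)),
      # where `sqrt_log_det()` returns 0.5 log|det(scale)|.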
return (self.df * self.scale_operator_pd.sqrt_log_det() +
0.5 * self.df * self.dimension * math.log(2.) +
self._multi_lgamma(0.5 * self.df, self.dimension))
def _multi_gamma_sequence(self, a, p, name="multi_gamma_sequence"):
"""Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p]."""
with self._name_scope(name, values=[a, p]):
# Linspace only takes scalars, so we'll add in the offset afterwards.
seq = math_ops.linspace(
constant_op.constant(0., dtype=self.dtype),
0.5 - 0.5 * p,
math_ops.cast(p, dtypes.int32))
return seq + array_ops.expand_dims(a, [-1])
def _multi_lgamma(self, a, p, name="multi_lgamma"):
"""Computes the log multivariate gamma function; log(Gamma_p(a))."""
with self._name_scope(name, values=[a, p]):
seq = self._multi_gamma_sequence(a, p)
return (0.25 * p * (p - 1.) * math.log(math.pi) +
math_ops.reduce_sum(math_ops.lgamma(seq),
reduction_indices=(-1,)))
def _multi_digamma(self, a, p, name="multi_digamma"):
"""Computes the multivariate digamma function; Psi_p(a)."""
with self._name_scope(name, values=[a, p]):
seq = self._multi_gamma_sequence(a, p)
return math_ops.reduce_sum(math_ops.digamma(seq),
reduction_indices=(-1,))
class WishartCholesky(_WishartOperatorPD):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar degrees of freedom `df` and a
lower, triangular Cholesky factor which characterizes the scale matrix.
  Using WishartCholesky is a constant-factor improvement over WishartFull: it
  saves an O(nbk^3) operation, i.e., a matrix product during sampling and a
  Cholesky factorization in log_prob. For most use cases it often saves
  another O(nbk^3) operation, since most uses of Wishart will also use the
  Cholesky factorization.
#### Mathematical details.
The PDF of this distribution is,
```
f(X) = det(X)^(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / B(scale, df)
```
where `df >= k` denotes the degrees of freedom, `scale` is a symmetric, pd,
`k x k` matrix, and the normalizing constant `B(scale, df)` is given by:
```
B(scale, df) = 2^(0.5 df k) |det(scale)|^(0.5 df) Gamma_k(0.5 df)
```
where `Gamma_k` is the multivariate Gamma function.
#### Examples
```python
# Initialize a single 3x3 Wishart with Cholesky factored scale matrix and 5
# degrees-of-freedom.(*)
df = 5
chol_scale = tf.cholesky(...) # Shape is [3, 3].
dist = tf.contrib.distributions.WishartCholesky(df=df, scale=chol_scale)
# Evaluate this on an observation in R^{3x3}, returning a scalar.
x = ... # A 3x3 positive definite matrix.
dist.pdf(x) # Shape is [], a scalar.
# Evaluate this on two observations, each in R^{3x3}, returning a length-two
# Tensor.
x = [x0, x1] # Shape is [2, 3, 3].
dist.pdf(x) # Shape is [2].
# Initialize two 3x3 Wisharts with Cholesky factored scale matrices.
df = [5, 4]
chol_scale = tf.cholesky(...) # Shape is [2, 3, 3].
dist = tf.contrib.distributions.WishartCholesky(df=df, scale=chol_scale)
# Evaluate this on four observations.
x = [[x0, x1], [x2, x3]] # Shape is [2, 2, 3, 3].
dist.pdf(x) # Shape is [2, 2].
# (*) - To efficiently create a trainable covariance matrix, see the example
# in tf.contrib.distributions.matrix_diag_transform.
```
"""
def __init__(self,
df,
scale,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name="WishartCholesky"):
"""Construct Wishart distributions.
Args:
df: `float` or `double` `Tensor`. Degrees of freedom, must be greater than
or equal to dimension of the scale matrix.
scale: `float` or `double` `Tensor`. The Cholesky factorization of
the symmetric positive definite scale matrix of the distribution.
      cholesky_input_output_matrices: `Boolean`. Any function whose input or
        output is a matrix assumes the input is Cholesky-factored and returns
        a Cholesky-factored matrix. For example, `log_pdf` takes a Cholesky
        input and `sample_n` returns a Cholesky factor when
        `cholesky_input_output_matrices=True`.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If True, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
name: The name scope to give class member ops.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[scale]) as ns:
super(WishartCholesky, self).__init__(
df=df,
scale_operator_pd=operator_pd_cholesky.OperatorPDCholesky(
scale, verify_pd=validate_args),
cholesky_input_output_matrices=cholesky_input_output_matrices,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
self._parameters = parameters
class WishartFull(_WishartOperatorPD):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar degrees of freedom `df` and a
symmetric, positive definite scale matrix.
Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations
where `(k, k)` is the event space shape.
#### Mathematical details.
The PDF of this distribution is,
```
f(X) = det(X)^(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / B(scale, df)
```
where `df >= k` denotes the degrees of freedom, `scale` is a symmetric, pd,
`k x k` matrix, and the normalizing constant `B(scale, df)` is given by:
```
B(scale, df) = 2^(0.5 df k) |det(scale)|^(0.5 df) Gamma_k(0.5 df)
```
where `Gamma_k` is the multivariate Gamma function.
#### Examples
```python
# Initialize a single 3x3 Wishart with a full (unfactored) scale matrix and 5
# degrees-of-freedom.(*)
df = 5
scale = ... # Shape is [3, 3]; positive definite.
dist = tf.contrib.distributions.WishartFull(df=df, scale=scale)
# Evaluate this on an observation in R^{3x3}, returning a scalar.
x = ... # A 3x3 positive definite matrix.
dist.pdf(x) # Shape is [], a scalar.
# Evaluate this on two observations, each in R^{3x3}, returning a length-two
# Tensor.
x = [x0, x1] # Shape is [2, 3, 3].
dist.pdf(x) # Shape is [2].
# Initialize two 3x3 Wisharts with full (unfactored) scale matrices.
df = [5, 4]
scale = ... # Shape is [2, 3, 3].
dist = tf.contrib.distributions.WishartFull(df=df, scale=scale)
# Evaluate this on four observations.
x = [[x0, x1], [x2, x3]] # Shape is [2, 2, 3, 3]; xi is positive definite.
dist.pdf(x) # Shape is [2, 2].
# (*) - To efficiently create a trainable covariance matrix, see the example
# in tf.contrib.distributions.matrix_diag_transform.
```
"""
def __init__(self,
df,
scale,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name="WishartFull"):
"""Construct Wishart distributions.
Args:
df: `float` or `double` `Tensor`. Degrees of freedom, must be greater than
or equal to dimension of the scale matrix.
scale: `float` or `double` `Tensor`. The symmetric positive definite
scale matrix of the distribution.
      cholesky_input_output_matrices: `Boolean`. Any function whose input or
        output is a matrix assumes the input is Cholesky-factored and returns
        a Cholesky-factored matrix. For example, `log_pdf` takes a Cholesky
        input and `sample_n` returns a Cholesky factor when
        `cholesky_input_output_matrices=True`.
validate_args: `Boolean`, default `False`. Whether to validate input with
asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If True, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
name: The name scope to give class member ops.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[scale]) as ns:
super(WishartFull, self).__init__(
df=df,
scale_operator_pd=operator_pd_full.OperatorPDFull(
scale, verify_pd=validate_args),
cholesky_input_output_matrices=cholesky_input_output_matrices,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
self._parameters = parameters
# =============================================================================
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
import re
from csmpe.plugins import CSMPlugin
from utils import available_space
from utils import number_of_rsp
from utils import install_folder
from utils import check_issu_readiness
from utils import remove_exist_subpkgs
from utils import install_package_family
from utils import create_folder
from utils import xe_show_platform
from utils import check_pkg_conf
class Plugin(CSMPlugin):
"""This plugin performs pre-activate tasks."""
name = "Install Pre-Activate Plugin"
platforms = {'ASR900'}
phases = {'Pre-Activate'}
os = {'XE'}
def run(self):
self.ctx.info("Hardware platform: {}".format(self.ctx._connection.platform))
self.ctx.info("OS Version: {}".format(self.ctx._connection.os_version))
try:
packages = self.ctx.software_packages
except AttributeError:
self.ctx.warning("No package list provided. Skipping calculation of required free bootflash memory.")
return
pkg = ''.join(packages)
con_platforms = ['ASR-902', 'ASR-920']
sub_platforms = ['ASR-903', 'ASR-907']
rsp_count = 1
folder = 'bootflash:'
stby_folder = 'stby-bootflash:'
# check the device type vs the package family
supported_imgs = {}
supported_imgs['asr902'] = ['asr900', 'asr903']
supported_imgs['asr903'] = ['asr900', 'asr903']
supported_imgs['asr907'] = ['asr900', 'asr903']
supported_imgs['asr920'] = ['asr920']
m = re.search('ASR-(\d+)', self.ctx._connection.platform)
if m:
device_family = m.group(1)
device_family = 'asr' + device_family
else:
self.ctx.error("Unspported device: {}".format(self.ctx._connection.platform))
return
pkg_family = install_package_family(pkg)
if not pkg_family:
self.ctx.info("Private device image: {}".format(pkg))
if pkg_family not in supported_imgs[device_family]:
self.ctx.info("Private device image: {} on {}".format(pkg, self.ctx._connection.platform))
# check the RSP type between image and device:
curr_rsp = None
pkg_rsp = None
output = self.ctx.send("show version | include RSP")
if output:
m = re.search('(RSP\d)', output)
if m:
curr_rsp = m.group(0).lower()
m = re.search('(rsp\d)', pkg)
if m:
pkg_rsp = m.group(0)
if curr_rsp and pkg_rsp and curr_rsp != pkg_rsp:
self.ctx.info("Incompatible Route processor in {} for this device {}".format(pkg, curr_rsp))
# Determine one of the following modes: consolidated, subpackage, or issu
if self.ctx._connection.platform in con_platforms:
mode = 'consolidated'
elif self.ctx._connection.platform in sub_platforms:
mode = 'subpackage'
# Determine the number of RSP's in the chassis
rsp_count = number_of_rsp(self.ctx)
if rsp_count == 0:
self.ctx.error("No RSP is discovered")
return
# Determine the install folder
folder = install_folder(self.ctx)
stby_folder = 'stby-' + folder
# Create the folder if it does not exist
if not create_folder(self.ctx, folder):
self.ctx.error("Install folder {} creation failed", format(folder))
return
if rsp_count == 2 and not create_folder(self.ctx, stby_folder):
self.ctx.error("Install folder {} creation "
"failed", format(stby_folder))
return
else:
self.ctx.error("Unsupported platform: {}".format(self.ctx._connection.platform))
return
total_size = 10000000
valid_pkg_conf = False
if mode == 'subpackage':
# Check if the packages.conf is valid
valid_pkg_conf = check_pkg_conf(self.ctx, folder)
# Remove residual image files from previous installations
if valid_pkg_conf:
remove_exist_subpkgs(self.ctx, folder, pkg)
else:
self.ctx.warning("Empty or invalid {}/packages.conf".format(folder))
self.ctx.warning("Residual packages from previous installations are not "
"automatically removed from bootflash: / stby-bootflash:.")
self.ctx.info("Sub-package mode will be performed to "
"activate package = {}".format(pkg))
cmd = "dir bootflash: | include " + pkg
output = self.ctx.send(cmd)
if output:
m = re.search('-rw-\s+(\d+)\s+', output)
if m:
total_size += int(m.group(1))
flash_free = available_space(self.ctx, 'bootflash:')
self.ctx.info("Total required / bootflash "
"available: {} / {} bytes".format(total_size, flash_free))
if flash_free < total_size:
self.ctx.error("Not enough space on bootflash: to install packages. "
"The install process can't proceed.\n"
"Please erase unused images, crashinfo, "
"core files, and tracelogs")
else:
self.ctx.info("There is enough space on bootflash: to install packages.")
if rsp_count == 2:
if valid_pkg_conf:
remove_exist_subpkgs(self.ctx, stby_folder, pkg)
stby_free = available_space(self.ctx, 'stby-bootflash:')
self.ctx.info("Total required / stby-bootflash "
"available: {} / {} bytes".format(total_size, stby_free))
if stby_free < total_size:
self.ctx.error("Not enough space on stby-bootflash: to "
"install packages. The install process can't proceed.\n"
"Please erase unused images, crashinfo, core files, "
"and tracelogs")
else:
self.ctx.info("There is enough space on stby-bootflash: to install packages.")
# Determine if ISSU is feasible
if mode == 'subpackage' and rsp_count == 2 and valid_pkg_conf:
if check_issu_readiness(self.ctx, pkg, total_size):
mode = 'issu'
self.ctx.info("ISSU will be performed to activate package = {}".format(pkg))
# Log the status of RP and SIP
platform_info = xe_show_platform(self.ctx)
if not platform_info:
self.ctx.error("The CLI 'show platform' is not able to determine the status of RP and SIP ")
return
self.ctx.info("show platform = {}".format(platform_info))
self.ctx.info("Activate number of RSP = {}".format(rsp_count))
self.ctx.info("Activate package = {}".format(pkg))
self.ctx.info("Install folder = {}".format(folder))
self.ctx.info("Activate package mode = {}".format(mode))
self.ctx.save_data('xe_rsp_count', rsp_count)
self.ctx.save_data('xe_activate_pkg', pkg)
self.ctx.save_data('xe_boot_mode', mode)
self.ctx.save_data('xe_install_folder', folder)
self.ctx.save_data('xe_show_platform', platform_info)
return True
# Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
from collections import namedtuple
from functools import partial
from os import environ
import time
import matplotlib as mpl
import matplotlib.pyplot as plt
import jax.numpy as np
import jax.lax as lax
import jax.random as random
from jax import vmap, jit, pmap
from jax.tree_util import tree_map, tree_multimap
from jax.config import config
config.update("jax_enable_x64", True)
config.enable_omnistaging()
def length(x): return np.sqrt(np.dot(x, x))
def normalize(x): return x / length(x)
def relu(x): return np.maximum(x, 0.0)
def cross(a, b):
(a1, a2, a3) = a
(b1, b2, b3) = b
return np.array([a2 * b3 - a3 * b2, a3 * b1 - a1 * b3, a1 * b2 - a2 * b1])
def directionAndLength(x):
    l = length(x)
    return (x / l, l)
def rotateX(p, angle):
c = np.cos(angle)
s = np.sin(angle)
(px, py, pz) = p
return np.array([px, c*py - s*pz, s*py + c*pz])
def rotateY(p, angle):
c = np.cos(angle)
s = np.sin(angle)
(px, py, pz) = p
    return np.array([c*px + s*pz, py, -s*px + c*pz])
def rotateZ(p, angle):
c = np.cos(angle)
s = np.sin(angle)
(px, py, pz) = p
return np.array([c*px - s*py, s*px+c*py, pz])
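# Cosine-weighted hemisphere sampling about `normal`: (rx, ry) is a uniform
# point on the unit disk (radius sqrt(u2)) lifted onto the hemisphere by
# rz = sqrt(1 - u2), giving a pdf proportional to cos(theta).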
def sampleCosineWeightedHemisphere(normal, k):
(k1, k2) = random.split(k)
u1 = random.uniform(k1)
u2 = random.uniform(k2)
    uu = normalize(cross(normal, np.array([0.0, 1.1, 1.1])))
    vv = cross(uu, normal)
    ra = np.sqrt(u2)
    rx = ra * np.cos(2.0 * np.pi * u1)
    ry = ra * np.sin(2.0 * np.pi * u1)
    rz = np.sqrt(1.0 - u2)
rr = (rx * uu) + (ry * vv) + (rz * normal)
return normalize(rr)
def positiveProjection(x, y):
assert x.shape == (3,)
assert y.shape == (3,), y
return np.dot(x, y) > 0.0
# TODO: use AD instead
def gradNumerical(f, xs):
eps = 0.0001
n, = xs.shape
    return vmap(lambda delta: (f(xs + delta) - f(xs - delta)) / (2.0 * eps))(eps * np.eye(n))
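# A possible AD-based replacement for the TODO above (a sketch, left
# commented out; assumes `f` is differentiable at `xs` under JAX autodiff):
# from jax import grad
# def gradAD(f, xs):
#     return grad(f)(xs)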
xHat = np.array([1.0, 0.0, 0.0], dtype=np.float64)
yHat = np.array([0.0, 1.0, 0.0], dtype=np.float64)
zHat = np.array([0.0, 0.0, 1.0], dtype=np.float64)
def dummy_float_vec(n):
return np.array([np.nan]*n, dtype=np.float64)
dummy_color = dummy_float_vec(3)
dummy_pos = dummy_float_vec(3)
dummy_dir = dummy_float_vec(3)
# Wall Block Sphere
ObjectGeom = namedtuple("ObjectGeom",
["tag",
"wallDirection", "wallDistance",
"blockPos", "blockHW", "blockAngle",
"spherePos", "sphereRadius"])
dummy_object_geom = ObjectGeom(-1,
dummy_dir, 0.0,
dummy_pos, dummy_float_vec(3), 0.0,
dummy_pos, 0.0)
WALL_OBJECT = 0
BLOCK_OBJECT = 1
SPHERE_OBJECT = 2
def make_wall(wallDirection, wallDistance):
geom = dummy_object_geom
geom = geom._replace( tag = WALL_OBJECT
, wallDirection = wallDirection
, wallDistance = wallDistance)
return geom
def make_block(blockPos, blockHW, blockAngle):
geom = dummy_object_geom
geom = geom._replace( tag = BLOCK_OBJECT
, blockPos = blockPos
, blockHW = blockHW
, blockAngle = blockAngle)
return geom
def make_sphere(spherePos, sphereRadius):
geom = dummy_object_geom
geom = geom._replace( tag = SPHERE_OBJECT
, spherePos = spherePos
, sphereRadius = sphereRadius)
return geom
Surface = namedtuple("Surface", ["tag", "matteColor"])
dummy_surface = Surface(0, dummy_color)
MATTE = 0
MIRROR = 1
def make_matte(color):
return Surface(MATTE, color)
def make_mirror():
return Surface(MIRROR, dummy_color)
Obj = namedtuple("Obj",
["tag",
"passiveGeom", "passiveSurface",
"lightPos", "lightHW", "lightRadiance"])
dummy_object = Obj(-1, dummy_object_geom, dummy_surface, dummy_pos, 0.0, dummy_color)
PASSIVE_OBJECT = 0
LIGHT = 1
def make_passive_object(passiveGeom, passiveSurface):
obj = dummy_object
obj = obj._replace(tag=PASSIVE_OBJECT, passiveGeom=passiveGeom,
passiveSurface=passiveSurface)
return obj
def make_light(lightPos, lightHW, lightRadiance):
obj = dummy_object
obj = obj._replace(tag=LIGHT, lightPos=lightPos,
lightHW=lightHW,lightRadiance=lightRadiance)
return obj
def sampleReflection(nor_surf, ray, k):
(pos, dir) = ray
(nor, surf) = nor_surf
def matteCase(_): return sampleCosineWeightedHemisphere(nor, k)
def mirrorCase(_): return dir - (2.0 * np.dot(dir, nor)) * nor
newRay = lax.switch(surf.tag, [matteCase, mirrorCase], ())
return (pos, newRay)
def probReflection(nor_surf, _, ray):
(nor, surf) = nor_surf
(_, outRayDir) = ray
def matteCase(_): return relu (np.dot(nor, outRayDir))
def mirrorCase(_): return 0.0
return lax.switch(surf.tag, [matteCase, mirrorCase], ())
def applyFilter(filter, radiance): return filter * radiance
def surfaceFilter(filter, surf):
def matteCase(_): return filter * surf.matteColor
def mirrorCase(_): return filter
return lax.switch(surf.tag, [matteCase, mirrorCase], ())
def sdObj(pos, obj):
def passiveObjCase(_):
geom = obj.passiveGeom
def wallCase(_):
(nor, d) = (geom.wallDirection, geom.wallDistance)
return d + np.dot(nor, pos)
def blockCase(_):
blockPos, halfWidths, angle = (geom.blockPos, geom.blockHW, geom.blockAngle)
relPos = rotateY(pos - blockPos, angle)
return length(np.maximum(np.abs(relPos) - halfWidths, 0.0))
def sphereCase(_):
spherePos, r = (geom.spherePos, geom.sphereRadius)
relPos = pos - spherePos
return np.maximum(length(relPos) - r, 0.0)
return lax.switch(geom.tag, [wallCase, blockCase, sphereCase], ())
def lightCase(args):
(squarePos, hw) = (obj.lightPos, obj.lightHW)
relPos = pos - squarePos
halfWidths = np.array([hw, 0.01, hw])
return length(np.maximum(np.abs(relPos) - halfWidths, 0.0))
return lax.switch(obj.tag, (passiveObjCase, lightCase), ())
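# Scene SDF: evaluate every object's signed distance at `pos` (the scene is
# a struct-of-arrays pytree, so vmap maps over objects) and return the
# nearest object together with its distance.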
def sdScene(scene, pos):
distances = vmap(partial(sdObj, pos))(scene)
d = np.min(distances)
i = np.argmin(distances)
return (tree_map(lambda x: x[i], scene), d)
def calcNormal(scene, pos):
dist = lambda p: sdScene(scene, p)[1]
return normalize(gradNumerical(dist, pos))
RayMarchResult = namedtuple("RayMarchResult", ["tag", "ray", "surf", "radiance"])
dummy_oriented_surface = (dummy_dir, dummy_surface)
dummy_raymarch_result = RayMarchResult(-1, (dummy_pos, dummy_dir),
dummy_oriented_surface, dummy_color)
HIT_OBJ = 0
HIT_LIGHT = 1
HIT_NOTHING = 2
def make_hit_obj(ray, surf):
ans = dummy_raymarch_result
ans = ans._replace(tag=HIT_OBJ, ray=ray, surf=surf)
return ans
def make_hit_light(radiance):
ans = dummy_raymarch_result
ans = ans._replace(tag=HIT_LIGHT, radiance=radiance)
return ans
def make_hit_nothing():
ans = dummy_raymarch_result
ans = ans._replace(tag=HIT_NOTHING)
return ans
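# Sphere-trace a ray through the scene: repeatedly advance by a fraction of
# the scene SDF until an object or light is reached, or the iteration budget
# runs out.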
def raymarch(scene, ray):
max_iters = 100
tol = 0.01
startLength = 10.0 * tol
(rayOrigin, rayDir) = ray
err_init = 100000.0
def cond(i_err_result_rayLength):
(i, err, _, rayLength) = i_err_result_rayLength
def false_case(_):
rayPos = rayOrigin + rayLength * rayDir
surfNorm = calcNormal(scene, rayPos)
return positiveProjection(rayDir, surfNorm)
return lax.cond(np.logical_and(i < max_iters, err >= tol),
lambda _: True, false_case, ())
def body(i_err_result_rayLength):
(i, _, _, rayLength) = i_err_result_rayLength
rayPos = rayOrigin + rayLength * rayDir
(obj, d) = sdScene(scene, rayPos)
# 0.9 ensures we come close to the surface but don't touch it
rayLengthNew = rayLength + 0.9 * d
def passiveCase(_):
surfNorm = calcNormal(scene, rayPos)
return make_hit_obj((rayPos, rayDir), (surfNorm, obj.passiveSurface))
def lightCase(_):
return make_hit_light(obj.lightRadiance)
raymarchResult = lax.switch(obj.tag, [passiveCase, lightCase], ())
return (i + 1, d, raymarchResult, rayLengthNew)
(_, _, result, _) = lax.while_loop(
cond, body, (0, err_init, make_hit_nothing(), 0.0))
return result
zero_radiance = np.array([0.0, 0.0, 0.0], dtype=np.float64)
def rayDirectRadiance(scene, ray):
ans = raymarch(scene, ray)
return np.where(ans.tag == HIT_LIGHT,
ans.radiance, zero_radiance)
def sampleSquare(hw, k):
(kx, kz) = random.split(k)
x = random.uniform(kx, minval= -hw, maxval=hw)
z = random.uniform(kz, minval= -hw, maxval=hw)
return np.array([x, 0.0, z])
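# All leaves of the scene pytree share a leading "object" axis, so any
# leaf's first dimension is the object count. A function object is (ab)used
# below as a mutable cell that tree_map can write into.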
def num_scene_objects(scene):
cell = lambda: None
def record_shape(xs):
cell.contents = xs.shape
tree_map(record_shape, scene)
return cell.contents[0]
def sampleLightRadiance(scene, osurf, inRay, k):
(surfNor, surf) = osurf
(rayPos, _) = inRay
def body(i, total):
obj = tree_map(lambda x: x[i], scene)
def passive_case(_):
return zero_radiance
def light_case(_):
(lightPos, hw) = (obj.lightPos, obj.lightHW)
(dirToLight, distToLight) = directionAndLength(lightPos + sampleSquare(hw, k) - rayPos)
def this_side_case(_):
fracSolidAngle = relu(np.dot(dirToLight, yHat)) * (hw**2) / (np.pi * (distToLight**2))
outRay = (rayPos, dirToLight)
coeff = fracSolidAngle * probReflection(osurf, inRay, outRay)
return coeff * rayDirectRadiance(scene, outRay)
def far_side_case(_):
return zero_radiance
return lax.cond(positiveProjection(dirToLight, surfNor), this_side_case, far_side_case, ())
radiance = lax.switch(obj.tag, [passive_case, light_case], ())
return total + radiance
return lax.fori_loop(0, num_scene_objects(scene), body, zero_radiance)
dummy_ray = (np.zeros((3,)), np.zeros((3,)))
dummy_filter = np.zeros((3,))
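# Path-trace a single ray: at each bounce, accumulate direct lighting via
# next-event estimation (sampleLightRadiance) weighted by the running
# surface color filter, then continue along a sampled reflection direction.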
def trace(params, scene, init_ray, k):
(_, max_bounces, _) = params
noFilter = np.array([1.0, 1.0, 1.0])
def while_cond(args):
(i, (finished, _, _, _)) = args
return np.logical_and(i < max_bounces, np.logical_not(finished))
def while_body(args):
(i, (_, color_filter, total_radiance, cur_ray)) = args
raymarchResult = raymarch(scene, cur_ray)
def hitObjCase(_):
(incidentRay, osurf) = (raymarchResult.ray, raymarchResult.surf)
(k1, k2) = random.split(random.fold_in(k, i))
lightRadiance = sampleLightRadiance(scene, osurf, incidentRay, k1)
outRayHemisphere = sampleReflection (osurf, incidentRay, k2)
newFilter = surfaceFilter(color_filter, osurf[1])
newRadiance = total_radiance + applyFilter(newFilter, lightRadiance)
return (False, newFilter, newRadiance, outRayHemisphere)
def hitLightCase(_):
radiance = np.where(i == 0, raymarchResult.radiance, total_radiance)
return (True, dummy_filter, radiance, dummy_ray)
def hitNothingCase(_):
return (True, color_filter, total_radiance, cur_ray)
result = lax.switch(raymarchResult.tag, [hitObjCase, hitLightCase, hitNothingCase], ())
return (i + 1, result)
loop_init = (0, (False, np.array([1.0,1.0,1.0]), np.array([0.0,0.0,0.0]), init_ray))
(_, (_, _, total, _)) = lax.while_loop(while_cond, while_body, loop_init)
return total
class WrapList(object):
def __init__(self, x):
self.contents = x
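# Turn a list of identically-structured nested namedtuples into one nested
# namedtuple whose leaves are stacked arrays (struct-of-arrays layout).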
def zipNestedNamedTuples(records):
ls = tree_map(lambda _: WrapList([]), records[0])
for record in records:
        tree_multimap(lambda l, x: l.contents.append(x), ls, record)
return tree_map(lambda l: np.array(l.contents), ls)
Pixel = namedtuple("Pixel", ["xmin", "xmax", "ymin", "ymax"])
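# Per-pixel sampling bounds on the image plane; each field is an (n, n)
# array.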
def pixel_positions(camera):
(n, _, halfWidth, _) = camera
pixHalfWidth = halfWidth / n
ys = np.broadcast_to(np.flip(np.linspace(-halfWidth, halfWidth, n))[:, None], (n, n))
xs = np.broadcast_to( np.linspace(-halfWidth, halfWidth, n )[None, :], (n, n))
return Pixel(xs - pixHalfWidth, xs + pixHalfWidth,
ys - pixHalfWidth, ys + pixHalfWidth)
def pixelColor(params, camera, pix_positions, scene, k, i, j):
(n, pos, halfWidth, sensorDist) = camera
pixHalfWidth = halfWidth / n
(k1, k2) = random.split(k)
(kx, ky) = random.split(k1)
pix = tree_map(lambda arr: arr[i, j], pix_positions)
x = random.uniform(kx, minval=pix.xmin, maxval=pix.xmax)
y = random.uniform(ky, minval=pix.ymin, maxval=pix.ymax)
ray = (pos, normalize(np.array([x, y, -sensorDist])))
return trace(params, scene, ray, k2)
def pixelColorAvg(params, camera, pix_positions, scene, k_root, i, j):
(num_samples, _, _) = params
ks = random.split(k_root, num=num_samples)
assert ks.shape == (num_samples, 2)
ans = vmap(lambda k: pixelColor(params, camera,
pix_positions, scene, k, i, j))(ks)
return np.mean(ans, axis=0)
def timeit(f):
t0 = time.time()
ans = f()
print("{} seconds".format(time.time() - t0))
return ans
num_pix = 32
num_samples = 8
num_bounces = 10
share_prng = True
params = (num_samples, num_bounces, share_prng)
camera = (num_pix, 10.0 * zHat, 0.3, 1.0)
lightColor = np.array([0.2, 0.2, 0.2])
leftWallColor = 1.5 * np.array([0.611, 0.0555, 0.062])
rightWallColor = 1.5 * np.array([0.117, 0.4125, 0.115])
whiteWallColor = np.array([255.0, 239.0, 196.0]) / 255.0
blockColor = np.array([200.0, 200.0, 255.0]) / 255.0
theScene = zipNestedNamedTuples(
[ make_light(1.9 * yHat, 0.5, lightColor)
, make_passive_object(make_wall( xHat, 2.0), make_matte(leftWallColor))
, make_passive_object(make_wall(- xHat, 2.0), make_matte(rightWallColor))
, make_passive_object(make_wall( yHat, 2.0), make_matte(whiteWallColor))
, make_passive_object(make_wall(- yHat, 2.0), make_matte(whiteWallColor))
, make_passive_object(make_wall( zHat, 2.0), make_matte(whiteWallColor))
, make_passive_object(make_block(np.array([ 1.0, -1.6, 1.2]),
np.array([0.6, 0.8, 0.6]), 0.5),
make_matte(blockColor))
, make_passive_object(make_sphere(np.array([-1.0, -1.2, 0.2]), 0.8),
make_matte(0.7 * whiteWallColor))
, make_passive_object(make_sphere(np.array([ 2.0, 2.0, -2.0]), 1.5),
make_mirror())
])
pix_positions = pixel_positions(camera)
xidxs = np.broadcast_to(np.arange(num_pix)[None, :], (num_pix, num_pix))
yidxs = np.broadcast_to(np.arange(num_pix)[:, None], (num_pix, num_pix))
num_cores = 32
def tile_cores(arr):
assert arr.shape[0] % num_cores == 0
size = arr.shape[0] // num_cores
return np.reshape(arr, (num_cores,size) + arr.shape[1:])
def untile_cores(arr):
assert arr.shape[0] == num_cores
num_tiles = arr.shape[1]
return np.reshape(arr, (num_cores * num_tiles,) + arr.shape[2:])
@jit
def renderScene_pmap(yidxs, xidxs):
return untile_cores(pmap(vmap(vmap(
partial(pixelColorAvg, params, camera, pix_positions,
theScene, random.PRNGKey(0)))))(tile_cores(yidxs), tile_cores(xidxs)))
@jit
def renderScene_no_pmap(yidxs, xidxs):
return vmap(vmap(
partial(pixelColorAvg, params, camera, pix_positions,
theScene, random.PRNGKey(0))))(yidxs, xidxs)
using_gpu = environ.get("CUDA_VISIBLE_DEVICES") != ""
if using_gpu:
renderScene = renderScene_no_pmap
else:
renderScene = renderScene_pmap
print("Compile time + run time")
img1 = timeit(lambda: renderScene(yidxs, xidxs).block_until_ready())
print("Run time")
img2 = timeit(lambda: renderScene(yidxs, xidxs).block_until_ready())
img = img2 / np.mean(img2)
plt.imshow(img, interpolation='none')
plt.grid('off')
plt.axis('off')
plt.savefig("jax-render.png")
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tflite_convert.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow import keras
from tensorflow.core.framework import graph_pb2
from tensorflow.lite.python import tflite_convert
from tensorflow.lite.python.convert import register_custom_opdefs
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.importer import import_graph_def
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.saved_model.save import save
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.training_util import write_graph
class TestModels(test_util.TensorFlowTestCase):
def _getFilepath(self, filename):
return os.path.join(self.get_temp_dir(), filename)
def _run(self, flags_str, should_succeed):
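    # Runs the `tflite_convert` binary in a subprocess with the given flags
    # and checks that a model file is produced iff `should_succeed`.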
output_file = os.path.join(self.get_temp_dir(), 'model.tflite')
tflite_bin = resource_loader.get_path_to_datafile('tflite_convert')
cmdline = '{0} --output_file={1} {2}'.format(tflite_bin, output_file,
flags_str)
exitcode = os.system(cmdline)
if exitcode == 0:
with gfile.Open(output_file, 'rb') as model_file:
content = model_file.read()
self.assertEqual(content is not None, should_succeed)
os.remove(output_file)
else:
self.assertFalse(should_succeed)
def _getKerasModelFile(self):
x = np.array([[1.], [2.]])
y = np.array([[2.], [4.]])
model = keras.models.Sequential([
keras.layers.Dropout(0.2, input_shape=(1,)),
keras.layers.Dense(1),
])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
keras_file = self._getFilepath('model.h5')
keras.models.save_model(model, keras_file)
return keras_file
class TfLiteConvertV1Test(TestModels):
def _run(self, flags_str, should_succeed):
if tf2.enabled():
flags_str += ' --enable_v1_converter'
super(TfLiteConvertV1Test, self)._run(flags_str, should_succeed)
def testFrozenGraphDef(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = self._getFilepath('model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
flags_str = ('--graph_def_file={0} --input_arrays={1} '
'--output_arrays={2}'.format(graph_def_file, 'Placeholder',
'add'))
self._run(flags_str, should_succeed=True)
os.remove(graph_def_file)
# Run `tflite_convert` explicitly with the legacy converter.
# Before the new converter is enabled by default, this flag has no real
# effects.
def testFrozenGraphDefWithLegacyConverter(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = self._getFilepath('model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
flags_str = (
'--graph_def_file={0} --input_arrays={1} '
'--output_arrays={2} --experimental_new_converter=false'.format(
graph_def_file, 'Placeholder', 'add'))
self._run(flags_str, should_succeed=True)
os.remove(graph_def_file)
def testFrozenGraphDefNonPlaceholder(self):
with ops.Graph().as_default():
in_tensor = random_ops.random_normal(shape=[1, 16, 16, 3], name='random')
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = self._getFilepath('model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
flags_str = ('--graph_def_file={0} --input_arrays={1} '
'--output_arrays={2}'.format(graph_def_file, 'random', 'add'))
self._run(flags_str, should_succeed=True)
os.remove(graph_def_file)
def testQATFrozenGraphDefInt8(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
_ = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output',
num_bits=16) # INT8 inference type works for 16 bits fake quant.
sess = session.Session()
# Write graph to file.
graph_def_file = self._getFilepath('model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
flags_str = ('--inference_type=INT8 --std_dev_values=128,128 '
'--mean_values=128,128 '
'--graph_def_file={0} --input_arrays={1},{2} '
'--output_arrays={3}'.format(graph_def_file, 'inputA',
'inputB', 'output'))
self._run(flags_str, should_succeed=True)
os.remove(graph_def_file)
def testQATFrozenGraphDefUInt8(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
_ = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Write graph to file.
graph_def_file = self._getFilepath('model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Define converter flags
flags_str = ('--std_dev_values=128,128 --mean_values=128,128 '
'--graph_def_file={0} --input_arrays={1} '
'--output_arrays={2}'.format(
graph_def_file, 'inputA,inputB', 'output'))
# Set inference_type UINT8 and (default) inference_input_type UINT8
flags_str_1 = flags_str + ' --inference_type=UINT8'
self._run(flags_str_1, should_succeed=True)
# Set inference_type UINT8 and inference_input_type FLOAT
flags_str_2 = flags_str_1 + ' --inference_input_type=FLOAT'
self._run(flags_str_2, should_succeed=True)
os.remove(graph_def_file)
def testSavedModel(self):
saved_model_dir = self._getFilepath('model')
with ops.Graph().as_default():
with session.Session() as sess:
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = in_tensor + in_tensor
inputs = {'x': in_tensor}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
flags_str = '--saved_model_dir={}'.format(saved_model_dir)
self._run(flags_str, should_succeed=True)
def _createSavedModelWithCustomOp(self):
custom_opdefs_str = (
'name: \'CustomAdd\' input_arg: {name: \'Input1\' type: DT_FLOAT} '
'input_arg: {name: \'Input2\' type: DT_FLOAT} output_arg: {name: '
'\'Output\' type: DT_FLOAT}')
# Create a graph that has one add op.
new_graph = graph_pb2.GraphDef()
with ops.Graph().as_default():
with session.Session() as sess:
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='input')
out_tensor = in_tensor + in_tensor
inputs = {'x': in_tensor}
outputs = {'z': out_tensor}
new_graph.CopyFrom(sess.graph_def)
# Rename Add op name to CustomAdd.
for node in new_graph.node:
if node.op.startswith('Add'):
node.op = 'CustomAdd'
del node.attr['T']
# Register custom op defs to import modified graph def.
register_custom_opdefs([custom_opdefs_str])
# Store saved model.
saved_model_dir = self._getFilepath('model')
with ops.Graph().as_default():
with session.Session() as sess:
import_graph_def(new_graph, name='')
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return (saved_model_dir, custom_opdefs_str)
def testEnsureCustomOpdefsFlag(self):
saved_model_dir, _ = self._createSavedModelWithCustomOp()
# Ensure --custom_opdefs.
flags_str = ('--saved_model_dir={0} --allow_custom_ops '
'--experimental_new_converter'.format(saved_model_dir))
self._run(flags_str, should_succeed=False)
def testSavedModelWithCustomOpdefsFlag(self):
saved_model_dir, custom_opdefs_str = self._createSavedModelWithCustomOp()
# Valid conversion.
flags_str = (
'--saved_model_dir={0} --custom_opdefs="{1}" --allow_custom_ops '
'--experimental_new_converter'.format(saved_model_dir,
custom_opdefs_str))
self._run(flags_str, should_succeed=True)
def testSavedModelWithInvalidCustomOpdefsFlag(self):
saved_model_dir, _ = self._createSavedModelWithCustomOp()
invalid_custom_opdefs_str = (
'name: \'CustomAdd\' input_arg: {name: \'Input1\' type: DT_FLOAT} '
'output_arg: {name: \'Output\' type: DT_FLOAT}')
# Valid conversion.
flags_str = (
'--saved_model_dir={0} --custom_opdefs="{1}" --allow_custom_ops '
'--experimental_new_converter'.format(saved_model_dir,
invalid_custom_opdefs_str))
self._run(flags_str, should_succeed=False)
def testKerasFile(self):
keras_file = self._getKerasModelFile()
flags_str = '--keras_model_file={}'.format(keras_file)
self._run(flags_str, should_succeed=True)
os.remove(keras_file)
def testKerasFileMLIR(self):
keras_file = self._getKerasModelFile()
flags_str = (
'--keras_model_file={} --experimental_new_converter'.format(keras_file))
self._run(flags_str, should_succeed=True)
os.remove(keras_file)
def testConversionSummary(self):
keras_file = self._getKerasModelFile()
log_dir = self.get_temp_dir()
flags_str = ('--keras_model_file={} --experimental_new_converter '
'--conversion_summary_dir={}'.format(keras_file, log_dir))
self._run(flags_str, should_succeed=True)
os.remove(keras_file)
num_items_conversion_summary = len(os.listdir(log_dir))
self.assertTrue(num_items_conversion_summary)
def testConversionSummaryWithOldConverter(self):
keras_file = self._getKerasModelFile()
log_dir = self.get_temp_dir()
flags_str = ('--keras_model_file={} --experimental_new_converter=false '
'--conversion_summary_dir={}'.format(keras_file, log_dir))
self._run(flags_str, should_succeed=True)
os.remove(keras_file)
num_items_conversion_summary = len(os.listdir(log_dir))
self.assertEqual(num_items_conversion_summary, 0)
def _initObjectDetectionArgs(self):
# Initializes the arguments required for the object detection model.
# Looks for the model file which is saved in a different location internally
# and externally.
filename = resource_loader.get_path_to_datafile('testdata/tflite_graph.pb')
if not os.path.exists(filename):
filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
'../tflite_mobilenet_ssd_quant_protobuf/tflite_graph.pb')
if not os.path.exists(filename):
raise IOError("File '{0}' does not exist.".format(filename))
self._graph_def_file = filename
self._input_arrays = 'normalized_input_image_tensor'
self._output_arrays = (
'TFLite_Detection_PostProcess,TFLite_Detection_PostProcess:1,'
'TFLite_Detection_PostProcess:2,TFLite_Detection_PostProcess:3')
self._input_shapes = '1,300,300,3'
def testObjectDetection(self):
"""Tests object detection model through TOCO."""
self._initObjectDetectionArgs()
flags_str = ('--graph_def_file={0} --input_arrays={1} '
'--output_arrays={2} --input_shapes={3} '
'--allow_custom_ops'.format(self._graph_def_file,
self._input_arrays,
self._output_arrays,
self._input_shapes))
self._run(flags_str, should_succeed=True)
def testObjectDetectionMLIR(self):
"""Tests object detection model through MLIR converter."""
self._initObjectDetectionArgs()
custom_opdefs_str = (
'name: \'TFLite_Detection_PostProcess\' '
'input_arg: { name: \'raw_outputs/box_encodings\' type: DT_FLOAT } '
'input_arg: { name: \'raw_outputs/class_predictions\' type: DT_FLOAT } '
'input_arg: { name: \'anchors\' type: DT_FLOAT } '
'output_arg: { name: \'TFLite_Detection_PostProcess\' type: DT_FLOAT } '
'output_arg: { name: \'TFLite_Detection_PostProcess:1\' '
'type: DT_FLOAT } '
'output_arg: { name: \'TFLite_Detection_PostProcess:2\' '
'type: DT_FLOAT } '
'output_arg: { name: \'TFLite_Detection_PostProcess:3\' '
'type: DT_FLOAT } '
'attr : { name: \'h_scale\' type: \'float\'} '
'attr : { name: \'max_classes_per_detection\' type: \'int\'} '
'attr : { name: \'max_detections\' type: \'int\'} '
'attr : { name: \'nms_iou_threshold\' type: \'float\'} '
'attr : { name: \'nms_score_threshold\' type: \'float\'} '
'attr : { name: \'num_classes\' type: \'int\'} '
'attr : { name: \'w_scale\' type: \'float\'} '
'attr : { name: \'x_scale\' type: \'float\'} '
'attr : { name: \'y_scale\' type: \'float\'}')
flags_str = ('--graph_def_file={0} --input_arrays={1} '
'--output_arrays={2} --input_shapes={3} '
'--custom_opdefs="{4}"'.format(self._graph_def_file,
self._input_arrays,
self._output_arrays,
self._input_shapes,
custom_opdefs_str))
# Ensure --allow_custom_ops.
flags_str_final = ('{} --allow_custom_ops').format(flags_str)
self._run(flags_str_final, should_succeed=False)
# Ensure --experimental_new_converter.
flags_str_final = ('{} --experimental_new_converter').format(flags_str)
self._run(flags_str_final, should_succeed=False)
# Valid conversion.
flags_str_final = ('{} --allow_custom_ops '
'--experimental_new_converter').format(flags_str)
self._run(flags_str_final, should_succeed=True)
class TfLiteConvertV2Test(TestModels):
@test_util.run_v2_only
def testSavedModel(self):
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.f = def_function.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data)
saved_model_dir = self._getFilepath('model')
save(root, saved_model_dir, to_save)
flags_str = '--saved_model_dir={}'.format(saved_model_dir)
self._run(flags_str, should_succeed=True)
@test_util.run_v2_only
def testKerasFile(self):
keras_file = self._getKerasModelFile()
flags_str = '--keras_model_file={}'.format(keras_file)
self._run(flags_str, should_succeed=True)
os.remove(keras_file)
@test_util.run_v2_only
def testKerasFileMLIR(self):
keras_file = self._getKerasModelFile()
flags_str = (
'--keras_model_file={} --experimental_new_converter'.format(keras_file))
self._run(flags_str, should_succeed=True)
os.remove(keras_file)
def testMissingRequired(self):
self._run('--invalid_args', should_succeed=False)
def testMutuallyExclusive(self):
self._run(
'--keras_model_file=model.h5 --saved_model_dir=/tmp/',
should_succeed=False)
class ArgParserTest(test_util.TensorFlowTestCase):
def test_without_experimental_new_converter(self):
args = [
'--saved_model_dir=/tmp/saved_model/',
'--output_file=/tmp/output.tflite',
]
# Note that when the flag parses to None, the converter uses the default
# value, which is True.
# V1 parser.
parser = tflite_convert._get_parser(use_v2_converter=False)
parsed_args = parser.parse_args(args)
self.assertIsNone(parsed_args.experimental_new_converter)
# V2 parser.
parser = tflite_convert._get_parser(use_v2_converter=True)
parsed_args = parser.parse_args(args)
self.assertIsNone(parsed_args.experimental_new_converter)
def test_experimental_new_converter(self):
args = [
'--saved_model_dir=/tmp/saved_model/',
'--output_file=/tmp/output.tflite',
'--experimental_new_converter',
]
# V1 parser.
parser = tflite_convert._get_parser(use_v2_converter=False)
parsed_args = parser.parse_args(args)
self.assertTrue(parsed_args.experimental_new_converter)
# V2 parser.
parser = tflite_convert._get_parser(use_v2_converter=True)
parsed_args = parser.parse_args(args)
self.assertTrue(parsed_args.experimental_new_converter)
def test_experimental_new_converter_true(self):
args = [
'--saved_model_dir=/tmp/saved_model/',
'--output_file=/tmp/output.tflite',
'--experimental_new_converter=true',
]
# V1 parser.
    parser = tflite_convert._get_parser(use_v2_converter=False)
parsed_args = parser.parse_args(args)
self.assertTrue(parsed_args.experimental_new_converter)
# V2 parser.
    parser = tflite_convert._get_parser(use_v2_converter=True)
parsed_args = parser.parse_args(args)
self.assertTrue(parsed_args.experimental_new_converter)
def test_experimental_new_converter_false(self):
args = [
'--saved_model_dir=/tmp/saved_model/',
'--output_file=/tmp/output.tflite',
'--experimental_new_converter=false',
]
# V1 parser.
parser = tflite_convert._get_parser(use_v2_converter=False)
parsed_args = parser.parse_args(args)
self.assertFalse(parsed_args.experimental_new_converter)
# V2 parser.
parser = tflite_convert._get_parser(use_v2_converter=True)
parsed_args = parser.parse_args(args)
self.assertFalse(parsed_args.experimental_new_converter)
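
  # For reference, the flag combinations exercised above correspond to command
  # lines of this shape (illustrative only; the paths are placeholders):
  #
  #   tflite_convert --saved_model_dir=/tmp/saved_model/ \
  #       --output_file=/tmp/output.tflite --experimental_new_converter=false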
if __name__ == '__main__':
test.main()
|
|
# _*_coding:utf-8_*_
from __future__ import print_function
import glob
import json
import logging
import os
import re
import socket
import subprocess
import sys
import time
import psutil
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import requests
class StanfordCoreNLP:
def __init__(self, path_or_host, port=None, memory='4g', lang='en', timeout=1500, quiet=True,
logging_level=logging.WARNING, max_retries=5):
self.path_or_host = path_or_host
self.port = port
self.memory = memory
self.lang = lang
self.timeout = timeout
self.quiet = quiet
self.logging_level = logging_level
logging.basicConfig(level=self.logging_level)
# Check args
self._check_args()
if path_or_host.startswith('http'):
self.url = path_or_host + ':' + str(port)
logging.info('Using an existing server {}'.format(self.url))
else:
# Check Java
            if subprocess.call(['java', '-version'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) != 0:
                raise RuntimeError('Java not found.')
# Check if the dir exists
if not os.path.isdir(self.path_or_host):
raise IOError(str(self.path_or_host) + ' is not a directory.')
directory = os.path.normpath(self.path_or_host) + os.sep
self.class_path_dir = directory
# Check if the language specific model file exists
switcher = {
'en': 'stanford-corenlp-[0-9].[0-9].[0-9]-models.jar',
'zh': 'stanford-chinese-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
'ar': 'stanford-arabic-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
'fr': 'stanford-french-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
'de': 'stanford-german-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
'es': 'stanford-spanish-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar'
}
jars = {
'en': 'stanford-corenlp-x.x.x-models.jar',
'zh': 'stanford-chinese-corenlp-yyyy-MM-dd-models.jar',
'ar': 'stanford-arabic-corenlp-yyyy-MM-dd-models.jar',
'fr': 'stanford-french-corenlp-yyyy-MM-dd-models.jar',
'de': 'stanford-german-corenlp-yyyy-MM-dd-models.jar',
'es': 'stanford-spanish-corenlp-yyyy-MM-dd-models.jar'
}
if len(glob.glob(directory + switcher.get(self.lang))) <= 0:
                raise IOError(jars.get(self.lang) + ' does not exist. You should download it '
                              'and place it in ' + directory + ' first.')
# If port not set, auto select
if self.port is None:
for port_candidate in range(9000, 65535):
if port_candidate not in [conn.laddr[1] for conn in psutil.net_connections()]:
self.port = port_candidate
break
# Check if the port is in use
if self.port in [conn.laddr[1] for conn in psutil.net_connections()]:
raise IOError('Port ' + str(self.port) + ' is already in use.')
# Start native server
logging.info('Initializing native server...')
cmd = "java"
java_args = "-Xmx{}".format(self.memory)
java_class = "edu.stanford.nlp.pipeline.StanfordCoreNLPServer"
class_path = '"{}*"'.format(directory)
args = [cmd, java_args, '-cp', class_path, java_class, '-port', str(self.port)]
args = ' '.join(args)
logging.info(args)
# Silence
with open(os.devnull, 'w') as null_file:
out_file = None
if self.quiet:
out_file = null_file
self.p = subprocess.Popen(args, shell=True, stdout=out_file, stderr=subprocess.STDOUT)
logging.info('Server shell PID: {}'.format(self.p.pid))
self.url = 'http://localhost:' + str(self.port)
# Wait until server starts
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host_name = urlparse(self.url).hostname
time.sleep(1) # OSX, not tested
trial = 1
while sock.connect_ex((host_name, self.port)):
if trial > max_retries:
raise ValueError('Corenlp server is not available')
logging.info('Waiting until the server is available.')
trial += 1
time.sleep(1)
logging.info('The server is available.')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
logging.info('Cleanup...')
if hasattr(self, 'p'):
try:
parent = psutil.Process(self.p.pid)
except psutil.NoSuchProcess:
logging.info('No process: {}'.format(self.p.pid))
return
if self.class_path_dir not in ' '.join(parent.cmdline()):
logging.info('Process not in: {}'.format(parent.cmdline()))
return
children = parent.children(recursive=True)
for process in children:
logging.info('Killing pid: {}, cmdline: {}'.format(process.pid, process.cmdline()))
# process.send_signal(signal.SIGTERM)
process.kill()
logging.info('Killing shell pid: {}, cmdline: {}'.format(parent.pid, parent.cmdline()))
# parent.send_signal(signal.SIGTERM)
parent.kill()
def annotate(self, text, properties=None):
if sys.version_info.major >= 3:
text = text.encode('utf-8')
r = requests.post(self.url, params={'properties': str(properties)}, data=text,
headers={'Connection': 'close'})
return r.text
def tregex(self, sentence, pattern):
tregex_url = self.url + '/tregex'
r_dict = self._request(tregex_url, "tokenize,ssplit,depparse,parse", sentence, pattern=pattern)
return r_dict
def tokensregex(self, sentence, pattern):
tokensregex_url = self.url + '/tokensregex'
r_dict = self._request(tokensregex_url, "tokenize,ssplit,depparse", sentence, pattern=pattern)
return r_dict
def semgrex(self, sentence, pattern):
semgrex_url = self.url + '/semgrex'
r_dict = self._request(semgrex_url, "tokenize,ssplit,depparse", sentence, pattern=pattern)
return r_dict
def word_tokenize(self, sentence, span=False):
        r_dict = self._request(self.url, 'ssplit,tokenize', sentence)
tokens = [token['originalText'] for s in r_dict['sentences'] for token in s['tokens']]
# Whether return token span
if span:
spans = [(token['characterOffsetBegin'], token['characterOffsetEnd']) for s in r_dict['sentences'] for token
in s['tokens']]
return tokens, spans
else:
return tokens
def pos_tag(self, sentence):
r_dict = self._request(self.url, 'pos', sentence)
words = []
tags = []
for s in r_dict['sentences']:
for token in s['tokens']:
words.append(token['originalText'])
tags.append(token['pos'])
return list(zip(words, tags))
def ner(self, sentence):
r_dict = self._request(self.url, 'ner', sentence)
words = []
ner_tags = []
for s in r_dict['sentences']:
for token in s['tokens']:
words.append(token['originalText'])
ner_tags.append(token['ner'])
return list(zip(words, ner_tags))
def parse(self, sentence):
r_dict = self._request(self.url, 'pos,parse', sentence)
return [s['parse'] for s in r_dict['sentences']][0]
def dependency_parse(self, sentence):
r_dict = self._request(self.url, 'depparse', sentence)
return [(dep['dep'], dep['governor'], dep['dependent']) for s in r_dict['sentences'] for dep in
s['basicDependencies']]
def coref(self, text):
        r_dict = self._request(self.url, 'coref', text)
corefs = []
for k, mentions in r_dict['corefs'].items():
simplified_mentions = []
for m in mentions:
simplified_mentions.append((m['sentNum'], m['startIndex'], m['endIndex'], m['text']))
corefs.append(simplified_mentions)
return corefs
def switch_language(self, language="en"):
self._check_language(language)
self.lang = language
def _request(self, url, annotators=None, data=None, *args, **kwargs):
if sys.version_info.major >= 3:
data = data.encode('utf-8')
properties = {'annotators': annotators, 'outputFormat': 'json'}
params = {'properties': str(properties), 'pipelineLanguage': self.lang}
if 'pattern' in kwargs:
params = {"pattern": kwargs['pattern'], 'properties': str(properties), 'pipelineLanguage': self.lang}
logging.info(params)
r = requests.post(url, params=params, data=data, headers={'Connection': 'close'})
r_dict = json.loads(r.text)
return r_dict
def _check_args(self):
self._check_language(self.lang)
        if not re.match(r'\dg', self.memory):
            raise ValueError('memory=' + self.memory + ' is not supported. Use 4g, 6g, 8g, etc.')
def _check_language(self, lang):
if lang not in ['en', 'zh', 'ar', 'fr', 'de', 'es']:
            raise ValueError('lang=' + lang + ' is not supported. Use English(en), Chinese(zh), '
                             'Arabic(ar), French(fr), German(de), Spanish(es).')
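

# Example usage (a sketch; './stanford-corenlp' is a placeholder path to an
# unpacked CoreNLP distribution -- an http host plus port also works):
#
#     with StanfordCoreNLP('./stanford-corenlp', memory='4g', lang='en') as nlp:
#         sentence = 'The quick brown fox jumps over the lazy dog.'
#         print(nlp.word_tokenize(sentence))
#         print(nlp.pos_tag(sentence))
#         print(nlp.ner(sentence))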
|
|
from django.conf import settings
from restclients.canvas import Canvas
from restclients.exceptions import DataFailureException
from restclients.models.canvas import Report, ReportType, Attachment
from urllib3 import PoolManager
from time import sleep
import re
class ReportFailureException(Exception):
"""
This exception means there was an error fetching report data.
"""
def __init__(self, report):
self.report = report
def __str__(self):
return ("Error fetching report %s" % self.report.report_id)
class Reports(Canvas):
def get_available_reports(self, account_id):
"""
Returns the list of reports for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.available_reports
"""
url = "/api/v1/accounts/%s/reports" % account_id
report_types = []
for datum in self._get_resource(url):
report_type = ReportType()
report_type.name = datum["report"]
report_type.title = datum["title"]
report_type.parameters = datum["parameters"]
if datum["last_run"] is not None:
report = self._report_from_json(account_id, datum["last_run"])
report_type.last_run = report
report_types.append(report_type)
return report_types
def get_reports_by_type(self, account_id, report_type):
"""
Shows all reports of the passed report_type that have been run
for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.index
"""
url = "/api/v1/accounts/%s/reports/%s" % (account_id, report_type)
reports = []
for datum in self._get_resource(url):
reports.append(self._report_from_json(account_id, datum))
return reports
    def create_report(self, report_type, account_id, term_id=None,
                      params=None):
        """
        Generates a report instance for the canvas account id.
        https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.create
        """
        if params is None:
            params = {}
        if term_id is not None:
            params["enrollment_term_id"] = term_id
url = "/api/v1/accounts/%s/reports/%s" % (account_id, report_type)
body = {"parameters": params}
data = self._post_resource(url, body)
return self._report_from_json(account_id, data)
    def create_course_provisioning_report(self, account_id, term_id=None,
                                          params=None):
        """
        Convenience method for create_report, for creating a course
        provisioning report.
        """
        if params is None:
            params = {}
        params["courses"] = True
return self.create_report(ReportType.PROVISIONING, account_id, term_id,
params)
    def create_enrollments_provisioning_report(self, account_id, term_id=None,
                                               params=None):
        """
        Convenience method for create_report, for creating an enrollment
        provisioning report.
        """
        if params is None:
            params = {}
        params["enrollments"] = True
return self.create_report(ReportType.PROVISIONING, account_id, term_id,
params)
    def create_user_provisioning_report(self, account_id, term_id=None,
                                        params=None):
        """
        Convenience method for create_report, for creating a user
        provisioning report.
        """
        if params is None:
            params = {}
        params["users"] = True
return self.create_report(ReportType.PROVISIONING, account_id, term_id,
params)
    def create_xlist_provisioning_report(self, account_id, term_id=None,
                                         params=None):
        """
        Convenience method for create_report, for creating a crosslist
        provisioning report.
        """
        if params is None:
            params = {}
        params["xlist"] = True
return self.create_report(ReportType.PROVISIONING, account_id, term_id,
params)
    def create_course_sis_export_report(self, account_id, term_id=None,
                                        params=None):
        """
        Convenience method for create_report, for creating a course sis export
        report.
        """
        if params is None:
            params = {}
        params["courses"] = True
return self.create_report(ReportType.SIS_EXPORT, account_id, term_id,
params)
def create_unused_courses_report(self, account_id, term_id=None):
"""
Convenience method for create_report, for creating an unused courses
report.
"""
return self.create_report(ReportType.UNUSED_COURSES, account_id,
term_id)
def get_report_data(self, report):
"""
Returns a completed report as a list of csv strings.
"""
if report.report_id is None or report.status is None:
raise ReportFailureException(report)
while report.status != "complete":
if report.status == "error":
raise ReportFailureException(report)
sleep(5)
report = self.get_report_status(report)
if report.attachment is None or report.attachment.url is None:
return
data = self._get_report_file(report.attachment.url)
return data.split("\n")
def get_report_status(self, report):
"""
Returns the status of a report.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.show
"""
if (report.account_id is None or report.type is None or
report.report_id is None):
raise ReportFailureException(report)
url = "/api/v1/accounts/%s/reports/%s/%s" % (
report.account_id, report.type, report.report_id)
data = self._get_resource(url)
return self._report_from_json(report.account_id, data)
def delete_report(self, report):
"""
Deletes a generated report instance.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.destroy
"""
url = "/api/v1/accounts/%s/reports/%s/%s" % (
report.account_id, report.type, report.report_id)
        self._delete_resource(url)
        return True
def _get_report_file(self, url):
# Ensure file url matches the hostname in settings,
# workaround for Canvas bug help.instructure.com/tickets/362386
url = re.sub(r'^https://[^/]+', settings.RESTCLIENTS_CANVAS_HOST, url)
timeout = getattr(settings, "RESTCLIENTS_TIMEOUT", 15.0)
cafile = getattr(settings, "RESTCLIENTS_CA_BUNDLE",
"/etc/ssl/certs/ca-bundle.crt")
pool_manager = PoolManager(cert_reqs="CERT_REQUIRED",
ca_certs=cafile,
timeout=timeout,
retries=5)
response = pool_manager.request("GET", url)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
return response.data
def _report_from_json(self, account_id, data):
report = Report()
report.account_id = account_id
report.report_id = data["id"]
report.type = data["report"]
report.url = data["file_url"]
report.status = data["status"]
report.progress = data["progress"]
report.parameters = data["parameters"]
if "attachment" in data:
report.attachment = Attachment(
attachment_id=data["attachment"]["id"],
filename=data["attachment"]["filename"],
display_name=data["attachment"]["display_name"],
content_type=data["attachment"]["content-type"],
size=data["attachment"]["size"],
url=data["attachment"]["url"])
return report
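

# Example usage (a sketch; the account id '12345' is a placeholder):
#
#     reports = Reports()
#     report = reports.create_course_provisioning_report('12345')
#     data = reports.get_report_data(report)  # polls every 5s until complete
#     reports.delete_report(report)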
|
|
"""A chart parser and some grammars. (Chapter 22)"""
# (Written for the second edition of AIMA; expect some discrepancies
# from the third edition until this gets reviewed.)
from .utils import *
from collections import defaultdict
#______________________________________________________________________________
# Grammars and Lexicons
def Rules(**rules):
"""Create a dictionary mapping symbols to alternative sequences.
>>> Rules(A = "B C | D E")
{'A': [['B', 'C'], ['D', 'E']]}
"""
for (lhs, rhs) in list(rules.items()):
rules[lhs] = [alt.strip().split() for alt in rhs.split('|')]
return rules
def Lexicon(**rules):
"""Create a dictionary mapping symbols to alternative words.
>>> Lexicon(Art = "the | a | an")
{'Art': ['the', 'a', 'an']}
"""
for (lhs, rhs) in list(rules.items()):
rules[lhs] = [word.strip() for word in rhs.split('|')]
return rules
class Grammar:
def __init__(self, name, rules, lexicon):
"A grammar has a set of rules and a lexicon."
update(self, name=name, rules=rules, lexicon=lexicon)
self.categories = defaultdict(list)
for lhs in lexicon:
for word in lexicon[lhs]:
self.categories[word].append(lhs)
def rewrites_for(self, cat):
"Return a sequence of possible rhs's that cat can be rewritten as."
return self.rules.get(cat, ())
def isa(self, word, cat):
"Return True iff word is of category cat"
return cat in self.categories[word]
def __repr__(self):
return '<Grammar %s>' % self.name
E0 = Grammar('E0',
Rules( # Grammar for E_0 [Fig. 22.4]
S='NP VP | S Conjunction S',
NP='Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause',
VP='Verb | VP NP | VP Adjective | VP PP | VP Adverb',
PP='Preposition NP',
RelClause='That VP'),
Lexicon( # Lexicon for E_0 [Fig. 22.3]
Noun="stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east",
Verb="is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel",
Adjective="right | left | east | south | back | smelly",
Adverb="here | there | nearby | ahead | right | left | east | south | back",
Pronoun="me | you | I | it",
Name="John | Mary | Boston | Aristotle",
Article="the | a | an",
Preposition="to | in | on | near",
Conjunction="and | or | but",
Digit="0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9",
That="that"
))
E_ = Grammar('E_', # Trivial Grammar and lexicon for testing
Rules(
S='NP VP',
NP='Art N | Pronoun',
VP='V NP'),
Lexicon(
Art='the | a',
N='man | woman | table | shoelace | saw',
Pronoun='I | you | it',
V='saw | liked | feel'
))
E_NP_ = Grammar('E_NP_', # another trivial grammar for testing
Rules(NP='Adj NP | N'),
Lexicon(Adj='happy | handsome | hairy',
N='man'))
def generate_random(grammar=E_, s='S'):
"""Replace each token in s by a random entry in grammar (recursively).
This is useful for testing a grammar, e.g. generate_random(E_)"""
import random
def rewrite(tokens, into):
for token in tokens:
if token in grammar.rules:
rewrite(random.choice(grammar.rules[token]), into)
elif token in grammar.lexicon:
into.append(random.choice(grammar.lexicon[token]))
else:
into.append(token)
return into
return ' '.join(rewrite(s.split(), []))
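
# Quick smoke test for the grammars above (illustrative; the output is random,
# so any sentence derivable from E_ is acceptable):
#
#     print(generate_random(E_))   # e.g. 'you liked the shoelace'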
#______________________________________________________________________________
# Chart Parsing
class Chart:
"""Class for parsing sentences using a chart data structure. [Fig 22.7]
>>> chart = Chart(E0);
>>> len(chart.parses('the stench is in 2 2'))
1
"""
def __init__(self, grammar, trace=False):
"""A datastructure for parsing a string; and methods to do the parse.
self.chart[i] holds the edges that end just before the i'th word.
Edges are 5-element lists of [start, end, lhs, [found], [expects]]."""
update(self, grammar=grammar, trace=trace)
def parses(self, words, S='S'):
"""Return a list of parses; words can be a list or string.
>>> chart = Chart(E_NP_)
>>> chart.parses('happy man', 'NP')
[[0, 2, 'NP', [('Adj', 'happy'), [1, 2, 'NP', [('N', 'man')], []]], []]]
"""
if isinstance(words, str):
words = words.split()
self.parse(words, S)
# Return all the parses that span the whole input
# 'span the whole input' => begin at 0, end at len(words)
return [[i, j, S, found, []]
for (i, j, lhs, found, expects) in self.chart[len(words)]
# assert j == len(words)
if i == 0 and lhs == S and expects == []]
def parse(self, words, S='S'):
"""Parse a list of words; according to the grammar.
Leave results in the chart."""
self.chart = [[] for i in range(len(words)+1)]
self.add_edge([0, 0, 'S_', [], [S]])
for i in range(len(words)):
self.scanner(i, words[i])
return self.chart
def add_edge(self, edge):
"Add edge to chart, and see if it extends or predicts another edge."
start, end, lhs, found, expects = edge
if edge not in self.chart[end]:
self.chart[end].append(edge)
if self.trace:
print('%10s: added %s' % (caller(2), edge))
if not expects:
self.extender(edge)
else:
self.predictor(edge)
def scanner(self, j, word):
"For each edge expecting a word of this category here, extend the edge."
for (i, j, A, alpha, Bb) in self.chart[j]:
if Bb and self.grammar.isa(word, Bb[0]):
self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]])
    def predictor(self, edge):
        "Add to chart any rules for B that could help extend this edge."
        (i, j, A, alpha, Bb) = edge
B = Bb[0]
if B in self.grammar.rules:
for rhs in self.grammar.rewrites_for(B):
self.add_edge([j, j, B, [], rhs])
def extender(self, edge):
"See what edges can be extended by this edge."
(j, k, B, _, _) = edge
for (i, j, A, alpha, B1b) in self.chart[j]:
if B1b and B == B1b[0]:
self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
# TODO:
# 1. Parsing with augmentations -- requires unification, etc.
# 2. Sequitur
__doc__ += """
>>> chart = Chart(E0)
>>> chart.parses('the wumpus that is smelly is near 2 2')
[[0, 9, 'S', [[0, 5, 'NP', [[0, 2, 'NP', [('Article', 'the'), ('Noun', 'wumpus')], []], [2, 5, 'RelClause', [('That', 'that'), [3, 5, 'VP', [[3, 4, 'VP', [('Verb', 'is')], []], ('Adjective', 'smelly')], []]], []]], []], [5, 9, 'VP', [[5, 6, 'VP', [('Verb', 'is')], []], [6, 9, 'PP', [('Preposition', 'near'), [7, 9, 'NP', [('Digit', '2'), ('Digit', '2')], []]], []]], []]], []]]
### There is a built-in trace facility (compare [Fig. 22.9])
>>> Chart(E_, trace=True).parses('I feel it')
parse: added [0, 0, 'S_', [], ['S']]
predictor: added [0, 0, 'S', [], ['NP', 'VP']]
predictor: added [0, 0, 'NP', [], ['Art', 'N']]
predictor: added [0, 0, 'NP', [], ['Pronoun']]
scanner: added [0, 1, 'NP', [('Pronoun', 'I')], []]
extender: added [0, 1, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []]], ['VP']]
predictor: added [1, 1, 'VP', [], ['V', 'NP']]
scanner: added [1, 2, 'VP', [('V', 'feel')], ['NP']]
predictor: added [2, 2, 'NP', [], ['Art', 'N']]
predictor: added [2, 2, 'NP', [], ['Pronoun']]
scanner: added [2, 3, 'NP', [('Pronoun', 'it')], []]
extender: added [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]
extender: added [0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]
extender: added [0, 3, 'S_', [[0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]], []]
[[0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]]
"""
|
|
from .Exporter import Exporter
from pyqtgraph.parametertree import Parameter
from pyqtgraph.Qt import QtGui, QtCore, QtSvg
import pyqtgraph as pg
import re
import xml.dom.minidom as xml
import numpy as np
__all__ = ['SVGExporter']
class SVGExporter(Exporter):
Name = "Scalable Vector Graphics (SVG)"
allowCopy=True
def __init__(self, item):
Exporter.__init__(self, item)
#tr = self.getTargetRect()
self.params = Parameter(name='params', type='group', children=[
#{'name': 'width', 'type': 'float', 'value': tr.width(), 'limits': (0, None)},
#{'name': 'height', 'type': 'float', 'value': tr.height(), 'limits': (0, None)},
#{'name': 'viewbox clipping', 'type': 'bool', 'value': True},
#{'name': 'normalize coordinates', 'type': 'bool', 'value': True},
#{'name': 'normalize line width', 'type': 'bool', 'value': True},
])
#self.params.param('width').sigValueChanged.connect(self.widthChanged)
#self.params.param('height').sigValueChanged.connect(self.heightChanged)
def widthChanged(self):
sr = self.getSourceRect()
ar = sr.height() / sr.width()
self.params.param('height').setValue(self.params['width'] * ar, blockSignal=self.heightChanged)
def heightChanged(self):
sr = self.getSourceRect()
ar = sr.width() / sr.height()
self.params.param('width').setValue(self.params['height'] * ar, blockSignal=self.widthChanged)
def parameters(self):
return self.params
def export(self, fileName=None, toBytes=False, copy=False):
if toBytes is False and copy is False and fileName is None:
self.fileSaveDialog(filter="Scalable Vector Graphics (*.svg)")
return
#self.svg = QtSvg.QSvgGenerator()
#self.svg.setFileName(fileName)
#dpi = QtGui.QDesktopWidget().physicalDpiX()
### not really sure why this works, but it seems to be important:
#self.svg.setSize(QtCore.QSize(self.params['width']*dpi/90., self.params['height']*dpi/90.))
#self.svg.setResolution(dpi)
##self.svg.setViewBox()
#targetRect = QtCore.QRect(0, 0, self.params['width'], self.params['height'])
#sourceRect = self.getSourceRect()
#painter = QtGui.QPainter(self.svg)
#try:
#self.setExportMode(True)
#self.render(painter, QtCore.QRectF(targetRect), sourceRect)
#finally:
#self.setExportMode(False)
#painter.end()
## Workaround to set pen widths correctly
#data = open(fileName).readlines()
#for i in range(len(data)):
#line = data[i]
#m = re.match(r'(<g .*)stroke-width="1"(.*transform="matrix\(([^\)]+)\)".*)', line)
#if m is not None:
##print "Matched group:", line
#g = m.groups()
#matrix = list(map(float, g[2].split(',')))
##print "matrix:", matrix
#scale = max(abs(matrix[0]), abs(matrix[3]))
#if scale == 0 or scale == 1.0:
#continue
#data[i] = g[0] + ' stroke-width="%0.2g" ' % (1.0/scale) + g[1] + '\n'
##print "old line:", line
##print "new line:", data[i]
#open(fileName, 'w').write(''.join(data))
## Qt's SVG generator is not complete. (notably, it lacks clipping)
## Instead, we will use Qt to generate SVG for each item independently,
## then manually reconstruct the entire document.
xml = generateSvg(self.item)
if toBytes:
return xml.encode('UTF-8')
elif copy:
md = QtCore.QMimeData()
md.setData('image/svg+xml', QtCore.QByteArray(xml.encode('UTF-8')))
QtGui.QApplication.clipboard().setMimeData(md)
        else:
            # Write bytes, since the XML string is encoded before writing.
            with open(fileName, 'wb') as fh:
                fh.write(xml.encode('UTF-8'))
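
# Example usage (a sketch, assuming `plot` is an existing pyqtgraph item such
# as a PlotItem or a GraphicsScene):
#
#     exp = SVGExporter(plot)
#     exp.export('out.svg')                  # write SVG to a file
#     svg_bytes = exp.export(toBytes=True)   # or get the raw SVG bytes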
xmlHeader = """\
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.2" baseProfile="tiny">
<title>pyqtgraph SVG export</title>
<desc>Generated with Qt and pyqtgraph</desc>
<defs>
</defs>
"""
def generateSvg(item):
global xmlHeader
try:
node = _generateItemSvg(item)
finally:
## reset export mode for all items in the tree
if isinstance(item, QtGui.QGraphicsScene):
items = item.items()
else:
items = [item]
for i in items:
items.extend(i.childItems())
for i in items:
if hasattr(i, 'setExportMode'):
i.setExportMode(False)
cleanXml(node)
return xmlHeader + node.toprettyxml(indent=' ') + "\n</svg>\n"
def _generateItemSvg(item, nodes=None, root=None):
## This function is intended to work around some issues with Qt's SVG generator
## and SVG in general.
## 1) Qt SVG does not implement clipping paths. This is absurd.
## The solution is to let Qt generate SVG for each item independently,
## then glue them together manually with clipping.
##
## The format Qt generates for all items looks like this:
##
## <g>
## <g transform="matrix(...)">
## one or more of: <path/> or <polyline/> or <text/>
## </g>
## <g transform="matrix(...)">
## one or more of: <path/> or <polyline/> or <text/>
## </g>
## . . .
## </g>
##
## 2) There seems to be wide disagreement over whether path strokes
## should be scaled anisotropically.
## see: http://web.mit.edu/jonas/www/anisotropy/
## Given that both inkscape and illustrator seem to prefer isotropic
## scaling, we will optimize for those cases.
##
## 3) Qt generates paths using non-scaling-stroke from SVG 1.2, but
## inkscape only supports 1.1.
##
## Both 2 and 3 can be addressed by drawing all items in world coordinates.
prof = pg.debug.Profiler('generateItemSvg %s' % str(item), disabled=True)
if nodes is None: ## nodes maps all node IDs to their XML element.
## this allows us to ensure all elements receive unique names.
nodes = {}
if root is None:
root = item
## Skip hidden items
if hasattr(item, 'isVisible') and not item.isVisible():
return None
## If this item defines its own SVG generator, use that.
if hasattr(item, 'generateSvg'):
return item.generateSvg(nodes)
## Generate SVG text for just this item (exclude its children; we'll handle them later)
tr = QtGui.QTransform()
if isinstance(item, QtGui.QGraphicsScene):
xmlStr = "<g>\n</g>\n"
doc = xml.parseString(xmlStr)
childs = [i for i in item.items() if i.parentItem() is None]
elif item.__class__.paint == QtGui.QGraphicsItem.paint:
xmlStr = "<g>\n</g>\n"
doc = xml.parseString(xmlStr)
childs = item.childItems()
else:
childs = item.childItems()
tr = itemTransform(item, item.scene())
## offset to corner of root item
if isinstance(root, QtGui.QGraphicsScene):
rootPos = QtCore.QPoint(0,0)
else:
rootPos = root.scenePos()
tr2 = QtGui.QTransform()
tr2.translate(-rootPos.x(), -rootPos.y())
tr = tr * tr2
#print item, pg.SRTTransform(tr)
#tr.translate(item.pos().x(), item.pos().y())
#tr = tr * item.transform()
arr = QtCore.QByteArray()
buf = QtCore.QBuffer(arr)
svg = QtSvg.QSvgGenerator()
svg.setOutputDevice(buf)
dpi = QtGui.QDesktopWidget().physicalDpiX()
### not really sure why this works, but it seems to be important:
#self.svg.setSize(QtCore.QSize(self.params['width']*dpi/90., self.params['height']*dpi/90.))
svg.setResolution(dpi)
p = QtGui.QPainter()
p.begin(svg)
if hasattr(item, 'setExportMode'):
item.setExportMode(True, {'painter': p})
try:
p.setTransform(tr)
item.paint(p, QtGui.QStyleOptionGraphicsItem(), None)
finally:
p.end()
## Can't do this here--we need to wait until all children have painted as well.
## this is taken care of in generateSvg instead.
#if hasattr(item, 'setExportMode'):
#item.setExportMode(False)
    xmlStr = bytes(arr).decode('utf-8')
doc = xml.parseString(xmlStr)
try:
## Get top-level group for this item
g1 = doc.getElementsByTagName('g')[0]
## get list of sub-groups
g2 = [n for n in g1.childNodes if isinstance(n, xml.Element) and n.tagName == 'g']
except:
print(doc.toxml())
raise
prof.mark('render')
## Get rid of group transformation matrices by applying
## transformation to inner coordinates
correctCoordinates(g1, item)
prof.mark('correct')
## make sure g1 has the transformation matrix
#m = (tr.m11(), tr.m12(), tr.m21(), tr.m22(), tr.m31(), tr.m32())
#g1.setAttribute('transform', "matrix(%f,%f,%f,%f,%f,%f)" % m)
#print "=================",item,"====================="
#print g1.toprettyxml(indent=" ", newl='')
## Inkscape does not support non-scaling-stroke (this is SVG 1.2, inkscape supports 1.1)
## So we need to correct anything attempting to use this.
#correctStroke(g1, item, root)
## decide on a name for this item
baseName = item.__class__.__name__
i = 1
while True:
name = baseName + "_%d" % i
if name not in nodes:
break
i += 1
nodes[name] = g1
g1.setAttribute('id', name)
## If this item clips its children, we need to take care of that.
childGroup = g1 ## add children directly to this node unless we are clipping
if not isinstance(item, QtGui.QGraphicsScene):
## See if this item clips its children
if int(item.flags() & item.ItemClipsChildrenToShape) > 0:
## Generate svg for just the path
#if isinstance(root, QtGui.QGraphicsScene):
#path = QtGui.QGraphicsPathItem(item.mapToScene(item.shape()))
#else:
#path = QtGui.QGraphicsPathItem(root.mapToParent(item.mapToItem(root, item.shape())))
path = QtGui.QGraphicsPathItem(item.mapToScene(item.shape()))
item.scene().addItem(path)
try:
pathNode = _generateItemSvg(path, root=root).getElementsByTagName('path')[0]
finally:
item.scene().removeItem(path)
## and for the clipPath element
clip = name + '_clip'
clipNode = g1.ownerDocument.createElement('clipPath')
clipNode.setAttribute('id', clip)
clipNode.appendChild(pathNode)
g1.appendChild(clipNode)
childGroup = g1.ownerDocument.createElement('g')
childGroup.setAttribute('clip-path', 'url(#%s)' % clip)
g1.appendChild(childGroup)
prof.mark('clipping')
## Add all child items as sub-elements.
childs.sort(key=lambda c: c.zValue())
for ch in childs:
cg = _generateItemSvg(ch, nodes, root)
if cg is None:
continue
childGroup.appendChild(cg) ### this isn't quite right--some items draw below their parent (good enough for now)
prof.mark('children')
prof.finish()
return g1
def correctCoordinates(node, item):
## Remove transformation matrices from <g> tags by applying matrix to coordinates inside.
groups = node.getElementsByTagName('g')
for grp in groups:
matrix = grp.getAttribute('transform')
match = re.match(r'matrix\((.*)\)', matrix)
        if match is None:
            vals = [1, 0, 0, 1, 0, 0]
        else:
            # list() so the values can be indexed under Python 3 as well
            vals = list(map(float, match.groups()[0].split(',')))
tr = np.array([[vals[0], vals[2], vals[4]], [vals[1], vals[3], vals[5]]])
removeTransform = False
for ch in grp.childNodes:
if not isinstance(ch, xml.Element):
continue
if ch.tagName == 'polyline':
removeTransform = True
                coords = np.array([list(map(float, c.split(','))) for c in ch.getAttribute('points').strip().split(' ')])
coords = pg.transformCoordinates(tr, coords, transpose=True)
ch.setAttribute('points', ' '.join([','.join(map(str, c)) for c in coords]))
elif ch.tagName == 'path':
removeTransform = True
newCoords = ''
oldCoords = ch.getAttribute('d').strip()
if oldCoords == '':
continue
for c in oldCoords.split(' '):
x,y = c.split(',')
if x[0].isalpha():
t = x[0]
x = x[1:]
else:
t = ''
nc = pg.transformCoordinates(tr, np.array([[float(x),float(y)]]), transpose=True)
newCoords += t+str(nc[0,0])+','+str(nc[0,1])+' '
ch.setAttribute('d', newCoords)
elif ch.tagName == 'text':
removeTransform = False
## leave text alone for now. Might need this later to correctly render text with outline.
#c = np.array([
#[float(ch.getAttribute('x')), float(ch.getAttribute('y'))],
#[float(ch.getAttribute('font-size')), 0],
#[0,0]])
#c = pg.transformCoordinates(tr, c, transpose=True)
#ch.setAttribute('x', str(c[0,0]))
#ch.setAttribute('y', str(c[0,1]))
#fs = c[1]-c[2]
#fs = (fs**2).sum()**0.5
#ch.setAttribute('font-size', str(fs))
## Correct some font information
families = ch.getAttribute('font-family').split(',')
if len(families) == 1:
font = QtGui.QFont(families[0].strip('" '))
if font.style() == font.SansSerif:
families.append('sans-serif')
elif font.style() == font.Serif:
families.append('serif')
elif font.style() == font.Courier:
families.append('monospace')
ch.setAttribute('font-family', ', '.join([f if ' ' not in f else '"%s"'%f for f in families]))
## correct line widths if needed
if removeTransform and ch.getAttribute('vector-effect') != 'non-scaling-stroke':
w = float(grp.getAttribute('stroke-width'))
s = pg.transformCoordinates(tr, np.array([[w,0], [0,0]]), transpose=True)
w = ((s[0]-s[1])**2).sum()**0.5
ch.setAttribute('stroke-width', str(w))
if removeTransform:
grp.removeAttribute('transform')
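
# Minimal illustration of the affine form handled above (not part of the
# exporter): an SVG 'matrix(a,b,c,d,e,f)' attribute, stored row-wise as
# [[a, c, e], [b, d, f]], maps (x, y) -> (a*x + c*y + e, b*x + d*y + f).
#
#     tr = np.array([[2., 0., 10.], [0., 2., 5.]])
#     pg.transformCoordinates(tr, np.array([[3., 4.]]), transpose=True)
#     # -> array([[16., 13.]])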
def itemTransform(item, root):
## Return the transformation mapping item to root
## (actually to parent coordinate system of root)
if item is root:
tr = QtGui.QTransform()
tr.translate(*item.pos())
tr = tr * item.transform()
return tr
if int(item.flags() & item.ItemIgnoresTransformations) > 0:
pos = item.pos()
parent = item.parentItem()
if parent is not None:
pos = itemTransform(parent, root).map(pos)
tr = QtGui.QTransform()
tr.translate(pos.x(), pos.y())
tr = item.transform() * tr
else:
## find next parent that is either the root item or
## an item that ignores its transformation
nextRoot = item
while True:
nextRoot = nextRoot.parentItem()
if nextRoot is None:
nextRoot = root
break
if nextRoot is root or int(nextRoot.flags() & nextRoot.ItemIgnoresTransformations) > 0:
break
if isinstance(nextRoot, QtGui.QGraphicsScene):
tr = item.sceneTransform()
else:
tr = itemTransform(nextRoot, root) * item.itemTransform(nextRoot)[0]
#pos = QtGui.QTransform()
#pos.translate(root.pos().x(), root.pos().y())
#tr = pos * root.transform() * item.itemTransform(root)[0]
return tr
#def correctStroke(node, item, root, width=1):
##print "==============", item, node
#if node.hasAttribute('stroke-width'):
#width = float(node.getAttribute('stroke-width'))
#if node.getAttribute('vector-effect') == 'non-scaling-stroke':
#node.removeAttribute('vector-effect')
#if isinstance(root, QtGui.QGraphicsScene):
#w = item.mapFromScene(pg.Point(width,0))
#o = item.mapFromScene(pg.Point(0,0))
#else:
#w = item.mapFromItem(root, pg.Point(width,0))
#o = item.mapFromItem(root, pg.Point(0,0))
#w = w-o
##print " ", w, o, w-o
#w = (w.x()**2 + w.y()**2) ** 0.5
##print " ", w
#node.setAttribute('stroke-width', str(w))
#for ch in node.childNodes:
#if isinstance(ch, xml.Element):
#correctStroke(ch, item, root, width)
def cleanXml(node):
## remove extraneous text; let the xml library do the formatting.
hasElement = False
nonElement = []
for ch in node.childNodes:
if isinstance(ch, xml.Element):
hasElement = True
cleanXml(ch)
else:
nonElement.append(ch)
if hasElement:
for ch in nonElement:
node.removeChild(ch)
elif node.tagName == 'g': ## remove childless groups
node.parentNode.removeChild(node)
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Uhd Ais 2
# Generated: Sat Dec 17 00:59:04 2016
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import analog
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import ais
import math
import pyqt
import sip
import sys
from gnuradio import qtgui
class uhd_ais_2(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Uhd Ais 2")
Qt.QWidget.__init__(self)
self.setWindowTitle("Uhd Ais 2")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "uhd_ais_2")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 250e3
self.decim = decim = 5
self.baud = baud = 9600
self.samp_per_sym = samp_per_sym = (samp_rate/decim/50*48)/baud
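        # Descriptive note: with the defaults above, 250e3/5 = 50e3 samp/s
        # after decimation, 50e3*48/50 = 48e3 samp/s after resampling, and
        # 48e3 / 9600 baud = 5 samples per symbol.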
self.fsk_deviation = fsk_deviation = 10e3
self.freq = freq = 162e6
self.filter_taps = filter_taps = firdes.low_pass(1,samp_rate, samp_rate/2, 50000, firdes.WIN_FLATTOP, 6.76)
##################################################
# Blocks
##################################################
self.rational_resampler_xxx_0_0 = filter.rational_resampler_fff(
interpolation=48,
decimation=50,
taps=None,
fractional_bw=None,
)
self.rational_resampler_xxx_0 = filter.rational_resampler_fff(
interpolation=48,
decimation=50,
taps=None,
fractional_bw=None,
)
self.qtgui_waterfall_sink_x_0_0 = qtgui.waterfall_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"AIS-B", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0_0.set_update_time(0.010)
self.qtgui_waterfall_sink_x_0_0.enable_grid(True)
self.qtgui_waterfall_sink_x_0_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0_0.set_intensity_range(-140, 10)
self._qtgui_waterfall_sink_x_0_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_waterfall_sink_x_0_0_win, 0,4,2,4)
self.qtgui_waterfall_sink_x_0 = qtgui.waterfall_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"AIS-A", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0.set_update_time(0.010)
self.qtgui_waterfall_sink_x_0.enable_grid(True)
self.qtgui_waterfall_sink_x_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0.set_intensity_range(-140, 10)
self._qtgui_waterfall_sink_x_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_waterfall_sink_x_0_win, 0,0,2,4)
self.qtgui_freq_sink_x_0_0 = qtgui.freq_sink_c(
2048, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate/decim, #bw
"AIS-B", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0_0.set_update_time(0.01)
self.qtgui_freq_sink_x_0_0.set_y_axis(-60, 10)
self.qtgui_freq_sink_x_0_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0.enable_grid(True)
self.qtgui_freq_sink_x_0_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["green", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_0_0_win, 2,4,2,4)
self.qtgui_freq_sink_x_0 = qtgui.freq_sink_c(
2048, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate/decim, #bw
"AIS-A", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0.set_update_time(0.01)
self.qtgui_freq_sink_x_0.set_y_axis(-60, 10)
self.qtgui_freq_sink_x_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0.enable_grid(True)
self.qtgui_freq_sink_x_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["green", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_freq_sink_x_0_win, 2,0,2,4)
self.pyqt_text_output_0_0 = pyqt.text_output()
        self._pyqt_text_output_0_0_win = self.pyqt_text_output_0_0
self.top_grid_layout.addWidget(self._pyqt_text_output_0_0_win, 4,0,2,4)
self.pyqt_text_output_0 = pyqt.text_output()
        self._pyqt_text_output_0_win = self.pyqt_text_output_0
self.top_grid_layout.addWidget(self._pyqt_text_output_0_win, 4,4,2,4)
self.low_pass_filter_0_0 = filter.fir_filter_ccf(decim, firdes.low_pass(
1, samp_rate, 7e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.low_pass_filter_0 = filter.fir_filter_ccf(decim, firdes.low_pass(
1, samp_rate, 7e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.digital_hdlc_deframer_bp_0_0 = digital.hdlc_deframer_bp(11, 1000)
self.digital_hdlc_deframer_bp_0 = digital.hdlc_deframer_bp(11, 1000)
self.digital_diff_decoder_bb_0_0 = digital.diff_decoder_bb(2)
self.digital_diff_decoder_bb_0 = digital.diff_decoder_bb(2)
self.digital_clock_recovery_mm_xx_0_0 = digital.clock_recovery_mm_ff(samp_per_sym*(1+0.0), 0.25*0.175*0.175, 0.5, 0.175, 0.005)
self.digital_clock_recovery_mm_xx_0 = digital.clock_recovery_mm_ff(samp_per_sym*(1+0.0), 0.25*0.175*0.175, 0.5, 0.175, 0.005)
self.digital_binary_slicer_fb_0_0 = digital.binary_slicer_fb()
self.digital_binary_slicer_fb_0 = digital.binary_slicer_fb()
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.blocks_multiply_xx_1 = blocks.multiply_vcc(1)
self.blocks_multiply_xx_0 = blocks.multiply_vcc(1)
self.blocks_message_debug_0_1 = blocks.message_debug()
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_gr_complex*1, '/home/leffke/sandbox/ais/captures/ais_20161216_250k_9.32fc', False)
self.analog_sig_source_x_1 = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, 25e3+400, 1, 0)
self.analog_sig_source_x_0 = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, -25e3+400, 1, 0)
self.analog_quadrature_demod_cf_0_0 = analog.quadrature_demod_cf((samp_rate/decim)/(2*math.pi*fsk_deviation/8.0))
self.analog_quadrature_demod_cf_0 = analog.quadrature_demod_cf((samp_rate/decim)/(2*math.pi*fsk_deviation/8.0))
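        # Descriptive note: the quadrature demod gain above is
        # (samp_rate/decim) / (2*pi*fsk_deviation/8); with the defaults,
        # 50e3 / (2*pi*1.25e3) ~= 6.37.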
self.analog_agc2_xx_0_0 = analog.agc2_cc(1e-1, 1e-2, 1.0, 1.0)
self.analog_agc2_xx_0_0.set_max_gain(65536)
self.analog_agc2_xx_0 = analog.agc2_cc(1e-1, 1e-2, 1.0, 1.0)
self.analog_agc2_xx_0.set_max_gain(65536)
self.ais_pdu_to_nmea_0_0 = ais.pdu_to_nmea('B')
self.ais_pdu_to_nmea_0 = ais.pdu_to_nmea('A')
self.ais_invert_0_0 = ais.invert()
self.ais_invert_0 = ais.invert()
##################################################
# Connections
##################################################
self.msg_connect((self.ais_pdu_to_nmea_0, 'out'), (self.blocks_message_debug_0_1, 'print'))
self.msg_connect((self.ais_pdu_to_nmea_0, 'out'), (self.pyqt_text_output_0_0, 'pdus'))
self.msg_connect((self.ais_pdu_to_nmea_0_0, 'out'), (self.blocks_message_debug_0_1, 'print'))
self.msg_connect((self.ais_pdu_to_nmea_0_0, 'out'), (self.pyqt_text_output_0, 'pdus'))
self.msg_connect((self.digital_hdlc_deframer_bp_0, 'out'), (self.ais_pdu_to_nmea_0, 'to_nmea'))
self.msg_connect((self.digital_hdlc_deframer_bp_0_0, 'out'), (self.ais_pdu_to_nmea_0_0, 'to_nmea'))
self.connect((self.ais_invert_0, 0), (self.digital_hdlc_deframer_bp_0, 0))
self.connect((self.ais_invert_0_0, 0), (self.digital_hdlc_deframer_bp_0_0, 0))
self.connect((self.analog_agc2_xx_0, 0), (self.analog_quadrature_demod_cf_0, 0))
self.connect((self.analog_agc2_xx_0, 0), (self.qtgui_freq_sink_x_0, 0))
self.connect((self.analog_agc2_xx_0, 0), (self.qtgui_waterfall_sink_x_0, 0))
self.connect((self.analog_agc2_xx_0_0, 0), (self.analog_quadrature_demod_cf_0_0, 0))
self.connect((self.analog_agc2_xx_0_0, 0), (self.qtgui_freq_sink_x_0_0, 0))
self.connect((self.analog_agc2_xx_0_0, 0), (self.qtgui_waterfall_sink_x_0_0, 0))
self.connect((self.analog_quadrature_demod_cf_0, 0), (self.rational_resampler_xxx_0, 0))
self.connect((self.analog_quadrature_demod_cf_0_0, 0), (self.rational_resampler_xxx_0_0, 0))
self.connect((self.analog_sig_source_x_0, 0), (self.blocks_multiply_xx_0, 1))
self.connect((self.analog_sig_source_x_1, 0), (self.blocks_multiply_xx_1, 0))
self.connect((self.blocks_file_source_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_multiply_xx_0, 0), (self.low_pass_filter_0_0, 0))
self.connect((self.blocks_multiply_xx_1, 0), (self.low_pass_filter_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.blocks_multiply_xx_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.blocks_multiply_xx_1, 1))
self.connect((self.digital_binary_slicer_fb_0, 0), (self.digital_diff_decoder_bb_0, 0))
self.connect((self.digital_binary_slicer_fb_0_0, 0), (self.digital_diff_decoder_bb_0_0, 0))
self.connect((self.digital_clock_recovery_mm_xx_0, 0), (self.digital_binary_slicer_fb_0, 0))
self.connect((self.digital_clock_recovery_mm_xx_0_0, 0), (self.digital_binary_slicer_fb_0_0, 0))
self.connect((self.digital_diff_decoder_bb_0, 0), (self.ais_invert_0, 0))
self.connect((self.digital_diff_decoder_bb_0_0, 0), (self.ais_invert_0_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.analog_agc2_xx_0, 0))
self.connect((self.low_pass_filter_0_0, 0), (self.analog_agc2_xx_0_0, 0))
self.connect((self.rational_resampler_xxx_0, 0), (self.digital_clock_recovery_mm_xx_0, 0))
self.connect((self.rational_resampler_xxx_0_0, 0), (self.digital_clock_recovery_mm_xx_0_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "uhd_ais_2")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.set_samp_per_sym((self.samp_rate/self.decim/50*48)/self.baud)
self.qtgui_waterfall_sink_x_0_0.set_frequency_range(0, self.samp_rate)
self.qtgui_waterfall_sink_x_0.set_frequency_range(0, self.samp_rate)
self.qtgui_freq_sink_x_0_0.set_frequency_range(0, self.samp_rate/self.decim)
self.qtgui_freq_sink_x_0.set_frequency_range(0, self.samp_rate/self.decim)
self.low_pass_filter_0_0.set_taps(firdes.low_pass(1, self.samp_rate, 7e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate, 7e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.set_filter_taps(firdes.low_pass(1,self.samp_rate, self.samp_rate/2, 50000, firdes.WIN_FLATTOP, 6.76))
self.blocks_throttle_0.set_sample_rate(self.samp_rate)
self.analog_sig_source_x_1.set_sampling_freq(self.samp_rate)
self.analog_sig_source_x_0.set_sampling_freq(self.samp_rate)
self.analog_quadrature_demod_cf_0_0.set_gain((self.samp_rate/self.decim)/(2*math.pi*self.fsk_deviation/8.0))
self.analog_quadrature_demod_cf_0.set_gain((self.samp_rate/self.decim)/(2*math.pi*self.fsk_deviation/8.0))
def get_decim(self):
return self.decim
def set_decim(self, decim):
self.decim = decim
self.set_samp_per_sym((self.samp_rate/self.decim/50*48)/self.baud)
self.qtgui_freq_sink_x_0_0.set_frequency_range(0, self.samp_rate/self.decim)
self.qtgui_freq_sink_x_0.set_frequency_range(0, self.samp_rate/self.decim)
self.analog_quadrature_demod_cf_0_0.set_gain((self.samp_rate/self.decim)/(2*math.pi*self.fsk_deviation/8.0))
self.analog_quadrature_demod_cf_0.set_gain((self.samp_rate/self.decim)/(2*math.pi*self.fsk_deviation/8.0))
def get_baud(self):
return self.baud
def set_baud(self, baud):
self.baud = baud
self.set_samp_per_sym((self.samp_rate/self.decim/50*48)/self.baud)
def get_samp_per_sym(self):
return self.samp_per_sym
def set_samp_per_sym(self, samp_per_sym):
self.samp_per_sym = samp_per_sym
self.digital_clock_recovery_mm_xx_0_0.set_omega(self.samp_per_sym*(1+0.0))
self.digital_clock_recovery_mm_xx_0.set_omega(self.samp_per_sym*(1+0.0))
def get_fsk_deviation(self):
return self.fsk_deviation
def set_fsk_deviation(self, fsk_deviation):
self.fsk_deviation = fsk_deviation
self.analog_quadrature_demod_cf_0_0.set_gain((self.samp_rate/self.decim)/(2*math.pi*self.fsk_deviation/8.0))
self.analog_quadrature_demod_cf_0.set_gain((self.samp_rate/self.decim)/(2*math.pi*self.fsk_deviation/8.0))
def get_freq(self):
return self.freq
def set_freq(self, freq):
self.freq = freq
def get_filter_taps(self):
return self.filter_taps
def set_filter_taps(self, filter_taps):
self.filter_taps = filter_taps
def main(top_block_cls=uhd_ais_2, options=None):
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls()
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main()
|
|
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import random
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import expression as expr
from neutron.common import utils
from neutron.db import models_v2
from neutron.extensions import l3
from neutron_lib import exceptions as n_exc
from networking_cisco._i18n import _, _LW
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.cisco.common import cisco_constants
from networking_cisco.plugins.cisco.common import utils as cisco_utils
from networking_cisco.plugins.cisco.extensions import ha
from networking_cisco.plugins.cisco.extensions import routerrole
from networking_cisco.plugins.cisco.extensions import routertype
LOG = logging.getLogger(__name__)
RouterPort = bc.RouterPort
HA_GROUP = 'group'
HA_PORT = 'ha_port'
MAX_VRRP_GROUPS = 4094
MAX_HSRP_GROUPS = 4094
MAX_GLBP_GROUPS = 1023
is_attr_set = bc.is_attr_set
ATTR_NOT_SPECIFIED = bc.constants.ATTR_NOT_SPECIFIED
EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO
DEVICE_OWNER_ROUTER_GW = bc.constants.DEVICE_OWNER_ROUTER_GW
DEVICE_OWNER_ROUTER_INTF = bc.constants.DEVICE_OWNER_ROUTER_INTF
DEVICE_OWNER_ROUTER_HA_INTF = bc.constants.DEVICE_OWNER_ROUTER_HA_INTF
ROUTER_ROLE_HA_REDUNDANCY = cisco_constants.ROUTER_ROLE_HA_REDUNDANCY
DEFAULT_MASTER_PRIORITY = 100
PRIORITY_INCREASE_STEP = -3
REDUNDANCY_ROUTER_SUFFIX = '_HA_backup_'
DEFAULT_PING_INTERVAL = 5
PROBE_TARGET_OPT_NAME = 'default_probe_target'
LOOKUP_RETRIES = 5
RETRY_INTERVAL = 3
router_appliance_opts = [
cfg.BoolOpt('ha_support_enabled', default=True,
help=_("Enables high-availability support")),
cfg.BoolOpt('ha_enabled_by_default', default=False,
help=_("Enables high-availability functionality for Neutron "
"router even if user does not explicitly request it")),
cfg.IntOpt('default_ha_redundancy_level', default=ha.MIN_REDUNDANCY_LEVEL,
help=_("Default number of routers added for redundancy when "
"high-availability by VRRP, HSRP, or GLBP is used")),
cfg.StrOpt('default_ha_mechanism', default=ha.HA_HSRP,
help=_("Default mechanism used to implement "
"high-availability")),
cfg.ListOpt('disabled_ha_mechanisms', default=[],
help=_("List of administratively disabled high-availability "
"mechanisms (VRRP, HSRP, GBLP)")),
cfg.BoolOpt('connectivity_probing_enabled_by_default', default=False,
help=_("Enables connectivity probing for high-availability "
"even if user does not explicitly request it")),
cfg.StrOpt('default_probe_target', default=None,
help=_("Host that will be probe target for high-availability "
"connectivity probing if user does not specify it")),
    cfg.IntOpt('default_ping_interval', default=DEFAULT_PING_INTERVAL,
help=_("Time (in seconds) between probes for high-availability "
"connectivity probing if user does not specify it")),
]
cfg.CONF.register_opts(router_appliance_opts, "ha")
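# Illustrative only (not from the original file): these options land in the
# [ha] group of the neutron server configuration and could be overridden as:
#
#   [ha]
#   ha_support_enabled = True
#   ha_enabled_by_default = True
#   default_ha_mechanism = HSRP
#   default_ha_redundancy_level = 2
#   default_probe_target = 192.0.2.1
#   default_ping_interval = 10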
class RouterHASetting(bc.model_base.BASEV2):
"""Represents HA settings for router visible to user."""
__tablename__ = 'cisco_router_ha_settings'
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id', ondelete='CASCADE'),
primary_key=True)
router = orm.relationship(
bc.Router,
backref=orm.backref('ha_settings', cascade='all', uselist=False))
# 'ha_type' can be 'VRRP', 'HSRP', or 'GLBP'
ha_type = sa.Column(sa.String(255))
# 'redundancy_level' is number of extra routers for redundancy
redundancy_level = sa.Column(sa.Integer,
server_default=str(ha.MIN_REDUNDANCY_LEVEL))
# 'priority' is the priority used in VRRP, HSRP, and GLBP
priority = sa.Column(sa.Integer)
# 'probe_connectivity' is True if ICMP echo pinging is enabled
probe_connectivity = sa.Column(sa.Boolean)
# 'probe_target' is ip address of host that is probed
probe_target = sa.Column(sa.String(64))
# 'ping_interval' is the time between probes
probe_interval = sa.Column(sa.Integer)
# 'state' is the state of the user visible router: HA_ACTIVE or HA_STANDBY
state = sa.Column(sa.Enum(ha.HA_ACTIVE, ha.HA_STANDBY, name='ha_states'),
default=ha.HA_ACTIVE, server_default=ha.HA_ACTIVE)
class RouterHAGroup(bc.model_base.BASEV2, bc.model_base.HasId,
bc.HasProject):
"""Represents an HA group as used in VRRP, HSRP, and GLBP."""
__tablename__ = 'cisco_router_ha_groups'
# 'ha_type' can be 'VRRP', 'HSRP', or 'GLBP'
ha_type = sa.Column(sa.String(255))
    # 'group_identity' is the identity (e.g. HSRP/VRRP/GLBP group number)
    # of the HA group
group_identity = sa.Column(sa.String(255))
# 'ha_port_id' is id of port used for virtual IP address
ha_port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete='CASCADE'),
primary_key=True)
ha_port = orm.relationship(
models_v2.Port,
primaryjoin='Port.id==RouterHAGroup.ha_port_id')
# 'extra_port_id' is id of port for user visible router's extra ip address
extra_port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete='SET NULL'),
nullable=True)
extra_port = orm.relationship(
models_v2.Port,
primaryjoin='Port.id==RouterHAGroup.extra_port_id')
# 'subnet_id' is id of subnet that this HA group serves
subnet_id = sa.Column(sa.String(36),
sa.ForeignKey('subnets.id'),
nullable=True)
# 'user_router_id' is id of router visible to the user
user_router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id'))
# 'timers_config' holds timer specific configurations
timers_config = sa.Column(sa.String(255))
# 'tracking_config' holds tracking object specific configurations
tracking_config = sa.Column(sa.String(255))
# 'other_config' holds other method specific configurations
other_config = sa.Column(sa.String(255))
class RouterRedundancyBinding(bc.model_base.BASEV2):
"""Represents binding between an HA enabled router and its
redundancy routers.
"""
__tablename__ = 'cisco_router_redundancy_bindings'
# 'redundancy_router_id' is id of a redundancy router
redundancy_router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id',
ondelete='CASCADE'),
primary_key=True)
redundancy_router = orm.relationship(
bc.Router,
primaryjoin='Router.id==RouterRedundancyBinding.redundancy_router_id',
backref=orm.backref('redundancy_binding', cascade='save-update, merge',
passive_deletes='all', uselist=False))
# 'priority' is the priority used in VRRP, HSRP, and GLBP
priority = sa.Column(sa.Integer)
# 'state' is the state of the redundancy router: HA_ACTIVE or HA_STANDBY
state = sa.Column(sa.Enum(ha.HA_ACTIVE, ha.HA_STANDBY, name='ha_states'),
default=ha.HA_STANDBY, server_default=ha.HA_STANDBY)
# 'user_router_id' is id of router visible to the user
user_router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id'))
user_router = orm.relationship(
bc.Router,
primaryjoin='Router.id==RouterRedundancyBinding.user_router_id',
backref=orm.backref('redundancy_bindings',
order_by=priority, cascade='all'))
__mapper_args__ = {
'confirm_deleted_rows': False
}
class HA_db_mixin(object):
"""Mixin class to support VRRP, HSRP, and GLBP based HA for routing."""
def _ensure_create_ha_compliant(self, router, router_type):
"""To be called in create_router() BEFORE router is created in DB."""
details = router.pop(ha.DETAILS, {})
if details == ATTR_NOT_SPECIFIED:
details = {}
res = {ha.ENABLED: router.pop(ha.ENABLED, ATTR_NOT_SPECIFIED),
ha.DETAILS: details}
if not is_attr_set(res[ha.ENABLED]):
res[ha.ENABLED] = router_type['ha_enabled_by_default']
if res[ha.ENABLED] and not cfg.CONF.ha.ha_support_enabled:
raise ha.HADisabled()
if not res[ha.ENABLED]:
return res
if not is_attr_set(details.get(ha.TYPE, ATTR_NOT_SPECIFIED)):
details[ha.TYPE] = cfg.CONF.ha.default_ha_mechanism
if details[ha.TYPE] in cfg.CONF.ha.disabled_ha_mechanisms:
raise ha.HADisabledHAType(ha_type=details[ha.TYPE])
if not is_attr_set(details.get(ha.REDUNDANCY_LEVEL,
ATTR_NOT_SPECIFIED)):
details[ha.REDUNDANCY_LEVEL] = (
cfg.CONF.ha.default_ha_redundancy_level)
if not is_attr_set(details.get(ha.PROBE_CONNECTIVITY,
ATTR_NOT_SPECIFIED)):
details[ha.PROBE_CONNECTIVITY] = (
cfg.CONF.ha.connectivity_probing_enabled_by_default)
if not is_attr_set(details.get(ha.PROBE_TARGET, ATTR_NOT_SPECIFIED)):
details[ha.PROBE_TARGET] = cfg.CONF.ha.default_probe_target
if not is_attr_set(details.get(ha.PROBE_INTERVAL, ATTR_NOT_SPECIFIED)):
details[ha.PROBE_INTERVAL] = cfg.CONF.ha.default_ping_interval
return res
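    # A hedged sketch (not part of the original code) of what
    # _ensure_create_ha_compliant() returns when HA ends up enabled;
    # the actual values depend on the request and the [ha] config options:
    #
    #   {ha.ENABLED: True,
    #    ha.DETAILS: {ha.TYPE: 'HSRP',
    #                 ha.REDUNDANCY_LEVEL: 1,
    #                 ha.PROBE_CONNECTIVITY: False,
    #                 ha.PROBE_TARGET: None,
    #                 ha.PROBE_INTERVAL: 5}}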
def _create_redundancy_routers(self, context, new_router, ha_settings,
new_router_db, ports=None, expire_db=False):
"""To be called in create_router() AFTER router has been
created in DB.
"""
if (ha.ENABLED not in ha_settings or
not ha_settings[ha.ENABLED]):
new_router[ha.HA] = {ha.ENABLED: False}
return
ha_spec = ha_settings[ha.DETAILS]
priority = ha_spec.get(ha.PRIORITY, DEFAULT_MASTER_PRIORITY)
with context.session.begin(subtransactions=True):
r_ha_s_db = RouterHASetting(
router_id=new_router['id'],
ha_type=ha_spec[ha.TYPE],
redundancy_level=ha_spec[ha.REDUNDANCY_LEVEL],
priority=priority,
probe_connectivity=ha_spec[ha.PROBE_CONNECTIVITY],
probe_target=ha_spec[ha.PROBE_TARGET],
probe_interval=ha_spec[ha.PROBE_INTERVAL])
context.session.add(r_ha_s_db)
if r_ha_s_db.probe_connectivity and r_ha_s_db.probe_target is None:
LOG.warning(_LW("Connectivity probing for high-availability is "
"enabled but probe target is not specified. Please"
" configure option \'default_probe_target\'."))
e_context = context.elevated()
if new_router_db.gw_port:
# generate ha settings and extra port for router gateway (VIP) port
gw_port = self._core_plugin._make_port_dict(new_router_db.gw_port)
self._create_ha_group(e_context, new_router, gw_port, r_ha_s_db)
self._add_redundancy_routers(e_context, 1,
ha_spec[ha.REDUNDANCY_LEVEL] + 1,
new_router, ports or [], r_ha_s_db)
if expire_db:
context.session.expire(new_router_db)
self._extend_router_dict_ha(new_router, new_router_db)
def _ensure_update_ha_compliant(self, router, current_router,
r_hd_binding_db):
"""To be called in update_router() BEFORE router has been
updated in DB.
"""
auto_enable_ha = r_hd_binding_db.router_type.ha_enabled_by_default
requested_ha_details = router.pop(ha.DETAILS, {})
        # If ha_details are given then ha is assumed to be enabled even if
        # it is not explicitly specified or if auto_enable_ha says so.
        # Note that None is used to indicate that the request did not
        # include any ha information.
requested_ha_enabled = router.pop(
ha.ENABLED, True if requested_ha_details or auto_enable_ha is True
else None)
res = {}
ha_currently_enabled = current_router.get(ha.ENABLED, False)
# Note: must check for 'is True' as None implies attribute not given
if requested_ha_enabled is True or ha_currently_enabled is True:
if not cfg.CONF.ha.ha_support_enabled:
raise ha.HADisabled()
curr_ha_details = current_router.get(ha.DETAILS, {})
if ha.TYPE in requested_ha_details:
requested_ha_type = requested_ha_details[ha.TYPE]
if (ha.TYPE in curr_ha_details and
requested_ha_type != curr_ha_details[ha.TYPE]):
raise ha.HATypeCannotBeChanged()
elif requested_ha_type in cfg.CONF.ha.disabled_ha_mechanisms:
raise ha.HADisabledHAType(ha_type=requested_ha_type)
if requested_ha_enabled:
res[ha.ENABLED] = requested_ha_enabled
if requested_ha_details:
res[ha.DETAILS] = requested_ha_details
elif requested_ha_enabled is False:
res[ha.ENABLED] = False
return res
def _teardown_redundancy_router_gw_connectivity(self, context, router,
router_db,
plugging_driver):
"""To be called in update_router() if the router gateway is to change
BEFORE router has been updated in DB .
"""
if not router[ha.ENABLED]:
# No HA currently enabled so we're done
return
e_context = context.elevated()
        # since the gateway is about to change, the ha group for the current
        # gateway is removed; a new one will be created later
self._delete_ha_group(e_context, router_db.gw_port_id)
# teardown connectivity for the gw ports on the redundancy routers
# and remove those ports as new ones will be created later
rr_ids = []
for r_b_db in router_db.redundancy_bindings:
if plugging_driver is not None:
plugging_driver.teardown_logical_port_connectivity(
e_context, r_b_db.redundancy_router.gw_port,
r_b_db.redundancy_router.hosting_info.hosting_device_id)
self._update_router_no_notify(
e_context, r_b_db.redundancy_router_id,
{'router': {EXTERNAL_GW_INFO: None, ha.ENABLED: False}})
rr_ids.append(r_b_db.redundancy_router_id)
self.notify_routers_updated(e_context, rr_ids)
def _update_redundancy_routers(self, context, updated_router,
update_specification, requested_ha_settings,
updated_router_db, gateway_changed):
"""To be called in update_router() AFTER router has been
updated in DB.
"""
router_requested = update_specification['router']
ha_settings_db = updated_router_db.ha_settings
ha_enabled_requested = requested_ha_settings.get(ha.ENABLED, False)
if not (updated_router[ha.ENABLED] or ha_enabled_requested):
# No HA currently enabled and no HA requested so we're done
return
# The redundancy routers need interfaces on the same networks as the
# user visible router.
ports = self._get_router_interfaces(updated_router_db)
e_context = context.elevated()
if not updated_router[ha.ENABLED] and ha_enabled_requested:
# No HA currently enabled but HA requested
router_requested.update(requested_ha_settings)
router_requested[EXTERNAL_GW_INFO] = (
updated_router[EXTERNAL_GW_INFO])
requested_ha_settings = self._ensure_create_ha_compliant(
router_requested, updated_router_db.hosting_info.router_type)
self._create_redundancy_routers(
e_context, updated_router, requested_ha_settings,
updated_router_db, ports, expire_db=True)
return
rr_ids = self._get_redundancy_router_ids(context, updated_router['id'])
ha_details_update_spec = requested_ha_settings.get(ha.DETAILS)
if (updated_router[ha.ENABLED] and not requested_ha_settings.get(
ha.ENABLED, updated_router[ha.ENABLED])):
# HA currently enabled but HA disable requested
# delete ha settings and extra port for gateway (VIP) port
self._delete_ha_group(e_context, updated_router_db.gw_port_id)
self._remove_redundancy_routers(e_context, rr_ids, ports, True)
with context.session.begin(subtransactions=True):
context.session.delete(ha_settings_db)
elif ha_details_update_spec:
# HA currently enabled and HA setting update (other than
# disable HA) requested
old_redundancy_level = ha_settings_db.redundancy_level
ha_settings_db.update(ha_details_update_spec)
diff = (ha_details_update_spec.get(ha.REDUNDANCY_LEVEL,
old_redundancy_level) -
old_redundancy_level)
with context.session.begin(subtransactions=True):
context.session.add(ha_settings_db)
if diff < 0:
# Remove -diff redundancy routers
#TODO(bobmel): Ensure currently active router is excluded
to_remove = rr_ids[len(rr_ids) + diff:]
rr_ids = rr_ids[:len(rr_ids) + diff]
self._remove_redundancy_routers(e_context, to_remove, ports)
elif diff > 0:
# Add diff redundancy routers
start = old_redundancy_level + 1
stop = start + diff
self._add_redundancy_routers(e_context, start, stop,
updated_router, ports,
ha_settings_db, False)
if gateway_changed is True:
self._change_ha_for_gateway(e_context, updated_router,
updated_router_db, ha_settings_db,
router_requested, expire=True)
else:
# Notify redundancy routers about changes
self.notify_routers_updated(e_context, rr_ids)
elif gateway_changed is True:
            # HA currently enabled (and to remain so), no HA setting update,
            # and the gateway has changed
self._change_ha_for_gateway(e_context, updated_router,
updated_router_db, ha_settings_db,
router_requested)
# pick up updates to other attributes where it makes sense
# and push - right now it is only admin_state_up.
if 'admin_state_up' in update_specification['router']:
other_updates_spec = {'router': {'admin_state_up':
update_specification['router']['admin_state_up']}}
self._process_other_router_updates(e_context, updated_router_db,
other_updates_spec)
# Ensure we get latest state from DB
context.session.expire(updated_router_db)
self._extend_router_dict_ha(updated_router, updated_router_db)
def _change_ha_for_gateway(self, context, router, router_db,
ha_settings_db, update_spec, expire=False):
# generate ha settings and extra port for router gateway VIP
if router_db.gw_port is None:
# gateway was removed and since gw's of redundancy routers are
# removed by _teardown_redundancy_router_gw_connectivity we're done
return
gw_port = self._core_plugin._make_port_dict(router_db.gw_port)
self._create_ha_group(context, router, gw_port, ha_settings_db)
if expire is True:
context.session.expire(router_db)
# Now add gw to redundancy routers
rr_ids = []
for r_b_db in router_db.redundancy_bindings:
spec = {EXTERNAL_GW_INFO: copy.deepcopy(router[EXTERNAL_GW_INFO])}
if spec[EXTERNAL_GW_INFO]['external_fixed_ips']:
                # Ensure ip addresses are not specified as they cannot be
                # the same as the visible router's ip addresses.
for e_fixed_ip in spec[EXTERNAL_GW_INFO]['external_fixed_ips']:
e_fixed_ip.pop('ip_address', None)
#spec[EXTERNAL_GW_INFO].pop('external_fixed_ips', None)
spec[ha.ENABLED] = False
self._update_router_no_notify(
context, r_b_db.redundancy_router_id, {'router': spec})
rr_ids.append(r_b_db.redundancy_router_id)
self.notify_routers_updated(context, rr_ids)
def _process_other_router_updates(self, context, router_db, update_spec):
rr_ids = []
for r_b_db in router_db.redundancy_bindings:
update_spec['router'][ha.ENABLED] = False
self._update_router_no_notify(
context, r_b_db.redundancy_router_id, update_spec)
rr_ids.append(r_b_db.redundancy_router_id)
self.notify_routers_updated(context, rr_ids)
def _add_redundancy_routers(self, context, start_index, stop_index,
user_visible_router, ports=None,
ha_settings_db=None, create_ha_group=True):
"""Creates a redundancy router and its interfaces on
the specified subnets.
"""
priority = (DEFAULT_MASTER_PRIORITY +
(start_index - 1) * PRIORITY_INCREASE_STEP)
r = copy.deepcopy(user_visible_router)
# No tenant_id so redundancy routers are hidden from user
r['tenant_id'] = ''
name = r['name']
redundancy_r_ids = []
for i in range(start_index, stop_index):
del r['id']
            # We don't replicate the user visible router's routes; instead
            # they are populated into the redundancy routers for
            # get router(s) ops
r.pop('routes', None)
# Redundancy routers will never have a route spec themselves
# The redundancy routers must have HA disabled
r[ha.ENABLED] = False
r['name'] = name + REDUNDANCY_ROUTER_SUFFIX + str(i)
# set role so that purpose of this router can be easily determined
r[routerrole.ROUTER_ROLE_ATTR] = ROUTER_ROLE_HA_REDUNDANCY
gw_info = r[EXTERNAL_GW_INFO]
if gw_info and gw_info['external_fixed_ips']:
                # Ensure ip addresses are not specified as they cannot be
                # the same as the visible router's ip addresses.
for e_fixed_ip in gw_info['external_fixed_ips']:
e_fixed_ip.pop('ip_address', None)
r = self.create_router(context, {'router': r})
LOG.debug("Created redundancy router %(index)d with router id "
"%(r_id)s", {'index': i, 'r_id': r['id']})
priority += PRIORITY_INCREASE_STEP
r_b_b = RouterRedundancyBinding(
redundancy_router_id=r['id'],
priority=priority,
user_router_id=user_visible_router['id'])
context.session.add(r_b_b)
redundancy_r_ids.append(r['id'])
for port_db in ports or []:
port = self._core_plugin._make_port_dict(port_db)
self._add_redundancy_router_interfaces(
context, user_visible_router, None, port,
redundancy_r_ids, ha_settings_db, create_ha_group)
def _remove_redundancy_routers(self, context, router_ids, ports,
delete_ha_groups=False):
"""Deletes all interfaces of the specified redundancy routers
and then the redundancy routers themselves.
"""
subnets_info = [{'subnet_id': port['fixed_ips'][0]['subnet_id']}
for port in ports]
for r_id in router_ids:
for i in range(len(subnets_info)):
self.remove_router_interface(context, r_id, subnets_info[i])
LOG.debug("Removed interface on %(s_id)s to redundancy router "
"with %(r_id)s",
{'s_id': ports[i]['network_id'], 'r_id': r_id})
# There is only one ha group per network so only delete once
if delete_ha_groups and r_id == router_ids[0]:
self._delete_ha_group(context, ports[i]['id'])
self.delete_router(context, r_id)
LOG.debug("Deleted redundancy router %s", r_id)
def _get_router_interfaces(self, router_db,
port_type=DEVICE_OWNER_ROUTER_INTF):
return [p['port'] for p in router_db.attached_ports if
p['port_type'] == port_type]
def _delete_redundancy_routers(self, context, router_db):
"""To be called in delete_router() BEFORE router has been
deleted in DB. The router should have not interfaces.
"""
e_context = context.elevated()
for binding in router_db.redundancy_bindings:
self.delete_router(e_context, binding.redundancy_router_id)
LOG.debug("Deleted redundancy router %s",
binding.redundancy_router_id)
if router_db.gw_port_id:
# delete ha settings and extra port for gateway (VIP) port
self._delete_ha_group(e_context, router_db.gw_port_id)
def _add_redundancy_router_interfaces(self, context, router, itfc_info,
new_port, redundancy_router_ids=None,
ha_settings_db=None,
create_ha_group=True):
"""To be called in add_router_interface() AFTER interface has been
added to router in DB.
"""
        # There are essentially three cases where we add an interface to a
        # redundancy router:
        # 1. HA is enabled on a user visible router that has one or more
        #    interfaces.
        # 2. Redundancy level is increased so one or more redundancy routers
        #    are added.
        # 3. An interface is added to a user visible router.
        #
        # For 1: An HA GROUP MUST BE CREATED and EXTRA PORTS MUST BE CREATED
        #        for each redundancy router. The id of extra port should be
        #        specified in the interface_info argument of the
        #        add_router_interface call so that we ADD BY PORT.
        # For 2: HA group need NOT be created as it will already exist (since
        #        there is already at least one redundancy router). EXTRA PORTS
        #        MUST BE CREATED for each added redundancy router. The id
        #        of extra port should be specified in the interface_info
        #        argument of the add_router_interface call so that we ADD BY
        #        PORT.
        # For 3: if the interface for the user_visible_router was added by ...
        #    a) PORT: An HA GROUP MUST BE CREATED and EXTRA PORTS MUST BE
        #       CREATED for each redundancy router. The id of extra port
        #       should be specified in the interface_info argument of
        #       the add_router_interface call so that we ADD BY PORT.
        #    b) SUBNET: There are two cases to consider. If the added
        #       interface of the user_visible_router has ...
        #       b1) 1 SUBNET: An HA GROUP MUST BE CREATED and EXTRA
        #                     PORTS MUST BE CREATED for each redundancy
        #                     router. The id of extra port should be
        #                     specified in the interface_info argument of
        #                     the add_router_interface call so we ADD BY
        #                     PORT.
        #       b2) >1 SUBNETS: HA group need NOT be created as it will
        #                       already exist (since the redundancy routers
        #                       should already have extra ports to which the
        #                       (IPv6) subnet is added). Extra ports thus
        #                       need NOT be created. The subnet id should be
        #                       added to the existing extra ports.
router_id = router['id']
if ha_settings_db is None:
ha_settings_db = self._get_ha_settings_by_router_id(context,
router_id)
if ha_settings_db is None:
return
e_context = context.elevated()
add_by_subnet = (itfc_info is not None and 'subnet_id' in itfc_info and
len(new_port['fixed_ips']) > 1)
if (add_by_subnet is False or (itfc_info is None and
create_ha_group is True)):
# generate ha settings and extra port for router (VIP) port
self._create_ha_group(e_context, router, new_port, ha_settings_db)
fixed_ips = self._get_fixed_ips_subnets(new_port['fixed_ips'])
for r_id in (redundancy_router_ids or
self._get_redundancy_router_ids(e_context, router_id)):
if add_by_subnet is True:
# need to add subnet to redundancy router port
ports = self._core_plugin.get_ports(
e_context,
filters={'device_id': [r_id],
'network_id': [new_port['network_id']]},
fields=['fixed_ips', 'id'])
redundancy_port = ports[0]
fixed_ips = redundancy_port['fixed_ips']
fixed_ip = {'subnet_id': itfc_info['subnet_id']}
fixed_ips.append(fixed_ip)
self._core_plugin.update_port(
e_context, redundancy_port['id'],
{'port': {'fixed_ips': fixed_ips}})
else:
redundancy_port = self._create_hidden_port(
e_context, new_port['network_id'], '', fixed_ips)
interface_info = {'port_id': redundancy_port['id']}
self.add_router_interface(e_context, r_id, interface_info)
def _update_redundancy_router_interfaces(self, context, router,
port, modified_port_data,
redundancy_router_ids=None,
ha_settings_db=None):
"""To be called when the router interfaces are updated,
like in the case of change in port admin_state_up status
"""
router_id = router['id']
if ha_settings_db is None:
ha_settings_db = self._get_ha_settings_by_router_id(context,
router_id)
if ha_settings_db is None:
return
e_context = context.elevated()
rr_ids = self._get_redundancy_router_ids(e_context, router_id)
port_info_list = self._core_plugin.get_ports(
e_context, filters={'device_id': rr_ids,
'network_id': [port['network_id']]},
fields=['device_id', 'id'])
for port_info in port_info_list:
self._core_plugin.update_port(e_context, port_info['id'],
modified_port_data)
self._update_hidden_port(e_context, port['id'], modified_port_data)
def _create_ha_group(self, context, router, port, ha_settings_db):
driver = self._get_router_type_driver(context,
router[routertype.TYPE_ATTR])
if driver is None:
return
ha_group_uuid = uuidutils.generate_uuid()
# use HA group as device instead of the router to hide this port
with context.session.begin(subtransactions=True):
group_id = (driver.generate_ha_group_id(context, router, port,
ha_settings_db,
ha_group_uuid) or
self._generate_ha_group_id(context, router, port,
ha_settings_db,
ha_group_uuid))
timers_cfg = driver.get_ha_group_timers_parameters(
context, router, port, ha_settings_db, ha_group_uuid)
tracking_cfg = driver.get_ha_group_tracking_parameters(
context, router, port, ha_settings_db, ha_group_uuid)
other_cfg = driver.get_other_ha_group_parameters(
context, router, port, ha_settings_db, ha_group_uuid)
if driver.ha_interface_ip_address_needed(
context, router, port, ha_settings_db, ha_group_uuid):
fixed_ips = self._get_fixed_ips_subnets(port['fixed_ips'])
extra_port = self._create_hidden_port(
context, port['network_id'], ha_group_uuid,
fixed_ips, port['device_owner'])
extra_port_id = extra_port['id']
else:
extra_port_id = None
subnet_id = (port['fixed_ips'][0]['subnet_id']
if port['fixed_ips'] else None)
r_ha_g = RouterHAGroup(
id=ha_group_uuid,
tenant_id=port['tenant_id'],
ha_type=ha_settings_db.ha_type,
group_identity=group_id,
ha_port_id=port['id'],
extra_port_id=extra_port_id,
subnet_id=subnet_id,
user_router_id=router['id'],
timers_config=timers_cfg,
tracking_config=tracking_cfg,
other_config=other_cfg)
context.session.add(r_ha_g)
return r_ha_g
def _get_fixed_ips_subnets(self, fixed_ips):
subnets = copy.deepcopy(fixed_ips)
for s in subnets:
s.pop('ip_address', None)
return subnets
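    # For example (illustrative), _get_fixed_ips_subnets() maps
    #   [{'subnet_id': 'subnet-1', 'ip_address': '10.0.0.4'}]
    # to
    #   [{'subnet_id': 'subnet-1'}]
    # so ports created from the result get fresh IP addresses allocated
    # on the same subnets.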
def _remove_redundancy_router_interfaces(self, context, router_id,
old_port):
"""To be called in delete_router_interface() BEFORE interface has been
removed from router in DB.
"""
ha_settings = self._get_ha_settings_by_router_id(context, router_id)
if ha_settings is None or old_port is None:
return
e_context = context.elevated()
rr_ids = self._get_redundancy_router_ids(e_context, router_id)
port_info_list = self._core_plugin.get_ports(
e_context, filters={'device_id': rr_ids,
'network_id': [old_port['network_id']]},
fields=['device_id', 'id'])
for port_info in port_info_list:
interface_info = {'port_id': port_info['id']}
self.remove_router_interface(e_context, port_info['device_id'],
interface_info)
self._delete_ha_group(e_context, old_port['id'])
def _redundancy_routers_for_floatingip(
self, context, router_id, redundancy_router_ids=None,
ha_settings_db=None):
"""To be called in update_floatingip() to get the
redundant router ids.
"""
if ha_settings_db is None:
ha_settings_db = self._get_ha_settings_by_router_id(context,
router_id)
if ha_settings_db is None:
return
e_context = context.elevated()
router_ids = []
for r_id in (redundancy_router_ids or
self._get_redundancy_router_ids(e_context, router_id)):
router_ids.append(r_id)
return router_ids
def _update_hidden_port(self, context, ha_port_id, modified_port_data):
hag = self._get_ha_group_by_ha_port_id(context, ha_port_id)
if hag is not None:
self._core_plugin.update_port(context, hag.extra_port_id,
modified_port_data)
def _delete_ha_group(self, context, ha_port_id):
hag = self._get_ha_group_by_ha_port_id(context, ha_port_id)
if hag is not None and hag.extra_port_id is not None:
self._core_plugin.delete_port(context, hag.extra_port_id,
l3_port_check=False)
with context.session.begin(subtransactions=True):
context.session.delete(hag)
def _extend_router_dict_ha(self, router_res, router_db):
if utils.is_extension_supported(self, ha.HA_ALIAS):
ha_s = router_db.ha_settings
rr_b = router_db.redundancy_binding
if rr_b and rr_b.user_router:
# include static routes from user visible router
temp = {}
self._extend_router_dict_extraroute(temp,
rr_b.user_router)
if temp['routes']:
router_res['routes'].extend(temp['routes'])
router_res[ha.ENABLED] = False if ha_s is None else True
if router_res[ha.ENABLED]:
ha_details = {ha.TYPE: ha_s.ha_type,
ha.PRIORITY: ha_s.priority,
ha.STATE: ha_s.state,
ha.REDUNDANCY_LEVEL: ha_s.redundancy_level,
ha.PROBE_CONNECTIVITY: ha_s.probe_connectivity}
if ha_details[ha.PROBE_CONNECTIVITY]:
ha_details.update({ha.PROBE_TARGET: ha_s.probe_target,
ha.PROBE_INTERVAL: ha_s.probe_interval})
ha_details[ha.REDUNDANCY_ROUTERS] = (
[{'id': b.redundancy_router_id, ha.PRIORITY: b.priority,
ha.STATE: b.state}
for b in router_db.redundancy_bindings])
router_res[ha.DETAILS] = ha_details
else:
# ensure any router details are removed
router_res.pop(ha.DETAILS, None)
def _populate_ha_information(self, context, router):
"""To be called when router information, including router interface
list, (for the l3_cfg_agent) has been collected so it is extended
with ha information.
"""
r_r_b = self._get_redundancy_router_bindings(
context, redundancy_router_id=router['id'])
if not r_r_b:
if router[ha.ENABLED]:
# The router is a user visible router with HA enabled.
user_router_id = router['id']
fips = []
else:
# The router is a user visible router with HA disabled.
# Nothing more to do here.
return
else:
# The router is a redundancy router.
# Need to fetch floatingip configurations from user visible router
# so they can be added to the redundancy routers.
user_router_id = r_r_b[0].user_router_id
fips = self.get_floatingips(context,
{'router_id': [user_router_id]})
if router['id'] != user_router_id:
# We add the HA settings from user visible router to
# its redundancy routers.
user_router_db = self._get_router(context, user_router_id)
self._extend_router_dict_ha(router, user_router_db)
# The interfaces of the user visible router must use the
# IP configuration of the extra ports in the HA groups.
hags = self._get_subnet_id_indexed_ha_groups(context, user_router_id)
e_context = context.elevated()
if router.get('gw_port'):
modified_interfaces = []
interface_port = self._populate_port_ha_information(
e_context, router['gw_port'], router['id'], hags,
user_router_id, modified_interfaces)
if not interface_port:
# The router has a gw_port but cannot find the port info yet
# so mark this router to have incomplete info and bail.
# The cfg_agent puts this in the updated_routers to ask again.
router['status'] = cisco_constants.ROUTER_INFO_INCOMPLETE
return
if modified_interfaces:
router['gw_port'] = interface_port
modified_interfaces = []
for itfc in router.get(bc.constants.INTERFACE_KEY, []):
interface_port = self._populate_port_ha_information(
e_context, itfc, router['id'], hags, user_router_id,
modified_interfaces)
if not interface_port:
# the router has interfaces but cannot find the port info yet
# so mark this router to have incomplete info and bail
# the cfg_agent will put this in the updated_list to ask again
router['status'] = cisco_constants.ROUTER_INFO_INCOMPLETE
return
if modified_interfaces:
router[bc.constants.INTERFACE_KEY] = modified_interfaces
if fips:
router[bc.constants.FLOATINGIP_KEY] = fips
def _populate_port_ha_information(self, context, port, router_id, hags,
user_router_id, modified_interfaces):
subnet_id = port['fixed_ips'][0]['subnet_id']
try:
hag = hags[subnet_id]
except KeyError:
# Oops, the subnet_id was not found. Probably because the DB
# insertion of that HA group is still in progress by another
# process and has not been committed to the DB yet.
# Let's retry a few times to see if the DB entry turns up.
LOG.debug('No HA group info for router: %(r_id)s and subnet: '
'%(s_id)s was found when populating HA info for port: '
'%(p_id)s. Will now make additional lookup attempts.',
{'r_id': router_id, 's_id': subnet_id,
'p_id': port['id']})
try:
hag = self._get_ha_group_for_subnet_id(context, router_id,
subnet_id)
except exc.NoResultFound:
hag = None
if hag is None:
                LOG.debug('Failed to fetch the HA group info for router: '
'%(r_id)s and subnet: %(s_id)s. Giving up. No HA '
'info will be added to the router\'s port: %(p_id)s.',
{'r_id': router_id, 's_id': subnet_id,
'p_id': port['id']})
# we leave it to the L3 config agent to handle this
return
else:
LOG.debug('Successfully fetched the HA group info for '
'router: %(r_id)s and subnet: %(s_id)s from DB',
{'r_id': router_id, 's_id': subnet_id})
hags[subnet_id] = hag
if router_id == user_router_id:
            # If the router interface needs no dedicated IP address we just
# set the HA (VIP) port to the port itself. The config agent
# driver will know how to handle this "signal".
p_id = hag.extra_port_id or port['id']
try:
interface_port = self._core_plugin.get_port(context, p_id)
except n_exc.PortNotFound:
LOG.debug('**** NO Port Info for '
'router: %(r_id)s : Port: %(p_id)s from DB',
{'r_id': router_id, 'p_id': port['id']})
return
LOG.debug('**** Fetched Port Info for '
'router: %(r_id)s : Port: %(p_id)s from DB',
{'r_id': router_id, 'p_id': port['id']})
self._populate_mtu_and_subnets_for_ports(context, [interface_port])
modified_interfaces.append(interface_port)
ha_port = port
else:
try:
ha_port = self._core_plugin.get_port(context, hag.ha_port_id)
except n_exc.PortNotFound:
LOG.debug('**** NO Port Info for '
'router(BAK): %(r_id)s : Port: %(p_id)s from DB',
{'r_id': router_id, 'p_id': hag.ha_port_id})
return
LOG.debug('**** Fetched Port Info for '
'router(BAK): %(r_id)s : Port: %(p_id)s from DB',
{'r_id': router_id, 'p_id': hag.ha_port_id})
self._populate_mtu_and_subnets_for_ports(context, [ha_port])
interface_port = port
interface_port[ha.HA_INFO] = {
ha.TYPE: hag.ha_type,
HA_GROUP: hag.group_identity,
'timers_config': hag.timers_config,
'tracking_config': hag.tracking_config,
'other_config': hag.other_config,
HA_PORT: ha_port}
return interface_port
def _create_hidden_port(self, context, network_id, device_id, fixed_ips,
port_type=DEVICE_OWNER_ROUTER_INTF):
"""Creates port used specially for HA purposes."""
port = {'port': {
'tenant_id': '', # intentionally not set
'network_id': network_id,
'mac_address': ATTR_NOT_SPECIFIED,
'fixed_ips': fixed_ips,
'device_id': device_id,
'device_owner': port_type,
'admin_state_up': True,
'name': ''}}
if utils.is_extension_supported(self._core_plugin, "dns-integration"):
port['port'].update(dns_name='')
return self._core_plugin.create_port(context, port)
def _get_ha_settings_by_router_id(self, context, router_id):
query = context.session.query(RouterHASetting)
query = query.filter(RouterHASetting.router_id == router_id)
try:
r_ha_s = query.one()
except (exc.NoResultFound, exc.MultipleResultsFound):
return
return r_ha_s
def _get_ha_group_by_ha_port_id(self, context, port_id):
query = context.session.query(RouterHAGroup)
query = query.filter(RouterHAGroup.ha_port_id == port_id)
try:
r_ha_g = query.one()
except (exc.NoResultFound, exc.MultipleResultsFound):
return
return r_ha_g
def _get_subnet_id_indexed_ha_groups(self, context, router_id,
load_virtual_port=False):
query = context.session.query(RouterHAGroup)
query = query.filter(RouterHAGroup.user_router_id == router_id)
if load_virtual_port:
query = query.options(joinedload('redundancy_router'))
return {hag['subnet_id']: hag for hag in query}
@cisco_utils.retry(exc.NoResultFound, LOOKUP_RETRIES, RETRY_INTERVAL, 1)
def _get_ha_group_for_subnet_id(self, context, router_id, subnet_id):
query = context.session.query(RouterHAGroup)
query = query.filter_by(user_router_id=router_id,
subnet_id=subnet_id)
LOG.debug('Trying to fetch HA group info for router: %(r_id)s and '
'subnet: %(s_id)s', {'r_id': router_id,
's_id': subnet_id})
return query.one()
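    # Note on the retry decorator above: assuming
    # cisco_utils.retry(exception, retries, interval, backoff) semantics,
    # LOOKUP_RETRIES = 5 and RETRY_INTERVAL = 3 give a concurrent
    # transaction roughly 15 seconds to commit the missing HA group row
    # before NoResultFound finally propagates.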
def _get_redundancy_router_bindings(self, context, router_id=None,
redundancy_router_id=None):
query = context.session.query(RouterRedundancyBinding)
if router_id is not None:
query = query.filter(
RouterRedundancyBinding.user_router_id == router_id)
if redundancy_router_id is not None:
query = query.filter(
RouterRedundancyBinding.redundancy_router_id ==
redundancy_router_id)
query = query.order_by(RouterRedundancyBinding.priority)
return query.all()
def _get_redundancy_router_ids(self, context, router_id):
return [binding.redundancy_router_id for binding in
self._get_redundancy_router_bindings(context,
router_id=router_id)]
def _generate_ha_group_id(self, context, router, port, ha_settings_db,
ha_group_uuid):
#TODO(bob-melander): Generate "guaranteed" unique id
if ha_settings_db.ha_type == ha.HA_HSRP:
return random.randint(0, MAX_HSRP_GROUPS)
elif ha_settings_db.ha_type == ha.HA_VRRP:
return random.randint(0, MAX_VRRP_GROUPS)
else:
# ha_type must be ha_type.GLBP
return random.randint(0, MAX_GLBP_GROUPS)
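    # One possible direction for the TODO above (sketch only, not original
    # code): reject candidate ids already used on the subnet, e.g.
    #
    #   def _generate_unique_ha_group_id(self, context, subnet_id, max_id):
    #       taken = {g.group_identity for g in context.session.query(
    #           RouterHAGroup).filter_by(subnet_id=subnet_id)}
    #       candidate = random.randint(0, max_id)
    #       while str(candidate) in taken:
    #           candidate = random.randint(0, max_id)
    #       return candidate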
    # Overridden function from l3_db
def get_router_for_floatingip(self, context, internal_port,
internal_subnet, external_network_id):
"""We need to over-load this function so that we only return the
user visible router and never its redundancy routers (as they never
have floatingips associated with them).
"""
gw_port = orm.aliased(models_v2.Port, name="gw_port")
routerport_qry = context.session.query(
RouterPort.router_id, models_v2.IPAllocation.ip_address).join(
models_v2.Port, models_v2.IPAllocation).filter(
models_v2.Port.network_id == internal_port['network_id'],
RouterPort.port_type.in_(bc.constants.ROUTER_INTERFACE_OWNERS),
models_v2.IPAllocation.subnet_id == internal_subnet['id']
).join(gw_port, gw_port.device_id == RouterPort.router_id).filter(
gw_port.network_id == external_network_id,
gw_port.device_owner == bc.constants.DEVICE_OWNER_ROUTER_GW
).distinct()
# Ensure that redundancy routers (in a ha group) are not returned,
# since only the user visible router should have floatingips.
# This can be done by checking that the id of routers does not
# appear in the 'redundancy_router_id' column in the
# 'cisco_router_redundancy_bindings' table.
routerport_qry = routerport_qry.outerjoin(
RouterRedundancyBinding,
RouterRedundancyBinding.redundancy_router_id ==
RouterPort.router_id)
routerport_qry = routerport_qry.filter(
RouterRedundancyBinding.redundancy_router_id == expr.null())
first_router_id = None
for router_id, interface_ip in routerport_qry:
if interface_ip == internal_subnet['gateway_ip']:
return router_id
if not first_router_id:
first_router_id = router_id
if first_router_id:
return first_router_id
raise l3.ExternalGatewayForFloatingIPNotFound(
subnet_id=internal_subnet['id'],
external_network_id=external_network_id,
port_id=internal_port['id'])
|
|
# -*- coding: utf-8 -*-
# Inspired by:
# https://github.com/lingdb/CoBL/issues/223#issuecomment-256815113
from __future__ import unicode_literals, print_function
from django.db import migrations
data = {'nose': 'While fighting, he hit him in the nose. (Kassian)',
'dirty': 'This cup is clean and that one is dirty.',
'red': 'Blood is red.',
'white': 'There is a white cloud in the sky.',
'breathe': "People can't breathe underwater. ",
'live': 'How many years can people live for?',
'fight': 'The two men are fighting outside.',
'flower': 'What type of flower is this?',
        'stone': 'He held the stone in his hand.',
'near': "You can walk there in one day; it's near.",
'smooth': 'He picked a smooth stone from the river.',
'fruit': 'He picked fruit from the trees.',
'sand': 'She let the sand fall through her hands.',
'know': 'They know that we are coming.',
'far': "You can't walk there in one day; it's too far.",
'rain': 'I saw him standing in the rain.',
'tooth': 'You can break a tooth while eating hard food. (Kassian)',
'bone': 'He fell and broke a bone.',
'ear': 'One ear of his was bigger than the other ear. ',
'two': 'One plus one is two.',
'right': 'He is blind in his right eye.',
'blow': 'He blew on the sticks to start the fire.',
'eye': 'She closed one eye.',
'mother': 'Everyone has a father and a mother.',
'push': 'He was pushing and pulling the furniture around the room. ',
'sit': 'He is sitting over there.',
'many': 'We picked many (a lot of) '
'(insert name of local fruit) today.',
'freeze': 'When water freezes, it becomes ice. ',
'skin': 'His skin was old and hard.',
'moon': 'I saw the moon in the sky.',
'back': 'I fell over and now my back hurts.',
'knee': 'He put his hand on his knee.',
'child': 'That is their first child.',
'navel': 'One could see his navel.',
'louse': 'I picked a louse from his hair.',
'cry': 'She was crying when I saw her.',
'run': 'Can you run fast?',
'three': 'Three plus one is four.',
'name': 'What is your name?',
'root': 'You could see some of the roots '
'of the tree above the ground.',
'wash': 'She washed her hands.',
'carry': 'She carried her bag (under her arm).',
'what': 'What are you eating?',
'forest': 'There are many animals in the forest.',
'dry': 'Is it dry or wet?',
'old': 'He lives in the old house, not in the new house.',
'all': 'He has cut down all the trees.',
'sew': 'He sewed the two pieces of cloth together.',
'fall': 'I fell from the tree yesterday and hurt my back.',
'wipe': 'He wiped the sweat from his brow.',
'few': "It's a small village. There are few houses. "
"(Eng. There are not many houses.)",
'smoke': 'I could see the smoke from the fire.',
'sun': "During the day, the sun is in the sky; at night, it's not.",
'hide': 'They hid their food from the animal.',
'cut': "I've cut my finger; look, it's bleeding.",
'die': 'Everyone dies sooner or later.',
'lake': 'Her house is beside the lake.',
'sky': 'There were white clouds in the blue sky.',
'sing': 'I heard him yesterday. He was singing.',
'there': 'The knife is (over) there.',
'scratch': 'She scratched her head.',
'hold': 'He is holding a stick in his hand.',
'stand': 'He is standing over there.',
'river': 'He lives beside the river.',
'cloud': 'There were white clouds in the blue sky. (WOLD)',
'egg': "Don't drop the egg; it will break!",
'burn': 'The wood is burning in the fire.',
'not': 'I do not understand.',
'foot': 'Her foot is injured so she cannot walk.',
'hunt': 'He is hunting animals in the forest.',
'tail': 'The dog wagged its tail.',
'dog': 'I would like to own a dog.',
'stab': 'I stabbed him in the belly (with a knife).',
'yesterday': "I haven't seen him today but I saw him yesterday.",
'black': 'Charcoal is black.',
'fat': 'Cut the fat off the meat.',
'person': 'Only one person at a time can pass through the gate.',
'mouth': 'He put his fingers in his mouth.',
'drink': 'He drank because he was thirsty.',
'sea': 'Her house is beside the sea.',
'water': 'She was thirsty, so she drank some water.',
'ash': 'There was nothing left but ash.',
'man': 'I saw a man and a woman.',
        'some': 'There are some (insert name of local fruit) in the basket.',
'if': 'If she does this, then it will happen.',
'play': 'They are playing a game outside.',
'sweet': 'This apple is ripe now, it tastes sweet.',
'big': 'This is a big stone. That is a small stone.',
        'thick': 'That tree trunk is thick; '
                 'it would take a long time to cut through.',
'turn': 'It was heavy so it was hard to turn it.',
'how': 'How do you make fire?',
'wet': 'Is it wet or dry?',
'fish': 'I can see a fish swimming in the water.',
'spit': 'He spat on the ground.',
'eat': 'Eat some food and drink some water.',
'this': 'I used this knife.',
'vomit': 'He ate something bad and threw up (vomited).',
        'chest': 'He wore a medal on his chest.',
'squeeze': "Don't squeeze the ball - you'll burst it!",
'flow': 'The river flows, e.g. past the village, to the sea etc.',
'fear': 'While the dog was here, I was scared.',
'green': 'The grass was fresh and green.',
'do': 'What did you **do** in the village yesterday?',
'see': 'I see him every day on the road.',
'hard': 'It was very hard. When he bit it he broke his tooth.',
'bite': 'The dog bit him.',
'guts': 'He cut open the animal and pulled out its guts.',
'rotten': 'That (insert name of local fruit) is rotten.',
'bad': 'That is a bad idea.',
'swell': 'His arm swelled after he was injured.',
'round': 'That table is round.',
'in': 'The dog is in a basket.',
'float': 'The leaf is floating on the river.',
'bark': 'He tore some bark off the tree.',
'here': 'The knife is here.',
'narrow': 'Here the river is narrow; further down it is wide.',
'that': 'I used that knife.',
'year': 'The year is almost over.',
'throw': 'He threw a stick into the water.',
'pull': 'He was pushing and pulling the furniture around the room. ',
'hit': 'He hit the ball with the stick.',
'wind': 'She felt the wind in her face.',
'when': 'When will we meet again?',
'wide': "The canyon is wide here, but further down it's narrow.",
'yellow': 'The natural color of egg yolk. (midday sun?)',
'small': 'This is a small stone. That is a big stone.',
'leg': 'He fell and broke his leg.',
'hand': 'He burned his hand.',
'stick': 'He carried a stick under his arm.',
'dig': 'He is digging in the ground.',
'house': 'His house is at the edge of the village.',
'woman': 'I saw a man and a woman.',
'tie': 'He tied the two sticks together.',
'belly': 'He punched him in the belly.',
'kill': 'They started fighting. He killed him.',
'snake': 'He jumped when he saw the snake.',
'short': 'This is a short rope; that is a long rope.',
'claw': 'It caught its prey in its claws.',
'rub': 'He rubbed his belly.',
'fly_N': 'He killed a fly.',
'take': 'Take the money when you go.',
'thigh': 'He slapped his thigh.',
'feather': 'What bird does this feather come from?',
'shadow': 'He could see his shadow on the ground.',
'four': 'Four plus one is five.',
'and': 'I can see a tree and a rock.',
'earth': 'As he dug the hole, he threw out the earth.',
'grind': 'He is grinding flour.',
'leaf': 'He picked a leaf from the tree.',
'head': 'He has lots of hair on his head.',
'day': 'Day and night.',
'hear': 'I heard a noise last night.',
'at': 'He stopped at the rock half way to the village.',
'heavy': 'The rock was very heavy.',
'star': 'There were many stars in the night sky.',
'who': 'Who killed this dog? (Kassian)',
'say': 'She said that she would be late.',
'one': 'One plus one is two.',
'left': 'He is blind in his left eye.',
'thin': 'This piece of wood is thin, and that one is thick.',
'where': 'Where is the knife?',
'nail': 'He had very long nails on his fingers.',
'tree': 'There is a tree beside the house.',
'count': 'I counted the stones; there were five stones.',
'wing': 'Birds have wings; people do not.',
'other': 'He opened one eye and kept the other eye closed.',
'give': 'She wanted to cut the (insert name of local fruit). '
'He gave her a knife.',
'full': 'An empty pot is easier to carry than a full pot. (WOLD)',
'hot': 'The stone is hot (warm) under the sun.',
'smell': 'I (can) smell smoke.',
'tongue': 'He stuck his tongue out.',
'horn': 'Some animals have horns. Humans do not.',
'bitter': 'The leaves tasted bitter.',
'neck': 'He put his hand on his neck.',
'night': 'Night and day.',
'lie': 'She was lying on the ground.',
'walk': 'People can walk but fish cannot.',
'meat': 'What animal does this meat come from?',
'seed': 'You can eat the seeds of (insert name of local fruit).',
'fire': 'He was cold, so he moved closer to the fire.',
'laugh': 'When I saw him, he was laughing.',
'snow': 'There was snow on the ground.',
'grass': 'There is grass growing beside the river.',
'blood': 'Blood is red.',
'worm': 'There is a worm in this (insert name of local fruit).',
'new': 'He lives in the new house; not in the old house.',
'come': "I saw him on the road. "
"I don't know where he was coming from.",
'mountain': 'He started to climb the mountain.',
'bird': 'What bird does this feather come from?',
'hair': 'She pulled him by his hair. (almost Kassian)',
'five': 'Four plus one is five.',
'long': 'This is a long rope; that is a short rope.',
'fly_V': 'Birds can fly but people cannot. (close to Kassian)',
'father': 'Everyone has a father and a mother.',
'swim': 'He is swimming in the lake.',
'true': 'The story he is telling is true.',
'good': 'That is a good idea.',
'sleep': 'He is sleeping in the house.',
'straight': 'This stick is straight, but that one is bent.',
'fog': "I couldn't see the village because of the fog.",
'sharp': 'A sharp knife cuts better than a blunt one.',
'think': "What are you doing? I'm just thinking.",
'dust': 'When the ground is dry, you kick up dust when you walk.'}
def forwards_func(apps, schema_editor):
Meaning = apps.get_model("lexicon", "Meaning")
for gloss, exampleContext in data.items():
try:
entry = Meaning.objects.get(gloss=gloss)
entry.exampleContext = exampleContext
entry.save()
except Meaning.DoesNotExist:
print('\nCould not set exampleContext for Meaning: %s' % gloss)
def reverse_func(apps, schema_editor):
print('Reverse of 0123_fill_meaning_examplecontext')
class Migration(migrations.Migration):
dependencies = [('lexicon', '0122_meaning_examplecontext')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
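# Applying this data migration is the usual Django workflow (illustrative):
#
#   python manage.py migrate lexicon 0123_fill_meaning_examplecontext
#
# Migrating back to 0122_meaning_examplecontext runs reverse_func, which
# only prints a note; the exampleContext values are left untouched.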
|
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest.api.network import base_routers as base
from tempest.common.utils import data_utils
from tempest import test
class RoutersTest(base.BaseRouterTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
super(RoutersTest, cls).setUpClass()
@test.attr(type='smoke')
def test_create_show_list_update_delete_router(self):
# Create a router
# NOTE(salv-orlando): Do not invoke self.create_router
# as we need to check the response code
name = data_utils.rand_name('router-')
resp, create_body = self.client.create_router(
name, external_gateway_info={
"network_id": self.network_cfg.public_network_id},
admin_state_up=False)
self.assertEqual('201', resp['status'])
self.addCleanup(self._delete_router, create_body['router']['id'])
self.assertEqual(create_body['router']['name'], name)
self.assertEqual(
create_body['router']['external_gateway_info']['network_id'],
self.network_cfg.public_network_id)
self.assertEqual(create_body['router']['admin_state_up'], False)
# Show details of the created router
resp, show_body = self.client.show_router(
create_body['router']['id'])
self.assertEqual('200', resp['status'])
self.assertEqual(show_body['router']['name'], name)
self.assertEqual(
show_body['router']['external_gateway_info']['network_id'],
self.network_cfg.public_network_id)
self.assertEqual(show_body['router']['admin_state_up'], False)
# List routers and verify if created router is there in response
resp, list_body = self.client.list_routers()
self.assertEqual('200', resp['status'])
routers_list = list()
for router in list_body['routers']:
routers_list.append(router['id'])
self.assertIn(create_body['router']['id'], routers_list)
# Update the name of router and verify if it is updated
updated_name = 'updated ' + name
resp, update_body = self.client.update_router(
create_body['router']['id'], name=updated_name)
self.assertEqual('200', resp['status'])
self.assertEqual(update_body['router']['name'], updated_name)
resp, show_body = self.client.show_router(
create_body['router']['id'])
self.assertEqual(show_body['router']['name'], updated_name)
@test.attr(type='smoke')
def test_add_remove_router_interface_with_subnet_id(self):
network = self.create_network()
subnet = self.create_subnet(network)
router = self.create_router(data_utils.rand_name('router-'))
# Add router interface with subnet id
resp, interface = self.client.add_router_interface_with_subnet_id(
router['id'], subnet['id'])
self.assertEqual('200', resp['status'])
self.addCleanup(self._remove_router_interface_with_subnet_id,
router['id'], subnet['id'])
self.assertIn('subnet_id', interface.keys())
self.assertIn('port_id', interface.keys())
# Verify router id is equal to device id in port details
resp, show_port_body = self.client.show_port(
interface['port_id'])
self.assertEqual(show_port_body['port']['device_id'],
router['id'])
@test.attr(type='smoke')
def test_add_remove_router_interface_with_port_id(self):
network = self.create_network()
self.create_subnet(network)
router = self.create_router(data_utils.rand_name('router-'))
resp, port_body = self.client.create_port(network['id'])
# add router interface to port created above
resp, interface = self.client.add_router_interface_with_port_id(
router['id'], port_body['port']['id'])
self.assertEqual('200', resp['status'])
self.addCleanup(self._remove_router_interface_with_port_id,
router['id'], port_body['port']['id'])
self.assertIn('subnet_id', interface.keys())
self.assertIn('port_id', interface.keys())
# Verify router id is equal to device id in port details
resp, show_port_body = self.client.show_port(
interface['port_id'])
self.assertEqual(show_port_body['port']['device_id'],
router['id'])
def _verify_router_gateway(self, router_id, exp_ext_gw_info=None):
resp, show_body = self.client.show_router(router_id)
self.assertEqual('200', resp['status'])
actual_ext_gw_info = show_body['router']['external_gateway_info']
if exp_ext_gw_info is None:
self.assertIsNone(actual_ext_gw_info)
return
# Verify only keys passed in exp_ext_gw_info
        for k, v in exp_ext_gw_info.items():
self.assertEqual(v, actual_ext_gw_info[k])
def _verify_gateway_port(self, router_id):
resp, list_body = self.admin_client.list_ports(
network_id=self.network_cfg.public_network_id,
device_id=router_id)
self.assertEqual(len(list_body['ports']), 1)
gw_port = list_body['ports'][0]
fixed_ips = gw_port['fixed_ips']
self.assertEqual(len(fixed_ips), 1)
resp, public_net_body = self.admin_client.show_network(
self.network_cfg.public_network_id)
public_subnet_id = public_net_body['network']['subnets'][0]
self.assertEqual(fixed_ips[0]['subnet_id'], public_subnet_id)
@test.attr(type='smoke')
def test_update_router_set_gateway(self):
router = self.create_router(data_utils.rand_name('router-'))
self.client.update_router(
router['id'],
external_gateway_info={
'network_id': self.network_cfg.public_network_id})
# Verify operation - router
resp, show_body = self.client.show_router(router['id'])
self.assertEqual('200', resp['status'])
self._verify_router_gateway(
router['id'],
{'network_id': self.network_cfg.public_network_id})
self._verify_gateway_port(router['id'])
@test.attr(type='smoke')
def test_update_router_set_gateway_with_snat_explicit(self):
router = self.create_router(data_utils.rand_name('router-'))
self.admin_client.update_router_with_snat_gw_info(
router['id'],
external_gateway_info={
'network_id': self.network_cfg.public_network_id,
'enable_snat': True})
self._verify_router_gateway(
router['id'],
{'network_id': self.network_cfg.public_network_id,
'enable_snat': True})
self._verify_gateway_port(router['id'])
@test.attr(type='smoke')
def test_update_router_set_gateway_without_snat(self):
router = self.create_router(data_utils.rand_name('router-'))
self.admin_client.update_router_with_snat_gw_info(
router['id'],
external_gateway_info={
'network_id': self.network_cfg.public_network_id,
'enable_snat': False})
self._verify_router_gateway(
router['id'],
{'network_id': self.network_cfg.public_network_id,
'enable_snat': False})
self._verify_gateway_port(router['id'])
@test.attr(type='smoke')
def test_update_router_unset_gateway(self):
router = self.create_router(
data_utils.rand_name('router-'),
external_network_id=self.network_cfg.public_network_id)
self.client.update_router(router['id'], external_gateway_info={})
self._verify_router_gateway(router['id'])
# No gateway port expected
resp, list_body = self.admin_client.list_ports(
network_id=self.network_cfg.public_network_id,
device_id=router['id'])
self.assertFalse(list_body['ports'])
@test.attr(type='smoke')
def test_update_router_reset_gateway_without_snat(self):
router = self.create_router(
data_utils.rand_name('router-'),
external_network_id=self.network_cfg.public_network_id)
self.admin_client.update_router_with_snat_gw_info(
router['id'],
external_gateway_info={
'network_id': self.network_cfg.public_network_id,
'enable_snat': False})
self._verify_router_gateway(
router['id'],
{'network_id': self.network_cfg.public_network_id,
'enable_snat': False})
self._verify_gateway_port(router['id'])
@test.requires_ext(extension='extraroute', service='network')
@test.attr(type='smoke')
def test_update_extra_route(self):
self.network = self.create_network()
self.name = self.network['name']
self.subnet = self.create_subnet(self.network)
# Add router interface with subnet id
self.router = self.create_router(data_utils.rand_name('router-'), True)
self.create_router_interface(self.router['id'], self.subnet['id'])
self.addCleanup(
self._delete_extra_routes,
self.router['id'])
# Update router extra route
cidr = netaddr.IPNetwork(self.subnet['cidr'])
        resp, extra_route = self.client.update_extra_routes(
            self.router['id'], str(cidr[0]), str(self.subnet['cidr']))
        self.assertEqual('200', resp['status'])
def _delete_extra_routes(self, router_id):
resp, _ = self.client.delete_extra_routes(router_id)
|
|
# Copyright 2018-2019 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TODO: provider policy execution initialization for outputs
"""
import datetime
import logging
import os
import tempfile
import time
from c7n.output import (
blob_outputs,
metrics_outputs,
DirectoryOutput,
Metrics,
LogOutput)
from c7n.utils import local_session
@metrics_outputs.register('gcp')
class StackDriverMetrics(Metrics):
METRICS_PREFIX = 'custom.googleapis.com/custodian/policy'
DESCRIPTOR_COMMON = {
        'metricKind': 'GAUGE',
'labels': [{
'key': 'policy',
'valueType': 'STRING',
'description': 'Custodian Policy'}],
}
METRICS_DESCRIPTORS = {
'resourcecount': {
            'type': '{}/{}'.format(METRICS_PREFIX, 'resourcecount'),
'valueType': 'INT64',
'units': 'items',
'description': 'Number of resources that matched the given policy',
'displayName': 'Resources',
},
'resourcetime': {
            'type': '{}/{}'.format(METRICS_PREFIX, 'resourcetime'),
'valueType': 'DOUBLE',
'units': 's',
'description': 'Time to query the resources for a given policy',
'displayName': 'Query Time',
},
'actiontime': {
            'type': '{}/{}'.format(METRICS_PREFIX, 'actiontime'),
'valueType': 'DOUBLE',
'units': 's',
'description': 'Time to perform actions for a given policy',
'displayName': 'Action Time',
},
}
# Custom metrics docs https://tinyurl.com/y8rrghwc
log = logging.getLogger('c7n_gcp.metrics')
def __init__(self, ctx, config=None):
super(StackDriverMetrics, self).__init__(ctx, config)
self.project_id = local_session(
self.ctx.session_factory).get_default_project()
def initialize(self):
"""One time initialization of metrics descriptors.
        # tbd - unclear if this is adding significant value.
"""
client = local_session(self.ctx.session_factory).client(
'monitoring', 'v3', 'projects.metricDescriptors')
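        # Index the already-registered descriptors by their short name (the
        # last path segment of ``type``) so only the missing ones are
        # created below.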
descriptor_map = {
n['type'].rsplit('/', 1)[-1]: n for n in client.execute_command('list', {
'name': 'projects/%s' % self.project_id,
                'filter': 'metric.type = starts_with("{}")'.format(
                    self.METRICS_PREFIX)}).get('metricDescriptors', [])}
created = False
for name in self.METRICS_DESCRIPTORS:
if name in descriptor_map:
continue
created = True
md = self.METRICS_DESCRIPTORS[name]
md.update(self.DESCRIPTOR_COMMON)
client.execute_command(
'create', {'name': 'projects/%s' % self.project_id, 'body': md})
if created:
self.log.info("Initializing StackDriver Metrics Descriptors")
time.sleep(5)
def _format_metric(self, key, value, unit, dimensions):
# Resource is a Google controlled vocabulary with artificial
# limitations on resource type there's not much useful we can
# utilize.
now = datetime.datetime.utcnow()
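        # GAUGE points report an instantaneous value; the Monitoring API
        # expects startTime, when supplied, to equal endTime, hence the
        # identical timestamps in the interval below.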
metrics_series = {
'metric': {
'type': 'custom.googleapis.com/custodian/policy/%s' % key.lower(),
'labels': {
'policy': self.ctx.policy.name,
'project_id': self.project_id
},
},
'metricKind': 'GAUGE',
'valueType': 'INT64',
'resource': {
'type': 'global',
},
'points': [{
'interval': {
'endTime': now.isoformat('T') + 'Z',
'startTime': now.isoformat('T') + 'Z'},
'value': {'int64Value': int(value)}}]
}
return metrics_series
def _put_metrics(self, ns, metrics):
session = local_session(self.ctx.session_factory)
client = session.client('monitoring', 'v3', 'projects.timeSeries')
params = {'name': "projects/{}".format(self.project_id),
'body': {'timeSeries': metrics}}
client.execute_command('create', params)
class StackDriverLogging(LogOutput):
def get_handler(self):
        # GCP has three independently maintained Python API bindings (plus
        # assorted monkey-patched shims on top). The gRPC-based binding used
        # by logging is not yet supported by our test recording.
        # TODO: switch these grpc variants to the REST versions so we can
        # drop the protobuf/grpc deps and record tests.
from google.cloud.logging import Client as LogClient
from google.cloud.logging.handlers import CloudLoggingHandler
from google.cloud.logging.resource import Resource
log_group = self.ctx.options.log_group
if log_group.endswith('*'):
log_group = "%s%s" % (log_group[:-1], self.ctx.policy.name)
project_id = local_session(self.ctx.session_factory).get_default_project()
client = LogClient(project_id)
return CloudLoggingHandler(
client,
log_group,
resource=Resource(type='project', labels={'project_id': project_id}))
def leave_log(self):
super(StackDriverLogging, self).leave_log()
# Flush and stop the background thread
self.handler.transport.flush()
self.handler.transport.worker.stop()
@blob_outputs.register('gs')
class GCPStorageOutput(DirectoryOutput):
def __init__(self, ctx, config=None):
super(GCPStorageOutput, self).__init__(ctx, config)
self.date_path = datetime.datetime.now().strftime('%Y/%m/%d/%H')
self.gs_path, self.bucket, self.key_prefix = parse_gs(
self.ctx.output_path)
self.root_dir = tempfile.mkdtemp()
def __repr__(self):
return "<%s to bucket:%s prefix:%s>" % (
self.__class__.__name__,
self.bucket,
"%s/%s" % (self.key_prefix, self.date_path))
    def upload(self):
        # The original code called ``self.transfer.upload_file`` with an
        # S3-style ``ServerSideEncryption`` argument, but no ``transfer``
        # attribute is defined on this class. A minimal sketch using the
        # google-cloud-storage client instead (assumes that package is
        # installed and default credentials are available):
        from google.cloud.storage import Client as StorageClient
        bucket = StorageClient().bucket(self.bucket)
        for root, dirs, files in os.walk(self.root_dir):
            for f in files:
                key = "%s/%s%s" % (
                    self.key_prefix,
                    self.date_path,
                    "%s/%s" % (
                        root[len(self.root_dir):], f))
                key = key.strip('/')
                bucket.blob(key).upload_from_filename(os.path.join(root, f))
def parse_gs(gs_path):
if not gs_path.startswith('gs://'):
raise ValueError("Invalid gs path")
ridx = gs_path.find('/', 5)
if ridx == -1:
ridx = None
bucket = gs_path[5:ridx]
gs_path = gs_path.rstrip('/')
if ridx is None:
key_prefix = ""
else:
key_prefix = gs_path[gs_path.find('/', 5):]
return gs_path, bucket, key_prefix
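if __name__ == '__main__':
    # Quick illustration of parse_gs (bucket and prefix are made up). Note
    # that the key prefix keeps its leading slash, which upload() strips.
    path, bucket, prefix = parse_gs("gs://my-bucket/custodian/logs")
    assert (bucket, prefix) == ("my-bucket", "/custodian/logs")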
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Connects to a Cloud SQL instance."""
import datetime
from apitools.base.protorpclite import util as protorpc_util
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.sql import network
from googlecloudsdk.api_lib.sql import operations
from googlecloudsdk.api_lib.sql import validate
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import retry
def _WhitelistClientIP(instance_ref, sql_client, sql_messages, resources):
"""Add CLIENT_IP to the authorized networks list.
Makes an API call to add CLIENT_IP to the authorized networks list.
The server knows to interpret the string CLIENT_IP as the address with which
the client reaches the server. This IP will be whitelisted for 1 minute.
Args:
instance_ref: resources.Resource, The instance we're connecting to.
sql_client: apitools.BaseApiClient, A working client for the sql version
to be used.
sql_messages: module, The module that defines the messages for the sql
version to be used.
resources: resources.Registry, The registry that can create resource refs
for the sql version to be used.
Returns:
string, The name of the authorized network rule. Callers can use this name
to find out the IP the client reached the server with.
Raises:
HttpException: An http error response was received while executing api
request.
ToolException: Server did not complete the whitelisting operation in time.
"""
datetime_now = datetime.datetime.now(
protorpc_util.TimeZoneOffset(datetime.timedelta(0)))
acl_name = 'sql connect at time {0}'.format(datetime_now)
user_acl = sql_messages.AclEntry(
name=acl_name,
expirationTime=datetime_now + datetime.timedelta(minutes=1),
value='CLIENT_IP')
try:
original = sql_client.instances.Get(
sql_messages.SqlInstancesGetRequest(
project=instance_ref.project,
instance=instance_ref.instance))
except apitools_exceptions.HttpError as error:
raise exceptions.HttpException(error)
original.settings.ipConfiguration.authorizedNetworks.append(user_acl)
patch_request = sql_messages.SqlInstancesPatchRequest(
databaseInstance=original,
project=instance_ref.project,
instance=instance_ref.instance)
result = sql_client.instances.Patch(patch_request)
operation_ref = resources.Create(
'sql.operations',
operation=result.name,
project=instance_ref.project,
instance=instance_ref.instance)
message = 'Whitelisting your IP for incoming connection for 1 minute'
operations.OperationsV1Beta4.WaitForOperation(
sql_client, operation_ref, message)
return acl_name
def _GetClientIP(instance_ref, sql_client, acl_name):
"""Retrieves given instance and extracts its client ip."""
instance_info = sql_client.instances.Get(
sql_client.MESSAGES_MODULE.SqlInstancesGetRequest(
project=instance_ref.project,
instance=instance_ref.instance))
networks = instance_info.settings.ipConfiguration.authorizedNetworks
client_ip = None
for net in networks:
if net.name == acl_name:
client_ip = net.value
break
return instance_info, client_ip
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class Connect(base.Command):
"""Connects to a Cloud SQL instance."""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To connect to a Cloud SQL instance, run:
$ {command} my-instance --user=root
""",
}
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
      parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
parser.add_argument(
'instance',
completion_resource='sql.instances',
help='Cloud SQL instance ID.')
parser.add_argument(
'--user', '-u',
required=False,
help='Cloud SQL instance user to connect as.')
def Run(self, args):
"""Connects to a Cloud SQL instance.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
If no exception is raised this method does not return. A new process is
started and the original one is killed.
Raises:
HttpException: An http error response was received while executing api
request.
      ToolException: An error other than http error occurred while executing the
command.
"""
sql_client = self.context['sql_client']
sql_messages = self.context['sql_messages']
resources = self.context['registry']
# Do the mysql executable check first. This way we can return an error
# faster and not wait for whitelisting IP and other operations.
mysql_executable = files.FindExecutableOnPath('mysql')
if not mysql_executable:
raise exceptions.ToolException(
'Mysql client not found. Please install a mysql client and make sure '
'it is in PATH to be able to connect to the database instance.')
validate.ValidateInstanceName(args.instance)
instance_ref = resources.Parse(args.instance, collection='sql.instances')
acl_name = _WhitelistClientIP(instance_ref, sql_client, sql_messages,
resources)
# Get the client IP that the server sees. Sadly we can only do this by
# checking the name of the authorized network rule.
retryer = retry.Retryer(max_retrials=2, exponential_sleep_multiplier=2)
try:
instance_info, client_ip = retryer.RetryOnResult(
_GetClientIP,
[instance_ref, sql_client, acl_name],
should_retry_if=lambda x, s: x[1] is None, # client_ip is None
sleep_ms=500)
except retry.RetryException:
raise exceptions.ToolException('Could not whitelist client IP. Server '
'did not reply with the whitelisted IP.')
# Check the version of IP and decide if we need to add ipv4 support.
ip_type = network.GetIpVersion(client_ip)
if ip_type == network.IP_VERSION_4:
if instance_info.settings.ipConfiguration.ipv4Enabled:
ip_address = instance_info.ipAddresses[0].ipAddress
else:
# TODO(user): ask user if we should enable ipv4 addressing
message = ('It seems your client does not have ipv6 connectivity and '
'the database instance does not have an ipv4 address. '
'Please request an ipv4 address for this database instance.')
raise exceptions.ToolException(message)
elif ip_type == network.IP_VERSION_6:
ip_address = instance_info.ipv6Address
else:
raise exceptions.ToolException('Could not connect to SQL server.')
# We have everything we need, time to party!
mysql_args = [mysql_executable, '-h', ip_address]
if args.user:
mysql_args.extend(['-u', args.user])
mysql_args.append('-p')
execution_utils.Exec(mysql_args)
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
from six.moves.urllib import parse
from rally.cli import envutils
from rally.common.i18n import _
from rally.common import logging
from rally.common import objects
from rally.common.plugin import plugin
from rally import consts
from rally import exceptions
CONF = cfg.CONF
OSCLIENTS_OPTS = [
cfg.FloatOpt("openstack_client_http_timeout", default=180.0,
help="HTTP timeout for any of OpenStack service in seconds")
]
CONF.register_opts(OSCLIENTS_OPTS)
_NAMESPACE = "openstack"
def configure(name, default_version=None, default_service_type=None,
supported_versions=None):
"""OpenStack client class wrapper.
Each client class has to be wrapped by configure() wrapper. It
sets essential configuration of client classes.
:param name: Name of the client
:param default_version: Default version for client
    :param default_service_type: Default service type of endpoint (if this
        variable is not specified, validation will assume that your client
        doesn't allow specifying a service type)
    :param supported_versions: List of supported versions (if this variable
        is not specified, the `OSClient.validate_version` method will raise
        an exception that the client doesn't support setting any versions.
        If this logic is wrong for your client, you should override
        `validate_version` in the client object)
"""
def wrapper(cls):
cls = plugin.configure(name=name, namespace=_NAMESPACE)(cls)
cls._meta_set("default_version", default_version)
cls._meta_set("default_service_type", default_service_type)
cls._meta_set("supported_versions", supported_versions or [])
return cls
return wrapper
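# An illustrative declaration of a client plugin (the "dummy" name and
# versions are made up; the real clients below follow the same pattern):
#
#     @configure("dummy", default_version="1",
#                default_service_type="dummy-service",
#                supported_versions=["1", "2"])
#     class Dummy(OSClient):
#         def create_client(self, version=None, service_type=None):
#             ...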
@plugin.base()
class OSClient(plugin.Plugin):
def __init__(self, credential, api_info, cache_obj):
self.credential = credential
self.api_info = api_info
self.cache = cache_obj
def choose_version(self, version=None):
"""Return version string.
        Choose the version between the transmitted one (preferred if
        present), the version from api_info (configured from a context)
        and the default.
"""
# NOTE(andreykurilin): The result of choose is converted to string,
# since most of clients contain map for versioned modules, where a key
# is a string value of version. Example of map and its usage:
#
# from oslo_utils import importutils
# ...
# version_map = {"1": "someclient.v1.client.Client",
# "2": "someclient.v2.client.Client"}
#
# def Client(version, *args, **kwargs):
# cls = importutils.import_class(version_map[version])
# return cls(*args, **kwargs)
#
        # That is why the type of version is so important and we should
        # ensure that version is a string object.
        # For those clients which don't accept a string value (for example
        # zaqarclient), this method should be overridden.
version = (version
or self.api_info.get(self.get_name(), {}).get("version")
or self._meta_get("default_version"))
if version is not None:
version = str(version)
return version
@classmethod
def get_supported_versions(cls):
return cls._meta_get("supported_versions")
@classmethod
def validate_version(cls, version):
supported_versions = cls.get_supported_versions()
if supported_versions:
if str(version) not in supported_versions:
raise exceptions.ValidationError(_(
"'%(vers)s' is not supported. Should be one of "
"'%(supported)s'") % {"vers": version,
"supported": supported_versions})
else:
raise exceptions.RallyException(
_("Setting version is not supported."))
try:
float(version)
except ValueError:
raise exceptions.ValidationError(_(
"'%s' is invalid. Should be numeric value.") % version)
def choose_service_type(self, service_type=None):
"""Return service_type string.
        Choose the service type between the transmitted one (preferred if
        present), the service type from api_info (configured from a
        context) and the default.
"""
return (service_type
or self.api_info.get(self.get_name(), {}).get("service_type")
or self._meta_get("default_service_type"))
@classmethod
def is_service_type_configurable(cls):
"""Just checks that client supports setting service type."""
if cls._meta_get("default_service_type") is None:
raise exceptions.RallyException(_(
"Setting service type is not supported."))
def keystone(self, *args, **kwargs):
"""Make a call to keystone client."""
keystone = OSClient.get("keystone")(self.credential, self.api_info,
self.cache)
return keystone(*args, **kwargs)
def _get_session(self, auth_url=None, version=None):
from keystoneauth1 import discover
from keystoneauth1 import session
from keystoneclient.auth import identity
password_args = {
"auth_url": auth_url or self.credential.auth_url,
"username": self.credential.username,
"password": self.credential.password,
"tenant_name": self.credential.tenant_name
}
version = OSClient.get("keystone")(
self.credential, self.api_info, self.cache).choose_version(version)
if version is None:
            # NOTE(rvasilets): If the version is not specified then we
            # discover the available version with the smallest number. To
            # be able to discover versions we need a session.
temp_session = session.Session(
verify=(
self.credential.cacert or not self.credential.insecure),
timeout=CONF.openstack_client_http_timeout)
version = str(discover.Discover(
temp_session,
password_args["auth_url"]).version_data()[0]["version"][0])
if "v2.0" not in password_args["auth_url"] and (
version != "2"):
password_args.update({
"user_domain_name": self.credential.user_domain_name,
"domain_name": self.credential.domain_name,
"project_domain_name": self.credential.project_domain_name,
})
identity_plugin = identity.Password(**password_args)
sess = session.Session(
auth=identity_plugin, verify=(
self.credential.cacert or not self.credential.insecure),
timeout=CONF.openstack_client_http_timeout)
return sess, identity_plugin
def _get_endpoint(self, service_type=None):
kc = self.keystone()
kw = {"service_type": self.choose_service_type(service_type),
"region_name": self.credential.region_name}
if self.credential.endpoint_type:
kw["endpoint_type"] = self.credential.endpoint_type
api_url = kc.service_catalog.url_for(**kw)
return api_url
def _get_auth_info(self, user_key="username",
password_key="password",
auth_url_key="auth_url",
project_name_key="project_id",
domain_name_key="domain_name",
user_domain_name_key="user_domain_name",
project_domain_name_key="project_domain_name",
cacert_key="cacert",
endpoint_type="endpoint_type",
):
kw = {
user_key: self.credential.username,
password_key: self.credential.password,
auth_url_key: self.credential.auth_url,
cacert_key: self.credential.cacert,
}
if project_name_key:
kw.update({project_name_key: self.credential.tenant_name})
if "v2.0" not in self.credential.auth_url:
kw.update({
domain_name_key: self.credential.domain_name})
kw.update({
user_domain_name_key:
self.credential.user_domain_name or "Default"})
kw.update({
project_domain_name_key:
self.credential.project_domain_name or "Default"})
if self.credential.endpoint_type:
kw[endpoint_type] = self.credential.endpoint_type
return kw
@abc.abstractmethod
def create_client(self, *args, **kwargs):
"""Create new instance of client."""
def __call__(self, *args, **kwargs):
"""Return initialized client instance."""
key = "{0}{1}{2}".format(self.get_name(),
str(args) if args else "",
str(kwargs) if kwargs else "")
if key not in self.cache:
self.cache[key] = self.create_client(*args, **kwargs)
return self.cache[key]
@classmethod
def get(cls, name, namespace=_NAMESPACE):
return super(OSClient, cls).get(name, namespace)
@configure("keystone", supported_versions=("2", "3"))
class Keystone(OSClient):
def keystone(self, *args, **kwargs):
raise exceptions.RallyException(_("Method 'keystone' is restricted "
"for keystoneclient. :)"))
def _remove_url_version(self):
"""Remove any version from the auth_url.
The keystone Client code requires that auth_url be the root url
if a version override is used.
"""
url = parse.urlparse(self.credential.auth_url)
# NOTE(bigjools): This assumes that non-versioned URLs have no
# path component at all.
parts = (url.scheme, url.netloc, "/", url.params, url.query,
url.fragment)
return parse.urlunparse(parts)
def create_client(self, version=None):
"""Return a keystone client.
:param version: Keystone API version, can be one of:
("2", "3")
If this object was constructed with a version in the api_info
then that will be used unless the version parameter is passed.
"""
import keystoneclient
from keystoneclient import client
        # Prefer an explicitly passed version; otherwise fall back to the
        # version from the api_info or the configured default (which may
        # be None, in which case keystoneclient chooses).
version = self.choose_version(version)
auth_url = self.credential.auth_url
if version is not None:
auth_url = self._remove_url_version()
sess, plugin = self._get_session(auth_url=auth_url, version=version)
# NOTE(bigjools): When using sessions, keystoneclient no longer
# does any pre-auth and calling client.authenticate() with
# sessions is deprecated (it's still possible to call it but if
# endpoint is defined it'll crash). We're forcing that pre-auth
# here because the use of the service_catalog depends on doing
# this. Also note that while the API has got the
# endpoints.list() equivalent, there is no service_type in that
# list which is why we need to ensure service_catalog is still
# present.
auth_ref = plugin.get_access(sess)
kw = {"version": version, "session": sess,
"timeout": CONF.openstack_client_http_timeout}
if keystoneclient.__version__[0] == "1":
# NOTE(andreykurilin): let's leave this hack for envs which uses
# old(<2.0.0) keystoneclient version. Upstream fix:
# https://github.com/openstack/python-keystoneclient/commit/d9031c252848d89270a543b67109a46f9c505c86
from keystoneclient import base
kw["auth_url"] = sess.get_endpoint(interface=base.AUTH_INTERFACE)
if self.credential.endpoint_type:
kw["endpoint_type"] = self.credential.endpoint_type
ks = client.Client(**kw)
ks.auth_ref = auth_ref
return ks
@configure("nova", default_version="2", default_service_type="compute")
class Nova(OSClient):
@classmethod
def validate_version(cls, version):
from novaclient import api_versions
from novaclient import exceptions as nova_exc
try:
api_versions.get_api_version(version)
except nova_exc.UnsupportedVersion:
raise exceptions.RallyException(
"Version string '%s' is unsupported." % version)
def create_client(self, version=None, service_type=None):
"""Return nova client."""
from novaclient import client as nova
kc = self.keystone()
client = nova.Client(self.choose_version(version),
auth_token=kc.auth_token,
http_log_debug=logging.is_debug(),
timeout=CONF.openstack_client_http_timeout,
insecure=self.credential.insecure,
**self._get_auth_info(password_key="api_key"))
client.set_management_url(self._get_endpoint(service_type))
return client
@configure("neutron", default_version="2.0", default_service_type="network",
supported_versions=["2.0"])
class Neutron(OSClient):
def create_client(self, version=None, service_type=None):
"""Return neutron client."""
from neutronclient.neutron import client as neutron
kc = self.keystone()
client = neutron.Client(self.choose_version(version),
token=kc.auth_token,
endpoint_url=self._get_endpoint(service_type),
timeout=CONF.openstack_client_http_timeout,
insecure=self.credential.insecure,
**self._get_auth_info(
project_name_key="tenant_name",
cacert_key="ca_cert"))
return client
@configure("glance", default_version="1", default_service_type="image",
supported_versions=["1", "2"])
class Glance(OSClient):
def create_client(self, version=None, service_type=None):
"""Return glance client."""
import glanceclient as glance
kc = self.keystone()
client = glance.Client(self.choose_version(version),
endpoint=self._get_endpoint(service_type),
token=kc.auth_token,
timeout=CONF.openstack_client_http_timeout,
insecure=self.credential.insecure,
cacert=self.credential.cacert)
return client
@configure("heat", default_version="1", default_service_type="orchestration",
supported_versions=["1"])
class Heat(OSClient):
def create_client(self, version=None, service_type=None):
"""Return heat client."""
from heatclient import client as heat
kc = self.keystone()
client = heat.Client(self.choose_version(version),
endpoint=self._get_endpoint(service_type),
token=kc.auth_token,
timeout=CONF.openstack_client_http_timeout,
insecure=self.credential.insecure,
**self._get_auth_info(project_name_key=None,
cacert_key="ca_file"))
return client
@configure("cinder", default_version="2", default_service_type="volumev2",
supported_versions=["1", "2"])
class Cinder(OSClient):
def create_client(self, version=None, service_type=None):
"""Return cinder client."""
from cinderclient import client as cinder
client = cinder.Client(self.choose_version(version),
http_log_debug=logging.is_debug(),
timeout=CONF.openstack_client_http_timeout,
insecure=self.credential.insecure,
**self._get_auth_info(password_key="api_key"))
kc = self.keystone()
client.client.management_url = self._get_endpoint(service_type)
client.client.auth_token = kc.auth_token
return client
@configure("manila", default_version="1", default_service_type="share",
supported_versions=["1", "2"])
class Manila(OSClient):
def create_client(self, version=None, service_type=None):
"""Return manila client."""
from manilaclient import client as manila
manila_client = manila.Client(
self.choose_version(version),
region_name=self.credential.region_name,
http_log_debug=logging.is_debug(),
timeout=CONF.openstack_client_http_timeout,
insecure=self.credential.insecure,
**self._get_auth_info(password_key="api_key",
project_name_key="project_name"))
kc = self.keystone()
manila_client.client.management_url = self._get_endpoint(service_type)
manila_client.client.auth_token = kc.auth_token
return manila_client
@configure("ceilometer", default_version="2", default_service_type="metering",
supported_versions=["1", "2"])
class Ceilometer(OSClient):
def create_client(self, version=None, service_type=None):
"""Return ceilometer client."""
from ceilometerclient import client as ceilometer
kc = self.keystone()
auth_token = kc.auth_token
if not hasattr(auth_token, "__call__"):
# python-ceilometerclient requires auth_token to be a callable
auth_token = lambda: kc.auth_token
client = ceilometer.get_client(
self.choose_version(version),
os_endpoint=self._get_endpoint(service_type),
token=auth_token,
timeout=CONF.openstack_client_http_timeout,
insecure=self.credential.insecure,
**self._get_auth_info(project_name_key="tenant_name",
endpoint_type="interface"))
return client
@configure("gnocchi", default_service_type="metric", default_version="1",
supported_versions=["1"])
class Gnocchi(OSClient):
def create_client(self, version=None, service_type=None):
"""Return gnocchi client."""
# NOTE(sumantmurke): gnocchiclient requires keystoneauth1 for
# authenticating and creating a session.
from gnocchiclient import client as gnocchi
service_type = self.choose_service_type(service_type)
sess = self._get_session()[0]
gclient = gnocchi.Client(version=self.choose_version(
version), session=sess, service_type=service_type)
return gclient
@configure("ironic", default_version="1", default_service_type="baremetal",
supported_versions=["1"])
class Ironic(OSClient):
def create_client(self, version=None, service_type=None):
"""Return Ironic client."""
from ironicclient import client as ironic
kc = self.keystone()
client = ironic.get_client(self.choose_version(version),
os_auth_token=kc.auth_token,
ironic_url=self._get_endpoint(service_type),
timeout=CONF.openstack_client_http_timeout,
insecure=self.credential.insecure,
cacert=self.credential.cacert,
interface=self._get_auth_info().get(
"endpoint_type")
)
return client
@configure("sahara", default_version="1.1", supported_versions=["1.0", "1.1"],
default_service_type="data-processing")
class Sahara(OSClient):
    # NOTE(andreykurilin): saharaclient supports the "1.0" version and
    # doesn't support "1". `choose_version` and `validate_version` are
    # written as a hack to convert 1 -> 1.0, which simplifies configuring
    # saharaclient for end-users.
def choose_version(self, version=None):
return float(super(Sahara, self).choose_version(version))
@classmethod
def validate_version(cls, version):
super(Sahara, cls).validate_version(float(version))
def create_client(self, version=None, service_type=None):
"""Return Sahara client."""
from saharaclient import client as sahara
client = sahara.Client(
self.choose_version(version),
service_type=self.choose_service_type(service_type),
insecure=self.credential.insecure,
**self._get_auth_info(password_key="api_key",
project_name_key="project_name"))
return client
@configure("zaqar", default_version="1.1", default_service_type="messaging",
supported_versions=["1", "1.1"])
class Zaqar(OSClient):
def choose_version(self, version=None):
# zaqarclient accepts only int or float obj as version
return float(super(Zaqar, self).choose_version(version))
def create_client(self, version=None, service_type=None):
"""Return Zaqar client."""
from zaqarclient.queues import client as zaqar
kc = self.keystone()
conf = {"auth_opts": {"backend": "keystone", "options": {
"os_username": self.credential.username,
"os_password": self.credential.password,
"os_project_name": self.credential.tenant_name,
"os_project_id": kc.auth_ref.get("token").get("tenant").get("id"),
"os_auth_url": self.credential.auth_url,
"insecure": self.credential.insecure,
}}}
client = zaqar.Client(url=self._get_endpoint(),
version=self.choose_version(version),
conf=conf)
return client
@configure("murano", default_version="1",
default_service_type="application-catalog",
supported_versions=["1"])
class Murano(OSClient):
def create_client(self, version=None, service_type=None):
"""Return Murano client."""
from muranoclient import client as murano
kc = self.keystone()
client = murano.Client(self.choose_version(version),
endpoint=self._get_endpoint(service_type),
token=kc.auth_token)
return client
@configure("designate", default_version="1", default_service_type="dns",
supported_versions=["1", "2"])
class Designate(OSClient):
def create_client(self, version=None, service_type=None):
"""Return designate client."""
from designateclient import client
version = self.choose_version(version)
api_url = self._get_endpoint(service_type)
api_url += "/v%s" % version
session = self._get_session()[0]
if version == "2":
return client.Client(version, session=session,
endpoint_override=api_url)
return client.Client(version, session=session,
endpoint=api_url)
@configure("trove", default_version="1.0", supported_versions=["1.0"])
class Trove(OSClient):
def create_client(self, version=None):
"""Returns trove client."""
from troveclient import client as trove
client = trove.Client(self.choose_version(version),
region_name=self.credential.region_name,
timeout=CONF.openstack_client_http_timeout,
insecure=self.credential.insecure,
**self._get_auth_info(password_key="api_key")
)
return client
@configure("mistral", default_service_type="workflowv2")
class Mistral(OSClient):
def create_client(self, service_type=None):
"""Return Mistral client."""
from mistralclient.api import client as mistral
kc = self.keystone()
client = mistral.client(
mistral_url=self._get_endpoint(service_type),
service_type=self.choose_service_type(service_type),
auth_token=kc.auth_token)
return client
@configure("swift", default_service_type="object-store")
class Swift(OSClient):
def create_client(self, service_type=None):
"""Return swift client."""
from swiftclient import client as swift
kc = self.keystone()
client = swift.Connection(retries=1,
preauthurl=self._get_endpoint(service_type),
preauthtoken=kc.auth_token,
insecure=self.credential.insecure,
cacert=self.credential.cacert,
user=self.credential.username,
tenant_name=self.credential.tenant_name,
)
return client
@configure("ec2")
class EC2(OSClient):
def create_client(self):
"""Return ec2 client."""
import boto
kc = self.keystone()
if kc.version != "v2.0":
raise exceptions.RallyException(
_("Rally EC2 benchmark currently supports only"
"Keystone version 2"))
ec2_credential = kc.ec2.create(user_id=kc.auth_user_id,
tenant_id=kc.auth_tenant_id)
client = boto.connect_ec2_endpoint(
url=self._get_endpoint(),
aws_access_key_id=ec2_credential.access,
aws_secret_access_key=ec2_credential.secret,
is_secure=self.credential.insecure)
return client
@configure("monasca", default_version="2_0",
default_service_type="monitoring", supported_versions=["2_0"])
class Monasca(OSClient):
def create_client(self, version=None, service_type=None):
"""Return monasca client."""
from monascaclient import client as monasca
kc = self.keystone()
auth_token = kc.auth_token
client = monasca.Client(
self.choose_version(version),
self._get_endpoint(service_type),
token=auth_token,
timeout=CONF.openstack_client_http_timeout,
insecure=self.credential.insecure,
**self._get_auth_info(project_name_key="tenant_name"))
return client
@configure("cue", default_version="1", default_service_type="message-broker")
class Cue(OSClient):
def create_client(self, service_type=None):
"""Return cue client."""
from cueclient.v1 import client as cue
version = self.choose_version()
api_url = self._get_endpoint(service_type)
api_url += "v%s" % version
session = self._get_session(auth_url=api_url)[0]
endpoint_type = self.credential.endpoint_type
return cue.Client(session=session, interface=endpoint_type)
@configure("senlin", default_version="1", default_service_type="clustering",
supported_versions=["1"])
class Senlin(OSClient):
def create_client(self, version=None, service_type=None):
"""Return senlin client."""
from senlinclient import client as senlin
return senlin.Client(
self.choose_version(version),
**self._get_auth_info(project_name_key="project_name",
cacert_key="cert",
endpoint_type="interface"))
@configure("magnum", default_version="1", supported_versions=["1"],
default_service_type="container-infra",)
class Magnum(OSClient):
def create_client(self, version=None, service_type=None):
"""Return magnum client."""
from magnumclient import client as magnum
api_url = self._get_endpoint(service_type)
session = self._get_session()[0]
return magnum.Client(
session=session,
interface=self.credential.endpoint_type,
magnum_url=api_url)
@configure("watcher", default_version="1", default_service_type="infra-optim",
supported_versions=["1"])
class Watcher(OSClient):
def create_client(self, version=None, service_type=None):
"""Return watcher client."""
from watcherclient import client as watcher_client
kc = self.keystone()
watcher_api_url = self._get_endpoint(
self.choose_service_type(service_type))
client = watcher_client.Client(
self.choose_version(version),
watcher_api_url,
token=kc.auth_token,
timeout=CONF.openstack_client_http_timeout,
insecure=self.credential.insecure,
ca_file=self.credential.cacert,
endpoint_type=self.credential.endpoint_type)
return client
class Clients(object):
"""This class simplify and unify work with OpenStack python clients."""
def __init__(self, credential, api_info=None):
self.credential = credential
self.api_info = api_info or {}
self.cache = {}
def __getattr__(self, client_name):
"""Lazy load of clients."""
return OSClient.get(client_name)(self.credential, self.api_info,
self.cache)
@classmethod
def create_from_env(cls):
creds = envutils.get_creds_from_env_vars()
return cls(
objects.Credential(
creds["auth_url"],
creds["admin"]["username"],
creds["admin"]["password"],
creds["admin"]["tenant_name"],
endpoint_type=creds["endpoint_type"],
user_domain_name=creds["admin"].get("user_domain_name"),
project_domain_name=creds["admin"].get("project_domain_name"),
endpoint=creds["endpoint"],
region_name=creds["region_name"],
https_cacert=creds["https_cacert"],
https_insecure=creds["https_insecure"]
))
def clear(self):
"""Remove all cached client handles."""
self.cache = {}
def verified_keystone(self):
"""Ensure keystone endpoints are valid and then authenticate
:returns: Keystone Client
"""
from keystoneclient import exceptions as keystone_exceptions
try:
# Ensure that user is admin
client = self.keystone()
if "admin" not in [role.lower() for role in
client.auth_ref.role_names]:
raise exceptions.InvalidAdminException(
username=self.credential.username)
except keystone_exceptions.Unauthorized:
raise exceptions.InvalidEndpointsException()
except keystone_exceptions.AuthorizationFailure:
raise exceptions.HostUnreachableException(
url=self.credential.auth_url)
return client
def services(self):
"""Return available services names and types.
:returns: dict, {"service_type": "service_name", ...}
"""
if "services_data" not in self.cache:
services_data = {}
ks = self.keystone()
available_services = ks.service_catalog.get_endpoints()
for stype in available_services.keys():
if stype in consts.ServiceType:
services_data[stype] = consts.ServiceType[stype]
else:
services_data[stype] = "__unknown__"
self.cache["services_data"] = services_data
return self.cache["services_data"]
|
|
from __future__ import print_function
import pickle
import cPickle
import numpy as np
import os
from sklearn.datasets import fetch_mldata
from sklearn.metrics import precision_recall_curve, accuracy_score, auc
from sklearn.metrics import precision_recall_fscore_support as prfs
def make_sure_path_exists(path):
    '''Creates path if it does not exist. This function will not
    incorrectly raise an exception if the path is created between the
    existence check and the makedirs call, so it works cross-platform
    and under concurrent (e.g. distributed) use.
http://stackoverflow.com/questions/273192/how-to-check-if-a-directory-exists-and-create-it-if-necessary
'''
import errno
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def auc_score(y, pred_prob):
'''
This function computes the Precision-Recall Area-under-the-Curve (AUC-PR) (Davis & Goadrich (2006),
http://dl.acm.org/citation.cfm?id=1143874), from the predicted P(y=1|x), and true labels y.
Parameters
----------
y : iterable (list or np.array)
True labels for the dataset.
pred_prob : iterable (list or np.array)
Predicted P(y=1|x), which may contain NaN or INF
'''
# Check for nan or inf in pred_prob (can occur with tensorflow)
    num_inf = np.sum(np.isinf(pred_prob))
    num_nan = np.sum(np.isnan(pred_prob))
    if num_nan > 0 or num_inf > 0:
        print("[Warning]: Predicted probabilities contain NaN or inf values.")
        print("[Warning]: Number of NaN values:", num_nan)
        print("[Warning]: Number of inf values:", num_inf)
    if num_inf > 0:
        # Map -inf/+inf to the extreme probabilities 0 and 1.
        pred_prob[pred_prob == -np.inf] = 0
        pred_prob[pred_prob == np.inf] = 1
    if num_nan > 0:
        pred_prob = np.nan_to_num(pred_prob)
precision, recall, _ = precision_recall_curve(y, pred_prob)
return auc(recall, precision)
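# Illustrative usage of auc_score (toy data, values made up):
#     y = np.array([0, 0, 1, 1])
#     p = np.array([0.1, 0.4, 0.35, 0.8])
#     auc_score(y, p)  # AUC-PR in [0, 1]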
def get_metrics(pred, pred_prob, y):
"""This function calculates the metrics of AUC_PR, Error, Precision, Recall, and F1 score from
true labels y, prediction pred, or predicted P(y=1|x) pred_prob.
Parameters
----------
pred : iterable (list or np.array)
Predicted labels
pred_prob : iterable (list or np.array)
Predicted P(y=1|x)
y : iterable (list or np.array)
True labels.
"""
precision, recall, f1, _ = zip(*prfs(y, pred))[1]
error = 1 - accuracy_score(y, pred)
area_under_curve = auc_score(y, pred_prob)
metrics_dict = {
"AUC": area_under_curve,
"Error": error,
"Precision": precision,
"Recall": recall,
"F1 score": f1,
}
return metrics_dict
def get_mldata(dataset_name):
dataset = fetch_mldata(dataset_name)
X = dataset.data
y = dataset.target
print("{0} fetched. Data shape: {1}, target shape: {2}".format(dataset_name, X.shape, y.shape))
if y.ndim > 1:
print("Warning: target is larger than 1D!")
return X, y
def verify_md5(fname, md5sum):
"""This function checkes whether the file with filename fname has the md5sum required.
If not, raise error.
Parameters
----------
fname : string
filename for the file to check for md5sum.
md5sum : string
        md5sum required for the file. A mismatch indicates that the file
        is corrupted.
"""
import hashlib
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
if md5sum != hash_md5.hexdigest():
        raise IOError("File '%s': invalid md5sum! You may want to delete "
                      "this corrupted file..." % fname)
def download(url, output_filename, md5sum = None):
"""This function downloads the file located at 'url' and stores it on disk at location 'output_filename'
    This function is adapted from
https://github.com/ivanov/scikits.data/blob/master/datasets/utils/download_and_extract.py,
under the BSD 3 clause license.
"""
from urllib2 import urlopen
from IPython.display import clear_output
page = urlopen(url)
page_info = page.info()
output_file = open(output_filename, 'wb+')
# size of the download unit
block_size = 2 ** 15
dl_size = 0
# display progress only if we know the length
if 'content-length' in page_info:
# file size in Kilobytes
file_size = int(page_info['content-length']) / 1024.
while True:
buffer = page.read(block_size)
if not buffer:
break
dl_size += block_size / 1024
output_file.write(buffer)
percent = min(100, 100. * dl_size / file_size)
status = r"Progress: %20d kilobytes [%4.1f%%]" \
% (dl_size, percent)
status = status + chr(8) * (len(status) + 1)
clear_output(wait=True)
print(status, end="")
print('')
else:
output_file.write(page.read())
output_file.close()
if md5sum is not None:
verify_md5(output_filename, md5sum)
def extract(archive_filename, output_dirname, md5sum = None):
"""This function extracts 'archive_filename' into 'output_dirname'.
    It is adapted from
https://github.com/ivanov/scikits.data/blob/master/datasets/utils/download_and_extract.py,
under the BSD 3 clause license.
Supported archives:
-------------------
* Zip formats and equivalents: .zip, .egg, .jar
* Tar and compressed tar formats: .tar, .tar.gz, .tgz, .tar.bz2, .tz2
"""
import archive
    if md5sum is not None:
        print(" md5 verification...")
        verify_md5(archive_filename, md5sum)
archive.extract(archive_filename, output_dirname)
def download_and_extract(url, output_dirname, md5sum = None):
"""This function downloads and extracts archive in 'url' into 'output_dirname'.
Note that 'output_dirname' has to exist and won't be created by this function.
    This function is adapted from
https://github.com/ivanov/scikits.data/blob/master/datasets/utils/download_and_extract.py,
under the BSD 3 clause license.
"""
archive_basename = os.path.basename(url)
archive_filename = os.path.join(output_dirname, archive_basename)
download(url, archive_filename, md5sum = md5sum)
extract(archive_filename, output_dirname)
def unpickle(file):
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
def get_MNIST():
X, y = get_mldata('MNIST original')
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
print("Length: training set: {0}, testing set {1}".format(60000, 10000))
return (X_train, y_train), (X_test, y_test)
def get_CIFAR():
# Check if CIFAR-10 dataset is already downloaded. If not download and extract:
if not os.path.isdir("cifar-10-batches-py"):
print("Missing 'cifar-10-batches-py' directory for CIFAR-10, downloading CIFAR-10 data (may take > 1 min)...")
print('Note that Python package "archive" must be installed to extract CIFAR-10 data.')
        # Download and extract CIFAR-10 data (uses the "archive" package).
        import archive
download_and_extract("https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz",
"", md5sum = "c58f30108f718f92721af3b95e74349a")
# Process CIFAR-10 data:
cifar_meta = unpickle("cifar-10-batches-py/batches.meta")
cifar1 = unpickle("cifar-10-batches-py/data_batch_1")
cifar2 = unpickle("cifar-10-batches-py/data_batch_2")
cifar3 = unpickle("cifar-10-batches-py/data_batch_3")
cifar4 = unpickle("cifar-10-batches-py/data_batch_4")
cifar5 = unpickle("cifar-10-batches-py/data_batch_5")
cifar_test_batch = unpickle("cifar-10-batches-py/test_batch")
print("| ", end="")
for i, item in enumerate(cifar_meta['label_names']):
print(i, "=>", item, "| ", end="")
print("")
# Store all 5 (X, y) training pairs.
training_sets = [(eval("cifar" + str(i))["data"], np.array(eval("cifar" + str(i))["labels"], dtype=int))
for i in range(1,6)]
X_test = cifar_test_batch["data"]
y_test = np.array(cifar_test_batch["labels"], dtype=int)
testing_set = (X_test, y_test)
for i in range(1,6):
exec("del cifar" + str(i))
del cifar_test_batch
X_train = training_sets[0][0]
y_train = training_sets[0][1]
for i in range(1, len(training_sets)):
X_train = np.vstack((X_train, training_sets[i][0]))
y_train = np.concatenate((y_train, training_sets[i][1]))
return (X_train, y_train), (X_test, y_test)
def get_dataset(dataset = "mnist"):
"""This function fetches the MNIST or CIFAR-10 dataset, and returns in the format of
(X_train, y_train), (X_test, y_test).
Parameters
----------
dataset : string
choose between "mnist" and "cifar", and the function will return MNIST or CIFAR-10 dataset, respectively.
"""
if dataset == "mnist":
return get_MNIST()
elif dataset == "cifar":
return get_CIFAR()
def downsample(X, y, downsample_ratio, useFixedSeed = True):
"""Downsample the dataset with downsample_ratio.
Parameters
----------
X : np.array
Input feature matrix (N, D), 2D numpy array
y : np.array
A binary vector of labels
downsample_ratio : float
Fraction of examples to randomly sample from (X, y).
useFixedSeed : bool
If true, use a fixed seed for numpy's random number generator.
"""
if useFixedSeed:
np.random.seed(42) # Always downsample the same examples.
mask_sample = np.random.choice(len(X), size=int(len(X) * downsample_ratio), replace=False)
X = X[mask_sample]
y = y[mask_sample]
return X, y
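if __name__ == '__main__':
    # Minimal smoke test for downsample (illustrative toy data only).
    X_demo = np.arange(20).reshape(10, 2)
    y_demo = np.array([0, 1] * 5)
    X_small, y_small = downsample(X_demo, y_demo, 0.5)
    print(X_small.shape)  # (5, 2)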
|
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Service providing handler
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.5.7
:status: Beta
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 5, 7)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Pelix beans
from pelix.constants import BundleActivator, BundleException
# iPOPO constants
import pelix.ipopo.constants as ipopo_constants
import pelix.ipopo.handlers.constants as constants
# Standard library
import logging
# ------------------------------------------------------------------------------
class _HandlerFactory(constants.HandlerFactory):
"""
Factory service for service registration handlers
"""
def get_handlers(self, component_context, instance):
"""
Sets up service providers for the given component
:param component_context: The ComponentContext bean
:param instance: The component instance
:return: The list of handlers associated to the given component
"""
# Retrieve the handler configuration
provides = component_context.get_handler(
ipopo_constants.HANDLER_PROVIDES)
if not provides:
# Nothing to do
return tuple()
# 1 handler per provided service
return [ServiceRegistrationHandler(specs, controller)
for specs, controller in provides]
@BundleActivator
class _Activator(object):
"""
The bundle activator
"""
def __init__(self):
"""
Sets up members
"""
self._registration = None
def start(self, context):
"""
Bundle started
"""
# Set up properties
properties = {constants.PROP_HANDLER_ID:
ipopo_constants.HANDLER_PROVIDES}
# Register the handler factory service
self._registration = context.register_service(
constants.SERVICE_IPOPO_HANDLER_FACTORY,
_HandlerFactory(), properties)
def stop(self, context):
"""
Bundle stopped
"""
# Unregister the service
self._registration.unregister()
self._registration = None
# ------------------------------------------------------------------------------
class ServiceRegistrationHandler(constants.ServiceProviderHandler):
"""
Handles the registration of a service provided by a component
"""
def __init__(self, specifications, controller_name):
"""
Sets up the handler
:param specifications: The service specifications
:param controller_name: Name of the associated service controller
(can be None)
"""
self.specifications = specifications
self.__controller = controller_name
self._ipopo_instance = None
# Controller is "on" by default
self.__controller_on = True
self.__validated = False
# The ServiceRegistration and ServiceReference objects
self._registration = None
self._svc_reference = None
def _field_controller_generator(self):
"""
Generates the methods called by the injected controller
"""
# Local variable, to avoid messing with "self"
stored_instance = self._ipopo_instance
def get_value(self, name):
"""
Retrieves the controller value, from the iPOPO dictionaries
:param name: The property name
:return: The property value
"""
return stored_instance.get_controller_state(name)
def set_value(self, name, new_value):
"""
            Sets the property value and triggers an update event
:param name: The property name
:param new_value: The new property value
"""
# Get the previous value
old_value = stored_instance.get_controller_state(name)
            if new_value != old_value:
                # Update the controller state
                stored_instance.set_controller_state(name, new_value)
            return new_value
return get_value, set_value
def manipulate(self, stored_instance, component_instance):
"""
Manipulates the component instance
:param stored_instance: The iPOPO component StoredInstance
:param component_instance: The component instance
"""
# Store the stored instance
self._ipopo_instance = stored_instance
if self.__controller is None:
# No controller: do nothing
return
# Get the current value of the member (True by default)
controller_value = getattr(component_instance, self.__controller, True)
# Store the controller value
stored_instance.set_controller_state(self.__controller,
controller_value)
# Prepare the methods names
getter_name = "{0}{1}" \
.format(ipopo_constants.IPOPO_CONTROLLER_PREFIX,
ipopo_constants.IPOPO_GETTER_SUFFIX)
setter_name = "{0}{1}" \
.format(ipopo_constants.IPOPO_CONTROLLER_PREFIX,
ipopo_constants.IPOPO_SETTER_SUFFIX)
# Inject the getter and setter at the instance level
getter, setter = self._field_controller_generator()
setattr(component_instance, getter_name, getter)
setattr(component_instance, setter_name, setter)
def check_event(self, svc_event):
"""
        Tests if the given service event must be handled or ignored, based
        on the service reference kept by this handler
        :param svc_event: A service event
        :return: True if the event does not concern the service provided by
                 this handler (i.e. the event can be handled)
"""
return self._svc_reference is not svc_event.get_service_reference()
def get_kinds(self):
"""
Retrieves the kinds of this handler: 'service_provider'
:return: the kinds of this handler
"""
return constants.KIND_SERVICE_PROVIDER,
def get_service_reference(self):
"""
Retrieves the reference of the provided service
:return: A ServiceReference object
"""
return self._svc_reference
def on_controller_change(self, name, value):
"""
Called by the instance manager when a controller value has been
modified
:param name: The name of the controller
:param value: The new value of the controller
"""
if self.__controller != name:
# Nothing to do
return
# Update the controller value
self.__controller_on = value
if value:
# Controller switched to "ON"
self._register_service()
else:
# Controller switched to "OFF"
self._unregister_service()
def on_property_change(self, name, old_value, new_value):
"""
Called by the instance manager when a component property is modified
:param name: The changed property name
:param old_value: The previous property value
:param new_value: The new property value
"""
if self._registration is not None:
# use the registration to trigger the service event
self._registration.set_properties({name: new_value})
def post_validate(self):
"""
Called by the instance manager once the component has been validated
"""
# Update the validation flag
self.__validated = True
self._register_service()
def pre_invalidate(self):
"""
Called by the instance manager before the component is invalidated
"""
# Update the validation flag
self.__validated = False
# Force service unregistration
self._unregister_service()
def _register_service(self):
"""
Registers the provided service, if possible
"""
if self._registration is None and self.specifications \
and self.__validated and self.__controller_on:
# Use a copy of component properties
properties = self._ipopo_instance.context.properties.copy()
bundle_context = self._ipopo_instance.bundle_context
# Register the service
self._registration = bundle_context.register_service(
self.specifications, self._ipopo_instance.instance, properties)
self._svc_reference = self._registration.get_reference()
def _unregister_service(self):
"""
Unregisters the provided service, if needed
"""
if self._registration is not None:
# Ignore error
try:
self._registration.unregister()
except BundleException as ex:
# Only log the error at this level
logger = logging.getLogger('-'.join((self._ipopo_instance.name,
'ServiceRegistration')))
logger.error("Error unregistering a service: %s", ex)
self._registration = None
self._svc_reference = None
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
# Copyright 2011 Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import imp
import os
import StringIO
import sys
from nova import context
from nova import db
from nova import exception
from nova import test
from nova.tests.db import fakes as db_fakes
TOPDIR = os.path.normpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
os.pardir,
os.pardir))
NOVA_MANAGE_PATH = os.path.join(TOPDIR, 'bin', 'nova-manage')
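# nova-manage has no .py extension, so it is loaded as a module by path;
# bytecode generation is disabled around the import to avoid leaving a
# stray .pyc next to the script.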
sys.dont_write_bytecode = True
nova_manage = imp.load_source('nova_manage', NOVA_MANAGE_PATH)
sys.dont_write_bytecode = False
class FixedIpCommandsTestCase(test.TestCase):
def setUp(self):
super(FixedIpCommandsTestCase, self).setUp()
db_fakes.stub_out_db_network_api(self.stubs)
self.commands = nova_manage.FixedIpCommands()
def test_reserve(self):
self.commands.reserve('192.168.0.100')
address = db.fixed_ip_get_by_address(context.get_admin_context(),
'192.168.0.100')
self.assertEqual(address['reserved'], True)
def test_reserve_nonexistent_address(self):
self.assertRaises(SystemExit,
self.commands.reserve,
'55.55.55.55')
def test_unreserve(self):
self.commands.unreserve('192.168.0.100')
address = db.fixed_ip_get_by_address(context.get_admin_context(),
'192.168.0.100')
self.assertEqual(address['reserved'], False)
def test_unreserve_nonexistent_address(self):
self.assertRaises(SystemExit,
self.commands.unreserve,
'55.55.55.55')
class FloatingIpCommandsTestCase(test.TestCase):
def setUp(self):
super(FloatingIpCommandsTestCase, self).setUp()
db_fakes.stub_out_db_network_api(self.stubs)
self.commands = nova_manage.FloatingIpCommands()
def test_address_to_hosts(self):
def assert_loop(result, expected):
for ip in result:
self.assertTrue(str(ip) in expected)
address_to_hosts = self.commands.address_to_hosts
# /32 and /31
self.assertRaises(exception.InvalidInput, address_to_hosts,
'192.168.100.1/32')
self.assertRaises(exception.InvalidInput, address_to_hosts,
'192.168.100.1/31')
# /30
expected = ["192.168.100.%s" % i for i in range(1, 3)]
        result = list(address_to_hosts('192.168.100.0/30'))
        self.assertEqual(len(result), 2)
        assert_loop(result, expected)
# /29
expected = ["192.168.100.%s" % i for i in range(1, 7)]
        result = list(address_to_hosts('192.168.100.0/29'))
        self.assertEqual(len(result), 6)
        assert_loop(result, expected)
# /28
expected = ["192.168.100.%s" % i for i in range(1, 15)]
        result = list(address_to_hosts('192.168.100.0/28'))
        self.assertEqual(len(result), 14)
        assert_loop(result, expected)
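# Illustrative sketch (not part of the original tests, and not necessarily the
# actual nova implementation): address_to_hosts is expected to behave like
# netaddr's iter_hosts(), yielding the usable addresses between the network
# and broadcast addresses.
#
#   import netaddr
#   hosts = [str(ip)
#            for ip in netaddr.IPNetwork('192.168.100.0/30').iter_hosts()]
#   # hosts == ['192.168.100.1', '192.168.100.2']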
class NetworkCommandsTestCase(test.TestCase):
def setUp(self):
super(NetworkCommandsTestCase, self).setUp()
self.commands = nova_manage.NetworkCommands()
self.net = {'id': 0,
'label': 'fake',
'injected': False,
'cidr': '192.168.0.0/24',
'cidr_v6': 'dead:beef::/64',
'multi_host': False,
'gateway_v6': 'dead:beef::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '8.8.8.8',
'dns2': '8.8.4.4',
'vlan': 200,
'vpn_public_address': '10.0.0.2',
'vpn_public_port': '2222',
'vpn_private_address': '192.168.0.2',
'dhcp_start': '192.168.0.3',
'project_id': 'fake_project',
'host': 'fake_host',
'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}
def fake_network_get_by_cidr(context, cidr):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(cidr, self.fake_net['cidr'])
return db_fakes.FakeModel(self.fake_net)
def fake_network_get_by_uuid(context, uuid):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(uuid, self.fake_net['uuid'])
return db_fakes.FakeModel(self.fake_net)
def fake_network_update(context, network_id, values):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.assertEqual(values, self.fake_update_value)
self.fake_network_get_by_cidr = fake_network_get_by_cidr
self.fake_network_get_by_uuid = fake_network_get_by_uuid
self.fake_network_update = fake_network_update
def test_create(self):
def fake_create_networks(obj, context, **kwargs):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(kwargs['label'], 'Test')
self.assertEqual(kwargs['cidr'], '10.2.0.0/24')
self.assertEqual(kwargs['multi_host'], False)
self.assertEqual(kwargs['num_networks'], 1)
self.assertEqual(kwargs['network_size'], 256)
self.assertEqual(kwargs['vlan_start'], 200)
self.assertEqual(kwargs['vpn_start'], 2000)
self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120')
self.assertEqual(kwargs['gateway'], '10.2.0.1')
self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22')
self.assertEqual(kwargs['bridge'], 'br200')
self.assertEqual(kwargs['bridge_interface'], 'eth0')
self.assertEqual(kwargs['dns1'], '8.8.8.8')
self.assertEqual(kwargs['dns2'], '8.8.4.4')
self.flags(network_manager='nova.network.manager.VlanManager')
from nova.network import manager as net_manager
self.stubs.Set(net_manager.VlanManager, 'create_networks',
fake_create_networks)
self.commands.create(
label='Test',
cidr='10.2.0.0/24',
num_networks=1,
network_size=256,
multi_host='F',
vlan_start=200,
vpn_start=2000,
cidr_v6='fd00:2::/120',
gateway='10.2.0.1',
gateway_v6='fd00:2::22',
bridge='br200',
bridge_interface='eth0',
dns1='8.8.8.8',
dns2='8.8.4.4',
uuid='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
def test_list(self):
def fake_network_get_all(context):
return [db_fakes.FakeModel(self.net)]
self.stubs.Set(db, 'network_get_all', fake_network_get_all)
output = StringIO.StringIO()
sys.stdout = output
self.commands.list()
sys.stdout = sys.__stdout__
result = output.getvalue()
_fmt = "\t".join(["%(id)-5s", "%(cidr)-18s", "%(cidr_v6)-15s",
"%(dhcp_start)-15s", "%(dns1)-15s", "%(dns2)-15s",
"%(vlan)-15s", "%(project_id)-15s", "%(uuid)-15s"])
head = _fmt % {'id': _('id'),
'cidr': _('IPv4'),
'cidr_v6': _('IPv6'),
'dhcp_start': _('start address'),
'dns1': _('DNS1'),
'dns2': _('DNS2'),
'vlan': _('VlanID'),
'project_id': _('project'),
'uuid': _("uuid")}
body = _fmt % {'id': self.net['id'],
'cidr': self.net['cidr'],
'cidr_v6': self.net['cidr_v6'],
'dhcp_start': self.net['dhcp_start'],
'dns1': self.net['dns1'],
'dns2': self.net['dns2'],
'vlan': self.net['vlan'],
'project_id': self.net['project_id'],
'uuid': self.net['uuid']}
answer = '%s\n%s\n' % (head, body)
self.assertEqual(result, answer)
def test_delete(self):
self.fake_net = self.net
self.fake_net['project_id'] = None
self.fake_net['host'] = None
self.stubs.Set(db, 'network_get_by_uuid',
self.fake_network_get_by_uuid)
def fake_network_delete_safe(context, network_id):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
self.commands.delete(uuid=self.fake_net['uuid'])
def test_delete_by_cidr(self):
self.fake_net = self.net
self.fake_net['project_id'] = None
self.fake_net['host'] = None
self.stubs.Set(db, 'network_get_by_cidr',
self.fake_network_get_by_cidr)
def fake_network_delete_safe(context, network_id):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
self.commands.delete(fixed_range=self.fake_net['cidr'])
def _test_modify_base(self, update_value, project, host, dis_project=None,
dis_host=None):
self.fake_net = self.net
self.fake_update_value = update_value
self.stubs.Set(db, 'network_get_by_cidr',
self.fake_network_get_by_cidr)
self.stubs.Set(db, 'network_update', self.fake_network_update)
self.commands.modify(self.fake_net['cidr'], project=project, host=host,
dis_project=dis_project, dis_host=dis_host)
def test_modify_associate(self):
self._test_modify_base(update_value={'project_id': 'test_project',
'host': 'test_host'},
project='test_project', host='test_host')
def test_modify_unchanged(self):
self._test_modify_base(update_value={}, project=None, host=None)
def test_modify_disassociate(self):
self._test_modify_base(update_value={'project_id': None, 'host': None},
project=None, host=None, dis_project=True,
dis_host=True)
class InstanceTypeCommandsTestCase(test.TestCase):
def setUp(self):
super(InstanceTypeCommandsTestCase, self).setUp()
values = dict(name="test.small",
memory_mb=220,
vcpus=1,
root_gb=16,
ephemeral_gb=32,
flavorid=105)
ref = db.instance_type_create(context.get_admin_context(),
values)
self.instance_type_name = ref["name"]
self.instance_type_id = ref["id"]
self.instance_type_flavorid = ref["flavorid"]
self.set_key = nova_manage.InstanceTypeCommands().set_key
self.unset_key = nova_manage.InstanceTypeCommands().unset_key
def tearDown(self):
db.instance_type_destroy(context.get_admin_context(),
"test.small")
super(InstanceTypeCommandsTestCase, self).tearDown()
def _test_extra_specs_empty(self):
empty_specs = {}
actual_specs = db.instance_type_extra_specs_get(
context.get_admin_context(),
self.instance_type_id)
self.assertEquals(empty_specs, actual_specs)
def test_extra_specs_set_unset(self):
expected_specs = {'k1': 'v1'}
self._test_extra_specs_empty()
self.set_key(self.instance_type_name, "k1", "v1")
actual_specs = db.instance_type_extra_specs_get(
context.get_admin_context(),
self.instance_type_flavorid)
self.assertEquals(expected_specs, actual_specs)
self.unset_key(self.instance_type_name, "k1")
self._test_extra_specs_empty()
def test_extra_specs_update(self):
expected_specs = {'k1': 'v1'}
updated_specs = {'k1': 'v2'}
self._test_extra_specs_empty()
self.set_key(self.instance_type_name, "k1", "v1")
actual_specs = db.instance_type_extra_specs_get(
context.get_admin_context(),
self.instance_type_flavorid)
self.assertEquals(expected_specs, actual_specs)
self.set_key(self.instance_type_name, "k1", "v2")
actual_specs = db.instance_type_extra_specs_get(
context.get_admin_context(),
self.instance_type_flavorid)
self.assertEquals(updated_specs, actual_specs)
self.unset_key(self.instance_type_name, "k1")
def test_extra_specs_multiple(self):
two_items_extra_specs = {'k1': 'v1',
'k3': 'v3'}
self._test_extra_specs_empty()
self.set_key(self.instance_type_name, "k1", "v1")
self.set_key(self.instance_type_name, "k3", "v3")
actual_specs = db.instance_type_extra_specs_get(
context.get_admin_context(),
self.instance_type_flavorid)
self.assertEquals(two_items_extra_specs, actual_specs)
self.unset_key(self.instance_type_name, "k1")
self.unset_key(self.instance_type_name, "k3")
class ProjectCommandsTestCase(test.TestCase):
def setUp(self):
super(ProjectCommandsTestCase, self).setUp()
self.commands = nova_manage.ProjectCommands()
def test_quota(self):
output = StringIO.StringIO()
sys.stdout = output
self.commands.quota(project_id='admin',
key='volumes',
value='unlimited',
)
sys.stdout = sys.__stdout__
result = output.getvalue()
self.assertEquals(('volumes: unlimited' in result), True)
def test_quota_update_invalid_key(self):
self.assertRaises(SystemExit,
self.commands.quota, 'admin', 'volumes1', '10'
)
|
|
#!/usr/bin/env python3
"""
SAMTools - Tools (written in C using htslib) for manipulating next-generation
sequencing data
https://github.com/samtools/samtools
"""
import os
import warnings
from typing import Iterable, Optional
import paleomix.common.versions as versions
from paleomix.common.command import (
AtomicCmd,
InputFile,
OptionsType,
OutputFile,
ParallelCmds,
SequentialCmds,
TempInputFile,
TempOutputFile,
)
from paleomix.common.fileutils import describe_files
from paleomix.node import CommandNode, Node
_VERSION_REGEX = r"Version: (\d+\.\d+)(?:\.(\d+))?"
SAMTOOLS_VERSION = versions.Requirement(
call=("samtools",),
regexp=_VERSION_REGEX,
specifiers=">=1.6.0",
)
BCFTOOLS_VERSION = versions.Requirement(
call=("bcftools",),
regexp=_VERSION_REGEX,
specifiers=">=1.4.0",
)
TABIX_VERSION = versions.Requirement(
call=("tabix",),
regexp=_VERSION_REGEX,
specifiers=">=1.3.1",
)
class TabixIndexNode(CommandNode):
"""Tabix indexes a BGZip compressed VCF or pileup file."""
def __init__(
self,
infile: str,
preset: str = "vcf",
options: OptionsType = {},
dependencies: Iterable[Node] = (),
):
if preset not in ("vcf", "gff", "bed", "sam"):
raise ValueError(preset)
basename = os.path.basename(infile)
infile = os.path.abspath(infile)
# Tabix does not support a custom output path, so we create a symlink to the
# input file in the temporary folder and index that.
link = AtomicCmd(["ln", "-s", InputFile(infile), TempOutputFile(basename)])
tabix = AtomicCmd(
["tabix", "-p", preset, TempInputFile(basename)],
extra_files=[OutputFile(infile + ".tbi")],
requirements=[TABIX_VERSION],
)
tabix.append_options(options)
CommandNode.__init__(
self,
command=SequentialCmds([link, tabix]),
description="creating tabix %s index for %s" % (preset, infile),
dependencies=dependencies,
)
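# Minimal usage sketch (hypothetical file name): index a bgzip-compressed VCF,
# producing 'calls.vcf.gz.tbi' next to the input file.
#
#   node = TabixIndexNode(infile="calls.vcf.gz", preset="vcf")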
class FastaIndexNode(CommandNode):
"""Indexed a FASTA file using 'samtools faidx'."""
def __init__(self, infile: str, dependencies: Iterable[Node] = ()):
basename = os.path.basename(infile)
# faidx does not support a custom output path, so we create a symlink to the
# input file in the temporary folder and index that.
link = AtomicCmd(
[
"ln",
"-s",
InputFile(os.path.abspath(infile)),
TempOutputFile(basename),
]
)
faidx = AtomicCmd(
["samtools", "faidx", TempInputFile(basename)],
extra_files=[OutputFile(infile + ".fai")],
requirements=[SAMTOOLS_VERSION],
)
CommandNode.__init__(
self,
description="creating FAI index for %s" % (infile,),
command=SequentialCmds([link, faidx]),
dependencies=dependencies,
)
class BAMIndexNode(CommandNode):
"""Indexed a BAM file using 'samtools index'."""
def __init__(
self,
infile: str,
index_format: str = ".bai",
options: OptionsType = {},
dependencies: Iterable[Node] = (),
):
command = AtomicCmd(
["samtools", "index"],
requirements=[SAMTOOLS_VERSION],
)
if index_format == ".csi":
command.append("-c")
elif index_format != ".bai":
raise ValueError("Unknown BAM index format %r" % (index_format,))
command.append_options(options)
command.append(
InputFile(infile),
OutputFile(infile + index_format),
)
CommandNode.__init__(
self,
command=command,
description="creating %s index for %s" % (index_format[1:].upper(), infile),
threads=_get_number_of_threads(options),
dependencies=dependencies,
)
class BAMStatsNode(CommandNode):
METHODS = ("stats", "idxstats", "flagstats")
def __init__(
self,
method: str,
infile: str,
outfile: str,
index_format: str = ".bai",
options: OptionsType = {},
dependencies: Iterable[Node] = (),
):
if method not in self.METHODS:
raise ValueError(method)
command = AtomicCmd(
["samtools", method, InputFile(infile)],
stdout=outfile,
requirements=[SAMTOOLS_VERSION],
)
command.append_options(options)
if method == "idxstats":
command.add_extra_files([InputFile(infile + index_format)])
CommandNode.__init__(
self,
command=command,
description="collecting %s for %s" % (method, infile),
threads=_get_number_of_threads(options),
dependencies=dependencies,
)
class BAMMergeNode(CommandNode):
def __init__(
self,
in_files: Iterable[str],
out_file: str,
options: OptionsType = {},
dependencies: Iterable[Node] = (),
):
in_files = tuple(in_files)
if not in_files:
raise ValueError("no input files for samtools merge")
elif len(in_files) == 1:
# FIXME: hardlinking is faster, but could have unintended side-effects
cmd = AtomicCmd(["cp", InputFile(in_files[0]), OutputFile(out_file)])
threads = 1
else:
cmd = AtomicCmd(
["samtools", "merge"],
requirements=[SAMTOOLS_VERSION],
)
cmd.append_options(options)
cmd.append(OutputFile(out_file))
for in_file in in_files:
cmd.append(InputFile(in_file))
if "--write-index" in options:
cmd.add_extra_files([OutputFile(out_file + ".csi")])
threads = _get_number_of_threads(options)
CommandNode.__init__(
self,
command=cmd,
description="merging %i files into %s" % (len(in_files), out_file),
threads=threads,
dependencies=dependencies,
)
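# Minimal usage sketch (hypothetical file names): merge two BAMs; passing the
# '--write-index' flag (value None, i.e. an option without an argument, as
# used elsewhere in this module) additionally registers the .csi index as an
# output of the node.
#
#   node = BAMMergeNode(in_files=["a.bam", "b.bam"], out_file="merged.bam",
#                       options={"--write-index": None})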
class MarkDupNode(CommandNode):
def __init__(
self,
in_bams: Iterable[str],
out_bam: str,
out_stats: Optional[str] = None,
options: OptionsType = {},
dependencies: Iterable[Node] = (),
):
in_bams = tuple(in_bams)
if len(in_bams) > 1:
merge = AtomicCmd(
["samtools", "merge", "-u", "-"],
stdout=AtomicCmd.PIPE,
requirements=[SAMTOOLS_VERSION],
)
for in_file in in_bams:
merge.append(InputFile(in_file))
markdup = AtomicCmd(
["samtools", "markdup", "-", OutputFile(out_bam)],
stdin=merge,
# Stderr is piped instead of saved using -f to support samtools < v1.10
stderr=out_stats,
requirements=[SAMTOOLS_VERSION],
)
command = ParallelCmds([merge, markdup])
else:
(in_file,) = in_bams
command = markdup = AtomicCmd(
["samtools", "markdup", InputFile(in_file), OutputFile(out_bam)],
requirements=[SAMTOOLS_VERSION],
)
fixed_options: OptionsType = {"-T": "%(TEMP_DIR)s/markdup"}
if out_stats is not None:
fixed_options["-s"] = None
markdup.merge_options(
user_options=options,
fixed_options=fixed_options,
blacklisted_options=["-f"],
)
CommandNode.__init__(
self,
command=command,
description="marking PCR duplicates in {}".format(describe_files(in_bams)),
threads=_get_number_of_threads(options),
dependencies=dependencies,
)
def merge_bam_files_command(input_files: Iterable[str]):
merge = AtomicCmd(
["samtools", "merge", "-u", "-"],
stdout=AtomicCmd.PIPE,
requirements=[SAMTOOLS_VERSION],
)
for filename in input_files:
merge.append(InputFile(filename))
return merge
def _get_number_of_threads(options: OptionsType, default: int = 1) -> int:
if "-@" in options and "--threads" in options:
raise ValueError("cannot use both -@ and --threads: {!r}".format(options))
value = options.get("-@", options.get("--threads", default))
if not isinstance(value, int):
raise ValueError(f"invalid number of samtools threads: {value}")
    # -@/--threads specify threads in *addition* to the main thread, but in
    # practice the number of cores used seems to be closer to that value, not
    # value + 1.
return max(1, value)
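# Illustrative thread-count resolution (assumed option dicts):
#
#   _get_number_of_threads({})                  # -> 1 (default)
#   _get_number_of_threads({"-@": 4})           # -> 4
#   _get_number_of_threads({"--threads": 0})    # -> 1 (clamped to at least one)
#   _get_number_of_threads({"-@": 2, "--threads": 2})  # raises ValueError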
|
|
# Copyright 2014 OpenStack Foundation
# Copyright 2014 Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations.
For the opportunistic testing you need to set up a db named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost.
The test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands:
sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner openstack_citest;
"""
import os
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import utils as db_utils
from sahara.tests.unit.db.migration import test_migrations_base as base
class SaharaMigrationsCheckers(object):
snake_walk = True
downgrade = True
def assertColumnExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
self.assertIn(column, t.c)
def assertColumnsExists(self, engine, table, columns):
for column in columns:
self.assertColumnExists(engine, table, column)
def assertColumnCount(self, engine, table, columns):
t = db_utils.get_table(engine, table)
self.assertEqual(len(t.columns), len(columns))
def assertColumnNotExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
self.assertNotIn(column, t.c)
def assertIndexExists(self, engine, table, index):
t = db_utils.get_table(engine, table)
index_names = [idx.name for idx in t.indexes]
self.assertIn(index, index_names)
def assertIndexMembers(self, engine, table, index, members):
self.assertIndexExists(engine, table, index)
t = db_utils.get_table(engine, table)
index_columns = None
for idx in t.indexes:
if idx.name == index:
index_columns = idx.columns.keys()
break
self.assertEqual(sorted(members), sorted(index_columns))
def test_walk_versions(self):
self.walk_versions(self.engine, self.snake_walk, self.downgrade)
def _pre_upgrade_001(self, engine):
# Anything returned from this method will be
# passed to corresponding _check_xxx method as 'data'.
pass
def _check_001(self, engine, data):
job_binary_internal_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'data',
'datasize'
]
self.assertColumnsExists(
engine, 'job_binary_internal', job_binary_internal_columns)
self.assertColumnCount(
engine, 'job_binary_internal', job_binary_internal_columns)
node_group_templates_columns = [
'created_at',
'updated_at',
'id',
'name',
'description',
'tenant_id',
'flavor_id',
'image_id',
'plugin_name',
'hadoop_version',
'node_processes',
'node_configs',
'volumes_per_node',
'volumes_size',
'volume_mount_prefix',
'floating_ip_pool'
]
self.assertColumnsExists(
engine, 'node_group_templates', node_group_templates_columns)
self.assertColumnCount(
engine, 'node_group_templates', node_group_templates_columns)
data_sources_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'description',
'type',
'url',
'credentials'
]
self.assertColumnsExists(
engine, 'data_sources', data_sources_columns)
self.assertColumnCount(
engine, 'data_sources', data_sources_columns)
cluster_templates_columns = [
'created_at',
'updated_at',
'id',
'name',
'description',
'cluster_configs',
'default_image_id',
'anti_affinity',
'tenant_id',
'neutron_management_network',
'plugin_name',
'hadoop_version'
]
self.assertColumnsExists(
engine, 'cluster_templates', cluster_templates_columns)
self.assertColumnCount(
engine, 'cluster_templates', cluster_templates_columns)
job_binaries_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'description',
'url',
'extra'
]
self.assertColumnsExists(
engine, 'job_binaries', job_binaries_columns)
self.assertColumnCount(
engine, 'job_binaries', job_binaries_columns)
jobs_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'description',
'type'
]
self.assertColumnsExists(engine, 'jobs', jobs_columns)
self.assertColumnCount(engine, 'jobs', jobs_columns)
templates_relations_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'flavor_id',
'image_id',
'node_processes',
'node_configs',
'volumes_per_node',
'volumes_size',
'volume_mount_prefix',
'count',
'cluster_template_id',
'node_group_template_id',
'floating_ip_pool'
]
self.assertColumnsExists(
engine, 'templates_relations', templates_relations_columns)
self.assertColumnCount(
engine, 'templates_relations', templates_relations_columns)
mains_association_columns = [
'Job_id',
'JobBinary_id'
]
self.assertColumnsExists(
engine, 'mains_association', mains_association_columns)
self.assertColumnCount(
engine, 'mains_association', mains_association_columns)
libs_association_columns = [
'Job_id',
'JobBinary_id'
]
self.assertColumnsExists(
engine, 'libs_association', libs_association_columns)
self.assertColumnCount(
engine, 'libs_association', libs_association_columns)
clusters_columns = [
'created_at',
'updated_at',
'id',
'name',
'description',
'tenant_id',
'trust_id',
'is_transient',
'plugin_name',
'hadoop_version',
'cluster_configs',
'default_image_id',
'neutron_management_network',
'anti_affinity',
'management_private_key',
'management_public_key',
'user_keypair_id',
'status',
'status_description',
'info',
'extra',
'cluster_template_id'
]
self.assertColumnsExists(engine, 'clusters', clusters_columns)
self.assertColumnCount(engine, 'clusters', clusters_columns)
node_groups_columns = [
'created_at',
'updated_at',
'id',
'name',
'tenant_id',
'flavor_id',
'image_id',
'image_username',
'node_processes',
'node_configs',
'volumes_per_node',
'volumes_size',
'volume_mount_prefix',
'count',
'cluster_id',
'node_group_template_id',
'floating_ip_pool'
]
self.assertColumnsExists(engine, 'node_groups', node_groups_columns)
self.assertColumnCount(engine, 'node_groups', node_groups_columns)
job_executions_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'job_id',
'input_id',
'output_id',
'start_time',
'end_time',
'cluster_id',
'info',
'progress',
'oozie_job_id',
'return_code',
'job_configs',
'extra'
]
self.assertColumnsExists(
engine, 'job_executions', job_executions_columns)
self.assertColumnCount(
engine, 'job_executions', job_executions_columns)
instances_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'node_group_id',
'instance_id',
'instance_name',
'internal_ip',
'management_ip',
'volumes'
]
self.assertColumnsExists(engine, 'instances', instances_columns)
self.assertColumnCount(engine, 'instances', instances_columns)
self._data_001(engine, data)
def _data_001(self, engine, data):
datasize = 512 * 1024 # 512kB
data = os.urandom(datasize)
t = db_utils.get_table(engine, 'job_binary_internal')
engine.execute(t.insert(), data=data, id='123', name='name')
new_data = engine.execute(t.select()).fetchone().data
self.assertEqual(data, new_data)
engine.execute(t.delete())
def _check_002(self, engine, data):
# currently, 002 is just a placeholder
pass
def _check_003(self, engine, data):
# currently, 003 is just a placeholder
pass
def _check_004(self, engine, data):
# currently, 004 is just a placeholder
pass
def _check_005(self, engine, data):
# currently, 005 is just a placeholder
pass
def _check_006(self, engine, data):
# currently, 006 is just a placeholder
pass
def _pre_upgrade_007(self, engine):
desc = 'magic'
t = db_utils.get_table(engine, 'clusters')
engine.execute(t.insert(), id='123', name='name', plugin_name='pname',
hadoop_version='1', management_private_key='2',
management_public_key='3', status_description=desc)
def _check_007(self, engine, data):
t = db_utils.get_table(engine, 'clusters')
res = engine.execute(t.select(), id='123').first()
self.assertEqual('magic', res['status_description'])
engine.execute(t.delete())
        # Check that status_description can hold 128 kB;
        # a MySQL VARCHAR cannot hold more than 64 kB.
desc = 'a' * 128 * 1024 # 128kb
t = db_utils.get_table(engine, 'clusters')
engine.execute(t.insert(), id='123', name='name', plugin_name='plname',
hadoop_version='hversion', management_private_key='1',
management_public_key='2', status_description=desc)
new_desc = engine.execute(t.select()).fetchone().status_description
self.assertEqual(desc, new_desc)
engine.execute(t.delete())
    def _check_008(self, engine, data):
self.assertColumnExists(engine, 'node_group_templates',
'security_groups')
self.assertColumnExists(engine, 'node_groups', 'security_groups')
self.assertColumnExists(engine, 'templates_relations',
'security_groups')
    def _check_009(self, engine, data):
self.assertColumnExists(engine, 'clusters', 'rollback_info')
    def _check_010(self, engine, data):
self.assertColumnExists(engine, 'node_group_templates',
'auto_security_group')
self.assertColumnExists(engine, 'node_groups', 'auto_security_group')
self.assertColumnExists(engine, 'templates_relations',
'auto_security_group')
self.assertColumnExists(engine, 'node_groups', 'open_ports')
    def _check_011(self, engine, data):
self.assertColumnExists(engine, 'clusters', 'sahara_info')
    def _check_012(self, engine, data):
self.assertColumnExists(engine, 'node_group_templates',
'availability_zone')
self.assertColumnExists(engine, 'node_groups', 'availability_zone')
self.assertColumnExists(engine, 'templates_relations',
'availability_zone')
def _check_014(self, engine, data):
self.assertColumnExists(engine, 'node_group_templates', 'volume_type')
self.assertColumnExists(engine, 'node_groups', 'volume_type')
self.assertColumnExists(engine, 'templates_relations', 'volume_type')
def _check_015(self, engine, data):
provision_steps_columns = [
'created_at',
'updated_at',
'id',
'cluster_id',
'tenant_id',
'step_name',
'step_type',
'completed',
'total',
'successful',
'started_at',
'completed_at',
]
events_columns = [
'created_at',
'updated_at',
'id',
'node_group_id',
'instance_id',
'instance_name',
'event_info',
'successful',
'step_id',
]
self.assertColumnCount(engine, 'cluster_provision_steps',
provision_steps_columns)
self.assertColumnsExists(engine, 'cluster_provision_steps',
provision_steps_columns)
self.assertColumnCount(engine, 'cluster_events', events_columns)
self.assertColumnsExists(engine, 'cluster_events', events_columns)
    def _check_016(self, engine, data):
self.assertColumnExists(engine, 'node_group_templates',
'is_proxy_gateway')
self.assertColumnExists(engine, 'node_groups', 'is_proxy_gateway')
self.assertColumnExists(engine, 'templates_relations',
'is_proxy_gateway')
class TestMigrationsMySQL(SaharaMigrationsCheckers,
base.BaseWalkMigrationTestCase,
base.TestModelsMigrationsSync,
test_base.MySQLOpportunisticTestCase):
pass
class TestMigrationsPostgresql(SaharaMigrationsCheckers,
base.BaseWalkMigrationTestCase,
base.TestModelsMigrationsSync,
test_base.PostgreSQLOpportunisticTestCase):
pass
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Aaron Rosen, Nicira Networks, Inc.
import uuid
from lxml import etree
from neutronclient.common import exceptions as n_exc
from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import security_groups
from nova.api.openstack import xmlutil
from nova import compute
from nova import context
import nova.db
from nova import exception
from nova.network import neutronv2
from nova.network.neutronv2 import api as neutron_api
from nova.network.security_group import neutron_driver
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack.compute.contrib import test_security_groups
from nova.tests.api.openstack import fakes
class TestNeutronSecurityGroupsTestCase(test.TestCase):
def setUp(self):
super(TestNeutronSecurityGroupsTestCase, self).setUp()
cfg.CONF.set_override('security_group_api', 'neutron')
self.original_client = neutronv2.get_client
neutronv2.get_client = get_client
def tearDown(self):
neutronv2.get_client = self.original_client
get_client()._reset()
super(TestNeutronSecurityGroupsTestCase, self).tearDown()
class TestNeutronSecurityGroups(
test_security_groups.TestSecurityGroups,
TestNeutronSecurityGroupsTestCase):
def _create_sg_template(self, **kwargs):
sg = test_security_groups.security_group_template(**kwargs)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
return self.controller.create(req, {'security_group': sg})
def _create_network(self):
body = {'network': {'name': 'net1'}}
neutron = get_client()
net = neutron.create_network(body)
body = {'subnet': {'network_id': net['network']['id'],
'cidr': '10.0.0.0/24'}}
neutron.create_subnet(body)
return net
def _create_port(self, **kwargs):
body = {'port': {}}
fields = ['security_groups', 'device_id', 'network_id',
'port_security_enabled']
for field in fields:
if field in kwargs:
body['port'][field] = kwargs[field]
neutron = get_client()
return neutron.create_port(body)
def test_create_security_group_with_no_description(self):
        # Neutron's security group description field is optional.
pass
def test_create_security_group_with_blank_name(self):
# Neutron's security group name field is optional.
pass
def test_create_security_group_with_whitespace_name(self):
# Neutron allows security group name to be whitespace.
pass
def test_create_security_group_with_blank_description(self):
        # Neutron's security group description field is optional.
pass
def test_create_security_group_with_whitespace_description(self):
# Neutron allows description to be whitespace.
pass
def test_create_security_group_with_duplicate_name(self):
# Neutron allows duplicate names for security groups.
pass
def test_create_security_group_non_string_name(self):
# Neutron allows security group name to be non string.
pass
def test_create_security_group_non_string_description(self):
# Neutron allows non string description.
pass
def test_create_security_group_quota_limit(self):
# Enforced by Neutron server.
pass
def test_update_security_group(self):
# Enforced by Neutron server.
pass
def test_get_security_group_list(self):
self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
list_dict = self.controller.index(req)
self.assertEquals(len(list_dict['security_groups']), 2)
def test_get_security_group_list_all_tenants(self):
pass
def test_get_security_group_by_instance(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
device_id=test_security_groups.FAKE_UUID1)
expected = [{'rules': [], 'tenant_id': 'fake_tenant', 'id': sg['id'],
'name': 'test', 'description': 'test-description'}]
self.stubs.Set(nova.db, 'instance_get_by_uuid',
test_security_groups.return_server_by_uuid)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/os-security-groups'
% test_security_groups.FAKE_UUID1)
res_dict = self.server_controller.index(
req, test_security_groups.FAKE_UUID1)['security_groups']
self.assertEquals(expected, res_dict)
def test_get_security_group_by_id(self):
sg = self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
% sg['id'])
res_dict = self.controller.show(req, sg['id'])
expected = {'security_group': sg}
self.assertEquals(res_dict, expected)
def test_delete_security_group_by_id(self):
sg = self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
sg['id'])
self.controller.delete(req, sg['id'])
def test_delete_security_group_in_use(self):
sg = self._create_sg_template().get('security_group')
self._create_network()
fake_instance = {'project_id': 'fake_tenant',
'availability_zone': 'zone_one',
'info_cache': {'network_info': []},
'security_groups': [],
'uuid': str(uuid.uuid4()),
'display_name': 'test_instance'}
neutron = neutron_api.API()
neutron.allocate_for_instance(context.get_admin_context(),
fake_instance,
security_groups=[sg['id']])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
% sg['id'])
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, sg['id'])
def test_associate_non_running_instance(self):
# Neutron does not care if the instance is running or not. When the
        # instance is detected by neutron, it will push down the security
# group policy to it.
pass
def test_associate_already_associated_security_group_to_instance(self):
        # Neutron security groups do not raise an error if you update a
# port adding a security group to it that was already associated
# to the port. This is because PUT semantics are used.
pass
def test_associate(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
device_id=test_security_groups.FAKE_UUID1)
self.stubs.Set(nova.db, 'instance_get',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._addSecurityGroup(req, '1', body)
def test_associate_port_security_enabled_true(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
port_security_enabled=True,
device_id=test_security_groups.FAKE_UUID1)
self.stubs.Set(nova.db, 'instance_get',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._addSecurityGroup(req, '1', body)
def test_associate_port_security_enabled_false(self):
self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], port_security_enabled=False,
device_id=test_security_groups.FAKE_UUID1)
self.stubs.Set(nova.db, 'instance_get',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup,
req, '1', body)
def test_disassociate_by_non_existing_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get',
test_security_groups.return_server)
body = dict(removeSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_non_running_instance(self):
# Neutron does not care if the instance is running or not. When the
        # instance is detected by neutron, it will push down the security
# group policy to it.
pass
def test_disassociate_already_associated_security_group_to_instance(self):
        # Neutron security groups do not raise an error if you update a
# port adding a security group to it that was already associated
# to the port. This is because PUT semantics are used.
pass
def test_disassociate(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
device_id=test_security_groups.FAKE_UUID1)
self.stubs.Set(nova.db, 'instance_get',
test_security_groups.return_server)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._removeSecurityGroup(req, '1', body)
def test_get_instances_security_groups_bindings(self):
servers = [{'id': test_security_groups.FAKE_UUID1},
{'id': test_security_groups.FAKE_UUID2}]
sg1 = self._create_sg_template(name='test1').get('security_group')
sg2 = self._create_sg_template(name='test2').get('security_group')
# test name='' is replaced with id
sg3 = self._create_sg_template(name='').get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id'],
sg2['id']],
device_id=test_security_groups.FAKE_UUID1)
self._create_port(
network_id=net['network']['id'], security_groups=[sg2['id'],
sg3['id']],
device_id=test_security_groups.FAKE_UUID2)
expected = {test_security_groups.FAKE_UUID1: [{'name': sg1['name']},
{'name': sg2['name']}],
test_security_groups.FAKE_UUID2: [{'name': sg2['name']},
{'name': sg3['id']}]}
security_group_api = self.controller.security_group_api
bindings = (
security_group_api.get_instances_security_groups_bindings(
context.get_admin_context(), servers))
self.assertEqual(bindings, expected)
def test_get_instance_security_groups(self):
sg1 = self._create_sg_template(name='test1').get('security_group')
sg2 = self._create_sg_template(name='test2').get('security_group')
# test name='' is replaced with id
sg3 = self._create_sg_template(name='').get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id'],
sg2['id'],
sg3['id']],
device_id=test_security_groups.FAKE_UUID1)
expected = [{'name': sg1['name']}, {'name': sg2['name']},
{'name': sg3['id']}]
security_group_api = self.controller.security_group_api
sgs = security_group_api.get_instance_security_groups(
context.get_admin_context(), test_security_groups.FAKE_UUID1)
self.assertEquals(sgs, expected)
def test_create_port_with_sg_and_port_security_enabled_true(self):
sg1 = self._create_sg_template(name='test1').get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id']],
port_security_enabled=True,
device_id=test_security_groups.FAKE_UUID1)
security_group_api = self.controller.security_group_api
sgs = security_group_api.get_instance_security_groups(
context.get_admin_context(), test_security_groups.FAKE_UUID1)
self.assertEquals(sgs, [{'name': 'test1'}])
def test_create_port_with_sg_and_port_security_enabled_false(self):
sg1 = self._create_sg_template(name='test1').get('security_group')
net = self._create_network()
self.assertRaises(exception.SecurityGroupCannotBeApplied,
self._create_port,
network_id=net['network']['id'],
security_groups=[sg1['id']],
port_security_enabled=False,
device_id=test_security_groups.FAKE_UUID1)
class TestNeutronSecurityGroupRulesTestCase(TestNeutronSecurityGroupsTestCase):
def setUp(self):
super(TestNeutronSecurityGroupRulesTestCase, self).setUp()
id1 = '11111111-1111-1111-1111-111111111111'
sg_template1 = test_security_groups.security_group_template(
security_group_rules=[], id=id1)
id2 = '22222222-2222-2222-2222-222222222222'
sg_template2 = test_security_groups.security_group_template(
security_group_rules=[], id=id2)
self.controller_sg = security_groups.SecurityGroupController()
neutron = get_client()
neutron._fake_security_groups[id1] = sg_template1
neutron._fake_security_groups[id2] = sg_template2
def tearDown(self):
neutronv2.get_client = self.original_client
get_client()._reset()
        super(TestNeutronSecurityGroupRulesTestCase, self).tearDown()
class TestNeutronSecurityGroupRules(
test_security_groups.TestSecurityGroupRules,
TestNeutronSecurityGroupRulesTestCase):
def test_create_add_existing_rules_by_cidr(self):
sg = test_security_groups.security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller_sg.create(req, {'security_group': sg})
rule = test_security_groups.security_group_rule_template(
cidr='15.0.0.0/8', parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.controller.create(req, {'security_group_rule': rule})
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_group_id(self):
sg = test_security_groups.security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller_sg.create(req, {'security_group': sg})
rule = test_security_groups.security_group_rule_template(
group=self.sg1['id'], parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.controller.create(req, {'security_group_rule': rule})
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_delete(self):
rule = test_security_groups.security_group_rule_template(
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
% security_group_rule['id'])
self.controller.delete(req, security_group_rule['id'])
def test_create_rule_quota_limit(self):
# Enforced by neutron
pass
class TestNeutronSecurityGroupsXMLDeserializer(
test_security_groups.TestSecurityGroupXMLDeserializer,
TestNeutronSecurityGroupsTestCase):
pass
class TestNeutronSecurityGroupsXMLSerializer(
test_security_groups.TestSecurityGroupXMLSerializer,
TestNeutronSecurityGroupsTestCase):
pass
class TestNeutronSecurityGroupsOutputTest(TestNeutronSecurityGroupsTestCase):
content_type = 'application/json'
def setUp(self):
super(TestNeutronSecurityGroupsOutputTest, self).setUp()
fakes.stub_out_nw_api(self.stubs)
self.controller = security_groups.SecurityGroupController()
self.stubs.Set(compute.api.API, 'get',
test_security_groups.fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all',
test_security_groups.fake_compute_get_all)
self.stubs.Set(compute.api.API, 'create',
test_security_groups.fake_compute_create)
self.stubs.Set(neutron_driver.SecurityGroupAPI,
'get_instances_security_groups_bindings',
(test_security_groups.
fake_get_instances_security_groups_bindings))
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Security_groups'])
def _make_request(self, url, body=None):
req = webob.Request.blank(url)
if body:
req.method = 'POST'
req.body = self._encode_body(body)
req.content_type = self.content_type
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
return res
def _encode_body(self, body):
return jsonutils.dumps(body)
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def _get_groups(self, server):
return server.get('security_groups')
def test_create(self):
url = '/v2/fake/servers'
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
for security_group in security_groups:
sg = test_security_groups.security_group_template(
name=security_group['name'])
self.controller.create(req, {'security_group': sg})
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
security_groups=security_groups)
res = self._make_request(url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
def test_create_server_get_default_security_group(self):
url = '/v2/fake/servers'
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
res = self._make_request(url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
group = self._get_groups(server)[0]
self.assertEquals(group.get('name'), 'default')
def test_show(self):
def fake_get_instance_security_groups(inst, context, id):
return [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
self.stubs.Set(neutron_driver.SecurityGroupAPI,
'get_instance_security_groups',
fake_get_instance_security_groups)
url = '/v2/fake/servers'
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
for security_group in security_groups:
sg = test_security_groups.security_group_template(
name=security_group['name'])
self.controller.create(req, {'security_group': sg})
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
security_groups=security_groups)
res = self._make_request(url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
# Test that show (GET) returns the same information as create (POST)
url = '/v2/fake/servers/' + test_security_groups.UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
def test_detail(self):
url = '/v2/fake/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
for i, server in enumerate(self._get_servers(res.body)):
for j, group in enumerate(self._get_groups(server)):
name = 'fake-%s-%s' % (i, j)
self.assertEqual(group.get('name'), name)
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
self.assertEqual(res.status_int, 404)
class TestNeutronSecurityGroupsOutputXMLTest(
TestNeutronSecurityGroupsOutputTest):
content_type = 'application/xml'
class MinimalCreateServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
root.set('name')
root.set('id')
root.set('imageRef')
root.set('flavorRef')
elem = xmlutil.SubTemplateElement(root, 'security_groups')
sg = xmlutil.SubTemplateElement(elem, 'security_group',
selector='security_groups')
sg.set('name')
return xmlutil.MasterTemplate(root, 1,
nsmap={None: xmlutil.XMLNS_V11})
def _encode_body(self, body):
serializer = self.MinimalCreateServerTemplate()
return serializer.serialize(body)
def _get_server(self, body):
return etree.XML(body)
def _get_servers(self, body):
return etree.XML(body).getchildren()
def _get_groups(self, server):
# NOTE(vish): we are adding security groups without an extension
# namespace so we don't break people using the existing
# functionality, but that means we need to use find with
# the existing server namespace.
namespace = server.nsmap[None]
return server.find('{%s}security_groups' % namespace).getchildren()
def get_client(context=None, admin=False):
return MockClient()
class MockClient(object):
# Needs to be global to survive multiple calls to get_client.
_fake_security_groups = {}
_fake_ports = {}
_fake_networks = {}
_fake_subnets = {}
_fake_security_group_rules = {}
def __init__(self):
# add default security group
if not len(self._fake_security_groups):
ret = {'name': 'default', 'description': 'default',
'tenant_id': 'fake_tenant', 'security_group_rules': [],
'id': str(uuid.uuid4())}
self._fake_security_groups[ret['id']] = ret
def _reset(self):
self._fake_security_groups.clear()
self._fake_ports.clear()
self._fake_networks.clear()
self._fake_subnets.clear()
self._fake_security_group_rules.clear()
def create_security_group(self, body=None):
s = body.get('security_group')
if len(s.get('name')) > 255 or len(s.get('description')) > 255:
            msg = 'Security Group name or description is greater than 255'
raise n_exc.NeutronClientException(message=msg, status_code=401)
ret = {'name': s.get('name'), 'description': s.get('description'),
'tenant_id': 'fake_tenant', 'security_group_rules': [],
'id': str(uuid.uuid4())}
self._fake_security_groups[ret['id']] = ret
return {'security_group': ret}
def create_network(self, body):
n = body.get('network')
ret = {'status': 'ACTIVE', 'subnets': [], 'name': n.get('name'),
'admin_state_up': n.get('admin_state_up', True),
'tenant_id': 'fake_tenant',
'id': str(uuid.uuid4())}
if 'port_security_enabled' in n:
ret['port_security_enabled'] = n['port_security_enabled']
self._fake_networks[ret['id']] = ret
return {'network': ret}
def create_subnet(self, body):
s = body.get('subnet')
try:
net = self._fake_networks[s.get('network_id')]
except KeyError:
msg = 'Network %s not found' % s.get('network_id')
raise n_exc.NeutronClientException(message=msg, status_code=404)
ret = {'name': s.get('name'), 'network_id': s.get('network_id'),
'tenant_id': 'fake_tenant', 'cidr': s.get('cidr'),
'id': str(uuid.uuid4()), 'gateway_ip': '10.0.0.1'}
net['subnets'].append(ret['id'])
self._fake_networks[net['id']] = net
self._fake_subnets[ret['id']] = ret
return {'subnet': ret}
def create_port(self, body):
p = body.get('port')
ret = {'status': 'ACTIVE', 'id': str(uuid.uuid4()),
'mac_address': p.get('mac_address', 'fa:16:3e:b8:f5:fb'),
'device_id': p.get('device_id', str(uuid.uuid4())),
'admin_state_up': p.get('admin_state_up', True),
'security_groups': p.get('security_groups', []),
'network_id': p.get('network_id')}
network = self._fake_networks[p['network_id']]
if 'port_security_enabled' in p:
ret['port_security_enabled'] = p['port_security_enabled']
elif 'port_security_enabled' in network:
ret['port_security_enabled'] = network['port_security_enabled']
port_security = ret.get('port_security_enabled', True)
# port_security must be True if security groups are present
if not port_security and ret['security_groups']:
raise exception.SecurityGroupCannotBeApplied()
if network['subnets']:
ret['fixed_ips'] = [{'subnet_id': network['subnets'][0],
'ip_address': '10.0.0.1'}]
if not ret['security_groups'] and (port_security is None or
port_security is True):
for security_group in self._fake_security_groups.values():
if security_group['name'] == 'default':
ret['security_groups'] = [security_group['id']]
break
self._fake_ports[ret['id']] = ret
return {'port': ret}
def create_security_group_rule(self, body):
        # Does not handle the bulk case, so just pick rule[0].
r = body.get('security_group_rules')[0]
fields = ['direction', 'protocol', 'port_range_min', 'port_range_max',
'ethertype', 'remote_ip_prefix', 'tenant_id',
'security_group_id', 'remote_group_id']
ret = {}
for field in fields:
ret[field] = r.get(field)
ret['id'] = str(uuid.uuid4())
self._fake_security_group_rules[ret['id']] = ret
return {'security_group_rules': [ret]}
def show_security_group(self, security_group, **_params):
try:
sg = self._fake_security_groups[security_group]
except KeyError:
msg = 'Security Group %s not found' % security_group
raise n_exc.NeutronClientException(message=msg, status_code=404)
for security_group_rule in self._fake_security_group_rules.values():
if security_group_rule['security_group_id'] == sg['id']:
sg['security_group_rules'].append(security_group_rule)
return {'security_group': sg}
def show_security_group_rule(self, security_group_rule, **_params):
try:
return {'security_group_rule':
self._fake_security_group_rules[security_group_rule]}
except KeyError:
msg = 'Security Group rule %s not found' % security_group_rule
raise n_exc.NeutronClientException(message=msg, status_code=404)
def show_network(self, network, **_params):
try:
return {'network':
self._fake_networks[network]}
except KeyError:
msg = 'Network %s not found' % network
raise n_exc.NeutronClientException(message=msg, status_code=404)
def show_port(self, port, **_params):
try:
return {'port':
self._fake_ports[port]}
except KeyError:
msg = 'Port %s not found' % port
raise n_exc.NeutronClientException(message=msg, status_code=404)
def show_subnet(self, subnet, **_params):
try:
return {'subnet':
self._fake_subnets[subnet]}
except KeyError:
            msg = 'Subnet %s not found' % subnet
raise n_exc.NeutronClientException(message=msg, status_code=404)
def list_security_groups(self, **_params):
ret = []
for security_group in self._fake_security_groups.values():
names = _params.get('name')
if names:
if not isinstance(names, list):
names = [names]
for name in names:
if security_group.get('name') == name:
ret.append(security_group)
ids = _params.get('id')
if ids:
if not isinstance(ids, list):
ids = [ids]
for id in ids:
if security_group.get('id') == id:
ret.append(security_group)
elif not (names or ids):
ret.append(security_group)
return {'security_groups': ret}
def list_networks(self, **_params):
return {'networks':
[network for network in self._fake_networks.values()]}
def list_ports(self, **_params):
ret = []
device_id = _params.get('device_id')
for port in self._fake_ports.values():
if device_id:
if port['device_id'] in device_id:
ret.append(port)
else:
ret.append(port)
return {'ports': ret}
def list_subnets(self, **_params):
return {'subnets':
[subnet for subnet in self._fake_subnets.values()]}
def list_floatingips(self, **_params):
return {'floatingips': []}
def delete_security_group(self, security_group):
self.show_security_group(security_group)
ports = self.list_ports()
for port in ports.get('ports'):
for sg_port in port['security_groups']:
if sg_port == security_group:
msg = ('Unable to delete Security group %s in use'
% security_group)
raise n_exc.NeutronClientException(message=msg,
status_code=409)
del self._fake_security_groups[security_group]
def delete_security_group_rule(self, security_group_rule):
self.show_security_group_rule(security_group_rule)
del self._fake_security_group_rules[security_group_rule]
def delete_network(self, network):
self.show_network(network)
self._check_ports_on_network(network)
for subnet in self._fake_subnets.values():
if subnet['network_id'] == network:
del self._fake_subnets[subnet['id']]
del self._fake_networks[network]
def delete_subnet(self, subnet):
subnet = self.show_subnet(subnet).get('subnet')
self._check_ports_on_network(subnet['network_id'])
        del self._fake_subnets[subnet['id']]
def delete_port(self, port):
self.show_port(port)
del self._fake_ports[port]
def update_port(self, port, body=None):
self.show_port(port)
self._fake_ports[port].update(body['port'])
return {'port': self._fake_ports[port]}
def list_extensions(self, **_parms):
return {'extensions': []}
def _check_ports_on_network(self, network):
ports = self.list_ports()
        for port in ports.get('ports'):
if port['network_id'] == network:
msg = ('Unable to complete operation on network %s. There is '
'one or more ports still in use on the network'
% network)
raise n_exc.NeutronClientException(message=msg, status_code=409)
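# Illustrative round trip through the mock (assumed values): deleting a network
# that still has a port raises, mirroring the 409 behaviour faked above.
#
#   client = get_client()
#   net = client.create_network({'network': {'name': 'net1'}})['network']
#   client.create_subnet({'subnet': {'network_id': net['id'],
#                                    'cidr': '10.0.0.0/24'}})
#   client.create_port({'port': {'network_id': net['id']}})
#   client.delete_network(net['id'])  # raises NeutronClientException (409)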
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011-2013 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for Tilera bare-metal nodes.
"""
import base64
import os
import jinja2
from oslo.config import cfg
from nova.compute import instance_types
from ironic.common import exception
from ironic.common import states
from ironic.common import utils
from ironic import db
from ironic.manager import base
from ironic.openstack.common.db import exception as db_exc
from ironic.openstack.common import fileutils
from ironic.openstack.common import log as logging
tilera_opts = [
cfg.StrOpt('net_config_template',
default='$pybasedir/ironic/net-dhcp.ubuntu.template',
help='Template file for injected network config'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(tilera_opts)
CONF.import_opt('use_ipv6', 'ironic.netconf')
def build_network_config(network_info):
    if not isinstance(network_info, list):
        network_info = [network_info]
interfaces = []
for id, (network, mapping) in enumerate(network_info):
address_v6 = None
gateway_v6 = None
netmask_v6 = None
if CONF.use_ipv6:
address_v6 = mapping['ip6s'][0]['ip']
netmask_v6 = mapping['ip6s'][0]['netmask']
gateway_v6 = mapping['gateway_v6']
interface = {
'name': 'eth%d' % id,
'address': mapping['ips'][0]['ip'],
'gateway': mapping['gateway'],
'netmask': mapping['ips'][0]['netmask'],
'dns': ' '.join(mapping['dns']),
'address_v6': address_v6,
'gateway_v6': gateway_v6,
'netmask_v6': netmask_v6,
}
interfaces.append(interface)
tmpl_path, tmpl_file = os.path.split(CONF.net_config_template)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
template = env.get_template(tmpl_file)
return template.render({'interfaces': interfaces,
'use_ipv6': CONF.use_ipv6})
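# Hedged illustration (not part of the driver): ``network_info`` consumed by
# build_network_config above is assumed to be a list of (network, mapping)
# pairs shaped roughly like:
#     network_info = [(network, {
#         'ips': [{'ip': '10.0.0.2', 'netmask': '255.255.255.0'}],
#         'gateway': '10.0.0.1',
#         'dns': ['8.8.8.8'],
#         'ip6s': [{'ip': 'fd00::2', 'netmask': 64}],
#         'gateway_v6': 'fd00::1',
#     })]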
def get_image_dir_path(instance):
"""Generate the dir for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'])
def get_image_file_path(instance):
"""Generate the full path for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'], 'disk')
def get_tilera_nfs_path(node_id):
"""Generate the path for an instances Tilera nfs."""
tilera_nfs_dir = "fs_" + str(node_id)
return os.path.join(CONF.tftp_root, tilera_nfs_dir)
def get_partition_sizes(instance):
instance_type = instance_types.extract_instance_type(instance)
root_mb = instance_type['root_gb'] * 1024
swap_mb = instance_type['swap']
if swap_mb < 1:
swap_mb = 1
return (root_mb, swap_mb)
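# Worked example (hypothetical values): an instance type with root_gb=10 and
# swap=0 yields (root_mb, swap_mb) == (10240, 1); swap is clamped to at least
# 1 MB so the partitioner always receives a non-empty swap partition.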
def get_tftp_image_info(instance):
"""Generate the paths for tftp files for this instance.
Raises NovaException if
- instance does not contain kernel_id
"""
image_info = {
'kernel': [None, None],
}
try:
image_info['kernel'][0] = str(instance['kernel_id'])
except KeyError:
pass
missing_labels = []
for label in image_info.keys():
(uuid, path) = image_info[label]
if not uuid:
missing_labels.append(label)
else:
image_info[label][1] = os.path.join(CONF.tftp_root,
instance['uuid'], label)
if missing_labels:
raise exception.NovaException(_(
"Can not activate Tilera bootloader. "
"The following boot parameters "
"were not passed to baremetal driver: %s") % missing_labels)
return image_info
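# Hedged illustration: for an instance with kernel_id 'abc' and uuid 'u1',
# and CONF.tftp_root set to '/tftpboot', the mapping returned above is
# assumed to look like {'kernel': ['abc', '/tftpboot/u1/kernel']}.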
class Tilera(base.NodeDriver):
"""Tilera bare metal driver."""
def __init__(self, virtapi):
super(Tilera, self).__init__(virtapi)
def _collect_mac_addresses(self, context, node):
macs = set()
for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
if nic['address']:
macs.add(nic['address'])
return sorted(macs)
def _cache_tftp_images(self, context, instance, image_info):
"""Fetch the necessary kernels and ramdisks for the instance."""
fileutils.ensure_tree(
os.path.join(CONF.tftp_root, instance['uuid']))
LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
instance['name'])
for label in image_info.keys():
(uuid, path) = image_info[label]
utils.cache_image(
context=context,
target=path,
image_id=uuid,
user_id=instance['user_id'],
project_id=instance['project_id'],
)
def _cache_image(self, context, instance, image_meta):
"""Fetch the instance's image from Glance
This method pulls the relevant AMI and associated kernel and ramdisk,
and the deploy kernel and ramdisk from Glance, and writes them
to the appropriate places on local disk.
Both sets of kernel and ramdisk are needed for Tilera booting, so these
are stored under CONF.tftp_root.
At present, the AMI is cached and certain files are injected.
Debian/ubuntu-specific assumptions are made regarding the injected
files. In a future revision, this functionality will be replaced by a
more scalable and os-agnostic approach: the deployment ramdisk will
fetch from Glance directly, and write its own last-mile configuration.
"""
fileutils.ensure_tree(get_image_dir_path(instance))
image_path = get_image_file_path(instance)
LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
{'ami': image_meta['id'], 'name': instance['name']})
utils.cache_image(context=context,
target=image_path,
image_id=image_meta['id'],
user_id=instance['user_id'],
project_id=instance['project_id']
)
return [image_meta['id'], image_path]
def _inject_into_image(self, context, node, instance, network_info,
injected_files=None, admin_password=None):
"""Inject last-mile configuration into instances image
Much of this method is a hack around DHCP and cloud-init
not working together with baremetal provisioning yet.
"""
partition = None
if not instance['kernel_id']:
partition = "1"
ssh_key = None
if 'key_data' in instance and instance['key_data']:
ssh_key = str(instance['key_data'])
if injected_files is None:
injected_files = []
else:
injected_files = list(injected_files)
net_config = build_network_config(network_info)
if instance['hostname']:
injected_files.append(('/etc/hostname', instance['hostname']))
LOG.debug(_("Injecting files into image for instance %(name)s") %
{'name': instance['name']})
utils.inject_into_image(
image=get_image_file_path(instance),
key=ssh_key,
net=net_config,
metadata=instance['metadata'],
admin_password=admin_password,
files=injected_files,
partition=partition,
)
def cache_images(self, context, node, instance,
admin_password, image_meta, injected_files, network_info):
"""Prepare all the images for this instance."""
tftp_image_info = get_tftp_image_info(instance)
self._cache_tftp_images(context, instance, tftp_image_info)
self._cache_image(context, instance, image_meta)
self._inject_into_image(context, node, instance, network_info,
injected_files, admin_password)
def destroy_images(self, context, node, instance):
"""Delete instance's image file."""
utils.unlink_without_raise(get_image_file_path(instance))
utils.rmtree_without_raise(get_image_dir_path(instance))
def activate_bootloader(self, context, node, instance):
"""Configure Tilera boot loader for an instance
Kernel and ramdisk images are downloaded by cache_tftp_images,
and stored in /tftpboot/{uuid}/
        This method writes the instance's config file, and then creates
        symlinks for each MAC address of the instance.
By default, the complete layout looks like this:
/tftpboot/
./{uuid}/
kernel
./fs_node_id/
"""
(root_mb, swap_mb) = get_partition_sizes(instance)
tilera_nfs_path = get_tilera_nfs_path(node['id'])
image_file_path = get_image_file_path(instance)
deployment_key = utils.random_alnum(32)
db.bm_node_update(context, node['id'],
{'deploy_key': deployment_key,
'image_path': image_file_path,
'pxe_config_path': tilera_nfs_path,
'root_mb': root_mb,
'swap_mb': swap_mb})
if os.path.exists(image_file_path) and \
os.path.exists(tilera_nfs_path):
utils.execute('mount', '-o', 'loop', image_file_path,
tilera_nfs_path, run_as_root=True)
def deactivate_bootloader(self, context, node, instance):
"""Delete Tilera bootloader images and config."""
try:
db.bm_node_update(context, node['id'],
{'deploy_key': None,
'image_path': None,
'pxe_config_path': None,
'root_mb': 0,
'swap_mb': 0})
except exception.NodeNotFound:
pass
tilera_nfs_path = get_tilera_nfs_path(node['id'])
if os.path.ismount(tilera_nfs_path):
utils.execute('rpc.mountd', run_as_root=True)
utils.execute('umount', '-f', tilera_nfs_path, run_as_root=True)
try:
image_info = get_tftp_image_info(instance)
except exception.NovaException:
pass
else:
for label in image_info.keys():
(uuid, path) = image_info[label]
utils.unlink_without_raise(path)
try:
self._collect_mac_addresses(context, node)
except db_exc.DBError:
pass
if os.path.exists(os.path.join(CONF.tftp_root,
instance['uuid'])):
utils.rmtree_without_raise(
os.path.join(CONF.tftp_root, instance['uuid']))
def _iptables_set(self, node_ip, user_data):
"""Sets security setting (iptables:port) if needed.
iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP
/tftpboot/iptables_rule script sets iptables rule on the given node.
"""
        rule_path = os.path.join(CONF.tftp_root, "iptables_rule")
if user_data is not None:
open_ip = base64.b64decode(user_data)
utils.execute(rule_path, node_ip, open_ip)
def activate_node(self, context, node, instance):
"""Wait for Tilera deployment to complete."""
locals = {'error': '', 'started': False}
try:
row = db.bm_node_get(context, node['id'])
if instance['uuid'] != row.get('instance_uuid'):
locals['error'] = _("Node associated with another instance"
" while waiting for deploy of %s")
status = row.get('task_state')
if (status == states.DEPLOYING and
locals['started'] is False):
LOG.info(_('Tilera deploy started for instance %s')
% instance['uuid'])
locals['started'] = True
elif status in (states.DEPLOYDONE,
states.BUILDING,
states.ACTIVE):
LOG.info(_("Tilera deploy completed for instance %s")
% instance['uuid'])
node_ip = node['pm_address']
user_data = instance['user_data']
try:
self._iptables_set(node_ip, user_data)
except Exception:
self.deactivate_bootloader(context, node, instance)
                    raise exception.NovaException(
                        _("Node is in an unknown error state."))
elif status == states.DEPLOYFAIL:
locals['error'] = _("Tilera deploy failed for instance %s")
except exception.NodeNotFound:
locals['error'] = _("Baremetal node deleted while waiting "
"for deployment of instance %s")
if locals['error']:
raise exception.InstanceDeployFailure(
locals['error'] % instance['uuid'])
def deactivate_node(self, context, node, instance):
pass
|
|
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Built-in volume type properties."""
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import six
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LE
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def create(context,
name,
extra_specs=None,
is_public=True,
projects=None,
description=None):
"""Creates volume types."""
extra_specs = extra_specs or {}
projects = projects or []
try:
type_ref = db.volume_type_create(context,
dict(name=name,
extra_specs=extra_specs,
is_public=is_public,
description=description),
projects=projects)
except db_exc.DBError as e:
        LOG.exception(_LE('DB error: %s'), six.text_type(e))
raise exception.VolumeTypeCreateFailed(name=name,
extra_specs=extra_specs)
return type_ref
def update(context, id, name, description):
"""Update volume type by id."""
if id is None:
msg = _("id cannot be None")
raise exception.InvalidVolumeType(reason=msg)
try:
type_updated = db.volume_type_update(context,
id,
dict(name=name,
description=description))
except db_exc.DBError as e:
        LOG.exception(_LE('DB error: %s'), six.text_type(e))
raise exception.VolumeTypeUpdateFailed(id=id)
return type_updated
def destroy(context, id):
"""Marks volume types as deleted."""
if id is None:
msg = _("id cannot be None")
raise exception.InvalidVolumeType(reason=msg)
    db.volume_type_destroy(context, id)
def get_all_types(context, inactive=0, search_opts=None):
"""Get all non-deleted volume_types.
    Pass ``inactive=True`` if deleted volume types should also be returned.
"""
search_opts = search_opts or {}
filters = {}
if 'is_public' in search_opts:
filters['is_public'] = search_opts['is_public']
del search_opts['is_public']
vol_types = db.volume_type_get_all(context, inactive, filters=filters)
if search_opts:
LOG.debug("Searching by: %s" % search_opts)
def _check_extra_specs_match(vol_type, searchdict):
for k, v in searchdict.items():
if (k not in vol_type['extra_specs'].keys()
or vol_type['extra_specs'][k] != v):
return False
return True
# search_option to filter_name mapping.
filter_mapping = {'extra_specs': _check_extra_specs_match}
result = {}
for type_name, type_args in vol_types.items():
# go over all filters in the list
for opt, values in search_opts.items():
try:
filter_func = filter_mapping[opt]
except KeyError:
# no such filter - ignore it, go to next filter
continue
else:
if filter_func(type_args, values):
result[type_name] = type_args
break
vol_types = result
return vol_types
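# Hedged usage sketch (names are illustrative): filtering on extra_specs
# keeps only the types whose specs contain every requested key/value pair:
#     get_all_types(ctxt,
#                   search_opts={'extra_specs': {'multiattach': 'true'}})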
def get_volume_type(ctxt, id, expected_fields=None):
"""Retrieves single volume type by id."""
if id is None:
msg = _("id cannot be None")
raise exception.InvalidVolumeType(reason=msg)
if ctxt is None:
ctxt = context.get_admin_context()
return db.volume_type_get(ctxt, id, expected_fields=expected_fields)
def get_volume_type_by_name(context, name):
"""Retrieves single volume type by name."""
if name is None:
msg = _("name cannot be None")
raise exception.InvalidVolumeType(reason=msg)
return db.volume_type_get_by_name(context, name)
def get_default_volume_type():
"""Get the default volume type."""
name = CONF.default_volume_type
vol_type = {}
if name is not None:
ctxt = context.get_admin_context()
try:
vol_type = get_volume_type_by_name(ctxt, name)
except exception.VolumeTypeNotFoundByName as e:
# Couldn't find volume type with the name in default_volume_type
# flag, record this issue and move on
# TODO(zhiteng) consider add notification to warn admin
            LOG.exception(_LE('Default volume type is not found, '
                              'please check default_volume_type config: %s'),
                          six.text_type(e))
return vol_type
def get_volume_type_extra_specs(volume_type_id, key=False):
volume_type = get_volume_type(context.get_admin_context(),
volume_type_id)
extra_specs = volume_type['extra_specs']
if key:
if extra_specs.get(key):
return extra_specs.get(key)
else:
return False
else:
return extra_specs
def is_public_volume_type(context, volume_type_id):
"""Return is_public boolean value of volume type"""
volume_type = db.volume_type_get(context, volume_type_id)
return volume_type['is_public']
def add_volume_type_access(context, volume_type_id, project_id):
"""Add access to volume type for project_id."""
if volume_type_id is None:
msg = _("volume_type_id cannot be None")
raise exception.InvalidVolumeType(reason=msg)
if is_public_volume_type(context, volume_type_id):
msg = _("Type access modification is not applicable to public volume "
"type.")
raise exception.InvalidVolumeType(reason=msg)
return db.volume_type_access_add(context, volume_type_id, project_id)
def remove_volume_type_access(context, volume_type_id, project_id):
"""Remove access to volume type for project_id."""
if volume_type_id is None:
msg = _("volume_type_id cannot be None")
raise exception.InvalidVolumeType(reason=msg)
if is_public_volume_type(context, volume_type_id):
msg = _("Type access modification is not applicable to public volume "
"type.")
raise exception.InvalidVolumeType(reason=msg)
return db.volume_type_access_remove(context, volume_type_id, project_id)
def is_encrypted(context, volume_type_id):
if volume_type_id is None:
return False
encryption = db.volume_type_encryption_get(context, volume_type_id)
return encryption is not None
def get_volume_type_encryption(context, volume_type_id):
if volume_type_id is None:
return None
encryption = db.volume_type_encryption_get(context, volume_type_id)
return encryption
def get_volume_type_qos_specs(volume_type_id):
ctxt = context.get_admin_context()
res = db.volume_type_qos_specs_get(ctxt,
volume_type_id)
return res
def volume_types_diff(context, vol_type_id1, vol_type_id2):
"""Returns a 'diff' of two volume types and whether they are equal.
Returns a tuple of (diff, equal), where 'equal' is a boolean indicating
whether there is any difference, and 'diff' is a dictionary with the
following format:
{'extra_specs': {'key1': (value_in_1st_vol_type, value_in_2nd_vol_type),
'key2': (value_in_1st_vol_type, value_in_2nd_vol_type),
...}
'qos_specs': {'key1': (value_in_1st_vol_type, value_in_2nd_vol_type),
'key2': (value_in_1st_vol_type, value_in_2nd_vol_type),
...}
'encryption': {'cipher': (value_in_1st_vol_type, value_in_2nd_vol_type),
                   'key_size': (value_in_1st_vol_type, value_in_2nd_vol_type),
...}
"""
def _fix_qos_specs(qos_specs):
if qos_specs:
qos_specs.pop('id', None)
qos_specs.pop('name', None)
qos_specs.update(qos_specs.pop('specs', {}))
def _fix_encryption_specs(encryption):
if encryption:
encryption = dict(encryption)
for param in ['volume_type_id', 'created_at', 'updated_at',
'deleted_at']:
encryption.pop(param, None)
return encryption
def _dict_diff(dict1, dict2):
res = {}
equal = True
if dict1 is None:
dict1 = {}
if dict2 is None:
dict2 = {}
for k, v in dict1.items():
res[k] = (v, dict2.get(k))
if k not in dict2 or res[k][0] != res[k][1]:
equal = False
for k, v in dict2.items():
res[k] = (dict1.get(k), v)
if k not in dict1 or res[k][0] != res[k][1]:
equal = False
return (res, equal)
all_equal = True
diff = {}
vol_type_data = []
for vol_type_id in (vol_type_id1, vol_type_id2):
if vol_type_id is None:
specs = {'extra_specs': None,
'qos_specs': None,
'encryption': None}
else:
specs = {}
vol_type = get_volume_type(context, vol_type_id)
specs['extra_specs'] = vol_type.get('extra_specs')
qos_specs = get_volume_type_qos_specs(vol_type_id)
specs['qos_specs'] = qos_specs.get('qos_specs')
_fix_qos_specs(specs['qos_specs'])
specs['encryption'] = get_volume_type_encryption(context,
vol_type_id)
specs['encryption'] = _fix_encryption_specs(specs['encryption'])
vol_type_data.append(specs)
diff['extra_specs'], equal = _dict_diff(vol_type_data[0]['extra_specs'],
vol_type_data[1]['extra_specs'])
if not equal:
all_equal = False
diff['qos_specs'], equal = _dict_diff(vol_type_data[0]['qos_specs'],
vol_type_data[1]['qos_specs'])
if not equal:
all_equal = False
diff['encryption'], equal = _dict_diff(vol_type_data[0]['encryption'],
vol_type_data[1]['encryption'])
if not equal:
all_equal = False
return (diff, all_equal)
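# A minimal, hedged illustration (not part of the Cinder API): it mirrors the
# _dict_diff pairing logic inside volume_types_diff for two hypothetical
# extra_specs dicts and returns the same (diff, equal) shape.
def _example_extra_specs_diff():
    dict1 = {'capabilities': 'gpu', 'multiattach': '<is> True'}
    dict2 = {'capabilities': 'gpu'}
    res = {}
    equal = True
    for k in set(dict1) | set(dict2):
        # Pair the value from each side; a missing key shows up as None.
        res[k] = (dict1.get(k), dict2.get(k))
        if res[k][0] != res[k][1]:
            equal = False
    # res == {'capabilities': ('gpu', 'gpu'),
    #         'multiattach': ('<is> True', None)} and equal is False
    return res, equal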
|
|
#!/usr/bin/env python
import re
import os
import json
import collections
import config
def generate_package_json(name, version, directory):
opam_file = os.path.join(directory, 'opam')
files_directory = os.path.join(directory, 'files')
package_url = None
package_checksum = None
version_file = os.path.join(directory, 'url')
if os.path.exists(version_file):
with open(version_file, 'r') as f:
content = f.read()
package_url = re.search(r"(archive:\s*|http:\s*|src:\s*)\"(.*)\"", content).group(2)
package_checksum = re.search(r"checksum:\s*\"([a-f0-9]+)\"", content).group(1)
def prefixWithScope(name):
return "%s/%s" % (config.GH_ORG_NAME, name)
    def getVersionFromStr(version_str):
        g = re.search(r"([a-zA-Z0-9_\-]\.)?(\d+\.\d+\.\d+).*", version_str)
        if g:
            return g.group(2)
        g = re.search(r"([a-zA-Z0-9_\-]\.)?(\d+\.\d+).*", version_str)
        if g:
            return g.group(2) + ".0"
        g = re.search(r"([a-zA-Z0-9_\-]\.)?(\d+).*", version_str)
        if g:
            return g.group(2) + ".0.0"
        return "0.0.0"
def getPrereleaseTag(name):
g = re.search(r".*\+(.*)", name)
if not g:
return ""
return "".join(re.findall(r"\d+", g.group(1)))
def splitKV(txt):
g = txt.split(":")
return (g[0], ":".join(g[1:]).strip())
def yieldKVPair(f):
current = ""
for l in open(f):
if l.startswith("#"):
continue
l = l.split("#")[0]
g = re.search(r"^([a-zA-Z\-]+)\s*:", l)
if not g:
current += l
else:
if current != "":
yield splitKV(current)
current = l
if current != "":
yield splitKV(current)
def unescapeTerm(term):
if term.startswith("{"):
return ""
g = re.search(r'\"(.*)\"', term)
if g:
return term
else:
return builtInVars[term]
def buildFlatList(txt):
if txt.startswith("["):
txt = txt[1:-1]
txt = txt.strip()
if txt == "":
return []
g = re.findall(r'\"(.*?)\"\s*(\{.*\})?', txt)
return list(g)
def breakList(txt):
if txt.startswith("["):
txt = txt[1:-1]
txt = txt.strip()
if txt == "":
return []
# Look for lists
g = re.findall(r'\[([^\[\]]*)\]\s*\{?([^\{\}\[\]]*)\}?', txt, re.S)
if not g:
terms = [unescapeTerm(term) for term in re.split(r"[\s\n]+", txt)]
return [(" ".join(terms), "")]
return g
def normalize_version_segment(version):
version = re.sub(r'[^0-9]', '', version)
if version == '':
version = '0'
return version
def normalize_version(version):
if '+' in version:
[version, suffix] = version.split('+')
suffix = re.sub(r'[^0-9]', '', suffix)
else:
suffix = ''
parts = version.split('.', 2)
if len(parts) == 1:
(major,) = parts
return '%s.0%s.0' % (normalize_version_segment(major), suffix)
elif len(parts) == 2:
(major, minor) = parts
return '%s.%s%s.0' % (major, normalize_version_segment(minor), suffix)
else:
return version
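    # Worked examples (hedged, derived from the code above):
    #   normalize_version('4')      -> '4.0.0'
    #   normalize_version('4.02')   -> '4.02.0'
    #   normalize_version('4.02+1') -> '4.021.0' (suffix digits fold into the
    #                                             minor segment)
    #   normalize_version('4.02.1') -> '4.02.1'  (three parts pass through)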
def normalize_var_name(name):
if '+' in name:
name, _ = name.split('+', 1)
return name.replace('-', '_')
def cmdToStrings(cmd):
return re.findall(r"\"[^\"]+\"|\S+", cmd)
def unescapeBuiltinVariables(s):
def escape(matched):
var = matched.group(1)
if var in builtInVars:
return builtInVars[var]
g = re.search(r"(.*):enable", var)
if g:
return "${%s_enable:-disable}" % normalize_var_name(g.group(1))
g = re.search(r"(.*):installed", var)
if g:
return "${%s_installed:-false}" % normalize_var_name(g.group(1))
raise Exception("Cannot expand variable %s" % var)
return re.sub(r"%\{(.*?)\}%", escape, s)
# TODO unhack this
def filterCommands(filter):
if filter == "ocaml-native":
return False
if filter == "!ocaml-native":
return True
if filter == "preinstalled":
return True
return False
def createPostInstallCommand(substs, cmds):
build = []
for (subst, _) in substs:
build.append("substs %s.in" % subst)
for cmd in cmds:
if filterCommands(cmd[1]):
continue
subCMDs = cmdToStrings(cmd[0])
newCMDs = []
for subCMD in subCMDs:
g = re.search(r'\"(.*)\"', subCMD)
if g:
newCMDs.append(g.group(1))
else:
if not subCMD.startswith("{"):
newCMDs.append(builtInVars.get(subCMD, subCMD))
build.append(" ".join(newCMDs))
# finalCMD += " && " + " ".join(newCMDs)
build.append("(opam-installer --prefix=$cur__install || true)")
return [unescapeBuiltinVariables(cmd) for cmd in build]
def scoped(name):
return '@%s/%s' % ("opam-alpha", name)
def opamVersionToNpmVersion(v):
v = v.group(0).strip("\"")
return getVersionFromStr(v) + getPrereleaseTag(name)
    def opamRangeToNpmRange(version_range):
        if version_range == "":
            return "*"
        version_range = version_range.strip("{}")
        assert ("|" not in version_range)
        ranges = [re.sub("\".*\"", opamVersionToNpmVersion, r)
                  for r in [r.strip() for r in version_range.split("&")]
                  if r != "build" and r != "test"]
        if len(ranges) == 0:
            return "*"
        return " ".join(ranges)
d = collections.defaultdict(str)
for (k, v) in yieldKVPair(opam_file):
d[k] = v
version = normalize_version(version)
if name in config.OVERRIDE and 'version' in config.OVERRIDE[name]:
version = config.OVERRIDE[name]['version'](version)
builtInVars = {
"name": name,
"make": "make",
"jobs": "4",
"bin": "$cur__bin",
"prefix": "$cur__install",
"lib": "$cur__lib",
"sbin": "$cur__sbin",
"doc": "$cur__doc",
"man": "$cur__man",
"ocaml-native": "true",
"ocaml-native-dynlink": "true",
"pinned": "false",
}
packageJSON = {}
packageJSON["name"] = scoped(name)
packageJSON["version"] = version
packageJSON["scripts"] = {}
packageJSON["peerDependencies"] = {}
packageJSON["esy"] = {}
if name in config.OVERRIDE and 'build' in config.OVERRIDE[name]:
packageJSON["esy"]["build"] = config.OVERRIDE[name]['build']
else:
packageJSON["esy"]["build"] = createPostInstallCommand(
buildFlatList(d["substs"]), breakList(d["build"]) + breakList(d["install"]))
packageJSON["dependencies"] = {
"substs": "esy-ocaml/substs",
"opam-installer-bin": "esy-ocaml/opam-installer-bin",
}
for (dep, range) in buildFlatList(d["depends"]):
if not config.is_dep_allowed(name, dep):
continue
if dep.startswith("base-"):
continue
dep = dep.strip("\" ")
if dep == "":
continue
npm_range = opamRangeToNpmRange(range)
packageJSON["dependencies"][scoped(dep)] = npm_range
if name in config.ESY_EXTRA_DEP:
for dep_name in config.ESY_EXTRA_DEP[name]:
packageJSON["dependencies"][dep_name] = config.ESY_EXTRA_DEP[name][dep_name]
for (dep, range) in buildFlatList(d["depopts"]):
dep = dep.strip("\" ")
if not config.is_dep_allowed(name, dep):
continue
if dep == "" or dep in config.OPAM_DEPOPT_BLACKLIST:
continue
npm_range = opamRangeToNpmRange(range)
packageJSON["dependencies"][scoped(dep)] = npm_range
g = re.findall(r"ocaml-version ([!=<>]+.*?\".*?\")", d["available"])
if g:
g = " ".join(g)
packageJSON["peerDependencies"]["ocaml"] = re.sub("\".*?\"", opamVersionToNpmVersion, g)
else:
packageJSON["peerDependencies"]["ocaml"] = ">= 4.2.3"
opamINFO = {
'url': package_url,
'checksum': package_checksum,
'files': [],
}
if os.path.exists(files_directory):
for filename in os.listdir(files_directory):
abs_filename = os.path.join(files_directory, filename)
with open(abs_filename, 'r') as f:
content = f.read()
            # naive check for binary content
if '\0' in content:
raise Exception('trying to include a binary file: %s' % abs_filename)
opamINFO['files'].append({
'name': filename,
'content': content
})
packageJSON["opam"] = opamINFO
packageJSON["esy"]["buildsInSource"] = True
packageJSON["esy"]["exportedEnv"] = {
"%s_version" % name.replace("-", "_"): {
"val": packageJSON["version"],
"scope": "global"
},
"%s_enable" % name.replace("-", "_"): {
"val": "enable",
"scope": "global"
},
"%s_installed" % name.replace("-", "_"): {
"val": "true",
"scope": "global"
},
}
if name in config.OVERRIDE and 'exportedEnv' in config.OVERRIDE[name]:
packageJSON['esy']['exportedEnv'].update(config.OVERRIDE[name]['exportedEnv'])
return packageJSON
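# Hedged usage sketch (assumes an opam-repository style checkout and the
# local ``config`` module imported above; the package name and path are
# illustrative only):
#     pkg = generate_package_json(
#         'lwt', '2.7.1', 'opam-repository/packages/lwt/lwt.2.7.1')
#     print(json.dumps(pkg, indent=2, sort_keys=True))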
|
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from unittest.case import TestCase
from unittest import mock
from alamo_common.test.utils import override_settings
from alamo_worker.plugins.druid import DruidResult
from alamo_worker.plugins.evaluate import ResultEvaluator
from alamo_worker.plugins.http_check import HttpResult
from alamo_worker.plugins.graphite import GraphiteResult
from alamo_worker.plugins.prometheus import PrometheusResult
from alamo_worker.plugins.mixins import (
RESULT_OK,
RESULT_FAILED,
RESULT_UNKNOWN,
)
class TestResultEvaluator(TestCase):
@staticmethod
def get_payload(rule):
return {
'id': 2206299,
'triggers': [
{
'rule': rule,
'url': 'http://some.url',
}],
}
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluation_for_unknown_status(self, stats):
rule = 'check_name.values < 200'
plugin_results = [
# unknown status
GraphiteResult('check_name', {}, env='test')
]
evaluator = ResultEvaluator()
payload = self.get_payload(rule)
payload['unknown_as_failure'] = True
result = evaluator.evaluate(payload, plugin_results)
expected = {
'status': RESULT_FAILED,
'message': ('Metric not found in `check_name` source, '
'check your query'),
'hysteresis': RESULT_OK
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_passing_rule(self, stats):
rule = 'check_name.status_code != 200'
plugin_results = [
HttpResult('check_name', 200)
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_failing_rule(self, stats):
rule = 'check_name.status_code != 200'
plugin_results = [
HttpResult('check_name', 404)
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
msg = 'Check failed for rule `{}` which evaluates to `True`'.format(
rule)
expected = {
'status': RESULT_FAILED,
'message': msg,
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_multiple_successful_results(self, stats):
rule = 'check1.status_code != 200 AND check2.status_code != 200'
plugin_results = [
HttpResult('check1', 200, status=RESULT_UNKNOWN),
HttpResult('check2', 200, status=RESULT_OK),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_UNKNOWN,
'message': '',
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@override_settings(FAIL_ON_UNKNOWN=False)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_multiple_failed_results(self, stats):
rule = 'check1.status_code != 200 AND check2.status_code != 200'
plugin_results = [
HttpResult('check1', 404, status=RESULT_UNKNOWN),
HttpResult('check2', 404, status=RESULT_OK),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
msg = 'Check failed for rule `check1.status_code != 200` '\
'which evaluates to `True`, Check failed for rule '\
'`check2.status_code != 200` which evaluates to `True`'
expected = {
'status': RESULT_FAILED,
'message': msg,
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@override_settings(FAIL_ON_UNKNOWN=True)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_multiple_failed_results_with_strict_fail(self, stats):
rule = 'check1.status_code != 200 AND check2.status_code != 200'
plugin_results = [
HttpResult('check1', 404, status=RESULT_UNKNOWN),
HttpResult('check2', 404, status=RESULT_OK),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_UNKNOWN,
'hysteresis': 0,
'message': '',
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_multiple_different_results(self, stats):
rule = 'check1.status_code != 200 AND check2.status_code != 200'
plugin_results = [
HttpResult('check1', 404, status=RESULT_OK),
HttpResult('check2', 200, status=RESULT_OK),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
@mock.patch.object(GraphiteResult, 'build_meta')
def test_evaluate_multiple_sources_with_diff_statuses_and_one_is_unknown(
self, *args
):
plugin_results = [
GraphiteResult('check_first', {'a': [5]}, ),
# this result is unknown
GraphiteResult('check_second', {}),
]
payload = {
'id': 999,
'triggers': [
{'rule': 'check_first.values < 1'},
{'rule': 'check_second.values < 200'},
]
}
result = ResultEvaluator().evaluate(payload, plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK,
}
self.assertEqual(result['triggers'][0]['result'], expected)
self.assertEqual(result['triggers'][1]['result'], {
'status': RESULT_UNKNOWN,
'message': ('Metric not found in `check_second` source, '
'check your query'),
'hysteresis': RESULT_OK,
})
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
@mock.patch.object(GraphiteResult, 'build_meta')
def test_evaluation_build_proper_target_for_trigger(self, *args):
plugin_results = [
GraphiteResult('foo', {'foo': [1, 2, 3]}, metric='stats.test.foo'),
GraphiteResult('bar', {'bar': [1, 2, 3]}, metric='stats.test.bar'),
GraphiteResult('baz', {'baz': [1, 2, 3]}, metric='stats.test.baz'),
]
trigger = {'rule': 'foo.values > 10 OR bar.values > 10'}
payload = {
'id': 999,
'triggers': [trigger]
}
ResultEvaluator().evaluate(payload, plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK
}
self.assertEqual(trigger['result'], expected)
self.assertEqual(trigger['target'], 'stats.test.bar,stats.test.foo')
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_invalid_rule(self, stats):
rule = 'cat dog'
plugin_results = [
HttpResult('check', 200),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_UNKNOWN,
'message': '',
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
stats.increment.incr.assert_called_once_with(
'manager.warnings.invalid_rule')
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_failed_plugin_result(self, stats):
rule = 'check.status_code != 200'
content = 'some content'
plugin_results = [
HttpResult('check', 200, status=RESULT_FAILED,
message='some message', content=content),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_FAILED,
'message': 'some message',
'hysteresis': RESULT_OK,
'http_response_body': content
}
expected_meta = {
'links': {
'trigger_url': {'href': 'http://some.url', 'type': 'link'}
},
}
self.assertEqual(result['triggers'][0]['result'], expected)
self.assertEqual(result['triggers'][0]['meta'], expected_meta)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_unknown_plugin_result(self, stats):
rule = 'druid.histogram_90 > 70000'
plugin_results = [
DruidResult('druid', [], status=RESULT_UNKNOWN,
message='Unknown Message'),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_UNKNOWN,
'message': 'Unknown Message',
'hysteresis': RESULT_OK
}
self.assertEqual(
result['triggers'][0]['result'], expected
)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
@mock.patch.object(GraphiteResult, 'build_meta')
def test_evaluate_successful_graphite_result(self, mock_build_meta, stats):
rule = 'check.values > 6'
plugin_results = [
GraphiteResult('check', {'bunny.a': [1, 2, 3], 'bunny.b': [4, 5]}),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected_result = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK
}
expected_meta = {
'links': {
'trigger_url': {'href': 'http://some.url', 'type': 'link'}
}
}
self.assertEqual(result['triggers'][0]['result'], expected_result)
self.assertEqual(result['triggers'][0]['meta'], expected_meta)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
@mock.patch.object(GraphiteResult, 'build_meta')
def test_evaluate_failed_graphite_result(self, mock_build_meta, stats):
rule = 'check.values > 2'
plugin_results = [
GraphiteResult(
'check',
OrderedDict([('bunny.a', [1, 2, 3]), ('bunny.b', [4, 5])])
),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected_result = {
'status': RESULT_FAILED,
'message': 'Check failed for rule `check.values > 2` which '
"evaluates to `{'bunny.a': [3], 'bunny.b': [4, 5]}`",
'hysteresis': RESULT_OK
}
expected_meta = {
'failed_metrics': {'bunny.a': [3], 'bunny.b': [4, 5]},
'links': {
'trigger_url': {'href': 'http://some.url', 'type': 'link'}
}
}
self.assertEqual(result['triggers'][0]['result'], expected_result)
self.assertEqual(result['triggers'][0]['meta'], expected_meta)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_successful_prometheus_result(self, stats):
rule = 'check.values > 2'
plugin_results = [
PrometheusResult(
'check',
{(('__name__', 'metric1'),): [0, 1]},
RESULT_OK, '', 2
),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_failed_prometheus_result(self, stats):
rule = 'check.values > 2'
plugin_results = [
PrometheusResult(
'check',
{(('__name__', 'metric1'),): [1, 2, 3]},
RESULT_OK, '', 2
),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected_result = {
'status': RESULT_FAILED,
'message': (
'Check failed for rule `check.values > 2` '
'which evaluates to `{((\'__name__\', \'metric1\'),): [3]}`'
),
'hysteresis': RESULT_OK
}
expected_meta = {
'failed_metrics': {"(('__name__', 'metric1'),)": [3]},
'links': {
'trigger_url': {'href': 'http://some.url', 'type': 'link'}
}
}
self.assertEqual(result['triggers'][0]['result'], expected_result)
self.assertEqual(result['triggers'][0]['meta'], expected_meta)
|
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for storage engines
"""
import copy
import aodh
from aodh.utils import get_func_valid_keys
def update_nested(original_dict, updates):
"""Updates the leaf nodes in a nest dict.
Updates occur without replacing entire sub-dicts.
"""
dict_to_update = copy.deepcopy(original_dict)
for key, value in updates.items():
if isinstance(value, dict):
sub_dict = update_nested(dict_to_update.get(key, {}), value)
dict_to_update[key] = sub_dict
else:
dict_to_update[key] = updates[key]
return dict_to_update
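# A minimal, hedged illustration (not part of the storage API): update_nested
# overwrites leaf values without replacing whole sub-dicts.
def _example_update_nested():
    original = {'alarm': {'state': 'ok', 'severity': 'low'}}
    updates = {'alarm': {'state': 'alarm'}}
    merged = update_nested(original, updates)
    # 'severity' survives because only the 'state' leaf was updated.
    assert merged == {'alarm': {'state': 'alarm', 'severity': 'low'}}
    return merged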
class Model(object):
"""base class for storage api models."""
def __init__(self, **kwds):
self.fields = list(kwds)
for k, v in kwds.items():
setattr(self, k, v)
def as_dict(self):
d = {}
for f in self.fields:
v = getattr(self, f)
if isinstance(v, Model):
v = v.as_dict()
elif isinstance(v, list) and v and isinstance(v[0], Model):
v = [sub.as_dict() for sub in v]
d[f] = v
return d
def __eq__(self, other):
return self.as_dict() == other.as_dict()
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def get_field_names(cls):
fields = get_func_valid_keys(cls.__init__)
return set(fields) - set(["self"])
class Connection(object):
"""Base class for alarm storage system connections."""
# A dictionary representing the capabilities of this driver.
CAPABILITIES = {
'alarms': {'query': {'simple': False,
'complex': False},
'history': {'query': {'simple': False,
'complex': False}}},
}
STORAGE_CAPABILITIES = {
'storage': {'production_ready': False},
}
def __init__(self, conf, url):
pass
@staticmethod
def upgrade():
"""Migrate the database to `version` or the most recent version."""
@staticmethod
def get_alarms(*args, **kwargs):
"""Yields a lists of alarms that match filters."""
raise aodh.NotImplementedError('Alarms not implemented')
@staticmethod
def create_alarm(alarm):
"""Create an alarm. Returns the alarm as created.
:param alarm: The alarm to create.
"""
raise aodh.NotImplementedError('Alarms not implemented')
@staticmethod
def update_alarm(alarm):
"""Update alarm."""
raise aodh.NotImplementedError('Alarms not implemented')
@staticmethod
def delete_alarm(alarm_id):
"""Delete an alarm and its history data."""
raise aodh.NotImplementedError('Alarms not implemented')
@staticmethod
def get_alarm_changes(alarm_id, on_behalf_of,
user=None, project=None, alarm_type=None,
severity=None, start_timestamp=None,
start_timestamp_op=None, end_timestamp=None,
end_timestamp_op=None, pagination=None):
"""Yields list of AlarmChanges describing alarm history
Changes are always sorted in reverse order of occurrence, given
the importance of currency.
Segregation for non-administrative users is done on the basis
of the on_behalf_of parameter. This allows such users to have
visibility on both the changes initiated by themselves directly
(generally creation, rule changes, or deletion) and also on those
changes initiated on their behalf by the alarming service (state
transitions after alarm thresholds are crossed).
:param alarm_id: ID of alarm to return changes for
:param on_behalf_of: ID of tenant to scope changes query (None for
administrative user, indicating all projects)
:param user: Optional ID of user to return changes for
:param project: Optional ID of project to return changes for
:param alarm_type: Optional change type
:param severity: Optional change severity
:param start_timestamp: Optional modified timestamp start range
:param start_timestamp_op: Optional timestamp start range operation
:param end_timestamp: Optional modified timestamp end range
:param end_timestamp_op: Optional timestamp end range operation
:param pagination: Pagination parameters.
"""
raise aodh.NotImplementedError('Alarm history not implemented')
@staticmethod
def record_alarm_change(alarm_change):
"""Record alarm change event."""
raise aodh.NotImplementedError('Alarm history not implemented')
@staticmethod
def clear():
"""Clear database."""
@staticmethod
def query_alarms(filter_expr=None, orderby=None, limit=None):
"""Return an iterable of model.Alarm objects.
:param filter_expr: Filter expression for query.
:param orderby: List of field name and direction pairs for order by.
:param limit: Maximum number of results to return.
"""
raise aodh.NotImplementedError('Complex query for alarms '
'is not implemented.')
@staticmethod
def query_alarm_history(filter_expr=None, orderby=None, limit=None):
"""Return an iterable of model.AlarmChange objects.
:param filter_expr: Filter expression for query.
:param orderby: List of field name and direction pairs for order by.
:param limit: Maximum number of results to return.
"""
raise aodh.NotImplementedError('Complex query for alarms '
'history is not implemented.')
@classmethod
def get_capabilities(cls):
"""Return an dictionary with the capabilities of each driver."""
return cls.CAPABILITIES
@classmethod
def get_storage_capabilities(cls):
"""Return a dictionary representing the performance capabilities.
This is needed to evaluate the performance of each driver.
"""
return cls.STORAGE_CAPABILITIES
@staticmethod
def clear_expired_alarm_history_data(ttl, max_count=None):
"""Clear expired alarm history data from the backend storage system.
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep alarm history records for.
:param max_count: Number of records to delete.
"""
raise aodh.NotImplementedError('Clearing alarm history '
'not implemented')
@staticmethod
def get_quotas(project_id):
"""Get resource quota for the given project."""
raise aodh.NotImplementedError('Getting resource quota not '
'implemented')
@staticmethod
def set_quotas(project_id, quotas):
"""Set resource quota for the given user."""
raise aodh.NotImplementedError('Setting resource quota not '
'implemented')
@staticmethod
def delete_quotas(project_id):
raise aodh.NotImplementedError('Deleting resource quota not '
'implemented')
|
|
"""Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos (alexandre.tp@gmail.com)
# Bertrand Thirion <bertrand.thirion@inria.fr>
#
# Based on mixture.py by:
# Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state
from ..utils.extmath import logsumexp, pinvh, squared_norm
from .. import cluster
from .gmm import GMM
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
def log_normalize(v, axis=0):
"""Normalized probabilities from unnormalized log-probabilites"""
v = np.rollaxis(v, axis)
v = v.copy()
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
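# A minimal, hedged illustration: log_normalize maps unnormalized
# log-probabilities to probabilities that sum to one; additive constants
# cancel out.
def _example_log_normalize():
    logp = np.log(np.array([1., 3.])) + 42.  # arbitrary additive offset
    p = log_normalize(logp)
    assert np.allclose(p, [0.25, 0.75], atol=1e-5)
    return p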
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
wishart-distributed random variable with the specified parameters."""
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
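# In closed form the normalizer above is, for dof v and n = n_features:
#   log Z(v, B) = (v n / 2) log 2 + (n (n - 1) / 4) log(pi)
#                 + (v / 2) log(dets)
#                 + sum_{i=0}^{n-1} gammaln((v + 1 - i) / 2)
# which matches the four accumulation terms in wishart_logz line by line.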
def _bound_wishart(a, B, detB):
"""Returns a function of the dof, scale matrix and its determinant
used as an upper bound in variational approcimation of the evidence"""
n_features = B.shape[0]
logprior = wishart_logz(a, B, detB, n_features)
logprior -= wishart_logz(n_features,
np.identity(n_features),
1, n_features)
logprior += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
logprior += 0.5 * a * np.trace(B)
return logprior
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
class DPGMM(GMM):
"""Variational Inference for the Infinite Gaussian Mixture Model.
DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
is an infinite mixture model with the Dirichlet Process as a prior
distribution on the number of clusters. In practice the
approximate inference algorithm uses a truncated distribution with
a fixed maximum number of components, but almost always the number
of components actually used depends on the data.
Stick-breaking Representation of a Gaussian mixture model
probability distribution. This class allows for easy and efficient
inference of an approximate posterior distribution over the
parameters of a Gaussian mixture model with a variable number of
components (smaller than the truncation parameter n_components).
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Parameters
----------
n_components: int, optional
Number of mixture components. Defaults to 1.
covariance_type: string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
alpha: float, optional
Real number representing the concentration parameter of
the dirichlet process. Intuitively, the Dirichlet Process
is as likely to start a new cluster for a point as it is
to add that point to a cluster with alpha elements. A
higher alpha means more clusters, as the expected number
of clusters is ``alpha*log(N)``. Defaults to 1.
thresh : float, optional
Convergence threshold.
n_iter : int, optional
Maximum number of iterations to perform before convergence.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_components : int
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
            (`n_components`, `n_features`) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, thresh=1e-2, verbose=False,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
self.alpha = alpha
self.verbose = verbose
super(DPGMM, self).__init__(n_components, covariance_type,
random_state=random_state,
thresh=thresh, min_covar=min_covar,
n_iter=n_iter, params=params,
init_params=init_params)
def _get_precisions(self):
"""Return precisions as a full matrix."""
if self.covariance_type == 'full':
return self.precs_
elif self.covariance_type in ['diag', 'spherical']:
return [np.diag(cov) for cov in self.precs_]
elif self.covariance_type == 'tied':
return [self.precs_] * self.n_components
def _get_covars(self):
return [pinvh(c) for c in self._get_precisions()]
def _set_covars(self, covars):
raise NotImplementedError("""The variational algorithm does
not support setting the covariance parameters.""")
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
self.gamma_[0, 2])
for j in range(1, self.n_components):
dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
dgamma2[j] -= sd[j - 1]
dgamma = dgamma1 + dgamma2
        # Free memory (and developers' cognitive load):
del dgamma1, dgamma2, sd
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dgamma
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
"""Update the concentration parameters for each cluster"""
sz = np.sum(z, axis=0)
self.gamma_.T[1] = 1. + sz
self.gamma_.T[2].fill(0)
for i in range(self.n_components - 2, -1, -1):
self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
self.gamma_.T[2] += self.alpha
def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
def _update_precisions(self, X, z):
"""Update the variational distributions for the precisions"""
n_features = X.shape[1]
if self.covariance_type == 'spherical':
self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
for k in range(self.n_components):
# could be more memory efficient ?
sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
self.scale_[k] = 1.
self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
self.bound_prec_[k] = (
0.5 * n_features * (
digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
elif self.covariance_type == 'diag':
for k in range(self.n_components):
self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
sq_diff = (X - self.means_[k]) ** 2 # see comment above
self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
z.T[k], (sq_diff + 1))
self.precs_[k] = self.dof_[k] / self.scale_[k]
self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
- np.log(self.scale_[k]))
self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
elif self.covariance_type == 'tied':
self.dof_ = 2 + X.shape[0] + n_features
self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
for k in range(self.n_components):
diff = X - self.means_[k]
self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_ = pinvh(self.scale_)
self.precs_ = self.dof_ * self.scale_
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = 2 + sum_resp + n_features
self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
diff = X - self.means_[k]
self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = self.dof_[k] * self.scale_[k]
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = 0.5 * wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
self.scale_[k])
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_.T[1])
print("covariance_type:", self.covariance_type)
def _do_mstep(self, X, z, params):
"""Maximize the variational lower bound
Update each of the parameters to maximize the lower bound."""
self._monitor(X, z, "z")
self._update_concentration(z)
self._monitor(X, z, "gamma")
if 'm' in params:
self._update_means(X, z)
self._monitor(X, z, "mu")
if 'c' in params:
self._update_precisions(X, z)
self._monitor(X, z, "a and b", end=True)
def _initialize_gamma(self):
"Initializes the concentration parameters"
self.gamma_ = self.alpha * np.ones((self.n_components, 3))
def _bound_concentration(self):
"""The variational lower bound for the concentration parameter."""
logprior = gammaln(self.alpha) * self.n_components
logprior += np.sum((self.alpha - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
logprior += np.sum(gammaln(self.gamma_.T[1]) +
gammaln(self.gamma_.T[2]))
logprior -= np.sum((self.gamma_.T[1] - 1) * (
digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior -= np.sum((self.gamma_.T[2] - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
return logprior
def _bound_means(self):
"The variational lower bound for the mean parameters"
logprior = 0.
logprior -= 0.5 * squared_norm(self.means_)
logprior -= 0.5 * self.means_.shape[1] * self.n_components
return logprior
def _bound_precisions(self):
"""Returns the bound term related to precisions"""
logprior = 0.
if self.covariance_type == 'spherical':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_
- self.precs_[:, 0])
elif self.covariance_type == 'diag':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
elif self.covariance_type == 'tied':
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k],
self.scale_[k],
self.det_scale_[k])
return logprior
def _bound_proportions(self, z):
"""Returns the bound term related to proportions"""
dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dg1 = digamma(self.gamma_.T[1]) - dg12
dg2 = digamma(self.gamma_.T[2]) - dg12
cz = np.cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
del cz # Save memory
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _logprior(self, z):
logprior = self._bound_concentration()
logprior += self._bound_means()
logprior += self._bound_precisions()
logprior += self._bound_proportions(z)
return logprior
def lower_bound(self, X, z):
"""returns a lower bound on model evidence based on X and membership"""
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
self.bound_prec_, self.precs_,
self.means_, self.covariance_type))
return c + self._logprior(z)
def _set_weights(self):
for i in xrange(self.n_components):
self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
+ self.gamma_[i, 2])
self.weights_ /= np.sum(self.weights_)
def fit(self, X):
"""Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html#
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
self.random_state = check_random_state(self.random_state)
## initialization step
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_features = X.shape[1]
z = np.ones((X.shape[0], self.n_components))
z /= self.n_components
self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
self._initial_bound -= np.log(2 * np.pi * np.e)
if (self.init_params != '') or not hasattr(self, 'gamma_'):
self._initialize_gamma()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_[::-1]
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'precs_'):
if self.covariance_type == 'spherical':
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * n_features * (
digamma(self.dof_) - np.log(self.scale_))
elif self.covariance_type == 'diag':
self.dof_ = 1 + 0.5 * n_features
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
np.log(self.scale_), 1))
self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
elif self.covariance_type == 'tied':
self.dof_ = 1.
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
self.dof_ = (1 + self.n_components + X.shape[0])
self.dof_ *= np.ones(self.n_components)
self.scale_ = [2 * np.identity(n_features)
for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features)
for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= (self.dof_[k] *
np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
logprob = []
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
# Expectation step
curr_logprob, z = self.score_samples(X)
logprob.append(curr_logprob.sum() + self._logprior(z))
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, z, self.params)
self._set_weights()
return self
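# Usage sketch for the variational fit above (illustrative only; `DPGMM` is
# the Dirichlet-process mixture class these methods belong to, and the data
# below is made up):
#
#   X = np.vstack([np.random.randn(100, 2), np.random.randn(100, 2) + 5])
#   model = DPGMM(n_components=5, covariance_type='diag', n_iter=100)
#   model.fit(X)                        # runs the VB EM loop defined above
#   logprob, z = model.score_samples(X)
#   model.weights_.sum()                # normalized by _set_weights(), ~1.0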
class VBGMM(DPGMM):
"""Variational Inference for the Gaussian Mixture Model
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Parameters
----------
n_components: int, optional
Number of mixture components. Defaults to 1.
covariance_type: string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
alpha: float, optional
Real number representing the concentration parameter of
the Dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can. Defaults
to 1.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the VBGMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
DPGMM : Infinite Gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, thresh=1e-2, verbose=False,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
super(VBGMM, self).__init__(
n_components, covariance_type, random_state=random_state,
thresh=thresh, verbose=verbose, min_covar=min_covar,
n_iter=n_iter, params=params, init_params=init_params)
self.alpha = float(alpha) / n_components
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
p = np.zeros(self.n_components)
bound = np.zeros(X.shape[0])
dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dg
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
logprior = 0.
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
* self.alpha)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha)
* (digamma(self.gamma_) - sg))
return logprior
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_)
print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
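# VBGMM reuses the DPGMM machinery but models the mixing proportions with a
# single Dirichlet (one gamma_ vector) instead of stick-breaking pairs.
# Usage sketch (illustrative only; X as in the DPGMM sketch above):
#
#   model = VBGMM(n_components=3, covariance_type='diag', alpha=1.0)
#   model.fit(X)
#   bound, z = model.score_samples(X)   # z: per-sample responsibilities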
|
|
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.validation import NotFittedError, check_non_negative
from ..utils.extmath import logsumexp
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
EPS = np.finfo(np.float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
max_iters,
mean_change_tol, cal_sstats, random_state):
"""E-step: update document-topic distribution.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
Exponential value of the expectation of the log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
doc_topic_prior : float
Prior of document topic distribution `theta`.
max_iters : int
Max number of iterations for updating document topic distribution in
the E-step.
mean_change_tol : float
Stopping tolerance for updating document topic distribution in E-step.
cal_sstats : boolean
Parameter that indicates whether to calculate sufficient statistics.
Set `cal_sstats` to `True` when we need to run the M-step.
random_state : RandomState instance or None
Parameter that indicates how to initialize the document topic
distribution. Setting `random_state` to None initializes the document
topic distribution to a constant number.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each document.
In the literature, this is `gamma`. We can calculate `E[log(theta)]`
from it.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, this will be None.
"""
is_sparse_x = sp.issparse(X)
n_samples, n_features = X.shape
n_topics = exp_topic_word_distr.shape[0]
if random_state:
doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
else:
doc_topic_distr = np.ones((n_samples, n_topics))
# In the literature, this is `exp(E[log(theta)])`
exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
# Expected sufficient statistics (only allocated when `cal_sstats` is True)
suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in xrange(n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
doc_topic_d = doc_topic_distr[idx_d, :]
exp_doc_topic_d = exp_doc_topic[idx_d, :]
exp_topic_word_d = exp_topic_word_distr[:, ids]
# Iterate between `doc_topic_d` and `norm_phi` until convergence
for _ in xrange(0, max_iters):
last_d = doc_topic_d
# The optimal phi_{dwk} is proportional to
# exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
doc_topic_d = (doc_topic_prior + exp_doc_topic_d *
np.dot(cnts / norm_phi, exp_topic_word_d.T))
exp_doc_topic_d = _dirichlet_expectation_1d(doc_topic_d)
if mean_change(last_d, doc_topic_d) < mean_change_tol:
break
doc_topic_distr[idx_d, :] = doc_topic_d
# Contribution of document d to the expected sufficient
# statistics for the M step.
if cal_sstats:
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
return (doc_topic_distr, suff_stats)
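# The fixed-point update above, written out with plain numpy/scipy for a
# single document (illustrative sketch; scipy.special.psi stands in for the
# Cython helper _dirichlet_expectation_1d, which returns the exponentiated
# expectation):
#
#   from scipy.special import psi
#   exp_Elog_theta = np.exp(psi(gamma_d) - psi(gamma_d.sum()))
#   norm_phi = exp_Elog_theta.dot(exp_Elog_beta_ids) + EPS
#   gamma_d = (doc_topic_prior
#              + exp_Elog_theta * (cnts / norm_phi).dot(exp_Elog_beta_ids.T))
#
# where gamma_d is the (n_topics,) variational parameter for the document and
# exp_Elog_beta_ids is exp(E[log beta]) restricted to the word ids present.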
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
Parameters
----------
n_topics : int, optional (default=10)
Number of topics.
doc_topic_prior : float, optional (default=None)
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_topics`.
In the literature, this is called `alpha`.
topic_word_prior : float, optional (default=None)
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_topics`.
In the literature, this is called `eta`.
learning_method : 'batch' | 'online', default='online'
Method used to update `components_`. Only used in the `fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
learning_decay : float, optional (default=0.7)
It is a parameter that controls the learning rate in the online learning
method. The value should be set between (0.5, 1.0] to guarantee
asymptotic convergence. When the value is 0.0 and batch_size is
``n_samples``, the update method is the same as batch learning. In the
literature, this is called kappa.
learning_offset : float, optional (default=10.)
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : integer, optional (default=10)
The maximum number of iterations.
total_samples : int, optional (default=1e6)
Total number of documents. Only used in the `partial_fit` method.
batch_size : int, optional (default=128)
Number of documents to use in each EM iteration. Only used in online
learning.
evaluate_every : int, optional (default=0)
How often to evaluate perplexity. Only used in the `fit` method.
Set it to 0 or a negative number to not evaluate perplexity in
training at all. Evaluating perplexity can help you check convergence
in the training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
perp_tol : float, optional (default=1e-1)
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, optional (default=1e-3)
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int (default=100)
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, optional (default=1)
The number of jobs to use in the E-step. If -1, all CPUs are used. For
``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
verbose : int, optional (default=0)
Verbosity level.
random_state : int or RandomState instance or None, optional (default=None)
Pseudo-random number generator seed control.
Attributes
----------
components_ : array, [n_topics, n_features]
Topic word distribution. ``components_[i, j]`` represents word j in
topic `i`. In the literature, this is called lambda.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
References
----------
[1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
def __init__(self, n_topics=10, doc_topic_prior=None,
topic_word_prior=None, learning_method='online',
learning_decay=.7, learning_offset=10., max_iter=10,
batch_size=128, evaluate_every=-1, total_samples=1e6,
perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
n_jobs=1, verbose=0, random_state=None):
self.n_topics = n_topics
self.doc_topic_prior = doc_topic_prior
self.topic_word_prior = topic_word_prior
self.learning_method = learning_method
self.learning_decay = learning_decay
self.learning_offset = learning_offset
self.max_iter = max_iter
self.batch_size = batch_size
self.evaluate_every = evaluate_every
self.total_samples = total_samples
self.perp_tol = perp_tol
self.mean_change_tol = mean_change_tol
self.max_doc_update_iter = max_doc_update_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _check_params(self):
"""Check model parameters."""
if self.n_topics <= 0:
raise ValueError("Invalid 'n_topics' parameter: %r"
% self.n_topics)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online"):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
def _init_latent_vars(self, n_features):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1. / self.n_topics
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1. / self.n_topics
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.
init_var = 1. / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self.n_topics, n_features))
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
def _e_step(self, X, cal_sstats, random_init):
"""E-step in EM update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
cal_sstats : boolean
Parameter that indicates whether to calculate sufficient statistics
or not. Set ``cal_sstats`` to True when we need to run the M-step.
random_init : boolean
Parameter that indicates whether to initialize the document topic
distribution randomly in the E-step. Set it to True in training
steps.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is the unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.
"""
# Run e-step in parallel
n_jobs = _get_n_jobs(self.n_jobs)
random_state = self.random_state_ if random_init else None
results = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_update_doc_distribution)(X[idx_slice, :],
self.exp_dirichlet_component_,
self.doc_topic_prior_,
self.max_doc_update_iter,
self.mean_change_tol, cal_sstats,
random_state)
for idx_slice in gen_even_slices(X.shape[0], n_jobs))
# merge result
doc_topics, sstats_list = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if cal_sstats:
# This step finishes computing the sufficient statistics for the
# M-step.
suff_stats = np.zeros(self.components_.shape)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
return (doc_topic_distr, suff_stats)
def _em_step(self, X, total_samples, batch_update):
"""EM update for 1 iteration.
Update `components_` by batch VB or online VB.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
total_samples : integer
Total number of documents. It is only used when
batch_update is `False`.
batch_update : boolean
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
Returns
-------
doc_topic_distr : array, shape=(n_samples, n_topics)
Unnormalized document topic distribution.
"""
# E-step
_, suff_stats = self._e_step(X, cal_sstats=True, random_init=True)
# M-step
if batch_update:
self.components_ = self.topic_word_prior_ + suff_stats
else:
# online update
# In the literature, the weight is `rho`
weight = np.power(self.learning_offset + self.n_batch_iter_,
-self.learning_decay)
doc_ratio = float(total_samples) / X.shape[0]
self.components_ *= (1 - weight)
self.components_ += (weight * (self.topic_word_prior_
+ doc_ratio * suff_stats))
# update `components_` related variables
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
return
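# Shape of the online M-step above, as formulas (Hoffman et al., 2010):
#
#   rho_t      = (learning_offset + n_batch_iter_) ** (-learning_decay)
#   lambda_new = (1 - rho_t) * lambda
#                + rho_t * (topic_word_prior + (D / |batch|) * suff_stats)
#
# where D is `total_samples` and lambda is `components_`; batch updating
# simply replaces lambda with `topic_word_prior + suff_stats`.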
def _check_non_neg_array(self, X, whom):
"""check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, whom)
return X
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.partial_fit")
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if not hasattr(self, 'components_'):
self._init_latent_vars(n_features)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :], total_samples=self.total_samples,
batch_update=False)
return self
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features)
# change to perplexity later
last_bound = None
for i in xrange(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :], total_samples=n_samples,
batch_update=False)
else:
# batch update
self._em_step(X, total_samples=n_samples, batch_update=True)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
bound = self.perplexity(X, doc_topics_distr,
sub_sampling=False)
if self.verbose:
print('iteration: %d, perplexity: %.4f' % (i + 1, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
self.n_iter_ += 1
return self
def transform(self, X):
"""Transform data X according to the fitted model.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_topics)
Document topic distribution for X.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
# make sure feature size is the same in fitted model and in X
X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
n_samples, n_features = X.shape
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
return doc_topic_distr
def _approx_bound(self, X, doc_topic_distr, sub_sampling):
"""Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
doc_topic_distr : array, shape=(n_samples, n_topics)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : boolean, optional, (default=False)
Compensate for subsampling of documents.
It is used when calculating the bound in online learning.
Returns
-------
score : float
"""
def _loglikelihood(prior, distr, dirichlet_distr, size):
# calculate log-likelihood
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
return score
is_sparse_x = sp.issparse(X)
n_samples, n_topics = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
# E[log p(docs | theta, beta)]
for idx_d in xrange(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
+ dirichlet_component_[:, ids])
norm_phi = logsumexp(temp, axis=0)
score += np.dot(cnts, norm_phi)
# compute E[log p(theta | alpha) - log q(theta | gamma)]
score += _loglikelihood(doc_topic_prior, doc_topic_distr,
dirichlet_doc_topic, self.n_topics)
# Compensate for the subsampling of the population of documents
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
# E[log p(beta | eta) - log q (beta | lambda)]
score += _loglikelihood(topic_word_prior, self.components_,
dirichlet_component_, n_features)
return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
score : float
Use approximate bound as score.
"""
X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
doc_topic_distr = self.transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
def perplexity(self, X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_topics)
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.perplexity")
if doc_topic_distr is None:
doc_topic_distr = self.transform(X)
else:
n_samples, n_topics = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError("Number of samples in X and doc_topic_distr"
" do not match.")
if n_topics != self.n_topics:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
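# End-to-end sketch (illustrative; the CountVectorizer step is an assumption
# about how the document-word matrix X is produced, not part of this module):
#
#   from sklearn.feature_extraction.text import CountVectorizer
#   X = CountVectorizer(max_features=5000).fit_transform(raw_documents)
#   lda = LatentDirichletAllocation(n_topics=10, learning_method='online',
#                                   batch_size=128, random_state=0)
#   doc_topics = lda.fit(X).transform(X)
#   print(lda.perplexity(X))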
|
|
from AbstractSyntaxTree import *
class Visitor:
def __init__(self, errorHandler=None):
self.errorHandler = errorHandler
def visitChildren(self, node):
error = False
for child in node.children:
if not self.shouldVisitNextChild(node):
break  # stop visiting the remaining children once a subclass vetoes it
child.accept(self)
if child.error:
error = True
if error:
return "error"
def shouldVisitNextChild(self, node):
return True
def addError(self, error, node):
line, column = node.getLineAndColumn()
node.error = True
# if something fails during variable initialization, the declarator initializer node will be marked as error,
# though the variable will remain in the symbol table to prevent every applied occurrence from generating an
# undeclared variable error
if isinstance(node, ASTExpressionNode):
baseExpression = node.baseExpression()
baseExpression.error = True
if isinstance(baseExpression.parent, ASTInitializerListNode):
baseExpression.parent.parent.error = True
if isinstance(baseExpression.parent, ASTStatementNode):
baseExpression.parent.error = True
self.errorHandler.addError(error, line, column)
def addWarning(self, error, node):
line, column = node.getLineAndColumn()
node.error = False
self.errorHandler.addWarning(error, line, column)
def visitProgramNode(self, node):
self.visitChildren(node)
def visitIncludeNode(self, node):
self.visitChildren(node)
def visitFunctionDeclarationNode(self, node):
self.visitChildren(node)
def visitFunctionDefinitionNode(self, node):
self.visitChildren(node)
def visitMainFunctionNode(self, node):
self.visitChildren(node)
def visitParametersNode(self, node):
self.visitChildren(node)
def visitParameterNode(self, node):
self.visitChildren(node)
def visitArgumentsNode(self, node):
self.visitChildren(node)
def visitInitializerListNode(self, node):
self.visitChildren(node)
def visitArrayPartNode(self, node):
self.visitChildren(node)
def visitStatementsNode(self, node):
self.visitChildren(node)
def visitStatementNode(self, node):
self.visitChildren(node)
def visitReturnNode(self, node):
self.visitChildren(node)
def visitBreakNode(self, node):
pass
def visitContinueNode(self, node):
pass
def visitIfNode(self, node):
self.visitChildren(node)
def visitElseNode(self, node):
self.visitChildren(node)
def visitForNode(self, node):
if node.initializer:
node.initializer.accept(self)
if node.condition:
node.condition.accept(self)
if node.iteration:
node.iteration.accept(self)
self.visitChildren(node)
def visitWhileNode(self, node):
self.visitChildren(node)
def visitDoWhileNode(self, node):
self.visitChildren(node)
def visitVariableDeclarationNode(self, node):
self.visitChildren(node)
def visitDeclaratorInitializerNode(self, node):
self.visitChildren(node)
def enterExpression(self, node):
pass
def exitExpression(self, node):
pass
def visitCommaOperatorNode(self, node):
self.visitChildren(node)
def visitIntegerLiteralNode(self, node):
pass
def visitFloatLiteralNode(self, node):
pass
def visitCharacterLiteralNode(self, node):
pass
def visitStringLiteralNode(self, node):
pass
def visitVariableNode(self, node):
self.visitChildren(node)
def visitFunctionCallNode(self, node):
self.visitChildren(node)
def visitTypeCastNode(self, node):
self.visitChildren(node)
def visitTernaryConditionalOperatorNode(self, node):
self.visitChildren(node)
def visitSimpleAssignmentOperatorNode(self, node):
self.visitChildren(node)
def visitLogicOperatorNode(self, node):
self.visitChildren(node)
def visitComparisonOperatorNode(self, node):
self.visitChildren(node)
def visitUnaryArithmeticOperatorNode(self, node):
self.visitChildren(node)
def visitAddressOfoperatorNode(self, node):
self.visitChildren(node)
def visitDereferenceNode(self, node):
self.visitChildren(node)
def visitLogicalNotOperatorNode(self, node):
self.visitChildren(node)
def visitArraySubscriptNode(self, node):
self.visitChildren(node)
def visitBinaryArithmeticNode(self, node):
self.visitChildren(node)
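# A minimal concrete visitor built on the base class above (illustrative
# sketch; it counts integer literals in a tree by overriding one hook):
class IntegerLiteralCounter(Visitor):
    def __init__(self, errorHandler=None):
        Visitor.__init__(self, errorHandler)
        self.count = 0
    def visitIntegerLiteralNode(self, node):
        # leaf node: no children to visit, just record the occurrence
        self.count += 1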
|
|
from flask import jsonify, request, Response, json, g
from orlo.app import app
from orlo import queries
from orlo.config import config
from orlo.exceptions import InvalidUsage
from orlo.user_auth import token_auth
from orlo.orm import db, Release, Package, PackageResult, ReleaseNote, \
ReleaseMetadata, Platform
from orlo.util import validate_request_json, create_release, \
validate_release_input, validate_package_input, fetch_release, \
create_package, fetch_package, stream_json_list, str_to_bool, is_uuid
from orlo.user_auth import conditional_auth
security_enabled = config.getboolean('security', 'enabled')
@app.route('/releases', methods=['POST'])
@conditional_auth(token_auth.token_required)
def post_releases():
"""
Create a release - the first step in a deployment
:<json string user: User that is performing the release
:<json string team: The development team responsible for this release
:<json array platforms: List of platforms receiving the release
:<json array references: List of external references, e.g. Jira ticket
:>json string id: UUID reference to the created release
:reqheader Content-Type: Must be application/json
:status 200: Release was created successfully
:status 400: Invalid request
**Example curl**:
.. sourcecode:: shell
curl -H "Content-Type: application/json" \\
-X POST \\
http://127.0.0.1/releases \\
-d '{"note": "blah", "platforms": ["site1"], "references": ["ticket"],
"team": "A-Team", "user": "aforbes"}'
"""
validate_release_input(request)
release = create_release(request)
if request.json.get('note'):
release_note = ReleaseNote(release.id, request.json.get('note'))
db.session.add(release_note)
if request.json.get('metadata'):
for key, value in request.json.get('metadata').items():
metadata = ReleaseMetadata(release.id, key, value)
db.session.add(metadata)
app.logger.info(
'Create release {}, notes: {}, references: {}, platforms: {}, '
'metadata: {}'.format(
release.id, release.notes, release.references, release.platforms,
release.metadata)
)
release.start()
db.session.add(release)
db.session.commit()
return jsonify(id=release.id)
@app.route('/releases/<release_id>/packages', methods=['POST'])
@conditional_auth(token_auth.token_required)
def post_packages(release_id):
"""
Add a package to a release
:param string release_id: UUID of the release to add the package to
:<json string name: Name of the package
:<json string version: Version of the package
:<json boolean rollback: Whether this package deploy is a rollback
:>json string id: UUID reference to the created package
:reqheader Content-Type: Must be application/json
:status 200: Package was added to the release successfully
:status 400: Invalid request
**Example curl**:
.. sourcecode:: shell
curl -H "Content-Type: application/json" \\
-X POST http://127.0.0.1/releases/${RELEASE_ID}/packages \\
-d '{"name": "test-package", "version": "1.0.1"}'
"""
validate_package_input(request, release_id)
release = fetch_release(release_id)
package = create_package(release.id, request)
app.logger.info(
'Create package {}, release {}, name {}, version {}'.format(
package.id, release.id, request.json['name'],
request.json['version']))
db.session.add(package)
db.session.commit()
return jsonify(id=package.id)
@app.route('/releases/<release_id>/packages/<package_id>/results',
methods=['POST'])
@conditional_auth(token_auth.token_required)
def post_results(release_id, package_id):
"""
Post the results of a package release
:param string release_id: Release UUID
:param string package_id: Package UUID
:<json string content: Free text field to store what you wish
:status 204: Package results added successfully
"""
results = PackageResult(package_id, str(request.json))
app.logger.info("Post results, release {}, package {}".format(
release_id, package_id))
db.session.add(results)
db.session.commit()
return '', 204
@app.route('/releases/<release_id>/start', methods=['POST'])
@conditional_auth(token_auth.token_required)
def post_releases_start(release_id):
"""
Indicate that a release is starting
:param release_id:
:return:
"""
release = fetch_release(release_id)
app.logger.info("Release start, release {}".format(release_id))
release.start()
db.session.add(release)
db.session.commit()
return '', 204
@app.route('/releases/<release_id>/stop', methods=['POST'])
@conditional_auth(token_auth.token_required)
def post_releases_stop(release_id):
"""
Indicate that a release has finished
This should be called after all packages have also been "stopped".
In future it may stop any un-stopped packages.
:param string release_id: Release UUID
**Example curl**:
.. sourcecode:: shell
curl -H "Content-Type: application/json" \\
-X POST http://127.0.0.1/releases/${RELEASE_ID}/stop
"""
release = fetch_release(release_id)
# TODO check that all packages have been finished
app.logger.info("Release stop, release {}".format(release_id))
release.stop()
db.session.add(release)
db.session.commit()
return '', 204
@app.route('/releases/<release_id>/packages/<package_id>/start',
methods=['POST'])
@conditional_auth(token_auth.token_required)
def post_packages_start(release_id, package_id):
"""
Indicate that a package has started deploying
:param string release_id: Release UUID
:param string package_id: Package UUID
:status 204:
**Example curl**:
.. sourcecode:: shell
curl -X POST http://127.0.0.1/releases/${RELEASE_ID}/packages/${
PACKAGE_ID}/start
"""
package = fetch_package(release_id, package_id)
app.logger.info("Package start, release {}, package {}".format(
release_id, package_id))
package.start()
db.session.add(package)
db.session.commit()
return '', 204
@app.route('/releases/<release_id>/packages/<package_id>/stop',
methods=['POST'])
@conditional_auth(token_auth.token_required)
def post_packages_stop(release_id, package_id):
"""
Indicate that a package has finished deploying
**Example curl**:
.. sourcecode:: shell
curl -H "Content-Type: application/json" \\
-X POST \\
http://127.0.0.1/releases/${RELEASE_ID}/packages/${PACKAGE_ID}/stop \\
-d '{"success": "true"}'
:param string package_id: Package UUID
:param string release_id: Release UUID
"""
validate_request_json(request)
success = request.json.get('success') in [True, 'True', 'true', '1']
package = fetch_package(release_id, package_id)
app.logger.info("Package stop, release {}, package {}, success {}".format(
release_id, package_id, success))
package.stop(success=success)
db.session.add(package)
db.session.commit()
return '', 204
@app.route('/releases/<release_id>/notes', methods=['POST'])
@conditional_auth(token_auth.token_required)
def post_releases_notes(release_id):
"""
Add a note to a release
:param string release_id: Release UUID
:query string text: Text
:return:
"""
validate_request_json(request)
text = request.json.get('text')
if not text:
raise InvalidUsage("Must include text in posted document")
note = ReleaseNote(release_id, text)
app.logger.info("Adding note to release {}".format(release_id))
db.session.add(note)
db.session.commit()
return '', 204
@app.route('/releases/<release_id>/metadata', methods=['POST'])
@conditional_auth(token_auth.token_required)
def post_releases_metadata(release_id):
"""
Add metadata to a release
:param string release_id: Release UUID
:query string text: Text
:return:
"""
validate_request_json(request)
meta = request.json
if not meta:
raise InvalidUsage(
"Must include metadata in posted document, e.g. {\"key\": "
"\"value\"}")
for key, value in request.json.items():
app.logger.info("Adding Metadata to release {}".format(release_id))
metadata = ReleaseMetadata(release_id, key, value)
db.session.add(metadata)
db.session.commit()
return '', 204
@app.route('/releases', methods=['GET'])
@app.route('/releases/<release_id>', methods=['GET'])
def get_releases(release_id=None):
"""
Return a list of releases to the client, filters optional
:param string release_id: Optionally specify a single release UUID to
fetch. This does not disable filters.
:query bool asc: Normally results are returned ordered by stime
descending; setting asc to true will reverse this and sort by stime
ascending
:query int limit: Limit the results by int (default 100)
:query int offset: Offset the results by int
:query string user: Filter releases by the user that performed the release
:query string platform: Filter releases by platform
:query string stime_before: Only include releases that started before \
timestamp given
:query string stime_after: Only include releases that started after \
timestamp given
:query string ftime_before: Only include releases that finished before \
timestamp given
:query string ftime_after: Only include releases that finished after \
timestamp given
:query string team: Filter releases by team
:query string status: Filter by release status. This field is calculated \
from the package status, see special note below.
:query int duration_lt: Only include releases that took less than (int) \
seconds
:query int duration_gt: Only include releases that took more than (int) \
seconds
:query boolean package_rollback: Filter on whether or not the releases \
contain a rollback
:query string package_name: Filter by package name
:query string package_version: Filter by package version
:query int package_duration_gt: Filter by packages of duration greater than
:query int package_duration_lt: Filter by packages of duration less than
:query string package_status: Filter by package status. Valid statuses are:\
"NOT_STARTED", "IN_PROGRESS", "SUCCESSFUL", "FAILED"
**Note for time arguments**:
The timestamp format you must use is specified in /etc/orlo/orlo.ini.
All times are UTC.
**Note on status**:
The release status is calculated from the packages it contains. The
possible values are the same as a package. For a release to be
considered "SUCCESSFUL" or "NOT_STARTED", all packages must have this
value. If any one package has the value "IN_PROGRESS" or "FAILED",
that status applies to the whole release, with "FAILED" overriding
"IN_PROGRESS".
"""
booleans = ('rollback', 'package_rollback',)
if release_id: # Simple, just fetch one release
if not is_uuid(release_id):
raise InvalidUsage("Release ID given is not a valid UUID")
query = queries.get_release(release_id)
else: # Bit more complex
# Defaults
args = {
'limit': 100
}
# Flatten args, as the ImmutableDict puts some values in a list when
# expanded
for k in request.args.keys():
if k in booleans:
args[k] = str_to_bool(request.args.get(k))
else:
args[k] = request.args.get(k)
query = queries.build_query(Release, **args)
# Execute eagerly to avoid confusing stack traces within the Response on
# error
db.session.execute(query)
if query.count() == 0:
response = jsonify(message="No releases found", releases=[])
return response, 404
return Response(stream_json_list('releases', query),
content_type='application/json')
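# Client-side sketch of the release workflow above (illustrative, using the
# `requests` library; endpoints and payloads are the ones documented in the
# route docstrings):
#
#   import requests
#   base = 'http://127.0.0.1'
#   release_id = requests.post(base + '/releases', json={
#       'user': 'aforbes', 'team': 'A-Team', 'platforms': ['site1'],
#       'references': ['ticket']}).json()['id']
#   package_id = requests.post(
#       base + '/releases/{}/packages'.format(release_id),
#       json={'name': 'test-package', 'version': '1.0.1'}).json()['id']
#   requests.post(base + '/releases/{}/packages/{}/stop'.format(
#       release_id, package_id), json={'success': 'true'})
#   requests.post(base + '/releases/{}/stop'.format(release_id))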
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import os.path as op
import sys
import logging
import numpy as np
from jcvi.formats.base import LineFile
from jcvi.apps.base import (
OptionParser,
ActionDispatcher,
need_update,
sh,
get_abs_path,
which,
)
class Sizes(LineFile):
"""
Two-column .sizes file, often generated by `faSize -detailed`
contigID size
"""
def __init__(self, filename, select=None):
assert op.exists(filename), "File `{0}` not found".format(filename)
# filename can be both .sizes file or FASTA formatted file
sizesname = filename
if not filename.endswith(".sizes"):
sizesname = filename + ".sizes"
filename = get_abs_path(filename)
if need_update(filename, sizesname):
cmd = "faSize"
if which(cmd):
cmd += " -detailed {0}".format(filename)
sh(cmd, outfile=sizesname)
else:
from jcvi.formats.fasta import Fasta
f = Fasta(filename)
fw = open(sizesname, "w")
for k, size in f.itersizes_ordered():
print("\t".join((k, str(size))), file=fw)
fw.close()
filename = sizesname
assert filename.endswith(".sizes")
super(Sizes, self).__init__(filename)
self.fp = open(filename)
self.filename = filename
# get sizes for individual contigs, both in list and dict
# this is to preserve the input order in the sizes file
sizes = list(self.iter_sizes())
if select:
assert select > 0
sizes = [x for x in sizes if x[1] >= select]
self.sizes_mapping = dict(sizes)
# get cumulative sizes, both in list and dict
ctgs, sizes = zip(*sizes)
self.sizes = sizes
cumsizes = np.cumsum([0] + list(sizes))
self.ctgs = ctgs
self.cumsizes = cumsizes
self.cumsizes_mapping = dict(zip(ctgs, cumsizes))
def __len__(self):
return len(self.sizes)
def get_size(self, ctg):
return self.sizes_mapping[ctg]
def get_cumsize(self, ctg):
return self.cumsizes_mapping[ctg]
def close(self, clean=False):
self.fp.close()
if clean:
os.remove(self.filename)
@property
def mapping(self):
return self.sizes_mapping
@property
def totalsize(self):
return sum(self.sizes)
def iter_sizes(self):
self.fp.seek(0)
for row in self.fp:
ctg, size = row.split()[:2]
yield ctg, int(size)
def iter_names(self):
self.fp.seek(0)
for row in self.fp:
ctg, size = row.split()[:2]
yield ctg
def get_position(self, ctg, pos):
if ctg not in self.cumsizes_mapping:
return None
return self.cumsizes_mapping[ctg] + pos
def get_breaks(self):
for i in range(len(self)):
yield self.ctgs[i], self.cumsizes[i], self.cumsizes[i + 1]
@property
def summary(self):
from jcvi.assembly.base import calculate_A50
ctgsizes = self.sizes
a50, l50, n50 = calculate_A50(ctgsizes)
return sum(ctgsizes), l50, n50
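# Usage sketch (illustrative; assumes "contigs.sizes" is a two-column sizes
# file, or a FASTA file from which one can be generated as in __init__):
#
#   sizes = Sizes("contigs.sizes")
#   print(len(sizes), "contigs,", sizes.totalsize, "bp total")
#   offset = sizes.get_position("ctg1", 1000)   # cumulative coordinate
#   for ctg, start, end in sizes.get_breaks():
#       pass  # start/end are offsets in the concatenated coordinate system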
def main():
actions = (
("agp", "write to AGP format from sizes file"),
("extract", "extract the lines containing only the given IDs"),
("histogram", "plot read/contig length distribution"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def histogram(args):
"""
%prog histogram [reads.fasta|reads.fastq]
Plot read length distribution for reads. The plot would be similar to the
one generated by SMRT-portal, for example:
http://blog.pacificbiosciences.com/2013/10/data-release-long-read-shotgun.html
The plot has two y-axes, corresponding to the pdf and the cdf,
respectively. Also shown are the number of reads, average/median read
length, N50, and total length.
"""
from jcvi.utils.cbook import human_size, thousands, SUFFIXES
from jcvi.formats.fastq import fasta
from jcvi.graphics.histogram import stem_leaf_plot
from jcvi.graphics.base import (
plt,
markup,
human_formatter,
human_base_formatter,
savefig,
set2,
set_ticklabels_helvetica,
)
p = OptionParser(histogram.__doc__)
p.set_histogram(
vmax=50000, bins=100, xlabel="Read length", title="Read length distribution"
)
p.add_option("--ylabel1", default="Counts", help="Label of y-axis on the left")
p.add_option(
"--color",
default="0",
choices=[str(x) for x in range(8)],
help="Color of bars, which is an index 0-7 in brewer set2",
)
opts, args, iopts = p.set_image_options(args, figsize="6x6", style="dark")
if len(args) != 1:
sys.exit(not p.print_help())
(fastafile,) = args
fastafile, qualfile = fasta([fastafile, "--seqtk"])
sizes = Sizes(fastafile)
all_sizes = sorted(sizes.sizes)
xmin, xmax, bins = opts.vmin, opts.vmax, opts.bins
left, height = stem_leaf_plot(all_sizes, xmin, xmax, bins)
plt.figure(1, (iopts.w, iopts.h))
ax1 = plt.gca()
width = (xmax - xmin) * 0.5 / bins
color = set2[int(opts.color)]
ax1.bar(left, height, width=width, linewidth=0, fc=color, align="center")
ax1.set_xlabel(markup(opts.xlabel))
ax1.set_ylabel(opts.ylabel1)
ax2 = ax1.twinx()
cur_size = 0
total_size, l50, n50 = sizes.summary
cdf = {}
hsize = human_size(total_size)
tag = hsize[-2:]
unit = 1000 ** SUFFIXES[1000].index(tag)
for x in all_sizes:
if x not in cdf:
cdf[x] = (total_size - cur_size) * 1.0 / unit
cur_size += x
x, y = zip(*sorted(cdf.items()))
ax2.plot(x, y, "-", color="darkslategray")
ylabel2 = "{0} above read length".format(tag)
ax2.set_ylabel(ylabel2)
for ax in (ax1, ax2):
set_ticklabels_helvetica(ax)
ax.set_xlim((xmin - width / 2, xmax + width / 2))
tc = "gray"
axt = ax1.transAxes
xx, yy = 0.95, 0.95
ma = "Total bases: {0}".format(hsize)
mb = "Total reads: {0}".format(thousands(len(sizes)))
mc = "Average read length: {0}bp".format(thousands(np.mean(all_sizes)))
md = "Median read length: {0}bp".format(thousands(np.median(all_sizes)))
me = "N50 read length: {0}bp".format(thousands(l50))
for t in (ma, mb, mc, md, me):
print(t, file=sys.stderr)
ax1.text(xx, yy, t, color=tc, transform=axt, ha="right")
yy -= 0.05
ax1.set_title(markup(opts.title))
# Seaborn removes ticks for all styles except 'ticks'. Now add them back:
ax1.tick_params(
axis="x",
direction="out",
length=3,
left=False,
right=False,
top=False,
bottom=True,
)
ax1.xaxis.set_major_formatter(human_base_formatter)
ax1.yaxis.set_major_formatter(human_formatter)
figname = sizes.filename + ".pdf"
savefig(figname)
def extract(args):
"""
%prog extract idsfile sizesfile
Extract the lines containing only the given IDs.
"""
p = OptionParser(extract.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
idsfile, sizesfile = args
sizes = Sizes(sizesfile).mapping
fp = open(idsfile)
for row in fp:
name = row.strip()
size = sizes[name]
print("\t".join(str(x) for x in (name, size)))
def agp(args):
"""
%prog agp <fastafile|sizesfile>
Convert the sizes file to a trivial AGP file.
"""
from jcvi.formats.agp import OO
p = OptionParser(agp.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(sizesfile,) = args
sizes = Sizes(sizesfile)
agpfile = sizes.filename.rsplit(".", 1)[0] + ".agp"
fw = open(agpfile, "w")
o = OO() # Without a filename
for ctg, size in sizes.iter_sizes():
o.add(ctg, ctg, size)
o.write_AGP(fw)
fw.close()
logging.debug("AGP file written to `{0}`.".format(agpfile))
return agpfile
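# CLI sketch (illustrative; the module path is an assumption based on the
# imports above):
#
#   python -m jcvi.formats.sizes agp contigs.sizes
#
# writes `contigs.agp`, one line per contig, via the OO() object above.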
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import os
import signal
import sys
from queue import Queue
from random import randrange
from threading import Thread
from time import sleep
try:
# python2
from urllib2 import urlopen, Request, HTTPError
except ImportError:
# python3
from urllib.request import Request, urlopen
from urllib.error import HTTPError
import pytest
from future.builtins import bytes
from telegram import TelegramError, Message, User, Chat, Update, Bot
from telegram.error import Unauthorized, InvalidToken
from telegram.ext import Updater
signalskip = pytest.mark.skipif(sys.platform == 'win32',
reason='Can\'t send signals without stopping '
'whole process on windows')
@pytest.fixture()
def updater(bot):
up = Updater(bot=bot, workers=2)
yield up
if up.running:
up.stop()
class TestUpdater:
@pytest.fixture(autouse=True)
def reset(self):
self.message_count = 0
self.received = None
self.attempts = 0
def error_handler(self, bot, update, error):
self.received = error.message
def callback(self, bot, update):
self.received = update.message.text
# TODO: test clean= argument
def test_error_on_get_updates(self, monkeypatch, updater):
def test(*args, **kwargs):
raise TelegramError('Test Error 2')
monkeypatch.setattr('telegram.Bot.get_updates', test)
monkeypatch.setattr('telegram.Bot.set_webhook', lambda *args, **kwargs: True)
updater.dispatcher.add_error_handler(self.error_handler)
updater.start_polling(0.01)
sleep(.1)
assert self.received == "Test Error 2"
def test_webhook(self, monkeypatch, updater):
q = Queue()
monkeypatch.setattr('telegram.Bot.set_webhook', lambda *args, **kwargs: True)
monkeypatch.setattr('telegram.Bot.delete_webhook', lambda *args, **kwargs: True)
monkeypatch.setattr('telegram.ext.Dispatcher.process_update', lambda _, u: q.put(u))
ip = '127.0.0.1'
port = randrange(1024, 49152) # Select random port for travis
updater.start_webhook(
ip,
port,
url_path='TOKEN',
cert='./tests/test_updater.py',
key='./tests/test_updater.py', )
sleep(.2)
# SSL-Wrapping will fail, so we start the server without SSL
thr = Thread(target=updater.httpd.serve_forever)
thr.start()
try:
# Now, we send an update to the server via urlopen
update = Update(1, message=Message(1, User(1, ''), None, Chat(1, ''), text='Webhook'))
self._send_webhook_msg(ip, port, update.to_json(), 'TOKEN')
sleep(.2)
assert q.get(False) == update
response = self._send_webhook_msg(ip, port, None, 'webookhandler.py')
assert b'' == response.read()
assert 200 == response.code
response = self._send_webhook_msg(ip, port, None, 'webookhandler.py',
get_method=lambda: 'HEAD')
assert b'' == response.read()
assert 200 == response.code
# Test multiple shutdown() calls
updater.httpd.shutdown()
finally:
updater.httpd.shutdown()
thr.join()
def test_webhook_no_ssl(self, monkeypatch, updater):
q = Queue()
monkeypatch.setattr('telegram.Bot.set_webhook', lambda *args, **kwargs: True)
monkeypatch.setattr('telegram.Bot.delete_webhook', lambda *args, **kwargs: True)
monkeypatch.setattr('telegram.ext.Dispatcher.process_update', lambda _, u: q.put(u))
ip = '127.0.0.1'
port = randrange(1024, 49152) # Select random port for travis
updater.start_webhook(ip, port, webhook_url=None)
sleep(.2)
# Now, we send an update to the server via urlopen
update = Update(1, message=Message(1, User(1, ''), None, Chat(1, ''), text='Webhook 2'))
self._send_webhook_msg(ip, port, update.to_json())
sleep(.2)
assert q.get(False) == update
def test_bootstrap_retries_success(self, monkeypatch, updater):
retries = 2
def attempt(_, *args, **kwargs):
if self.attempts < retries:
self.attempts += 1
raise TelegramError('')
monkeypatch.setattr('telegram.Bot.set_webhook', attempt)
updater._bootstrap(retries, False, 'path', None)
assert self.attempts == retries
@pytest.mark.parametrize(('error', 'attempts'),
argvalues=[
(TelegramError(''), 2),
(Unauthorized(''), 1),
(InvalidToken(), 1)
],
ids=('TelegramError', 'Unauthorized', 'InvalidToken'))
def test_bootstrap_retries_error(self, monkeypatch, updater, error, attempts):
retries = 1
def attempt(_, *args, **kwargs):
self.attempts += 1
raise error
monkeypatch.setattr('telegram.Bot.set_webhook', attempt)
with pytest.raises(type(error)):
updater._bootstrap(retries, False, 'path', None)
assert self.attempts == attempts
def test_webhook_invalid_posts(self, updater):
ip = '127.0.0.1'
port = randrange(1024, 49152) # select random port for travis
thr = Thread(
target=updater._start_webhook,
args=(ip, port, '', None, None, 0, False, None, None))
thr.start()
sleep(.2)
try:
with pytest.raises(HTTPError) as excinfo:
self._send_webhook_msg(ip, port, '<root><bla>data</bla></root>',
content_type='application/xml')
assert excinfo.value.code == 403
with pytest.raises(HTTPError) as excinfo:
self._send_webhook_msg(ip, port, 'dummy-payload', content_len=-2)
assert excinfo.value.code == 403
# TODO: prevent urllib or the underlying from adding content-length
# with pytest.raises(HTTPError) as excinfo:
# self._send_webhook_msg(ip, port, 'dummy-payload', content_len=None)
# assert excinfo.value.code == 411
            with pytest.raises(HTTPError) as excinfo:
self._send_webhook_msg(ip, port, 'dummy-payload', content_len='not-a-number')
assert excinfo.value.code == 403
finally:
updater.httpd.shutdown()
thr.join()
def _send_webhook_msg(self,
ip,
port,
payload_str,
url_path='',
content_len=-1,
content_type='application/json',
get_method=None):
headers = {'content-type': content_type, }
if not payload_str:
content_len = None
payload = None
else:
payload = bytes(payload_str, encoding='utf-8')
if content_len == -1:
content_len = len(payload)
if content_len is not None:
headers['content-length'] = str(content_len)
url = 'http://{ip}:{port}/{path}'.format(ip=ip, port=port, path=url_path)
req = Request(url, data=payload, headers=headers)
if get_method is not None:
req.get_method = get_method
return urlopen(req)
def signal_sender(self):
sleep(0.2)
os.kill(os.getpid(), signal.SIGTERM)
@signalskip
def test_idle(self, updater):
updater.start_polling(0.01)
Thread(target=self.signal_sender).start()
updater.idle()
# If we get this far, idle() ran through
sleep(.5)
assert updater.running is False
@signalskip
def test_user_signal(self, updater):
temp_var = {'a': 0}
def user_signal_inc(signum, frame):
temp_var['a'] = 1
updater.user_sig_handler = user_signal_inc
updater.start_polling(0.01)
Thread(target=self.signal_sender).start()
updater.idle()
# If we get this far, idle() ran through
sleep(.5)
assert updater.running is False
assert temp_var['a'] != 0
def test_create_bot(self):
updater = Updater('123:abcd')
assert updater.bot is not None
def test_mutual_exclude_token_bot(self):
bot = Bot('123:zyxw')
with pytest.raises(ValueError):
Updater(token='123:abcd', bot=bot)
def test_no_token_or_bot(self):
with pytest.raises(ValueError):
Updater()
|
|
"""
Connection module for Amazon EFS
.. versionadded:: 2017.7.0
:configuration: This module accepts explicit EFS credentials but can also
utilize IAM roles assigned to the instance through Instance Profiles or
it can read them from the ~/.aws/credentials file or from these
environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More information available at:
.. code-block:: text
http://docs.aws.amazon.com/efs/latest/ug/
access-control-managing-permissions.html
http://boto3.readthedocs.io/en/latest/guide/
configuration.html#guide-configuration
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file
.. code-block:: yaml
efs.keyid: GKTADJGHEIQSXMKKRBJ08H
efs.key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs
A region may also be specified in the configuration
.. code-block:: yaml
efs.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid, and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
"""
import logging
import salt.utils.versions
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
log = logging.getLogger(__name__)
def __virtual__():
"""
Only load if boto3 libraries exist and if boto3 libraries are greater than
a given version.
"""
return salt.utils.versions.check_boto_reqs(boto3_ver="1.0.0", check_boto=False)
def _get_conn(key=None, keyid=None, profile=None, region=None, **kwargs):
"""
Create a boto3 client connection to EFS
"""
client = None
if profile:
if isinstance(profile, str):
if profile in __pillar__:
profile = __pillar__[profile]
elif profile in __opts__:
profile = __opts__[profile]
elif key or keyid or region:
profile = {}
if key:
profile["key"] = key
if keyid:
profile["keyid"] = keyid
if region:
profile["region"] = region
if isinstance(profile, dict):
if "region" in profile:
profile["region_name"] = profile["region"]
profile.pop("region", None)
if "key" in profile:
profile["aws_secret_access_key"] = profile["key"]
profile.pop("key", None)
if "keyid" in profile:
profile["aws_access_key_id"] = profile["keyid"]
profile.pop("keyid", None)
client = boto3.client("efs", **profile)
else:
client = boto3.client("efs")
return client
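# Illustrative note (values are placeholders): a profile such as
#   {"keyid": "AKIAEXAMPLE", "key": "s3cr3t", "region": "us-east-1"}
# is translated by _get_conn into
#   boto3.client("efs", aws_access_key_id="AKIAEXAMPLE",
#                aws_secret_access_key="s3cr3t", region_name="us-east-1")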
def create_file_system(
name,
performance_mode="generalPurpose",
keyid=None,
key=None,
profile=None,
region=None,
creation_token=None,
**kwargs
):
"""
Creates a new, empty file system.
name
(string) - The name for the new file system
performance_mode
(string) - The PerformanceMode of the file system. Can be either
generalPurpose or maxIO
creation_token
        (string) - A unique name to be used as a reference when creating an
        EFS. This ensures idempotency. Defaults to name if not specified.
returns
(dict) - A dict of the data for the elastic file system
CLI Example:
.. code-block:: bash
salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose
"""
if creation_token is None:
creation_token = name
tags = {"Key": "Name", "Value": name}
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
response = client.create_file_system(
CreationToken=creation_token, PerformanceMode=performance_mode
)
if "FileSystemId" in response:
client.create_tags(FileSystemId=response["FileSystemId"], Tags=tags)
if "Name" in response:
response["Name"] = name
return response
def create_mount_target(
filesystemid,
subnetid,
ipaddress=None,
securitygroups=None,
keyid=None,
key=None,
profile=None,
region=None,
**kwargs
):
"""
Creates a mount target for a file system.
You can then mount the file system on EC2 instances via the mount target.
You can create one mount target in each Availability Zone in your VPC.
All EC2 instances in a VPC within a given Availability Zone share a
single mount target for a given file system.
If you have multiple subnets in an Availability Zone,
you create a mount target in one of the subnets.
EC2 instances do not need to be in the same subnet as the mount target
in order to access their file system.
filesystemid
(string) - ID of the file system for which to create the mount target.
subnetid
(string) - ID of the subnet to add the mount target in.
ipaddress
(string) - Valid IPv4 address within the address range
of the specified subnet.
securitygroups
(list[string]) - Up to five VPC security group IDs,
of the form sg-xxxxxxxx.
These must be for the same VPC as subnet specified.
returns
(dict) - A dict of the response data
CLI Example:
.. code-block:: bash
salt 'my-minion' boto_efs.create_mount_target filesystemid subnetid
"""
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
if ipaddress is None and securitygroups is None:
return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid)
if ipaddress is None:
return client.create_mount_target(
FileSystemId=filesystemid, SubnetId=subnetid, SecurityGroups=securitygroups
)
if securitygroups is None:
return client.create_mount_target(
FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress
)
return client.create_mount_target(
FileSystemId=filesystemid,
SubnetId=subnetid,
IpAddress=ipaddress,
SecurityGroups=securitygroups,
)
def create_tags(
filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs
):
"""
Creates or overwrites tags associated with a file system.
Each tag is a key-value pair. If a tag key specified in the request
already exists on the file system, this operation overwrites
its value with the value provided in the request.
filesystemid
        (string) - ID of the file system whose tags will be modified.
tags
(dict) - The tags to add to the file system
CLI Example:
.. code-block:: bash
salt 'my-minion' boto_efs.create_tags
"""
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
new_tags = []
for k, v in tags.items():
new_tags.append({"Key": k, "Value": v})
client.create_tags(FileSystemId=filesystemid, Tags=new_tags)
def delete_file_system(
filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs
):
"""
Deletes a file system, permanently severing access to its contents.
Upon return, the file system no longer exists and you can't access
any contents of the deleted file system. You can't delete a file system
that is in use. That is, if the file system has any mount targets,
you must first delete them.
filesystemid
(string) - ID of the file system to delete.
CLI Example:
.. code-block:: bash
salt 'my-minion' boto_efs.delete_file_system filesystemid
"""
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
client.delete_file_system(FileSystemId=filesystemid)
def delete_mount_target(
mounttargetid, keyid=None, key=None, profile=None, region=None, **kwargs
):
"""
Deletes the specified mount target.
This operation forcibly breaks any mounts of the file system via the
mount target that is being deleted, which might disrupt instances or
applications using those mounts. To avoid applications getting cut off
abruptly, you might consider unmounting any mounts of the mount target,
if feasible. The operation also deletes the associated network interface.
Uncommitted writes may be lost, but breaking a mount target using this
operation does not corrupt the file system itself.
The file system you created remains.
You can mount an EC2 instance in your VPC via another mount target.
mounttargetid
(string) - ID of the mount target to delete
CLI Example:
.. code-block:: bash
salt 'my-minion' boto_efs.delete_mount_target mounttargetid
"""
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
client.delete_mount_target(MountTargetId=mounttargetid)
def delete_tags(
filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs
):
"""
Deletes the specified tags from a file system.
filesystemid
        (string) - ID of the file system whose tags will be removed.
tags
        (list[string]) - The tag keys to delete from the file system
CLI Example:
.. code-block:: bash
salt 'my-minion' boto_efs.delete_tags
"""
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
client.delete_tags(FileSystemId=filesystemid, Tags=tags)
def get_file_systems(
filesystemid=None,
keyid=None,
key=None,
profile=None,
region=None,
creation_token=None,
**kwargs
):
"""
Get all EFS properties or a specific instance property
if filesystemid is specified
filesystemid
(string) - ID of the file system to retrieve properties
creation_token
(string) - A unique token that identifies an EFS.
            If the file system was created via create_file_system, this would
            either be explicitly passed in or set to name.
You can limit your search with this.
returns
(list[dict]) - list of all elastic file system properties
CLI Example:
.. code-block:: bash
salt 'my-minion' boto_efs.get_file_systems efs-id
"""
result = None
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
if filesystemid and creation_token:
response = client.describe_file_systems(
FileSystemId=filesystemid, CreationToken=creation_token
)
result = response["FileSystems"]
elif filesystemid:
response = client.describe_file_systems(FileSystemId=filesystemid)
result = response["FileSystems"]
elif creation_token:
response = client.describe_file_systems(CreationToken=creation_token)
result = response["FileSystems"]
else:
response = client.describe_file_systems()
result = response["FileSystems"]
while "NextMarker" in response:
response = client.describe_file_systems(Marker=response["NextMarker"])
result.extend(response["FileSystems"])
return result
def get_mount_targets(
filesystemid=None,
mounttargetid=None,
keyid=None,
key=None,
profile=None,
region=None,
**kwargs
):
"""
Get all the EFS mount point properties for a specific filesystemid or
the properties for a specific mounttargetid. One or the other must be
specified
filesystemid
(string) - ID of the file system whose mount targets to list
Must be specified if mounttargetid is not
mounttargetid
(string) - ID of the mount target to have its properties returned
Must be specified if filesystemid is not
returns
(list[dict]) - list of all mount point properties
CLI Example:
.. code-block:: bash
salt 'my-minion' boto_efs.get_mount_targets
"""
result = None
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
if filesystemid:
response = client.describe_mount_targets(FileSystemId=filesystemid)
result = response["MountTargets"]
while "NextMarker" in response:
response = client.describe_mount_targets(
FileSystemId=filesystemid, Marker=response["NextMarker"]
)
result.extend(response["MountTargets"])
elif mounttargetid:
response = client.describe_mount_targets(MountTargetId=mounttargetid)
result = response["MountTargets"]
return result
def get_tags(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs):
"""
Return the tags associated with an EFS instance.
filesystemid
(string) - ID of the file system whose tags to list
returns
(list) - list of tags as key/value pairs
CLI Example:
.. code-block:: bash
salt 'my-minion' boto_efs.get_tags efs-id
"""
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
response = client.describe_tags(FileSystemId=filesystemid)
result = response["Tags"]
while "NextMarker" in response:
response = client.describe_tags(
FileSystemId=filesystemid, Marker=response["NextMarker"]
)
result.extend(response["Tags"])
return result
def set_security_groups(
mounttargetid,
securitygroup,
keyid=None,
key=None,
profile=None,
region=None,
**kwargs
):
"""
Modifies the set of security groups in effect for a mount target
mounttargetid
(string) - ID of the mount target whose security groups will be modified
    securitygroup
(list[string]) - list of no more than 5 VPC security group IDs.
CLI Example:
.. code-block:: bash
salt 'my-minion' boto_efs.set_security_groups my-mount-target-id my-sec-group
"""
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
client.modify_mount_target_security_groups(
MountTargetId=mounttargetid, SecurityGroups=securitygroup
)
|
|
import argparse
import gevent
import os
import signal
from client_management import MessageServerClient
from gevent import Greenlet
from gevent.queue import Queue
from gevent_zeromq import zmq
from JumpScale import j
if not q._init_called:
from JumpScale.core.InitBase import q
# IMPORT INFORMATION
#
# ZMQ::REQ
#
# A socket of type ZMQ::REQ is used by a client to send requests to and
# receive replies from a service. This socket type allows only an
# alternating sequence of send(request) and subsequent recv(reply)
# calls. Each request sent is load-balanced among all services, and each
# reply received is matched with the last issued request.
# When a ZMQ::REQ socket enters an exceptional state due to having reached the
# high water mark for all services, or if there are no services at all, then any
# send() operations on the socket shall block until the exceptional state ends
# or at least one service becomes available for sending; messages are not
# discarded.
# ZMQ::REP
#
# A socket of type ZMQ::REP is used by a service to receive requests from and
# send replies to a client. This socket type allows only an alternating sequence
# of recv(request) and subsequent send(reply) calls. Each request received is
# fair-queued from among all clients, and each reply sent is routed to the
# client that issued the last request.
# When a ZMQ::REP socket enters an exceptional state due to having reached the
# high water mark for a client, then any replies sent to the client in question
# shall be dropped until the exceptional state ends.
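# For illustration, a minimal REQ-side client for this server might look
# like the following sketch (hypothetical, using the same gevent_zeromq
# API imported above; the server answers every request with '1'):
#
#   context = zmq.Context()
#   socket = context.socket(zmq.REQ)
#   socket.connect('tcp://127.0.0.1:7777')
#   socket.send('ping')    # REQ enforces strict send/recv alternation
#   reply = socket.recv()  # blocks until the REP socket replies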
# @Todo: Make a queue for each type of message, so that each type of message can
# have a different way of processing them
# @Todo: Check for performance bottlenecks.
class Stat():
def __init__(self):
self.received = 0
self.processed = 0
self.forwarded = 0
class Stats():
def __init__(self):
self.logs = Stat()
self.signals = Stat()
self.errors = Stat()
self.stats = Stat()
class MessageServer(object):
DEFAULT_PID_FILE = j.system.fs.joinPaths(j.dirs.pidDir, 'message_server.pid')
FORWARD_MESSAGES_BATCH_SIZE = 100
def __init__(self, address, storeLocally, pidFile=None, echo=False):
self._address = 'tcp://%s' % address
self.storeLocally = storeLocally
self._pid = os.getpid()
self._pidFile = pidFile or self.DEFAULT_PID_FILE
self.stats = Stats()
self._socket = None
self._context = None
self.logQueue = Queue()
self.categories = None
self.echo = echo
self.forwardAddresses = []
self.forwardClients = set()
j.core.messagehandler.epoch = 0
def start(self):
print('Starting message server')
for forwardAddress in self.forwardAddresses:
client = MessageServerClient(forwardAddress)
            self.forwardClients.add(client)
self._connect()
self._storePidInPidFile()
gevent.core.signal(signal.SIGHUP, self.stop)
gevent.core.signal(signal.SIGINT, self.stop)
gevent.core.signal(signal.SIGTERM, self.stop)
greenlet = Greenlet(self.receiveMessages)
greenlet.link_exception(self._logGreenletError)
greenlet2 = Greenlet(self.processLogMessages)
greenlet2.link_exception(self._logGreenletError)
greenlet2.start()
greenlet3 = Greenlet(self._timer)
greenlet3.link_exception(self._logGreenletError)
greenlet3.start()
greenlet.start()
storeLocallyStr = str(self.storeLocally)
addresses = [client.address for client in self.forwardClients]
addressesStr = ', '.join(addresses)
print('''\
Message server started
listens on: %s
stores locally: %s
forwards to: %s
pid: %d
pid file: %s''' % (self._address, storeLocallyStr, addressesStr, self._pid, self._pidFile))
# Wait until the log server stops (main greenlet).
try:
greenlet.join()
except KeyboardInterrupt:
# Ignore this error.
pass
def stop(self):
print('Stopping message server')
self._disconnect()
self._removePidFile()
print('Stopped message server')
def _connect(self):
if self._isConnected:
print('Can\'t connect to %s, already connected' % self._address)
return
self._context = zmq.Context(2)
self._socket = self._context.socket(zmq.REP)
self._socket.setsockopt(zmq.LINGER, 0)
self._socket.bind(self._address)
def _disconnect(self):
if not self._isConnected:
print('Can\'t disconnect from %s, already disconnected' % self._address)
return
self._socket.close()
self._context.term()
@property
def _isConnected(self):
if self._context:
return not self._context.closed
else:
return False
@property
def isForwarding(self):
return len(self.forwardClients) > 0
@property
def numberQueuedMessages(self):
        return self.logQueue.qsize()
def forwardLogMessages(self, messages):
for client in self.forwardClients:
            client.send(messages)
def _logGreenletError(self, greenlet):
print(greenlet.exception)
def _timer(self):
while True:
self.epoch = j.base.time.getTimeEpoch()
j.core.messagehandler.epoch = self.epoch
gevent.sleep(0.1)
def processLogMessages(self):
forwardMessages = ""
while True:
gevent.sleep(1)
message = self.logQueue.get()
if self.storeLocally:
j.core.messagehandler.loghandlerdb.save(message)
if self.echo:
dtype, length, epoch, gid, nid, pid, data = j.core.messagehandler.unPackMessage(message)
print(data)
if self.isForwarding:
forwardMessages += message
if len(forwardMessages) > self.FORWARD_MESSAGES_BATCH_SIZE:
self.forwardLogMessages(forwardMessages)
forwardMessages = ""
            if forwardMessages != "" and self.logQueue.empty():
                # Flush any partially filled batch once the queue drains.
                self.forwardLogMessages(forwardMessages)
                forwardMessages = ""
def processErrorMessage(self, message):
dtype, length, epoch, gid, nid, pid, data = j.core.messagehandler.unPackMessage(message)
        print(data)
return
def processSignalMessage(self, message):
return
def processStatusMessages(self, message):
return
    def processAlertMessage(self, message):
return
def receiveMessages(self):
# receive from ZMQ
while True:
message = self._socket.recv()
if message != 'ping':
messageType = j.core.messagehandler.getMessageType(message)
MessageServerMessageType = j.enumerators.MessageServerMessageType
if messageType == 1:
self.logQueue.put(message)
elif messageType == 2:
self.processSignalMessage(message)
elif messageType == 3:
self.processErrorMessage(message)
elif messageType == 4:
                    self.processAlertMessage(message)
elif messageType == 5:
self.processStatusMessages(message)
else:
self.raiseError("Did not recognise messagetype %s for message %s" % (messageType, message))
self._socket.send('1')
def _removePidFile(self, *args):
if j.system.fs.exists(self._pidFile):
pidStr = j.system.fs.fileGetContents(self._pidFile)
pid = int(pidStr)
if pid == self._pid:
j.system.fs.remove(self._pidFile)
def _storePidInPidFile(self):
pidStr = str(self._pid)
j.system.fs.writeFile(self._pidFile, pidStr)
def raiseError(self, msg):
        print(msg)
if __name__ == '__main__':
# Prevent gevent so called zombie processes by calling gevent.shutdown
# when the SIGQUIT event is raised.
gevent.signal(signal.SIGQUIT, gevent.shutdown)
parser = argparse.ArgumentParser(description='Starts the message server.')
parser.add_argument('-a', '--address', default='127.0.0.1:7777',
dest='address', help='Address (ip and port) for this message server')
parser.add_argument('-e', '--echo', action='store_true', dest='echo',
help='Print the log messages when processing them')
parser.add_argument('-f', '--forward-addresses', default=[],
dest='forwardAddresses', nargs='*', help='Addresses (ip and port) to '
'forward the messages to')
parser.add_argument('-p', '--pid-file',
default=MessageServer.DEFAULT_PID_FILE, dest='pidFile',
help='Process identifier file path')
parser.add_argument('-s', '--store-locally', action='store_true',
dest='storeLocally', help='Store messages locally to disk')
args = parser.parse_args()
server = MessageServer(args.address, args.storeLocally, args.pidFile,
args.echo)
server.echo = args.echo
for address in args.forwardAddresses:
server.forwardAddresses.append(address)
server.start()
|
|
# Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import os
import unittest
import numpy as np
from reference_models.dpa import move_list
from reference_models.propagation import wf_itm
from reference_models.tools import entities
from reference_models.tools import testutils
# A Protection point namedtuple as required by input
ProtectionPoint = namedtuple('ProtectionPoint', ['latitude', 'longitude'])
class TestDpa(unittest.TestCase):
def setUp(self):
self.original_itm = wf_itm.CalcItmPropagationLoss
def tearDown(self):
wf_itm.CalcItmPropagationLoss = self.original_itm
def test_movelist_single_grant(self):
np.random.seed(1248)
# Configuring for -144dBm circle at 20km
wf_itm.CalcItmPropagationLoss = testutils.FakePropagationPredictor(
dist_type='REAL', factor=1.0, offset=(144+30-0.1) - 20.0)
point = ProtectionPoint(latitude=36.815, longitude=-76.292)
# Within the move list
grants = entities.ConvertToCbsdGrantInfo(
entities.GenerateCbsdList(
1, template_cbsd=entities.CBSD_TEMPLATE_CAT_A_OUTDOOR,
ref_latitude=36.815, ref_longitude=-76.292,
min_distance_km=19.97, max_distance_km=19.98),
min_freq_mhz=3600,
max_freq_mhz=3610)
move_grants, nbor_grants = move_list.moveListConstraint(
point, 3600e6, 3610e6, grants,
50, 2000, -144, 3, (150, 200, 0, 25))
self.assertListEqual(nbor_grants, grants)
self.assertListEqual(move_grants, grants)
# Outside the move list
grants = entities.ConvertToCbsdGrantInfo(
entities.GenerateCbsdList(
1, template_cbsd=entities.CBSD_TEMPLATE_CAT_A_OUTDOOR,
ref_latitude=36.815, ref_longitude=-76.292,
min_distance_km=20.1, max_distance_km=20.2),
min_freq_mhz=3600,
max_freq_mhz=3610)
move_grants, nbor_grants = move_list.moveListConstraint(
point, 3600e6, 3610e6, grants,
50, 2000, -144, 3, (150, 200, 0, 25))
self.assertListEqual(nbor_grants, grants)
self.assertListEqual(move_grants, [])
def test_movelist_oob_cata(self):
np.random.seed(1248)
# Configuring for -144dBm circle at 20km for OOB power -25dBm
wf_itm.CalcItmPropagationLoss = testutils.FakePropagationPredictor(
dist_type='REAL', factor=1.0, offset=(144+(-25)-0.1) - 20.0)
point = ProtectionPoint(latitude=36.815, longitude=-76.292)
grants = entities.ConvertToCbsdGrantInfo(
entities.GenerateCbsdList(
1, template_cbsd=entities.CBSD_TEMPLATE_CAT_A_OUTDOOR,
ref_latitude=36.815, ref_longitude=-76.292,
min_distance_km=10, max_distance_km=11),
min_freq_mhz=3600,
max_freq_mhz=3610)
move_grants, nbor_grants = move_list.moveListConstraint(
point, 3540e6, 3550e6, grants,
50, 2000, -144, 3, (150, 200, 0, 25))
self.assertListEqual(nbor_grants, [])
self.assertListEqual(move_grants, [])
def test_movelist_oob_catb(self):
np.random.seed(1248)
# Configuring for -144dBm circle at 20km for OOB power -15dBm/10MHz
wf_itm.CalcItmPropagationLoss = testutils.FakePropagationPredictor(
dist_type='REAL', factor=1.0, offset=(144+(-15+8)-0.1) - 20.0)
point = ProtectionPoint(latitude=36.815, longitude=-76.292)
# Within move list for power
grants = entities.ConvertToCbsdGrantInfo(
entities.GenerateCbsdList(
1, template_cbsd=entities.CBSD_TEMPLATE_CAT_B_OMNI,
ref_latitude=36.815, ref_longitude=-76.292,
min_distance_km=19.97, max_distance_km=19.98),
min_freq_mhz=3600,
max_freq_mhz=3610)
move_grants, nbor_grants = move_list.moveListConstraint(
point, 3540e6, 3550e6, grants,
50, 2000, -144, 3, (150, 200, 0, 25))
self.assertListEqual(nbor_grants, grants)
self.assertListEqual(move_grants, grants)
# Outside the move list for power
grants = entities.ConvertToCbsdGrantInfo(
entities.GenerateCbsdList(
1, template_cbsd=entities.CBSD_TEMPLATE_CAT_B_OMNI,
ref_latitude=36.815, ref_longitude=-76.292,
min_distance_km=20.1, max_distance_km=20.2),
min_freq_mhz=3600,
max_freq_mhz=3610)
move_grants, nbor_grants = move_list.moveListConstraint(
point, 3540e6, 3550e6, grants,
50, 2000, -144, 3, (150, 200, 0, 25))
self.assertListEqual(nbor_grants, grants)
self.assertListEqual(move_grants, [])
        # Outside the nbor list for distance
wf_itm.CalcItmPropagationLoss = testutils.FakePropagationPredictor(
dist_type='REAL', factor=1.0, offset=(144+(-15+8)-0.1) - 20.0)
grants = entities.ConvertToCbsdGrantInfo(
entities.GenerateCbsdList(
1, template_cbsd=entities.CBSD_TEMPLATE_CAT_B_OMNI,
ref_latitude=36.815, ref_longitude=-76.292,
min_distance_km=25.1, max_distance_km=25.2),
min_freq_mhz=3600,
max_freq_mhz=3610)
move_grants, nbor_grants = move_list.moveListConstraint(
point, 3540e6, 3550e6, grants,
50, 2000, -144, 3, (150, 200, 0, 25))
self.assertListEqual(nbor_grants, [])
self.assertListEqual(move_grants, [])
def test_movelist_oob_purge_catb(self):
np.random.seed(1248)
# Configuring for -144dBm circle at 20km for OOB power -3dBm/10MHz
wf_itm.CalcItmPropagationLoss = testutils.FakePropagationPredictor(
dist_type='REAL', factor=1.0, offset=(144+(-13+10+8)-0.1) - 20.0)
point = ProtectionPoint(latitude=36.815, longitude=-76.292)
# Within move list for power
grants = entities.ConvertToCbsdGrantInfo(
entities.GenerateCbsdList(
1, template_cbsd=entities.CBSD_TEMPLATE_CAT_B_OMNI,
ref_latitude=36.815, ref_longitude=-76.292,
min_distance_km=19.97, max_distance_km=19.98),
min_freq_mhz=3550,
max_freq_mhz=3570,
chunks_mhz=5)
move_grants, nbor_grants = move_list.moveListConstraint(
point, 3540e6, 3550e6, grants,
50, 2000, -144, 3, (150, 200, 0, 25))
self.assertListEqual(nbor_grants, grants)
self.assertListEqual(move_grants, grants)
# However only using the last 2 would be out of move list
grants = grants[2:]
move_grants, nbor_grants = move_list.moveListConstraint(
point, 3540e6, 3550e6, grants,
50, 2000, -144, 3, (150, 200, 0, 25))
self.assertListEqual(nbor_grants, grants)
self.assertListEqual(move_grants, [])
# Slightly lower than the cutoff power -> none in move list
grants = entities.ConvertToCbsdGrantInfo(
entities.GenerateCbsdList(
1, template_cbsd=entities.CBSD_TEMPLATE_CAT_B_OMNI,
ref_latitude=36.815, ref_longitude=-76.292,
min_distance_km=20.1, max_distance_km=20.2),
min_freq_mhz=3550,
max_freq_mhz=3570,
chunks_mhz=5)
move_grants, nbor_grants = move_list.moveListConstraint(
point, 3540e6, 3550e6, grants,
50, 2000, -144, 3, (150, 200, 0, 25))
self.assertListEqual(nbor_grants, grants)
self.assertListEqual(move_grants, [])
if __name__ == '__main__':
unittest.main()
|
|
import socket
import types
from itertools import count
from Queue import Empty, Queue as _Queue
from kombu.connection import BrokerConnection
from kombu.entity import Exchange, Queue
from kombu.messaging import Consumer, Producer
from kombu.tests.utils import unittest
from kombu.tests.utils import module_exists
# Patch the poller: report a fd as readable whenever the fake socket has buffered data.
from kombu.utils import eventio
class _poll(eventio._select):
def poll(self, timeout):
events = []
for fd in self._rfd:
if fd.data:
events.append((fd.fileno(), eventio.POLL_READ))
return events
eventio.poll = _poll
from kombu.transport import pyredis # must import after poller patch
class ResponseError(Exception):
pass
class Client(object):
queues = {}
sets = {}
def __init__(self, db=None, port=None, **kwargs):
self.port = port
self.db = db
self._called = []
self._connection = None
self.bgsave_raises_ResponseError = False
def bgsave(self):
self._called.append("BGSAVE")
if self.bgsave_raises_ResponseError:
raise ResponseError()
def delete(self, key):
self.queues.pop(key, None)
def sadd(self, key, member):
if key not in self.sets:
self.sets[key] = set()
self.sets[key].add(member)
def smembers(self, key):
return self.sets.get(key, set())
def llen(self, key):
return self.queues[key].qsize()
def lpush(self, key, value):
self.queues[key].put_nowait(value)
def parse_command(self, cmd):
c = cmd.split('\r\n')
c.pop()
c.reverse()
argv = []
argc = int(c.pop().replace('*', ''))
for i in xrange(argc):
c.pop()
argv.append(c.pop())
return argv
def parse_response(self, type, **options):
cmd = self.connection._sock.data.pop()
argv = self.parse_command(cmd)
cmd = argv[0]
queues = argv[1:-1]
assert cmd == type
self.connection._sock.data = []
if type == "BRPOP":
item = self.brpop(queues, 0.001)
if item:
return item
raise Empty()
def brpop(self, keys, timeout=None):
key = keys[0]
try:
item = self.queues[key].get(timeout=timeout)
except Empty:
pass
else:
return key, item
def rpop(self, key):
try:
return self.queues[key].get_nowait()
except KeyError:
pass
def __contains__(self, k):
return k in self._called
def pipeline(self):
return Pipeline(self)
def encode(self, value):
return str(value)
def _new_queue(self, key):
self.queues[key] = _Queue()
class _sconnection(object):
disconnected = False
class _socket(object):
blocking = True
next_fileno = count(30).next
def __init__(self, *args):
self._fileno = self.next_fileno()
self.data = []
def fileno(self):
return self._fileno
def setblocking(self, blocking):
self.blocking = blocking
def __init__(self, client):
self.client = client
self._sock = self._socket()
def disconnect(self):
self.disconnected = True
def send(self, cmd, client):
self._sock.data.append(cmd)
def info(self):
return {"foo": 1}
@property
def connection(self):
if self._connection is None:
self._connection = self._sconnection(self)
return self._connection
class Pipeline(object):
def __init__(self, client):
self.client = client
self.stack = []
def __getattr__(self, key):
if key not in self.__dict__:
def _add(*args, **kwargs):
self.stack.append((getattr(self.client, key), args, kwargs))
return self
return _add
return self.__dict__[key]
def execute(self):
stack = list(self.stack)
self.stack[:] = []
return [fun(*args, **kwargs) for fun, args, kwargs in stack]
class Channel(pyredis.Channel):
def _get_client(self):
return Client
def _get_response_error(self):
return ResponseError
def _new_queue(self, queue, **kwargs):
self.client._new_queue(queue)
class Transport(pyredis.Transport):
Channel = Channel
def _get_errors(self):
return ((KeyError, ), (IndexError, ))
class test_Redis(unittest.TestCase):
def setUp(self):
self.connection = BrokerConnection(transport=Transport)
self.exchange = Exchange("test_Redis", type="direct")
self.queue = Queue("test_Redis", self.exchange, "test_Redis")
def tearDown(self):
self.connection.close()
def test_publish__get(self):
channel = self.connection.channel()
producer = Producer(channel, self.exchange, routing_key="test_Redis")
self.queue(channel).declare()
producer.publish({"hello": "world"})
self.assertDictEqual(self.queue(channel).get().payload,
{"hello": "world"})
self.assertIsNone(self.queue(channel).get())
self.assertIsNone(self.queue(channel).get())
self.assertIsNone(self.queue(channel).get())
def test_publish__consume(self):
connection = BrokerConnection(transport=Transport)
channel = connection.channel()
producer = Producer(channel, self.exchange, routing_key="test_Redis")
consumer = Consumer(channel, self.queue)
producer.publish({"hello2": "world2"})
_received = []
def callback(message_data, message):
_received.append(message_data)
message.ack()
consumer.register_callback(callback)
consumer.consume()
self.assertIn(channel, channel.connection.cycle._channels)
try:
connection.drain_events(timeout=1)
self.assertTrue(_received)
self.assertRaises(socket.timeout,
connection.drain_events, timeout=0.01)
finally:
channel.close()
def test_purge(self):
channel = self.connection.channel()
producer = Producer(channel, self.exchange, routing_key="test_Redis")
self.queue(channel).declare()
for i in range(10):
producer.publish({"hello": "world-%s" % (i, )})
self.assertEqual(channel._size("test_Redis"), 10)
self.assertEqual(self.queue(channel).purge(), 10)
channel.close()
def test_db_values(self):
c1 = BrokerConnection(virtual_host=1,
transport=Transport).channel()
self.assertEqual(c1.client.db, 1)
c2 = BrokerConnection(virtual_host="1",
transport=Transport).channel()
self.assertEqual(c2.client.db, 1)
c3 = BrokerConnection(virtual_host="/1",
transport=Transport).channel()
self.assertEqual(c3.client.db, 1)
        self.assertRaises(Exception,
                          BrokerConnection(virtual_host="/foo",
                                           transport=Transport).channel)
def test_db_port(self):
c1 = BrokerConnection(port=None, transport=Transport).channel()
self.assertEqual(c1.client.port, Transport.default_port)
c1.close()
c2 = BrokerConnection(port=9999, transport=Transport).channel()
self.assertEqual(c2.client.port, 9999)
c2.close()
def test_close_poller_not_active(self):
c = BrokerConnection(transport=Transport).channel()
c.client.connection
c.close()
self.assertNotIn(c, c.connection.cycle._channels)
def test_close_ResponseError(self):
c = BrokerConnection(transport=Transport).channel()
c.client.bgsave_raises_ResponseError = True
c.close()
def test_close_disconnects(self):
c = BrokerConnection(transport=Transport).channel()
conn1 = c.client.connection
conn2 = c.subclient.connection
c.close()
self.assertTrue(conn1.disconnected)
self.assertTrue(conn2.disconnected)
def test_get__Empty(self):
channel = self.connection.channel()
self.assertRaises(Empty, channel._get, "does-not-exist")
channel.close()
def test_get_client(self):
redis, exceptions = _redis_modules()
@module_exists(redis, exceptions)
def _do_test():
conn = BrokerConnection(transport=Transport)
chan = conn.channel()
self.assertTrue(chan.Client)
self.assertTrue(chan.ResponseError)
self.assertTrue(conn.transport.connection_errors)
self.assertTrue(conn.transport.channel_errors)
_do_test()
def _redis_modules():
class ConnectionError(Exception):
pass
class AuthenticationError(Exception):
pass
class InvalidData(Exception):
pass
class InvalidResponse(Exception):
pass
class ResponseError(Exception):
pass
exceptions = types.ModuleType("redis.exceptions")
exceptions.ConnectionError = ConnectionError
exceptions.AuthenticationError = AuthenticationError
exceptions.InvalidData = InvalidData
exceptions.InvalidResponse = InvalidResponse
exceptions.ResponseError = ResponseError
class Redis(object):
pass
redis = types.ModuleType("redis")
redis.exceptions = exceptions
redis.Redis = Redis
return redis, exceptions
|
|
# Microsoft Azure Linux Agent
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
import crypt
import random
import string
import struct
import xml.dom.minidom as minidom
import sys
from distutils.version import LooseVersion
def parse_doc(xml_text):
"""
Parse xml document from string
"""
#The minidom lib has some issue with unicode in python2.
#Encode the string into utf-8 first
xml_text = xml_text.encode('utf-8')
return minidom.parseString(xml_text)
def findall(root, tag, namespace=None):
"""
Get all nodes by tag and namespace under Node root.
"""
if root is None:
return []
if namespace is None:
return root.getElementsByTagName(tag)
else:
return root.getElementsByTagNameNS(namespace, tag)
def find(root, tag, namespace=None):
"""
Get first node by tag and namespace under Node root.
"""
nodes = findall(root, tag, namespace=namespace)
if nodes is not None and len(nodes) >= 1:
return nodes[0]
else:
return None
def gettext(node):
"""
Get node text
"""
if node is None:
return None
for child in node.childNodes:
if child.nodeType == child.TEXT_NODE:
return child.data
return None
def findtext(root, tag, namespace=None):
"""
Get text of node by tag and namespace under Node root.
"""
node = find(root, tag, namespace=namespace)
return gettext(node)
def getattrib(node, attr_name):
"""
Get attribute of xml node
"""
if node is not None:
return node.getAttribute(attr_name)
else:
return None
def unpack(buf, offset, range):
"""
    Unpack bytes of buf at offset into an integer, consuming the byte
    indexes listed in 'range' from most significant to least.
"""
result = 0
for i in range:
result = (result << 8) | str_to_ord(buf[offset + i])
return result
def unpack_little_endian(buf, offset, length):
"""
Unpack little endian bytes into python values.
"""
return unpack(buf, offset, list(range(length - 1, -1, -1)))
def unpack_big_endian(buf, offset, length):
"""
Unpack big endian bytes into python values.
"""
return unpack(buf, offset, list(range(0, length)))
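# Worked example: for buf = b'\x01\x02', unpack_big_endian(buf, 0, 2)
# returns (1 << 8) | 2 == 258, while unpack_little_endian(buf, 0, 2)
# returns (2 << 8) | 1 == 513.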
def hex_dump3(buf, offset, length):
"""
Dump range of buf in formatted hex.
"""
return ''.join(['%02X' % str_to_ord(char) for char in buf[offset:offset + length]])
def hex_dump2(buf):
"""
Dump buf in formatted hex.
"""
return hex_dump3(buf, 0, len(buf))
def is_in_range(a, low, high):
    """
    Return True if 'low' <= a <= 'high'
    """
    return low <= a <= high
def is_printable(ch):
"""
Return True if character is displayable.
"""
return (is_in_range(ch, str_to_ord('A'), str_to_ord('Z'))
or is_in_range(ch, str_to_ord('a'), str_to_ord('z'))
or is_in_range(ch, str_to_ord('0'), str_to_ord('9')))
def hex_dump(buffer, size):
"""
    Return a hex formatted dump of 'buffer' up to 'size' bytes.
"""
if size < 0:
size = len(buffer)
result = ""
for i in range(0, size):
if (i % 16) == 0:
result += "%06X: " % i
byte = buffer[i]
if type(byte) == str:
byte = ord(byte.decode('latin1'))
result += "%02X " % byte
if (i & 15) == 7:
result += " "
if ((i + 1) % 16) == 0 or (i + 1) == size:
j = i
while ((j + 1) % 16) != 0:
result += " "
if (j & 7) == 7:
result += " "
j += 1
result += " "
for j in range(i - (i % 16), i + 1):
byte=buffer[j]
if type(byte) == str:
byte = str_to_ord(byte.decode('latin1'))
k = '.'
if is_printable(byte):
k = chr(byte)
result += k
if (i + 1) != size:
result += "\n"
return result
def str_to_ord(a):
"""
Allows indexing into a string or an array of integers transparently.
Generic utility function.
"""
if type(a) == type(b'') or type(a) == type(u''):
a = ord(a)
return a
def compare_bytes(a, b, start, length):
for offset in range(start, start + length):
if str_to_ord(a[offset]) != str_to_ord(b[offset]):
return False
return True
def int_to_ip4_addr(a):
"""
    Convert a 32-bit integer to a dotted-quad IPv4 address string.
"""
return "%u.%u.%u.%u" % ((a >> 24) & 0xFF,
(a >> 16) & 0xFF,
(a >> 8) & 0xFF,
(a) & 0xFF)
def hexstr_to_bytearray(a):
"""
    Pack a hex string into a bytes object (two hex digits per byte).
"""
b = b""
for c in range(0, len(a) // 2):
b += struct.pack("B", int(a[c * 2:c * 2 + 2], 16))
return b
def set_ssh_config(config, name, val):
    notfound = True
    for i in range(0, len(config)):
        if config[i].startswith(name):
            config[i] = "{0} {1}".format(name, val)
            notfound = False
        elif config[i].startswith("Match"):
            # The Match block must stay at the end of the sshd config,
            # so a new directive is inserted just before it.
            break
    else:
        # No Match block found (or empty config): append at the end.
        i = len(config)
    if notfound:
        config.insert(i, "{0} {1}".format(name, val))
    return config
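# Usage sketch: set_ssh_config(["PasswordAuthentication yes"],
# "PasswordAuthentication", "no") rewrites the existing directive in place;
# a directive that is absent is inserted before any "Match" block, or
# appended when no Match block exists.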
def remove_bom(c):
    """
    Remove a UTF-8 byte order mark (three leading bytes, each > 128)
    from the start of c, if present.
    """
    if len(c) >= 3 and str_to_ord(c[0]) > 128 and \
            str_to_ord(c[1]) > 128 and \
            str_to_ord(c[2]) > 128:
        c = c[3:]
    return c
def gen_password_hash(password, crypt_id, salt_len):
collection = string.ascii_letters + string.digits
salt = ''.join(random.choice(collection) for _ in range(salt_len))
salt = "${0}${1}".format(crypt_id, salt)
return crypt.crypt(password, salt)
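# Example: gen_password_hash("secret", 6, 10) builds a salt of the form
# "$6$<10 random characters>" and returns the matching SHA-512 crypt(3)
# hash (crypt id 6 selects SHA-512).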
def get_bytes_from_pem(pem_str):
base64_bytes = ""
for line in pem_str.split('\n'):
if "----" not in line:
base64_bytes += line
return base64_bytes
Version = LooseVersion
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
import binascii
import logging
import struct
from gdbstubs.gdbstub import GdbStub
logger = logging.getLogger("gdbstub")
class RegNum():
# Matches the enum amd64_regnum in GDB
RAX = 0
RBX = 1
RCX = 2
RDX = 3
RSI = 4
RDI = 5
RBP = 6
RSP = 7
R8 = 8
R9 = 9
R10 = 10
R11 = 11
R12 = 12
R13 = 13
R14 = 14
R15 = 15
RIP = 16
EFLAGS = 17
CS = 18
SS = 19
DS = 20
ES = 21
FS = 22
GS = 23
FS_BASE = 24
GS_BASE = 25
K_GS_BASE = 26
class ExceptionVectors():
# Matches arch/x86/include/kernel_arch_data.h
IV_DIVIDE_ERROR = 0
IV_DEBUG = 1
IV_NON_MASKABLE_INTERRUPT = 2
IV_BREAKPOINT = 3
IV_OVERFLOW = 4
IV_BOUND_RANGE = 5
IV_INVALID_OPCODE = 6
IV_DEVICE_NOT_AVAILABLE = 7
IV_DOUBLE_FAULT = 8
IV_COPROC_SEGMENT_OVERRUN = 9
IV_INVALID_TSS = 10
IV_SEGMENT_NOT_PRESENT = 11
IV_STACK_FAULT = 12
IV_GENERAL_PROTECTION = 13
IV_PAGE_FAULT = 14
IV_RESERVED = 15
IV_X87_FPU_FP_ERROR = 16
IV_ALIGNMENT_CHECK = 17
IV_MACHINE_CHECK = 18
IV_SIMD_FP = 19
IV_VIRT_EXCEPTION = 20
IV_SECURITY_EXCEPTION = 30
class GdbStub_x86_64(GdbStub):
GDB_SIGNAL_DEFAULT = 7
# Mapping is from GDB's gdb/i386-stubs.c
GDB_SIGNAL_MAPPING = {
ExceptionVectors.IV_DIVIDE_ERROR: 8,
ExceptionVectors.IV_DEBUG: 5,
ExceptionVectors.IV_BREAKPOINT: 5,
ExceptionVectors.IV_OVERFLOW: 16,
ExceptionVectors.IV_BOUND_RANGE: 16,
ExceptionVectors.IV_INVALID_OPCODE: 4,
ExceptionVectors.IV_DEVICE_NOT_AVAILABLE: 8,
ExceptionVectors.IV_DOUBLE_FAULT: 7,
ExceptionVectors.IV_COPROC_SEGMENT_OVERRUN: 11,
ExceptionVectors.IV_INVALID_TSS: 11,
ExceptionVectors.IV_SEGMENT_NOT_PRESENT: 11,
ExceptionVectors.IV_STACK_FAULT: 11,
ExceptionVectors.IV_GENERAL_PROTECTION: 11,
ExceptionVectors.IV_PAGE_FAULT: 11,
ExceptionVectors.IV_X87_FPU_FP_ERROR: 7,
}
GDB_G_PKT_NUM_REGS = 34
GDB_32BIT_REGS = {
RegNum.EFLAGS,
RegNum.CS,
RegNum.SS,
RegNum.DS,
RegNum.ES,
RegNum.FS,
RegNum.GS,
}
def __init__(self, logfile, elffile):
super().__init__(logfile=logfile, elffile=elffile)
self.registers = None
self.exception_vector = None
self.exception_code = None
self.gdb_signal = self.GDB_SIGNAL_DEFAULT
self.parse_arch_data_block()
self.compute_signal()
def parse_arch_data_block(self):
arch_data_blk = self.logfile.get_arch_data()['data']
arch_data_blk_struct = "<QQQQQQQQQQQQQQQQQQQQQQ"
cfg_exception_debug = True
if len(arch_data_blk) != struct.calcsize(arch_data_blk_struct):
# There are fewer registers dumped
# when CONFIG_EXCEPTION_DEBUG=n
arch_data_blk_struct = "<QQQQQQQQQQQQQQQQQ"
cfg_exception_debug = False
tu = struct.unpack(arch_data_blk_struct, arch_data_blk)
self.registers = dict()
self.exception_vector = tu[0]
self.exception_code = tu[1]
self.registers[RegNum.RAX] = tu[2]
self.registers[RegNum.RCX] = tu[3]
self.registers[RegNum.RDX] = tu[4]
self.registers[RegNum.RSI] = tu[5]
self.registers[RegNum.RDI] = tu[6]
self.registers[RegNum.RSP] = tu[7]
self.registers[RegNum.R8 ] = tu[8]
self.registers[RegNum.R9 ] = tu[9]
self.registers[RegNum.R10] = tu[10]
self.registers[RegNum.R11] = tu[11]
self.registers[RegNum.RIP] = tu[12]
self.registers[RegNum.EFLAGS] = tu[13]
self.registers[RegNum.CS] = tu[14]
self.registers[RegNum.SS] = tu[15]
self.registers[RegNum.RBP] = tu[16]
if cfg_exception_debug:
self.registers[RegNum.RBX] = tu[17]
self.registers[RegNum.R12] = tu[18]
self.registers[RegNum.R13] = tu[19]
self.registers[RegNum.R14] = tu[20]
self.registers[RegNum.R15] = tu[21]
def compute_signal(self):
sig = self.GDB_SIGNAL_DEFAULT
vector = self.exception_vector
if vector is None:
sig = self.GDB_SIGNAL_DEFAULT
# Map vector number to GDB signal number
if vector in self.GDB_SIGNAL_MAPPING:
sig = self.GDB_SIGNAL_MAPPING[vector]
self.gdb_signal = sig
def handle_register_group_read_packet(self):
idx = 0
pkt = b''
while idx < self.GDB_G_PKT_NUM_REGS:
if idx in self.GDB_32BIT_REGS:
reg_fmt = "<I"
reg_bytes = 4
else:
reg_fmt = "<Q"
reg_bytes = 8
if idx in self.registers:
bval = struct.pack(reg_fmt, self.registers[idx])
pkt += binascii.hexlify(bval)
else:
# Register not in coredump -> unknown value
# Send in "xxxxxxxx"
pkt += b'x' * (reg_bytes * 2)
idx += 1
self.put_gdb_packet(pkt)
def handle_register_single_read_packet(self, pkt):
# Mark registers as "<unavailable>".
# 'p' packets are usually used for registers
# other than the general ones (e.g. eax, ebx)
# so we can safely reply "xxxxxxxx" here.
self.put_gdb_packet(b'x' * 16)
|
|
import pytest
import os
class TestDOCS(object):
def setup_method(self, method):
import numpy as np
from hyperion.model import ModelOutput
from hyperion.util.constants import kpc
from fluxcompensator.cube import *
# read in from HYPERION
m = ModelOutput(os.path.join(os.path.dirname(__file__), 'hyperion_output.rtout'))
array = m.get_image(group=0, inclination=0, distance=10*kpc, units='ergs/cm^2/s')
# initial FluxCompensator array
self.FC_object = SyntheticCube(input_array=array, unit_out='ergs/cm^2/s', name='test_cube')
def test_docs_cube(self):
# plot c.val at 60 microns
self.FC_object.plot_image(name='init', wav_interest=60., set_cut=(1e-14, 9.4e-13),
single_cut=None, multi_cut=None, dpi=300)
def test_docs_sed(self):
import numpy as np
from hyperion.model import ModelOutput
from hyperion.util.constants import pc
from fluxcompensator.sed import *
# read in from HYPERION
m = ModelOutput(os.path.join(os.path.dirname(__file__), 'B5_class2_45.rtout'))
array = m.get_sed(group=0, inclination=0, distance=300*pc,
units='ergs/cm^2/s')
# initial FluxCompensator array
s = SyntheticSED(input_array=array, unit_out='ergs/cm^2/s', name='test_sed')
def test_docs_extinction(self):
# dered with provided extinction law
ext = self.FC_object.extinction(A_v=20.)
# plot ext.val (3D) at 60 microns
ext.plot_image(name='ext', wav_interest=60., set_cut=(1e-14, 9.4e-13),
single_cut=None, multi_cut=None, dpi=300)
def test_docs_resolution(self):
# change resolution
zoom = self.FC_object.change_resolution(new_resolution=6., grid_plot=True)
# plot zoom.val (3D) at 60 microns
zoom.plot_image(name='zoom', wav_interest=60., set_cut=(1e-13, 9.4e-12),
single_cut=None, multi_cut=None, dpi=300)
def test_docs_psf_file(self):
from fluxcompensator.psf import FilePSF
# create PSF from my own file
file_psf = FilePSF(psf_file=os.path.join(os.path.dirname(__file__), 'mips_24_100K.fits'), oversampled=5)
def test_docs_psf_file_database(self):
from fluxcompensator.psf import FilePSF
# create PSF from my own file
file_psf = FilePSF(psf_file=os.path.join(os.path.dirname(__file__), 'mips_24_100K.fits'), oversampled=5)
import fluxcompensator.database.missions as PSFs
# call object from the psf database
psf_object = getattr(PSFs, 'PACS1_PSF')
# convolve with PSF
psf = self.FC_object.convolve_psf(psf_object)
# psf_object is FilePSF
# plot psf.val (3D) at 60 microns
psf.plot_image(name='psf_file', wav_interest=60., set_cut=(1e-14, 9.4e-13),
single_cut=None, multi_cut=None, dpi=300)
def test_docs_psf_function(self):
import numpy as np
        def my_psf(X, Y, wavelength):
# resolution in rad/pixel
resolution = self.FC_object.FOV[0] / self.FC_object.distance / self.FC_object.pixel[0]
# telescope diameter in cm
D_telescope = 350.
# standard deviation in pixel
sigma = 0.44 * wavelength / D_telescope / resolution
Z = np.exp(-(X**2 + Y**2)/(2 * sigma**2))
return Z
from fluxcompensator.psf import FunctionPSF
# create PSF from a Function
psf_object = FunctionPSF(psf_function=my_psf, width=32)
# convolve with PSF
psf = self.FC_object.convolve_psf(psf_object)
# psf_object is FunctionPSF
# plot psf.val (3D) at 60 microns
psf.plot_image(name='psf_func', wav_interest=60., set_cut=(1e-14, 9.4e-13),
single_cut=None, multi_cut=None, dpi=300)
def test_docs_psf_gaussian(self):
from fluxcompensator.psf import GaussianPSF
# create Gaussian PSF
psf_object = GaussianPSF(diameter=350.)
# convolve with PSF
psf = self.FC_object.convolve_psf(psf_object)
# psf_object is GaussianPSF
# plot psf.val (3D) at 60 microns
psf.plot_image(name='psf_gauss', wav_interest=60., set_cut=(1e-14,
9.4e-13), single_cut=None, multi_cut=None, dpi=300)
def test_docs_filter_database(self):
import fluxcompensator.database.missions as filters
# call object from the filter database
filter_input = getattr(filters, 'PACS1_FILTER')
# convolve with filter object
filtered = self.FC_object.convolve_filter(filter_input, plot_rebin=None,
plot_rebin_dpi=None)
# plot filtered.val (3D)
filtered.plot_image(name='filter', set_cut=(1e-14, 9.4e-13),
single_cut=None, multi_cut=None, dpi=300)
def test_docs_filter_database_plot_rebin(self):
import fluxcompensator.database.missions as filters
# call object from the filter database
filter_input = getattr(filters, 'PACS1_FILTER')
# convolve with filter object
filtered = self.FC_object.convolve_filter(filter_input, plot_rebin=True, plot_rebin_dpi=300)
def test_docs_filter_read(self):
import fluxcompensator.database.missions as filters
# call object from the filter database
filter_input_database = getattr(filters, 'IRAC1_FILTER')
from fluxcompensator.filter import Filter
# create own filter object
filter_input = Filter(name='my_IRAC1', filter_file='IRAC1.txt',
waf_0=3.550, alpha=1, beta=0)
assert filter_input_database.waf_0 == filter_input.waf_0
def test_docs_noise(self):
# add noise
noise = self.FC_object.add_noise(mu_noise=0, sigma_noise=1e-13, seed=2, diagnostics=None)
# plot noise.val at 60 microns
noise.plot_image(name='noise', wav_interest=60., set_cut=(1e-14, 9.4e-13),
single_cut=None, multi_cut=None, dpi=300)
def test_docs_example(self):
import numpy as np
from hyperion.model import ModelOutput
from hyperion.util.constants import pc
from fluxcompensator.cube import *
from fluxcompensator.psf import *
from fluxcompensator.utils.resolution import *
# read in from HYPERION
m = ModelOutput(os.path.join(os.path.dirname(__file__), 'hyperion_output.rtout'))
array = m.get_image(group=0, inclination=0, distance=300*pc,
units='ergs/cm^2/s')
# initial FluxCompensator array
c = SyntheticCube(input_array=array, unit_out='ergs/cm^2/s',
name='test_cube')
# dered with provided extinction law
ext = c.extinction(A_v=20.)
# change resolution to 10-times of the initial
zoom = ext.change_resolution(new_resolution=10*ext.resolution['arcsec'],
grid_plot=True)
import fluxcompensator.database.missions as PSFs
# call object from the psf database
psf_object = getattr(PSFs, 'PACS1_PSF')
# convolve with PSF
psf = zoom.convolve_psf(psf_object)
import fluxcompensator.database.missions as filters
# call object from the filter database
filter_input = getattr(filters, 'PACS1_FILTER')
# convolve with filter
filtered = psf.convolve_filter(filter_input, plot_rebin=None,
plot_rebin_dpi=None)
# add noise
noise = filtered.add_noise(mu_noise=0, sigma_noise=5e-15, diagnostics=None)
def test_docs_output_rough(self):
# collapse 3D cube to rough SED
rough = self.FC_object.get_rough_sed()
def test_docs_output_total(self):
# collapse FC_object.val (3D or 1D) within 30 and 60 microns
FC_object_tot = self.FC_object.get_total_val(wav_1=30., wav_2=60.)
# convert to magnitudes
FC_object_mag = FC_object_tot.magnitudes(zero_point=64.9)
def test_docs_output_image_plot(self):
# plot FC_object.val (3D) at 60 microns with default naming
self.FC_object.plot_image(name='plot', wav_interest=60., set_cut=(1e-14,
9.4e-13), single_cut=80., multi_cut=True, dpi=300)
# plot FC_object.val (3D) at 60 microns with prefix
self.FC_object.plot_image(prefix='prefix1', wav_interest=60., multi_cut=True,
dpi=300)
self.FC_object.plot_image(prefix='prefix2', wav_interest=60., set_cut=(1e-14,
9.4e-13), dpi=300)
self.FC_object.plot_image(prefix='prefix3', wav_interest=60., single_cut=80.,
dpi=300)
def test_docs_output_sed_plot(self):
# collapse 3D cube to rough SED
s = self.FC_object.get_rough_sed()
        print(s.val)
import fluxcompensator.database.missions as filters
# call object from the filter database
filter_input = getattr(filters, 'PACS1_FILTER')
# convolve with filter object
filtered = self.FC_object.convolve_filter(filter_input, plot_rebin=None,
plot_rebin_dpi=None)
# collapse filtered.val
f = filtered.get_total_val()
# plot c_tot.val with c_sed.val in one plot
f.plot_sed_filter(wav_sed=s.wav, val_sed=s.val, ymin=1.e-5, dpi=300)
def test_docs_tutorial_multi_filter(self):
import numpy as np
# collapse 3D cube to rough SED
s = self.FC_object.get_rough_sed()
import fluxcompensator.database.missions as filters
# empty arrays for storage
val_array = np.array(())
wav_array = np.array(())
filter_array = np.array(())
for loop_filter in ['J_2MASS', 'H_2MASS', 'K_2MASS', 'IRAS1', 'IRAS2',
'IRAS3', 'IRAS4','IRAC1', 'IRAC2', 'IRAC3', 'IRAC4',
'MIPS1', 'MIPS2', 'MIPS3','WISE1', 'WISE2', 'WISE3',
'WISE4', 'PACS1', 'PACS2', 'PACS3', 'SPIRE1', 'SPIRE2',
'SPIRE3']:
# call object from the filter database
filter_input = getattr(filters, loop_filter + '_FILTER')
# convolve with filter object
filtered = self.FC_object.convolve_filter(filter_input, plot_rebin=None,
plot_rebin_dpi=None)
# collapse FC_object.val
f = filtered.get_total_val()
# store f.val, f.wav and filter_input in arrays
# for plot_sed_multi_filter()
val_array = np.append(val_array, f.val)
wav_array = np.append(wav_array, f.wav)
filter_array = np.append(filter_array, f.filter['name'] + '_FILTER_PLOT')
# plot all filters in loop with f.val and s.val
s.plot_sed_multi_filter(multi_filter_val=val_array,
multi_filter_wav=wav_array, names=filter_array,
ymin=1e-5, filter_label_size=None, dpi=300)
def test_docs_database(self):
# telescopes
from fluxcompensator.database.missions import *
assert wise.diameter == 40.
from fluxcompensator.database.missions import wise
assert wise.diameter == 40.
# PSFs
from fluxcompensator.database.missions import *
assert IRAC4_PSF_RESOLUTION == 1.22
assert IRAC4_ZERO == 64.9
from fluxcompensator.database.missions import irac
assert irac.IRAC4_PSF_RESOLUTION == 1.22
assert irac.IRAC4_ZERO == 64.9
from fluxcompensator.database.missions import spitzer
assert spitzer.IRAC4_PSF_RESOLUTION == 1.22
assert spitzer.IRAC4_ZERO == 64.9
# Access objects from database
import fluxcompensator.database.missions as filters
filter_object = getattr(filters, 'IRAC4_FILTER')
import fluxcompensator.database.missions as PSFs
psf_object = getattr(PSFs, 'IRAC4_PSF')
import fluxcompensator.database.missions as Plots
plot_filter = getattr(Plots, 'IRAC4_FILTER_PLOT')
# Access attributes from objects in database
from fluxcompensator.database.missions import *
# name of Filter object
assert IRAC4_FILTER.name == 'IRAC4'
# central wavelength in microns
assert IRAC4_FILTER.waf_0 == 7.872
# power law exponent alpha
assert IRAC4_FILTER.alpha == 1.0
# power law exponent beta
assert IRAC4_FILTER.beta == 0.0
|
|
"""Opcode definitions for both Python 2 and Python 3."""
# We define all-uppercase classes, to match their opcode names:
# pylint: disable=invalid-name
HAS_CONST = 1 # references the constant table
HAS_NAME = 2 # references the name table
HAS_JREL = 4 # relative jump
HAS_JABS = 8 # absolute jump
HAS_JUNKNOWN = 16 # jumps to unknown location
HAS_LOCAL = 32 # references the varnames table
HAS_FREE = 64 # references "free variable" cells
HAS_NARGS = 128 # stores number of args + kwargs
HAS_ARGUMENT = 256 # all opcodes >= 90
NO_NEXT = 512 # doesn't execute the following opcode
STORE_JUMP = 1024 # only stores a jump, doesn't actually execute it
PUSHES_BLOCK = 2048 # starts a block (while, try, finally, with, etc.)
POPS_BLOCK = 4096 # ends a block
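# For example, a relative conditional jump would typically declare
# FLAGS = HAS_JREL | HAS_ARGUMENT, and an unconditional jump would add
# NO_NEXT, since execution never falls through to the following opcode.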
class Opcode(object):
"""An opcode without arguments."""
__slots__ = ("line", "index", "prev", "next", "target", "block_target")
FLAGS = 0
def __init__(self, index, line):
self.index = index
self.line = line
self.target = None
def __str__(self):
return "%4d: %s" % (self.index, self.__class__.__name__)
def __repr__(self):
return self.__class__.__name__
@property
def name(self):
return self.__class__.__name__
@classmethod
def has_const(cls):
return bool(cls.FLAGS & HAS_CONST)
@classmethod
def has_name(cls):
return bool(cls.FLAGS & HAS_NAME)
@classmethod
def has_jrel(cls):
return bool(cls.FLAGS & HAS_JREL)
@classmethod
def has_jabs(cls):
return bool(cls.FLAGS & HAS_JABS)
@classmethod
def has_junknown(cls):
return bool(cls.FLAGS & HAS_JUNKNOWN)
@classmethod
def has_jump(cls):
return bool(cls.FLAGS & (HAS_JREL | HAS_JABS | HAS_JUNKNOWN))
@classmethod
def has_local(cls):
return bool(cls.FLAGS & HAS_LOCAL)
@classmethod
def has_free(cls):
return bool(cls.FLAGS & HAS_FREE)
@classmethod
def has_nargs(cls):
return bool(cls.FLAGS & HAS_NARGS)
@classmethod
def has_argument(cls):
return bool(cls.FLAGS & HAS_ARGUMENT)
@classmethod
def no_next(cls):
return bool(cls.FLAGS & NO_NEXT)
@classmethod
def carry_on_to_next(cls):
return not cls.FLAGS & NO_NEXT
@classmethod
def store_jump(cls):
return bool(cls.FLAGS & STORE_JUMP)
@classmethod
def does_jump(cls):
return cls.has_jump() and not cls.store_jump()
@classmethod
def pushes_block(cls):
return bool(cls.FLAGS & PUSHES_BLOCK)
@classmethod
def pops_block(cls):
return bool(cls.FLAGS & POPS_BLOCK)
@classmethod
def has_arg(cls):
return False
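# Illustrative sketch (added for exposition, not part of the original module):
# the classmethods above are thin bit-tests against FLAGS. For example,
# LOAD_CONST (defined below) sets HAS_ARGUMENT|HAS_CONST, so:
#
#   LOAD_CONST.has_const()     # True  - its argument indexes co_consts
#   LOAD_CONST.has_argument()  # True
#   LOAD_CONST.has_jump()      # False - none of the jump bits are set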
class OpcodeWithArg(Opcode):
"""An opcode with one argument."""
__slots__ = ("arg", "pretty_arg")
def __init__(self, index, line, arg, pretty_arg=None):
super(OpcodeWithArg, self).__init__(index, line)
self.arg = arg
self.pretty_arg = pretty_arg
def __str__(self):
return "%4d: %s %s" % (self.index, self.__class__.__name__, self.arg)
@classmethod
def has_arg(cls):
return True
class STOP_CODE(Opcode):
__slots__ = ()
FLAGS = NO_NEXT
class POP_TOP(Opcode):
__slots__ = ()
class ROT_TWO(Opcode):
__slots__ = ()
class ROT_THREE(Opcode):
__slots__ = ()
class DUP_TOP(Opcode):
__slots__ = ()
class ROT_FOUR(Opcode):
__slots__ = ()
class DUP_TOP_TWO(Opcode):
__slots__ = ()
class NOP(Opcode):
__slots__ = ()
class UNARY_POSITIVE(Opcode):
__slots__ = ()
class UNARY_NEGATIVE(Opcode):
__slots__ = ()
class UNARY_NOT(Opcode):
__slots__ = ()
class UNARY_CONVERT(Opcode):
__slots__ = ()
class UNARY_INVERT(Opcode):
__slots__ = ()
class BINARY_POWER(Opcode):
__slots__ = ()
class BINARY_MULTIPLY(Opcode):
__slots__ = ()
class BINARY_DIVIDE(Opcode):
__slots__ = ()
class BINARY_MODULO(Opcode):
__slots__ = ()
class BINARY_ADD(Opcode):
__slots__ = ()
class BINARY_SUBTRACT(Opcode):
__slots__ = ()
class BINARY_SUBSCR(Opcode):
__slots__ = ()
class BINARY_FLOOR_DIVIDE(Opcode):
__slots__ = ()
class BINARY_TRUE_DIVIDE(Opcode):
__slots__ = ()
class INPLACE_FLOOR_DIVIDE(Opcode):
__slots__ = ()
class INPLACE_TRUE_DIVIDE(Opcode):
__slots__ = ()
class SLICE_0(Opcode):
__slots__ = ()
class SLICE_1(Opcode):
__slots__ = ()
class SLICE_2(Opcode):
__slots__ = ()
class SLICE_3(Opcode):
__slots__ = ()
class STORE_SLICE_0(Opcode):
__slots__ = ()
class STORE_SLICE_1(Opcode):
__slots__ = ()
class STORE_SLICE_2(Opcode):
__slots__ = ()
class STORE_SLICE_3(Opcode):
__slots__ = ()
class DELETE_SLICE_0(Opcode):
__slots__ = ()
class DELETE_SLICE_1(Opcode):
__slots__ = ()
class DELETE_SLICE_2(Opcode):
__slots__ = ()
class DELETE_SLICE_3(Opcode):
__slots__ = ()
class STORE_MAP(Opcode):
__slots__ = ()
class INPLACE_ADD(Opcode):
__slots__ = ()
class INPLACE_SUBTRACT(Opcode):
__slots__ = ()
class INPLACE_MULTIPLY(Opcode):
__slots__ = ()
class INPLACE_DIVIDE(Opcode):
__slots__ = ()
class INPLACE_MODULO(Opcode):
__slots__ = ()
class STORE_SUBSCR(Opcode):
__slots__ = ()
class DELETE_SUBSCR(Opcode):
__slots__ = ()
class BINARY_LSHIFT(Opcode):
__slots__ = ()
class BINARY_RSHIFT(Opcode):
__slots__ = ()
class BINARY_AND(Opcode):
__slots__ = ()
class BINARY_XOR(Opcode):
__slots__ = ()
class BINARY_OR(Opcode):
__slots__ = ()
class INPLACE_POWER(Opcode):
__slots__ = ()
class GET_ITER(Opcode):
__slots__ = ()
class STORE_LOCALS(Opcode):
__slots__ = ()
class PRINT_EXPR(Opcode):
__slots__ = ()
class PRINT_ITEM(Opcode):
__slots__ = ()
class PRINT_NEWLINE(Opcode):
__slots__ = ()
class PRINT_ITEM_TO(Opcode):
__slots__ = ()
class PRINT_NEWLINE_TO(Opcode):
__slots__ = ()
class LOAD_BUILD_CLASS(Opcode):
__slots__ = ()
class YIELD_FROM(Opcode):
FLAGS = HAS_JUNKNOWN
__slots__ = ()
class INPLACE_LSHIFT(Opcode):
__slots__ = ()
class INPLACE_RSHIFT(Opcode):
__slots__ = ()
class INPLACE_AND(Opcode):
__slots__ = ()
class INPLACE_XOR(Opcode):
__slots__ = ()
class INPLACE_OR(Opcode):
__slots__ = ()
class BREAK_LOOP(Opcode):
FLAGS = HAS_JUNKNOWN | NO_NEXT
__slots__ = ()
class WITH_CLEANUP(Opcode):
# This opcode changes the block stack, but it should never change its depth.
FLAGS = HAS_JUNKNOWN # might call __exit__
__slots__ = ()
class LOAD_LOCALS(Opcode):
__slots__ = ()
class RETURN_VALUE(Opcode):
FLAGS = HAS_JUNKNOWN | NO_NEXT
__slots__ = ()
class IMPORT_STAR(Opcode):
__slots__ = ()
class EXEC_STMT(Opcode):
FLAGS = HAS_JUNKNOWN
__slots__ = ()
class YIELD_VALUE(Opcode):
FLAGS = HAS_JUNKNOWN
__slots__ = ()
class POP_BLOCK(Opcode):
FLAGS = POPS_BLOCK
__slots__ = ()
class END_FINALLY(Opcode):
FLAGS = HAS_JUNKNOWN # might re-raise an exception
__slots__ = ()
class BUILD_CLASS(Opcode):
__slots__ = ()
class POP_EXCEPT(Opcode):
__slots__ = ()
class STORE_NAME(OpcodeWithArg): # Indexes into name list
FLAGS = HAS_NAME|HAS_ARGUMENT
__slots__ = ()
class DELETE_NAME(OpcodeWithArg): # Indexes into name list
FLAGS = HAS_NAME|HAS_ARGUMENT
__slots__ = ()
class UNPACK_SEQUENCE(OpcodeWithArg): # Arg: Number of tuple items
FLAGS = HAS_ARGUMENT
__slots__ = ()
class FOR_ITER(OpcodeWithArg):
FLAGS = HAS_JREL|HAS_ARGUMENT
__slots__ = ()
class LIST_APPEND(OpcodeWithArg):
FLAGS = HAS_ARGUMENT
__slots__ = ()
class UNPACK_EX(OpcodeWithArg):
FLAGS = HAS_ARGUMENT
__slots__ = ()
class STORE_ATTR(OpcodeWithArg): # Indexes into name list
FLAGS = HAS_NAME|HAS_ARGUMENT
__slots__ = ()
class DELETE_ATTR(OpcodeWithArg): # Indexes into name list
FLAGS = HAS_NAME|HAS_ARGUMENT
__slots__ = ()
class STORE_GLOBAL(OpcodeWithArg): # Indexes into name list
FLAGS = HAS_NAME|HAS_ARGUMENT
__slots__ = ()
class DELETE_GLOBAL(OpcodeWithArg): # Indexes into name list
FLAGS = HAS_NAME|HAS_ARGUMENT
__slots__ = ()
class DUP_TOPX(OpcodeWithArg): # Arg: Number of items to duplicate
FLAGS = HAS_ARGUMENT
__slots__ = ()
class LOAD_CONST(OpcodeWithArg): # Arg: Index in const list
FLAGS = HAS_ARGUMENT|HAS_CONST
__slots__ = ()
class LOAD_NAME(OpcodeWithArg): # Arg: Index in name list
FLAGS = HAS_NAME|HAS_ARGUMENT
__slots__ = ()
class BUILD_TUPLE(OpcodeWithArg): # Arg: Number of tuple items
FLAGS = HAS_ARGUMENT
__slots__ = ()
class BUILD_LIST(OpcodeWithArg): # Arg: Number of list items
FLAGS = HAS_ARGUMENT
__slots__ = ()
class BUILD_SET(OpcodeWithArg): # Arg: Number of set items
FLAGS = HAS_ARGUMENT
__slots__ = ()
class BUILD_MAP(OpcodeWithArg): # Arg: Number of dict entries (up to 255)
FLAGS = HAS_ARGUMENT
__slots__ = ()
class LOAD_ATTR(OpcodeWithArg): # Arg: Index in name list
FLAGS = HAS_NAME|HAS_ARGUMENT
__slots__ = ()
class COMPARE_OP(OpcodeWithArg): # Arg: Comparison operator
FLAGS = HAS_ARGUMENT
__slots__ = ()
class IMPORT_NAME(OpcodeWithArg): # Arg: Index in name list
FLAGS = HAS_NAME|HAS_ARGUMENT|HAS_JUNKNOWN
__slots__ = ()
class IMPORT_FROM(OpcodeWithArg): # Arg: Index in name list
FLAGS = HAS_NAME|HAS_ARGUMENT
__slots__ = ()
class JUMP_FORWARD(OpcodeWithArg):
FLAGS = HAS_JREL|HAS_ARGUMENT|NO_NEXT
__slots__ = ()
class JUMP_IF_FALSE_OR_POP(OpcodeWithArg):
FLAGS = HAS_JABS|HAS_ARGUMENT
__slots__ = ()
class JUMP_IF_TRUE_OR_POP(OpcodeWithArg):
FLAGS = HAS_JABS|HAS_ARGUMENT
__slots__ = ()
class JUMP_ABSOLUTE(OpcodeWithArg):
FLAGS = HAS_JABS|HAS_ARGUMENT|NO_NEXT
__slots__ = ()
class POP_JUMP_IF_FALSE(OpcodeWithArg):
FLAGS = HAS_JABS|HAS_ARGUMENT
__slots__ = ()
class POP_JUMP_IF_TRUE(OpcodeWithArg):
FLAGS = HAS_JABS|HAS_ARGUMENT
__slots__ = ()
class LOAD_GLOBAL(OpcodeWithArg): # Indexes into name list
FLAGS = HAS_NAME|HAS_ARGUMENT
__slots__ = ()
class CONTINUE_LOOP(OpcodeWithArg): # Acts as jump
FLAGS = HAS_JABS|HAS_ARGUMENT|NO_NEXT
__slots__ = ()
class SETUP_LOOP(OpcodeWithArg):
FLAGS = HAS_JREL|HAS_ARGUMENT|STORE_JUMP|PUSHES_BLOCK
__slots__ = ()
class SETUP_EXCEPT(OpcodeWithArg):
FLAGS = HAS_JREL|HAS_ARGUMENT|STORE_JUMP|PUSHES_BLOCK
__slots__ = ()
class SETUP_FINALLY(OpcodeWithArg):
FLAGS = HAS_JREL|HAS_ARGUMENT|STORE_JUMP|PUSHES_BLOCK
__slots__ = ()
class LOAD_FAST(OpcodeWithArg): # Loads local variable number
FLAGS = HAS_LOCAL|HAS_ARGUMENT
__slots__ = ()
class STORE_FAST(OpcodeWithArg): # Stores local variable number
FLAGS = HAS_LOCAL|HAS_ARGUMENT
__slots__ = ()
class DELETE_FAST(OpcodeWithArg): # Deletes local variable number
FLAGS = HAS_LOCAL|HAS_ARGUMENT
__slots__ = ()
class RAISE_VARARGS(OpcodeWithArg): # Arg: Number of raise args (1, 2, or 3)
FLAGS = HAS_ARGUMENT|HAS_JUNKNOWN|NO_NEXT
__slots__ = ()
class CALL_FUNCTION(OpcodeWithArg): # Arg: #args + (#kwargs << 8)
FLAGS = HAS_NARGS|HAS_ARGUMENT|HAS_JUNKNOWN
__slots__ = ()
class MAKE_FUNCTION(OpcodeWithArg): # Arg: Number of args with default values
FLAGS = HAS_ARGUMENT
__slots__ = ()
class BUILD_SLICE(OpcodeWithArg): # Arg: Number of items
FLAGS = HAS_ARGUMENT
__slots__ = ()
class MAKE_CLOSURE(OpcodeWithArg):
FLAGS = HAS_ARGUMENT
__slots__ = ()
class LOAD_CLOSURE(OpcodeWithArg):
FLAGS = HAS_FREE|HAS_ARGUMENT
__slots__ = ()
class LOAD_DEREF(OpcodeWithArg):
FLAGS = HAS_FREE|HAS_ARGUMENT
__slots__ = ()
class STORE_DEREF(OpcodeWithArg):
FLAGS = HAS_FREE|HAS_ARGUMENT
__slots__ = ()
class DELETE_DEREF(OpcodeWithArg):
FLAGS = HAS_FREE|HAS_ARGUMENT
__slots__ = ()
class CALL_FUNCTION_VAR(OpcodeWithArg): # Arg: #args + (#kwargs << 8)
FLAGS = HAS_NARGS|HAS_ARGUMENT|HAS_JUNKNOWN
__slots__ = ()
class CALL_FUNCTION_KW(OpcodeWithArg): # Arg: #args + (#kwargs << 8)
FLAGS = HAS_NARGS|HAS_ARGUMENT|HAS_JUNKNOWN
__slots__ = ()
class CALL_FUNCTION_VAR_KW(OpcodeWithArg): # Arg: #args + (#kwargs << 8)
FLAGS = HAS_NARGS|HAS_ARGUMENT|HAS_JUNKNOWN
__slots__ = ()
class SETUP_WITH(OpcodeWithArg):
FLAGS = HAS_JREL|HAS_ARGUMENT|STORE_JUMP|PUSHES_BLOCK
__slots__ = ()
class EXTENDED_ARG(OpcodeWithArg):
FLAGS = HAS_ARGUMENT
__slots__ = ()
class SET_ADD(OpcodeWithArg):
FLAGS = HAS_ARGUMENT
__slots__ = ()
class MAP_ADD(OpcodeWithArg):
FLAGS = HAS_ARGUMENT
__slots__ = ()
class LOAD_CLASSDEREF(OpcodeWithArg):
FLAGS = HAS_FREE|HAS_ARGUMENT
__slots__ = ()
python2_mapping = {
0: STOP_CODE, # removed in Python 3
1: POP_TOP,
2: ROT_TWO,
3: ROT_THREE,
4: DUP_TOP,
5: ROT_FOUR, # becomes DUP_TOP_TWO in Python 3
9: NOP,
10: UNARY_POSITIVE,
11: UNARY_NEGATIVE,
12: UNARY_NOT,
13: UNARY_CONVERT, # removed in Python 3
15: UNARY_INVERT,
19: BINARY_POWER,
20: BINARY_MULTIPLY,
21: BINARY_DIVIDE, # removed in Python 3
22: BINARY_MODULO,
23: BINARY_ADD,
24: BINARY_SUBTRACT,
25: BINARY_SUBSCR,
26: BINARY_FLOOR_DIVIDE,
27: BINARY_TRUE_DIVIDE,
28: INPLACE_FLOOR_DIVIDE,
29: INPLACE_TRUE_DIVIDE,
30: SLICE_0, # removed in Python 3
31: SLICE_1, # removed in Python 3
32: SLICE_2, # removed in Python 3
33: SLICE_3, # removed in Python 3
40: STORE_SLICE_0, # removed in Python 3
41: STORE_SLICE_1, # removed in Python 3
42: STORE_SLICE_2, # removed in Python 3
43: STORE_SLICE_3, # removed in Python 3
50: DELETE_SLICE_0, # removed in Python 3
51: DELETE_SLICE_1, # removed in Python 3
52: DELETE_SLICE_2, # removed in Python 3
53: DELETE_SLICE_3, # removed in Python 3
54: STORE_MAP,
55: INPLACE_ADD,
56: INPLACE_SUBTRACT,
57: INPLACE_MULTIPLY,
58: INPLACE_DIVIDE, # removed in Python 3
59: INPLACE_MODULO,
60: STORE_SUBSCR,
61: DELETE_SUBSCR,
62: BINARY_LSHIFT,
63: BINARY_RSHIFT,
64: BINARY_AND,
65: BINARY_XOR,
66: BINARY_OR,
67: INPLACE_POWER,
68: GET_ITER,
70: PRINT_EXPR,
71: PRINT_ITEM, # becomes LOAD_BUILD_CLASS in Python 3
72: PRINT_NEWLINE, # becomes YIELD_FROM in Python 3
73: PRINT_ITEM_TO,
74: PRINT_NEWLINE_TO,
75: INPLACE_LSHIFT,
76: INPLACE_RSHIFT,
77: INPLACE_AND,
78: INPLACE_XOR,
79: INPLACE_OR,
80: BREAK_LOOP,
81: WITH_CLEANUP,
82: LOAD_LOCALS, # removed in Python 3
83: RETURN_VALUE,
84: IMPORT_STAR,
85: EXEC_STMT, # removed in Python 3
86: YIELD_VALUE,
87: POP_BLOCK,
88: END_FINALLY,
89: BUILD_CLASS, # becomes POP_EXCEPT in Python 3
90: STORE_NAME,
91: DELETE_NAME,
92: UNPACK_SEQUENCE,
93: FOR_ITER,
94: LIST_APPEND, # becomes UNPACK_EX in Python 3
95: STORE_ATTR,
96: DELETE_ATTR,
97: STORE_GLOBAL,
98: DELETE_GLOBAL,
99: DUP_TOPX, # removed in Python 3
100: LOAD_CONST,
101: LOAD_NAME,
102: BUILD_TUPLE,
103: BUILD_LIST,
104: BUILD_SET,
105: BUILD_MAP,
106: LOAD_ATTR,
107: COMPARE_OP,
108: IMPORT_NAME,
109: IMPORT_FROM,
110: JUMP_FORWARD,
111: JUMP_IF_FALSE_OR_POP,
112: JUMP_IF_TRUE_OR_POP,
113: JUMP_ABSOLUTE,
114: POP_JUMP_IF_FALSE,
115: POP_JUMP_IF_TRUE,
116: LOAD_GLOBAL,
119: CONTINUE_LOOP,
120: SETUP_LOOP,
121: SETUP_EXCEPT,
122: SETUP_FINALLY,
124: LOAD_FAST,
125: STORE_FAST,
126: DELETE_FAST,
130: RAISE_VARARGS,
131: CALL_FUNCTION,
132: MAKE_FUNCTION,
133: BUILD_SLICE,
134: MAKE_CLOSURE,
135: LOAD_CLOSURE,
136: LOAD_DEREF,
137: STORE_DEREF,
140: CALL_FUNCTION_VAR,
141: CALL_FUNCTION_KW,
142: CALL_FUNCTION_VAR_KW,
143: SETUP_WITH,
145: EXTENDED_ARG, # moved to 144 in Python 3, 145 becomes LIST_APPEND
146: SET_ADD,
147: MAP_ADD,
}
python3_mapping = {
1: POP_TOP,
2: ROT_TWO,
3: ROT_THREE,
4: DUP_TOP,
5: DUP_TOP_TWO,
9: NOP,
10: UNARY_POSITIVE,
11: UNARY_NEGATIVE,
12: UNARY_NOT,
15: UNARY_INVERT,
19: BINARY_POWER,
20: BINARY_MULTIPLY,
22: BINARY_MODULO,
23: BINARY_ADD,
24: BINARY_SUBTRACT,
25: BINARY_SUBSCR,
26: BINARY_FLOOR_DIVIDE,
27: BINARY_TRUE_DIVIDE,
28: INPLACE_FLOOR_DIVIDE,
29: INPLACE_TRUE_DIVIDE,
54: STORE_MAP,
55: INPLACE_ADD,
56: INPLACE_SUBTRACT,
57: INPLACE_MULTIPLY,
59: INPLACE_MODULO,
60: STORE_SUBSCR,
61: DELETE_SUBSCR,
62: BINARY_LSHIFT,
63: BINARY_RSHIFT,
64: BINARY_AND,
65: BINARY_XOR,
66: BINARY_OR,
67: INPLACE_POWER,
68: GET_ITER,
69: STORE_LOCALS, # removed again in Python 3.4
70: PRINT_EXPR,
71: LOAD_BUILD_CLASS, # PRINT_ITEM in Python 2
72: YIELD_FROM, # PRINT_NEWLINE in Python 2
75: INPLACE_LSHIFT,
76: INPLACE_RSHIFT,
77: INPLACE_AND,
78: INPLACE_XOR,
79: INPLACE_OR,
80: BREAK_LOOP,
81: WITH_CLEANUP,
83: RETURN_VALUE,
84: IMPORT_STAR,
86: YIELD_VALUE,
87: POP_BLOCK,
88: END_FINALLY,
89: POP_EXCEPT, # BUILD_CLASS in Python 2
90: STORE_NAME,
91: DELETE_NAME,
92: UNPACK_SEQUENCE,
93: FOR_ITER,
94: UNPACK_EX, # LIST_APPEND in Python 2
95: STORE_ATTR,
96: DELETE_ATTR,
97: STORE_GLOBAL,
98: DELETE_GLOBAL,
100: LOAD_CONST,
101: LOAD_NAME,
102: BUILD_TUPLE,
103: BUILD_LIST,
104: BUILD_SET,
105: BUILD_MAP,
106: LOAD_ATTR,
107: COMPARE_OP,
108: IMPORT_NAME,
109: IMPORT_FROM,
110: JUMP_FORWARD,
111: JUMP_IF_FALSE_OR_POP,
112: JUMP_IF_TRUE_OR_POP,
113: JUMP_ABSOLUTE,
114: POP_JUMP_IF_FALSE,
115: POP_JUMP_IF_TRUE,
116: LOAD_GLOBAL,
119: CONTINUE_LOOP,
120: SETUP_LOOP,
121: SETUP_EXCEPT,
122: SETUP_FINALLY,
124: LOAD_FAST,
125: STORE_FAST,
126: DELETE_FAST,
130: RAISE_VARARGS,
131: CALL_FUNCTION,
132: MAKE_FUNCTION,
133: BUILD_SLICE,
134: MAKE_CLOSURE,
135: LOAD_CLOSURE,
136: LOAD_DEREF,
137: STORE_DEREF,
138: DELETE_DEREF,
140: CALL_FUNCTION_VAR,
141: CALL_FUNCTION_KW,
142: CALL_FUNCTION_VAR_KW,
143: SETUP_WITH,
144: EXTENDED_ARG, # 145 in Python 2
145: LIST_APPEND, # 94 in Python 2
146: SET_ADD,
147: MAP_ADD,
148: LOAD_CLASSDEREF, # not in Python 2
}
class _LineNumberTableParser(object):
"""State machine for decoding a Python line number array."""
def __init__(self, lnotab, firstlineno):
assert not len(lnotab) & 1 # lnotab always has an even number of elements
self.lnotab = lnotab
self.lineno = firstlineno
self.next_addr = ord(self.lnotab[0]) if self.lnotab else 0
self.pos = 0
def get(self, i):
"""Get the line number for the instruction at the given position.
This does NOT allow random access. Call with incremental numbers.
Args:
i: The byte position in the bytecode. i needs to stay constant or increase
between calls.
Returns:
The line number corresponding to the position at i.
"""
while i >= self.next_addr and self.pos < len(self.lnotab):
self.lineno += ord(self.lnotab[self.pos + 1])
self.pos += 2
if self.pos < len(self.lnotab):
self.next_addr += ord(self.lnotab[self.pos])
return self.lineno
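# Worked example (a sketch, assuming CPython's classic lnotab encoding of
# (address_delta, line_delta) byte pairs): lnotab = '\x06\x01' with
# firstlineno = 1 means bytecode offsets 0..5 map to line 1 and offsets >= 6
# map to line 2:
#
#   lp = _LineNumberTableParser('\x06\x01', 1)
#   lp.get(0)   # -> 1
#   lp.get(5)   # -> 1
#   lp.get(6)   # -> 2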
def _prettyprint_arg(cls, oparg, co_consts, co_names,
co_varnames, cellvars_freevars):
if cls.has_jrel():
return oparg
elif co_consts and cls.has_const():
return repr(co_consts[oparg])
elif co_names and cls.has_name():
return co_names[oparg]
elif co_varnames and cls.has_local():
return co_varnames[oparg]
elif cellvars_freevars and cls.has_free():
return cellvars_freevars[oparg]
else:
return oparg
def _dis(data, mapping,
co_varnames=None, co_names=None, co_consts=None, co_cellvars=None,
co_freevars=None, co_lnotab=None, co_firstlineno=None):
"""Disassemble a string into a list of Opcode instances."""
code = []
size = len(data)
pos = 0
lp = _LineNumberTableParser(co_lnotab, co_firstlineno) if co_lnotab else None
offset_to_index = {}
extended_arg = 0
if co_cellvars is not None and co_freevars is not None:
cellvars_freevars = co_cellvars + co_freevars
else:
cellvars_freevars = None
while pos < size:
opcode = ord(data[pos])
index = len(code)
offset_to_index[pos] = index
line = lp.get(pos) if lp else 0
pos += 1
cls = mapping[opcode]
if cls is EXTENDED_ARG:
# EXTENDED_ARG modifies the opcode after it, setting bits 16..31 of
# its argument.
assert not extended_arg, "two EXTENDED_ARGs in a row"
      extended_arg = ord(data[pos]) << 16 | ord(data[pos+1]) << 24
      pos += 2  # consume the two EXTENDED_ARG argument bytes
elif cls.FLAGS & HAS_ARGUMENT:
oparg = ord(data[pos]) | ord(data[pos+1]) << 8 | extended_arg
extended_arg = 0
pos += 2
if cls.has_jrel():
oparg += pos
pretty = _prettyprint_arg(cls, oparg, co_consts, co_names, co_varnames,
cellvars_freevars)
code.append(cls(index, line, oparg, pretty))
else:
assert not extended_arg, "EXTENDED_ARG in front of opcode without arg"
code.append(cls(index, line))
# Map the target of jump instructions to the opcode they jump to, and fill
# in "next" and "prev" pointers
for i, op in enumerate(code):
if op.FLAGS & (HAS_JREL | HAS_JABS):
op.arg = op.pretty_arg = offset_to_index[op.arg]
op.target = code[op.arg]
op.prev = code[i - 1] if i > 0 else None
op.next = code[i + 1] if i < len(code) - 1 else None
return code
def dis(data, python_version, *args, **kwargs):
assert python_version[0] in (2, 3)
mapping = python2_mapping if python_version[0] == 2 else python3_mapping
return _dis(data, mapping, *args, **kwargs)
def dis_code(code):
return dis(data=code.co_code,
python_version=code.python_version,
co_varnames=code.co_varnames,
co_names=code.co_names,
co_consts=code.co_consts,
co_cellvars=code.co_cellvars,
co_freevars=code.co_freevars,
co_lnotab=code.co_lnotab,
co_firstlineno=code.co_firstlineno)
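# Usage sketch (added for illustration; assumes a Python 2 runtime, where
# indexing a bytecode string yields one-character strings as ord() expects):
#
#   raw = 'd\x00\x00S'              # LOAD_CONST 0; RETURN_VALUE
#   for op in dis(raw, (2, 7)):
#     print op                      # "   0: LOAD_CONST 0", "   1: RETURN_VALUE"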
|
|
# coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Library to support data extraction from Wikipedia and Wikidata.'''
from absl import logging
import json
from typing import Dict, List, Optional, Sequence
from cabby.data.wikidata import query as wdq
from cabby.data.wikipedia import query as wpq
from cabby.data.wikidata import item as wdi
from cabby.data.wikipedia import item as wpi
from cabby.data.wikidata import info_item as wdqi
from cabby.data import osm_item
from cabby.data import wikigeo
from cabby.geo.map_processing import map_structure
from cabby.geo.regions import Region
def get_wikigeo_data(
wikidata_items: Sequence[wdi.WikidataEntity]
) -> List[wikigeo.WikigeoEntity]:
  '''Get data from Wikipedia based on Wikidata items.
  Arguments:
    wikidata_items: The Wikidata items for which corresponding Wikigeo
      items will be extracted.
Returns:
The Wikigeo items found (composed of Wikipedia (text,title) and
Wikidata (location) data).
'''
# Get Wikipedia titles.
titles_to_wikidata = {item.wikipedia_title: item for item in wikidata_items}
# Get Wikipedia pages.
wikipedia_items = wpq.get_wikipedia_items(list(titles_to_wikidata.keys()))
titles_to_wikipedia = {item.title: item for item in wikipedia_items}
# Change to Geodata dataset format.
geo_data = []
for wp_item in wikipedia_items:
geo_data.append(wikigeo.WikigeoEntity.from_wiki_items(
wp_item, wp_item, titles_to_wikidata[wp_item.title], 'Wikipedia_page'))
# Get backlinks for Wikipedia pages.
logging.info("Creating WikipediaEntities from backlinks.")
backlinks_items = wpq.get_backlinks_items_from_wikipedia_titles(
list(titles_to_wikidata.keys()))
# Change backlinks pages to Geodata dataset format.
logging.info("Converting backlink pages to geodata")
for item in backlinks_items:
geo_data.append(wikigeo.WikigeoEntity.from_wiki_items(
item,
titles_to_wikipedia[item.linked_title],
titles_to_wikidata[item.linked_title],
"Wikipedia_backlink"))
# Remove duplicates.
logging.info("Removing duplicate items.")
uniq_wgitems = {}
for wgitem in geo_data:
key = wgitem.ref_qid + wgitem.text
uniq_wgitems[key] = wgitem
logging.info("Done with all extraction.")
return list(uniq_wgitems.values())
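# Pipeline sketch (illustrative): for a Wikidata item whose wikipedia_title
# is, say, 'Empire State Building', get_wikigeo_data() fetches the Wikipedia
# page and its backlinks, wraps each as a WikigeoEntity tagged
# 'Wikipedia_page' or 'Wikipedia_backlink', and de-duplicates on
# ref_qid + text before returning the list.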
def get_data_by_qid(qid: str) -> Sequence[wikigeo.WikigeoEntity]:
  '''Get data from Wikipedia and Wikidata by Wikidata QID.
  Arguments:
    qid(str): The Wikidata QID to extract items for.
Returns:
The Wikipedia (text, title) and Wikidata (location) data found.
'''
# Get Wikidata items by qid.
wikidata_results = wdq.get_place_location_points_from_qid(qid)
wikidata_items = [wdi.WikidataEntity.from_sparql_result(result)
for result in wikidata_results]
return get_wikigeo_data(wikidata_items)
def get_data_by_region(region: Region) -> List[wikigeo.WikigeoEntity]:
'''Get data from Wikipedia and Wikidata by region.
Arguments:
region(Region): The region to extract items from.
Returns:
The Wikipedia (text,title) and Wikidata (location) data found.
'''
# Get Wikidata items by region.
wikidata_results = wdq.get_geofenced_wikidata_items(region)
wikidata_items = [wdi.WikidataEntity.from_sparql_result(result)
for result in wikidata_results]
return get_wikigeo_data(wikidata_items)
def get_data_by_region_with_osm(
  region: Region, path_osm: Optional[str] = None) -> List[wikigeo.WikigeoEntity]:
  '''Get three types of samples by region:
  (1) samples from Wikipedia (text, title) and Wikidata (location);
  (2) concatenations of Wikidata tags;
  (3) concatenations of OSM tags.
  Arguments:
    region(Region): The region to extract items from.
    path_osm(str): Optional path to a saved OSM POI file; if None, the POI
      map is built from the region.
  Returns:
    The Wikipedia (text, title) and Wikidata (location) data found.
  '''
# Get Wikidata items by region.
samples = get_data_by_region(region)
logging.info(
f'Created {len(samples)} samples from Wikipedia pages and Wikidata (locations only).')
# Add samples from Wikidata only.
wikidata_tags = wdq.get_geofenced_info_wikidata_items(region)
for item in wikidata_tags:
info_item = wdqi.WikidataEntity.from_sparql_result_info(item)
sample = wikigeo.WikigeoEntity.from_wikidata(info_item)
samples.append(sample)
logging.info(
f'Created {len(samples)} samples with Wikidata additional data.')
# Add sample from OSM only.
if path_osm is None:
    region_map = map_structure.Map(region)
    poi = region_map.poi
else:
poi = map_structure.load_poi(path_osm)
num_cells_large_entities = 10
# Remove large entities.
poi = poi[poi['s2cellids'].str.len() <= num_cells_large_entities]
osm_entities = poi.apply(
lambda row: osm_item.OSMEntity.from_osm(row), axis=1).tolist()
unique_texts = []
for osm in osm_entities:
sample = wikigeo.WikigeoEntity.from_osm(osm)
if sample.text not in unique_texts:
unique_texts.append(sample.text)
samples.append(sample)
logging.info(
f'Created {len(samples)} samples with OSM additional data.')
return samples
def split_dataset(
dataset: Sequence[wikigeo.WikigeoEntity],
percentage_train: float,
percentage_dev: float
) -> Dict[str, Sequence[wikigeo.WikigeoEntity]]:
  '''Splits the dataset into train-set, dev-set and test-set according to
  the ref_title.
  Arguments:
    dataset: The Wikigeo items to split.
    percentage_train(float in [0,1]): percentage of the train-set.
    percentage_dev(float in [0,1]): percentage of the dev-set.
Returns:
The train-set, dev-set and test-set splits.
'''
assert percentage_train >= 0 and percentage_train <= 1, (
"percentage_train is not in range 0-1.")
assert percentage_dev >= 0 and percentage_dev <= 1, (
"percentage_dev is not in range 0-1.")
assert percentage_dev + percentage_train <= 1, (
"percentage_dev+percentage_train is more than 1.")
# TODO (https://github.com/googleinterns/cabby/issues/28#issue-695818890):
# Change split by qid so that it will ensure qid isn't shared between sets
# Sort the dataset by ref_title.
sorted_dataset = sorted(dataset, key=lambda item: item.ref_title)
# Get size of splits.
size_dataset = len(dataset)
size_train = round(percentage_train*size_dataset)
size_dev = round(percentage_dev*size_dataset)
# Split the dataset.
train_set = sorted_dataset[0:size_train]
dev_set = sorted_dataset[size_train:size_train+size_dev]
test_set = sorted_dataset[size_train+size_dev:]
return {
'train': train_set,
'dev': dev_set,
'test': test_set
}
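# Worked example (for illustration only): with 10 items,
# percentage_train=0.8 and percentage_dev=0.1, size_train = round(8.0) = 8
# and size_dev = round(1.0) = 1, so the sorted dataset is split 8/1/1
# between train, dev and test.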
def write_files(path: str, items: Sequence):
  '''Append items to a JSON-lines file on disk.'''
with open(path, 'a') as outfile:
for item in items:
json.dump(item, outfile, default=lambda o: o.__dict__)
outfile.write('\n')
outfile.flush()
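# Usage sketch (illustrative): write_files('/tmp/wikigeo.jsonl', samples)
# appends one JSON object per line; default=lambda o: o.__dict__ above is
# what lets json.dump serialize the entity objects directly.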
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.db.utils import DatabaseError
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing index on 'Category', fields ['visibility']
db.delete_index('canvas_category', ['visibility'])
# Removing index on 'Content', fields ['visibility']
db.delete_index('canvas_content', ['visibility'])
def backwards(self, orm):
# Adding index on 'Content', fields ['visibility']
db.create_index('canvas_content', ['visibility'])
# Adding index on 'Category', fields ['visibility']
db.create_index('canvas_category', ['visibility'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.apiapp': {
'Meta': {'object_name': 'APIApp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'canvas.apiauthtoken': {
'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.APIApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.bestof': {
'Meta': {'object_name': 'BestOf'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {})
},
'canvas.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'posted_on_quest_of_the_day': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'skip_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'star_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.commentflag': {
'Meta': {'object_name': 'CommentFlag'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['auth.User']"})
},
'canvas.commentmoderationlog': {
'Meta': {'object_name': 'CommentModerationLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': "orm['auth.User']"}),
'visibility': ('django.db.models.fields.IntegerField', [], {})
},
'canvas.commentpin': {
'Meta': {'object_name': 'CommentPin'},
'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.commentsticker': {
'Meta': {'object_name': 'CommentSticker'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Comment']"}),
'epic_message': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '140', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'canvas.commentstickerlog': {
'Meta': {'object_name': 'CommentStickerLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.emailunsubscribe': {
'Meta': {'object_name': 'EmailUnsubscribe'},
'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.externalcontent': {
'Meta': {'object_name': 'ExternalContent'},
'_data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'content_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'external_content'", 'to': "orm['canvas.Comment']"}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'null': 'True', 'blank': 'True'})
},
'canvas.facebookinvite': {
'Meta': {'object_name': 'FacebookInvite'},
'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.facebookuser': {
'Meta': {'object_name': 'FacebookUser'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'canvas.followcategory': {
'Meta': {'unique_together': "(('user', 'category'),)", 'object_name': 'FollowCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': "orm['auth.User']"})
},
'canvas.invitecode': {
'Meta': {'object_name': 'InviteCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.remixplugin': {
'Meta': {'object_name': 'RemixPlugin'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
's3md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'canvas.stashcontent': {
'Meta': {'object_name': 'StashContent'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'avatar': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']", 'null': 'True'}),
'bio_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'enable_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_timeline_posts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'follower_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'profile_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']", 'null': 'True'}),
'trust_changed': ('canvas.util.UnixTimestampField', [], {'null': 'True', 'blank': 'True'}),
'trusted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'canvas.usermoderationlog': {
'Meta': {'object_name': 'UserModerationLog'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': "orm['auth.User']"})
},
'canvas.userwarning': {
'Meta': {'object_name': 'UserWarning'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'custom_message': ('django.db.models.fields.TextField', [], {}),
'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('canvas.util.UnixTimestampField', [], {}),
'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': "orm['auth.User']"}),
'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'canvas.welcomeemailrecipient': {
'Meta': {'object_name': 'WelcomeEmailRecipient'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'canvas_auth.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['canvas']
|
|
import datetime
import decimal
import uuid
from functools import lru_cache
from itertools import chain
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import utils
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models import aggregates, fields
from django.db.models.expressions import Col
from django.utils import timezone
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.duration import duration_microseconds
from django.utils.functional import cached_property
class DatabaseOperations(BaseDatabaseOperations):
cast_char_field_without_max_length = 'text'
cast_data_types = {
'DateField': 'TEXT',
'DateTimeField': 'TEXT',
}
explain_prefix = 'EXPLAIN QUERY PLAN'
def bulk_batch_size(self, fields, objs):
"""
SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
999 variables per query.
If there's only a single field to insert, the limit is 500
(SQLITE_MAX_COMPOUND_SELECT).
"""
if len(fields) == 1:
return 500
elif len(fields) > 1:
return self.connection.features.max_query_params // len(fields)
else:
return len(objs)
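    # Worked example (illustrative): with max_query_params = 999 and three
    # fields per row, bulk_batch_size() returns 999 // 3 == 333 rows per
    # INSERT; with a single field it returns the compound-SELECT limit, 500.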
def check_expression_support(self, expression):
bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
bad_aggregates = (aggregates.Sum, aggregates.Avg, aggregates.Variance, aggregates.StdDev)
if isinstance(expression, bad_aggregates):
for expr in expression.get_source_expressions():
try:
output_field = expr.output_field
except FieldError:
# Not every subexpression has an output_field which is fine
# to ignore.
pass
else:
if isinstance(output_field, bad_fields):
raise utils.NotSupportedError(
'You cannot use Sum, Avg, StdDev, and Variance '
'aggregations on date/time fields in sqlite3 '
'since date/time is saved as text.'
)
if isinstance(expression, aggregates.Aggregate) and len(expression.source_expressions) > 1:
raise utils.NotSupportedError(
"SQLite doesn't support DISTINCT on aggregate functions "
"accepting multiple arguments."
)
def date_extract_sql(self, lookup_type, field_name):
"""
Support EXTRACT with a user-defined function django_date_extract()
that's registered in connect(). Use single quotes because this is a
string and could otherwise cause a collision with a field name.
"""
return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, timedelta):
return str(duration_microseconds(timedelta))
def format_for_duration_arithmetic(self, sql):
"""Do nothing since formatting is handled in the custom function."""
return sql
def date_trunc_sql(self, lookup_type, field_name):
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def time_trunc_sql(self, lookup_type, field_name):
return "django_time_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def _convert_tznames_to_sql(self, tzname):
if settings.USE_TZ:
return "'%s'" % tzname, "'%s'" % self.connection.timezone_name
return 'NULL', 'NULL'
def datetime_cast_date_sql(self, field_name, tzname):
return 'django_datetime_cast_date(%s, %s, %s)' % (
field_name, *self._convert_tznames_to_sql(tzname),
)
def datetime_cast_time_sql(self, field_name, tzname):
return 'django_datetime_cast_time(%s, %s, %s)' % (
field_name, *self._convert_tznames_to_sql(tzname),
)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
return "django_datetime_extract('%s', %s, %s, %s)" % (
lookup_type.lower(), field_name, *self._convert_tznames_to_sql(tzname),
)
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
return "django_datetime_trunc('%s', %s, %s, %s)" % (
lookup_type.lower(), field_name, *self._convert_tznames_to_sql(tzname),
)
def time_extract_sql(self, lookup_type, field_name):
return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name)
def pk_default_value(self):
return "NULL"
def _quote_params_for_last_executed_query(self, params):
"""
Only for last_executed_query! Don't use this to execute SQL queries!
"""
# This function is limited both by SQLITE_LIMIT_VARIABLE_NUMBER (the
# number of parameters, default = 999) and SQLITE_MAX_COLUMN (the
# number of return values, default = 2000). Since Python's sqlite3
# module doesn't expose the get_limit() C API, assume the default
# limits are in effect and split the work in batches if needed.
BATCH_SIZE = 999
if len(params) > BATCH_SIZE:
results = ()
for index in range(0, len(params), BATCH_SIZE):
chunk = params[index:index + BATCH_SIZE]
results += self._quote_params_for_last_executed_query(chunk)
return results
sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params))
# Bypass Django's wrappers and use the underlying sqlite3 connection
# to avoid logging this query - it would trigger infinite recursion.
cursor = self.connection.connection.cursor()
# Native sqlite3 cursors cannot be used as context managers.
try:
return cursor.execute(sql, params).fetchone()
finally:
cursor.close()
def last_executed_query(self, cursor, sql, params):
# Python substitutes parameters in Modules/_sqlite/cursor.c with:
# pysqlite_statement_bind_parameters(self->statement, parameters, allow_8bit_chars);
# Unfortunately there is no way to reach self->statement from Python,
# so we quote and substitute parameters manually.
if params:
if isinstance(params, (list, tuple)):
params = self._quote_params_for_last_executed_query(params)
else:
values = tuple(params.values())
values = self._quote_params_for_last_executed_query(values)
params = dict(zip(params, values))
return sql % params
# For consistency with SQLiteCursorWrapper.execute(), just return sql
# when there are no parameters. See #13648 and #17158.
else:
return sql
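    # Illustrative sketch: last_executed_query(cursor, 'SELECT %s', ('abc',))
    # quotes the parameter through SQLite's QUOTE() and interpolates it,
    # yielding "SELECT 'abc'" - for logging and debugging purposes only.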
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def __references_graph(self, table_name):
query = """
WITH tables AS (
SELECT %s name
UNION
SELECT sqlite_master.name
FROM sqlite_master
JOIN tables ON (sql REGEXP %s || tables.name || %s)
) SELECT name FROM tables;
"""
params = (
table_name,
r'(?i)\s+references\s+("|\')?',
r'("|\')?\s*\(',
)
with self.connection.cursor() as cursor:
results = cursor.execute(query, params)
return [row[0] for row in results.fetchall()]
@cached_property
def _references_graph(self):
# 512 is large enough to fit the ~330 tables (as of this writing) in
# Django's test suite.
return lru_cache(maxsize=512)(self.__references_graph)
def sql_flush(self, style, tables, sequences, allow_cascade=False):
if tables and allow_cascade:
# Simulate TRUNCATE CASCADE by recursively collecting the tables
# referencing the tables to be flushed.
tables = set(chain.from_iterable(self._references_graph(table) for table in tables))
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
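    # Illustrative output (a sketch): for tables = ['foo', 'bar'] this
    # returns one statement per table of the form
    #   DELETE FROM "foo";
    # with keyword/field styling applied by `style`.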
def adapt_datetimefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
return str(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
raise ValueError("SQLite backend does not support timezone-aware times.")
return str(value)
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == 'DateTimeField':
converters.append(self.convert_datetimefield_value)
elif internal_type == 'DateField':
converters.append(self.convert_datefield_value)
elif internal_type == 'TimeField':
converters.append(self.convert_timefield_value)
elif internal_type == 'DecimalField':
converters.append(self.get_decimalfield_converter(expression))
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
elif internal_type in ('NullBooleanField', 'BooleanField'):
converters.append(self.convert_booleanfield_value)
return converters
def convert_datetimefield_value(self, value, expression, connection):
if value is not None:
if not isinstance(value, datetime.datetime):
value = parse_datetime(value)
if settings.USE_TZ and not timezone.is_aware(value):
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_datefield_value(self, value, expression, connection):
if value is not None:
if not isinstance(value, datetime.date):
value = parse_date(value)
return value
def convert_timefield_value(self, value, expression, connection):
if value is not None:
if not isinstance(value, datetime.time):
value = parse_time(value)
return value
def get_decimalfield_converter(self, expression):
# SQLite stores only 15 significant digits. Digits coming from
# float inaccuracy must be removed.
create_decimal = decimal.Context(prec=15).create_decimal_from_float
if isinstance(expression, Col):
quantize_value = decimal.Decimal(1).scaleb(-expression.output_field.decimal_places)
def converter(value, expression, connection):
if value is not None:
return create_decimal(value).quantize(quantize_value, context=expression.output_field.context)
else:
def converter(value, expression, connection):
if value is not None:
return create_decimal(value)
return converter
def convert_uuidfield_value(self, value, expression, connection):
if value is not None:
value = uuid.UUID(value)
return value
def convert_booleanfield_value(self, value, expression, connection):
return bool(value) if value in (1, 0) else value
def bulk_insert_sql(self, fields, placeholder_rows):
return " UNION ALL ".join(
"SELECT %s" % ", ".join(row)
for row in placeholder_rows
)
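    # Illustrative output (a sketch): for two rows of two placeholders,
    # bulk_insert_sql(fields, [['%s', '%s'], ['%s', '%s']]) produces
    # "SELECT %s, %s UNION ALL SELECT %s, %s", the compound-SELECT form
    # SQLite uses in place of multi-row VALUES.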
def combine_expression(self, connector, sub_expressions):
# SQLite doesn't have a ^ operator, so use the user-defined POWER
# function that's registered in connect().
if connector == '^':
return 'POWER(%s)' % ','.join(sub_expressions)
return super().combine_expression(connector, sub_expressions)
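    # Illustrative output (a sketch): combine_expression('^', ['"price"', '2'])
    # returns 'POWER("price",2)'; any other connector falls through to the
    # base implementation.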
def combine_duration_expression(self, connector, sub_expressions):
if connector not in ['+', '-']:
raise utils.DatabaseError('Invalid connector for timedelta: %s.' % connector)
fn_params = ["'%s'" % connector] + sub_expressions
if len(fn_params) > 3:
raise ValueError('Too many params for timedelta operations.')
return "django_format_dtdelta(%s)" % ', '.join(fn_params)
def integer_field_range(self, internal_type):
# SQLite doesn't enforce any integer constraints
return (None, None)
def subtract_temporals(self, internal_type, lhs, rhs):
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
if internal_type == 'TimeField':
return "django_time_diff(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
return "django_timestamp_diff(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
def insert_statement(self, ignore_conflicts=False):
return 'INSERT OR IGNORE INTO' if ignore_conflicts else super().insert_statement(ignore_conflicts)
|
|
import operator
import numpy as np
from pandas._libs import index as libindex
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.generic import ABCCategorical, ABCSeries
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
is_categorical_dtype,
_ensure_platform_int,
is_list_like,
is_interval_dtype,
is_scalar)
from pandas.core.dtypes.missing import array_equivalent, isna
from pandas.core.algorithms import take_1d
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.config import get_option
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core import accessor
import pandas.core.common as com
import pandas.core.missing as missing
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass='CategoricalIndex'))
class CategoricalIndex(Index, accessor.PandasDelegate):
"""
Immutable Index implementing an ordered, sliceable set. CategoricalIndex
represents a sparsely populated Index with an underlying Categorical.
Parameters
----------
data : array-like or Categorical, (1-dimensional)
categories : optional, array-like
categories for the CategoricalIndex
ordered : boolean,
designating if the categories are ordered
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Attributes
----------
codes
categories
ordered
Methods
-------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
as_ordered
as_unordered
map
See Also
--------
Categorical, Index
"""
_typ = 'categoricalindex'
_engine_type = libindex.Int64Engine
_attributes = ['name']
def __new__(cls, data=None, categories=None, ordered=None, dtype=None,
copy=False, name=None, fastpath=False):
if fastpath:
return cls._simple_new(data, name=name, dtype=dtype)
if name is None and hasattr(data, 'name'):
name = data.name
if isinstance(data, ABCCategorical):
data = cls._create_categorical(cls, data, categories, ordered,
dtype)
elif isinstance(data, CategoricalIndex):
data = data._data
data = cls._create_categorical(cls, data, categories, ordered,
dtype)
else:
# don't allow scalars
# if data is None, then categories must be provided
if is_scalar(data):
if data is not None or categories is None:
cls._scalar_data_error(data)
data = []
data = cls._create_categorical(cls, data, categories, ordered,
dtype)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
def _create_from_codes(self, codes, categories=None, ordered=None,
name=None):
"""
*this is an internal non-public method*
create the correct categorical from codes
Parameters
----------
codes : new codes
categories : optional categories, defaults to existing
ordered : optional ordered attribute, defaults to existing
name : optional name attribute, defaults to existing
Returns
-------
CategoricalIndex
"""
from pandas.core.arrays import Categorical
if categories is None:
categories = self.categories
if ordered is None:
ordered = self.ordered
if name is None:
name = self.name
cat = Categorical.from_codes(codes, categories=categories,
ordered=ordered)
return CategoricalIndex(cat, name=name)
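# Illustrative sketch (assumption, not pandas source): _create_from_codes
# keeps the existing categories/ordered/name unless overridden.
#
#   import pandas as pd
#   ci = pd.CategoricalIndex(list('abcb'), categories=list('abc'))
#   ci._create_from_codes(ci.codes[::-1])
#   # -> CategoricalIndex(['b', 'c', 'b', 'a'],
#   #                     categories=['a', 'b', 'c'], ordered=False, ...)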
@staticmethod
def _create_categorical(self, data, categories=None, ordered=None,
dtype=None):
"""
*this is an internal non-public method*
create the correct categorical from data and the properties
Parameters
----------
data : data for new Categorical
categories : optional categories, defaults to existing
ordered : optional ordered attribute, defaults to existing
dtype : CategoricalDtype, defaults to existing
Returns
-------
Categorical
"""
if (isinstance(data, (ABCSeries, type(self))) and
is_categorical_dtype(data)):
data = data.values
if not isinstance(data, ABCCategorical):
if ordered is None and dtype is None:
ordered = False
from pandas.core.arrays import Categorical
data = Categorical(data, categories=categories, ordered=ordered,
dtype=dtype)
else:
if categories is not None:
data = data.set_categories(categories, ordered=ordered)
elif ordered is not None and ordered != data.ordered:
data = data.set_ordered(ordered)
if isinstance(dtype, CategoricalDtype):
# we want to silently ignore dtype='category'
data = data._set_dtype(dtype)
return data
@classmethod
def _simple_new(cls, values, name=None, categories=None, ordered=None,
dtype=None, **kwargs):
result = object.__new__(cls)
values = cls._create_categorical(cls, values, categories, ordered,
dtype=dtype)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
result._reset_identity()
return result
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, categories=None, ordered=None,
dtype=None, **kwargs):
# categories and ordered can't be part of attributes,
# as these are properties
# we want to reuse self.dtype if possible, i.e. neither are
# overridden.
if dtype is not None and (categories is not None or
ordered is not None):
raise TypeError("Cannot specify both `dtype` and `categories` "
"or `ordered`")
if categories is None and ordered is None:
dtype = self.dtype if dtype is None else dtype
return super(CategoricalIndex, self)._shallow_copy(
values=values, dtype=dtype, **kwargs)
if categories is None:
categories = self.categories
if ordered is None:
ordered = self.ordered
return super(CategoricalIndex, self)._shallow_copy(
values=values, categories=categories,
ordered=ordered, **kwargs)
def _is_dtype_compat(self, other):
"""
*this is an internal non-public method*
provide a comparison between the dtype of self and other (coercing if
needed)
Raises
------
TypeError if the dtypes are not compatible
"""
if is_categorical_dtype(other):
if isinstance(other, CategoricalIndex):
other = other._values
if not other.is_dtype_equal(self):
raise TypeError("categories must match existing categories "
"when appending")
else:
values = other
if not is_list_like(values):
values = [values]
other = CategoricalIndex(self._create_categorical(
self, other, categories=self.categories, ordered=self.ordered))
if not other.isin(values).all():
raise TypeError("cannot append a non-category item to a "
"CategoricalIndex")
return other
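# Illustrative sketch (assumption): _is_dtype_compat coerces list-likes that
# fit the existing categories and raises TypeError otherwise.
#
#   ci = pd.CategoricalIndex(list('abc'))
#   ci._is_dtype_compat(['a', 'b'])  # coerced to a CategoricalIndex
#   ci._is_dtype_compat(['a', 'z'])  # TypeError: cannot append a non-category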
def equals(self, other):
"""
Determines if two CategoricalIndex objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
try:
other = self._is_dtype_compat(other)
return array_equivalent(self._data, other)
except (TypeError, ValueError):
pass
return False
@property
def _formatter_func(self):
return self.categories._formatter_func
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
attrs = [
('categories',
ibase.default_pprint(self.categories,
max_seq_items=max_categories)),
('ordered', self.ordered)]
if self.name is not None:
attrs.append(('name', ibase.default_pprint(self.name)))
attrs.append(('dtype', "'%s'" % self.dtype.name))
max_seq_items = get_option('display.max_seq_items') or len(self)
if len(self) > max_seq_items:
attrs.append(('length', len(self)))
return attrs
@property
def inferred_type(self):
return 'categorical'
@property
def values(self):
""" return the underlying data, which is a Categorical """
return self._data
@property
def itemsize(self):
# Size of the items in categories, not codes.
return self.values.itemsize
def get_values(self):
""" return the underlying data as an ndarray """
return self._data.get_values()
def tolist(self):
return self._data.tolist()
@property
def codes(self):
return self._data.codes
@property
def categories(self):
return self._data.categories
@property
def ordered(self):
return self._data.ordered
def _reverse_indexer(self):
return self._data._reverse_indexer()
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
if self.categories._defer_to_indexing:
return key in self.categories
return key in self.values
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
if self.categories._defer_to_indexing:
return self.categories.contains(key)
return key in self.values
def __array__(self, dtype=None):
""" the array interface, return my values """
return np.array(self._data, dtype=dtype)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_interval_dtype(dtype):
from pandas import IntervalIndex
return IntervalIndex(np.array(self))
elif is_categorical_dtype(dtype):
# GH 18630
dtype = self.dtype.update_dtype(dtype)
if dtype == self.dtype:
return self.copy() if copy else self
return super(CategoricalIndex, self).astype(dtype=dtype, copy=copy)
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
return self._data.codes == -1
@Appender(ibase._index_shared_docs['fillna'])
def fillna(self, value, downcast=None):
self._assert_can_do_op(value)
return CategoricalIndex(self._data.fillna(value), name=self.name)
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
@cache_readonly
def _engine(self):
# we are going to look things up with the codes themselves
return self._engine_type(lambda: self.codes.astype('i8'), len(self))
# introspection
@cache_readonly
def is_unique(self):
return self._engine.is_unique
@property
def is_monotonic_increasing(self):
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
return self._engine.is_monotonic_decreasing
@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = self.values.unique()
# CategoricalIndex._shallow_copy keeps original categories
# and ordered if not otherwise specified
return self._shallow_copy(result, categories=result.categories,
ordered=result.ordered)
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep='first'):
from pandas._libs.hashtable import duplicated_int64
codes = self.codes.astype('i8')
return duplicated_int64(codes, keep)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.astype('object')
def get_loc(self, key, method=None):
"""
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None}
* default: exact matches only.
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
--------
>>> unique_index = pd.CategoricalIndex(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.CategoricalIndex(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = pd.CategoricalIndex(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True], dtype=bool)
"""
codes = self.categories.get_loc(key)
if (codes == -1):
raise KeyError(key)
return self._engine.get_loc(codes)
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
try:
k = com._values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
indexer = self.get_loc(k)
return series.iloc[indexer]
except (KeyError, TypeError):
pass
# we might be a positional indexer
return super(CategoricalIndex, self).get_value(series, key)
def _can_reindex(self, indexer):
""" always allow reindexing """
pass
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
from pandas.core.arrays import Categorical
cat = Categorical(values,
categories=self.categories,
ordered=self.ordered)
return self._shallow_copy(cat, **self._get_attributes_dict())
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
if method is not None:
raise NotImplementedError("argument method is not implemented for "
"CategoricalIndex.reindex")
if level is not None:
raise NotImplementedError("argument level is not implemented for "
"CategoricalIndex.reindex")
if limit is not None:
raise NotImplementedError("argument limit is not implemented for "
"CategoricalIndex.reindex")
target = ibase._ensure_index(target)
if not is_categorical_dtype(target) and not target.is_unique:
raise ValueError("cannot reindex with a non-unique indexer")
indexer, missing = self.get_indexer_non_unique(np.array(target))
if len(self.codes):
new_target = self.take(indexer)
else:
new_target = target
# filling in missing if needed
if len(missing):
cats = self.categories.get_indexer(target)
if (cats == -1).any():
# coerce to a regular index here!
result = Index(np.array(self), name=self.name)
new_target, indexer, _ = result._reindex_non_unique(
np.array(target))
else:
codes = new_target.codes.copy()
codes[indexer == -1] = cats[missing]
new_target = self._create_from_codes(codes)
# we always want to return an Index type here
# to be consistent with .reindex for other index types (e.g. they don't
# coerce based on the actual values, only on the dtype)
# unless we had an initial Categorical to begin with
# in which case we are going to conform to the passed Categorical
new_target = np.asarray(new_target)
if is_categorical_dtype(target):
new_target = target._shallow_copy(new_target, name=self.name)
else:
new_target = Index(new_target, name=self.name)
return new_target, indexer
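# Illustrative sketch (assumption): reindexing against a plain list returns a
# regular Index plus the positions of the targets in the original index.
#
#   ci = pd.CategoricalIndex(list('abc'))
#   ci.reindex(['b', 'c'])
#   # -> (Index(['b', 'c'], dtype='object'), array([1, 2]))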
def _reindex_non_unique(self, target):
""" reindex from a non-unique; which CategoricalIndex's are almost
always
"""
new_target, indexer = self.reindex(target)
new_indexer = None
check = indexer == -1
if check.any():
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[check] = -1
cats = self.categories.get_indexer(target)
if not (cats == -1).any():
# .reindex returns normal Index. Revert to CategoricalIndex if
# all targets are included in my categories
new_target = self._shallow_copy(new_target)
return new_target, indexer, new_indexer
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
from pandas.core.arrays.categorical import _recode_for_categories
method = missing.clean_reindex_fill_method(method)
target = ibase._ensure_index(target)
if self.is_unique and self.equals(target):
return np.arange(len(self), dtype='intp')
if method == 'pad' or method == 'backfill':
raise NotImplementedError("method='pad' and method='backfill' not "
"implemented yet for CategoricalIndex")
elif method == 'nearest':
raise NotImplementedError("method='nearest' not implemented yet "
'for CategoricalIndex')
if (isinstance(target, CategoricalIndex) and
self.values.is_dtype_equal(target)):
if self.values.equals(target.values):
# we have the same codes
codes = target.codes
else:
codes = _recode_for_categories(target.codes,
target.categories,
self.values.categories)
else:
if isinstance(target, CategoricalIndex):
code_indexer = self.categories.get_indexer(target.categories)
codes = take_1d(code_indexer, target.codes, fill_value=-1)
else:
codes = self.categories.get_indexer(target)
indexer, _ = self._engine.get_indexer_non_unique(codes)
return _ensure_platform_int(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ibase._ensure_index(target)
if isinstance(target, CategoricalIndex):
# Indexing on codes is more efficient if categories are the same:
if target.categories is self.categories:
target = target.codes
indexer, missing = self._engine.get_indexer_non_unique(target)
return _ensure_platform_int(indexer), missing
target = target.values
codes = self.categories.get_indexer(target)
indexer, missing = self._engine.get_indexer_non_unique(codes)
return _ensure_platform_int(indexer), missing
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
if self.categories._defer_to_indexing:
return self.categories._convert_scalar_indexer(key, kind=kind)
return super(CategoricalIndex, self)._convert_scalar_indexer(
key, kind=kind)
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
# Return our indexer or raise if any of the values are not included
# in the categories
if self.categories._defer_to_indexing:
indexer = self.categories._convert_list_indexer(keyarr, kind=kind)
return Index(self.codes).get_indexer_for(indexer)
indexer = self.categories.get_indexer(np.asarray(keyarr))
if (indexer == -1).any():
raise KeyError(
"a list-indexer must only "
"include values that are "
"in the categories")
return self.get_indexer(keyarr)
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = com._asarray_tuplesafe(keyarr)
if self.categories._defer_to_indexing:
return keyarr
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
taken = self._assert_take_fillable(self.codes, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1)
return self._create_from_codes(taken)
def is_dtype_equal(self, other):
return self._data.is_dtype_equal(other)
take_nd = take
def map(self, mapper):
"""
Map values using input correspondence (a dict, Series, or function).
Maps the values (their categories, not the codes) of the index to new
categories. If the mapping correspondence is one-to-one the result is a
:class:`~pandas.CategoricalIndex` which has the same order property as
the original, otherwise an :class:`~pandas.Index` is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.CategoricalIndex or pandas.Index
Mapped index.
See Also
--------
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=False, dtype='category')
>>> idx.map(lambda x: x.upper())
CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
ordered=False, dtype='category')
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
CategoricalIndex(['first', 'second', 'third'], categories=['first',
'second', 'third'], ordered=False, dtype='category')
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=True, dtype='category')
>>> idx.map({'a': 3, 'b': 2, 'c': 1})
CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
dtype='category')
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> idx.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
return self._shallow_copy_with_infer(self.values.map(mapper))
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._create_from_codes(np.delete(self.codes, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
Raises
------
TypeError if the item is not in the categories
"""
code = self.categories.get_indexer([item])
if (code == -1) and not (is_scalar(item) and isna(item)):
raise TypeError("cannot insert an item into a CategoricalIndex "
"that is not already an existing category")
codes = self.codes
codes = np.concatenate((codes[:loc], code, codes[loc:]))
return self._create_from_codes(codes)
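# Illustrative sketch (assumption): insert only accepts existing categories
# (or NaN); anything else raises TypeError.
#
#   ci = pd.CategoricalIndex(list('abc'))
#   ci.insert(1, 'b')  # CategoricalIndex(['a', 'b', 'b', 'c'], ...)
#   ci.insert(1, 'z')  # TypeError: not already an existing category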
def _concat(self, to_concat, name):
# if calling index is category, don't check dtype of others
return CategoricalIndex._concat_same_dtype(self, to_concat, name)
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat, which has the same class as self.
Raises TypeError if an element is not compatible with the categories.
"""
to_concat = [self._is_dtype_compat(c) for c in to_concat]
codes = np.concatenate([c.codes for c in to_concat])
result = self._create_from_codes(codes, name=name)
# _create_from_codes defaults a None name to self.name, so reset
# the name here to keep exactly what was passed in (possibly None)
result.name = name
return result
def _codes_for_groupby(self, sort, observed):
""" Return a Categorical adjusted for groupby """
return self.values._codes_for_groupby(sort, observed)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
def _make_compare(op):
opname = '__{op}__'.format(op=op.__name__)
def _evaluate_compare(self, other):
# if we have a Categorical type, then must have the same
# categories
if isinstance(other, CategoricalIndex):
other = other._values
elif isinstance(other, Index):
other = self._create_categorical(
self, other._values, categories=self.categories,
ordered=self.ordered)
if isinstance(other, (ABCCategorical, np.ndarray,
ABCSeries)):
if len(self.values) != len(other):
raise ValueError("Lengths must match to compare")
if isinstance(other, ABCCategorical):
if not self.values.is_dtype_equal(other):
raise TypeError("categorical index comparisons must "
"have the same categories and ordered "
"attributes")
result = op(self.values, other)
if isinstance(result, ABCSeries):
# Dispatch to pd.Categorical returned NotImplemented
# and we got a Series back; down-cast to ndarray
result = result.values
return result
return compat.set_function_name(_evaluate_compare, opname, cls)
cls.__eq__ = _make_compare(operator.eq)
cls.__ne__ = _make_compare(operator.ne)
cls.__lt__ = _make_compare(operator.lt)
cls.__gt__ = _make_compare(operator.gt)
cls.__le__ = _make_compare(operator.le)
cls.__ge__ = _make_compare(operator.ge)
def _delegate_method(self, name, *args, **kwargs):
""" method delegation to the ._values """
method = getattr(self._values, name)
if 'inplace' in kwargs:
raise ValueError("cannot use inplace with CategoricalIndex")
res = method(*args, **kwargs)
if is_scalar(res):
return res
return CategoricalIndex(res, name=self.name)
@classmethod
def _add_accessors(cls):
""" add in Categorical accessor methods """
from pandas.core.arrays import Categorical
CategoricalIndex._add_delegate_accessors(
delegate=Categorical, accessors=["rename_categories",
"reorder_categories",
"add_categories",
"remove_categories",
"remove_unused_categories",
"set_categories",
"as_ordered", "as_unordered",
"min", "max"],
typ='method', overwrite=True)
CategoricalIndex._add_numeric_methods_add_sub_disabled()
CategoricalIndex._add_numeric_methods_disabled()
CategoricalIndex._add_logical_methods_disabled()
CategoricalIndex._add_comparison_methods()
CategoricalIndex._add_accessors()
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import encodeutils
from oslo_utils import strutils
import six
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder import volume
LOG = logging.getLogger(__name__)
def authorize(context, action_name):
action = 'volume_actions:%s' % action_name
extensions.extension_authorizer('volume', action)(context)
class VolumeToImageSerializer(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('os-volume_upload_image',
selector='os-volume_upload_image')
root.set('id')
root.set('updated_at')
root.set('status')
root.set('display_description')
root.set('size')
root.set('volume_type')
root.set('image_id')
root.set('container_format')
root.set('disk_format')
root.set('image_name')
return xmlutil.MasterTemplate(root, 1)
class VolumeToImageDeserializer(wsgi.XMLDeserializer):
"""Deserializer to handle xml-formatted requests."""
def default(self, string):
dom = utils.safe_minidom_parse_string(string)
action_node = dom.childNodes[0]
action_name = action_node.tagName
action_data = {}
attributes = ["force", "image_name", "container_format", "disk_format"]
for attr in attributes:
if action_node.hasAttribute(attr):
action_data[attr] = action_node.getAttribute(attr)
if 'force' in action_data and action_data['force'] == 'True':
action_data['force'] = True
return {'body': {action_name: action_data}}
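# Illustrative sketch (assumption): an XML action body such as
#
#   <os-volume_upload_image image_name="img" force="True"/>
#
# deserializes to
#
#   {'body': {'os-volume_upload_image': {'image_name': 'img', 'force': True}}}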
class VolumeActionsController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(VolumeActionsController, self).__init__(*args, **kwargs)
self.volume_api = volume.API()
@wsgi.action('os-attach')
def _attach(self, req, id, body):
"""Add attachment metadata."""
context = req.environ['cinder.context']
try:
volume = self.volume_api.get(context, id)
except exception.VolumeNotFound as error:
raise webob.exc.HTTPNotFound(explanation=error.msg)
# instance_uuid is optional now
instance_uuid = None
if 'instance_uuid' in body['os-attach']:
instance_uuid = body['os-attach']['instance_uuid']
host_name = None
# Keep API backward compatibility
if 'host_name' in body['os-attach']:
host_name = body['os-attach']['host_name']
mountpoint = body['os-attach']['mountpoint']
if 'mode' in body['os-attach']:
mode = body['os-attach']['mode']
else:
mode = 'rw'
if instance_uuid is None and host_name is None:
msg = _("Invalid request to attach volume to an invalid target")
raise webob.exc.HTTPBadRequest(explanation=msg)
if mode not in ('rw', 'ro'):
msg = _("Invalid request to attach volume with an invalid mode. "
"Attaching mode should be 'rw' or 'ro'")
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
self.volume_api.attach(context, volume,
instance_uuid, host_name, mountpoint, mode)
except messaging.RemoteError as error:
if error.exc_type in ['InvalidVolume', 'InvalidUUID',
'InvalidVolumeAttachMode']:
msg = "Error attaching volume - %(err_type)s: %(err_msg)s" % {
'err_type': error.exc_type, 'err_msg': error.value}
raise webob.exc.HTTPBadRequest(explanation=msg)
else:
# There are also a few cases where the attach call could fail due
# to DB or volume driver errors. These errors shouldn't be exposed
# to the user, so such cases should raise a 500 error.
raise
return webob.Response(status_int=202)
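# Illustrative sketch (assumption; UUID and mountpoint are hypothetical):
# a minimal os-attach request body for a read/write attach to an instance
# looks like
#
#   {"os-attach": {"instance_uuid": "<uuid>",
#                  "mountpoint": "/dev/vdb",
#                  "mode": "rw"}}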
@wsgi.action('os-detach')
def _detach(self, req, id, body):
"""Clear attachment metadata."""
context = req.environ['cinder.context']
try:
volume = self.volume_api.get(context, id)
except exception.VolumeNotFound as error:
raise webob.exc.HTTPNotFound(explanation=error.msg)
attachment_id = None
if body['os-detach']:
attachment_id = body['os-detach'].get('attachment_id', None)
try:
self.volume_api.detach(context, volume, attachment_id)
except messaging.RemoteError as error:
if error.exc_type in ['VolumeAttachmentNotFound', 'InvalidVolume']:
msg = "Error detaching volume - %(err_type)s: %(err_msg)s" % \
{'err_type': error.exc_type, 'err_msg': error.value}
raise webob.exc.HTTPBadRequest(explanation=msg)
else:
# There are also a few cases where the detach call could fail due
# to DB or volume driver errors. These errors shouldn't be exposed
# to the user, so such cases should raise a 500 error.
raise
return webob.Response(status_int=202)
@wsgi.action('os-reserve')
def _reserve(self, req, id, body):
"""Mark volume as reserved."""
context = req.environ['cinder.context']
try:
volume = self.volume_api.get(context, id)
except exception.VolumeNotFound as error:
raise webob.exc.HTTPNotFound(explanation=error.msg)
self.volume_api.reserve_volume(context, volume)
return webob.Response(status_int=202)
@wsgi.action('os-unreserve')
def _unreserve(self, req, id, body):
"""Unmark volume as reserved."""
context = req.environ['cinder.context']
try:
volume = self.volume_api.get(context, id)
except exception.VolumeNotFound as error:
raise webob.exc.HTTPNotFound(explanation=error.msg)
self.volume_api.unreserve_volume(context, volume)
return webob.Response(status_int=202)
@wsgi.action('os-begin_detaching')
def _begin_detaching(self, req, id, body):
"""Update volume status to 'detaching'."""
context = req.environ['cinder.context']
try:
volume = self.volume_api.get(context, id)
except exception.VolumeNotFound as error:
raise webob.exc.HTTPNotFound(explanation=error.msg)
self.volume_api.begin_detaching(context, volume)
return webob.Response(status_int=202)
@wsgi.action('os-roll_detaching')
def _roll_detaching(self, req, id, body):
"""Roll back volume status to 'in-use'."""
context = req.environ['cinder.context']
try:
volume = self.volume_api.get(context, id)
except exception.VolumeNotFound as error:
raise webob.exc.HTTPNotFound(explanation=error.msg)
self.volume_api.roll_detaching(context, volume)
return webob.Response(status_int=202)
@wsgi.action('os-initialize_connection')
def _initialize_connection(self, req, id, body):
"""Initialize volume attachment."""
context = req.environ['cinder.context']
try:
volume = self.volume_api.get(context, id)
except exception.VolumeNotFound as error:
raise webob.exc.HTTPNotFound(explanation=error.msg)
try:
connector = body['os-initialize_connection']['connector']
except KeyError:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'connector'"))
try:
info = self.volume_api.initialize_connection(context,
volume,
connector)
except exception.InvalidInput as err:
raise webob.exc.HTTPBadRequest(
explanation=err)
except exception.VolumeBackendAPIException as error:
msg = _("Unable to fetch connection information from backend.")
raise webob.exc.HTTPInternalServerError(explanation=msg)
return {'connection_info': info}
@wsgi.action('os-terminate_connection')
def _terminate_connection(self, req, id, body):
"""Terminate volume attachment."""
context = req.environ['cinder.context']
try:
volume = self.volume_api.get(context, id)
except exception.VolumeNotFound as error:
raise webob.exc.HTTPNotFound(explanation=error.msg)
try:
connector = body['os-terminate_connection']['connector']
except KeyError:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'connector'"))
try:
self.volume_api.terminate_connection(context, volume, connector)
except exception.VolumeBackendAPIException as error:
msg = _("Unable to terminate volume connection from backend.")
raise webob.exc.HTTPInternalServerError(explanation=msg)
return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.action('os-volume_upload_image')
@wsgi.serializers(xml=VolumeToImageSerializer)
@wsgi.deserializers(xml=VolumeToImageDeserializer)
def _volume_upload_image(self, req, id, body):
"""Uploads the specified volume to image service."""
context = req.environ['cinder.context']
params = body['os-volume_upload_image']
if not params.get("image_name"):
msg = _("No image_name was specified in request.")
raise webob.exc.HTTPBadRequest(explanation=msg)
force = params.get('force', 'False')
try:
force = strutils.bool_from_string(force, strict=True)
except ValueError as error:
err_msg = encodeutils.exception_to_unicode(error)
msg = _("Invalid value for 'force': '%s'") % err_msg
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
volume = self.volume_api.get(context, id)
except exception.VolumeNotFound as error:
raise webob.exc.HTTPNotFound(explanation=error.msg)
authorize(context, "upload_image")
image_metadata = {"container_format": params.get("container_format",
"bare"),
"disk_format": params.get("disk_format", "raw"),
"name": params["image_name"]}
try:
response = self.volume_api.copy_volume_to_image(context,
volume,
image_metadata,
force)
except exception.InvalidVolume as error:
raise webob.exc.HTTPBadRequest(explanation=error.msg)
except ValueError as error:
raise webob.exc.HTTPBadRequest(explanation=six.text_type(error))
except messaging.RemoteError as error:
msg = "%(err_type)s: %(err_msg)s" % {'err_type': error.exc_type,
'err_msg': error.value}
raise webob.exc.HTTPBadRequest(explanation=msg)
except Exception as error:
raise webob.exc.HTTPBadRequest(explanation=six.text_type(error))
return {'os-volume_upload_image': response}
@wsgi.action('os-extend')
def _extend(self, req, id, body):
"""Extend size of volume."""
context = req.environ['cinder.context']
try:
volume = self.volume_api.get(context, id)
except exception.VolumeNotFound as error:
raise webob.exc.HTTPNotFound(explanation=error.msg)
try:
size = int(body['os-extend']['new_size'])
except (KeyError, ValueError, TypeError):
msg = _("New volume size must be specified as an integer.")
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
self.volume_api.extend(context, volume, size)
except exception.InvalidVolume as error:
raise webob.exc.HTTPBadRequest(explanation=error.msg)
return webob.Response(status_int=202)
@wsgi.action('os-update_readonly_flag')
def _volume_readonly_update(self, req, id, body):
"""Update volume readonly flag."""
context = req.environ['cinder.context']
try:
volume = self.volume_api.get(context, id)
except exception.VolumeNotFound as error:
raise webob.exc.HTTPNotFound(explanation=error.msg)
try:
readonly_flag = body['os-update_readonly_flag']['readonly']
except KeyError:
msg = _("Must specify readonly in request.")
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
readonly_flag = strutils.bool_from_string(readonly_flag,
strict=True)
except ValueError as error:
err_msg = encodeutils.exception_to_unicode(error)
msg = _("Invalid value for 'readonly': '%s'") % err_msg
raise webob.exc.HTTPBadRequest(explanation=msg)
self.volume_api.update_readonly_flag(context, volume, readonly_flag)
return webob.Response(status_int=202)
@wsgi.action('os-retype')
def _retype(self, req, id, body):
"""Change type of existing volume."""
context = req.environ['cinder.context']
volume = self.volume_api.get(context, id)
try:
new_type = body['os-retype']['new_type']
except KeyError:
msg = _("New volume type must be specified.")
raise webob.exc.HTTPBadRequest(explanation=msg)
policy = body['os-retype'].get('migration_policy')
self.volume_api.retype(context, volume, new_type, policy)
return webob.Response(status_int=202)
@wsgi.action('os-set_bootable')
def _set_bootable(self, req, id, body):
"""Update bootable status of a volume."""
context = req.environ['cinder.context']
try:
volume = self.volume_api.get(context, id)
except exception.VolumeNotFound as error:
raise webob.exc.HTTPNotFound(explanation=error.msg)
try:
bootable = body['os-set_bootable']['bootable']
except KeyError:
msg = _("Must specify bootable in request.")
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
bootable = strutils.bool_from_string(bootable,
strict=True)
except ValueError as error:
err_msg = encodeutils.exception_to_unicode(error)
msg = _("Invalid value for 'bootable': '%s'") % err_msg
raise webob.exc.HTTPBadRequest(explanation=msg)
update_dict = {'bootable': bootable}
self.volume_api.update(context, volume, update_dict)
return webob.Response(status_int=200)
class Volume_actions(extensions.ExtensionDescriptor):
"""Enable volume actions."""
name = "VolumeActions"
alias = "os-volume-actions"
namespace = "http://docs.openstack.org/volume/ext/volume-actions/api/v1.1"
updated = "2012-05-31T00:00:00+00:00"
def get_controller_extensions(self):
controller = VolumeActionsController()
extension = extensions.ControllerExtension(self, 'volumes', controller)
return [extension]
|
|
# Copyright (c) 2013 - 2014 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for huawei 18000 storage."""
import json
import os
import shutil
import tempfile
import time
from xml.dom import minidom
import mock
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.huawei import huawei_18000
from cinder.volume.drivers.huawei import rest_common
LOG = logging.getLogger(__name__)
test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'provider_location': '11'}
test_snap = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 1,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'provider_location': '11'}
FakeConnector = {'initiator': 'iqn.1993-08.debian:01:ec2bff7ac3a3',
'wwpns': ['10000090fa0d6754'],
'wwnns': ['10000090fa0d6755'],
'host': 'ubuntuc'}
def find_data(method):
if method is None:
data = """{"error":{"code":0},
"data":{"ID":"1",
"NAME":"5mFHcBv4RkCcD+JyrWc0SA"}}"""
if method == 'GET':
data = """{"error":{"code":0},
"data":[{"ID":"1",
"NAME":"IexzQZJWSXuX2e9I7c8GNQ"}]}"""
return data
def find_data_lun(method):
if method == 'GET':
data = """{"error":{"code":0},
"data":{"ID":"1",
"NAME":"IexzQZJWSXuX2e9I7c8GNQ",
"HEALTHSTATUS":"1",
"RUNNINGSTATUS":"27"}}"""
return data
def find_data_lungroup(method):
if method is None:
data = '{"error":{"code":0},\
"data":{"NAME":"5mFHcBv4RkCcD+JyrWc0SA",\
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",\
"ID":"11",\
"TYPE":256}}'
if method == "GET":
data = """{"error":{"code":0},
"data":[{
"NAME":"OpenStack_LunGroup_1",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256}]}"""
if method == "DELETE":
data = """{"error":{"code":0},
"data":[{
"NAME":"IexzQZJWSXuX2e9I7c8GNQ",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256}]}"""
return data
def find_data_hostgroup(method):
if method is None:
data = """{"error":{"code":0},"data":{
"NAME":"ubuntuc",
"DESCRIPTION":"",
"ID":"0",
"TYPE":14}}"""
if method == "GET":
data = """{"error":{"code":0},"data":[{
"NAME":"ubuntuc",
"DESCRIPTION":"",
"ID":"0","TYPE":14}]}"""
return data
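# Stub for time.sleep so the driver's wait/retry loops don't slow the tests.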
def Fake_sleep(time):
pass
def find_data_mappingview(method, other_flag):
if method is None:
data = """{"error":{"code":0},"data":
{"WORKMODE":"255","HEALTHSTATUS":"1",
"NAME":"mOWtSXnaQKi3hpB3tdFRIQ",
"RUNNINGSTATUS":"27","DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"1","INBANDLUNWWN":"",
"TYPE":245}}
"""
if method == "GET":
if other_flag:
data = """{"error":{"code":0},"data":[
{"WORKMODE":"255","HEALTHSTATUS":"1",
"NAME":"mOWtSXnaQKi3hpB3tdFRIQ",
"RUNNINGSTATUS":"27","DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true","ID":"1",
"INBANDLUNWWN":"","TYPE":245},
{"WORKMODE":"255","HEALTHSTATUS":"1",
"NAME":"YheUoRwbSX2BxN767nvLSw",
"RUNNINGSTATUS":"27","DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"2","INBANDLUNWWN":"",
"TYPE":245}]}
"""
else:
data = """{"error":{"code":0},"data":[
{"WORKMODE":"255","HEALTHSTATUS":"1",
"NAME":"IexzQZJWSXuX2e9I7c8GNQ",
"RUNNINGSTATUS":"27","DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true","ID":"1",
"INBANDLUNWWN":"","TYPE":245},
{"WORKMODE":"255","HEALTHSTATUS":"1",
"NAME":"YheUoRwbSX2BxN767nvLSw",
"RUNNINGSTATUS":"27","DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true","ID":"2",
"INBANDLUNWWN":"","TYPE":245}]}
"""
return data
def find_data_snapshot(method):
if method is None:
data = '{"error":{"code":0},"data":{"ID":11,"NAME":"YheUoRwbSX2BxN7"}}'
if method == "GET":
data = """{"error":{"code":0},"data":[
{"ID":11,"NAME":"SDFAJSDFLKJ"},
{"ID":12,"NAME":"SDFAJSDFLKJ2"}]}"""
return data
def find_data_host(method):
if method is None:
data = """{"error":{"code":0},
"data":
{"PARENTTYPE":245,
"NAME":"Default Host",
"DESCRIPTION":"",
"RUNNINGSTATUS":"1",
"IP":"",
"PARENTNAME":"0",
"OPERATIONSYSTEM":"1",
"LOCATION":"",
"HEALTHSTATUS":"1",
"MODEL":"",
"ID":"0",
"PARENTID":"0",
"NETWORKNAME":"",
"TYPE":21}} """
if method == "GET":
data = """{"error":{"code":0},
"data":[
{"PARENTTYPE":245,"NAME":"ubuntuc",
"DESCRIPTION":"","RUNNINGSTATUS":"1",
"IP":"","PARENTNAME":"",
"OPERATIONSYSTEM":"0","LOCATION":"",
"HEALTHSTATUS":"1","MODEL":"",
"ID":"1","PARENTID":"",
"NETWORKNAME":"","TYPE":21},
{"PARENTTYPE":245,"NAME":"ubuntu",
"DESCRIPTION":"","RUNNINGSTATUS":"1",
"IP":"","PARENTNAME":"","OPERATIONSYSTEM":"0",
"LOCATION":"","HEALTHSTATUS":"1",
"MODEL":"","ID":"2","PARENTID":"",
"NETWORKNAME":"","TYPE":21}]} """
return data
def find_data_host_associate(method):
if (method is None) or (method == "GET"):
data = '{"error":{"code":0}}'
return data
def data_session(url):
if url == "/xx/sessions":
data = """{"error":{"code":0},
"data":{"username":"admin",
"iBaseToken":"2001031430",
"deviceid":"210235G7J20000000000"}}"""
if url == "sessions":
data = '{"error":{"code":0},"data":{"ID":11}}'
return data
def data_lun(url, method):
if url == "lun":
data = find_data(method)
if url == "lun/1":
data = find_data_lun(method)
if url == "lun?range=[0-65535]":
data = find_data(method)
if url == "lungroup?range=[0-8191]":
data = find_data_lungroup(method)
if url == "lungroup":
data = find_data_lungroup(method)
if url == "lungroup/associate":
data = """{"error":{"code":0},
"data":{"NAME":"5mFHcBv4RkCcD+JyrWc0SA",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256}}"""
return data
def data_host(url, method):
if url == "hostgroup":
data = find_data_hostgroup(method)
if url == "hostgroup?range=[0-8191]":
data = find_data_hostgroup(method)
if url == "host":
data = find_data_host(method)
if url == "host?range=[0-65534]":
data = find_data_host(method)
if url == "host/associate":
data = find_data_host_associate(method)
if url == "host/associate?TYPE=21&ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0":
data = find_data_host_associate(method)
return data
def find_data_storagepool_snapshot(url, method):
if url == "storagepool":
data = """{"error":{"code":0},
"data":[{"USERFREECAPACITY":"985661440",
"ID":"0",
"NAME":"OpenStack_Pool",
"USERTOTALCAPACITY":"985661440"
}]}"""
if url == "snapshot":
data = find_data_snapshot(method)
if url == "snapshot/activate":
data = """{"error":{"code":0},"data":[
{"ID":11,"NAME":"SDFAJSDFLKJ"},
{"ID":12,"NAME":"SDFAJSDFLKJ"}]}"""
return data
def find_data_luncpy_range_eth_port(url):
if url == "luncopy":
data = """{"error":{"code":0},
"data":{"COPYSTOPTIME":"-1",
"HEALTHSTATUS":"1",
"NAME":"w1PSNvu6RumcZMmSh4/l+Q==",
"RUNNINGSTATUS":"36",
"DESCRIPTION":"w1PSNvu6RumcZMmSh4/l+Q==",
"ID":"0","LUNCOPYTYPE":"1",
"COPYPROGRESS":"0","COPYSPEED":"2",
"TYPE":219,"COPYSTARTTIME":"-1"}}"""
if url == "LUNCOPY?range=[0-100000]":
data = """{"error":{"code":0},
"data":[{"COPYSTOPTIME":"1372209335",
"HEALTHSTATUS":"1",
"NAME":"w1PSNvu6RumcZMmSh4/l+Q==",
"RUNNINGSTATUS":"40",
"DESCRIPTION":"w1PSNvu6RumcZMmSh4/l+Q==",
"ID":"0","LUNCOPYTYPE":"1",
"COPYPROGRESS":"100",
"COPYSPEED":"2",
"TYPE":219,
"COPYSTARTTIME":"1372209329"}]}"""
if url == "eth_port":
data = """{"error":{"code":0},
"data":[{"PARENTTYPE":209,
"MACADDRESS":"00:22:a1:0a:79:57",
"ETHNEGOTIATE":"-1","ERRORPACKETS":"0",
"IPV4ADDR":"192.168.100.2",
"IPV6GATEWAY":"","IPV6MASK":"0",
"OVERFLOWEDPACKETS":"0","ISCSINAME":"P0",
"HEALTHSTATUS":"1","ETHDUPLEX":"2",
"ID":"16909568","LOSTPACKETS":"0",
"TYPE":213,"NAME":"P0","INIORTGT":"4",
"RUNNINGSTATUS":"10","IPV4GATEWAY":"",
"BONDNAME":"","STARTTIME":"1371684218",
"SPEED":"1000","ISCSITCPPORT":"0",
"IPV4MASK":"255.255.0.0","IPV6ADDR":"",
"LOGICTYPE":"0","LOCATION":"ENG0.B5.P0",
"MTU":"1500","PARENTID":"1.5"}]}"""
return data
class Fake18000Common(rest_common.RestCommon):
def __init__(self, configuration):
rest_common.RestCommon.__init__(self, configuration)
self.test_normal = True
self.other_flag = True
self.connect_flag = False
self.delete_flag = False
self.terminateFlag = False
self.deviceid = None
def _change_file_mode(self, filepath):
pass
def _parse_volume_type(self, volume):
poolinfo = self._find_pool_info()
volume_size = self._get_volume_size(poolinfo, volume)
params = {'LUNType': 0,
'WriteType': '1',
'PrefetchType': '3',
'qos_level': 'Qos-high',
'StripUnitSize': '64',
'PrefetchValue': '0',
'PrefetchTimes': '0',
'qos': 'OpenStack_Qos_High',
'MirrorSwitch': '1',
'tier': 'Tier_high'}
params['volume_size'] = volume_size
params['pool_id'] = poolinfo['ID']
return params
def _get_snapshotid_by_name(self, snapshot_name):
return "11"
def _get_qosid_by_lunid(self, lunid):
return ""
def _check_snapshot_exist(self, snapshot_id):
return True
def fc_initiator_data(self):
data = """{"error":{"code":0},"data":[
{"HEALTHSTATUS":"1","NAME":"",
"MULTIPATHTYPE":"1","ISFREE":"true",
"RUNNINGSTATUS":"27","ID":"10000090fa0d6754",
"OPERATIONSYSTEM":"255","TYPE":223},
{"HEALTHSTATUS":"1","NAME":"",
"MULTIPATHTYPE":"1","ISFREE":"true",
"RUNNINGSTATUS":"27","ID":"10000090fa0d6755",
"OPERATIONSYSTEM":"255","TYPE":223}]}"""
return data
def host_link(self):
data = """{"error":{"code":0},
"data":[{"PARENTTYPE":21,
"TARGET_ID":"0000000000000000",
"INITIATOR_NODE_WWN":"20000090fa0d6754",
"INITIATOR_TYPE":"223",
"RUNNINGSTATUS":"27",
"PARENTNAME":"ubuntuc",
"INITIATOR_ID":"10000090fa0d6754",
"TARGET_PORT_WWN":"24000022a10a2a39",
"HEALTHSTATUS":"1",
"INITIATOR_PORT_WWN":"10000090fa0d6754",
"ID":"010000090fa0d675-0000000000110400",
"TARGET_NODE_WWN":"21000022a10a2a39",
"PARENTID":"1","CTRL_ID":"0",
"TYPE":255,"TARGET_TYPE":"212"}]}"""
self.connect_flag = True
return data
def call(self, url=False, data=None, method=None):
url = url.replace('http://100.115.10.69:8082/deviceManager/rest', '')
url = url.replace('/210235G7J20000000000/', '')
data = None
if self.test_normal:
if url == "/xx/sessions" or url == "sessions":
data = data_session(url)
if url == "lun/count?TYPE=11&ASSOCIATEOBJTYPE=256&"\
"ASSOCIATEOBJID=11":
data = """{"data":{"COUNT":"7"},
"error":{"code":0,"description":"0"}}"""
if url == "lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11&"\
"ASSOCIATEOBJID=11":
data = """{"error":{"code":0},
"data":[{"ID":11}]}"""
if url == "storagepool" or url == "snapshot" or url == "snaps"\
"hot/activate":
data = find_data_storagepool_snapshot(url, method)
if url == "lungroup" or url == "lungroup/associate"\
or url == "lun" or url == "lun/1":
data = data_lun(url, method)
if url == "lun?range=[0-65535]" or url == "lungroup?r"\
"ange=[0-8191]":
data = data_lun(url, method)
if url == "lungroup/associate?ID=11"\
"&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=11":
data = '{"error":{"code":0}}'
self.terminateFlag = True
if url == "fc_initiator/10000090fa0d6754" or url == "lun/11"\
or url == "LUNCOPY/0"\
or url == "mappingview/REMOVE_ASSOCIATE":
data = '{"error":{"code":0}}'
self.delete_flag = True
if url == "LUNCOPY/start" or url == "mappingview/1"\
or url == "hostgroup/associate":
data = '{"error":{"code":0}}'
if url == "MAPPINGVIEW/CREATE_ASSOCIATE" or url == "snapshot/11"\
or url == "snapshot/stop" or url == "LUNGroup/11":
data = '{"error":{"code":0}}'
self.delete_flag = True
if url == "luncopy" or url == "eth_port" or url == "LUNC"\
"OPY?range=[0-100000]":
data = find_data_luncpy_range_eth_port(url)
if url == "iscsidevicename":
data = """{"error":{"code":0},
"data":[{"CMO_ISCSI_DEVICE_NAME":
"iqn.2006-08.com.huawei:oceanstor:21000022a10a2a39:iscsinametest"}]}"""
if url == "hostgroup" or url == "host" or url == "host/associate":
data = data_host(url, method)
if url == "host/associate?TYPE=21&ASSOCIATEOBJTYPE=14&AS"\
"SOCIATEOBJID=0":
data = data_host(url, method)
if url == "hostgroup?range=[0-8191]" or url == "host?ra"\
"nge=[0-65534]":
data = data_host(url, method)
if url == "iscsi_initiator/iqn.1993-08.debian:01:ec2bff7ac3a3":
data = """{"error":{"code":0},"data":{
"ID":"iqn.1993-08.debian:01:ec2bff7ac3a3",
"NAME":"iqn.1993-08.debian:01:ec2bff7ac3a3",
"ISFREE":"True"}}"""
if url == "iscsi_initiator" or url == "iscsi_initiator/"\
or url == "iscsi_initiator?range=[0-65535]":
data = '{"error":{"code":0}}'
if url == "mappingview" or url == "mappingview?range=[0-65535]":
data = find_data_mappingview(method, self.other_flag)
if url == ("lun/associate?ID=1&TYPE=11&"
"ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=0"):
data = '{"error":{"code":0}}'
if url == "fc_initiator?ISFREE=true&range=[0-1000]":
data = self.fc_initiator_data()
if url == "host_link?INITIATOR_TYPE=223&INITIATOR_PORT_WWN="\
"10000090fa0d6754":
data = self.host_link()
if url == "mappingview/associate?TYPE=245&"\
"ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0"\
or url == "mappingview/associate?TYPE=245&"\
"ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=11":
data = '{"error":{"code":0},"data":[{"ID":11,"NAME":"test"}]}'
if url == "lun/associate?TYPE=11&"\
"ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=1":
data = '{"error":{"code":0}}'
self.connect_flag = True
if url == "iscsi_tgt_port":
data = '{"data":[{"ETHPORTID":"139267",\
"ID":"iqn.oceanstor:21004846fb8ca15f::22003:111.111.101.244",\
"TPGT":"8196","TYPE":249}],\
"error":{"code":0,"description":"0"}}'
else:
data = '{"error":{"code":31755596}}'
if url == "lun/11":
if method == "GET":
data = """{"error":{"code":0},"data":{"ID":"11",
"IOCLASSID":"11",
"NAME":"5mFHcBv4RkCcD+JyrWc0SA"}}"""
res_json = json.loads(data)
return res_json
class Fake18000Storage(huawei_18000.Huawei18000ISCSIDriver):
"""Fake Huawei Storage, Rewrite some methods of HuaweiISCSIDriver."""
def __init__(self, configuration):
super(Fake18000Storage, self).__init__(configuration)
self.configuration = configuration
def do_setup(self):
self.common = Fake18000Common(configuration=self.configuration)
class Fake18000FCStorage(huawei_18000.Huawei18000FCDriver):
"""Fake Huawei Storage, Rewrite some methods of HuaweiISCSIDriver."""
def __init__(self, configuration):
super(Fake18000FCStorage, self).__init__(configuration)
self.configuration = configuration
def do_setup(self):
self.common = Fake18000Common(configuration=self.configuration)
class Huawei18000ISCSIDriverTestCase(test.TestCase):
def setUp(self):
super(Huawei18000ISCSIDriverTestCase, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
self.addCleanup(shutil.rmtree, self.tmp_dir)
self.create_fake_conf_file()
self.addCleanup(os.remove, self.fake_conf_file)
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.cinder_huawei_conf_file = self.fake_conf_file
self.stubs.Set(time, 'sleep', Fake_sleep)
driver = Fake18000Storage(configuration=self.configuration)
self.driver = driver
self.driver.do_setup()
self.driver.common.test_normal = True
def testloginSuccess(self):
deviceid = self.driver.common.login()
self.assertEqual(deviceid, '210235G7J20000000000')
def testcreatevolumesuccess(self):
self.driver.common.login()
lun_info = self.driver.create_volume(test_volume)
self.assertEqual(lun_info['provider_location'], '1')
self.assertEqual(lun_info['lun_info']['NAME'],
'5mFHcBv4RkCcD+JyrWc0SA')
def testcreatesnapshotsuccess(self):
self.driver.common.login()
lun_info = self.driver.create_snapshot(test_volume)
self.assertEqual(lun_info['provider_location'], 11)
self.assertEqual(lun_info['lun_info']['NAME'], 'YheUoRwbSX2BxN7')
def testdeletevolumesuccess(self):
self.driver.common.login()
self.driver.common.delete_flag = False
self.driver.delete_volume(test_volume)
self.assertTrue(self.driver.common.delete_flag)
def testdeletesnapshotsuccess(self):
self.driver.common.login()
self.driver.common.delete_flag = False
self.driver.delete_snapshot(test_snap)
self.assertTrue(self.driver.common.delete_flag)
def testclonevolumesuccess(self):
self.driver.common.login()
lun_info = self.driver.create_cloned_volume(test_volume,
test_volume)
self.assertEqual(lun_info['provider_location'], '1')
self.assertEqual(lun_info['lun_info']['NAME'],
'5mFHcBv4RkCcD+JyrWc0SA')
def testcreatevolumefromsnapsuccess(self):
self.driver.common.login()
lun_info = self.driver.create_volume_from_snapshot(test_volume,
test_volume)
self.assertEqual(lun_info['provider_location'], '1')
self.assertEqual(lun_info['lun_info']['NAME'],
'5mFHcBv4RkCcD+JyrWc0SA')
def testinitializeconnectionsuccess(self):
self.driver.common.login()
iscsi_properties = self.driver.initialize_connection(test_volume,
FakeConnector)
self.assertEqual(iscsi_properties['data']['target_lun'], 1)
def testterminateconnectionsuccess(self):
self.driver.common.login()
self.driver.common.terminateFlag = False
self.driver.terminate_connection(test_volume, FakeConnector)
self.assertTrue(self.driver.common.terminateFlag)
def testinitializeconnectionnoviewsuccess(self):
self.driver.common.login()
self.driver.common.other_flag = False
self.driver.common.connect_flag = False
self.driver.initialize_connection(test_volume, FakeConnector)
self.assertTrue(self.driver.common.connect_flag)
def testterminateconnectionnoviewsuccess(self):
self.driver.common.login()
self.driver.common.terminateFlag = False
self.driver.terminate_connection(test_volume, FakeConnector)
self.assertTrue(self.driver.common.terminateFlag)
def testgetvolumestatus(self):
self.driver.common.login()
data = self.driver.get_volume_stats()
self.assertEqual(data['driver_version'], '1.1.0')
def testloginfail(self):
self.driver.common.test_normal = False
self.assertRaises(exception.CinderException, self.driver.common.login)
def testcreatesnapshotfail(self):
self.driver.common.login()
self.driver.common.test_normal = False
self.assertRaises(exception.CinderException,
self.driver.create_snapshot, test_volume)
def testcreatevolumefail(self):
self.driver.common.login()
self.driver.common.test_normal = False
self.assertRaises(exception.CinderException,
self.driver.create_volume, test_volume)
def testdeletevolumefail(self):
self.driver.common.login()
self.driver.common.test_normal = False
self.assertRaises(exception.CinderException,
self.driver.delete_volume, test_volume)
def testdeletesnapshotfail(self):
self.driver.common.login()
self.driver.common.test_normal = False
self.assertRaises(exception.CinderException,
self.driver.delete_snapshot, test_volume)
def testinitializeconnectionfail(self):
self.driver.common.login()
self.driver.common.test_normal = False
self.assertRaises(exception.CinderException,
self.driver.initialize_connection,
test_volume, FakeConnector)
def testgetdefaulttimeout(self):
result = self.driver.common._get_default_timeout()
self.assertEqual('43200', result)
def testgetwaitinterval(self):
result = self.driver.common._get_wait_interval('LUNReadyWaitInterval')
self.assertEqual('2', result)
def create_fake_conf_file(self):
"""Create a fake Config file
Huawei storage customize a XML configuration file, the configuration
file is used to set the Huawei storage custom parameters, therefore,
in the UT test we need to simulate such a configuration file
"""
doc = minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
storage = doc.createElement('Storage')
config.appendChild(storage)
controllerip0 = doc.createElement('ControllerIP0')
controllerip0_text = doc.createTextNode('10.10.10.1')
controllerip0.appendChild(controllerip0_text)
storage.appendChild(controllerip0)
controllerip1 = doc.createElement('ControllerIP1')
controllerip1_text = doc.createTextNode('10.10.10.2')
controllerip1.appendChild(controllerip1_text)
storage.appendChild(controllerip1)
username = doc.createElement('UserName')
username_text = doc.createTextNode('admin')
username.appendChild(username_text)
storage.appendChild(username)
userpassword = doc.createElement('UserPassword')
userpassword_text = doc.createTextNode('Admin@storage')
userpassword.appendChild(userpassword_text)
storage.appendChild(userpassword)
url = doc.createElement('RestURL')
url_text = doc.createTextNode('http://100.115.10.69:8082/'
'deviceManager/rest/')
url.appendChild(url_text)
storage.appendChild(url)
lun = doc.createElement('LUN')
config.appendChild(lun)
storagepool = doc.createElement('StoragePool')
pool_text = doc.createTextNode('OpenStack_Pool')
storagepool.appendChild(pool_text)
lun.appendChild(storagepool)
timeout = doc.createElement('Timeout')
timeout_text = doc.createTextNode('43200')
timeout.appendChild(timeout_text)
lun.appendChild(timeout)
lun_ready_wait_interval = doc.createElement('LUNReadyWaitInterval')
lun_ready_wait_interval_text = doc.createTextNode('2')
lun_ready_wait_interval.appendChild(lun_ready_wait_interval_text)
lun.appendChild(lun_ready_wait_interval)
prefetch = doc.createElement('Prefetch')
prefetch.setAttribute('Type', '0')
prefetch.setAttribute('Value', '0')
lun.appendChild(prefetch)
iscsi = doc.createElement('iSCSI')
config.appendChild(iscsi)
defaulttargetip = doc.createElement('DefaultTargetIP')
defaulttargetip_text = doc.createTextNode('100.115.10.68')
defaulttargetip.appendChild(defaulttargetip_text)
iscsi.appendChild(defaulttargetip)
initiator = doc.createElement('Initiator')
initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3')
initiator.setAttribute('TargetIP', '192.168.100.2')
iscsi.appendChild(initiator)
fakefile = open(self.fake_conf_file, 'w')
fakefile.write(doc.toprettyxml(indent=''))
fakefile.close()
class Huawei18000FCDriverTestCase(test.TestCase):
def setUp(self):
super(Huawei18000FCDriverTestCase, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
self.addCleanup(shutil.rmtree, self.tmp_dir)
self.create_fake_conf_file()
self.addCleanup(os.remove, self.fake_conf_file)
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.cinder_huawei_conf_file = self.fake_conf_file
self.stubs.Set(time, 'sleep', Fake_sleep)
driver = Fake18000FCStorage(configuration=self.configuration)
self.driver = driver
self.driver.do_setup()
self.driver.common.test_normal = True
def testloginSuccess(self):
deviceid = self.driver.common.login()
self.assertEqual(deviceid, '210235G7J20000000000')
def testcreatevolumesuccess(self):
self.driver.common.login()
lun_info = self.driver.create_volume(test_volume)
self.assertEqual(lun_info['provider_location'], '1')
self.assertEqual(lun_info['lun_info']['NAME'],
'5mFHcBv4RkCcD+JyrWc0SA')
def testcreatesnapshotsuccess(self):
self.driver.common.login()
lun_info = self.driver.create_snapshot(test_volume)
self.assertEqual(lun_info['provider_location'], 11)
self.assertEqual(lun_info['lun_info']['NAME'], 'YheUoRwbSX2BxN7')
def testdeletevolumesuccess(self):
self.driver.common.login()
self.driver.common.delete_flag = False
self.driver.delete_volume(test_volume)
self.assertTrue(self.driver.common.delete_flag)
def testdeletesnapshotsuccess(self):
self.driver.common.login()
self.driver.common.delete_flag = False
self.driver.delete_snapshot(test_snap)
self.assertTrue(self.driver.common.delete_flag)
def testclonevolumesuccess(self):
self.driver.common.login()
lun_info = self.driver.create_cloned_volume(test_volume,
test_volume)
self.assertEqual(lun_info['provider_location'], '1')
self.assertEqual(lun_info['lun_info']['NAME'],
'5mFHcBv4RkCcD+JyrWc0SA')
def testcreatevolumefromsnapsuccess(self):
self.driver.common.login()
volumeid = self.driver.create_volume_from_snapshot(test_volume,
test_volume)
self.assertEqual(volumeid['provider_location'], '1')
def testinitializeconnectionsuccess(self):
self.driver.common.login()
properties = self.driver.initialize_connection(test_volume,
FakeConnector)
self.assertEqual(properties['data']['target_lun'], 1)
def testterminateconnectionsuccess(self):
self.driver.common.login()
self.driver.common.terminateFlag = False
self.driver.terminate_connection(test_volume, FakeConnector)
self.assertTrue(self.driver.common.terminateFlag)
def testinitializeconnectionnoviewsuccess(self):
self.driver.common.login()
self.driver.common.other_flag = False
self.driver.common.connect_flag = False
self.driver.initialize_connection(test_volume, FakeConnector)
self.assertTrue(self.driver.common.connect_flag)
def testterminateconnectionnoviewsuccess(self):
self.driver.common.login()
self.driver.common.terminateFlag = False
self.driver.terminate_connection(test_volume, FakeConnector)
self.assertTrue(self.driver.common.terminateFlag)
def testgetvolumestatus(self):
self.driver.common.login()
data = self.driver.get_volume_stats()
self.assertEqual(data['driver_version'], '1.1.0')
def testloginfail(self):
self.driver.common.test_normal = False
self.assertRaises(exception.CinderException,
self.driver.common.login)
def testcreatesnapshotfail(self):
self.driver.common.login()
self.driver.common.test_normal = False
self.assertRaises(exception.CinderException,
self.driver.create_snapshot, test_volume)
def testcreatevolumefail(self):
self.driver.common.login()
self.driver.common.test_normal = False
self.assertRaises(exception.CinderException,
self.driver.create_volume, test_volume)
def testdeletevolumefail(self):
self.driver.common.login()
self.driver.common.test_normal = False
self.assertRaises(exception.CinderException,
self.driver.delete_volume, test_volume)
def testdeletesnapshotfail(self):
self.driver.common.login()
self.driver.common.test_normal = False
self.assertRaises(exception.CinderException,
self.driver.delete_snapshot, test_volume)
def testinitializeconnectionfail(self):
self.driver.common.login()
self.driver.common.test_normal = False
self.assertRaises(exception.CinderException,
self.driver.initialize_connection,
test_volume, FakeConnector)
def testgetdefaulttimeout(self):
result = self.driver.common._get_default_timeout()
self.assertEqual('43200', result)
def testgetwaitinterval(self):
result = self.driver.common._get_wait_interval('LUNReadyWaitInterval')
self.assertEqual('2', result)
def create_fake_conf_file(self):
"""Create a fake Config file
Huawei storage customize a XML configuration file, the configuration
file is used to set the Huawei storage custom parameters, therefore,
in the UT test we need to simulate such a configuration file
"""
doc = minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
storage = doc.createElement('Storage')
config.appendChild(storage)
controllerip0 = doc.createElement('ControllerIP0')
controllerip0_text = doc.createTextNode('10.10.10.1')
controllerip0.appendChild(controllerip0_text)
storage.appendChild(controllerip0)
controllerip1 = doc.createElement('ControllerIP1')
controllerip1_text = doc.createTextNode('10.10.10.2')
controllerip1.appendChild(controllerip1_text)
storage.appendChild(controllerip1)
username = doc.createElement('UserName')
username_text = doc.createTextNode('admin')
username.appendChild(username_text)
storage.appendChild(username)
userpassword = doc.createElement('UserPassword')
userpassword_text = doc.createTextNode('Admin@storage')
userpassword.appendChild(userpassword_text)
storage.appendChild(userpassword)
url = doc.createElement('RestURL')
url_text = doc.createTextNode('http://100.115.10.69:8082/'
'deviceManager/rest/')
url.appendChild(url_text)
storage.appendChild(url)
lun = doc.createElement('LUN')
config.appendChild(lun)
storagepool = doc.createElement('StoragePool')
pool_text = doc.createTextNode('OpenStack_Pool')
storagepool.appendChild(pool_text)
lun.appendChild(storagepool)
timeout = doc.createElement('Timeout')
timeout_text = doc.createTextNode('43200')
timeout.appendChild(timeout_text)
lun.appendChild(timeout)
lun_ready_wait_interval = doc.createElement('LUNReadyWaitInterval')
lun_ready_wait_interval_text = doc.createTextNode('2')
lun_ready_wait_interval.appendChild(lun_ready_wait_interval_text)
lun.appendChild(lun_ready_wait_interval)
prefetch = doc.createElement('Prefetch')
prefetch.setAttribute('Type', '0')
prefetch.setAttribute('Value', '0')
lun.appendChild(prefetch)
iscsi = doc.createElement('iSCSI')
config.appendChild(iscsi)
defaulttargetip = doc.createElement('DefaultTargetIP')
defaulttargetip_text = doc.createTextNode('100.115.10.68')
defaulttargetip.appendChild(defaulttargetip_text)
iscsi.appendChild(defaulttargetip)
initiator = doc.createElement('Initiator')
initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3')
initiator.setAttribute('TargetIP', '192.168.100.2')
iscsi.appendChild(initiator)
fakefile = open(self.fake_conf_file, 'w')
fakefile.write(doc.toprettyxml(indent=''))
fakefile.close()
|
|
"""
A class to help create and manage a virtual test environment.
"""
import contextlib
import copy
import itertools
from .log import get_logger
from .dependency import Consumer, Provider, Cut, Graft, get_all_depend
LOGGER = get_logger(__name__)
class Env(object):
"""
A tree of environment nodes: each Env may hold a data value and named
child Envs, addressable via dotted paths (e.g. 'a.b.c').
"""
def __init__(self, data=None, parent=None, childs=None, path=''):
self.data = data
self.parent = parent
self.childs = childs if childs else {}
self._path = path
self._record = False
self._history = []
def __getitem__(self, key):
value = self.childs.get(key)
if value is None and not key.startswith("_"):
value = child_env = self.__class__(parent=self, path=key)
self.childs[key] = child_env
return value
def __setitem__(self, key, value):
if isinstance(value, self.__class__):
self.childs[key] = value
value.parent = self
else:
child_env = self[key]
child_env.data = value
@contextlib.contextmanager
def record(self):
self._history = []
self._record = True
yield
self._record = False
def __str__(self):
return self.struct_table()
def keys(self):
return self.childs.keys()
def values(self):
return self.childs.values()
def items(self):
return self.childs.items()
def get_data(self, path):
return self._get_data_from_path(path)
def set_data(self, path, value):
LOGGER.debug('Env %s set_data, path: %s, value: %s', self, path, value)
self._set_data_from_path(path, value)
def _get_data_from_path(self, path, use_getitem=False):
if not path:
return self
split_path = path.split('.')
tmp_env = self
for data in split_path:
if not isinstance(tmp_env, self.__class__):
return
if use_getitem:
tmp_env = tmp_env[data]
else:
tmp_env = tmp_env.childs.get(data)
if tmp_env is None:
return
return tmp_env
def _set_data_from_path(self, path, value):
env = self._get_data_from_path(path, True)
if env is None:
raise Exception
if isinstance(value, self.__class__):
env.childs = value.childs
env._change_parent(env)
LOGGER.debug('Env %s update sub env: %s, value: %s', self, path, value)
env.data = value.data
else:
LOGGER.debug('Env %s update sub env %s, data %s -> %s', self, path, env.data, value)
env.data = value
def _change_parent(self, tgt):
for child in self.childs.values():
child.parent = tgt
def hit_require(self, depend):
if depend.type == Consumer.REQUIRE:
require = True
elif depend.type == Consumer.REQUIRE_N:
require = False
else:
raise NotImplementedError
# TODO: support more kind of operation
paths = depend.env_depend.split('|')
for path in paths:
if self._valid_single_require(path, require):
return True
return False
def _valid_single_require(self, path, require):
ret = self._get_data_from_path(path)
if ret is not None and (ret.struct_table() != '{}' or ret.data):
return require
else:
return not require
def hit_requires(self, depends):
for depend in depends:
if not self.hit_require(depend):
return False
return True
def call_effect_env(self, objs):
for obj in objs:
obj.effect_env(self)
def gen_transfer_env(self, func):
"""
Return the transferred env, or None if the dependencies are not satisfied.
"""
con = get_all_depend(func, depend_cls=Consumer)
if not self.hit_requires(con):
return
new_env = copy.deepcopy(self)
# Make sure Graft, Cut and Migrate run before Provider.
# Note that Migrate is a subclass of Graft.
objs = get_all_depend(func, depend_cls=(Graft, Cut))
new_env.call_effect_env(objs)
objs = get_all_depend(func, depend_cls=Provider)
new_env.call_effect_env(objs)
return new_env
@classmethod
def gen_require_env(cls, func):
"""
Yield the environments required by the given function.
"""
cons = get_all_depend(func, depend_cls=Consumer)
env_paths = []
for con in cons:
if con.type == Consumer.REQUIRE:
env_paths.append(con.env_depend.split('|'))
all_paths = itertools.product(*env_paths)
for paths in all_paths:
env = cls()
for path in paths:
env._set_data_from_path(path, True)
yield env
def __reduce__(self):
return self.__reduce_ex__(None)
def __reduce_ex__(self, protocol):
return (self.__class__, (self.data, self.parent, self.childs, self._path),)
def __hash__(self):
"""
NOTICE: do not mutate the env while relying on this hash.
"""
return hash(self.struct_table())
def struct_table(self):
if not self.keys():
# TODO
return '{}'
ret = '{'
for key, value in sorted(self.items()):
if value.need_fmt():
ret += ' %s|%s: %s,' % (key, bool(value.data), value.struct_table())
ret += '}'
return ret
def __cmp__(self, target):
if not isinstance(target, self.__class__):
return False
return self.struct_table() == target.struct_table()
def __eq__(self, target):
return self.__cmp__(target)
def __len__(self):
num = 0
if self.values():
for child in self.values():
num += len(child)
elif self.data:
return 1
return num
def need_fmt(self):
for child in self.values():
if child.need_fmt():
return True
if self.data:
return True
return False
def _full_path(self):
if self.parent is not None and self.parent._full_path():
return '%s.%s' % (self.parent._full_path(), self._path)
return self._path
def __repr__(self):
return "<%s path='%s' data='%s'>" % (self.__class__.__name__,
self._full_path(), self.data)
def __le__(self, target):
return self._check_include(target)
def __ge__(self, target):
return target._check_include(self)
def _check_include(self, target):
for key, value in self.items():
# TODO: is there any way to make this look more correct?
# if not value.childs:
if value.data and (key not in target.keys() or not target[key].data):
return False
if value.data is False and key in target.keys() and target[key].data:
return False
if not value._check_include(target[key]):
return False
return True
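# A minimal usage sketch (illustrative only, not part of the original module).
# It assumes nothing beyond the Env API defined above and shows how dotted
# paths map onto nested child envs.
if __name__ == '__main__':
    env = Env()
    env.set_data('service.db', True)                 # creates child envs 'service' -> 'db'
    assert env.get_data('service.db').data is True
    assert 'service' in env.keys()
    print(env.struct_table())                        # e.g. '{ service|False: { db|True: {},},}'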
|
|
#!/usr/bin/env python
"""
Assignment 3-2:
Use SNMPv3 to create two SVG image files.
Image1: input/output bytes on pynet-rtr1-Fa4
Image2: input/output packets on pynet-rtr1-Fa4
Get bytes and packets every five minutes for an hour, compute the differences and graph each point over time
Relevant OIDs:
('ifDescr_fa4', '1.3.6.1.2.1.2.2.1.2.5')
('ifInOctets_fa4', '1.3.6.1.2.1.2.2.1.10.5')
('ifInUcastPkts_fa4', '1.3.6.1.2.1.2.2.1.11.5')
('ifOutOctets_fa4', '1.3.6.1.2.1.2.2.1.16.5'),
('ifOutUcastPkts_fa4', '1.3.6.1.2.1.2.2.1.17.5')
Command line syntax (matching the usage string in main()):
]$ SNMP_Graphing <ip_addr> <username> <oid_file_name.json> <graph_file_name> <graphing_interval_minutes>
<num_iterations> <graph_max_iterations>
<will prompt for SNMPv3 auth_key/encrypt_key>
Plan:
Create my_router object, get SNMP key, download data. If interval is specified, sleep for interval, do until
num_iterations. If interval is not specified, run once and terminate. Either way, store diffs in a data structure in
a pickle file.
"""
import pygal
import time
import sys
import pickle
import getpass
import os.path
import json
import snmp_helper
from collections import namedtuple
DEBUG = False
SNMP_PORT = 161
PICKLE_FILE = "SNMP_Graphing_pkl.p"
SYSNAME = "SysName"
DESCRIPTION = "ifDescr"
class RouterOID(object):
"""
Represents a router OID, the SNMP credentials used to query it, a list of time deltas between
measurements, and a list of the measurement differences taken at those times.
"""
def __init__(self, ip_addr, username, auth_encr_key, oid_name, oid):
self.router = (ip_addr, SNMP_PORT)
self.snmp_user = (username, auth_encr_key, auth_encr_key)
self.oid_name = oid_name
self.oid = oid
self.timestamp = 0
self.total_measurement = 0
self.num_data_points = 0
self.time_diff_list = []
self.measurement_diff_list = []
def add_data_point(self):
timestamp = int(time.time())
if self.timestamp:
time_diff = timestamp - self.timestamp
else:
time_diff = 0
self.timestamp = timestamp
self.time_diff_list.append(time_diff)
snmp_data = snmp_helper.snmp_get_oid_v3(self.router, self.snmp_user, self.oid)
total_measurement = int(snmp_helper.snmp_extract(snmp_data))
if self.total_measurement:
diff_measurement = total_measurement - self.total_measurement
else:
diff_measurement = 0
self.total_measurement = total_measurement
self.measurement_diff_list.append(diff_measurement)
self.num_data_points += 1
def get_time_list(self, last_n_values):
first_index = 0 - last_n_values
return self.time_diff_list[first_index:]
def get_measurement_list(self, last_n_values):
first_index = 0 - last_n_values
return self.measurement_diff_list[first_index:]
def print_me(self):
print """
router: {0}
snmp_user: {1}
oid_name: {2}
timestamp: {3}
total_measurement: {4}
num_data_points: {5}
""".format(self.router, self.snmp_user, self.oid_name, self.timestamp, self.total_measurement, self.num_data_points)
print "time_diff_list: "
for element in self.time_diff_list:
print element
print "measurement_diff_list: "
for element in self.measurement_diff_list:
print element
def get_data(oid_name, snmp_oid_list, ip_addr, snmp_port, username, auth_encr_key):
router = (ip_addr, snmp_port)
snmp_user = (username, auth_encr_key, auth_encr_key)
for OID in snmp_oid_list:
if OID['OID_NAME'] == oid_name:
target_OID = OID['OID_VALUE']
snmp_data = snmp_helper.snmp_get_oid_v3(router, snmp_user, target_OID)
return snmp_helper.snmp_extract(snmp_data)
else:
return ""
def main():
if len(sys.argv) < 8:
sys.exit(("Usage %s <ip_addr> <username> <oid_file_name.json> <graph_file_name> <graphing_interval_minutes>" +
" <num_iterations> <graph_max_iterations>") % sys.argv[0])
ip_addr = sys.argv[1]
username = sys.argv[2]
oid_file_name = sys.argv[3]
graph_file = sys.argv[4]
graph_interval = int(sys.argv[5])
num_iterations = int(sys.argv[6])
graph_max_iterations = int(sys.argv[7])
auth_key = getpass.getpass("Please enter auth/encryption key: ")
if DEBUG:
print """
ip address: {0}
username: {1}
auth/encr key: {2}
oid_file_name: {3}
graph_file: {4}
graph_interval: {5}
num_iterations: {6}
graph_max_iterations: {7}
""".format(ip_addr, username, auth_key, oid_file_name, graph_file, graph_interval, num_iterations, graph_max_iterations)
try:
with open(oid_file_name) as f:
json_snmp_oid_list = json.load(f)
except IOError:
sys.exit("Could not open OID file for reading: " + oid_file_name)
router_hostname = get_data(SYSNAME, json_snmp_oid_list, ip_addr, SNMP_PORT, username, auth_key)
my_router_oid_obj_list = []
pickle_file = router_hostname + ".pkl"
if os.path.exists(pickle_file):
my_router_oid_obj_list = pickle.load(open(pickle_file, 'rb'))
else:
for OID in json_snmp_oid_list:
if OID['Is_Counter'] == "True":
myRouterOIDObj = RouterOID(ip_addr, username, auth_key, OID['OID_NAME'], OID['OID_VALUE'])
my_router_oid_obj_list.append(myRouterOIDObj)
if graph_interval == 0:
num_iterations = 1
for i in range(0, num_iterations):
for router_oid_obj in my_router_oid_obj_list:
router_oid_obj.add_data_point()
if graph_interval > 0:
time.sleep(60*graph_interval)
if DEBUG:
i = 1
for myobj in my_router_oid_obj_list:
print "MyRouterOIDObj {0}: ".format(i)
myobj.print_me()
i += 1
print "line chart title: " + router_hostname + " stats"
print "time_list for max_iterations (" + str(graph_max_iterations) + "):"
for time_element in my_router_oid_obj_list[0].get_time_list(graph_max_iterations):
print " " + str(time_element)
print "measurement_list:"
for oidObj in my_router_oid_obj_list:
print "OID: " + oidObj.oid_name
for measElem in oidObj.get_measurement_list(graph_max_iterations):
print " " + str(measElem)
line_chart = pygal.Line()
line_chart.title = router_hostname + " stats"
line_chart.x_labels = my_router_oid_obj_list[0].get_time_list(graph_max_iterations)
for router_oid_obj in my_router_oid_obj_list:
line_chart.add(router_oid_obj.oid_name, router_oid_obj.get_measurement_list(graph_max_iterations))
line_chart.render_to_file(graph_file)
try:
pickle.dump(my_router_oid_obj_list, open(pickle_file, "wb"))
except IOError:
sys.exit("Could not write state file %s" % pickle_file)
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
"""
* DocumentNGramGraph.java
*
* Created on 17/5/2017 16:00
*
"""
import logging
import matplotlib.pyplot as plt
import networkx as nx
from networkx.algorithms.isomorphism import numerical_edge_match
from networkx.drawing.nx_agraph import graphviz_layout
logger = logging.getLogger(__name__)
"""
 * Represents the graph of a document, whose vertices are the document's n-grams and whose
 * edges count the n-grams' co-occurrences within a given window.
 *
* @author ysig
"""
class DocumentNGramGraph:
# initialization
def __init__(self, n=3, Dwin=2, Data=[], GPrintVerbose=True):
# consider not having characters but lists of objects
self._Data = []
# data size build for reuse of len(data)
self._dSize = 0
# stores the ngram
self._ngram = []
# store the ngram graph
self._Graph = nx.DiGraph()
# cache of edges (set vs. list)
self._edges = set()
# the graph stores its maximum and minimum weight
self._maxW = 0
self._minW = float("inf")
# data must be "listable"
self._Dwin = abs(int(Dwin))
# n for the n-graph
self._n = abs(int(n))
self.setData(Data)
# a printing flag determining whether the graph drawing is saved to a file or
# displayed on screen
self._GPrintVerbose = GPrintVerbose
if not (self._Data == []):
self.buildGraph()
def __len__(self):
return self._Graph.size()
def __eq__(self, other):
return nx.is_isomorphic(
self._Graph, other._Graph, edge_match=numerical_edge_match("weight", 1)
)
# @method buildGraph takes a data input, segments it into n-grams
# and adds edges between n-grams that co-occur within a given window
# !notice: at this developmental stage the weighting method
# may not be correct
def buildGraph(self, verbose=False, d=[]):
# set Data @class_var
self.setData(d)
# build ngram
ng = self.build_ngram()
s = len(ng)
# init graph
# TODO: add clear function
o = min(self._Dwin, s)
if o >= 1:
window = [ng[0]]
# append the first full window
# while adding the needed edges
for gram in ng[1 : o + 1]:
for w in window:
self.addEdgeInc(gram, w)
window.append(gram)
# with full window span till
# the end.
for gram in ng[o + 1 :]:
for w in window:
self.addEdgeInc(gram, w)
window.pop(0)
window.append(gram)
# print graph (optional)
if verbose:
self.GraphDraw(self._GPrintVerbose)
return self._Graph
# adds an edge if it does not exist yet,
# otherwise increments its weight
# !notice: the reweighting technique may be incorrect
# at this developmental stage
def addEdgeInc(self, a, b, w=1):
# A = repr(a)#str(a)
# B = repr(b)#str(b)
# merging can also be done in other ways
# add an extra class variable
A = tuple(a)
B = tuple(b)
if (A, B) in self._edges:
edata = self._Graph.get_edge_data(A, B)
# DEBUG LINES
# print "updating edge between (",A,B,")"
# print "to weight",(edata['weight']+1)
r = edata["weight"] + w
else:
# DEBUG LINES
# print "adding edge between (",A,B,")"
r = w
# update/add edge weight
self.setEdge(A, B, r)
# creates the n-grams of the data based on @param n
def build_ngram(self, d=[]):
self.setData(d)
Data = self._Data
l = Data[0 : min(self._n, self._dSize)]
q = []
q.append(l[:])
if self._n < self._dSize:
for d in Data[min(self._n, self._dSize) :]:
l.pop(0)
l.append(d)
q.append(l[:])
self._ngram = q
return q
# draws the graph using matplotlib
def GraphDraw(self, verbose=True, print_name="graph", lf=True, ns=1000, wf=True):
pos = graphviz_layout(self._Graph)
# pos = sring_layout(self._Graph, scale=1)
# nx.draw(self._Graph,pos = pos,node_size=ns,with_labels = lf, node_color = 'm')
nx.draw(
self._Graph,
pos=graphviz_layout(self._Graph, prog="dot"),
node_size=ns,
cmap=plt.cm.Blues,
node_color=list(range(len(self._Graph))),
with_labels=lf,
)
if wf:
weight_labels = nx.get_edge_attributes(self._Graph, "weight")
nx.draw_networkx_edge_labels(
self._Graph, pos=pos, edge_labels=weight_labels
)
if verbose:
plt.show()
else:
# plt.savefig('g.png')
# or to dot
nx.drawing.nx_pydot.write_dot(self._Graph, print_name + ".dot")
# !!Unknown error: the produced dot file is
# not readable by dot/xdot.
## set functions for structure's protected fields
def setData(self, Data):
if not (Data == []):
self._Data = list(Data)
self._dSize = len(self._Data)
# sets an edge's weight
def setEdge(self, a, b, w=1):
self._edges.add((a, b)) # Update cache
self._Graph.add_edge(a, b, key="edge", weight=w)
self._maxW = max(self._maxW, w)
self._minW = min(self._minW, w)
# deletes an edge
def delEdge(self, u, v):
self._edges.remove((u, v))
self._Graph.remove_edge(u, v)
# trims the graph by removing unreached nodes
def deleteUnreachedNodes(self):
self._Graph.remove_nodes_from(list(nx.isolates(self._Graph)))
def setN(self, n):
self._n = n
def setDwin(self, win):
self._Dwin = win
## get functions for structure's protected fields
def getMin(self):
return self._MinSize
def getngram(self):
return self._ngram
def getGraph(self):
return self._Graph
def maxW(self):
return self._maxW
def minW(self):
return self._minW
def number_of_edges(self):
return self._Graph.number_of_edges()
def union(self, other, learning_factor=0.5):
"""
Pseudocode:
For graphs G1,G2 where smallGraph = min(G1,G2) & bigGraph = max(G1,G2)
bigGraph gets deepcopied to `bigGraph`
For all (A,B) belongs in smallGraph edges
if (A,B) belongs also to bigGraph edges (deep-copied graph)
replace the weight with value w1*lf+w2*(1-lf) on `bigGraph`
else
add edge to `bigGraph` with the value it has on small graph
return `bigGraph`
"""
other_graph = other.getGraph()
# Convert edge-list to set to speed-up look-up
edge_set = set(self._Graph.edges())
for (vertex_start, vertex_end, edge_data) in other_graph.edges(data=True):
edge_weight = edge_data["weight"]
if (vertex_start, vertex_end) in edge_set:
current_edge_data = self._Graph.get_edge_data(vertex_start, vertex_end)
current_edge_weight = current_edge_data["weight"]
edge_weight = (
learning_factor * edge_weight
+ (1 - learning_factor) * current_edge_weight
)
self.setEdge(vertex_start, vertex_end, edge_weight)
return self
# test script
# 1. construct a 2-gram graph of window_size = 2
# from the word "abcdef"
|
|
# Sublime Text plugin for Parinfer
# v0.8.0
# https://github.com/oakmac/sublime-text-parinfer
#
# More information about Parinfer can be found here:
# http://shaunlebron.github.io/parinfer/
#
# Copyright (c) 2015, Chris Oakman and other contributors
# Released under the ISC license
# https://github.com/oakmac/sublime-text-parinfer/blob/master/LICENSE.md
import sublime
import sublime_plugin
import functools
import re
import pprint
pp = pprint.PrettyPrinter(indent=4)
try:
# Python 2
from parinfer import indent_mode, paren_mode
except ImportError:
from .parinfer import indent_mode, paren_mode
try:
basestring
except NameError:
basestring = str
# constants
DEBOUNCE_INTERVAL_MS = 50
STATUS_KEY = 'parinfer'
PAREN_STATUS = 'Parinfer: Paren'
INDENT_STATUS = 'Parinfer: Indent'
PARENT_EXPRESSION_RE = re.compile(r"^\([a-zA-Z]")
SYNTAX_LANGUAGE_RE = r"([\w\d\s]*)(\.sublime-syntax)"
def get_syntax_language(view):
regex_res = re.search(SYNTAX_LANGUAGE_RE, view.settings().get("syntax"))
if regex_res:
return regex_res.group(1)
else:
return None
# TODO: This is ugly, but I'm not sure how to avoid the ugly iteration lookup on each view.
comment_chars = {}
def get_comment_char(view):
comment_char = ';'
srclang = get_syntax_language(view)
if srclang in comment_chars:
comment_char = comment_chars[srclang]
else:
# Iterate over all shellVariables for the given view.
# 0 is a position so probably should be the current cursor location,
# but since we don't nest Clojure in other syntaxes, just use 0.
for var in view.meta_info("shellVariables", 0):
if var['name'] == 'TM_COMMENT_START':
comment_char = var['value'].strip()
comment_chars[srclang] = comment_char
break
return comment_char
def get_setting(view, key):
settings = view.settings().get('Parinfer')
if settings is None:
settings = sublime.load_settings('Parinfer.sublime-settings')
return settings.get(key)
def is_parent_expression(txt):
return re.match(PARENT_EXPRESSION_RE, txt) is not None
def find_start_parent_expression(lines, line_no):
line_no = line_no - 4
if line_no < 0:
return 0
idx = line_no - 1
while idx > 0:
if is_parent_expression(lines[idx]):
return idx
idx = idx - 1
return 0
def find_end_parent_expression(lines, line_no):
max_idx = len(lines) - 1
line_no = line_no + 4
if line_no > max_idx:
return max_idx
idx = line_no + 1
while idx < max_idx:
if is_parent_expression(lines[idx]):
return idx
idx = idx + 1
return max_idx
# this command applies the parinfer changes to the buffer
# NOTE: this needs to be in its own command so we can override "undo"
class ParinferApplyCommand(sublime_plugin.TextCommand):
def run(self, edit, start_line = 0, end_line = 0, cursor_row = 0, cursor_col = 0, result_text = ''):
# get the current selection
current_selections = [(self.view.rowcol(start), self.view.rowcol(end))
for start, end in self.view.sel()]
# update the buffer
start_point = self.view.text_point(start_line, 0)
end_point = self.view.text_point(end_line, 0)
region = sublime.Region(start_point, end_point)
self.view.replace(edit, region, result_text)
# re-apply their selection
self.view.sel().clear()
for start, end in current_selections:
self.view.sel().add(
sublime.Region(self.view.text_point(*start),
self.view.text_point(*end)))
# NOTE: This command inspects the text around the cursor to determine if we need
# to run Parinfer on it. It does not modify the buffer directly.
class ParinferInspectCommand(sublime_plugin.TextCommand):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# holds the text of the last update
self.last_update_text = None
self.comment_char = get_comment_char(self.view)
def run(self, edit):
current_view = self.view
current_status = current_view.get_status(STATUS_KEY)
# exit if Parinfer is not enabled on this view
if current_status not in (INDENT_STATUS, PAREN_STATUS):
return
whole_region = sublime.Region(0, current_view.size())
all_text = current_view.substr(whole_region)
lines = all_text.split("\n")
# add a newline at the end of the file if there is not one
if lines[-1] != "":
lines.append("")
selections = current_view.sel()
first_cursor = selections[0].begin()
cursor_row, cursor_col = current_view.rowcol(first_cursor)
start_line = find_start_parent_expression(lines, cursor_row)
end_line = find_end_parent_expression(lines, cursor_row)
start_point = current_view.text_point(start_line, 0)
end_point = current_view.text_point(end_line, 0)
region = sublime.Region(start_point, end_point)
text = current_view.substr(region)
modified_cursor_row = cursor_row - start_line
# exit early if there has been no change since our last update
if text == self.last_update_text:
return
parinfer_options = {
'cursorLine': modified_cursor_row,
'cursorX': cursor_col,
'commentChar': self.comment_char,
}
# specify the Parinfer mode
parinfer_fn = indent_mode
if current_status == PAREN_STATUS:
# TODO: add parinfer_options.cursorDx here
parinfer_fn = paren_mode
# run Parinfer on the text
result = parinfer_fn(text, parinfer_options)
if result['success']:
# save the text of this update so we don't have to process it again
self.last_update_text = result['text']
# update the buffer in a separate command if the text needs to be changed
if result['text'] != text:
cmd_options = {
'cursor_row': cursor_row,
'cursor_col': cursor_col,
'start_line': start_line,
'end_line': end_line,
'result_text': result['text'],
}
sublime.set_timeout(lambda: current_view.run_command('parinfer_apply', cmd_options), 1)
class ParinferParenOnOpen(sublime_plugin.TextCommand):
def run(self, edit):
# run Paren Mode on the whole file
whole_region = sublime.Region(0, self.view.size())
all_text = self.view.substr(whole_region)
result = paren_mode(all_text, None)
# TODO:
# - what to do when paren mode fails on a new file?
# show them a message?
# - warn them before applying Paren Mode changes?
if result['success']:
# update the buffer if we need to
if all_text != result['text']:
self.view.replace(edit, whole_region, result['text'])
# drop them into Indent Mode
self.view.set_status(STATUS_KEY, INDENT_STATUS)
class Parinfer(sublime_plugin.EventListener):
def __init__(self):
# stateful debounce counter
self.pending = 0
# Should we automatically start Parinfer on this file?
def should_start(self, view):
# False if filename is not a string
filename = view.file_name()
if isinstance(filename, basestring) is not True:
return False
# check the extensions in settings
for extension in get_setting(view, 'file_extensions'):
if filename.endswith(extension):
return True
# didn't find anything; do not automatically start Parinfer
return False
# debounce intermediary
def handle_timeout(self, view):
self.pending = self.pending - 1
if self.pending == 0:
view.run_command('parinfer_inspect')
# fires every time the editor is modified; basically calls a
# debounced run_parinfer
def on_modified(self, view):
self.pending = self.pending + 1
sublime.set_timeout(
functools.partial(self.handle_timeout, view), DEBOUNCE_INTERVAL_MS)
# fires every time the cursor is moved
def on_selection_modified(self, view):
self.on_modified(view)
# fires when a file is finished loading
def on_load(self, view):
# exit early if we do not recognize this file extension
if not self.should_start(view):
return
# run Paren Mode on the whole file
view.run_command('parinfer_paren_on_open')
class ParinferToggleOnCommand(sublime_plugin.TextCommand):
def run(self, edit):
# update the status bar
current_status = self.view.get_status(STATUS_KEY)
if current_status == INDENT_STATUS:
self.view.set_status(STATUS_KEY, PAREN_STATUS)
else:
self.view.set_status(STATUS_KEY, INDENT_STATUS)
class ParinferToggleOffCommand(sublime_plugin.TextCommand):
def run(self, edit):
# remove from the status bar
self.view.erase_status(STATUS_KEY)
# override undo
class ParinferUndoListener(sublime_plugin.EventListener):
def on_text_command(self, view, command_name, args):
# TODO: Only run in parinfer views?
# TODO: Simplify duplicated logic?
if command_name == 'undo':
# check to see if the last command was a 'parinfer_apply'
cmd_history = view.command_history(0)
# if so, run an extra "undo" to erase the changes
if cmd_history[0] == 'parinfer_apply':
view.run_command('undo')
# run "undo" as normal
elif command_name == 'redo':
# check to see if the command after next was a 'parinfer_apply'
cmd_history = view.command_history(2)
# if so, run an extra "redo" to erase the changes
if cmd_history[0] == 'parinfer_apply':
view.run_command('redo')
# run "redo" as normal
|
|
"""
The heart of the library.
See ``README.rst`` and the ``Config`` class for more details.
"""
import dataclasses
import enum
import os
import warnings
from typing import Any, Callable, Dict, NewType, Optional, Type, Union, get_type_hints
# Aliased in order to avoid a conflict with the _Options.transform attribute.
from . import transform as transform_module
_NO_DEFAULT = object()
VariableName = NewType("VariableName", Union[str, bytes])
VariableValue = NewType("VariableValue", Union[str, bytes])
Source = NewType("Source", Dict[VariableName, VariableValue])
TransformCallable = NewType("TransformCallable", Callable[[VariableValue, Type], Any])
class Autoload(enum.Enum):
"""
Represents the different strategies for autoloading attribute values
on a ``Config`` class:
- ``Autoload.CLASS`` - load variable values to class on its subclass creation,
- ``Autoload.OBJECT`` - load variable values to object instance on its initialization,
- ``Autoload.NEVER`` - does not perform any autoloading; ``Config.load`` method needs to be called explicitly.
"""
CLASS = "CLASS"
OBJECT = "OBJECT"
NEVER = "NEVER"
def _generate_environ_name(
attr_name: str, prefix: Optional[str] = None
) -> VariableName:
"""
Outputs an environment variable name based on the ``Configuration``'s
subclass attribute name and the optional prefix.
>>> _generate_environ_name("attr_name", prefix="prefixed")
"PREFIXED_ATTR_NAME"
"""
variable_name = ""
if prefix:
variable_name += f"{prefix}_"
variable_name += attr_name
return variable_name.upper()
@dataclasses.dataclass
class _Options:
"""
Acts as the container for metaclass keyword arguments provided during
``Config`` class creation.
"""
prefix: Optional[str] = None
autoload: Autoload = Autoload.CLASS
source: Source = os.environ
transform: TransformCallable = transform_module.cast
wanted_type: Type = str
variable_name: Callable[[str, Optional[str]], VariableName] = _generate_environ_name
@classmethod
def from_dict(cls, options_dict: Dict) -> "_Options":
"""
Produces an ``_Options`` instance from the given dictionary.
Items are deleted from ``options_dict`` as a side-effect.
"""
options_kwargs = {}
for field in dataclasses.fields(cls):
value = options_dict.pop(field.name, None)
if value is None:
continue
options_kwargs[field.name] = value
try:
return cls(**options_kwargs)
except TypeError as e:
raise ValueError(
f"Invalid options for Config class: {options_dict}."
) from e
@dataclasses.dataclass
class Variable:
"""
Represents a single variable from the configuration source
and the user's preferences for how to process it.
"""
variable_name: Optional[VariableName] = None
default: Any = _NO_DEFAULT
transform: Optional[TransformCallable] = None
source: Optional[Source] = None
wanted_type: Type = dataclasses.field(init=False)
def set_defaults(
self,
*,
variable_name: VariableName,
transform: TransformCallable,
source: Source,
wanted_type: Type,
):
"""
Sets missing properties of the ``Variable`` instance so that its
value can be fetched with the ``Variable.get`` method.
"""
self.variable_name = self.variable_name or variable_name
self.transform = self.transform or transform
self.source = self.source or source
self.wanted_type = wanted_type
def get(self) -> VariableValue:
"""
Fetches the value of the variable from ``self.source`` and invokes the
``self.transform`` operation on it. Falls back to ``self.default``
if the value is not found.
"""
try:
raw_value = self.source[self.variable_name]
except KeyError:
if self.default is _NO_DEFAULT:
raise AttributeError(
f"Configuration error: {self.variable_name!r} is not set."
) from None
else:
return self.default
try:
return self.transform(raw_value, self.wanted_type)
except (ValueError, SyntaxError) as e:
raise ValueError(
f"Invalid configuration for {self.variable_name!r}."
) from e
class Config:
"""
When ``Config`` subclasses are created, by default ``Ecological`` will set their
attributes automatically based on the corresponding environment variables.
For example if ``DEBUG`` is set to ``"True"`` and ``PORT`` is set to ``"8080"`` and your
configuration class looks like::
class Configuration(ecological.Config):
port: int
debug: bool
``Configuration.port`` will be ``8080`` and ``Configuration.debug`` will be ``True``, with the
correct types.
It is possible to defer the calculation of attribute values by specifying the ``autoload``
keyword argument on your class definition. For possible strategies see the ``Autoload`` class definition.
Caveats and Known Limitations
=============================
- ``Ecological`` doesn't support (public) methods in ``Config`` classes.
Further Information
===================
Further information is available in the ``README.rst``.
"""
_options: _Options
def __init_subclass__(cls, **kwargs):
cls._options = _Options.from_dict(kwargs)
super().__init_subclass__(**kwargs)
if cls._options.autoload is Autoload.CLASS:
cls.load(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
cls = type(self)
if cls._options.autoload is Autoload.OBJECT:
cls.load(self)
@classmethod
def load(cls: "Config", target_obj: Optional[object] = None):
"""
Fetches and converts values of variables declared as attributes on ``cls`` according
to their specification and finally assigns them to the corresponding attributes
on ``target_obj`` (which by default is ``cls`` itself).
"""
target_obj = target_obj or cls
cls_dict = vars(cls).copy()
attr_types: Dict[str, type] = get_type_hints(cls)
# There is no single place that has all class attributes regardless of
# having default value or not. Thus the keys of cls.__annotations__ and
# cls.__dict__ are merged providing a complete list.
attr_names = set(cls_dict).union(attr_types.keys())
for attr_name in attr_names:
# Omit private and nested configuration attributes
# (Attribute value can be the instance of Config itself).
if attr_name.startswith("_") or isinstance(attr_name, cls):
continue
attr_value = cls_dict.get(attr_name, _NO_DEFAULT)
attr_type = attr_types.get(attr_name, cls._options.wanted_type)
if isinstance(attr_value, Variable):
variable = attr_value
else:
variable = Variable(default=attr_value)
variable.set_defaults(
variable_name=cls._options.variable_name(
attr_name, prefix=cls._options.prefix
),
transform=cls._options.transform,
source=cls._options.source,
wanted_type=attr_type,
)
setattr(target_obj, attr_name, variable.get())
class AutoConfig(Config, autoload=Autoload.NEVER):
"""
DEPRECATED: For backward compatibility purposes only; please use ``ecological.Config`` instead.
"""
def __init_subclass__(cls, prefix: Optional[str] = None, **kwargs):
warnings.warn(
"ecological.AutoConfig is deprecated, please use ecological.Config instead.",
DeprecationWarning,
)
super().__init_subclass__(prefix=prefix, autoload=Autoload.CLASS)
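# A minimal usage sketch (illustrative only; ``AppConfig`` and ``fake_environ``
# below are hypothetical names). It relies on the ``source`` and ``prefix``
# options documented above, so no real environment variables are touched.
if __name__ == "__main__":
    fake_environ = {"APP_PORT": "8080", "APP_DEBUG": "True"}

    class AppConfig(Config, prefix="app", source=fake_environ):
        port: int
        debug: bool
        host: str = "localhost"  # fallback default, since APP_HOST is not in the source

    # Autoload.CLASS (the default) already populated the attributes at class creation.
    print(AppConfig.port, AppConfig.debug, AppConfig.host)  # expected: 8080 True localhost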
|
|
from django.test import TestCase
from django.conf import settings
import mongoengine
from mongoengine.django.auth import User
import re
from datetime import datetime
from mumblr.entrytypes.core import TextEntry, HtmlComment, LinkEntry
mongoengine.connect('mumblr-unit-tests')
class MumblrTest(TestCase):
urls = 'mumblr.urls'
# Stop ORM-related stuff from happening as we don't use the ORM
def _fixture_setup(self):
pass
def _fixture_teardown(self):
pass
def login(self):
self.client.get('/admin/login/')
data = self.user_data.copy()
data['csrfmiddlewaretoken'] = self.get_csrf_token()
return self.client.post('/admin/login/', data)
def setUp(self):
# Create a test user
self.user_data = {
'username': 'test',
'password': 'testpassword123',
}
self.user = User.create_user(*self.user_data.values())
# Create a test entry
self.text_entry = TextEntry(title='Test-Entry', slug='test-entry')
self.text_entry.tags = ['tests']
self.text_entry.published = True
self.text_entry.content = 'some-test-content'
self.text_entry.rendered_content = '<p>some test content</p>'
# Create test comment
self.comment = HtmlComment(
author='Mr Test',
body='test comment',
rendered_content = '<p>test comment</p>',
)
self.text_entry.comments = [self.comment]
self.text_entry.save()
def get_csrf_token(self):
# Scrape CSRF token
response = self.client.get('/admin/login/')
csrf_regex = r'csrfmiddlewaretoken\'\s+value=\'(\w+)\''
csrf_regex = r'value=\'(\w+)\''
return re.search(csrf_regex, response.content).groups()[0]
def test_recent_entries(self):
"""Ensure that the recent entries page works properly.
"""
response = self.client.get('/')
self.assertContains(response, self.text_entry.rendered_content,
status_code=200)
def test_entry_detail(self):
"""Ensure that the entry detail page works properly.
"""
response = self.client.get(self.text_entry.get_absolute_url())
self.assertContains(response, self.text_entry.rendered_content,
status_code=200)
def test_tagged_entries(self):
"""Ensure that the 'tagged entries' page works properly.
"""
response = self.client.get('/tag/tests/')
self.assertContains(response, self.text_entry.rendered_content,
status_code=200)
response = self.client.get('/tag/programming/')
self.assertNotContains(response, self.text_entry.rendered_content,
status_code=200)
def test_tag_cloud(self):
"""Ensure that the 'tag cloud' page works properly.
"""
response = self.client.get('/tags/')
self.assertContains(response, 'tests', status_code=200)
def test_add_link(self):
"""Ensure links get added properly, without nofollow attr
"""
self.login()
response = self.client.get('/admin/add/Link/')
entry_data = {
'title': 'Link Entry',
'slug': 'link-entry',
'tags': 'tests',
'published': 'true',
'content': 'test',
'publish_date_year': datetime.now().year,
'publish_date_month': datetime.now().month,
'publish_date_day': datetime.now().day,
'publish_time': datetime.now().strftime('%H:%M:%S'),
'rendered_content': '<p>test</p>',
'link_url': 'http://stevechallis.com/',
'csrfmiddlewaretoken': self.get_csrf_token(),
}
# Check invalid form fails
invalid_data = entry_data.copy()
invalid_data['link_url'] = 'this-is-not-a-url'
response = self.client.post('/admin/add/Link/', invalid_data)
self.assertTemplateUsed(response, 'mumblr/admin/add_entry.html')
# Check adding an entry does work
response = self.client.post('/admin/add/text/', entry_data)
entry = LinkEntry(slug=entry_data['slug'], publish_time=datetime.now())
url = entry.get_absolute_url()
self.assertRedirects(response, url, target_status_code=200)
response = self.client.get(url)
self.assertNotContains(response, 'rel="nofollow"')
response = self.client.get('/')
self.assertContains(response, entry_data['content'])
def test_add_entry(self):
"""Ensure that entries may be added.
"""
self.login()
response = self.client.get('/admin/add/text/')
entry_data = {
'title': 'Second test entry',
'slug': 'second-test-entry',
'tags': 'tests',
'published': 'true',
'content': 'test',
'publish_date_year': datetime.now().year,
'publish_date_month': datetime.now().month,
'publish_date_day': datetime.now().day,
'publish_time': datetime.now().strftime('%H:%M:%S'),
'rendered_content': '<p>test</p>',
'csrfmiddlewaretoken': self.get_csrf_token(),
}
# Check invalid form fails
response = self.client.post('/admin/add/text/', {
'csrfmiddlewaretoken': self.get_csrf_token(),
'content': 'test',
})
self.assertTemplateUsed(response, 'mumblr/admin/add_entry.html')
# Check adding an entry does work
response = self.client.post('/admin/add/text/', entry_data)
entry = TextEntry(slug=entry_data['slug'], publish_time=datetime.now())
url = entry.get_absolute_url()
self.assertRedirects(response, url, target_status_code=200)
response = self.client.get('/')
self.assertContains(response, entry_data['content'])
def test_add_comment(self):
"""Ensure that comments can be added
"""
# Login to prevent Captcha
self.login()
add_url = self.text_entry.get_absolute_url()+'#comments'
comment_data = {
'author': 'Mr Test 2',
'body': 'another-test-comment',
'rendered_content': '<p>another-test-comment</p>',
'csrfmiddlewaretoken': self.get_csrf_token(),
}
# Check invalid form fails
response = self.client.post(add_url, {
'body': 'test',
'csrfmiddlewaretoken': self.get_csrf_token(),
})
# Check adding comment works
response = self.client.post(add_url, comment_data)
self.assertRedirects(response, add_url, target_status_code=200)
response = self.client.get(add_url)
self.assertContains(response, comment_data['rendered_content'])
def test_edit_entry(self):
"""Ensure that entries may be edited.
"""
self.login()
edit_url = '/admin/edit/%s/' % self.text_entry.id
entry_data = {
'title': self.text_entry.title,
'slug': self.text_entry.slug,
'published': 'true',
'publish_date_year': datetime.now().year,
'publish_date_month': datetime.now().month,
'publish_date_day': datetime.now().day,
'publish_time': datetime.now().strftime('%H:%M:%S'),
'content': 'modified-test-content',
'csrfmiddlewaretoken': self.get_csrf_token(),
}
# Check invalid form fails
response = self.client.post(edit_url, {
'content': 'test',
'csrfmiddlewaretoken': self.get_csrf_token(),
})
self.assertTemplateUsed(response, 'mumblr/admin/add_entry.html')
# Check editing an entry does work
response = self.client.post(edit_url, entry_data)
entry = TextEntry(slug=entry_data['slug'], publish_time=datetime.now())
url = entry.get_absolute_url()
self.assertRedirects(response, url, target_status_code=200)
response = self.client.get('/')
self.assertContains(response, entry_data['content'])
def test_delete_entry(self):
"""Ensure that entries may be deleted.
"""
delete_url = '/admin/delete/'
data = {
'entry_id': self.text_entry.id,
'csrfmiddlewaretoken': self.get_csrf_token(),
}
response = self.client.post(delete_url, data)
self.assertRedirects(response, '/admin/login/?next=' + delete_url,
target_status_code=200)
self.login()
data['csrfmiddlewaretoken'] = self.get_csrf_token()
response = self.client.post(delete_url, data)
self.assertRedirects(response, '/')
response = self.client.get('/')
self.assertNotContains(response, self.text_entry.rendered_content,
status_code=200)
def test_delete_comment(self):
"""Ensure that comments can be deleted
"""
self.login()
data = {
'comment_id': self.text_entry.comments[0].id,
'csrfmiddlewaretoken': self.get_csrf_token(),
}
delete_url = '/admin/delete-comment/'
response = self.client.post(delete_url, data)
redirect_url = self.text_entry.get_absolute_url() + '#comments'
self.assertRedirects(response, redirect_url)
self.text_entry.reload()
self.assertEqual(len(self.text_entry.comments), 0)
def test_login_logout(self):
"""Ensure that users may log in and out.
"""
# User not logged in
response = self.client.get('/admin/login/')
self.assertFalse(isinstance(response.context['user'], User))
# User logging in
data = self.user_data.copy()
data['csrfmiddlewaretoken'] = self.get_csrf_token()
response = self.client.post('/admin/login/', data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL,
target_status_code=200)
# User logged in
response = self.client.get('/')
self.assertTrue(isinstance(response.context['user'], User))
response = self.client.get('/admin/logout/')
self.assertRedirects(response, '/', target_status_code=200)
# User logged out
response = self.client.get('/admin/login/')
self.assertFalse(isinstance(response.context['user'], User))
def test_login_required(self):
"""Ensure that a login is required for restricted pages.
"""
restricted_pages = ['/admin/', '/admin/add/text/']
restricted_pages.append('/admin/edit/%s/' % self.text_entry.id)
restricted_pages.append('/admin/delete/')
# Check in turn that each of the restricted pages may not be accessed
for url in restricted_pages:
response = self.client.get(url)
self.assertRedirects(response, '/admin/login/?next=' + url,
target_status_code=200)
self.login()
# Check in turn that each of the restricted pages may be accessed
for url in restricted_pages:
response = self.client.get(url, follow=True)
self.assertFalse('/admin/login' in response.get('location', ''))
def tearDown(self):
self.user.delete()
TextEntry.objects.delete()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import request
from flask_restplus import Resource, reqparse
from flask_restplus.reqparse import Argument
from flask_jwt_extended import jwt_required, create_access_token, \
jwt_refresh_token_required
from app import db, api
from .models import *
from .decorators import *
from .fields.auth import *
from .fields.users import *
from .fields.groups import *
from .fields.abilities import *
from .fields.containers import *
from .fields.hosts import *
import lwp
import lxc
import os.path
import platform
import re
import gzip
import socket
import subprocess
import psutil
from distro import linux_distribution
class Auth(Resource):
@api.marshal_with(auth_fields_get)
@api.expect(auth_fields_post, validate=True)
def post(self):
"""
Get JSON Web Token
"""
request_data = request.get_json()
username = request_data['username']
password = request_data['password']
user = User.query.filter_by(username=username).first()
if not user or not user.verify_password(password):
api.abort(code=401, message='Incorrect user or password')
ret = {'access_token': create_access_token(identity=user)}
return ret
class AuthRefresh(Resource):
decorators = [jwt_required]
@api.marshal_with(auth_fields_get)
def post(self):
"""
Get new token with valid token
"""
current_identity = import_user()
ret = {
'access_token': create_access_token(identity=current_identity)
}
return ret
class AuthCheck(Resource):
decorators = [jwt_required]
@api.doc(responses={
200: 'Token OK',
401: 'Token invalid or expired',
422: 'Signature verification failed'
})
def get(self):
"""
Check token
"""
return {}, 200
class UsersList(Resource):
decorators = [jwt_required]
@user_has('users_infos_all')
@api.marshal_with(users_fields_get_many)
def get(self):
"""
Get users list
"""
users = User.query.all()
users_list = []
for user in users:
users_list.append(user.__jsonapi__())
return {'data': users_list}
@user_has('users_create')
@api.expect(users_fields_post, validate=True)
@api.marshal_with(users_fields_get)
def post(self):
"""
Create user
"""
current_identity = import_user()
data = request.get_json()['data']
if User.query.filter_by(username=data['attributes']['username']).first():
api.abort(code=409, message='User already exists')
user = User()
user.username = data['attributes']['username']
user.name = data['attributes']['name']
user.hash_password(data['attributes']['password'])
if 'admin' in data['attributes'] and current_identity.admin:
user.admin = data['attributes']['admin']
if 'email' in data['attributes']:
user.email = data['attributes']['email']
try:
user.groups = list(id['id'] for id in data[
'relationships']['groups']['data'])
except KeyError:
pass
try:
user.containers = list(id['id'] for id in data[
'relationships']['containers']['data'])
except KeyError:
pass
db.session.add(user)
db.session.commit()
return {'data': user.__jsonapi__()}, 201
class Users(Resource):
decorators = [jwt_required]
@user_has('users_infos')
@api.marshal_with(users_fields_get)
def get(self, id):
"""
Get user
"""
user = User.query.get(id)
if not user:
api.abort(code=404, message='User not found')
return {'data': user.__jsonapi__()}
@user_has('users_update')
@api.expect(users_fields_put, validate=True)
@api.marshal_with(users_fields_get)
def put(self, id):
"""
Update user
"""
current_identity = import_user()
user = User.query.get(id)
if not user:
api.abort(code=404, message='User not found')
data = request.get_json()['data']
if 'name' in data['attributes']:
user.name = data['attributes']['name']
if 'admin' in data['attributes'] and current_identity.admin:
user.admin = data['attributes']['admin']
if 'email' in data['attributes']:
user.email = data['attributes']['email']
if 'password' in data['attributes']:
user.hash_password(data['attributes']['password'])
try:
user.groups = list(id['id'] for id in data[
'relationships']['groups']['data'])
except KeyError:
pass
try:
user.containers = list(id['id'] for id in data[
'relationships']['containers']['data'])
except KeyError:
pass
if len(data) > 0:
db.session.commit()
return {'data': user.__jsonapi__()}
@user_has('users_delete')
def delete(self, id):
"""
Delete user
"""
user = User.query.get(id)
if not user:
api.abort(code=404, message='User not found')
db.session.delete(user)
db.session.commit()
return {}, 204
class Me(Resource):
decorators = [jwt_required]
@api.marshal_with(users_fields_get)
def get(self):
"""
Get me
"""
current_identity = import_user()
return {'data': current_identity.__jsonapi__()}
@user_has('me_edit')
@api.expect(users_fields_put, validate=True)
@api.marshal_with(users_fields_get)
def put(self):
"""
Update me
"""
current_identity = import_user()
user = User.query.get(current_identity.id)
data = request.get_json()['data']
if 'name' in data['attributes']:
user.name = data['attributes']['name']
if 'admin' in data['attributes'] and current_identity.admin:
user.admin = data['attributes']['admin']
if 'email' in data['attributes']:
user.email = data['attributes']['email']
if 'password' in data['attributes']:
user.hash_password(data['attributes']['password'])
try:
user.groups = list(id['id'] for id in data[
'relationships']['groups']['data'])
except KeyError:
pass
try:
user.containers = list(id['id'] for id in data[
'relationships']['containers']['data'])
except KeyError:
pass
if len(data) > 0:
db.session.commit()
return {'data': user.__jsonapi__()}
@user_has('me_edit')
def delete(self):
"""
Delete me (stupid)
"""
current_identity = import_user()
user = User.query.get(current_identity.id)
db.session.delete(user)
db.session.commit()
return {}, 204
class GroupsList(Resource):
decorators = [jwt_required]
@user_has('groups_infos_all')
@api.marshal_with(groups_fields_get_many)
def get(self):
"""
Get groups list
"""
groups = Group.query.all()
groups_list = []
for group in groups:
groups_list.append(group.__jsonapi__())
return {'data': groups_list}
@user_has('groups_create')
@api.expect(groups_fields_post, validate=True)
@api.marshal_with(groups_fields_get)
def post(self):
"""
Create group
"""
data = request.get_json()['data']
group = Group(name=data['attributes']['name'])
try:
group.abilities = list(id['id'] for id in data[
'relationships']['abilities']['data'])
except KeyError:
pass
try:
group.users = list(id['id'] for id in data[
'relationships']['users']['data'])
except KeyError:
pass
db.session.add(group)
db.session.commit()
return {'data': group.__jsonapi__()}, 201
class Groups(Resource):
decorators = [jwt_required]
@user_has('groups_infos')
@api.marshal_with(groups_fields_get)
def get(self, id):
"""
Get group
"""
group = Group.query.get(id)
if not group:
api.abort(code=404, message='Group not found')
return {'data': group.__jsonapi__()}
@user_has('groups_update')
@api.expect(groups_fields_put, validate=True)
@api.marshal_with(groups_fields_get)
def put(self, id):
"""
Update group
"""
group = Group.query.get(id)
if not group:
api.abort(code=404, message='Group not found')
data = request.get_json()['data']
if 'name' in data['attributes']:
group.name = data['attributes']['name']
try:
group.abilities = list(id['id'] for id in data[
'relationships']['abilities']['data'])
except KeyError:
pass
try:
group.users = list(id['id'] for id in data[
'relationships']['users']['data'])
except KeyError:
pass
if len(data) > 0:
db.session.commit()
return {'data': group.__jsonapi__()}
@user_has('groups_delete')
def delete(self, id):
"""
Delete group
"""
group = Group.query.get(id)
if not group:
api.abort(code=404, message='Group not found')
db.session.delete(group)
db.session.commit()
return {}, 204
class AbilitiesList(Resource):
decorators = [jwt_required]
@user_has('abilities_infos_all')
@api.marshal_with(abilities_fields_get_many)
def get(self):
"""
Get abilities list
"""
abilities = Ability.query.all()
abilities_list = []
for ability in abilities:
abilities_list.append(ability.__jsonapi__())
return {'data': abilities_list}
class Abilities(Resource):
decorators = [jwt_required]
@user_has('abilities_infos')
@api.marshal_with(abilities_fields_get)
def get(self, id):
"""
Get ability
"""
ability = Ability.query.get(id)
if not ability:
api.abort(code=404, message='Ability not found')
return {'data': ability.__jsonapi__()}
@user_has('abilities_update')
@api.expect(abilities_fields_put, validate=True)
@api.marshal_with(abilities_fields_get)
def put(self, id):
"""
Update ability
"""
        ability = Ability.query.get(id)
        if not ability:
            api.abort(code=404, message='Ability not found')
        data = request.get_json()['data']
        try:
            ability.groups = list(id['id'] for id in data[
                'relationships']['groups']['data'])
            db.session.commit()
        except KeyError:
            pass
return {'data': ability.__jsonapi__()}
##################
# Containers API #
##################
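# The endpoints below consume JSON:API-style payloads; e.g. a create request
# body looks like {"data": {"attributes": {"name": "...", "template": {...}}}}
# (shape inferred from the handlers below, not from separate documentation).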
class ContainersList(Resource):
decorators = [jwt_required]
@user_has('ct_infos')
@api.marshal_with(containers_fields_get_many)
def get(self):
"""
Get containers list
"""
current_identity = import_user()
containers = []
        for c in lxc.list_containers():
            container = Container.query.filter_by(name=c).first()
            if container is None:
                # Skip lxc containers the database doesn't know about.
                continue
            if container.id in current_identity.containers or current_identity.admin:
infos = lwp.ct_infos(c)
container_json = container.__jsonapi__()
container_json['attributes'] = infos
containers.append(container_json)
return {'data': containers}
@user_has('ct_create')
@api.expect(containers_fields_post, validate=True)
@api.marshal_with(containers_fields_get)
@api.doc(responses={
201: 'Container created',
409: 'Container already exists',
500: 'Can\'t create container'
})
def post(self):
"""
Create container
"""
current_identity = import_user()
data = request.get_json()['data']
if 'name' in data['attributes']:
c = lxc.Container(data['attributes']['name'])
if not c.defined:
try:
if not isinstance(data['attributes']['template']['args'], str):
data['attributes']['template']['args'] = ''
except KeyError:
data['attributes']['template']['args'] = ''
if not c.create(
template=data['attributes']['template']['name'],
flags=lxc.LXC_CREATE_QUIET,
args=data['attributes']['template']['args'],
bdevtype=None
):
api.abort(code=500, message='Can\'t create container')
# Add container to database
container = Container(name=data['attributes']['name'])
db.session.add(container)
db.session.commit()
# Get container ID
container = Container.query.filter_by(
name=data['attributes']['name']).first()
# Add container to allowed user's containers
user = User.query.get(current_identity.id)
user.containers.append(container.id)
db.session.commit()
return Containers.put(self, container.id, d=data), 201
api.abort(code=409, message='Container already exists')
class Containers(Resource):
decorators = [jwt_required]
@user_has('ct_infos')
@api.marshal_with(containers_fields_get)
def get(self, id):
"""
Get container
"""
current_identity = import_user()
        container = Container.query.get(id)
        if not container:
            api.abort(code=404, message='Container doesn\'t exist')
        c = lxc.Container(container.name)
if c.defined and (id in current_identity.containers or current_identity.admin):
infos = lwp.ct_infos(container.name)
container_json = container.__jsonapi__()
container_json['attributes'] = infos
return {'data': container_json}
        api.abort(code=404, message='Container doesn\'t exist')
@user_has('ct_update')
@api.expect(containers_fields_put, validate=True)
@api.marshal_with(containers_fields_get)
def put(self, id, d=None):
"""
Update container
"""
def set_config(container, config_item, config_value):
if container.set_config_item(config_item, config_value):
container.save_config()
# python-lxc workaround (issue #1415 on lxc/lxc)
f = open(container.config_file_name, "r")
lines = f.readlines()
f.close()
f = open(container.config_file_name, "w")
for line in lines:
if not line.endswith(' = \n'):
f.write(line)
f.close()
else:
api.abort(
code=500, message='Error while setting container\'s parameter')
# Get data from ContainersList.post()
# or
# from Containers.put()
if d:
data = d
else:
data = request.get_json()['data']
current_identity = import_user()
        container = Container.query.get(id)
        if not container:
            api.abort(code=404, message='Container doesn\'t exist')
        c = lxc.Container(container.name)
if c.defined and (id in current_identity.containers or current_identity.admin):
if 'name' in data['attributes']:
if data['attributes']['name'] != c.name:
if c.rename(data['attributes']['name']):
c = lxc.Container(data['attributes']['name'])
else:
api.abort(
                            code=500, message='Error while renaming container')
if 'lxc' in data['attributes']:
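            # Mirror each nested JSON attribute onto its dotted lxc.* config
            # key via set_config().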
if 'aa_allow_incomplete' in data['attributes']['lxc']:
set_config(c, 'lxc.aa_allow_incomplete', data['attributes'][
'lxc']['aa_allow_incomplete'])
if 'aa_profile' in data['attributes']['lxc']:
set_config(c, 'lxc.aa_profile', data[
'attributes']['lxc']['aa_profile'])
if 'arch' in data['attributes']['lxc']:
set_config(c, 'lxc.arch', data[
'attributes']['lxc']['arch'])
if 'autodev' in data['attributes']['lxc']:
set_config(c, 'lxc.autodev', data[
'attributes']['lxc']['autodev'])
if 'cap' in data['attributes']['lxc']:
if 'drop' in data['attributes']['lxc']['cap']:
set_config(c, 'lxc.cap.drop', data['attributes'][
'lxc']['cap']['drop'])
if 'keep' in data['attributes']['lxc']['cap']:
set_config(c, 'lxc.cap.keep', data['attributes'][
'lxc']['cap']['keep'])
if 'cgroup' in data['attributes']['lxc']:
if 'memory' in data['attributes']['lxc']['cgroup']:
if 'limit_in_bytes' in data['attributes']['lxc']['cgroup']['memory']:
set_config(c, 'lxc.cgroup.memory.limit_in_bytes', data['attributes'][
'lxc']['cgroup']['memory']['limit_in_bytes'])
if 'memsw' in data['attributes']['lxc']['cgroup']['memory']:
if 'limit_in_bytes' in data['attributes']['lxc']['cgroup']['memory']['memsw']:
set_config(c, 'lxc.cgroup.memory.memsw.limit_in_bytes', data['attributes'][
'lxc']['cgroup']['memory']['memsw']['limit_in_bytes'])
if 'cpu' in data['attributes']['lxc']['cgroup']:
if 'shares' in data['attributes']['lxc']['cgroup']['cpu']:
set_config(c, 'lxc.cgroup.cpu.shares', data['attributes'][
'lxc']['cgroup']['cpu']['shares'])
if 'cpuset' in data['attributes']['lxc']['cgroup']:
if 'cpus' in data['attributes']['lxc']['cgroup']['cpuset']:
set_config(c, 'lxc.cgroup.cpuset.cpus', data['attributes'][
'lxc']['cgroup']['cpuset']['cpus'])
if 'console' in data['attributes']['lxc']:
if '_' in data['attributes']['lxc']['console']:
set_config(c, 'lxc.console', data['attributes'][
'lxc']['console']['_'])
if 'logfile' in data['attributes']['lxc']['console']:
set_config(c, 'lxc.console.logfile', data['attributes'][
'lxc']['console']['logfile'])
if 'devttydir' in data['attributes']['lxc']:
set_config(c, 'lxc.devttydir', data[
'attributes']['lxc']['devttydir'])
if 'environment' in data['attributes']['lxc']:
set_config(c, 'lxc.environment', data['attributes'][
'lxc']['environment'])
if 'ephemeral' in data['attributes']['lxc']:
set_config(c, 'lxc.ephemeral', data[
'attributes']['lxc']['ephemeral'])
if 'group' in data['attributes']['lxc']:
set_config(c, 'lxc.group', data[
'attributes']['lxc']['group'])
if 'haltsignal' in data['attributes']['lxc']:
set_config(c, 'lxc.haltsignal', data[
'attributes']['lxc']['haltsignal'])
if 'hook' in data['attributes']['lxc']:
if 'autodev' in data['attributes']['lxc']['hook']:
set_config(c, 'lxc.hook.autodev', data['attributes'][
'lxc']['hook']['autodev'])
if 'clone' in data['attributes']['lxc']['hook']:
set_config(c, 'lxc.hook.clone', data['attributes'][
'lxc']['hook']['clone'])
if 'destroy' in data['attributes']['lxc']['hook']:
set_config(c, 'lxc.hook.destroy', data['attributes'][
'lxc']['hook']['destroy'])
if 'mount' in data['attributes']['lxc']['hook']:
set_config(c, 'lxc.hook.mount', data['attributes'][
'lxc']['hook']['mount'])
if 'post-stop' in data['attributes']['lxc']['hook']:
set_config(c, 'lxc.hook.post-stop',
data['attributes']['lxc']['hook']['post-stop'])
if 'pre-mount' in data['attributes']['lxc']['hook']:
set_config(c, 'lxc.hook.pre-mount',
data['attributes']['lxc']['hook']['pre-mount'])
if 'pre-start' in data['attributes']['lxc']['hook']:
set_config(c, 'lxc.hook.pre-start',
data['attributes']['lxc']['hook']['pre-start'])
if 'start' in data['attributes']['lxc']['hook']:
set_config(c, 'lxc.hook.start', data['attributes'][
'lxc']['hook']['start'])
if 'stop' in data['attributes']['lxc']['hook']:
set_config(c, 'lxc.hook.stop', data['attributes'][
'lxc']['hook']['stop'])
if 'id_map' in data['attributes']['lxc']:
set_config(c, 'lxc.id_map', data[
'attributes']['lxc']['id_map'])
if 'include' in data['attributes']['lxc']:
set_config(c, 'lxc.include', data[
'attributes']['lxc']['include'])
if 'init_cmd' in data['attributes']['lxc']:
set_config(c, 'lxc.init_cmd', data[
'attributes']['lxc']['init_cmd'])
if 'init_gid' in data['attributes']['lxc']:
set_config(c, 'lxc.init_gid', data[
'attributes']['lxc']['init_gid'])
if 'init_uid' in data['attributes']['lxc']:
set_config(c, 'lxc.init_uid', data[
'attributes']['lxc']['init_uid'])
if 'kmsg' in data['attributes']['lxc']:
set_config(c, 'lxc.kmsg', data[
'attributes']['lxc']['kmsg'])
if 'logfile' in data['attributes']['lxc']:
set_config(c, 'lxc.logfile', data[
'attributes']['lxc']['logfile'])
if 'loglevel' in data['attributes']['lxc']:
set_config(c, 'lxc.loglevel', data[
'attributes']['lxc']['loglevel'])
if 'monitor' in data['attributes']['lxc']:
if 'unshare' in data['attributes']['lxc']['monitor']:
set_config(c, 'lxc.monitor.unshare', data['attributes'][
'lxc']['monitor']['unshare'])
if 'mount' in data['attributes']['lxc']:
if '_' in data['attributes']['lxc']['mount']:
set_config(c, 'lxc.mount', data[
'attributes']['lxc']['mount']['_'])
if 'auto' in data['attributes']['lxc']['mount']:
set_config(c, 'lxc.mount.auto', data['attributes'][
'lxc']['mount']['auto'])
if 'entry' in data['attributes']['lxc']['mount']:
set_config(c, 'lxc.mount.entry', data['attributes'][
'lxc']['mount']['entry'])
if 'network' in data['attributes']['lxc']:
for i in range(len(data['attributes']['lxc']['network'])):
if 'type' in data['attributes']['lxc']['network'][i]:
set_config(c, 'lxc.network.%s.type' %
i, data['attributes']['lxc']['network'][i]['type'])
if 'veth' in data['attributes']['lxc']['network'][i]:
if 'pair' in data['attributes']['lxc']['network'][i]['veth']:
set_config(c, 'lxc.network.%s.veth.pair' %
i, data['attributes']['lxc']['network'][i]['veth']['pair'])
if 'vlan' in data['attributes']['lxc']['network'][i]:
if 'id' in data['attributes']['lxc']['network'][i]['vlan']:
set_config(c, 'lxc.network.%s.vlan.id' %
i, data['attributes']['lxc']['network'][i]['vlan']['id'])
if 'macvlan' in data['attributes']['lxc']['network'][i]:
if 'mode' in data['attributes']['lxc']['network'][i]['macvlan']:
set_config(c, 'lxc.network.%s.macvlan.mode' % i, data['attributes'][
'lxc']['network'][i]['macvlan']['mode'])
if 'flags' in data['attributes']['lxc']['network'][i]:
set_config(c, 'lxc.network.%s.flags' %
i, data['attributes']['lxc']['network'][i]['flags'])
if 'link' in data['attributes']['lxc']['network'][i]:
set_config(c, 'lxc.network.%s.link' %
i, data['attributes']['lxc']['network'][i]['link'])
if 'mtu' in data['attributes']['lxc']['network'][i]:
set_config(c, 'lxc.network.%s.mtu' %
i, data['attributes']['lxc']['network'][i]['mtu'])
if 'name' in data['attributes']['lxc']['network'][i]:
set_config(c, 'lxc.network.%s.name' %
i, data['attributes']['lxc']['network'][i]['name'])
if 'hwaddr' in data['attributes']['lxc']['network'][i]:
set_config(c, 'lxc.network.%s.hwaddr' %
i, data['attributes']['lxc']['network'][i]['hwaddr'])
if 'ipv4' in data['attributes']['lxc']['network'][i]:
if '_' in data['attributes']['lxc']['network'][i]['ipv4']:
set_config(c, 'lxc.network.%s.ipv4' %
i, data['attributes']['lxc']['network'][i]['ipv4']['_'])
if 'gateway' in data['attributes']['lxc']['network'][i]['ipv4']:
set_config(c, 'lxc.network.%s.ipv4.gateway' % i, data['attributes'][
'lxc']['network'][i]['ipv4']['gateway'])
if 'ipv6' in data['attributes']['lxc']['network'][i]:
if '_' in data['attributes']['lxc']['network'][i]['ipv6']:
set_config(c, 'lxc.network.%s.ipv6' %
i, data['attributes']['lxc']['network'][i]['ipv6']['_'])
if 'gateway' in data['attributes']['lxc']['network'][i]['ipv6']:
set_config(c, 'lxc.network.%s.ipv6.gateway' % i, data['attributes'][
'lxc']['network'][i]['ipv6']['gateway'])
if 'script' in data['attributes']['lxc']['network'][i]:
if 'up' in data['attributes']['lxc']['network'][i]['script']:
set_config(c, 'lxc.network.%s.script.up' %
i, data['attributes']['lxc']['network'][i]['script']['up'])
if 'down' in data['attributes']['lxc']['network'][i]['script']:
set_config(c, 'lxc.network.%s.script.down' % i, data['attributes'][
'lxc']['network'][i]['script']['down'])
if 'no_new_privs' in data['attributes']['lxc']:
set_config(c, 'lxc.no_new_privs', data['attributes'][
'lxc']['no_new_privs'])
if 'pts' in data['attributes']['lxc']:
set_config(c, 'lxc.pts', data['attributes']['lxc']['pts'])
if 'rebootsignal' in data['attributes']['lxc']:
set_config(c, 'lxc.rebootsignal', data['attributes'][
'lxc']['rebootsignal'])
if 'rootfs' in data['attributes']['lxc']:
if '_' in data['attributes']['lxc']['rootfs']:
set_config(c, 'lxc.rootfs', data[
'attributes']['lxc']['rootfs']['_'])
if 'mount' in data['attributes']['lxc']['rootfs']:
set_config(c, 'lxc.rootfs.mount', data['attributes'][
'lxc']['rootfs']['mount'])
if 'options' in data['attributes']['lxc']['rootfs']:
set_config(c, 'lxc.rootfs.options', data['attributes'][
'lxc']['rootfs']['options'])
if 'backend' in data['attributes']['lxc']['rootfs']:
set_config(c, 'lxc.rootfs.backend', data['attributes'][
'lxc']['rootfs']['backend'])
if 'se_context' in data['attributes']['lxc']:
set_config(c, 'lxc.se_context', data[
'attributes']['lxc']['se_context'])
if 'seccomp' in data['attributes']['lxc']:
set_config(c, 'lxc.seccomp', data[
'attributes']['lxc']['seccomp'])
if 'start' in data['attributes']['lxc']:
if 'auto' in data['attributes']['lxc']['start']:
set_config(c, 'lxc.start.auto', data['attributes'][
'lxc']['start']['auto'])
if 'delay' in data['attributes']['lxc']['start']:
set_config(c, 'lxc.start.delay', data['attributes'][
'lxc']['start']['delay'])
if 'order' in data['attributes']['lxc']['start']:
set_config(c, 'lxc.start.order', data['attributes'][
'lxc']['start']['order'])
if 'stopsignal' in data['attributes']['lxc']:
set_config(c, 'lxc.stopsignal', data[
'attributes']['lxc']['stopsignal'])
if 'syslog' in data['attributes']['lxc']:
set_config(c, 'lxc.syslog', data[
'attributes']['lxc']['syslog'])
if 'tty' in data['attributes']['lxc']:
set_config(c, 'lxc.tty', data['attributes']['lxc']['tty'])
if 'utsname' in data['attributes']['lxc']:
set_config(c, 'lxc.utsname', data[
'attributes']['lxc']['utsname'])
return Containers.get(self, container.id)
        api.abort(code=404, message='Container doesn\'t exist')
@user_has('ct_delete')
@api.doc(responses={
204: 'Container destroyed',
        404: 'Container doesn\'t exist',
409: 'Can\'t destroy and/or stop container',
})
def delete(self, id):
"""
Destroy container
"""
current_identity = import_user()
        container = Container.query.get(id)
        if not container:
            api.abort(code=404, message='Container doesn\'t exist')
        c = lxc.Container(container.name)
if c.defined and (id in current_identity.containers or current_identity.admin):
if c.running:
if not c.stop():
api.abort(
code=409, message='Can\'t destroy and/or stop container')
if not c.destroy():
api.abort(
code=409, message='Can\'t destroy and/or stop container')
return {}, 204
        api.abort(code=404, message='Container doesn\'t exist')
class ContainersClone(Resource):
decorators = [jwt_required]
@user_has('ct_clone')
@api.expect(containers_clone_post, validate=True)
@api.marshal_with(containers_fields_get)
def post(self, id):
"""
Clone container
"""
current_identity = import_user()
data = request.get_json()['data']
if 'name' in data['attributes']:
            container = Container.query.get(id)
            if not container:
                api.abort(code=404, message='Container doesn\'t exist')
            c = lxc.Container(container.name)
if c.defined and (id in current_identity.containers or current_identity.admin):
c2 = lxc.Container(data['attributes']['name'])
if not c2.defined:
c2 = c.clone(data['attributes']['name'],
flags=lxc.LXC_CLONE_MAYBE_SNAPSHOT)
if c2.defined:
# Add container to database
container = Container(name=data['attributes']['name'])
db.session.add(container)
db.session.commit()
# Get container ID
container = Container.query.filter_by(
name=data['attributes']['name']).first()
# Add container to allowed user's containers
user = User.query.get(current_identity.id)
user.containers.append(container.id)
db.session.commit()
return Containers.get(self, container.id), 201
        api.abort(code=404, message='Container doesn\'t exist')
class ContainersStart(Resource):
decorators = [jwt_required]
@user_has('ct_start')
@api.doc(responses={
204: 'Container started',
        404: 'Container doesn\'t exist',
500: 'Start timed out'
})
def post(self, id):
"""
Start container
"""
current_identity = import_user()
        container = Container.query.get(id)
        if not container:
            api.abort(code=404, message='Container doesn\'t exist')
        c = lxc.Container(container.name)
if c.defined and (id in current_identity.containers or current_identity.admin):
c.start()
if c.wait('RUNNING', 30):
return {}, 204
else:
api.abort(code=500, message='Start timed out')
        api.abort(code=404, message='Container doesn\'t exist')
class ContainersFreeze(Resource):
decorators = [jwt_required]
@user_has('ct_freeze')
@api.doc(responses={
204: 'Container frozen',
        404: 'Container doesn\'t exist',
500: 'Freeze timed out'
})
def post(self, id):
"""
Freeze container
"""
current_identity = import_user()
        container = Container.query.get(id)
        if not container:
            api.abort(code=404, message='Container doesn\'t exist')
        c = lxc.Container(container.name)
if c.defined and (id in current_identity.containers or current_identity.admin):
c.freeze()
if c.wait('FROZEN', 30):
return {}, 204
else:
api.abort(code=500, message='Freeze timed out')
        api.abort(code=404, message='Container doesn\'t exist')
class ContainersUnfreeze(Resource):
decorators = [jwt_required]
@user_has('ct_unfreeze')
@api.doc(responses={
204: 'Container thawed',
        404: 'Container doesn\'t exist',
500: 'Unfreeze timed out'
})
def post(self, id):
"""
Unfreeze container
"""
current_identity = import_user()
        container = Container.query.get(id)
        if not container:
            api.abort(code=404, message='Container doesn\'t exist')
        c = lxc.Container(container.name)
if c.defined and (id in current_identity.containers or current_identity.admin):
c.unfreeze()
if c.wait('RUNNING', 30):
return {}, 204
else:
api.abort(code=500, message='Unfreeze timed out')
        api.abort(code=404, message='Container doesn\'t exist')
class ContainersStop(Resource):
decorators = [jwt_required]
@user_has('ct_stop')
@api.doc(responses={
204: 'Container stopped',
        404: 'Container doesn\'t exist',
500: 'Stop timed out'
})
def post(self, id):
"""
Stop container
"""
current_identity = import_user()
        container = Container.query.get(id)
        if not container:
            api.abort(code=404, message='Container doesn\'t exist')
        c = lxc.Container(container.name)
if c.defined and (id in current_identity.containers or current_identity.admin):
c.stop()
if c.wait('STOPPED', 30):
return {}, 204
else:
api.abort(code=500, message='Stop timed out')
        api.abort(code=404, message='Container doesn\'t exist')
class ContainersShutdown(Resource):
decorators = [jwt_required]
@user_has('ct_stop')
@api.doc(responses={
204: 'Container stopped',
        404: 'Container doesn\'t exist',
500: 'Shutdown timed out'
})
def post(self, id):
"""
Shutdown container
"""
current_identity = import_user()
        container = Container.query.get(id)
        if not container:
            api.abort(code=404, message='Container doesn\'t exist')
        c = lxc.Container(container.name)
if c.defined and (id in current_identity.containers or current_identity.admin):
c.shutdown(30)
if c.wait('STOPPED', 30):
return {}, 204
else:
api.abort(code=500, message='Shutdown timed out')
        api.abort(code=404, message='Container doesn\'t exist')
class ContainersRestart(Resource):
decorators = [jwt_required]
@user_has('ct_restart')
@api.doc(responses={
204: 'Container restarted',
        404: 'Container doesn\'t exist',
500: 'Restart timed out'
})
def post(self, id):
"""
Restart container
"""
current_identity = import_user()
        container = Container.query.get(id)
        if not container:
            api.abort(code=404, message='Container doesn\'t exist')
        c = lxc.Container(container.name)
if c.defined and (id in current_identity.containers or current_identity.admin):
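            # A restart is a stop followed by a start; both helpers return
            # ({}, 204) on success, so check the status element of each tuple.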
            try:
                if (ContainersStop.post(self, id)[1] == 204 and
                        ContainersStart.post(self, id)[1] == 204):
                    return {}, 204
            except (TypeError, IndexError):
                api.abort(code=500, message='Unknown error')
        api.abort(code=404, message='Container doesn\'t exist')
class LxcCheckConfig(Resource):
decorators = [jwt_required]
@user_has('lxc_infos')
def get(self):
"""
Check LXC configuration (lxc-checkconfig)
"""
config = '/proc/config.gz'
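        # "config" defaults to the running kernel's /proc/config.gz; if that
        # file is missing it is re-pointed below to the headers or /boot copy.
        # is_set()/is_enabled() close over this variable, so they read its
        # final value at call time.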
        def is_set(config_name):
            opener = gzip.open if config.endswith('.gz') else open
            with opener(config, 'r') as config_file:
                for line in config_file:
                    # [ym] matches built-in (y) and module (m) options; the
                    # old [y|m] class also matched a literal '|'.
                    if re.match('%s=[ym]' % config_name, line):
                        return True
            return False
        def is_enabled(config_name, mandatory=None):
            if is_set(config_name):
                return 'enabled'
            if mandatory:
                return 'required'
            return 'missing'
kver = platform.uname()[2]
kver_split = kver.split('.')
kver_major = int(kver_split[0])
kver_minor = int(kver_split[1])
if not os.path.isfile(config):
headers_config = '/lib/modules/%s/build/.config' % kver
boot_config = '/boot/config-%s' % kver
if os.path.isfile(headers_config):
config = headers_config
if os.path.isfile(boot_config):
config = boot_config
config_dict = {}
config_dict['namespaces'] = is_enabled('CONFIG_NAMESPACES', True)
config_dict['utsname_namespace'] = is_enabled('CONFIG_UTS_NS')
config_dict['ipc_namespace'] = is_enabled('CONFIG_IPC_NS', True)
config_dict['pid_namespace'] = is_enabled('CONFIG_PID_NS', True)
config_dict['user_namespace'] = is_enabled('CONFIG_USER_NS')
config_dict['network_namespace'] = is_enabled('CONFIG_NET_NS')
config_dict[
'multiple_/dev/pts_instances'] = is_enabled('CONFIG_DEVPTS_MULTIPLE_INSTANCES')
config_dict['cgroup'] = is_enabled('CONFIG_CGROUPS', True)
config_dict['cgroup_namespace'] = is_enabled('CONFIG_CGROUP_NS', True)
config_dict['cgroup_device'] = is_enabled('CONFIG_CGROUP_DEVICE')
config_dict['cgroup_sched'] = is_enabled('CONFIG_CGROUP_SCHED')
config_dict['cgroup_cpu_account'] = is_enabled('CONFIG_CGROUP_CPUACCT')
        # Compare as a version tuple so that e.g. 4.2 counts as >= 3.6.
        if (kver_major, kver_minor) >= (3, 6):
            config_dict['cgroup_memory_controller'] = is_enabled(
                'CONFIG_MEMCG')
else:
config_dict['cgroup_memory_controller'] = is_enabled(
'CONFIG_CGROUP_MEM_RES_CTLR')
if is_set('CONFIG_SMP'):
config_dict['cgroup_cpuset'] = is_enabled('CONFIG_CPUSETS')
config_dict['veth_pair_device'] = is_enabled('CONFIG_VETH')
config_dict['macvlan'] = is_enabled('CONFIG_MACVLAN')
config_dict['vlan'] = is_enabled('CONFIG_VLAN_8021Q')
        # File capabilities are always built in from kernel 2.6.33 onwards.
        try:
            kver_patch = int(re.split('[^0-9]', kver_split[2])[0])
        except (IndexError, ValueError):
            kver_patch = 0
        if (kver_major, kver_minor, kver_patch) < (2, 6, 33):
            config_dict['file_capabilities'] = is_enabled(
                'CONFIG_SECURITY_FILE_CAPABILITIES')
        else:
            config_dict['file_capabilities'] = 'enabled'
return {'data': config_dict}
class HostStats(Resource):
decorators = [jwt_required]
@user_has('host_stats')
@api.marshal_with(host_stats_fields_get)
def get(self, container=False):
"""
Get host stats (uptime, cpu, ram, etc)
"""
host_cpu_infos = lwp.host_cpu_infos()
cpu_count_logical = psutil.cpu_count()
cpu_count_physical = psutil.cpu_count(logical=False)
cpu_percent = lwp.host_cpu_percent()
virtual_memory = psutil.virtual_memory()
swap_memory = psutil.swap_memory()
disk_partitions = psutil.disk_partitions()
disk_partitions_usage = []
for partition in disk_partitions:
partition_data = psutil.disk_usage(partition.mountpoint)
disk_partitions_usage.append({
'name': partition.mountpoint,
'total': partition_data.total,
'used': partition_data.used,
'free': partition_data.free,
'percent': partition_data.percent
})
net_if_addrs = psutil.net_if_addrs()
adapters = []
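        # For each interface, record the last IPv4/IPv6 address reported.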
for adapter in net_if_addrs:
adapters.append({
'name': adapter,
'ipv4': None,
'ipv6': None
})
index = len(adapters) - 1
for snic in net_if_addrs[adapter]:
if snic.family.name == 'AF_INET':
adapters[index]['ipv4'] = snic.address
if snic.family.name == 'AF_INET6':
adapters[index]['ipv6'] = snic.address
json_output = {
'uptime': lwp.host_uptime(),
'hostname': socket.gethostname(),
'distrib': ' '.join(linux_distribution()),
'disk': disk_partitions_usage,
'cpu': {
'usage': cpu_percent,
'model': host_cpu_infos['name'],
'physical': cpu_count_physical,
'logical': cpu_count_logical
},
'memory': {
'virtual': {
'total': virtual_memory.total,
'used': virtual_memory.used,
'free': virtual_memory.free,
'percent': virtual_memory.percent
},
'swap': {
'total': swap_memory.total,
'used': swap_memory.used,
'free': swap_memory.free,
'percent': swap_memory.percent
}
},
'adapters': adapters,
'kernel': platform.release(),
'lxc': {
'version': lxc.version,
'lxcpath': lxc.get_global_config_item('lxc.lxcpath'),
'default_config': lxc.get_global_config_item('lxc.default_config')
}
}
if not container:
output = {
'attributes': json_output
}
else:
output = json_output
return {'data': output}
host_reboot_parser = api.parser()
host_reboot_parser.add_argument('message', type=str, location='json')
class HostReboot(Resource):
decorators = [jwt_required]
@user_has('host_reboot')
@api.expect(host_reboot_fields_post)
def post(self):
"""
Reboot host
"""
args = host_reboot_parser.parse_args()
if not args.message:
message = 'Reboot from RESTful API'
else:
message = args.message
        msg = '*** LXC Web Panel ***\n%s' % message
try:
# DEBUG
subprocess.check_call('echo \'%s\' | wall' % msg, shell=True)
# subprocess.check_call('/sbin/shutdown -r now \'%s\'' % msg, shell=True)
return {
'status': 'success',
'message': message
}
        except Exception:
api.abort(code=500, message='Error during system call')
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2014-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
import csv
import json
from nose.tools import assert_equal, assert_not_equal, ok_
from bigmler.tests.world import world, res_filename
from subprocess import check_call, CalledProcessError
from bigml.api import check_resource, HTTP_NOT_FOUND
from bigmler.checkpoint import file_number_of_lines
from bigmler.tests.common_steps import (check_debug, store_init_resources,
store_final_resources, check_init_equals_final)
from bigmler.tests.basic_tst_prediction_steps import shell_execute
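# The commented "#@step(...)" lines preserve the original BDD step sentences
# that each helper below implements.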
#@step(r'I create a BigML source from file "(.*)" storing results in "(.*)"')
def i_create_source_from_file(step, data=None, output_dir=None):
ok_(data is not None and output_dir is not None)
command = ("bigmler --train " + res_filename(data) + " --store --output-dir " +
output_dir +
" --no-dataset --no-model --store")
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None,
project=False)
def i_check_source_exists_by_id(step, source_id):
source = check_resource(source_id,
world.api.get_source)
assert_not_equal(source['code'], HTTP_NOT_FOUND)
world.source = source
#@step(r'I check that the source exists$')
def i_check_source_exists(step):
source_file = "%s%ssource" % (world.directory, os.sep)
source_file = open(source_file, "r")
source_id = source_file.readline().strip()
source_file.close()
i_check_source_exists_by_id(step, source_id)
#@step(r'I check that the failed source exists$')
def i_check_faulty_source_exists(step):
source_file = "%s%ssource" % (world.directory, os.sep)
source_file = open(source_file, "r")
source_id = source_file.readline().strip()
source_file.close()
source = world.api.get_source(source_id)
assert_not_equal(source['code'], HTTP_NOT_FOUND)
world.source = source
#@step(r'I check that the source doesn\'t exist$')
def i_check_source_does_not_exist(step, source_id=None):
if source_id is None:
source_id = world.source['resource']
source = world.api.get_source(source_id)
assert_equal(source['code'], HTTP_NOT_FOUND)
#@step(r'I delete the source by id using --ids storing results in "(.*)"$')
def i_delete_source_by_ids(step, output_dir=None):
    ok_(output_dir is not None)
command = ("bigmler delete --ids " + world.source['resource'] +
" --output-dir " + output_dir)
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
#@step(r'I delete the source by id using --ids and --dry-run storing results in "(.*)"$')
def i_delete_source_by_ids_dry(step, output_dir=None):
    ok_(output_dir is not None)
command = ("bigmler delete --ids " + world.source['resource'] +
" --dry-run --output-dir " + output_dir)
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
#@step(r'I delete the source by id using --ids and --resource-types "(.*)" storing results in "(.*)"$')
def i_delete_source_by_ids_filtered(step, resource_types=None, output_dir=None):
    ok_(output_dir is not None and resource_types is not None)
command = ("bigmler delete --ids " + world.source['resource'] +
" --dry-run --output-dir " + output_dir +
" --resource-types " + resource_types)
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
#@step(r'I delete the source by id using --from-file and the source file storing results in "(.*)"$')
def i_delete_source_by_file(step, output_dir=None):
    ok_(output_dir is not None)
command = ("bigmler delete --from-file %s%ssource " % (output_dir, os.sep) +
" --output-dir " + output_dir)
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
#@step(r'I delete the source by id using --from-file and --status faulty and the source file storing results in "(.*)"$')
def i_delete_source_faulty_by_file(step, output_dir=None):
ok_(output_dir is not None)
command = ("bigmler delete --from-file %s%ssource " % (output_dir, os.sep) +
" --status faulty --output-dir " + output_dir)
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
#@step(r'I delete the source by id using --from-file, the source file and --resource-types "(.*)" storing results in "(.*)"$')
def i_delete_source_by_file_filtered(step, resource_types=None, output_dir=None):
ok_(output_dir is not None and resource_types is not None)
command = ("bigmler delete --from-file %s%ssource " % (output_dir, os.sep) +
" --output-dir " + output_dir +
" --resource-types " + resource_types)
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
#@step(r'I store the source id as (lower|upper|reference)$')
def i_store_source_id_as_bound(step, which=None):
if which == 'lower':
world.source_lower = world.source['resource']
elif which == 'upper':
world.source_upper = world.source['resource']
elif which == 'reference':
world.source_reference = world.source['resource']
#@step(r'I delete the source using --older-than and --newer-than storing results in "(.*)"$')
def i_delete_source_older_newer(step, output_dir=None):
ok_(output_dir is not None)
command = ("bigmler delete --older-than " + world.source_upper +
" --newer-than " + world.source_lower +
" --output-dir " + output_dir)
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
#@step(r'I check that the reference source doesn\'t exist$')
def i_check_reference_source_does_not_exist(step):
i_check_source_does_not_exist(step, source_id=world.source_reference)
#@step(r'I delete the source using --older-than and --newer-than with resource_types "(.*)" storing results in "(.*)"$')
def i_delete_source_older_newer_with_resource_types(step, resource_types=None, output_dir=None):
ok_(output_dir is not None and resource_types is not None)
command = ("bigmler delete --older-than " + world.source_upper +
" --newer-than " + world.source_lower +
" --resource-types " + resource_types +
" --output-dir " + output_dir)
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
#@step(r'I check that the reference source exists$')
def i_check_reference_source_exists(step):
i_check_source_exists_by_id(step, source_id=world.source_reference)
#@step(r'I create a BigML source from file "(.*)" with tag "(.*)" storing results in "(.*)"')
def i_create_source_from_file_with_tag(step, data=None, tag=None, output_dir=None):
ok_(data is not None and output_dir is not None and tag is not None)
command = ("bigmler --train " + res_filename(data) + " --store --output-dir " +
output_dir + " --tag " + tag +
" --no-dataset --no-model --store")
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
#@step(r'I create a BigML source from file "(.*)" with tag "(.*)" storing results in "(.*)"')
def i_create_faulty_source_from_file_with_tag(step, data=None, tag=None, output_dir=None):
ok_(data is not None and output_dir is not None and tag is not None)
command = ("bigmler --train " + res_filename(data) + " --store --output-dir " +
output_dir + " --tag " + tag +
" --no-dataset --no-model --store")
try:
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
    except Exception:
pass
#@step(r'I delete the source using --newer-than and --source-tag "(.*)" storing results in "(.*)"$')
def i_delete_source_newer_and_tag(step, tag=None, output_dir=None):
ok_(output_dir is not None and tag is not None)
command = ("bigmler delete --newer-than " + world.source_lower +
" --source-tag " + tag +
" --output-dir " + output_dir)
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
#@step(r'I delete the source using --newer-than and --status faulty and --source-tag "(.*)" storing results in "(.*)"$')
def i_delete_source_newer_faulty_and_tag(step, tag=None, output_dir=None):
ok_(output_dir is not None and tag is not None)
command = ("bigmler delete --newer-than " + world.source_lower +
" --source-tag " + tag +
" --status faulty --output-dir " + output_dir)
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
#@step(r'I check that the upper source exists$')
def i_check_upper_source_exists(step):
i_check_source_exists_by_id(step, source_id=world.source_upper)
#@step(r'I create a BigML dataset from the source with tag "(.*)" storing results in "(.*)"')
def i_create_dataset_from_source_with_tag(step, tag=None, output_dir=None):
ok_(tag is not None and output_dir is not None)
command = ("bigmler --source " + world.source['resource'] +
" --tag " + tag +
" --store --output-dir " + output_dir +
" --no-model --store")
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
def i_check_dataset_exists_by_id(step, dataset_id):
dataset = check_resource(dataset_id,
world.api.get_dataset)
assert_not_equal(dataset['code'], HTTP_NOT_FOUND)
world.dataset = dataset
#@step(r'I check that the dataset exists$')
def i_check_dataset_exists(step):
dataset_file = "%s%sdataset" % (world.directory, os.sep)
dataset_file = open(dataset_file, "r")
dataset_id = dataset_file.readline().strip()
dataset_file.close()
i_check_dataset_exists_by_id(step, dataset_id)
#@step(r'I check that the dataset doesn\'t exist$')
def i_check_dataset_does_not_exist(step, dataset_id=None):
if dataset_id is None:
dataset_id = world.dataset['resource']
dataset = world.api.get_dataset(dataset_id)
assert_equal(dataset['code'], HTTP_NOT_FOUND)
#@step(r'I delete the resources using --newer-than and --all-tag "(.*)" storing results in "(.*)"$')
def i_delete_resources_newer_and_tag(step, tag=None, output_dir=None):
ok_(output_dir is not None and tag is not None)
command = ("bigmler delete --newer-than " + world.source_lower +
" --all-tag " + tag +
" --output-dir " + output_dir)
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
#@step(r'I create BigML resources uploading train "(.*)" storing results in "(.*)"$')
def i_create_all_resources_in_output_dir(step, data=None, output_dir=None):
ok_(output_dir is not None and data is not None)
command = ("bigmler --train " + res_filename(data) +
" --output-dir " + output_dir)
shell_execute(command, os.path.join(output_dir, "p.csv"), test=None)
#@step(r'I check that the number of resources has changed$')
def i_check_changed_number_of_resources(step):
store_final_resources()
    ok_(world.counters['sources']['final'] != world.counters['sources']['init'] or
        world.counters['datasets']['final'] != world.counters['datasets']['init'] or
        world.counters['models']['final'] != world.counters['models']['init'])
#@step(r'I delete the resources from the output directory$')
def i_delete_resources_from_dir(step):
command = ("bigmler delete --from-dir " + world.directory +
" --output-dir " + world.directory)
shell_execute(command, os.path.join(world.directory, "p.csv"), test=None)
#@step(r'I store the number of existing resources$')
def i_store_the_number_of_resources(step):
store_init_resources()
#@step(r'the number of resources has not changed$')
def i_check_equal_number_of_resources(step):
store_final_resources()
check_init_equals_final()
|
|
'''
Created on Apr 17, 2009
@author: Sergey Chikuyonok (http://chikuyonok.ru)
'''
zen_settings = {
'indentation': '\t',
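    # In snippet bodies "|" marks the caret position after expansion and
    # "${child}" is the placeholder for nested child content.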
'html': {
'snippets': {
'cc:ie6': '<!--[if lte IE 6]>\n\t${child}|\n<![endif]-->',
'cc:ie': '<!--[if IE]>\n\t${child}|\n<![endif]-->',
'cc:noie': '<!--[if !IE]><!-->\n\t${child}|\n<!--<![endif]-->',
'html:4t': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' +
'<html lang="ru">\n' +
'<head>\n' +
' <title></title>\n' +
' <meta http-equiv="Content-Type" content="text/html;charset=UTF-8">\n' +
'</head>\n' +
'<body>\n\t${child}|\n</body>\n' +
'</html>',
'html:4s': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n' +
'<html lang="ru">\n' +
'<head>\n' +
' <title></title>\n' +
' <meta http-equiv="Content-Type" content="text/html;charset=UTF-8">\n' +
'</head>\n' +
'<body>\n\t${child}|\n</body>\n' +
'</html>',
'html:xt': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n' +
'<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="ru">\n' +
'<head>\n' +
' <title></title>\n' +
' <meta http-equiv="Content-Type" content="text/html;charset=UTF-8" />\n' +
'</head>\n' +
'<body>\n\t${child}|\n</body>\n' +
'</html>',
'html:xs': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n' +
'<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="ru">\n' +
'<head>\n' +
' <title></title>\n' +
' <meta http-equiv="Content-Type" content="text/html;charset=UTF-8" />\n' +
'</head>\n' +
'<body>\n\t${child}|\n</body>\n' +
'</html>',
'html:xxs': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' +
'<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="ru">\n' +
'<head>\n' +
' <title></title>\n' +
' <meta http-equiv="Content-Type" content="text/html;charset=UTF-8" />\n' +
'</head>\n' +
'<body>\n\t${child}|\n</body>\n' +
'</html>',
'html:5': '<!DOCTYPE HTML>\n' +
'<html lang="ru-RU">\n' +
'<head>\n' +
' <title></title>\n' +
' <meta charset="UTF-8">\n' +
'</head>\n' +
'<body>\n\t${child}|\n</body>\n' +
'</html>'
},
'default_attributes': {
'a': {'href': ''},
'a:link': {'href': 'http://|'},
'a:mail': {'href': 'mailto:|'},
'abbr': {'title': ''},
'acronym': {'title': ''},
'base': {'href': ''},
'bdo': {'dir': ''},
'bdo:r': {'dir': 'rtl'},
'bdo:l': {'dir': 'ltr'},
'link:css': [{'rel': "stylesheet"}, {'type': "text/css"}, {'href': "|style.css"}, {'media': "all"}],
'link:print': [{'rel': "stylesheet"}, {'type': "text/css"}, {'href': "|print.css"}, {'media': "print"}],
'link:favicon': [{'rel': "shortcut icon"}, {'type': "image/x-icon"}, {'href': "|favicon.ico"}],
'link:touch': [{'rel': "apple-touch-icon"}, {'href': "|favicon.png"}],
'link:rss': [{'rel': "alternate"}, {'type': "application/rss+xml"}, {'title': "RSS"}, {'href': "|rss.xml"}],
'link:atom': [{'rel': "alternate"}, {'type': "application/atom+xml"}, {'title': "Atom"}, {'href': "atom.xml"}],
'meta:utf': [{"http-equiv": "Content-Type"}, {'content': "text/html;charset=UTF-8"}],
'meta:win': [{"http-equiv": "Content-Type"}, {'content': "text/html;charset=Win-1251"}],
'meta:compat': [{"http-equiv": "X-UA-Compatible"}, {'content': "IE=7"}],
'style': {'type': 'text/css'},
'script': {'type': 'text/javascript'},
'script:src': [{'type': 'text/javascript'}, {'src': ""}],
'img': [{'src': ''}, {'alt': ''}],
'iframe': [{'src': ''}, {'frameborder': '0'}],
'embed': [{'src': ''}, {'type': ''}],
'object': [{'data': ''}, {'type': ''}],
'param': [{'name': ''}, {'value': ''}],
'map': {'name': ''},
'area': [{'shape': ''}, {'coords': ''}, {'href': ''}, {'alt': ''}],
'area:d': [{'shape': 'default'}, {'href': ''}, {'alt': ''}],
'area:c': [{'shape': 'circle'}, {'coords': ''}, {'href': ''}, {'alt': ''}],
'area:r': [{'shape': 'rect'}, {'coords': ''}, {'href': ''}, {'alt': ''}],
'area:p': [{'shape': 'poly'}, {'coords': ''}, {'href': ''}, {'alt': ''}],
'link': [{'rel': 'stylesheet'}, {'href': ''}],
'form': {'action': ''},
'form:get': [{'action': ''}, {'method': 'get'}],
'form:post': [{'action': ''}, {'method': 'post'}],
'label': {'for': ''},
'input': {'type': ''},
'input:hidden': [{'type': 'hidden'}, {'name': ''}],
'input:h': [{'type': 'hidden'}, {'name': ''}],
'input:text': [{'type': 'text'}, {'name': ''}, {'id': ''}],
'input:t': [{'type': 'text'}, {'name': ''}, {'id': ''}],
'input:search': [{'type': 'search'}, {'name': ''}, {'id': ''}],
'input:email': [{'type': 'email'}, {'name': ''}, {'id': ''}],
'input:url': [{'type': 'url'}, {'name': ''}, {'id': ''}],
'input:password': [{'type': 'password'}, {'name': ''}, {'id': ''}],
'input:p': [{'type': 'password'}, {'name': ''}, {'id': ''}],
'input:datetime': [{'type': 'datetime'}, {'name': ''}, {'id': ''}],
'input:date': [{'type': 'date'}, {'name': ''}, {'id': ''}],
'input:datetime-local': [{'type': 'datetime-local'}, {'name': ''}, {'id': ''}],
'input:month': [{'type': 'month'}, {'name': ''}, {'id': ''}],
'input:week': [{'type': 'week'}, {'name': ''}, {'id': ''}],
'input:time': [{'type': 'time'}, {'name': ''}, {'id': ''}],
'input:number': [{'type': 'number'}, {'name': ''}, {'id': ''}],
'input:color': [{'type': 'color'}, {'name': ''}, {'id': ''}],
'input:checkbox': [{'type': 'checkbox'}, {'name': ''}, {'id': ''}],
'input:c': [{'type': 'checkbox'}, {'name': ''}, {'id': ''}],
'input:radio': [{'type': 'radio'}, {'name': ''}, {'id': ''}],
'input:r': [{'type': 'radio'}, {'name': ''}, {'id': ''}],
'input:file': [{'type': 'file'}, {'name': ''}, {'id': ''}],
'input:f': [{'type': 'file'}, {'name': ''}, {'id': ''}],
'input:submit': [{'type': 'submit'}, {'value': ''}],
'input:s': [{'type': 'submit'}, {'value': ''}],
'input:image': [{'type': 'image'}, {'src': ''}, {'alt': ''}],
'input:i': [{'type': 'image'}, {'src': ''}, {'alt': ''}],
'input:reset': [{'type': 'reset'}, {'value': ''}],
'input:button': [{'type': 'button'}, {'value': ''}],
'input:b': [{'type': 'button'}, {'value': ''}],
'select': [{'name': ''}, {'id': ''}],
'option': {'value': ''},
'textarea': [{'name': ""}, {'id': ""}, {'cols': "30"}, {'rows': "10"}],
'menu:context': {'type': 'context'},
'menu:c': {'type': 'context'},
'menu:toolbar': {'type': 'toolbar'},
'menu:t': {'type': 'toolbar'},
'video': {'src': ''},
'audio': {'src': ''},
'html:xml': [{'xmlns': "http://www.w3.org/1999/xhtml"}, {'xml:lang': "ru"}]
},
'aliases': {
'link:*': 'link',
'meta:*': 'meta',
'area:*': 'area',
'bdo:*': 'bdo',
'form:*': 'form',
'input:*': 'input',
'script:*': 'script',
'html:*': 'html',
'a:*': 'a',
'menu:*': 'menu',
'bq': 'blockquote',
'acr': 'acronym',
'fig': 'figure',
'ifr': 'iframe',
'emb': 'embed',
'obj': 'object',
'src': 'source',
'cap': 'caption',
'colg': 'colgroup',
'fst': 'fieldset',
'btn': 'button',
'optg': 'optgroup',
'opt': 'option',
'tarea': 'textarea',
'leg': 'legend',
'sect': 'section',
'art': 'article',
'hdr': 'header',
'ftr': 'footer',
'adr': 'address',
'dlg': 'dialog',
'str': 'strong',
'prog': 'progress',
'fset': 'fieldset',
'datag': 'datagrid',
'datal': 'datalist',
'kg': 'keygen',
'out': 'output',
'det': 'details',
'cmd': 'command'
},
# Expanded patterns (ends with +)
'expandos': {
'ol': 'ol>li',
'ul': 'ul>li',
'dl': 'dl>dt+dd',
'map': 'map>area',
'table': 'table>tr>td',
'colgroup': 'colgroup>col',
'colg': 'colgroup>col',
'tr': 'tr>td',
'select': 'select>option',
'optgroup': 'optgroup>option',
'optg': 'optgroup>option'
},
'empty_elements': "area,base,basefont,br,col,frame,hr,img,input,isindex,link,meta,param,embed,keygen,command",
'block_elements': "address,applet,blockquote,button,center,dd,del,dir,div,dl,dt,fieldset,form,frameset,hr,iframe,ins,isindex,li,link,map,menu,noframes,noscript,object,ol,p,pre,script,table,tbody,td,tfoot,th,thead,tr,ul,h1,h2,h3,h4,h5,h6",
'inline_elements': "a,abbr,acronym,applet,b,basefont,bdo,big,br,button,cite,code,del,dfn,em,font,i,iframe,img,input,ins,kbd,label,map,object,q,s,samp,script,select,small,span,strike,strong,sub,sup,textarea,tt,u,var"
},
'css': {
'snippets': {
"@i": "@import url(|);",
"@m": "@media print {\n\t|\n}",
"@f": "@font-face {\n\tfont-family:|;\n\tsrc:url(|);\n}",
"!": "!important",
"pos": "position:|;",
"pos:s": "position:static",
"pos:a": "position:absolute",
"pos:r": "position:relative",
"pos:f": "position:fixed",
"t": "top:|;",
"t:a": "top:auto",
"r": "right:|;",
"r:a": "right:auto",
"b": "bottom:|;",
"b:a": "bottom:auto",
"l": "left:|;",
"l:a": "left:auto",
"z": "z-index:|;",
"z:a": "z-index:auto",
"fl": "float:|;",
"fl:n": "float:none",
"fl:l": "float:left",
"fl:r": "float:right",
"cl": "clear:|;",
"cl:n": "clear:none",
"cl:l": "clear:left",
"cl:r": "clear:right",
"cl:b": "clear:both",
"d": "display:|;",
"d:n": "display:none",
"d:b": "display:block",
"d:ib": "display:inline",
"d:li": "display:list-item",
"d:ri": "display:run-in",
"d:cp": "display:compact",
"d:tb": "display:table",
"d:itb": "display:inline-table",
"d:tbcp": "display:table-caption",
"d:tbcl": "display:table-column",
"d:tbclg": "display:table-column-group",
"d:tbhg": "display:table-header-group",
"d:tbfg": "display:table-footer-group",
"d:tbr": "display:table-row",
"d:tbrg": "display:table-row-group",
"d:tbc": "display:table-cell",
"d:rb": "display:ruby",
"d:rbb": "display:ruby-base",
"d:rbbg": "display:ruby-base-group",
"d:rbt": "display:ruby-text",
"d:rbtg": "display:ruby-text-group",
"v": "visibility:|;",
"v:v": "visibility:visible",
"v:h": "visibility:hidden",
"v:c": "visibility:collapse",
"ov": "overflow:|;",
"ov:v": "overflow:visible",
"ov:h": "overflow:hidden",
"ov:s": "overflow:scroll",
"ov:a": "overflow:auto",
"ovx": "overflow-x:|;",
"ovx:v": "overflow-x:visible",
"ovx:h": "overflow-x:hidden",
"ovx:s": "overflow-x:scroll",
"ovx:a": "overflow-x:auto",
"ovy": "overflow-y:|;",
"ovy:v": "overflow-y:visible",
"ovy:h": "overflow-y:hidden",
"ovy:s": "overflow-y:scroll",
"ovy:a": "overflow-y:auto",
"ovs": "overflow-style:|;",
"ovs:a": "overflow-style:auto",
"ovs:s": "overflow-style:scrollbar",
"ovs:p": "overflow-style:panner",
"ovs:m": "overflow-style:move",
"ovs:mq": "overflow-style:marquee",
"zoo": "zoom:1",
"cp": "clip:|;",
"cp:a": "clip:auto",
"cp:r": "clip:rect(|)",
"bxz": "box-sizing:|;",
"bxz:cb": "box-sizing:content-box",
"bxz:bb": "box-sizing:border-box",
"bxsh": "box-shadow:|;",
"bxsh:n": "box-shadow:none",
"bxsh:w": "-webkit-box-shadow:0 0 0 #000",
"bxsh:m": "-moz-box-shadow:0 0 0 0 #000",
"m": "margin:|;",
"m:a": "margin:auto",
"m:0": "margin:0",
"m:2": "margin:0 0",
"m:3": "margin:0 0 0",
"m:4": "margin:0 0 0 0",
"mt": "margin-top:|;",
"mt:a": "margin-top:auto",
"mr": "margin-right:|;",
"mr:a": "margin-right:auto",
"mb": "margin-bottom:|;",
"mb:a": "margin-bottom:auto",
"ml": "margin-left:|;",
"ml:a": "margin-left:auto",
"p": "padding:|;",
"p:0": "padding:0",
"p:2": "padding:0 0",
"p:3": "padding:0 0 0",
"p:4": "padding:0 0 0 0",
"pt": "padding-top:|;",
"pr": "padding-right:|;",
"pb": "padding-bottom:|;",
"pl": "padding-left:|;",
"w": "width:|;",
"w:a": "width:auto",
"h": "height:|;",
"h:a": "height:auto",
"maw": "max-width:|;",
"maw:n": "max-width:none",
"mah": "max-height:|;",
"mah:n": "max-height:none",
"miw": "min-width:|;",
"mih": "min-height:|;",
"o": "outline:|;",
"o:n": "outline:none",
"oo": "outline-offset:|;",
"ow": "outline-width:|;",
"os": "outline-style:|;",
"oc": "outline-color:#000",
"oc:i": "outline-color:invert",
"bd": "border:|;",
"bd+": "border:1px solid #000",
"bd:n": "border:none",
"bdbk": "border-break:|;",
"bdbk:c": "border-break:close",
"bdcl": "border-collapse:|;",
"bdcl:c": "border-collapse:collapse",
"bdcl:s": "border-collapse:separate",
"bdc": "border-color:#000",
"bdi": "border-image:url(|)",
"bdi:n": "border-image:none",
"bdi:w": "-webkit-border-image:url(|) 0 0 0 0 stretch stretch",
"bdi:m": "-moz-border-image:url(|) 0 0 0 0 stretch stretch",
"bdti": "border-top-image:url(|)",
"bdti:n": "border-top-image:none",
"bdri": "border-right-image:url(|)",
"bdri:n": "border-right-image:none",
"bdbi": "border-bottom-image:url(|)",
"bdbi:n": "border-bottom-image:none",
"bdli": "border-left-image:url(|)",
"bdli:n": "border-left-image:none",
"bdci": "border-corner-image:url(|)",
"bdci:n": "border-corner-image:none",
"bdci:c": "border-corner-image:continue",
"bdtli": "border-top-left-image:url(|)",
"bdtli:n": "border-top-left-image:none",
"bdtli:c": "border-top-left-image:continue",
"bdtri": "border-top-right-image:url(|)",
"bdtri:n": "border-top-right-image:none",
"bdtri:c": "border-top-right-image:continue",
"bdbri": "border-bottom-right-image:url(|)",
"bdbri:n": "border-bottom-right-image:none",
"bdbri:c": "border-bottom-right-image:continue",
"bdbli": "border-bottom-left-image:url(|)",
"bdbli:n": "border-bottom-left-image:none",
"bdbli:c": "border-bottom-left-image:continue",
"bdf": "border-fit:|;",
"bdf:c": "border-fit:clip",
"bdf:r": "border-fit:repeat",
"bdf:sc": "border-fit:scale",
"bdf:st": "border-fit:stretch",
"bdf:ow": "border-fit:overwrite",
"bdf:of": "border-fit:overflow",
"bdf:sp": "border-fit:space",
"bdl": "border-length:|;",
"bdl:a": "border-length:auto",
"bdsp": "border-spacing:|;",
"bds": "border-style:|;",
"bds:n": "border-style:none",
"bds:h": "border-style:hidden",
"bds:dt": "border-style:dotted",
"bds:ds": "border-style:dashed",
"bds:s": "border-style:solid",
"bds:db": "border-style:double",
"bds:dtds": "border-style:dot-dash",
"bds:dtdtds": "border-style:dot-dot-dash",
"bds:w": "border-style:wave",
"bds:g": "border-style:groove",
"bds:r": "border-style:ridge",
"bds:i": "border-style:inset",
"bds:o": "border-style:outset",
"bdw": "border-width:|;",
"bdt": "border-top:|;",
"bdt+": "border-top:1px solid #000",
"bdt:n": "border-top:none",
"bdtw": "border-top-width:|;",
"bdts": "border-top-style:|;",
"bdts:n": "border-top-style:none",
"bdtc": "border-top-color:#000",
"bdr": "border-right:|;",
"bdr+": "border-right:1px solid #000",
"bdr:n": "border-right:none",
"bdrw": "border-right-width:|;",
"bdrs": "border-right-style:|;",
"bdrs:n": "border-right-style:none",
"bdrc": "border-right-color:#000",
"bdb": "border-bottom:|;",
"bdb+": "border-bottom:1px solid #000",
"bdb:n": "border-bottom:none",
"bdbw": "border-bottom-width:|;",
"bdbs": "border-bottom-style:|;",
"bdbs:n": "border-bottom-style:none",
"bdbc": "border-bottom-color:#000",
"bdl": "border-left:|;",
"bdl+": "border-left:1px solid #000",
"bdl:n": "border-left:none",
"bdlw": "border-left-width:|;",
"bdls": "border-left-style:|;",
"bdls:n": "border-left-style:none",
"bdlc": "border-left-color:#000",
"bdrs": "border-radius:|;",
"bdtrrs": "border-top-right-radius:|;",
"bdtlrs": "border-top-left-radius:|;",
"bdbrrs": "border-bottom-right-radius:|;",
"bdblrs": "border-bottom-left-radius:|;",
"bg": "background:|;",
"bg+": "background:#FFF url(|) 0 0 no-repeat;",
"bg:n": "background:none",
"bg:ie": "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='|x.png');",
"bgc": "background-color:#FFF",
"bgi": "background-image:url(|);",
"bgi:n": "background-image:none",
"bgr": "background-repeat:|;",
"bgr:n": "background-repeat:no-repeat",
"bgr:x": "background-repeat:repeat-x",
"bgr:y": "background-repeat:repeat-y",
"bga": "background-attachment:|;",
"bga:f": "background-attachment:fixed",
"bga:s": "background-attachment:scroll",
"bgp": "background-position:0 0",
"bgpx": "background-position-x:|;",
"bgpy": "background-position-y:|;",
"bgbk": "background-break:|;",
"bgbk:bb": "background-break:bounding-box",
"bgbk:eb": "background-break:each-box",
"bgbk:c": "background-break:continuous",
"bgcp": "background-clip:|;",
"bgcp:bb": "background-clip:border-box",
"bgcp:pb": "background-clip:padding-box",
"bgcp:cb": "background-clip:content-box",
"bgcp:nc": "background-clip:no-clip",
"bgo": "background-origin:|;",
"bgo:pb": "background-origin:padding-box",
"bgo:bb": "background-origin:border-box",
"bgo:cb": "background-origin:content-box",
"bgz": "background-size:|;",
"bgz:a": "background-size:auto",
"bgz:ct": "background-size:contain",
"bgz:cv": "background-size:cover",
"c": "color:#000",
"tbl": "table-layout:|;",
"tbl:a": "table-layout:auto",
"tbl:f": "table-layout:fixed",
"cps": "caption-side:|;",
"cps:t": "caption-side:top",
"cps:b": "caption-side:bottom",
"ec": "empty-cells:|;",
"ec:s": "empty-cells:show",
"ec:h": "empty-cells:hide",
"lis": "list-style:|;",
"lis:n": "list-style:none",
"lisp": "list-style-position:|;",
"lisp:i": "list-style-position:inside",
"lisp:o": "list-style-position:outside",
"list": "list-style-type:|;",
"list:n": "list-style-type:none",
"list:d": "list-style-type:disc",
"list:c": "list-style-type:circle",
"list:s": "list-style-type:square",
"list:dc": "list-style-type:decimal",
"list:dclz": "list-style-type:decimal-leading-zero",
"list:lr": "list-style-type:lower-roman",
"list:ur": "list-style-type:upper-roman",
"lisi": "list-style-image:|;",
"lisi:n": "list-style-image:none",
"q": "quotes:|;",
"q:n": "quotes:none",
"q:ru": "quotes:'\00AB' '\00BB' '\201E' '\201C'",
"q:en": "quotes:'\201C' '\201D' '\2018' '\2019'",
"ct": "content:|;",
"ct:n": "content:normal",
"ct:oq": "content:open-quote",
"ct:noq": "content:no-open-quote",
"ct:cq": "content:close-quote",
"ct:ncq": "content:no-close-quote",
"ct:a": "content:attr(|);",
"ct:c": "content:counter(|);",
"ct:cs": "content:counters(|);",
"coi": "counter-increment:|;",
"cor": "counter-reset:|;",
"va": "vertical-align:|;",
"va:sup": "vertical-align:super",
"va:t": "vertical-align:top",
"va:tt": "vertical-align:text-top",
"va:m": "vertical-align:middle",
"va:bl": "vertical-align:baseline",
"va:b": "vertical-align:bottom",
"va:tb": "vertical-align:text-bottom",
"va:sub": "vertical-align:sub",
"ta": "text-align:|;",
"ta:l": "text-align:left",
"ta:c": "text-align:center",
"ta:r": "text-align:right",
"tal": "text-align-last:|;",
"tal:a": "text-align-last:auto",
"tal:l": "text-align-last:left",
"tal:c": "text-align-last:center",
"tal:r": "text-align-last:right",
"td": "text-decoration:|;",
"td:n": "text-decoration:none",
"td:u": "text-decoration:underline",
"td:o": "text-decoration:overline",
"td:l": "text-decoration:line-through",
"te": "text-emphasis:|;",
"te:n": "text-emphasis:none",
"te:ac": "text-emphasis:accent",
"te:dt": "text-emphasis:dot",
"te:c": "text-emphasis:circle",
"te:ds": "text-emphasis:disc",
"te:b": "text-emphasis:before",
"te:a": "text-emphasis:after",
"th": "text-height:|;",
"th:a": "text-height:auto",
"th:f": "text-height:font-size",
"th:t": "text-height:text-size",
"th:m": "text-height:max-size",
"ti": "text-indent:|;",
"ti:-": "text-indent:-9999px",
"tj": "text-justify:|;",
"tj:a": "text-justify:auto",
"tj:iw": "text-justify:inter-word",
"tj:ii": "text-justify:inter-ideograph",
"tj:ic": "text-justify:inter-cluster",
"tj:d": "text-justify:distribute",
"tj:k": "text-justify:kashida",
"tj:t": "text-justify:tibetan",
"to": "text-outline:|;",
"to+": "text-outline:0 0 #000",
"to:n": "text-outline:none",
"tr": "text-replace:|;",
"tr:n": "text-replace:none",
"tt": "text-transform:|;",
"tt:n": "text-transform:none",
"tt:c": "text-transform:capitalize",
"tt:u": "text-transform:uppercase",
"tt:l": "text-transform:lowercase",
"tw": "text-wrap:|;",
"tw:n": "text-wrap:normal",
"tw:no": "text-wrap:none",
"tw:u": "text-wrap:unrestricted",
"tw:s": "text-wrap:suppress",
"tsh": "text-shadow:|;",
"tsh+": "text-shadow:0 0 0 #000",
"tsh:n": "text-shadow:none",
"lh": "line-height:|;",
"whs": "white-space:|;",
"whs:n": "white-space:normal",
"whs:p": "white-space:pre",
"whs:nw": "white-space:nowrap",
"whs:pw": "white-space:pre-wrap",
"whs:pl": "white-space:pre-line",
"whsc": "white-space-collapse:|;",
"whsc:n": "white-space-collapse:normal",
"whsc:k": "white-space-collapse:keep-all",
"whsc:l": "white-space-collapse:loose",
"whsc:bs": "white-space-collapse:break-strict",
"whsc:ba": "white-space-collapse:break-all",
"wob": "word-break:|;",
"wob:n": "word-break:normal",
"wob:k": "word-break:keep-all",
"wob:l": "word-break:loose",
"wob:bs": "word-break:break-strict",
"wob:ba": "word-break:break-all",
"wos": "word-spacing:|;",
"wow": "word-wrap:|;",
"wow:nm": "word-wrap:normal",
"wow:n": "word-wrap:none",
"wow:u": "word-wrap:unrestricted",
"wow:s": "word-wrap:suppress",
"lts": "letter-spacing:|;",
"f": "font:|;",
"f+": "font:1em Arial,sans-serif",
"fw": "font-weight:|;",
"fw:n": "font-weight:normal",
"fw:b": "font-weight:bold",
"fw:br": "font-weight:bolder",
"fw:lr": "font-weight:lighter",
"fs": "font-style:|;",
"fs:n": "font-style:normal",
"fs:i": "font-style:italic",
"fs:o": "font-style:oblique",
"fv": "font-variant:|;",
"fv:n": "font-variant:normal",
"fv:sc": "font-variant:small-caps",
"fz": "font-size:|;",
"fza": "font-size-adjust:|;",
"fza:n": "font-size-adjust:none",
"ff": "font-family:|;",
"ff:s": "font-family:serif",
"ff:ss": "font-family:sans-serif",
"ff:c": "font-family:cursive",
"ff:f": "font-family:fantasy",
"ff:m": "font-family:monospace",
"fef": "font-effect:|;",
"fef:n": "font-effect:none",
"fef:eg": "font-effect:engrave",
"fef:eb": "font-effect:emboss",
"fef:o": "font-effect:outline",
"fem": "font-emphasize:|;",
"femp": "font-emphasize-position:|;",
"femp:b": "font-emphasize-position:before",
"femp:a": "font-emphasize-position:after",
"fems": "font-emphasize-style:|;",
"fems:n": "font-emphasize-style:none",
"fems:ac": "font-emphasize-style:accent",
"fems:dt": "font-emphasize-style:dot",
"fems:c": "font-emphasize-style:circle",
"fems:ds": "font-emphasize-style:disc",
"fsm": "font-smooth:|;",
"fsm:a": "font-smooth:auto",
"fsm:n": "font-smooth:never",
"fsm:aw": "font-smooth:always",
"fst": "font-stretch:|;",
"fst:n": "font-stretch:normal",
"fst:uc": "font-stretch:ultra-condensed",
"fst:ec": "font-stretch:extra-condensed",
"fst:c": "font-stretch:condensed",
"fst:sc": "font-stretch:semi-condensed",
"fst:se": "font-stretch:semi-expanded",
"fst:e": "font-stretch:expanded",
"fst:ee": "font-stretch:extra-expanded",
"fst:ue": "font-stretch:ultra-expanded",
"op": "opacity:|;",
"op:ie": "filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=100)",
"op:ms": "-ms-filter:'progid:DXImageTransform.Microsoft.Alpha(Opacity=100)'",
"rz": "resize:|;",
"rz:n": "resize:none",
"rz:b": "resize:both",
"rz:h": "resize:horizontal",
"rz:v": "resize:vertical",
"cur": "cursor:|;",
"cur:a": "cursor:auto",
"cur:d": "cursor:default",
"cur:c": "cursor:crosshair",
"cur:ha": "cursor:hand",
"cur:he": "cursor:help",
"cur:m": "cursor:move",
"cur:p": "cursor:pointer",
"cur:t": "cursor:text",
"pgbb": "page-break-before:|;",
"pgbb:au": "page-break-before:auto",
"pgbb:al": "page-break-before:always",
"pgbb:l": "page-break-before:left",
"pgbb:r": "page-break-before:right",
"pgbi": "page-break-inside:|;",
"pgbi:au": "page-break-inside:auto",
"pgbi:av": "page-break-inside:avoid",
"pgba": "page-break-after:|;",
"pgba:au": "page-break-after:auto",
"pgba:al": "page-break-after:always",
"pgba:l": "page-break-after:left",
"pgba:r": "page-break-after:right",
"orp": "orphans:|;",
"wid": "widows:|;"
}
},
'xsl': {
'default_attributes': {
'tmatch': [{'match': ''}, {'mode': ''}],
'tname': {'name': ''},
'xsl:when': {'test': ''}
},
'aliases': {
'tmatch': 'xsl:template',
'tname': 'xsl:template'
},
'expandos': {
'choose': 'xsl:choose>xsl:when'
}
},
'xml': {
}
}
|
|
"""
pin info:
the staccato and hold buttons connect to generic digital GPIOs
pin 37: hold
pin 31: staccato_1
pin 33: staccato_2
pin 35: staccato_3
voice key 1 encoder is connected through SPI
pin 19: SPI 0 MOSI
pin 21: SPI 0 MISO
pin 23: SPI 0 CLOCK
pin 36: SPI 0 CHIP_SELECT_MASTER
pin 2: +5VDC
pin 6: Ground
voice key 2 encoder is connected through SPI
pin 19: SPI 0 MOSI
pin 21: SPI 0 MISO
pin 23: SPI 0 CLOCK
pin 38: SPI 0 CHIP_SELECT_MASTER
pin 2: +5VDC
pin 6: Ground
voice key 3 encoder is connected through SPI
pin 19: SPI 0 MOSI
pin 21: SPI 0 MISO
pin 23: SPI 0 CLOCK
pin 40: SPI 0 CHIP_SELECT_MASTER
pin 2: +5VDC
pin 6: Ground
"""
from __future__ import division
import test_that_works
import AMT203
import commands  # stdlib (Python 2); used by Utils.get_git_timestamp
import os
import Queue
import settings
import subprocess  # used by Utils.remote_update_git
import time
import threading
import traceback
import sys
from thirtybirds_2_0.Network.manager import init as network_init
#from thirtybirds_2_0.Adaptors.Sensors import AMT203_expanded_spi
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
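# NOTE: the docstring above lists physical header pins, while the code below
# uses BCM numbering: buttons on physical 37/31/33/35 -> BCM 26/6/13/19, and
# SPI chip selects on physical 36/38/40 -> BCM 16/20/21.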
class Network(object):
def __init__(self, hostname, network_message_handler, network_status_handler):
self.hostname = hostname
self.thirtybirds = network_init(
hostname=hostname,
role="client",
discovery_multicastGroup=settings.discovery_multicastGroup,
discovery_multicastPort=settings.discovery_multicastPort,
discovery_responsePort=settings.discovery_responsePort,
pubsub_pubPort=settings.pubsub_pubPort,
message_callback=network_message_handler,
status_callback=network_status_handler
)
########################
## UTILS
########################
class Utils(object):
def __init__(self, hostname):
self.hostname = hostname
def reboot(self):
os.system("sudo reboot now")
def get_shelf_id(self):
return self.hostname[11:][:1]
def get_camera_id(self):
return self.hostname[12:]
def create_image_file_name(self, timestamp, light_level, process_type):
return "{}_{}_{}_{}_{}.png".format(timestamp, self.get_shelf_id() , self.get_camera_id(), light_level, process_type)
def remote_update_git(self, oratio, thirtybirds, update, upgrade):
if oratio:
subprocess.call(['sudo', 'git', 'pull'], cwd='/home/pi/oratio')
if thirtybirds:
subprocess.call(['sudo', 'git', 'pull'], cwd='/home/pi/thirtybirds_2_0')
return
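    # NOTE: updates_init below is not imported in this module; it is presumably
    # supplied by the thirtybirds update manager (assumption, not verified).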
def remote_update_scripts(self):
updates_init("/home/pi/oratio", False, True)
return
def get_update_script_version(self):
(updates, ghStatus, bsStatus) = updates_init("/home/pi/oratio", False, False)
return updates.read_version_pickle()
def get_git_timestamp(self):
return commands.getstatusoutput("cd /home/pi/oratio/; git log -1 --format=%cd")[1]
def get_client_status(self):
return (self.hostname, self.get_update_script_version(), self.get_git_timestamp())
class Voice_Key(object):
def __init__(self, spi_chip_select_pin, min_encoder_position, max_encoder_position):
self.min_encoder_position = min_encoder_position
self.max_encoder_position = max_encoder_position
self.resolution = 4096
print "Voice_Key.__init__ 0", spi_chip_select_pin
self.encoder = AMT203.AMT203(0, 0, spi_chip_select_pin)
print "Voice_Key.__init__ 1", self.encoder
time.sleep(1)
self.encoder.set_zero()
print "Voice_Key.__init__ 2"
        self.last_encoder_position = self.encoder.get_position()
print "Voice_Key initialized with spi_chip_select_pin", spi_chip_select_pin
def normalize(self, encoder_value):
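        # Raw counts below 2000 are snapped to full resolution, so after the
        # inversion below they read as zero travel; presumably this treats
        # readings that wrapped past the encoder's zero point (assumption
        # inferred from the arithmetic, not from the AMT203 datasheet).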
if encoder_value < 2000:
encoder_value = self.resolution
inverse_value = self.resolution - encoder_value
#print "normalize 0", encoder_value, inverse_value
if inverse_value <= self.min_encoder_position:
inverse_value = self.min_encoder_position
#print "normalize 1", inverse_value
if inverse_value > self.max_encoder_position:
inverse_value = self.max_encoder_position
#print "normalize 2", inverse_value
ranged_value = inverse_value / (self.max_encoder_position - self.min_encoder_position)
#print "normalize 3", ranged_value
return ranged_value
def get_value(self):
current_encoder_position = self.encoder.get_position()
        if current_encoder_position != self.last_encoder_position:
            self.last_encoder_position = current_encoder_position
return self.normalize(current_encoder_position)
else:
return None
class Voice_Keys():
def __init__(self):
        spi_chip_select_pins = [16, 20, 21]
min_encoder_positions = [0, 0, 0]
max_encoder_positions = [100.0, 100.0, 100.0]
        self.voice_keys = [
            Voice_Key(spi_chip_select_pins[key_number],
                      min_encoder_positions[key_number],
                      max_encoder_positions[key_number])
            for key_number in range(3)
        ]
def get_positions(self):
voice_key_new_positions = []
for key_number, voice_key in enumerate(self.voice_keys):
voice_key_new_position = voice_key.get_value()
if voice_key_new_position is not None:
voice_key_new_positions.append((key_number, voice_key_new_position))
return voice_key_new_positions
class Button(object):
def __init__(self, name, pin):
self.name = name
self.pin = pin
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        self.last_state = None  # None so the first query always reports a state change
def get_state(self):
current_state = GPIO.input(self.pin)
if current_state == self.last_state:
return (self.name, None)
else:
self.last_state = current_state
return (self.name, GPIO.input(self.pin))
class Buttons(object):
def __init__(self, defs):
        self.buttons = [Button(name, pin) for name, pin in defs]
def get_states(self):
button_states = []
for button in self.buttons:
name, state = button.get_state()
if state is not None:
button_states.append((name, state))
return button_states
# Main handles network send/recv and can see all other classes directly
class Main(threading.Thread):
def __init__(self, hostname):
print "----- start main init"
threading.Thread.__init__(self)
self.network = Network(hostname, self.network_message_handler, self.network_status_handler)
self.queue = Queue.Queue()
self.utils = Utils(hostname)
self.voice_keys = Voice_Keys()
defs = [
("hold", 26),
("staccato_3", 19),
("staccato_2", 13),
("staccato_1", 06),
]
self.buttons = Buttons(defs)
self.hold = False
self.staccato_1 = False
self.staccato_2 = False
self.staccato_3 = False
#self.network.thirtybirds.subscribe_to_topic("door_closed")
self.status = {
"avl-voice-keys":"pass",
"avl-voice-keys-encoder-1":"pass",
"avl-voice-keys-encoder-2":"pass",
"avl-voice-keys-encoder-3":"pass"
}
self.network.thirtybirds.subscribe_to_topic("client_monitor_request")
self.network.thirtybirds.subscribe_to_topic("mandala_device_request")
print "----- end main init"
def update_device_status(self, devicename, status):
print "update_device_status 1",devicename, status
if self.status[devicename] != status:
self.status[devicename] = status
msg = [devicename, status]
print "update_device_status 2",devicename, status
self.network.thirtybirds.send("mandala_device_status", msg)
def get_device_status(self):
for devicename in self.status:
msg = [devicename, self.status[devicename]]
self.network.thirtybirds.send("mandala_device_status", msg)
def network_message_handler(self, topic_msg):
        # this method runs in the thread of the caller, not the thread of Main
        topic, msg = topic_msg  # separating just to eval msg. best to do it early. it should be done in TB.
if len(msg) > 0:
msg = eval(msg)
self.add_to_queue(topic, msg)
def network_status_handler(self, topic_msg):
        # this method runs in the thread of the caller, not the thread of Main
print "Main.network_status_handler", topic_msg
def add_to_queue(self, topic, msg):
self.queue.put((topic, msg))
def run(self):
print "----- start main run - before loop"
topic_names = ["voice_key_1_position", "voice_key_2_position", "voice_key_3_position"]
while True:
try:
try:
topic, msg = self.queue.get(False)
if topic == "mandala_device_request":
self.get_device_status()
except Queue.Empty:
pass
# how do I add this with non-blocking queues
button_states = self.buttons.get_states()
for button_state in button_states:
name, state = button_state
                    if name == "hold":
                        self.hold = (state == 0)
                    if name == "staccato_1":
                        self.staccato_1 = (state == 0)
                    if name == "staccato_2":
                        self.staccato_2 = (state == 0)
                    if name == "staccato_3":
                        self.staccato_3 = (state == 0)
if not self.hold:
voice_key_positions = self.voice_keys.get_positions()
for voice_key_position in voice_key_positions:
key_number, voice_key_new_position = voice_key_position
if key_number == 0 and self.staccato_1:
voice_key_new_position = 1.0 if voice_key_new_position >= 0.15 else 0.0
if key_number == 1 and self.staccato_2:
voice_key_new_position = 1.0 if voice_key_new_position >= 0.15 else 0.0
if key_number == 2 and self.staccato_3:
voice_key_new_position = 1.0 if voice_key_new_position >= 0.15 else 0.0
print key_number, voice_key_new_position
#print "button:", name, state
#print "button_states:", button_states
self.network.thirtybirds.send(topic_names[key_number], voice_key_new_position)
time.sleep(0.03)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
print e, repr(traceback.format_exception(exc_type, exc_value,exc_traceback))
time.sleep(0.03)
def init(hostname):
main = Main(hostname)
main.daemon = True
main.start()
return main
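# A minimal usage sketch (the hostname is hypothetical; in practice init() is
# expected to be called by the thirtybirds client launcher):
#   main = init("voicekeys-client-1")
#   while True:
#       time.sleep(1)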
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Info_email'
db.create_table('blogs_info_email', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('blog', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blogs.Blog'], null=True)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('status', self.gf('django.db.models.fields.CharField')(max_length=2)),
('content', self.gf('django.db.models.fields.TextField')(max_length=10000, blank=True)),
('name', self.gf('django.db.models.fields.TextField')(max_length=100, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('blogs', ['Info_email'])
def backwards(self, orm):
# Deleting model 'Info_email'
db.delete_table('blogs_info_email')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'blogs.blog': {
'Meta': {'object_name': 'Blog'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'custom_domain': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '30'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True', 'blank': 'True'})
},
'blogs.category': {
'Meta': {'object_name': 'Category'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'}),
'top_level_cat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'})
},
'blogs.comment': {
'Meta': {'object_name': 'Comment'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'notify_me': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Post']", 'null': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
},
'blogs.info_email': {
'Meta': {'object_name': 'Info_email'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'blogs.page': {
'Meta': {'object_name': 'Page'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.post': {
'Meta': {'object_name': 'Post'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_0': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_01': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_4': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_5': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_6': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_video': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_ready': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'layout_type': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_0': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_04': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_1': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_10': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_11': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_12': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_13': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_14': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_15': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_16': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_17': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_18': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_19': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_2': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_20': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_21': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_22': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_23': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_24': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_3': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_4': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_5': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_6': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_7': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_8': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_9': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '2', 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'translated_content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'translated_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'youtube_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
},
'blogs.subscription': {
'Meta': {'object_name': 'Subscription'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'blogs.tag': {
'Meta': {'object_name': 'Tag'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blogs']
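# Usage sketch: under the South-era workflow this migration would typically be
# applied with `./manage.py migrate blogs`.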
|
|
from direct.directnotify import DirectNotifyGlobal
from toontown.battle import BattlePlace
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.showbase import BulletinBoardWatcher
from panda3d.core import *
from panda3d.direct import *
from otp.distributed.TelemetryLimiter import RotationLimitToH, TLGatherAllAvs
from toontown.toon import Toon
from toontown.toonbase import ToontownGlobals
from toontown.hood import ZoneUtil
from toontown.toonbase import TTLocalizer
from toontown.toontowngui import TTDialog
from toontown.toonbase import ToontownBattleGlobals
from toontown.coghq import DistributedCountryClub
from toontown.building import Elevator
from otp.nametag import NametagGlobals
import random
class CountryClubInterior(BattlePlace.BattlePlace):
notify = DirectNotifyGlobal.directNotify.newCategory('CountryClubInterior')
def __init__(self, loader, parentFSM, doneEvent):
BattlePlace.BattlePlace.__init__(self, loader, doneEvent)
self.parentFSM = parentFSM
self.zoneId = loader.countryClubId
self.elevatorDoneEvent = 'elevatorDone'
self.fsm = ClassicFSM.ClassicFSM('CountryClubInterior', [State.State('start', self.enterStart, self.exitStart, ['walk', 'teleportIn', 'fallDown']),
State.State('walk', self.enterWalk, self.exitWalk, ['push',
'sit',
'stickerBook',
'WaitForBattle',
'battle',
'died',
'teleportOut',
'squished',
'DFA',
'fallDown',
'stopped',
'elevator']),
State.State('stopped', self.enterStopped, self.exitStopped, ['walk', 'teleportOut', 'stickerBook']),
State.State('sit', self.enterSit, self.exitSit, ['walk', 'died', 'teleportOut']),
State.State('push', self.enterPush, self.exitPush, ['walk', 'died', 'teleportOut']),
State.State('stickerBook', self.enterStickerBook, self.exitStickerBook, ['walk',
'battle',
'DFA',
'WaitForBattle',
'died',
'teleportOut']),
State.State('WaitForBattle', self.enterWaitForBattle, self.exitWaitForBattle, ['battle',
'walk',
'died',
'teleportOut']),
State.State('battle', self.enterBattle, self.exitBattle, ['walk', 'teleportOut', 'died']),
State.State('fallDown', self.enterFallDown, self.exitFallDown, ['walk', 'died', 'teleportOut']),
State.State('squished', self.enterSquished, self.exitSquished, ['walk', 'died', 'teleportOut']),
State.State('teleportIn', self.enterTeleportIn, self.exitTeleportIn, ['walk',
'teleportOut',
'quietZone',
'died']),
State.State('teleportOut', self.enterTeleportOut, self.exitTeleportOut, ['teleportIn',
'FLA',
'quietZone',
'WaitForBattle']),
State.State('DFA', self.enterDFA, self.exitDFA, ['DFAReject', 'teleportOut']),
            State.State('DFAReject', self.enterDFAReject, self.exitDFAReject, ['walk', 'teleportOut']),
State.State('died', self.enterDied, self.exitDied, ['teleportOut']),
State.State('FLA', self.enterFLA, self.exitFLA, ['quietZone']),
State.State('quietZone', self.enterQuietZone, self.exitQuietZone, ['teleportIn']),
State.State('elevator', self.enterElevator, self.exitElevator, ['walk']),
State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
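        # ClassicFSM only permits the transitions listed for each state above;
        # requesting an unlisted transition is typically rejected at runtime.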
def load(self):
self.parentFSM.getStateNamed('countryClubInterior').addChild(self.fsm)
BattlePlace.BattlePlace.load(self)
musicName = random.choice(['phase_12/audio/bgm/Bossbot_Factory_v1.ogg', 'phase_12/audio/bgm/Bossbot_Factory_v2.ogg', 'phase_12/audio/bgm/Bossbot_Factory_v3.ogg', 'phase_12/audio/bgm/Bossbot_Factory_v4.ogg'])
self.music = base.loadMusic(musicName)
def unload(self):
self.parentFSM.getStateNamed('countryClubInterior').removeChild(self.fsm)
del self.music
del self.fsm
del self.parentFSM
BattlePlace.BattlePlace.unload(self)
def enter(self, requestStatus):
self.fsm.enterInitialState()
base.transitions.fadeOut(t=0)
base.localAvatar.inventory.setRespectInvasions(0)
base.cr.forbidCheesyEffects(1)
self._telemLimiter = TLGatherAllAvs('CountryClubInterior', RotationLimitToH)
def commence(self = self):
NametagGlobals.setMasterArrowsOn(1)
self.fsm.request(requestStatus['how'], [requestStatus])
base.playMusic(self.music, looping=1, volume=0.8)
base.transitions.irisIn()
CountryClub = bboard.get(DistributedCountryClub.DistributedCountryClub.ReadyPost)
self.loader.hood.spawnTitleText(CountryClub.countryClubId, CountryClub.floorNum)
self.CountryClubReadyWatcher = BulletinBoardWatcher.BulletinBoardWatcher('CountryClubReady', DistributedCountryClub.DistributedCountryClub.ReadyPost, commence)
self.CountryClubDefeated = 0
self.acceptOnce(DistributedCountryClub.DistributedCountryClub.WinEvent, self.handleCountryClubWinEvent)
if __debug__ and 0:
self.accept('f10', lambda : messenger.send(DistributedCountryClub.DistributedCountryClub.WinEvent))
self.confrontedBoss = 0
def handleConfrontedBoss(self = self):
self.confrontedBoss = 1
self.acceptOnce('localToonConfrontedCountryClubBoss', handleConfrontedBoss)
def exit(self):
NametagGlobals.setMasterArrowsOn(0)
bboard.remove(DistributedCountryClub.DistributedCountryClub.ReadyPost)
self._telemLimiter.destroy()
del self._telemLimiter
base.cr.forbidCheesyEffects(0)
base.localAvatar.inventory.setRespectInvasions(1)
self.fsm.requestFinalState()
self.loader.music.stop()
self.music.stop()
self.ignoreAll()
del self.CountryClubReadyWatcher
def enterStopped(self):
BattlePlace.BattlePlace.enterStopped(self)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterWalk(self, teleportIn = 0):
BattlePlace.BattlePlace.enterWalk(self, teleportIn)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterPush(self):
BattlePlace.BattlePlace.enterPush(self)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterWaitForBattle(self):
CountryClubInterior.notify.debug('enterWaitForBattle')
BattlePlace.BattlePlace.enterWaitForBattle(self)
if base.localAvatar.getParent() != render:
base.localAvatar.wrtReparentTo(render)
base.localAvatar.b_setParent(ToontownGlobals.SPRender)
def exitWaitForBattle(self):
CountryClubInterior.notify.debug('exitWaitForBattle')
BattlePlace.BattlePlace.exitWaitForBattle(self)
def enterBattle(self, event):
CountryClubInterior.notify.debug('enterBattle')
self.music.stop()
BattlePlace.BattlePlace.enterBattle(self, event)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterTownBattle(self, event):
mult = ToontownBattleGlobals.getCountryClubCreditMultiplier(self.zoneId)
base.localAvatar.inventory.setBattleCreditMultiplier(mult)
self.loader.townBattle.enter(event, self.fsm.getStateNamed('battle'), bldg=1, creditMultiplier=mult)
def exitBattle(self):
CountryClubInterior.notify.debug('exitBattle')
BattlePlace.BattlePlace.exitBattle(self)
self.loader.music.stop()
base.playMusic(self.music, looping=1, volume=0.8)
def enterStickerBook(self, page = None):
BattlePlace.BattlePlace.enterStickerBook(self, page)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterSit(self):
BattlePlace.BattlePlace.enterSit(self)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterZone(self, zoneId):
pass
def enterTeleportOut(self, requestStatus):
CountryClubInterior.notify.debug('enterTeleportOut()')
BattlePlace.BattlePlace.enterTeleportOut(self, requestStatus, self.__teleportOutDone)
def __processLeaveRequest(self, requestStatus):
hoodId = requestStatus['hoodId']
if hoodId == ToontownGlobals.MyEstate:
self.getEstateZoneAndGoHome(requestStatus)
else:
self.doneStatus = requestStatus
messenger.send(self.doneEvent)
def __teleportOutDone(self, requestStatus):
CountryClubInterior.notify.debug('__teleportOutDone()')
messenger.send('leavingCountryClub')
messenger.send('localToonLeft')
if self.CountryClubDefeated and not self.confrontedBoss:
self.fsm.request('FLA', [requestStatus])
else:
self.__processLeaveRequest(requestStatus)
def exitTeleportOut(self):
CountryClubInterior.notify.debug('exitTeleportOut()')
BattlePlace.BattlePlace.exitTeleportOut(self)
def handleCountryClubWinEvent(self):
CountryClubInterior.notify.debug('handleCountryClubWinEvent')
if base.cr.playGame.getPlace().fsm.getCurrentState().getName() == 'died':
return
self.CountryClubDefeated = 1
if 1:
zoneId = ZoneUtil.getHoodId(self.zoneId)
else:
zoneId = ZoneUtil.getSafeZoneId(base.localAvatar.defaultZone)
self.fsm.request('teleportOut', [{
'loader': ZoneUtil.getLoaderName(zoneId),
'where': ZoneUtil.getToonWhereName(zoneId),
'how': 'teleportIn',
'hoodId': zoneId,
'zoneId': zoneId,
'shardId': None,
'avId': -1,
}])
def enterDied(self, requestStatus, callback = None):
CountryClubInterior.notify.debug('enterDied')
def diedDone(requestStatus, self = self, callback = callback):
if callback is not None:
callback()
messenger.send('leavingCountryClub')
self.doneStatus = requestStatus
messenger.send(self.doneEvent)
return
BattlePlace.BattlePlace.enterDied(self, requestStatus, diedDone)
def enterFLA(self, requestStatus):
CountryClubInterior.notify.debug('enterFLA')
self.flaDialog = TTDialog.TTGlobalDialog(message=TTLocalizer.ForcedLeaveCountryClubAckMsg, doneEvent='FLADone', style=TTDialog.Acknowledge, fadeScreen=1)
def continueExit(self = self, requestStatus = requestStatus):
self.__processLeaveRequest(requestStatus)
self.accept('FLADone', continueExit)
self.flaDialog.show()
def exitFLA(self):
CountryClubInterior.notify.debug('exitFLA')
if hasattr(self, 'flaDialog'):
self.flaDialog.cleanup()
del self.flaDialog
def detectedElevatorCollision(self, distElevator):
self.fsm.request('elevator', [distElevator])
def enterElevator(self, distElevator, skipDFABoard = 0):
self.accept(self.elevatorDoneEvent, self.handleElevatorDone)
self.elevator = Elevator.Elevator(self.fsm.getStateNamed('elevator'), self.elevatorDoneEvent, distElevator)
if skipDFABoard:
self.elevator.skipDFABoard = 1
self.elevator.setReverseBoardingCamera(True)
distElevator.elevatorFSM = self.elevator
self.elevator.load()
self.elevator.enter()
def exitElevator(self):
self.ignore(self.elevatorDoneEvent)
self.elevator.unload()
self.elevator.exit()
def handleElevatorDone(self, doneStatus):
self.notify.debug('handling elevator done event')
where = doneStatus['where']
if where == 'reject':
if hasattr(base.localAvatar, 'elevatorNotifier') and base.localAvatar.elevatorNotifier.isNotifierOpen():
pass
else:
self.fsm.request('walk')
elif where == 'exit':
self.fsm.request('walk')
elif where == 'factoryInterior' or where == 'suitInterior':
self.doneStatus = doneStatus
self.doneEvent = 'lawOfficeFloorDone'
messenger.send(self.doneEvent)
else:
self.notify.error('Unknown mode: ' + where + ' in handleElevatorDone')
|
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
from collections import deque
from functools import partial
import logging
import os
import socket
import ssl
from threading import Lock, Thread
import time
import weakref
from six.moves import range
from cassandra.connection import (Connection, ConnectionShutdown,
NONBLOCKING, Timer, TimerManager)
try:
import cassandra.io.libevwrapper as libev
except ImportError:
raise ImportError(
"The C extension needed to use libev was not found. This "
"probably means that you didn't have the required build dependencies "
"when installing the driver. See "
"http://datastax.github.io/python-driver/installation.html#c-extensions "
"for instructions on installing build dependencies and building "
"the C extension.")
log = logging.getLogger(__name__)
def _cleanup(loop_weakref):
try:
loop = loop_weakref()
except ReferenceError:
return
loop._cleanup()
class LibevLoop(object):
def __init__(self):
self._pid = os.getpid()
self._loop = libev.Loop()
self._notifier = libev.Async(self._loop)
self._notifier.start()
# prevent _notifier from keeping the loop from returning
self._loop.unref()
self._started = False
self._shutdown = False
self._lock = Lock()
self._lock_thread = Lock()
self._thread = None
# set of all connections; only replaced with a new copy
# while holding _conn_set_lock, never modified in place
self._live_conns = set()
# newly created connections that need their write/read watcher started
self._new_conns = set()
# recently closed connections that need their write/read watcher stopped
self._closed_conns = set()
self._conn_set_lock = Lock()
self._preparer = libev.Prepare(self._loop, self._loop_will_run)
# prevent _preparer from keeping the loop from returning
self._loop.unref()
self._preparer.start()
self._timers = TimerManager()
self._loop_timer = libev.Timer(self._loop, self._on_loop_timer)
atexit.register(partial(_cleanup, weakref.ref(self)))
def maybe_start(self):
should_start = False
with self._lock:
if not self._started:
log.debug("Starting libev event loop")
self._started = True
should_start = True
if should_start:
with self._lock_thread:
if not self._shutdown:
self._thread = Thread(target=self._run_loop, name="event_loop")
self._thread.daemon = True
self._thread.start()
self._notifier.send()
def _run_loop(self):
while True:
self._loop.start()
# there are still active watchers, no deadlock
with self._lock:
if not self._shutdown and self._live_conns:
log.debug("Restarting event loop")
continue
else:
# all Connections have been closed, no active watchers
log.debug("All Connections currently closed, event loop ended")
self._started = False
break
def _cleanup(self):
self._shutdown = True
if not self._thread:
return
for conn in self._live_conns | self._new_conns | self._closed_conns:
conn.close()
for watcher in (conn._write_watcher, conn._read_watcher):
if watcher:
watcher.stop()
self.notify() # wake the timer watcher
# PYTHON-752 Thread might have just been created and not started
with self._lock_thread:
self._thread.join(timeout=1.0)
if self._thread.is_alive():
log.warning(
"Event loop thread could not be joined, so shutdown may not be clean. "
"Please call Cluster.shutdown() to avoid this.")
log.debug("Event loop thread was joined")
def add_timer(self, timer):
self._timers.add_timer(timer)
self._notifier.send() # wake up in case this timer is earlier
def _update_timer(self):
if not self._shutdown:
next_end = self._timers.service_timeouts()
if next_end:
self._loop_timer.start(next_end - time.time()) # timer handles negative values
else:
self._loop_timer.stop()
def _on_loop_timer(self):
self._timers.service_timeouts()
def notify(self):
self._notifier.send()
def connection_created(self, conn):
with self._conn_set_lock:
new_live_conns = self._live_conns.copy()
new_live_conns.add(conn)
self._live_conns = new_live_conns
new_new_conns = self._new_conns.copy()
new_new_conns.add(conn)
self._new_conns = new_new_conns
def connection_destroyed(self, conn):
with self._conn_set_lock:
new_live_conns = self._live_conns.copy()
new_live_conns.discard(conn)
self._live_conns = new_live_conns
new_closed_conns = self._closed_conns.copy()
new_closed_conns.add(conn)
self._closed_conns = new_closed_conns
self._notifier.send()
def _loop_will_run(self, prepare):
changed = False
for conn in self._live_conns:
if not conn.deque and conn._write_watcher_is_active:
if conn._write_watcher:
conn._write_watcher.stop()
conn._write_watcher_is_active = False
changed = True
elif conn.deque and not conn._write_watcher_is_active:
conn._write_watcher.start()
conn._write_watcher_is_active = True
changed = True
if self._new_conns:
with self._conn_set_lock:
to_start = self._new_conns
self._new_conns = set()
for conn in to_start:
conn._read_watcher.start()
changed = True
if self._closed_conns:
with self._conn_set_lock:
to_stop = self._closed_conns
self._closed_conns = set()
for conn in to_stop:
if conn._write_watcher:
conn._write_watcher.stop()
# clear reference cycles from IO callback
del conn._write_watcher
if conn._read_watcher:
conn._read_watcher.stop()
# clear reference cycles from IO callback
del conn._read_watcher
changed = True
# TODO: update to do connection management, timer updates through dedicated async 'notifier' callbacks
self._update_timer()
if changed:
self._notifier.send()
class LibevConnection(Connection):
"""
An implementation of :class:`.Connection` that uses libev for its event loop.
"""
_libevloop = None
_write_watcher_is_active = False
_read_watcher = None
_write_watcher = None
_socket = None
@classmethod
def initialize_reactor(cls):
if not cls._libevloop:
cls._libevloop = LibevLoop()
else:
if cls._libevloop._pid != os.getpid():
log.debug("Detected fork, clearing and reinitializing reactor state")
cls.handle_fork()
cls._libevloop = LibevLoop()
@classmethod
def handle_fork(cls):
if cls._libevloop:
cls._libevloop._cleanup()
cls._libevloop = None
@classmethod
def create_timer(cls, timeout, callback):
timer = Timer(timeout, callback)
cls._libevloop.add_timer(timer)
return timer
def __init__(self, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
self.deque = deque()
self._deque_lock = Lock()
self._connect_socket()
self._socket.setblocking(0)
with self._libevloop._lock:
self._read_watcher = libev.IO(self._socket.fileno(), libev.EV_READ, self._libevloop._loop, self.handle_read)
self._write_watcher = libev.IO(self._socket.fileno(), libev.EV_WRITE, self._libevloop._loop, self.handle_write)
self._send_options_message()
self._libevloop.connection_created(self)
# start the global event loop if needed
self._libevloop.maybe_start()
def close(self):
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.host)
self._libevloop.connection_destroyed(self)
self._socket.close()
log.debug("Closed socket to %s", self.host)
# don't leave in-progress operations hanging
if not self.is_defunct:
self.error_all_requests(
ConnectionShutdown("Connection to %s was closed" % self.host))
def handle_write(self, watcher, revents, errno=None):
if revents & libev.EV_ERROR:
if errno:
exc = IOError(errno, os.strerror(errno))
else:
exc = Exception("libev reported an error")
self.defunct(exc)
return
while True:
try:
with self._deque_lock:
next_msg = self.deque.popleft()
except IndexError:
return
try:
sent = self._socket.send(next_msg)
except socket.error as err:
                if err.args[0] in NONBLOCKING:
with self._deque_lock:
self.deque.appendleft(next_msg)
else:
self.defunct(err)
return
else:
if sent < len(next_msg):
with self._deque_lock:
self.deque.appendleft(next_msg[sent:])
def handle_read(self, watcher, revents, errno=None):
if revents & libev.EV_ERROR:
if errno:
exc = IOError(errno, os.strerror(errno))
else:
exc = Exception("libev reported an error")
self.defunct(exc)
return
try:
while True:
buf = self._socket.recv(self.in_buffer_size)
self._iobuf.write(buf)
if len(buf) < self.in_buffer_size:
break
except socket.error as err:
if ssl and isinstance(err, ssl.SSLError):
if err.args[0] not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
self.defunct(err)
return
elif err.args[0] not in NONBLOCKING:
self.defunct(err)
return
if self._iobuf.tell():
self.process_io_buffer()
else:
log.debug("Connection %s closed by server", self)
self.close()
def push(self, data):
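        # Large payloads are split into out_buffer_size chunks; handle_write
        # requeues the unsent tail of a chunk after a partial socket send.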
sabs = self.out_buffer_size
if len(data) > sabs:
chunks = []
for i in range(0, len(data), sabs):
chunks.append(data[i:i + sabs])
else:
chunks = [data]
with self._deque_lock:
self.deque.extend(chunks)
self._libevloop.notify()
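# A minimal usage sketch (follows the cassandra-driver Cluster API; the
# contact point is hypothetical):
#   from cassandra.cluster import Cluster
#   cluster = Cluster(['127.0.0.1'])
#   cluster.connection_class = LibevConnection
#   session = cluster.connect()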
|
|
import collections
import os
import re
import socket
import sqlite3
import struct
import tempfile
from .queries import Queries
from .wdetector import *
unperlize_re = re.compile('^/?(.*)/([si]*)$')
def make_empty_row(columns_dict):
return dict((col, None) for col in columns_dict)
class cached_property(object):
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
"""
def __init__(self, func):
self.func = func
def __get__(self, instance, unused_type=None):
if instance is None:
return self
res = instance.__dict__[self.func.__name__] = self.func(instance)
return res
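# A minimal usage sketch for cached_property (names are illustrative only):
#   class Thing(object):
#       @cached_property
#       def expensive(self):
#           return compute_once()  # runs once; result cached on the instance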
class LRUDict(collections.MutableMapping):
def __init__(self, maxlen, *a, **k):
self.maxlen = maxlen
self.d = dict(*a, **k)
while len(self) > maxlen:
self.popitem()
def __iter__(self):
return iter(self.d)
def __len__(self):
return len(self.d)
def __getitem__(self, k):
return self.d[k]
def __delitem__(self, k):
del self.d[k]
def __setitem__(self, k, v):
if k not in self and len(self) == self.maxlen:
self.popitem()
self.d[k] = v
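# NOTE: dict.popitem() removes an arbitrary entry, so LRUDict is a bounded
# cache with arbitrary eviction rather than strict least-recently-used order.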
class IdRegString(object):
def __init__(self, rowid, word_id, word2_id, pattern):
self.rowid = rowid
self.word_id = word_id
self.word2_id = word2_id
self.pattern = pattern
class UdgerBase(object):
db_filename = 'udgerdb_v3.dat'
_client_word_detector = None
_os_word_detector = None
_device_word_detector = None
_client_regstring_list = None
_os_regstring_list = None
_device_regstring_list = None
client_emptyrow = make_empty_row(Queries.client_columns)
client_emptyrow.update(
ua_class="Unrecognized",
ua_class_code="unrecognized",
)
os_emptyrow = make_empty_row(Queries.os_columns)
device_emptyrow = make_empty_row(Queries.device_columns)
marketname_emptyrow = make_empty_row(Queries.marketname_columns)
ip_datacenter_emptyrow = make_empty_row(Queries.ip_columns)
ip_datacenter_emptyrow.update(
make_empty_row(Queries.datacenter_columns),
ip_ver=None,
)
def __init__(self, data_dir=None, lru_cache_size=10000):
self.data_dir = data_dir or tempfile.gettempdir()
self.regexp_cache = {}
if lru_cache_size > 0:
self.lru_cache = LRUDict(lru_cache_size)
@staticmethod
def dict_factory(cursor, row):
return dict(
(col[0], row[idx])
for idx, col in enumerate(cursor.description)
)
perl_flags = {
's': re.DOTALL,
'i': re.IGNORECASE,
'm': re.MULTILINE,
'x': re.VERBOSE,
}
def regexp_func(self, expr, item):
global unperlize_re
expr_re = self.regexp_cache.get(expr)
if expr_re is None:
m = unperlize_re.match(expr)
old_expr = expr
flags = 0
if m:
# strip / from the beginning and /(si...) from the end
expr, opts = m.groups()
# this fails for unsupported Perl flag
flags = sum(map(self.perl_flags.get, opts))
expr_re = re.compile(expr, flags)
self.regexp_cache[old_expr] = expr_re
self.last_regexp_match = expr_re.search(item) # this does not take flags!
return bool(self.last_regexp_match)
@cached_property
def db_cursor(self):
db_filepath = os.path.join(self.data_dir, self.db_filename)
db = sqlite3.connect(db_filepath)
# db.create_function("REGEXP", 2, self.regexp_func)
cursor = db.cursor()
cursor.row_factory = self.dict_factory
return cursor
@cached_property
def client_regstring_list(self):
if UdgerBase._client_regstring_list is None:
UdgerBase._client_regstring_list = UdgerBase.prepare_regexp_struct(self.db_cursor, "udger_client_regex")
return UdgerBase._client_regstring_list
@cached_property
def os_regstring_list(self):
if UdgerBase._os_regstring_list is None:
UdgerBase._os_regstring_list = UdgerBase.prepare_regexp_struct(self.db_cursor, "udger_os_regex")
return UdgerBase._os_regstring_list
@cached_property
def device_regstring_list(self):
if UdgerBase._device_regstring_list is None:
UdgerBase._device_regstring_list = UdgerBase.prepare_regexp_struct(self.db_cursor, "udger_deviceclass_regex")
return UdgerBase._device_regstring_list
@staticmethod
def prepare_regexp_struct(db_cursor, regexp_table_name):
result = []
global unperlize_re
for row in db_cursor.execute('SELECT rowid, regstring, word_id, word2_id FROM %s ORDER BY sequence' % regexp_table_name):
regstring = row['regstring']
m = unperlize_re.match(regstring)
flags = 0
if m:
# strip / from the beginning and /(si...) from the end
regstring, opts = m.groups()
# this fails for unsupported Perl flag
flags = sum(map(UdgerBase.perl_flags.get, opts))
expr_re = re.compile(regstring, flags)
rs = IdRegString(row['rowid'], row['word_id'], row['word2_id'], expr_re)
result.append(rs)
        return result
@cached_property
def client_word_detector(self):
if UdgerBase._client_word_detector is None:
UdgerBase._client_word_detector = UdgerBase.create_word_detector(self.db_cursor, 'udger_client_regex', 'udger_client_regex_words')
return UdgerBase._client_word_detector
@cached_property
def device_word_detector(self):
if UdgerBase._device_word_detector is None:
UdgerBase._device_word_detector = UdgerBase.create_word_detector(self.db_cursor, 'udger_deviceclass_regex', 'udger_deviceclass_regex_words')
return UdgerBase._device_word_detector
@cached_property
def os_word_detector(self):
if UdgerBase._os_word_detector is None:
UdgerBase._os_word_detector = UdgerBase.create_word_detector(self.db_cursor, 'udger_os_regex', 'udger_os_regex_words')
return UdgerBase._os_word_detector
@staticmethod
def create_word_detector(db_cursor, regex_table, word_table_name):
wdetector = WordDetector()
sql = "SELECT %s FROM " + regex_table
used_words = set(row['word_id'] for row in db_cursor.execute(sql % 'word_id'))
used_words |= set(row['word2_id'] for row in db_cursor.execute(sql % 'word2_id'))
for row in db_cursor.execute('SELECT * FROM %s' % word_table_name):
if row['id'] in used_words:
wdetector.add_word(row['id'], row['word'])
return wdetector
def db_get_first_row(self, sql, *params):
# self.last_regexp_match = None
self.db_cursor.execute(sql, params)
for row in self.db_cursor:
return row
def db_iter_rows(self, sql, *params):
# self.last_regexp_match = None
self.db_cursor.execute(sql, params)
for row in self.db_cursor:
yield row
# self.last_regexp_match = None
@staticmethod
def normalize_ipaddress(ip_string):
try:
packed = socket.inet_pton(socket.AF_INET, ip_string)
ip_string = socket.inet_ntop(socket.AF_INET, packed)
ipv6_words = None
ipv4_int = struct.unpack("!L", packed)[0]
except socket.error:
packed = socket.inet_pton(socket.AF_INET6, ip_string)
ip_string = socket.inet_ntop(socket.AF_INET6, packed)
ipv6_words = struct.unpack("!8H", packed)
ipv4_int = None
return ip_string, ipv4_int, ipv6_words
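# Examples for normalize_ipaddress (values follow standard inet_pton/inet_ntop
# behaviour):
#   UdgerBase.normalize_ipaddress("192.168.0.1") -> ("192.168.0.1", 3232235521, None)
#   UdgerBase.normalize_ipaddress("::1") -> ("::1", None, (0, 0, 0, 0, 0, 0, 0, 1))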
|
|
# This represents the "top level" of the .xinfo hierarchy, and should
# exactly correspond to the contents of the .xinfo file.
from __future__ import annotations
import inspect
import json
import logging
import os
import pathlib
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Syminfo import Syminfo
from xia2.Handlers.XInfo import XInfo
from xia2.Schema.XCrystal import XCrystal
from xia2.Schema.XSample import XSample
from xia2.Schema.XWavelength import XWavelength
logger = logging.getLogger("xia2.Schema.XProject")
class XProject:
"""A representation of a complete project. This will contain a dictionary
of crystals."""
def __init__(self, xinfo_file=None, name=None, base_path=None):
self.path = pathlib.Path(base_path or os.getcwd()).absolute()
self._crystals = {}
if xinfo_file:
self._setup_from_xinfo_file(xinfo_file)
else:
self._name = name
# serialization functions
def to_dict(self):
obj = {"__id__": "XProject"}
attributes = inspect.getmembers(self, lambda m: not inspect.isroutine(m))
for a in attributes:
if a[0] == "_crystals":
crystals = {cname: cryst.to_dict() for cname, cryst in a[1].items()}
obj[a[0]] = crystals
elif a[0].startswith("__"):
continue
elif hasattr(a[1], "__fspath__"):
obj[a[0]] = a[1].__fspath__()
else:
obj[a[0]] = a[1]
return obj
@classmethod
def from_dict(cls, obj, base_path=None):
assert obj["__id__"] == "XProject"
return_obj = cls()
for k, v in obj.items():
if k == "_crystals":
v_ = {}
for cname, cdict in v.items():
cryst = XCrystal.from_dict(cdict)
cryst._project = return_obj
v_[cname] = cryst
v = v_
setattr(return_obj, k, v)
if hasattr(return_obj, "path"):
return_obj.path = pathlib.Path(return_obj.path).absolute()
else:
return_obj.path = pathlib.Path(base_path or os.getcwd()).absolute()
return return_obj
def as_json(self, filename=None, compact=True):
obj = self.to_dict()
if compact:
text = json.dumps(
obj, skipkeys=True, separators=(",", ":"), ensure_ascii=True
)
else:
text = json.dumps(obj, skipkeys=True, indent=2, ensure_ascii=True)
# If a filename is set then dump to file otherwise return string
if filename is not None:
with open(filename, "w") as outfile:
outfile.write(text)
else:
return text
@classmethod
def from_json(cls, filename=None, string=None):
def _decode_dict(data):
"""Recursively decode possible float and int values."""
rv = {}
for key, value in data.items():
if isinstance(value, dict):
value = _decode_dict(value)
try:
key = float(key)
if int(key) == key:
key = int(key)
except ValueError:
pass
rv[key] = value
return rv
assert [filename, string].count(None) == 1
if filename:
with open(filename, "rb") as f:
string = f.read()
base_path = os.path.dirname(filename)
else:
base_path = None
obj = json.loads(string, object_hook=_decode_dict)
return cls.from_dict(obj, base_path=base_path)
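    # A minimal round-trip sketch (the file name is hypothetical):
    #   xinfo = XProject(xinfo_file="project.xinfo")
    #   xinfo.as_json("xia2.json")
    #   restored = XProject.from_json("xia2.json")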
def get_output(self):
result = "Project: %s\n" % self._name
for crystal in self._crystals:
result += self._crystals[crystal].get_output()
return result[:-1]
def summarise(self):
"""Produce summary information."""
summary = ["Project: %s" % self._name]
for crystal in self._crystals:
for record in self._crystals[crystal].summarise():
summary.append(record)
return summary
def get_name(self):
return self._name
def add_crystal(self, xcrystal):
"""Add a new xcrystal to the project."""
if not xcrystal.__class__.__name__ == "XCrystal":
raise RuntimeError("crystal must be class XCrystal.")
if xcrystal.get_name() in self._crystals:
raise RuntimeError(
"XCrystal with name %s already exists" % xcrystal.get_name()
)
self._crystals[xcrystal.get_name()] = xcrystal
def get_crystals(self):
return self._crystals
def _setup_from_xinfo_file(self, xinfo_file):
"""Set up this object & all subobjects based on the .xinfo
file contents."""
settings = PhilIndex.params.xia2.settings
sweep_ids = [sweep.id for sweep in settings.sweep]
sweep_ranges = [sweep.range for sweep in settings.sweep]
if not sweep_ids:
sweep_ids = None
sweep_ranges = None
xinfo = XInfo(xinfo_file, sweep_ids=sweep_ids, sweep_ranges=sweep_ranges)
self._name = xinfo.get_project()
crystals = xinfo.get_crystals()
for crystal in crystals:
xc = XCrystal(crystal, self)
if "sequence" in crystals[crystal]:
xc.set_aa_sequence(crystals[crystal]["sequence"])
if "ha_info" in crystals[crystal]:
if crystals[crystal]["ha_info"] != {}:
xc.set_ha_info(crystals[crystal]["ha_info"])
if "scaled_merged_reflection_file" in crystals[crystal]:
xc.set_scaled_merged_reflections(
crystals[crystal]["scaled_merged_reflections"]
)
if "reference_reflection_file" in crystals[crystal]:
xc.set_reference_reflection_file(
crystals[crystal]["reference_reflection_file"]
)
if "freer_file" in crystals[crystal]:
xc.set_freer_file(crystals[crystal]["freer_file"])
# user assigned spacegroup
if "user_spacegroup" in crystals[crystal]:
xc.set_user_spacegroup(crystals[crystal]["user_spacegroup"])
elif settings.space_group is not None:
# XXX do we ever actually get here?
xc.set_user_spacegroup(settings.space_group.type().lookup_symbol())
# add a default sample if none present in xinfo file
if not crystals[crystal]["samples"]:
crystals[crystal]["samples"]["X1"] = {}
for sample in crystals[crystal]["samples"]:
xsample = XSample(sample, xc)
xc.add_sample(xsample)
if not crystals[crystal]["wavelengths"]:
raise RuntimeError("No wavelengths specified in xinfo file")
for wavelength, wave_info in crystals[crystal]["wavelengths"].items():
# FIXME 29/NOV/06 in here need to be able to cope with
# no wavelength information - this should default to the
# information in the image header (John Cowan pointed
# out that this was untidy - requiring that it agrees
# with the value in the header makes this almost
# useless.)
if "wavelength" not in wave_info:
logger.debug(
"No wavelength value given for wavelength %s", wavelength
)
else:
logger.debug(
"Overriding value for wavelength %s to %8.6f",
wavelength,
float(wave_info["wavelength"]),
)
# handle case where user writes f" in place of f''
if 'f"' in wave_info and "f''" not in wave_info:
wave_info["f''"] = wave_info['f"']
xw = XWavelength(
wavelength,
xc,
wavelength=wave_info.get("wavelength", 0.0),
f_pr=wave_info.get("f'", 0.0),
f_prpr=wave_info.get("f''", 0.0),
dmin=wave_info.get("dmin", 0.0),
dmax=wave_info.get("dmax", 0.0),
)
# in here I also need to look and see if we have
# been given any scaled reflection files...
# check to see if we have a user supplied lattice...
if "user_spacegroup" in crystals[crystal]:
lattice = Syminfo.get_lattice(crystals[crystal]["user_spacegroup"])
elif settings.space_group is not None:
# XXX do we ever actually get here?
lattice = Syminfo.get_lattice(
settings.space_group.type().lookup_symbol()
)
else:
lattice = None
# and also user supplied cell constants - from either
# the xinfo file (the first port of call) or the
# command-line.
if "user_cell" in crystals[crystal]:
cell = crystals[crystal]["user_cell"]
elif settings.unit_cell is not None:
# XXX do we ever actually get here?
cell = settings.unit_cell.parameters()
else:
cell = None
dmin = wave_info.get("dmin", 0.0)
dmax = wave_info.get("dmax", 0.0)
if dmin == 0.0 and dmax == 0.0:
dmin = PhilIndex.params.xia2.settings.resolution.d_min
dmax = PhilIndex.params.xia2.settings.resolution.d_max
# want to be able to locally override the resolution limits
# for this sweep while leaving the rest for the data set
# intact...
for sweep_name, sweep_info in crystals[crystal]["sweeps"].items():
sample_name = sweep_info.get("sample")
if sample_name is None:
if len(crystals[crystal]["samples"]) == 1:
sample_name = list(crystals[crystal]["samples"])[0]
else:
raise RuntimeError(
"No sample given for sweep %s" % sweep_name
)
xsample = xc.get_xsample(sample_name)
assert xsample is not None
dmin_old = dmin
dmax_old = dmax
if "RESOLUTION" in sweep_info:
values = [float(x) for x in sweep_info["RESOLUTION"].split()]
if len(values) == 1:
dmin = values[0]
elif len(values) == 2:
dmin = min(values)
dmax = max(values)
else:
raise RuntimeError(
"bad resolution for sweep %s" % sweep_name
)
if sweep_info["wavelength"] == wavelength:
frames_to_process = sweep_info.get("start_end")
xsweep = xw.add_sweep(
sweep_name,
sample=xsample,
directory=sweep_info.get("DIRECTORY"),
image=sweep_info.get("IMAGE"),
beam=sweep_info.get("beam"),
reversephi=sweep_info.get("reversephi", False),
distance=sweep_info.get("distance"),
gain=float(sweep_info.get("GAIN", 0.0)),
dmin=dmin,
dmax=dmax,
polarization=float(sweep_info.get("POLARIZATION", 0.0)),
frames_to_process=frames_to_process,
user_lattice=lattice,
user_cell=cell,
epoch=sweep_info.get("epoch", 0),
ice=sweep_info.get("ice", False),
excluded_regions=sweep_info.get("excluded_regions", []),
)
xsample.add_sweep(xsweep)
dmin = dmin_old
dmax = dmax_old
xc.add_wavelength(xw)
self.add_crystal(xc)
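# --- Editorial sketch (hypothetical usage; assumes a working xia2
# environment and an invented input file "demo.xinfo") ---
#
#   xp = XProject(xinfo_file="demo.xinfo")
#   text = xp.as_json()                    # serialise the project + crystals
#   xp2 = XProject.from_json(string=text)  # round-trip via _decode_dict
#   assert xp2.get_name() == xp.get_name()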
|
|
"""
.. module: lemur.sources.service
:platform: Unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
import arrow
from datetime import timedelta
import copy
from flask import current_app
from sentry_sdk import capture_exception
from sqlalchemy import cast
from sqlalchemy_utils import ArrowType
from lemur import database
from lemur.sources.models import Source
from lemur.certificates.models import Certificate
from lemur.certificates import service as certificate_service
from lemur.endpoints import service as endpoint_service
from lemur.endpoints.models import Endpoint
from lemur.extensions import metrics
from lemur.destinations import service as destination_service
from lemur.certificates.schemas import CertificateUploadInputSchema
from lemur.common.utils import find_matching_certificates_by_hash, parse_certificate
from lemur.common.defaults import serial
from lemur.logs import service as log_service
from lemur.plugins.base import plugins
from lemur.plugins.utils import get_plugin_option, set_plugin_option
def certificate_create(certificate, source):
data, errors = CertificateUploadInputSchema().load(certificate)
if errors:
raise Exception(
"Unable to import certificate: {reasons}".format(reasons=errors)
)
data["creator"] = certificate["creator"]
cert = certificate_service.import_certificate(**data)
cert.description = "This certificate was automatically discovered by Lemur"
cert.sources.append(source)
sync_update_destination(cert, source)
database.update(cert)
return cert
def certificate_update(certificate, source):
for s in certificate.sources:
if s.label == source.label:
break
else:
certificate.sources.append(source)
sync_update_destination(certificate, source)
database.update(certificate)
def sync_update_destination(certificate, source):
dest = destination_service.get_by_label(source.label)
if dest:
for d in certificate.destinations:
if d.label == source.label:
break
else:
certificate.destinations.append(dest)
def sync_endpoints(source):
new, updated, updated_by_hash = 0, 0, 0
current_app.logger.debug("Retrieving endpoints from {0}".format(source.label))
s = plugins.get(source.plugin_name)
try:
endpoints = s.get_endpoints(source.options)
except NotImplementedError:
current_app.logger.warning(
"Unable to sync endpoints for source {0} plugin has not implemented 'get_endpoints'".format(
source.label
)
)
return new, updated, updated_by_hash
for endpoint in endpoints:
exists = endpoint_service.get_by_dnsname_and_port(
endpoint["dnsname"], endpoint["port"]
)
certificate_name = endpoint.pop("certificate_name")
endpoint["certificate"] = certificate_service.get_by_name(certificate_name)
        # if the lookup by name failed, attempt a search via serial number and
        # hash comparison, and link the endpoint certificate to the Lemur certificate
if not endpoint["certificate"]:
certificate_attached_to_endpoint = None
try:
certificate_attached_to_endpoint = s.get_certificate_by_name(certificate_name, source.options)
except NotImplementedError:
current_app.logger.warning(
"Unable to describe server certificate for endpoints in source {0}:"
" plugin has not implemented 'get_certificate_by_name'".format(
source.label
)
)
capture_exception()
if certificate_attached_to_endpoint:
lemur_matching_cert, updated_by_hash_tmp = find_cert(certificate_attached_to_endpoint)
updated_by_hash += updated_by_hash_tmp
if lemur_matching_cert:
endpoint["certificate"] = lemur_matching_cert[0]
if len(lemur_matching_cert) > 1:
current_app.logger.error(
"Too Many Certificates Found{0}. Name: {1} Endpoint: {2}".format(
len(lemur_matching_cert), certificate_name, endpoint["name"]
)
)
metrics.send("endpoint.certificate.conflict",
"gauge", len(lemur_matching_cert),
metric_tags={"cert": certificate_name, "endpoint": endpoint["name"],
"acct": s.get_option("accountNumber", source.options)})
if not endpoint["certificate"]:
current_app.logger.error({
"message": "Certificate Not Found",
"certificate_name": certificate_name,
"endpoint_name": endpoint["name"],
"dns_name": endpoint.get("dnsname"),
"account": s.get_option("accountNumber", source.options),
})
metrics.send("endpoint.certificate.not.found",
"counter", 1,
metric_tags={"cert": certificate_name, "endpoint": endpoint["name"],
"acct": s.get_option("accountNumber", source.options),
"dnsname": endpoint.get("dnsname")})
continue
policy = endpoint.pop("policy")
policy_ciphers = []
for nc in policy["ciphers"]:
policy_ciphers.append(endpoint_service.get_or_create_cipher(name=nc))
policy["ciphers"] = policy_ciphers
endpoint["policy"] = endpoint_service.get_or_create_policy(**policy)
endpoint["source"] = source
if not exists:
current_app.logger.debug(
"Endpoint Created: Name: {name}".format(name=endpoint["name"])
)
endpoint_service.create(**endpoint)
new += 1
else:
current_app.logger.debug("Endpoint Updated: {}".format(endpoint))
endpoint_service.update(exists.id, **endpoint)
updated += 1
return new, updated, updated_by_hash
def expire_endpoints(source, ttl_hours):
now = arrow.utcnow()
expiration = now - timedelta(hours=ttl_hours)
endpoints = database.session_query(Endpoint).filter(Endpoint.source_id == source.id).filter(
cast(Endpoint.last_updated, ArrowType) <= expiration
)
expired = 0
for endpoint in endpoints:
current_app.logger.debug(
"Expiring endpoint from source {source}: {name} Last Updated: {last_updated}".format(
source=source.label, name=endpoint.name, last_updated=endpoint.last_updated))
database.delete(endpoint)
metrics.send("endpoint_expired", "counter", 1,
metric_tags={"source": source.label})
expired += 1
return expired
def find_cert(certificate):
updated_by_hash = 0
exists = False
if certificate.get("search", None):
conditions = certificate.pop("search")
exists = certificate_service.get_by_attributes(conditions)
if not exists and certificate.get("name"):
result = certificate_service.get_by_name(certificate["name"])
if result:
exists = [result]
if not exists and certificate.get("serial"):
exists = certificate_service.get_by_serial(certificate["serial"])
if not exists:
cert = parse_certificate(certificate["body"])
matching_serials = certificate_service.get_by_serial(serial(cert))
exists = find_matching_certificates_by_hash(cert, matching_serials)
updated_by_hash += 1
exists = [x for x in exists if x]
return exists, updated_by_hash
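# --- Editorial note (sketch, not original code): find_cert tries, in order,
# explicit "search" conditions, then name, then serial, and finally a
# serial + hash comparison of the PEM body. Hypothetical usage:
#
#   exists, by_hash = find_cert({"name": "wildcard.example.com",
#                                "body": pem_body})
#   if exists:
#       certificate_update(exists[0], source)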
# TODO this is very slow as we don't batch update certificates
def sync_certificates(source, user):
new, updated, updated_by_hash, unlinked = 0, 0, 0, 0
current_app.logger.debug("Retrieving certificates from {0}".format(source.label))
s = plugins.get(source.plugin_name)
certificates = s.get_certificates(source.options)
# emitting the count of certificates on the source
metrics.send("sync_certificates_count",
"gauge", len(certificates),
metric_tags={"source": source.label})
existing_certificates_with_source_by_id = {}
for e in certificate_service.get_all_valid_certificates_with_source(source.id):
existing_certificates_with_source_by_id[e.id] = e
for certificate in certificates:
        exists, updated_by_hash_tmp = find_cert(certificate)
        updated_by_hash += updated_by_hash_tmp
if not certificate.get("owner"):
certificate["owner"] = user.email
certificate["creator"] = user
if not exists:
certificate_create(certificate, source)
new += 1
else:
for e in exists:
if certificate.get("external_id"):
e.external_id = certificate["external_id"]
if certificate.get("authority_id"):
e.authority_id = certificate["authority_id"]
certificate_update(e, source)
if e.id in existing_certificates_with_source_by_id:
del existing_certificates_with_source_by_id[e.id]
updated += 1
# remove source from any certificates no longer being reported by it
destination = destination_service.get_by_label(source.label)
for certificate in existing_certificates_with_source_by_id.values():
certificate_service.remove_source_association(certificate, source)
current_app.logger.warning(f"Removed source {source.label} for {certificate.name} during source sync")
if destination in certificate.destinations:
certificate_service.remove_destination_association(certificate, destination, clean=False)
current_app.logger.warning(f"Removed destination {source.label} for {certificate.name} during source sync")
updated += 1
unlinked += 1
metrics.send("sync_certificates_unlinked",
"gauge", unlinked,
metric_tags={"source": source.label})
return new, updated, updated_by_hash
def sync(source, user, ttl_hours=2):
try:
new_certs, updated_certs, updated_certs_by_hash = sync_certificates(source, user)
metrics.send("sync.updated_certs_by_hash",
"gauge", updated_certs_by_hash,
metric_tags={"source": source.label})
new_endpoints, updated_endpoints, updated_endpoints_by_hash = sync_endpoints(source)
metrics.send("sync.updated_endpoints_by_hash",
"gauge", updated_endpoints_by_hash,
metric_tags={"source": source.label})
expired_endpoints = expire_endpoints(source, ttl_hours)
source.last_run = arrow.utcnow()
database.update(source)
return {
"endpoints": (new_endpoints, updated_endpoints, expired_endpoints),
"certificates": (new_certs, updated_certs),
}
except Exception as e: # noqa
current_app.logger.warning(f"Sync source '{source.label}' aborted: {e}")
capture_exception()
raise e
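# --- Editorial sketch (hypothetical usage; assumes an application context,
# an existing Source labelled "aws-prod" and a User object) ---
#
#   source = get_by_label("aws-prod")
#   stats = sync(source, user, ttl_hours=2)
#   # stats == {"endpoints": (new, updated, expired),
#   #           "certificates": (new, updated)}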
def create(label, plugin_name, options, description=None):
"""
    Creates a new source that can then be used to import certificates.
:param label: Source common name
:param plugin_name:
:param options:
:param description:
:rtype: Source
:return: New source
"""
source = Source(
label=label, options=options, plugin_name=plugin_name, description=description
)
log_service.audit_log("create_source", source.label, "Creating new source")
return database.create(source)
def update(source_id, label, plugin_name, options, description):
"""
Updates an existing source.
:param source_id: Lemur assigned ID
:param label: Source common name
:param options:
:param plugin_name:
:param description:
:rtype: Source
:return:
"""
source = get(source_id)
source.label = label
source.plugin_name = plugin_name
source.options = options
source.description = description
log_service.audit_log("update_source", source.label, "Updating source")
return database.update(source)
def delete(source_id):
"""
    Deletes a source.
:param source_id: Lemur assigned ID
"""
source = get(source_id)
if source:
# remove association of this source from all valid certificates
certificates = certificate_service.get_all_valid_certificates_with_source(source_id)
for certificate in certificates:
certificate_service.remove_source_association(certificate, source)
current_app.logger.warning(f"Removed source {source.label} for {certificate.name} during source delete")
# proceed with source delete
log_service.audit_log("delete_source", source.label, "Deleting source")
database.delete(source)
def get(source_id):
"""
    Retrieves a source by its Lemur-assigned ID.
:param source_id: Lemur assigned ID
:rtype: Source
:return:
"""
return database.get(Source, source_id)
def get_by_label(label):
"""
Retrieves a source by its label
:param label:
:return:
"""
return database.get(Source, label, field="label")
def get_all():
"""
    Retrieves all sources currently known by Lemur.
:return:
"""
query = database.session_query(Source)
return database.find_all(query, Source, {}).all()
def render(args):
filt = args.pop("filter")
certificate_id = args.pop("certificate_id", None)
if certificate_id:
query = database.session_query(Source).join(Certificate, Source.certificate)
query = query.filter(Certificate.id == certificate_id)
else:
query = database.session_query(Source)
if filt:
terms = filt.split(";")
query = database.filter(query, Source, terms)
return database.sort_and_page(query, Source, args)
def add_aws_destination_to_sources(dst):
"""
    Given a destination, check whether it can also be registered as a source, and add it if it is not one already.
    Qualified destinations are identified by the sync_as_source attribute of their plugin;
    sync_as_source_name gives the name of the matching source plugin.
    The (account number, path) pair is used to avoid duplicates.
    :return: True if the destination was added as a source, False otherwise
"""
# check that destination can be synced to a source
destination_plugin = plugins.get(dst.plugin_name)
    if not destination_plugin.sync_as_source:
return False
account_number = get_plugin_option("accountNumber", dst.options)
if account_number is None:
return False
path = get_plugin_option("path", dst.options)
if path is None:
return False
# a set of all (account number, path) available as sources
src_account_paths = set()
sources = get_all()
for src in sources:
src_account_paths.add(
(get_plugin_option("accountNumber", src.options), get_plugin_option("path", src.options))
)
if (account_number, path) not in src_account_paths:
src_options = copy.deepcopy(
plugins.get(destination_plugin.sync_as_source_name).options
)
set_plugin_option("accountNumber", account_number, src_options)
set_plugin_option("path", path, src_options)
# Set the right endpointType for cloudfront sources.
if get_plugin_option("endpointType", src_options) is not None and path == "/cloudfront/":
set_plugin_option("endpointType", "cloudfront", src_options)
create(
label=dst.label,
plugin_name=destination_plugin.sync_as_source_name,
options=src_options,
description=dst.description,
)
return True
return False
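# --- Editorial sketch (hypothetical): destinations whose plugin enables
# sync_as_source are mirrored into sources keyed by (accountNumber, path) ---
#
#   dst = destination_service.get_by_label("aws-prod")
#   if dst and add_aws_destination_to_sources(dst):
#       current_app.logger.info("registered %s as a source", dst.label)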
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tridiagonal solve ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import test
_sample_diags = np.array([[2, 1, 4, 0], [1, 3, 2, 2], [0, 1, -1, 1]],
dtype=np.float32)
_sample_rhs = np.array([1, 2, 3, 4], dtype=np.float32)
_sample_result = np.array([-9, 5, -4, 4], dtype=np.float32)
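# Editorial sanity check (not part of the adapted test-suite): rebuild the
# dense matrix from the compact diagonals above and confirm that
# _sample_result solves it. Runnable with numpy alone.
def _verify_sample_system():
  matrix = (np.diag(_sample_diags[0][:-1], 1) +  # superdiagonal (last padded)
            np.diag(_sample_diags[1]) +          # main diagonal
            np.diag(_sample_diags[2][1:], -1))   # subdiagonal (first padded)
  np.testing.assert_allclose(matrix.dot(_sample_result), _sample_rhs)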
def _tfconst(array):
return constant_op.constant(array, dtype=dtypes.float32)
def _tf_ones(shape):
return array_ops.ones(shape, dtype=dtypes.float64)
class TridiagonalSolveOpsTest(xla_test.XLATestCase):
"""Test for tri-diagonal matrix related ops."""
def testTridiagonalSolverSolves1Rhs(self):
np.random.seed(19)
batch_size = 8
num_dims = 11
diagonals_np = np.random.normal(size=(batch_size, 3,
num_dims)).astype(np.float32)
rhs_np = np.random.normal(size=(batch_size, num_dims, 1)).astype(np.float32)
with self.session() as sess, self.test_scope():
diags = array_ops.placeholder(
shape=(batch_size, 3, num_dims), dtype=dtypes.float32)
rhs = array_ops.placeholder(
shape=(batch_size, num_dims, 1), dtype=dtypes.float32)
x_np = sess.run(
linalg_impl.tridiagonal_solve(diags, rhs, partial_pivoting=False),
feed_dict={
diags: diagonals_np,
rhs: rhs_np
})[:, :, 0]
superdiag_np = diagonals_np[:, 0]
diag_np = diagonals_np[:, 1]
subdiag_np = diagonals_np[:, 2]
y = np.zeros((batch_size, num_dims), dtype=np.float32)
for i in range(num_dims):
if i == 0:
y[:, i] = (
diag_np[:, i] * x_np[:, i] + superdiag_np[:, i] * x_np[:, i + 1])
elif i == num_dims - 1:
y[:, i] = (
subdiag_np[:, i] * x_np[:, i - 1] + diag_np[:, i] * x_np[:, i])
else:
y[:, i] = (
subdiag_np[:, i] * x_np[:, i - 1] + diag_np[:, i] * x_np[:, i] +
superdiag_np[:, i] * x_np[:, i + 1])
self.assertAllClose(y, rhs_np[:, :, 0], rtol=1e-4, atol=1e-4)
def testTridiagonalSolverSolvesKRhs(self):
np.random.seed(19)
batch_size = 8
num_dims = 11
num_rhs = 5
diagonals_np = np.random.normal(size=(batch_size, 3,
num_dims)).astype(np.float32)
rhs_np = np.random.normal(size=(batch_size, num_dims,
num_rhs)).astype(np.float32)
with self.session() as sess, self.test_scope():
diags = array_ops.placeholder(
shape=(batch_size, 3, num_dims), dtype=dtypes.float32)
rhs = array_ops.placeholder(
shape=(batch_size, num_dims, num_rhs), dtype=dtypes.float32)
x_np = sess.run(
linalg_impl.tridiagonal_solve(diags, rhs, partial_pivoting=False),
feed_dict={
diags: diagonals_np,
rhs: rhs_np
})
superdiag_np = diagonals_np[:, 0]
diag_np = diagonals_np[:, 1]
subdiag_np = diagonals_np[:, 2]
for eq in range(num_rhs):
y = np.zeros((batch_size, num_dims), dtype=np.float32)
for i in range(num_dims):
if i == 0:
y[:, i] = (
diag_np[:, i] * x_np[:, i, eq] +
superdiag_np[:, i] * x_np[:, i + 1, eq])
elif i == num_dims - 1:
y[:, i] = (
subdiag_np[:, i] * x_np[:, i - 1, eq] +
diag_np[:, i] * x_np[:, i, eq])
else:
y[:, i] = (
subdiag_np[:, i] * x_np[:, i - 1, eq] +
diag_np[:, i] * x_np[:, i, eq] +
superdiag_np[:, i] * x_np[:, i + 1, eq])
self.assertAllClose(y, rhs_np[:, :, eq], rtol=1e-4, atol=1e-4)
# All the following is adapted from tridiagonal_solve_op_test.py
def _test(self,
diags,
rhs,
expected,
diags_format="compact",
transpose_rhs=False):
with self.session() as sess, self.test_scope():
self.assertAllClose(
sess.run(
linalg_impl.tridiagonal_solve(
_tfconst(diags),
_tfconst(rhs),
diags_format,
transpose_rhs,
conjugate_rhs=False,
partial_pivoting=False)),
np.asarray(expected, dtype=np.float32))
def _testWithDiagonalLists(self,
diags,
rhs,
expected,
diags_format="compact",
transpose_rhs=False):
with self.session() as sess, self.test_scope():
self.assertAllClose(
sess.run(
linalg_impl.tridiagonal_solve([_tfconst(x) for x in diags],
_tfconst(rhs),
diags_format,
transpose_rhs,
conjugate_rhs=False,
partial_pivoting=False)),
sess.run(_tfconst(expected)))
def testReal(self):
self._test(diags=_sample_diags, rhs=_sample_rhs, expected=_sample_result)
# testComplex is skipped as complex type is not yet supported.
def test3x3(self):
self._test(
diags=[[2.0, -1.0, 0.0], [1.0, 3.0, 1.0], [0.0, -1.0, -2.0]],
rhs=[1.0, 2.0, 3.0],
expected=[-3.0, 2.0, 7.0])
def test2x2(self):
self._test(
diags=[[2.0, 0.0], [1.0, 3.0], [0.0, 1.0]],
rhs=[1.0, 4.0],
expected=[-5.0, 3.0])
def test1x1(self):
self._test(diags=[[0], [3], [0]], rhs=[6], expected=[2])
def test0x0(self):
self._test(
diags=np.zeros(shape=(3, 0), dtype=np.float32),
rhs=np.zeros(shape=(0, 1), dtype=np.float32),
expected=np.zeros(shape=(0, 1), dtype=np.float32))
def test2x2WithMultipleRhs(self):
self._test(
diags=[[2, 0], [1, 3], [0, 1]],
rhs=[[1, 2, 3], [4, 8, 12]],
expected=[[-5, -10, -15], [3, 6, 9]])
def test1x1WithMultipleRhs(self):
self._test(diags=[[0], [3], [0]], rhs=[[6, 9, 12]], expected=[[2, 3, 4]])
# test1x1NotInvertible is skipped as runtime error not raised for now.
# test2x2NotInvertible is skipped as runtime error not raised for now.
def testPartialPivotingRaises(self):
np.random.seed(0)
batch_size = 8
num_dims = 11
num_rhs = 5
diagonals_np = np.random.normal(size=(batch_size, 3,
num_dims)).astype(np.float32)
rhs_np = np.random.normal(size=(batch_size, num_dims,
num_rhs)).astype(np.float32)
with self.session() as sess, self.test_scope():
with self.assertRaisesRegex(
errors_impl.UnimplementedError,
"Current implementation does not yet support pivoting."):
diags = array_ops.placeholder(
shape=(batch_size, 3, num_dims), dtype=dtypes.float32)
rhs = array_ops.placeholder(
shape=(batch_size, num_dims, num_rhs), dtype=dtypes.float32)
sess.run(
linalg_impl.tridiagonal_solve(diags, rhs, partial_pivoting=True),
feed_dict={
diags: diagonals_np,
rhs: rhs_np
})
# testCaseRequiringPivotingLastRows is skipped as pivoting is not supported
# for now.
# testNotInvertible is skipped as runtime error not raised for now.
def testDiagonal(self):
self._test(
diags=[[0, 0, 0, 0], [1, 2, -1, -2], [0, 0, 0, 0]],
rhs=[1, 2, 3, 4],
expected=[1, 1, -3, -2])
def testUpperTriangular(self):
self._test(
diags=[[2, 4, -1, 0], [1, 3, 1, 2], [0, 0, 0, 0]],
rhs=[1, 6, 4, 4],
expected=[13, -6, 6, 2])
def testLowerTriangular(self):
self._test(
diags=[[0, 0, 0, 0], [2, -1, 3, 1], [0, 1, 4, 2]],
rhs=[4, 5, 6, 1],
expected=[2, -3, 6, -11])
def testWithTwoRightHandSides(self):
self._test(
diags=_sample_diags,
rhs=np.transpose([_sample_rhs, 2 * _sample_rhs]),
expected=np.transpose([_sample_result, 2 * _sample_result]))
def testBatching(self):
self._test(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=np.array([_sample_result, -2 * _sample_result]))
def testWithTwoBatchingDimensions(self):
self._test(
diags=np.array([[_sample_diags, -_sample_diags, _sample_diags],
[-_sample_diags, _sample_diags, -_sample_diags]]),
rhs=np.array([[_sample_rhs, 2 * _sample_rhs, 3 * _sample_rhs],
[4 * _sample_rhs, 5 * _sample_rhs, 6 * _sample_rhs]]),
expected=np.array(
[[_sample_result, -2 * _sample_result, 3 * _sample_result],
[-4 * _sample_result, 5 * _sample_result, -6 * _sample_result]]))
def testBatchingAndTwoRightHandSides(self):
rhs = np.transpose([_sample_rhs, 2 * _sample_rhs])
expected_result = np.transpose([_sample_result, 2 * _sample_result])
self._test(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([rhs, 2 * rhs]),
expected=np.array([expected_result, -2 * expected_result]))
def testSequenceFormat(self):
self._testWithDiagonalLists(
diags=[[2, 1, 4], [1, 3, 2, 2], [1, -1, 1]],
rhs=[1, 2, 3, 4],
expected=[-9, 5, -4, 4],
diags_format="sequence")
def testSequenceFormatWithDummyElements(self):
dummy = 20 # Should be ignored by the solver.
self._testWithDiagonalLists(
diags=[
[2, 1, 4, dummy],
[1, 3, 2, 2],
[dummy, 1, -1, 1],
],
rhs=[1, 2, 3, 4],
expected=[-9, 5, -4, 4],
diags_format="sequence")
def testSequenceFormatWithBatching(self):
self._testWithDiagonalLists(
diags=[[[2, 1, 4], [-2, -1, -4]], [[1, 3, 2, 2], [-1, -3, -2, -2]],
[[1, -1, 1], [-1, 1, -1]]],
rhs=[[1, 2, 3, 4], [1, 2, 3, 4]],
expected=[[-9, 5, -4, 4], [9, -5, 4, -4]],
diags_format="sequence")
def testMatrixFormat(self):
self._test(
diags=[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]],
rhs=[1, 2, 3, 4],
expected=[-9, 5, -4, 4],
diags_format="matrix")
def testMatrixFormatWithMultipleRightHandSides(self):
self._test(
diags=[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]],
rhs=[[1, -1], [2, -2], [3, -3], [4, -4]],
expected=[[-9, 9], [5, -5], [-4, 4], [4, -4]],
diags_format="matrix")
def testMatrixFormatWithBatching(self):
self._test(
diags=[[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]],
[[-1, -2, 0, 0], [-1, -3, -1, 0], [0, 1, -2, -4], [0, 0, -1,
-2]]],
rhs=[[1, 2, 3, 4], [1, 2, 3, 4]],
expected=[[-9, 5, -4, 4], [9, -5, 4, -4]],
diags_format="matrix")
def testRightHandSideAsColumn(self):
self._test(
diags=_sample_diags,
rhs=np.transpose([_sample_rhs]),
expected=np.transpose([_sample_result]),
diags_format="compact")
def testTransposeRhs(self):
self._test(
diags=_sample_diags,
rhs=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=np.array([_sample_result, 2 * _sample_result]).T,
transpose_rhs=True)
# testConjugateRhs is skipped as complex type is not yet supported.
  # testAdjointRhs is skipped as complex type is not yet supported.
def testTransposeRhsWithRhsAsVector(self):
self._test(
diags=_sample_diags,
rhs=_sample_rhs,
expected=_sample_result,
transpose_rhs=True)
# testConjugateRhsWithRhsAsVector is skipped as complex type is not yet
# supported.
def testTransposeRhsWithRhsAsVectorAndBatching(self):
self._test(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=np.array([_sample_result, -2 * _sample_result]),
transpose_rhs=True)
# Gradient tests
def _gradientTest(
self,
diags,
rhs,
y, # output = reduce_sum(y * tridiag_solve(diags, rhs))
expected_grad_diags, # expected gradient of output w.r.t. diags
expected_grad_rhs, # expected gradient of output w.r.t. rhs
diags_format="compact",
transpose_rhs=False,
feed_dict=None):
expected_grad_diags = np.array(expected_grad_diags).astype(np.float32)
expected_grad_rhs = np.array(expected_grad_rhs).astype(np.float32)
with self.session() as sess, self.test_scope():
diags = _tfconst(diags)
rhs = _tfconst(rhs)
y = _tfconst(y)
x = linalg_impl.tridiagonal_solve(
diags,
rhs,
diagonals_format=diags_format,
transpose_rhs=transpose_rhs,
conjugate_rhs=False,
partial_pivoting=False)
res = math_ops.reduce_sum(x * y)
actual_grad_diags = sess.run(
gradient_ops.gradients(res, diags)[0], feed_dict=feed_dict)
actual_rhs_diags = sess.run(
gradient_ops.gradients(res, rhs)[0], feed_dict=feed_dict)
self.assertAllClose(expected_grad_diags, actual_grad_diags)
self.assertAllClose(expected_grad_rhs, actual_rhs_diags)
def testGradientSimple(self):
self._gradientTest(
diags=_sample_diags,
rhs=_sample_rhs,
y=[1, 3, 2, 4],
expected_grad_diags=[[-5, 0, 4, 0], [9, 0, -4, -16], [0, 0, 5, 16]],
expected_grad_rhs=[1, 0, -1, 4])
def testGradientWithMultipleRhs(self):
self._gradientTest(
diags=_sample_diags,
rhs=[[1, 2], [2, 4], [3, 6], [4, 8]],
y=[[1, 5], [2, 6], [3, 7], [4, 8]],
expected_grad_diags=[[-20, 28, -60, 0], [36, -35, 60, 80],
[0, 63, -75, -80]],
expected_grad_rhs=[[0, 2], [1, 3], [1, 7], [0, -10]])
def _makeDataForGradientWithBatching(self):
y = np.array([1, 3, 2, 4]).astype(np.float32)
grad_diags = np.array([[-5, 0, 4, 0], [9, 0, -4, -16],
[0, 0, 5, 16]]).astype(np.float32)
grad_rhs = np.array([1, 0, -1, 4]).astype(np.float32)
diags_batched = np.array(
[[_sample_diags, 2 * _sample_diags, 3 * _sample_diags],
[4 * _sample_diags, 5 * _sample_diags,
6 * _sample_diags]]).astype(np.float32)
rhs_batched = np.array([[_sample_rhs, -_sample_rhs, _sample_rhs],
[-_sample_rhs, _sample_rhs,
-_sample_rhs]]).astype(np.float32)
y_batched = np.array([[y, y, y], [y, y, y]]).astype(np.float32)
expected_grad_diags_batched = np.array(
[[grad_diags, -grad_diags / 4, grad_diags / 9],
[-grad_diags / 16, grad_diags / 25,
-grad_diags / 36]]).astype(np.float32)
expected_grad_rhs_batched = np.array(
[[grad_rhs, grad_rhs / 2, grad_rhs / 3],
[grad_rhs / 4, grad_rhs / 5, grad_rhs / 6]]).astype(np.float32)
return (y_batched, diags_batched, rhs_batched, expected_grad_diags_batched,
expected_grad_rhs_batched)
def testGradientWithBatchDims(self):
y, diags, rhs, expected_grad_diags, expected_grad_rhs = (
self._makeDataForGradientWithBatching())
self._gradientTest(
diags=diags,
rhs=rhs,
y=y,
expected_grad_diags=expected_grad_diags,
expected_grad_rhs=expected_grad_rhs)
# testGradientWithUnknownShapes is skipped as shapes should be fully known.
def _assertRaises(self, diags, rhs, diags_format="compact"):
with self.assertRaises(ValueError):
linalg_impl.tridiagonal_solve(diags, rhs, diags_format)
# Invalid input shapes
def testInvalidShapesCompactFormat(self):
def test_raises(diags_shape, rhs_shape):
self._assertRaises(_tf_ones(diags_shape), _tf_ones(rhs_shape), "compact")
test_raises((5, 4, 4), (5, 4))
test_raises((5, 3, 4), (4, 5))
test_raises((5, 3, 4), (5))
test_raises((5), (5, 4))
def testInvalidShapesSequenceFormat(self):
def test_raises(diags_tuple_shapes, rhs_shape):
diagonals = tuple(_tf_ones(shape) for shape in diags_tuple_shapes)
self._assertRaises(diagonals, _tf_ones(rhs_shape), "sequence")
test_raises(((5, 4), (5, 4)), (5, 4))
test_raises(((5, 4), (5, 4), (5, 6)), (5, 4))
test_raises(((5, 3), (5, 4), (5, 6)), (5, 4))
test_raises(((5, 6), (5, 4), (5, 3)), (5, 4))
test_raises(((5, 4), (7, 4), (5, 4)), (5, 4))
test_raises(((5, 4), (7, 4), (5, 4)), (3, 4))
def testInvalidShapesMatrixFormat(self):
def test_raises(diags_shape, rhs_shape):
self._assertRaises(_tf_ones(diags_shape), _tf_ones(rhs_shape), "matrix")
test_raises((5, 4, 7), (5, 4))
test_raises((5, 4, 4), (3, 4))
test_raises((5, 4, 4), (5, 3))
# Tests involving placeholder with an unknown dimension are all skipped.
# Dimensions have to be all known statically.
if __name__ == "__main__":
test.main()
|
|
from copy import deepcopy
from uuid import uuid4
import simplejson as json
def boolean (val):
    if isinstance (val, str):
        lower = val.lower ()
        if lower in ('false', 'f'):
            return False
        elif lower in ('true', 't'):
            return True
    elif isinstance (val, int):
        if val == 0:
            return False
        elif val == 1:
            return True
    elif isinstance (val, float):
        if val == 0.0:
            return False
        elif val == 1.0:
            return True
    elif val is None:
        return False
    # anything unmatched (including strings such as 'maybe') now raises
    # instead of silently returning None
    raise RuntimeError ('Cast to boolean failed: Could not convert ' +
                        str (val) + ' to a boolean')
def cond_assign (dst, src, key):
if src.has_key (key):
dst[key] = src[key]
class attr_dict (dict):
def __init__ (self, **attr):
dict.__init__ (self, **attr)
def __getattr__ (self, key):
try:
return self[key]
except KeyError:
return None
def default (self, key, default):
if self.has_key (key):
return
else:
self[key] = default
def json (self):
return json.dumps (self)
class DM_List (list):
def __init__ (self, *args):
list.__init__ (self, *args)
def first (self):
if len (self) == 0:
return None
else:
return self[0]
def json (self):
return json.dumps (map (lambda x: x.public (), self))
class DM_Entry (dict):
def __init__ (self, model):
if model is None:
raise HTTP (400, "Attempt to create entry without a model")
self.model = model
dict.__init__ (self)
def __getattr__ (self, key):
return dict.__getitem__ (self, key)
def public (self):
publicValues = {}
for key in self.model.public ():
publicValues[key.name] = self[key.name]
publicValues['id'] = self['id']
return attr_dict (**publicValues)
def json (self):
return json.dumps (self.public ())
class DM_Field (attr_dict):
def __init__ (self, name, field_type, **attr):
self.name = name
self.type = field_type
self.lookup_id = 'f_' + self.name
attr_dict.__init__ (self, **attr)
#def __deepcopy__ (self, memo):
# pass
def toField (self):
if self.type == 'table':
return Field (self.lookup_id, 'string', default = None)
kw = {}
cond_assign (kw, self, 'default')
cond_assign (kw, self, 'required')
return Field (self.lookup_id, self.type, **kw)
def display (self):
kw = {}
cond_assign (kw, self, 'title')
cond_assign (kw, self, 'visible')
cond_assign (kw, self, 'text')
if not kw.has_key ('text'):
kw['text'] = self.name
return kw
class DM_TableModel (dict):
def __init__ (self, *fields, **kw):
if kw.has_key ('name'):
self.name = kw['name']
else:
self.name = None
values = []
self.publicList = []
for item in fields:
values.append ((item.name, item))
if not item.private and not item.protected:
self.publicList.append (item)
dict.__init__ (self, values)
def map_key (self, key):
return 'f_' + key
def __deepcopy__ (self, memo):
return DM_TableModel (*self, name = self.name)
def __iter__ (self):
return iter (self.values ())
def append (self, field):
self[field.name] = field
def toFields (self):
return map (lambda item: item.toField (), self.values ())
def public (self):
return self.publicList
'''class DM_TableFactory:
def __init__ (self, name = None, writeback = None, entry = None, col = None):
self.name = name
self.writeback = writeback
self.entry = entry
self.col = col
self.model = False
def exists (self):
return False'''
class DM_Table:
def __init__ (self, name = None, model = None, writeback = None):
self.name = name
self.writeback = writeback
self.ex = (name in db.tables)
self.model = model
def exists (self):
return self.ex
def create (self, model = None):
if model:
self.model = model
if self.ex:
return
if not self.model:
            raise HTTP (400, 'Attempt to access an abstract table')
if not self.name:
self.name = 't' + uuid4 ().hex
db.define_table (self.name, *(self.model.toFields ()))
if self.writeback:
db (db[self.writeback[0]].id == self.writeback[1]).update (**{self.writeback[2]: self.name})
self.ex = True
def count (self):
if not self.ex:
self.create (self.model)
return db (db[self.name].id >= 0).count ()
def all (self):
if not self.ex:
self.create (self.model)
entries = db (db[self.name].id >= 0).select ()
result = DM_List ()
for e in entries:
result.append (self.__load (e))
return result
def multi_get (self, entry_ids):
if not self.ex:
self.create (self.model)
query = None
for key in entry_ids:
next = (db[self.name].id == key)
if query:
query = query | next
else:
query = next
if query:
entries = db (query).select ()
else:
entries = []
result = DM_List ()
for e in entries:
result.append (self.__load (e))
return result
def get (self, entry_id):
if not self.ex:
self.create (self.model)
entry = db (db[self.name].id == entry_id).select ().first ()
if not entry:
raise HTTP (400, 'Record Not Found')
return self.__load (entry)
def query (self, **kw):
query = self.__make_query (kw)
entries = db (query).select ()
result = DM_List ()
for e in entries:
result.append (self.__load (e))
return result
def delete (self, **kw):
query = self.__make_query (kw)
db (query).delete ()
def insert (self, **values):
if not self.ex:
self.create (self.model)
data = {}
for item in self.model:
if values.has_key (item.name):
if item.type == 'table':
data[self.model.map_key (item.name)] = values[item.name].name
else:
data[self.model.map_key (item.name)] = values[item.name]
return db[self.name].insert (**data)
def update (self, entry_id, **values):
if not self.ex:
self.create (self.model)
data = {}
for v in values:
data[self.model.map_key (v)] = values[v]
db (db[self.name].id == entry_id).update (**data)
def __load (self, entry):
val = DM_Entry (self.model)
for field in self.model:
key = field.name
lookup_id = self.model.map_key (key)
if field.type == 'table':
#for key, value in entry.iteritems ():
#if self.model[key].type == 'table':
if entry[lookup_id] is None:
val[key] = DM_Table (writeback = (self.name, entry['id'], lookup_id), model = field.model)
else:
if field.model:
val[key] = DM_Table (name = entry[lookup_id], model = field.model)
else:
val[key] = DM_Table (name = entry[lookup_id])
else:
val[key] = entry[lookup_id]
val['id'] = entry['id']
return val
def __make_query (self, kw):
if not self.ex:
self.create (self.model)
query = None
for key, value in kw.iteritems ():
next = (db[self.name][self.model.map_key (key)] == value)
if query:
query = query & next
else:
query = next
return query
class DataManager:
def __init__ (self):
self.root = DM_Table (deployment_settings.data.base_table, root_table)
self.__checker = set ()
self.models = {}
def define_datatype (self, datatype, model):
if datatype in self.__checker:
raise HTTP (500, 'Attempt to redefine datatype')
self.__checker.add (datatype)
self.models[datatype] = deepcopy (model)
self.models[datatype].append (DM_Field ('owner', 'integer', private = True, default = -1))
self.models[datatype].append (DM_Field ('public', 'boolean', private = True, default = False))
entry = self.root.query (datatype = datatype).first ()
if not entry:
id = self.root.insert (datatype = datatype)
lookup = self.root.get (id)
lookup.data.create (self.models[datatype])
lookup.user.create ()
lookup.kw.create ()
def insert (self, datatype, **kw):
user_id = require_logged_in ()
kw['owner'] = user_id
lookup = self.root.query (datatype = datatype).first ()
lookup.data.create (self.models[datatype])
id = lookup.data.insert (**kw)
return id
def count (self, datatype):
user_id = require_logged_in ()
lookup = self.root.query (datatype = datatype).first ()
lookup.data.create (self.models[datatype])
return lookup.data.count ()
def update (self, datatype, entry_id, **kw):
user_id = require_logged_in ()
entry = dm.get (datatype, entry_id)
if entry.owner != user_id:
raise HTTP (400, 'Permission Denied to edit data')
lookup = self.root.query (datatype = datatype).first ()
lookup.data.create (self.models[datatype])
        lookup.data.update (entry_id, **kw)
def global_load (self, datatype, keywords = None):
lookup = self.root.query (datatype = datatype).first ()
lookup.data.create (self.models[datatype])
if not keywords:
return lookup.data.query (public = True)
else:
keys = set ()
for kw in keywords:
key_table = lookup.kw.query (name = kw).first ()
if not key_table:
continue
else:
current_keys = key_table.ref.all ()
for k in current_keys:
keys.add (k.ref)
return lookup.data.multi_get (keys)
def local_load (self, datatype, keywords = None):
user_id = require_logged_in ()
lookup = self.root.query (datatype = datatype).first ()
lookup.data.create (self.models[datatype])
ref_table = lookup.user.query (user = user_id).first ()
if not ref_table:
id = lookup.user.insert (user = user_id)
ref_table = lookup.user.get (id)
user_table = ref_table.ref
result = user_table.all ()
loadList = []
for r in result:
loadList.append (r.ref)
return lookup.data.multi_get (loadList)
def load_keyworded (self, datatype, kw):
lookup = self.root.query (datatype = datatype).first ()
key_table = lookup.kw.query (name = kw).first ()
if key_table is None:
return DM_List ()
ids = map (lambda x: x.ref, key_table.ref.all ())
lookup.data.create (self.models[datatype])
return lookup.data.multi_get (ids)
def get (self, datatype, object_id):
user_id = check_logged_in ()
lookup = self.root.query (datatype = datatype).first ()
lookup.data.create (self.models[datatype])
result = lookup.data.get (object_id)
if user_id != result.owner and not result['public']:
raise HTTP (400, "Attempt to access private data")
return result
def query (self, datatype, **query):
user_id = check_logged_in ()
lookup = self.root.query (datatype = datatype).first ()
lookup.data.create (self.models[datatype])
return lookup.data.query (**query)
def owner (self, datatype, object_id, user_id):
pass
def public (self, datatype, object_id, pub_status):
user_id = require_logged_in ()
lookup = self.root.query (datatype = datatype).first ()
lookup.data.create (self.models[datatype])
data = lookup.data.get (object_id)
if user_id != data.owner:
raise HTTP (400, "Permission denied to change permissions of resource")
lookup.data.update (object_id, public = pub_status)
def link (self, datatype, object_id):
user_id = require_logged_in ()
data = dm.get (datatype, object_id)
if user_id != data.owner and not data.public:
raise HTTP (400, "Permission denied to link resource")
user_table = self.__traverse_to_user_table (datatype)
user_table.insert (ref = object_id)
def unlink (self, datatype, object_id):
user_table = self.__traverse_to_user_table (datatype)
user_table.delete (ref = object_id)
def delete (self, datatype, **kw):
user_id = require_logged_in ()
data = self.query (datatype, **kw)
if not check_role (admin_role):
for item in data:
if item.owner != user_id:
raise HTTP (400, "Permission Denied")
lookup = self.root.query (datatype = datatype).first ()
lookup.data.create (self.models[datatype])
lookup.data.delete (**kw)
def keywords (self, datatype, object_id, keywords):
lookup = self.root.query (datatype = datatype).first ()
for kw in keywords:
key_table = lookup.kw.query (name = kw).first ()
if not key_table:
id = lookup.kw.insert (name = kw)
key_table = lookup.kw.get (id)
key_list = key_table.ref
key_list.insert (ref = object_id)
def dup (self, datatype, alt_datatype):
if alt_datatype in self.__checker:
raise HTTP (500, 'Attempt to redefine datatype')
self.__checker.add (alt_datatype)
self.models[alt_datatype] = deepcopy (self.models[datatype])
#raise HTTP (400, str (self.models[alt_datatype]))
entry = self.root.query (datatype = alt_datatype).first ()
if not entry:
lookup = self.root.query (datatype = datatype).first ()
if not lookup:
raise HTTP (400, 'Datatype not defined')
id = self.root.insert (datatype = alt_datatype, data = lookup.data, kw = lookup.kw)
lookup = self.root.get (id)
lookup.user.create ()
def get_types (self):
return self.models
def __traverse_to_user_table (self, datatype):
user_id = require_logged_in ()
lookup = self.root.query (datatype = datatype).first ()
lookup.data.create (self.models[datatype])
ref_table = lookup.user.query (user = user_id).first ()
if not ref_table:
id = lookup.user.insert (user = user_id)
ref_table = lookup.user.get (id)
user_table = ref_table.ref
return user_table
user_data = DM_TableModel (DM_Field ('ref', 'integer'))
user_table = DM_TableModel (DM_Field ('user', 'integer'),
DM_Field ('ref', 'table', model = user_data),
)
keyword_list = DM_TableModel (DM_Field ('ref', 'integer'))
keyword_table = DM_TableModel (DM_Field ('name', 'string'),
DM_Field ('ref', 'table', model = keyword_list)
)
root_table = DM_TableModel (DM_Field ('datatype', 'string'),
DM_Field ('data', 'table'),
DM_Field ('user', 'table', model = user_table),
DM_Field ('kw', 'table', model = keyword_table),
)
dm = DataManager ()
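# --- Editorial sketch (hypothetical web2py usage; the 'note' datatype and
# its fields are invented for illustration) ---
#
#   note_model = DM_TableModel (DM_Field ('title', 'string'),
#                               DM_Field ('body', 'string'),
#                               name = 'note')
#   dm.define_datatype ('note', note_model)
#   note_id = dm.insert ('note', title = 'hello', body = 'world')
#   dm.public ('note', note_id, True)        # make it globally loadable
#   dm.keywords ('note', note_id, ['greeting'])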
|
|
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import sys
from operator import attrgetter
from typing import Optional, Callable, Tuple, Union
from pktverify import consts, errors
from pktverify.addrs import EthAddr, ExtAddr, Ipv6Addr
from pktverify.bytes import Bytes
from pktverify.consts import THREAD_ALLOWED_ICMPV6_TYPES
from pktverify.packet import Packet
from pktverify.utils import make_filter_func
WPAN, ETH = 0, 1
class _SavedIndex(object):
__slots__ = ('_pkts', '_saved_index')
def __init__(self, pkts):
self._pkts = pkts
self._saved_index = pkts.index
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
self._pkts.index = self._saved_index
def _always_true(p):
return True
class PacketFilter(object):
"""
    Represents a range of packets that are filtered by a given filter.
"""
def __init__(self,
pkts,
start=(0, 0),
stop=None,
*,
index=None,
filter_func: Optional[Callable] = None,
parent: Optional['PacketFilter'] = None):
if stop is None:
stop = (len(pkts), len(pkts))
self._pkts = pkts
self._start_index = start
self._stop_index = stop
self._index = index if index is not None else self._start_index
self._last_index = -1
self._filter_func = filter_func or _always_true
self._parent = parent
self._check_type_ok()
def _check_type_ok(self):
assert self._last_index == -1 or 0 <= self._last_index < len(self._pkts)
assert isinstance(self._start_index, tuple) and len(self._start_index) == 2, self._start_index
assert isinstance(self._stop_index, tuple) and len(self._stop_index) == 2, self._stop_index
assert isinstance(self._index, tuple) and len(self._index) == 2, self._index
self._check_idx_range_ok((0, 0), self._start_index)
self._check_idx_range_ok(self._start_index, self._index)
self._check_idx_range_ok(self._index, self._stop_index)
self._check_idx_range_ok(self._stop_index, (len(self._pkts), len(self._pkts)))
def _check_idx_range_ok(self, start, stop):
assert start[0] <= stop[0], (start, stop)
assert start[1] <= stop[1], (start, stop)
@property
def index(self) -> Tuple[int, int]:
"""
:return: the current index (which is a tuple)
"""
return self._index
@index.setter
def index(self, index: Tuple[int, int]):
"""
Set the current index
:param index: the index tuple to set
"""
assert isinstance(index, tuple) and len(index) == 2, index
self._check_type_ok()
self._index = index
self._check_type_ok()
def __len__(self):
"""
:return: length of packets
"""
return len(self._pkts)
def save_index(self):
"""
Save the current index to be restored.
:return: a context that saves the current index when entering, and restores when exiting
"""
return _SavedIndex(self)
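    # --- Editorial sketch (hypothetical usage): save_index() supports
    # "look ahead without consuming" checks ---
    #
    #   with pkts.save_index():
    #       pkts.filter_wpan().must_next()   # advances the index...
    #   # ...which is restored here on exit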
@property
def start_index(self) -> Tuple[int, int]:
"""
:return: the start index tuple
"""
return self._start_index
@property
def stop_index(self) -> Tuple[int, int]:
"""
:return: the stop index tuple
"""
return self._stop_index
def filter(self, func, cascade=True, **vars) -> 'PacketFilter':
"""
Create a new PacketFilter based on this packet filter with given filter func
        :param func: a callable that returns a bool (e.g. lambda p: ...) or a filter string
:param cascade: True if calling next in the new filter will also set index for this filter, False otherwise
:param vars: variables for filter string
:return: a new PacketFilter
"""
        # note: _stop_index is a tuple, so it must be compared against the
        # (len, len) pair, not the bare length
        print('\n>>> filtering in range %s~%s%s:' %
              (self._index, self._stop_index,
               "<end>" if self._stop_index == (len(self._pkts), len(self._pkts)) else "<stop>"),
              file=sys.stderr)
func = make_filter_func(func, **vars)
self._check_type_ok()
return PacketFilter(self._pkts,
self._index,
self._stop_index,
filter_func=lambda p: self._filter_func(p) and func(p),
parent=self if cascade else None)
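    # --- Editorial sketch (hypothetical chaining; the dst_pan field below is
    # illustrative): successive filters AND together, so verifications
    # typically read as a pipeline ---
    #
    #   pkts.filter_wpan() \
    #       .filter(lambda p: p.wpan.dst_pan == 0xface) \
    #       .must_next()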
def filter_if(self, cond: bool, *args, **kwargs) -> 'PacketFilter':
"""
Create a filter using given arguments if `cond` is true.
:param cond: the condition to be checked
:param args: arguments for filter func
:param kwargs: arguments for filter func
:return: a sub filter using given arguments if cond is true, or self otherwise
"""
if cond:
return self.filter(*args, **kwargs)
else:
return self
@property
    def last_index(self) -> int:
return self._last_index
def last(self) -> Packet:
"""
:return: the last packet found
"""
if self._last_index >= 0:
return self._pkts[self._last_index]
else:
raise errors.PacketNotFound(self.index, self._stop_index)
def next(self) -> Optional[Packet]:
"""
Find the next packet starting from the current index to the stop index that matches the current filter.
:return: the next matching packet, or None if packet not found
"""
self._check_type_ok()
idx = min(self._index)
stop_idx = max(self._stop_index)
while idx < stop_idx:
p = self._pkts[idx]
sys.stderr.write('#%d %s' % (idx + 1, '\n' if idx % 40 == 39 else ''))
if self._filter_func(p):
if p.wpan and not (self._index[0] <= idx < self._stop_index[0]): # wpan matched but not in range
pass
elif p.eth and not (self._index[1] <= idx < self._stop_index[1]): # eth matched but not in range
pass
else:
self._on_found_next(idx, p)
print("\n>>> found packet at #%d!" % (idx + 1,), file=sys.stderr)
return p
idx += 1
return None
def must_next(self) -> Packet:
"""
Call .next(), raise error if packet is not found.
:return: the next matching packet
"""
p = self.next()
if p is not None:
return p
else:
raise errors.PacketNotFound(self.index, self._stop_index)
def must_not_next(self) -> None:
"""
Call .next(), raise error if packet is found
"""
p = self.next()
if p is None:
return
else:
logging.error("Found unexpected packet at #%s", self.index)
p.show()
p.debug_fields()
raise errors.UnexpectedPacketFound(self.index, p)
def _on_found_next(self, idx: int, p: Packet):
assert self._pkts[idx] is p
assert idx >= min(self._index)
assert not p.wpan or idx >= self._index[0]
assert not p.eth or idx >= self._index[1], (self._index, idx)
if p.wpan:
wpan_idx = idx + 1
eth_idx = max(self._index[1],
self._find_prev_packet(idx + 1, p.sniff_timestamp - consts.AUTO_SEEK_BACK_MAX_DURATION, ETH))
else:
eth_idx = idx + 1
wpan_idx = max(
self._index[0],
self._find_prev_packet(idx + 1, p.sniff_timestamp - consts.AUTO_SEEK_BACK_MAX_DURATION, WPAN))
        # make sure the index never goes back
assert wpan_idx >= self._index[0]
assert eth_idx >= self._index[1]
print('\n>>>_on_found_next %d %s => %s' % (idx, self._index, (wpan_idx, eth_idx)), file=sys.stderr)
self._set_found_index(idx, (wpan_idx, eth_idx))
def _find_prev_packet(self, idx, min_sniff_timestamp, pkttype):
assert pkttype in (WPAN, ETH)
prev_idx = idx
while idx > 0 and self._pkts[idx - 1].sniff_timestamp >= min_sniff_timestamp:
idx -= 1
if pkttype == WPAN and self._pkts[idx].wpan:
prev_idx = idx
elif pkttype == ETH and self._pkts[idx].eth:
prev_idx = idx
return prev_idx
def __iter__(self):
for pkt in self._pkts:
yield pkt
def range(self, start, stop=None, cascade=True) -> 'PacketFilter':
"""
Create a new PacketFilter using the specified start and stop index tuples
:param start: the new start index tuple
:param stop: the new stop index tuple
:param cascade: True if calling next in the new filter will also set index for this filter, False otherwise
:return: a new PacketFilter with new start and stop range
"""
if stop is None:
stop = self._stop_index
assert self._start_index <= start <= self._stop_index
assert self._start_index <= stop <= self._stop_index
return PacketFilter(self._pkts, start, stop, filter_func=self._filter_func, parent=self if cascade else None)
def copy(self) -> 'PacketFilter':
"""
:return: a copy of the current PacketFilter
"""
return PacketFilter(self._pkts, self._index, self._stop_index, filter_func=self._filter_func, parent=None)
def __getitem__(self, index: int) -> Packet:
"""
:param index: the packet index (not tuple!)
:return: the packet at the specified index
"""
assert isinstance(index, int), index
return self._pkts[index]
def seek_back(self, max_duration: float, *, eth=False, wpan=False) -> 'PacketFilter':
"""
Move the current index back in time within the specified max duration. Either eth or wpan must be True.
:param max_duration: the max duration to move back
:param eth: True if eth index can be moved back
:param wpan: True if wpan index can be moved back
:return: self
"""
assert eth or wpan, "must have eth or wpan"
wpan_idx = self._index[0]
if wpan and wpan_idx < len(self._pkts):
wpan_idx = self._find_prev_packet(wpan_idx, self._pkts[wpan_idx].sniff_timestamp - max_duration, WPAN)
wpan_idx = max(self._start_index[0], wpan_idx)
eth_idx = self._index[1]
if eth and eth_idx < len(self._pkts):
eth_idx = self._find_prev_packet(eth_idx, self._pkts[eth_idx].sniff_timestamp - max_duration, ETH)
eth_idx = max(self._start_index[1], eth_idx)
print("\n>>> back %s wpan=%s, eth=%s: index %s => %s" % (max_duration, wpan, eth, self._index,
(wpan_idx, eth_idx)),
file=sys.stderr)
self._index = (wpan_idx, eth_idx)
self._check_type_ok()
return self
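    # --- Editorial sketch (hypothetical usage): rewind the wpan index by up
    # to one second before searching for a matching ACK ---
    #
    #   pkts.seek_back(1.0, wpan=True).filter_wpan_ack().must_next()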
    def _set_found_index(self, last_index: int, index: Tuple[int, int]):
self._last_index = last_index
self._index = index
self._check_type_ok()
if self._parent is not None:
self._parent._set_found_index(last_index, index)
def filter_mle_advertisement(self, role: str, **kwargs):
assert role in ('Leader', 'Router', 'REED'), role
tlv_set = {consts.LEADER_DATA_TLV, consts.SOURCE_ADDRESS_TLV}
if role != 'REED':
tlv_set.add(consts.ROUTE64_TLV)
return self.filter_LLANMA(). \
filter_mle_cmd(consts.MLE_ADVERTISEMENT). \
filter(lambda p: tlv_set ==
set(p.mle.tlv.type) and \
p.ipv6.hlim == 255, **kwargs
)
def filter_coap(self, **kwargs):
"""
Create a new PacketFilter to filter COAP packets.
:param kwargs: Extra arguments for `filter`.
:return: The new PacketFilter to filter COAP packets.
"""
return self.filter(attrgetter('coap'), **kwargs)
def filter_coap_request(self, uri_path, port=None, confirmable=None, **kwargs):
"""
Create a new PacketFilter to filter COAP Request packets.
:param uri_path: The COAP URI path to filter.
        :param port: The UDP port to filter if specified.
        :param confirmable: If specified, filter confirmable (True) or non-confirmable (False) requests.
        :param kwargs: Extra arguments for `filter`.
:return: The new PacketFilter to filter COAP Request packets.
"""
assert isinstance(uri_path, str), uri_path
assert port is None or isinstance(port, int), port
return self.filter(
lambda p: (p.coap.is_post and p.coap.opt.uri_path_recon == uri_path and
(confirmable is None or p.coap.type ==
(0 if confirmable else 1)) and (port is None or p.udp.dstport == port)), **kwargs)
def filter_coap_ack(self, uri_path, port=None, **kwargs):
"""
        Create a new PacketFilter to filter COAP ACK packets.
:param uri_path: The COAP URI path to filter.
:param port: The UDP port to filter if specified.
:param kwargs: Extra arguments for `filter`.
:return: The new PacketFilter to filter COAP ACK packets.
"""
assert isinstance(uri_path, str), uri_path
assert port is None or isinstance(port, int), port
return self.filter(
lambda p: (p.coap.is_ack and p.coap.opt.uri_path_recon == uri_path and
(port is None or p.udp.dstport == port)), **kwargs)
def filter_backbone_answer(self,
target: str,
*,
eth_src: Optional[EthAddr] = None,
port: int = None,
confirmable: bool = None,
mliid=None):
filter_eth = self.filter_eth_src(eth_src) if eth_src else self.filter_eth()
f = filter_eth.filter_coap_request('/b/ba', port=port,
confirmable=confirmable).filter('thread_bl.tlv.target_eid == {target}',
target=target)
if mliid is not None:
f = f.filter('thread_bl.tlv.ml_eid == {mliid}', mliid=mliid)
return f
def filter_backbone_query(self, target: str, *, eth_src: EthAddr, port: int = None) -> 'PacketFilter':
return self.filter_eth_src(eth_src).filter_coap_request('/b/bq', port=port, confirmable=False).filter(
'thread_bl.tlv.target_eid == {target}', target=target)
def filter_wpan(self, **kwargs):
"""
        Create a new PacketFilter to filter WPAN packets.
:param kwargs: Extra arguments for `filter`.
:return: The new PacketFilter to filter WPAN packets.
"""
return self.filter(attrgetter('wpan'), **kwargs)
def filter_wpan_ack(self, **kwargs):
"""
        Create a new PacketFilter to filter WPAN ACK packets.
        :param kwargs: Extra arguments for `filter`.
        :return: The new PacketFilter to filter WPAN ACK packets.
"""
return self.filter(lambda p: p.wpan.frame_type == consts.MAC_FRAME_TYPE_ACK, **kwargs)
def filter_wpan_beacon(self, **kwargs):
"""
        Create a new PacketFilter to filter WPAN beacon packets.
        :param kwargs: Extra arguments for `filter`.
        :return: The new PacketFilter to filter WPAN beacon packets.
"""
return self.filter(lambda p: p.wpan.frame_type == consts.MAC_FRAME_TYPE_BEACON, **kwargs)
def filter_wpan_data(self, **kwargs):
"""
        Create a new PacketFilter to filter WPAN data packets.
        :param kwargs: Extra arguments for `filter`.
        :return: The new PacketFilter to filter WPAN data packets.
"""
return self.filter(lambda p: p.wpan.frame_type == consts.MAC_FRAME_TYPE_DATA, **kwargs)
def filter_wpan_seq(self, seq, **kwargs):
"""
        Create a new PacketFilter to filter WPAN packets by sequence number.
        :param seq: The sequence number to filter.
        :param kwargs: Extra arguments for `filter`.
        :return: The new PacketFilter to filter WPAN packets with the given sequence number.
"""
return self.filter(lambda p: p.wpan.seq_no == seq, **kwargs)
def filter_wpan_version(self, version: int, **kwargs):
"""
        Create a new PacketFilter to filter WPAN packets of a given version.
        :param version: The version to filter.
        :param kwargs: Extra arguments for `filter`.
        :return: The new PacketFilter to filter WPAN packets of the given version.
"""
return self.filter(lambda p: p.wpan.version == version, **kwargs)
def filter_wpan_channel(self, channel: int, **kwargs):
"""
        Create a new PacketFilter to filter WPAN packets of a given channel.
        :param channel: The channel to filter.
        :param kwargs: Extra arguments for `filter`.
        :return: The new PacketFilter to filter WPAN packets on the given channel.
"""
return self.filter(lambda p: p.wpan.channel == channel, **kwargs)
def filter_wpan_src16(self, addr, **kwargs):
return self.filter(lambda p: p.wpan.src16 == addr, **kwargs)
def filter_wpan_dst16(self, addr, **kwargs):
return self.filter(lambda p: p.wpan.dst16 == addr, **kwargs)
def filter_wpan_src16_dst16(self, src_addr, dst_addr, **kwargs):
return self.filter(lambda p: p.wpan.src16 == src_addr and p.wpan.dst16 == dst_addr, **kwargs)
def filter_wpan_src64(self, addr, **kwargs):
assert isinstance(addr, (str, ExtAddr)), addr
return self.filter(lambda p: p.wpan.src64 == addr, **kwargs)
def filter_wpan_dst64(self, addr, **kwargs):
assert isinstance(addr, (str, ExtAddr)), addr
return self.filter(lambda p: p.wpan.dst64 == addr, **kwargs)
def filter_dst16(self, rloc16: int, **kwargs):
return self.filter(lambda p: p.lowpan.mesh.dest16 == rloc16 or p.wpan.dst16 == rloc16, **kwargs)
def filter_wpan_ie_present(self, **kwargs):
        return self.filter(lambda p: p.wpan.ie_present == 1, **kwargs)
def filter_wpan_ie_not_present(self, **kwargs):
        return self.filter(lambda p: p.wpan.ie_present == 0, **kwargs)
def filter_ping_request(self, identifier=None, **kwargs):
return self.filter(
lambda p: p.icmpv6.is_ping_request and (identifier is None or p.icmpv6.echo.identifier == identifier),
**kwargs)
def filter_ping_reply(self, **kwargs):
identifier = kwargs.pop('identifier', None)
return self.filter(
lambda p: (p.icmpv6.is_ping_reply and (identifier is None or p.icmpv6.echo.identifier == identifier)),
**kwargs)
def filter_eth(self, **kwargs):
return self.filter(attrgetter('eth'), **kwargs)
def filter_eth_src(self, addr, **kwargs):
assert isinstance(addr, (str, EthAddr))
return self.filter(lambda p: p.eth.src == addr, **kwargs)
def filter_ipv6_dst(self, addr, **kwargs):
assert isinstance(addr, (str, Ipv6Addr))
return self.filter(lambda p: p.ipv6.dst == addr, **kwargs)
def filter_ipv6_2dsts(self, addr1, addr2, **kwargs):
assert isinstance(addr1, (str, Ipv6Addr))
assert isinstance(addr2, (str, Ipv6Addr))
return self.filter(lambda p: p.ipv6.dst == addr1 or p.ipv6.dst == addr2, **kwargs)
def filter_ipv6_src_dst(self, src_addr, dst_addr, **kwargs):
assert isinstance(src_addr, (str, Ipv6Addr))
assert isinstance(dst_addr, (str, Ipv6Addr))
return self.filter(lambda p: p.ipv6.src == src_addr and p.ipv6.dst == dst_addr, **kwargs)
def filter_LLATNMA(self, **kwargs):
return self.filter(lambda p: p.ipv6.dst == consts.LINK_LOCAL_All_THREAD_NODES_MULTICAST_ADDRESS, **kwargs)
def filter_RLANMA(self, **kwargs):
return self.filter(lambda p: p.ipv6.dst == consts.REALM_LOCAL_ALL_NODES_ADDRESS, **kwargs)
def filter_RLARMA(self, **kwargs):
return self.filter(lambda p: p.ipv6.dst == consts.REALM_LOCAL_ALL_ROUTERS_ADDRESS, **kwargs)
def filter_RLATNMA(self, **kwargs):
return self.filter(lambda p: p.ipv6.dst == consts.REALM_LOCAL_All_THREAD_NODES_MULTICAST_ADDRESS, **kwargs)
def filter_LLANMA(self, **kwargs):
return self.filter(lambda p: p.ipv6.dst == consts.LINK_LOCAL_ALL_NODES_MULTICAST_ADDRESS, **kwargs)
def filter_LLABMA(self, **kwargs):
return self.filter(lambda p: p.ipv6.dst == consts.LINK_LOCAL_ALL_BBRS_MULTICAST_ADDRESS, **kwargs)
def filter_LLARMA(self, **kwargs):
return self.filter(lambda p: p.ipv6.dst == consts.LINK_LOCAL_ALL_ROUTERS_MULTICAST_ADDRESS, **kwargs)
def filter_AMPLFMA(self, mpl_seed_id: Union[int, Ipv6Addr] = None, **kwargs):
f = self.filter(lambda p: p.ipv6.dst == consts.ALL_MPL_FORWARDERS_MA, **kwargs)
if mpl_seed_id is not None:
if isinstance(mpl_seed_id, int):
mpl_seed_id = Bytes([mpl_seed_id >> 8, mpl_seed_id & 0xFF])
f = f.filter(lambda p: p.ipv6.opt.mpl.seed_id == mpl_seed_id)
else:
rloc = mpl_seed_id
rloc16 = bytes(rloc[-2:])
f = f.filter(lambda p: (p.ipv6.src == rloc and p.ipv6.opt.mpl.flag.s == 0) or
(p.ipv6.src != rloc and p.ipv6.opt.mpl.flag.s == 1 and p.ipv6.opt.mpl.seed_id == rloc16))
return f
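    # Sketch of the two seed-id forms accepted above: an int is matched as the
    # 2-byte MPL seed-id option, while an RLOC address accepts packets either
    # sourced from the RLOC itself (S=0) or forwarded with its RLOC16 carried
    # as the seed id (S=1):
    #
    #     pkts.filter_AMPLFMA(mpl_seed_id=0x4400)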
def filter_mle(self, **kwargs):
return self.filter(attrgetter('mle'), **kwargs)
def filter_wpan_cmd(self, cmd, **kwargs):
assert isinstance(cmd, int), cmd
return self.filter(lambda p: p.wpan.cmd == cmd, **kwargs)
def filter_mle_cmd(self, cmd, **kwargs):
assert isinstance(cmd, int), cmd
return self.filter(lambda p: p.mle.cmd == cmd, **kwargs)
def filter_mle_cmd2(self, cmd1, cmd2, **kwargs):
assert isinstance(cmd1, int), cmd1
assert isinstance(cmd2, int), cmd2
return self.filter(lambda p: p.mle.cmd == cmd1 or p.mle.cmd == cmd2, **kwargs)
def filter_mle_has_tlv(self, *tlv_types, **kwargs):
return self.filter(lambda p: set(tlv_types) <= set(p.mle.tlv.type), **kwargs)
def filter_icmpv6(self, **kwargs):
return self.filter(attrgetter('icmpv6'), **kwargs)
def filter_icmpv6_nd_ns(self, target_address: Ipv6Addr):
return self.filter(lambda p:
(p.icmpv6.is_neighbor_solicitation and p.icmpv6.nd.ns.target_address == target_address))
def filter_icmpv6_nd_na(self, target_address: Ipv6Addr):
return self.filter(lambda p:
(p.icmpv6.is_neighbor_advertisement and p.icmpv6.nd.na.target_address == target_address))
def filter_icmpv6_nd_ra(self):
return self.filter(lambda p: p.icmpv6.is_router_advertisement)
def filter_thread_unallowed_icmpv6(self):
return self.filter('wpan and icmpv6 and icmpv6.type not in {THREAD_ALLOWED_ICMPV6_TYPES}',
THREAD_ALLOWED_ICMPV6_TYPES=THREAD_ALLOWED_ICMPV6_TYPES)
def filter_has_bbr_dataset(self):
return self.filter("""
thread_nwd.tlv.server.has('16')
and thread_nwd.tlv.service.s_data.seqno is not null
and thread_nwd.tlv.service.s_data.rrdelay is not null
and thread_nwd.tlv.service.s_data.mlrtimeout is not null
""")
|
|
from configparser import ConfigParser
from unittest import mock
import time
import json
import asyncio
import pytest
from zake.fake_client import FakeClient
from kazoo.client import KazooState
import kazoo.exceptions
from zgres import sync
from . import FakeSleeper
class MyFakeClient(FakeClient):
@property
def client_id(self):
return (self.session_id, 'abc')
@pytest.mark.asyncio
async def test_functional(deadman_plugin):
"""Test as much of the whole stack of zgres-sync as we can."""
config = {
'sync': {
'plugins': 'zgres#zookeeper zgres#mock-subscriber'},
'zookeeper': {
'connection_string': 'example.org:2181',
'path': '/mypath',
}
}
deadmanA = deadman_plugin('A')
deadmanB = deadman_plugin('B')
deadmanA.dcs_set_database_identifier('1234')
deadmanA.dcs_set_conn_info(dict(answer=42))
deadmanA.dcs_lock('master')
deadmanB.dcs_set_state(dict(mystate='lamentable'))
ev = asyncio.Event()
async def next_ev():
await ev.wait()
ev.clear()
def set_ev(*args, **kw):
ev.set()
for i in range(10):
asyncio.get_event_loop().call_later(4 + 0.1 * i, set_ev)
from . import MockSyncPlugin as RealMockSyncPlugin
with mock.patch('zgres.tests.MockSyncPlugin') as MockSyncPlugin:
        # sigh, FAR too complex
proxy, p = RealMockSyncPlugin('', '')
p.databases.side_effect = set_ev
p.state.side_effect = set_ev
p.masters.side_effect = set_ev
p.conn_info.side_effect = set_ev
MockSyncPlugin.return_value = proxy
with mock.patch('zgres.zookeeper.KazooClient') as KazooClient:
KazooClient.return_value = MyFakeClient(storage=deadmanA._storage._zk._storage)
app = sync.SyncApp(config)
for i in range(3):
await next_ev()
deadmanA.dcs_set_state(dict(mystate='great!'))
deadmanB.dcs_set_conn_info(dict(answer=43))
deadmanA.dcs_unlock('master')
for i in range(3):
await next_ev()
# the plugin was called twice, once with the original data, and once with new data
assert p.conn_info.mock_calls == [
mock.call({'mygroup': {'A': {'answer': 42}}}),
mock.call({'mygroup': {'A': {'answer': 42}, 'B': {'answer': 43}}})]
p.state.assert_has_calls(
[mock.call({'mygroup': {'B': {'mystate': 'lamentable'}}}),
mock.call({'mygroup': {'B': {'mystate': 'lamentable'}, 'A': {'mystate': 'great!'}}})]
)
p.masters.assert_has_calls(
[mock.call({'mygroup': 'A'}),
mock.call({})]
)
p.databases.assert_has_calls([mock.call(['mygroup'])])
@pytest.fixture
def storage(request):
from ..zookeeper import ZookeeperStorage
s = ZookeeperStorage('connection_string', '/path')
zk = MyFakeClient()
with mock.patch('zgres.zookeeper.KazooClient') as KazooClient:
KazooClient.return_value = zk
s.dcs_connect()
return s
@pytest.fixture
def deadman_plugin(request):
from ..deadman import App
storage = None
def factory(my_id='42'):
nonlocal storage
app = mock.Mock(spec_set=App)
app.my_id = my_id
app.restart._is_coroutine = False
app.config = dict(
zookeeper=dict(
connection_string='localhost:1234',
path='/mypath',
group='mygroup',
))
app.master_lock_changed._is_coroutine = False # otherwise tests fail :(
from ..zookeeper import ZooKeeperDeadmanPlugin
plugin = ZooKeeperDeadmanPlugin('zgres#zookeeper', app)
zk = MyFakeClient(storage=storage)
if storage is None:
# all plugins created by this factory SHARE a storage
storage = zk.storage
with mock.patch('zgres.zookeeper.KazooClient') as KazooClient:
KazooClient.return_value = zk
plugin.initialize()
return plugin
return factory
@pytest.mark.asyncio
async def test_disconnect_should_not_restart(deadman_plugin):
plugin = deadman_plugin()
await asyncio.sleep(0.001)
plugin.dcs_disconnect()
await asyncio.sleep(0.001)
assert plugin.app.mock_calls == [] # restart was not called
@pytest.mark.asyncio
async def test_session_suspended(deadman_plugin):
plugin = deadman_plugin()
await asyncio.sleep(0.001)
# suspend the connection
plugin.logger.warn = mock.Mock()
plugin._storage._zk._fire_state_change(KazooState.SUSPENDED)
await asyncio.sleep(0.001)
plugin.logger.warn.assert_called_once_with('zookeeper connection state: SUSPENDED')
assert plugin._dcs_state == 'SUSPENDED'
assert plugin.app.mock_calls == []
@pytest.mark.asyncio
async def test_session_suspended_but_reconnect_in_5_seconds(deadman_plugin):
with mock.patch('zgres.zookeeper.sleep') as sleep:
# yeah, tests with firewalls show that this really does happen
plugin = deadman_plugin()
await asyncio.sleep(0.001)
sleeper = FakeSleeper(max_loops=1000)
sleep.side_effect = sleeper
# suspend the connection
plugin.logger.warn = mock.Mock()
plugin._storage._zk._fire_state_change(KazooState.SUSPENDED)
await sleeper.next()
await sleeper.next()
await sleeper.next()
await sleeper.next()
ntasks = len(asyncio.Task.all_tasks())
plugin._storage._zk._fire_state_change(KazooState.CONNECTED)
time.sleep(0.001)
await asyncio.sleep(0.001)
assert ntasks - len(asyncio.Task.all_tasks()) == 1 # the _check_state task finished
assert plugin.app.mock_calls == []
assert plugin.logger.warn.mock_calls == [
mock.call('zookeeper connection state: SUSPENDED'),
mock.call('zookeeper connection state: CONNECTED'),
]
assert plugin._dcs_state == KazooState.CONNECTED
@pytest.mark.asyncio
async def test_session_suspended_but_never_reconnects_or_is_lost(deadman_plugin):
with mock.patch('zgres.zookeeper.sleep') as sleep:
# yeah, tests with firewalls show that this really does happen
plugin = deadman_plugin()
await asyncio.sleep(0.001)
sleeper = FakeSleeper(max_loops=25)
sleep.side_effect = sleeper
def finish(timeout):
sleeper.finish()
plugin.app.restart.side_effect = finish
# suspend the connection
plugin.logger.warn = mock.Mock()
plugin._storage._zk._fire_state_change(KazooState.SUSPENDED)
await sleeper.wait()
assert plugin.app.mock_calls == [
mock.call.restart(0)
]
assert plugin.logger.warn.mock_calls == [
mock.call('zookeeper connection state: SUSPENDED'),
]
assert plugin._dcs_state == KazooState.SUSPENDED
@pytest.mark.asyncio
async def test_session_lost(deadman_plugin):
plugin = deadman_plugin()
await asyncio.sleep(0.001)
plugin.app.reset_mock()
plugin._storage._zk._fire_state_change(KazooState.LOST)
await asyncio.sleep(0.001)
assert plugin._dcs_state == 'LOST'
assert plugin.app.mock_calls == [
mock.call.restart(0)
]
@pytest.mark.asyncio
async def test_notifications_of_state_change_where_id_has_a_dash(deadman_plugin):
pluginA = deadman_plugin('i-9b61354f')
finished = asyncio.Event()
asyncio.get_event_loop().call_later(5, finished.set)
callback = mock.Mock()
callback.side_effect = lambda *args, **kw: finished.set()
pluginA.dcs_watch(None, callback, None)
pluginA.dcs_set_state(dict(name='A'))
await finished.wait()
assert callback.mock_calls == [
mock.call({'i-9b61354f': {'name': 'A'}}),
]
@pytest.mark.asyncio
async def test_groups_are_independent(deadman_plugin):
plugin = deadman_plugin
pluginA, pluginB, pluginC = plugin('A'), plugin('B'), plugin('C')
pluginC._group_name = 'another'
# pluginB watches state, plugin A doesn't
pluginA.dcs_watch(None, None, None)
callbackB = mock.Mock()
pluginB.dcs_watch(None, callbackB, None)
callbackC = mock.Mock()
pluginC.dcs_watch(None, callbackC, None)
# set state from both plugins
pluginA.dcs_set_state(dict(name='A'))
pluginB.dcs_set_state(dict(name='B'))
pluginC.dcs_set_state(dict(name='C'))
await asyncio.sleep(0.005)
# pluginB gets events, but ONLY from plugins in its group
# i.e. c is ignored
# NOTE: we test only the LAST call as state for A and B may come out-of-order
# but the final, rest state, should be correct
assert callbackB.mock_calls[-1] == mock.call({'A': {'name': 'A'}, 'B': {'name': 'B'}})
    # C got its own event
assert callbackC.mock_calls == [
mock.call({'C': {'name': 'C'}}),
]
# We can get all info
assert sorted(pluginA.dcs_list_state()) == sorted(pluginB.dcs_list_state())
assert sorted(pluginA.dcs_list_state()) == [('A', {'name': 'A'}), ('B', {'name': 'B'})]
assert sorted(pluginC.dcs_list_state()) == [('C', {'name': 'C'})]
def test_errorlog_after_second_takeover(deadman_plugin):
plugin = deadman_plugin
# 2 servers with the same id should NOT happen in real life...
pluginA1 = plugin(my_id='A')
pluginA2 = plugin(my_id='A')
pluginA2.logger = mock.Mock()
# now they start to fight
pluginA1.dcs_set_state(dict(server=41))
pluginA2.dcs_set_state(dict(server=42))
pluginA1.dcs_set_state(dict(server=43))
# this is the second time plugin2 is taking over
# We should log an error message now
assert not pluginA2.logger.error.called
pluginA2.dcs_set_state(dict(server=44))
assert pluginA2.logger.error.called
# though the state is still set
assert sorted(pluginA1.dcs_list_state()) == [('A', dict(server=44))]
def test_storage_get_database_identifiers(storage):
assert storage.dcs_get_database_identifiers() == {}
storage.dcs_set_database_identifier('db1', '124')
assert storage.dcs_get_database_identifiers() == {'db1': 124}
def mock_verify(plugin, side_effect):
# cause the verify() function to fail in zake, thus all api calls error
verify = mock.Mock()
verify.side_effect = side_effect
plugin._storage.connection.verify = verify
plugin._kazoo_retry.sleep_func = lambda x: None # speed up tests by not sleeping
return verify
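# Sketch of what mock_verify enables: per the comment above, the fake (zake)
# client calls connection.verify() before each API call, so a side-effect list
# scripts an exact failure sequence, e.g.
#
#     verify = mock_verify(plugin, [kazoo.exceptions.ConnectionLoss(), None])
#     plugin.dcs_set_state({'name': 'A'})  # first attempt fails, the retry succeeds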
@pytest.mark.asyncio
async def test_retry_on_connection_loss(deadman_plugin):
    # connection loss is a temporary exception which seems to happen after a
    # re-connection (but not session expiration) in zookeeper. We just retry until it works.
plugin = deadman_plugin('A')
verify = mock_verify(plugin, [
kazoo.exceptions.ConnectionLoss(),
kazoo.exceptions.ConnectionLoss(),
kazoo.exceptions.ConnectionLoss(),
kazoo.exceptions.ConnectionLoss(),
kazoo.exceptions.ConnectionLoss(),
None,
None])
    # set state from the plugin
plugin.dcs_set_state(dict(name='A'))
await asyncio.sleep(0.001)
assert plugin.app.mock_calls == []
assert verify.call_count > 4
@pytest.mark.asyncio
async def test_retry_NO_retry_on_session_expired(deadman_plugin):
    # session expiration, unlike connection loss, is permanent: it is NOT
    # retried; the error propagates and the app gets restarted instead.
plugin = deadman_plugin('A')
verify = mock_verify(plugin, [kazoo.exceptions.SessionExpiredError()])
    # set state from the plugin
with pytest.raises(kazoo.exceptions.SessionExpiredError):
plugin.dcs_set_state(dict(name='A'))
await asyncio.sleep(0.001)
assert plugin.app.mock_calls == [
mock.call.restart(0)
]
@pytest.mark.asyncio
async def test_retry_with_random_exception(deadman_plugin):
# connection loss is a temporary exception which seems to happen after a re-connection
# (but not session expiration)in zookeeper. We just retry that till it works.
plugin = deadman_plugin('A')
class MyException(Exception):
pass
verify = mock_verify(plugin, [MyException()])
    # set state from the plugin
with pytest.raises(MyException):
plugin.dcs_set_state(dict(name='A'))
await asyncio.sleep(0.001)
assert plugin.app.mock_calls == []
import time as ttt
@pytest.mark.asyncio
async def test_retry_deadline(deadman_plugin):
with mock.patch('time.time') as time:
plugin = deadman_plugin('A')
time.return_value = 120
print(ttt.time(), time())
def my_side_effect():
time.return_value = 240
raise kazoo.exceptions.ConnectionLoss()
verify = mock_verify(plugin, my_side_effect)
        # set state from the plugin
with pytest.raises(kazoo.retry.RetryFailedError) as e:
plugin.dcs_set_state(dict(name='A'))
assert e.value.args[0] == "Exceeded retry deadline"
await asyncio.sleep(0.001)
assert plugin.app.mock_calls == []
@pytest.mark.asyncio
async def test_retry_list_all_states(deadman_plugin):
    # dcs_list_state goes through the same retry logic, so transient
    # connection losses are retried until the listing succeeds.
plugin = deadman_plugin('A')
plugin.dcs_set_state(dict(name='A'))
verify = mock_verify(plugin, [
kazoo.exceptions.ConnectionLoss(),
kazoo.exceptions.ConnectionLoss(),
kazoo.exceptions.ConnectionLoss(),
None,
None,
None,
None])
    # list all states (retried under the hood)
assert list(plugin.dcs_list_state()) == [('A', {'name': 'A'})]
await asyncio.sleep(0.001)
assert plugin.app.mock_calls == []
|
|
import datetime
from dateutil.tz import tzutc
acm_certificate_list = {
'CertificateSummaryList': [
{
'DomainName': 'my.any-domain.com',
'CertificateArn': 'arn:aws:acm:eu-west-2:012345678:certificate/01234567-abcd-0123-0123-abcdfe01234'
}
]
}
acm_result_find_certificate = {
'CertificateArn': 'arn:aws:acm:eu-west-2:012345678:certificate/01234567-abcd-0123-0123-abcdfe01234',
'CreatedAt': '2017-01-11T09:23:40+01:00',
'DomainName': 'my.any-domain.com',
'DomainValidationOptions': [
{
'DomainName': 'my.any-domain.com',
'ValidationDomain': 'any-domain.com',
'ValidationEmails': [
'hostmaster@any-domain.com',
'admin@any-domain.com',
'webmaster@any-domain.com',
'postmaster@any-domain.com',
'administrator@any-domain.com'
],
'ValidationStatus': 'SUCCESS'
}
],
'InUseBy': [],
'IssuedAt': '2017-01-11T09:25:15+01:00',
'Issuer': 'Amazon',
'KeyAlgorithm': 'RSA-2048',
'NotAfter': '2018-01-12T13:00:00+01:00',
'NotBefore': '2017-01-12T01:00:00+01:00',
'Serial': '00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff',
'SignatureAlgorithm': 'SHA256WITHRSA',
'Status': 'ISSUED',
'Subject': 'CN=my.any-domain.com',
'SubjectAlternativeNames': ['my.any-domain.com'],
'Type': 'AMAZON_ISSUED'
}
elbv2_describe_load_balancers = {
'LoadBalancers': [
{
'AvailabilityZones': [
{
'SubnetId': 'subnet-0123456',
'ZoneName': 'eu-west-2a'
},
{
'SubnetId': 'subnet-0123457',
'ZoneName': 'eu-west-2b'}
],
'CanonicalHostedZoneId': 'ZHURV9DERC5T8',
'CreatedTime': datetime.datetime(2017, 1, 12, 8, 25, 11, 840000, tzinfo=tzutc()),
'DNSName': 'internal-my-elbv2-0123456.eu-west-2.elb.amazonaws.com',
'IpAddressType': 'ipv4',
'LoadBalancerArn': 'arn:aws:elasticloadbalancing:eu-west-2:01234567890:loadbalancer/app/my-elbv2/9987acf27',
'LoadBalancerName': 'my-elbv2',
'Scheme': 'internal',
'SecurityGroups': ['sg-02232883'],
'State': {'Code': 'active'},
'Type': 'application',
'VpcId': 'vpc-9839873'}
]
}
elbv2_describe_load_balancer_attributes = {
'Attributes': [
{'Key': 'access_logs.s3.bucket', 'Value': 'bucket'},
{'Key': 'deletion_protection.enabled', 'Value': 'false'},
{'Key': 'access_logs.s3.prefix', 'Value': 'prefix'},
{'Key': 'idle_timeout.timeout_seconds', 'Value': '60'},
{'Key': 'access_logs.s3.enabled', 'Value': 'false'}
],
}
elbv2_describe_listeners = {
'Listeners': [
{
'DefaultActions': [
{
'TargetGroupArn': 'arn:aws:elasticloadbalancing:eu-west-2:12345679012:targetgroup/my-app-tg/0bd28872872',
'Type': 'forward'
}
],
'ListenerArn': 'arn:aws:elasticloadbalancing:eu-west-2:01234567890:listener/app/my-elbv2/9b54/2ab1',
'LoadBalancerArn': 'arn:aws:elasticloadbalancing:eu-west-2:01234567890:loadbalancer/app/my-elbv2/9987acf27',
'Port': 8080,
'Protocol': 'HTTP'
}
]
}
elbv2_describe_target_groups = {
'TargetGroups': [
{
'HealthCheckIntervalSeconds': 30,
'HealthCheckPath': '/',
'HealthCheckPort': 'traffic-port',
'HealthCheckProtocol': 'HTTP',
'HealthCheckTimeoutSeconds': 5,
'HealthyThresholdCount': 5,
'LoadBalancerArns': [
'arn:aws:elasticloadbalancing:eu-west-2:01234567890:loadbalancer/app/my-elbv2/9987acf27'
],
'Matcher': {'HttpCode': '200'},
'Port': 8080,
'Protocol': 'HTTP',
'TargetGroupArn': 'arn:aws:elasticloadbalancing:eu-west-2:12345679012:targetgroup/my-app-tg/0bd28872872',
'TargetGroupName': 'my-app-tg',
'TargetType': 'instance',
'UnhealthyThresholdCount': 2,
'VpcId': 'vpc-9839873'
}
]
}
elbv2_describe_target_group_attributes = {
'Attributes': [
{'Key': 'stickiness.enabled', 'Value': 'true'},
{'Key': 'deregistration_delay.timeout_seconds', 'Value': '300'},
{'Key': 'stickiness.type', 'Value': 'lb_cookie'},
{'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': '86400'}
],
}
elbv2_describe_tags = {
'TagDescriptions': [
{
'Tags': [
{'Key': 'Environment', 'Value': 'dev'},
{'Key': 'Name', 'Value': 'my-elbv2'},
]
}
]
}
elbv2_result = {
'LoadBalancers': [
{
'Attributes': [
{'Key': 'access_logs.s3.bucket', 'Value': 'bucket'},
{'Key': 'deletion_protection.enabled', 'Value': 'false'},
{'Key': 'access_logs.s3.prefix', 'Value': 'prefix'},
{'Key': 'idle_timeout.timeout_seconds', 'Value': '60'},
{'Key': 'access_logs.s3.enabled', 'Value': 'false'}
],
'AvailabilityZones': [
{'SubnetId': 'subnet-0123456', 'ZoneName': 'eu-west-2a'},
{'SubnetId': 'subnet-0123457', 'ZoneName': 'eu-west-2b'}
],
'CanonicalHostedZoneId': 'ZHURV9DERC5T8',
'CreatedTime': datetime.datetime(2017, 1, 12, 8, 25, 11, 840000, tzinfo=tzutc()),
'DNSName': 'internal-my-elbv2-0123456.eu-west-2.elb.amazonaws.com',
'IpAddressType': 'ipv4',
'Listeners': [
{
'DefaultActions': [
{
'TargetGroupArn': 'arn:aws:elasticloadbalancing:eu-west-2:12345679012:targetgroup/my-app-tg/0bd28872872',
'Type': 'forward'
}
],
'ListenerArn': 'arn:aws:elasticloadbalancing:eu-west-2:01234567890:listener/app/my-elbv2/9b54/2ab1',
'LoadBalancerArn': 'arn:aws:elasticloadbalancing:eu-west-2:01234567890:loadbalancer/app/my-elbv2/9987acf27',
'Port': 8080,
'Protocol': 'HTTP'
}
],
'LoadBalancerArn': 'arn:aws:elasticloadbalancing:eu-west-2:01234567890:loadbalancer/app/my-elbv2/9987acf27',
'LoadBalancerName': 'my-elbv2',
'Scheme': 'internal',
'SecurityGroups': ['sg-02232883'],
'State': {'Code': 'active'},
'TargetGroups': [
{
'Attributes': [
{'Key': 'stickiness.enabled', 'Value': 'true'},
{'Key': 'deregistration_delay.timeout_seconds', 'Value': '300'},
{'Key': 'stickiness.type', 'Value': 'lb_cookie'},
{'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': '86400'}
],
'HealthCheckIntervalSeconds': 30,
'HealthCheckPath': '/',
'HealthCheckPort': 'traffic-port',
'HealthCheckProtocol': 'HTTP',
'HealthCheckTimeoutSeconds': 5,
'HealthyThresholdCount': 5,
'LoadBalancerArns': [
'arn:aws:elasticloadbalancing:eu-west-2:01234567890:loadbalancer/app/my-elbv2/9987acf27'
],
'Matcher': {'HttpCode': '200'},
'Port': 8080,
'Protocol': 'HTTP',
'TargetGroupArn': 'arn:aws:elasticloadbalancing:eu-west-2:12345679012:targetgroup/my-app-tg/0bd28872872',
'TargetGroupName': 'my-app-tg',
'TargetType': 'instance',
'UnhealthyThresholdCount': 2,
'VpcId': 'vpc-9839873'
}
],
'Type': 'application',
'VpcId': 'vpc-9839873',
'Tags': [
{'Key': 'Environment', 'Value': 'dev'},
{'Key': 'Name', 'Value': 'my-elbv2'},
]
},
]
}
elb_describe_load_balancers = {
'LoadBalancerDescriptions': [
{
'Subnets': ['subnet-0123456', 'subnet-0123457'],
'CanonicalHostedZoneNameID': 'ZABCDEFG',
'VPCId': 'vpc-0123456',
'ListenerDescriptions': [
{
'Listener': {
'InstancePort': 443,
'LoadBalancerPort': 443,
'Protocol': 'TCP',
'InstanceProtocol': 'TCP'
},
'PolicyNames': []
}
],
'HealthCheck': {
'HealthyThreshold': 2,
'Interval': 30,
'Target': 'HTTPS:443',
'Timeout': 5,
'UnhealthyThreshold': 2
},
'BackendServerDescriptions': [],
'Instances': [
{'InstanceId': 'i-0123456'}
],
'DNSName': 'internal-my-elb-0123456.eu-west-2.elb.amazonaws.com',
'SecurityGroups': ['sg-0123456'],
'Policies': {
'LBCookieStickinessPolicies': [],
'AppCookieStickinessPolicies': [],
'OtherPolicies': []
},
'LoadBalancerName': 'my-elb',
'CreatedTime': '2018-04-24T21:44:24.670Z',
'AvailabilityZones': [
'eu-west-2a',
'eu-west-2b'
],
'Scheme': 'internal',
'SourceSecurityGroup': {
'OwnerAlias': '01234567890',
'GroupName': 'my.example.com'
},
}
]
}
elb_describe_tags = {
'TagDescriptions': [
{
'Tags': [
{'Key': 'Environment', 'Value': 'dev'},
{'Key': 'Name', 'Value': 'my-elb'},
]
}
]
}
elb_result = {
'LoadBalancerDescriptions': [
{
'Subnets': ['subnet-0123456', 'subnet-0123457'],
'CanonicalHostedZoneNameID': 'ZABCDEFG',
'VPCId': 'vpc-0123456',
'ListenerDescriptions': [
{
'Listener': {
'InstancePort': 443,
'LoadBalancerPort': 443,
'Protocol': 'TCP',
'InstanceProtocol': 'TCP'
},
'PolicyNames': []
}
],
'HealthCheck': {
'HealthyThreshold': 2,
'Interval': 30,
'Target': 'HTTPS:443',
'Timeout': 5,
'UnhealthyThreshold': 2
},
'BackendServerDescriptions': [],
'Instances': [
{'InstanceId': 'i-0123456'}
],
'DNSName': 'internal-my-elb-0123456.eu-west-2.elb.amazonaws.com',
'SecurityGroups': ['sg-0123456'],
'Policies': {
'LBCookieStickinessPolicies': [],
'AppCookieStickinessPolicies': [],
'OtherPolicies': []
},
'LoadBalancerName': 'my-elb',
'CreatedTime': '2018-04-24T21:44:24.670Z',
'AvailabilityZones': [
'eu-west-2a',
'eu-west-2b'
],
'Scheme': 'internal',
'SourceSecurityGroup': {
'OwnerAlias': '01234567890',
'GroupName': 'my.example.com'
},
'Tags': [
{'Key': 'Environment', 'Value': 'dev'},
{'Key': 'Name', 'Value': 'my-elb'},
]
}
]
}
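# A minimal sketch (not part of the original fixtures) of how these canned
# payloads are typically wired into tests; the mocked client and the
# `collect_elb` function here are assumptions:
#
#     from unittest import mock
#     elb = mock.Mock()
#     elb.describe_load_balancers.return_value = elb_describe_load_balancers
#     elb.describe_tags.return_value = elb_describe_tags
#     assert collect_elb(elb) == elb_result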
|
|
__author__ = 'Tom Schaul, tom@idsia.ch'
from numpy import array, ndarray, isinf, isnan, isscalar
from numpy.random import randn
import logging
from pybrain.utilities import setAllArgs, abstractMethod, DivergenceError
from pybrain.rl.learners.directsearch.directsearch import DirectSearchLearner
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.rl.environments.functions.function import FunctionEnvironment
from pybrain.rl.environments.fitnessevaluator import FitnessEvaluator
from pybrain.rl.environments.functions.transformations import oppositeFunction
from pybrain.structure.evolvables.maskedmodule import MaskedModule
from pybrain.structure.evolvables.maskedparameters import MaskedParameters
from pybrain.structure.evolvables.topology import TopologyEvolvable
from pybrain.structure.modules.module import Module
class BlackBoxOptimizer(DirectSearchLearner):
""" The super-class for learning algorithms that treat the problem as a black box.
At each step they change the policy, and get a fitness value by invoking
the FitnessEvaluator (provided as first argument upon initialization).
Evaluable objects can be lists or arrays of continuous values (also wrapped in ParameterContainer)
or subclasses of Evolvable (that define its methods).
"""
    # some algorithms are designed for minimization (or maximization) only; those set one of these flags:
mustMinimize = False
mustMaximize = False
#: Is there a known value of sufficient fitness?
desiredEvaluation = None
#: Stopping criterion based on number of evaluations.
maxEvaluations = 1e6
#: Stopping criterion based on number of learning steps.
maxLearningSteps = None
#: dimension of the search space, if applicable
numParameters = None
#: Store all evaluations (in the ._allEvaluations list)?
storeAllEvaluations = False
#: Store all evaluated instances (in the ._allEvaluated list)?
storeAllEvaluated = False
# an optimizer can take different forms of evaluables, and depending on its
# needs, wrap them into a ParameterContainer (which is also an Evolvable)
# or unwrap them to act directly on the array of parameters (all ContinuousOptimizers)
_wasWrapped = False
_wasUnwrapped = False
_wasOpposed = False
listener = None
#: provide console output during learning
verbose = False
# some algorithms have a predetermined (minimal) number of
# evaluations they will perform during each learningStep:
batchSize = 1
def __init__(self, evaluator = None, initEvaluable = None, **kwargs):
""" The evaluator is any callable object (e.g. a lambda function).
Algorithm parameters can be set here if provided as keyword arguments. """
# set all algorithm-specific parameters in one go:
self.__minimize = None
self.__evaluator = None
setAllArgs(self, kwargs)
# bookkeeping
self.numEvaluations = 0
self.numLearningSteps = 0
if self.storeAllEvaluated:
self._allEvaluated = []
self._allEvaluations = []
elif self.storeAllEvaluations:
self._allEvaluations = []
if evaluator is not None:
self.setEvaluator(evaluator, initEvaluable)
def _getMinimize(self): return self.__minimize
def _setMinimize(self, flag):
""" Minimization vs. maximization: priority to algorithm requirements,
then evaluator, default = maximize."""
self.__minimize = flag
opp = False
if flag is True:
if self.mustMaximize:
opp = True
self.__minimize = False
if flag is False:
if self.mustMinimize:
opp = True
self.__minimize = True
if self.__evaluator is not None:
if opp is not self._wasOpposed:
self._flipDirection()
self._wasOpposed = opp
#: Minimize cost or maximize fitness? By default, all functions are maximized.
minimize = property(_getMinimize, _setMinimize)
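    # Summary of the flag interactions above (illustrative, not original code):
    #   requested minimize=True with mustMaximize set -> the opposed function is
    #       optimized (_wasOpposed), internally maximizing while the caller
    #       still sees a minimization problem;
    #   requested minimize=False with mustMinimize set -> the symmetric case;
    #   otherwise the requested direction is used as-is.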
def setEvaluator(self, evaluator, initEvaluable = None):
""" If not provided upon construction, the objective function can be given through this method.
If necessary, also provide an initial evaluable."""
# default settings, if provided by the evaluator:
if isinstance(evaluator, FitnessEvaluator):
if self.desiredEvaluation is None:
self.desiredEvaluation = evaluator.desiredValue
if self.minimize is None:
self.minimize = evaluator.toBeMinimized
# in some cases, we can deduce the dimension from the provided evaluator:
if isinstance(evaluator, FunctionEnvironment):
if self.numParameters is None:
self.numParameters = evaluator.xdim
            elif self.numParameters != evaluator.xdim:
raise ValueError("Parameter dimension mismatch: evaluator expects "+str(evaluator.xdim)\
+" but it was set to "+str(self.numParameters)+".")
# default: maximize
if self.minimize is None:
self.minimize = False
self.__evaluator = evaluator
if self._wasOpposed:
self._flipDirection()
        # set the starting point for optimization (as provided, or randomly)
self._setInitEvaluable(initEvaluable)
self.bestEvaluation = None
self._additionalInit()
self.bestEvaluable = self._initEvaluable
def _flipDirection(self):
self.__evaluator = oppositeFunction(self.__evaluator)
if self.desiredEvaluation is not None:
self.desiredEvaluation *= -1
def _additionalInit(self):
""" a method for subclasses that need additional initialization code but don't want to redefine __init__ """
def _setInitEvaluable(self, evaluable):
if evaluable is None:
# if there is no initial point specified, we start at one that's sampled
# normally around the origin.
if self.numParameters is not None:
evaluable = randn(self.numParameters)
else:
raise ValueError('Could not determine the dimensionality of the evaluator. '+\
'Please provide an initial search point.')
if isinstance(evaluable, list):
evaluable = array(evaluable)
# If the evaluable is provided as a list of numbers or as an array,
# we wrap it into a ParameterContainer.
if isinstance(evaluable, ndarray):
pc = ParameterContainer(len(evaluable))
pc._setParameters(evaluable)
self._wasWrapped = True
evaluable = pc
self._initEvaluable = evaluable
if isinstance(self._initEvaluable, ParameterContainer):
if self.numParameters is None:
self.numParameters = len(self._initEvaluable)
elif self.numParameters != len(self._initEvaluable):
raise ValueError("Parameter dimension mismatch: evaluator expects "+str(self.numParameters)\
+" but the evaluable has "+str(len(self._initEvaluable))+".")
def learn(self, additionalLearningSteps = None):
""" The main loop that does the learning. """
assert self.__evaluator is not None, "No evaluator has been set. Learning cannot start."
if additionalLearningSteps is not None:
self.maxLearningSteps = self.numLearningSteps + additionalLearningSteps
while not self._stoppingCriterion():
try:
self._learnStep()
self._notify()
self.numLearningSteps += 1
except DivergenceError:
logging.warning("Algorithm diverged. Stopped after "+str(self.numLearningSteps)+" learning steps.")
break
return self._bestFound()
def _learnStep(self):
""" The core method to be implemented by all subclasses. """
abstractMethod()
def _bestFound(self):
""" return the best found evaluable and its associated fitness. """
bestE = self.bestEvaluable.params.copy() if self._wasWrapped else self.bestEvaluable
if self._wasOpposed and isscalar(self.bestEvaluation):
bestF = -self.bestEvaluation
else:
bestF = self.bestEvaluation
return bestE, bestF
def _oneEvaluation(self, evaluable):
""" This method should be called by all optimizers for producing an evaluation. """
if self._wasUnwrapped:
self.wrappingEvaluable._setParameters(evaluable)
res = self.__evaluator(self.wrappingEvaluable)
elif self._wasWrapped:
res = self.__evaluator(evaluable.params)
else:
res = self.__evaluator(evaluable)
if isscalar(res):
# detect numerical instability
if isnan(res) or isinf(res):
raise DivergenceError
# always keep track of the best
if (self.numEvaluations == 0
or self.bestEvaluation is None
or (self.minimize and res <= self.bestEvaluation)
or (not self.minimize and res >= self.bestEvaluation)):
self.bestEvaluation = res
self.bestEvaluable = evaluable.copy()
self.numEvaluations += 1
# if desired, also keep track of all evaluables and/or their fitness.
if self.storeAllEvaluated:
if self._wasUnwrapped:
self._allEvaluated.append(self.wrappingEvaluable.copy())
elif self._wasWrapped:
self._allEvaluated.append(evaluable.params.copy())
else:
self._allEvaluated.append(evaluable.copy())
if self.storeAllEvaluations:
if self._wasOpposed and isscalar(res):
self._allEvaluations.append(-res)
else:
self._allEvaluations.append(res)
return res
def _stoppingCriterion(self):
if self.maxEvaluations is not None and self.numEvaluations+self.batchSize > self.maxEvaluations:
return True
if self.desiredEvaluation is not None and self.bestEvaluation is not None and isscalar(self.bestEvaluation):
if ((self.minimize and self.bestEvaluation <= self.desiredEvaluation)
or (not self.minimize and self.bestEvaluation >= self.desiredEvaluation)):
return True
if self.maxLearningSteps is not None and self.numLearningSteps > self.maxLearningSteps:
return True
return False
def _notify(self):
""" Provide some feedback during the run. """
if self.verbose:
            print('Step:', self.numLearningSteps, 'best:', self.bestEvaluation)
if self.listener is not None:
self.listener(self.bestEvaluable, self.bestEvaluation)
class ContinuousOptimizer(BlackBoxOptimizer):
""" A more restricted class of black-box optimization algorithms
that assume the parameters to be necessarily an array of continuous values
(which can be wrapped in a ParameterContainer). """
def _setInitEvaluable(self, evaluable):
""" If the parameters are wrapped, we keep track of the wrapper explicitly. """
if isinstance(evaluable, ParameterContainer):
self.wrappingEvaluable = evaluable.copy()
self._wasUnwrapped = True
elif not (evaluable is None
or isinstance(evaluable, list)
or isinstance(evaluable, ndarray)):
raise ValueError('Continuous optimization algorithms require a list, array or'+\
' ParameterContainer as evaluable.')
BlackBoxOptimizer._setInitEvaluable(self, evaluable)
self._wasWrapped = False
self._initEvaluable = self._initEvaluable.params.copy()
def _bestFound(self):
""" return the best found evaluable and its associated fitness. """
bestE, bestF = BlackBoxOptimizer._bestFound(self)
if self._wasUnwrapped:
self.wrappingEvaluable._setParameters(bestE)
bestE = self.wrappingEvaluable.copy()
return bestE, bestF
class TopologyOptimizer(BlackBoxOptimizer):
""" A class of algorithms that changes the topology as well as the parameters.
It does not accept an arbitrary Evolvable as initial point, only a
ParameterContainer (or a continuous vector). """
def _setInitEvaluable(self, evaluable):
BlackBoxOptimizer._setInitEvaluable(self, evaluable)
# distinguish modules from parameter containers.
if not isinstance(evaluable, TopologyEvolvable):
if isinstance(evaluable, Module):
self._initEvaluable = MaskedModule(self._initEvaluable)
else:
self._initEvaluable = MaskedParameters(self._initEvaluable, returnZeros = True)
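# A minimal sketch (not part of PyBrain) of the subclass contract: _learnStep()
# proposes candidates and scores them through _oneEvaluation(), which maintains
# bestEvaluable/bestEvaluation and the stopping bookkeeping. RandomSearch below
# is a hypothetical example, not a shipped optimizer:
#
#     class RandomSearch(ContinuousOptimizer):
#         def _learnStep(self):
#             candidate = self.bestEvaluable + 0.1 * randn(self.numParameters)
#             self._oneEvaluation(candidate)
#
#     best, fitness = RandomSearch(lambda x: -(x ** 2).sum(),
#                                  randn(3), maxEvaluations=1000).learn()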
|
|
import gym
import tensorflow as tf
import tensorflow.contrib.slim as slim
import random
import numpy as np
import scipy.misc
import time, requests
import PIL
from tensorflow.core.framework import summary_pb2
def sendStatElastic(data, endpoint="http://35.187.182.237:9200/reinforce/games"):
data['step_time'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    try:
        requests.post(endpoint, json=data)
    except requests.RequestException:
        print("Elasticsearch exception")
def update_target_graph(from_scope, to_scope, tau=0.001):
from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
op_holder = []
for from_v,to_v in zip(from_vars, to_vars):
op_holder.append(tf.assign(to_v, from_v.value() * tau + to_v.value() * (1.0-tau)))
return op_holder
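# Soft ("Polyak") target update: each run of op_holder moves every target
# variable tau of the way toward its online counterpart. For example, with
# tau=0.001 a target weight of 0.0 tracking an online weight of 1.0 becomes
# 0.001, then ~0.002, keeping the target network slowly trailing the online one.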
def copy_target_graph(from_scope, to_scope):
from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
op_holder = []
for from_v,to_v in zip(from_vars, to_vars):
op_holder.append(tf.assign(to_v, from_v))
return op_holder
class QNetwork():
def __init__(self,h_size,action_size, img_size=84, learning_rate=0.00025, frame_count=4):
self.frame_in = tf.placeholder(tf.float32, [None, img_size * img_size * frame_count], name="frame_in")
img_in = tf.reshape(self.frame_in, [-1,img_size, img_size, frame_count])
conv1 = slim.convolution2d(scope="conv1",inputs=img_in, num_outputs=32, kernel_size=[8,8], stride=[4, 4], padding="VALID", biases_initializer=None)
conv2 = slim.convolution2d(scope="conv2",inputs=conv1, num_outputs=64, kernel_size=[4, 4], stride=[2, 2], padding="VALID", biases_initializer=None)
conv3 = slim.convolution2d(scope="conv3",inputs=conv2, num_outputs=64, kernel_size=[3, 3], stride=[1, 1], padding="VALID", biases_initializer=None)
conv4 = slim.convolution2d(scope="conv4",inputs=conv3, num_outputs=h_size, kernel_size=[7, 7], stride=[1, 1], padding="VALID", biases_initializer=None)
self.batch_size = tf.placeholder(tf.int32, [])
self.conv_flat = tf.reshape(slim.flatten(conv4), [self.batch_size, h_size])
with tf.variable_scope("va_split"):
stream_a, stream_v = tf.split(self.conv_flat,2,axis=1)
self.w_a = tf.Variable(tf.random_normal([h_size//2, action_size]))
self.w_v = tf.Variable(tf.random_normal([h_size//2, 1]))
advantage = tf.matmul(stream_a, self.w_a)
self.value = tf.matmul(stream_v, self.w_v)
# salience = tf.gradients(advantage, img_in)
with tf.variable_scope("predict"):
#self.q_out = value + tf.subtract(advantage, tf.reduce_mean(advantage, axis=1, keep_dims=True))
self.policy = tf.nn.softmax(advantage)
self.policy = tf.clip_by_value(self.policy, 1e-10, 1.0)
self.pred = tf.argmax(self.policy, axis=1)
self.target_q = tf.placeholder(tf.float32, [None])
self.actions = tf.placeholder(tf.int32, [None])
actions_onehot = tf.one_hot(self.actions, action_size,dtype=tf.float32)
Q = tf.reduce_sum(tf.multiply(self.q_out, actions_onehot), axis=1)
td_error = tf.square(self.target_q - Q)
loss = tf.reduce_mean(td_error)
#loss = tf.losses.huber_loss(self.target_q,Q)
self.update = tf.train.RMSPropOptimizer(learning_rate=learning_rate, momentum=0.0, decay=0.99).minimize(loss)
with tf.name_scope("summary"):
tf.summary.scalar("loss", loss)
tf.summary.scalar("mean_value", tf.reduce_mean(self.value))
tf.summary.scalar("max_advantage", tf.reduce_max(advantage))
tf.summary.scalar("min_advantage", tf.reduce_min(advantage))
tf.summary.scalar("mean_target_q", tf.reduce_mean(self.target_q))
tf.summary.scalar("mean_pred_q", tf.reduce_mean(self.q_out))
self.summary_op = tf.summary.merge_all()
def predict_act(self, frame_list, batch_size=1, session=None):
        feed_dict = {
            self.frame_in: frame_list,
            self.batch_size: batch_size
        }
        act, q_vals = session.run([self.pred, self.q_out], feed_dict=feed_dict)
return act, q_vals
def update_nn(self, in_frame, target_q_val, acts, batch_size, session, writer=None, step=None):
update_dict = {
self.frame_in: in_frame,
self.target_q: target_q_val,
self.actions: acts,
self.batch_size: batch_size
}
_, summ = session.run([self.update, self.summary_op], feed_dict=update_dict)
if writer is not None and step is not None:
writer.add_summary(summ, step)
class ExperienceBuffer():
def __init__(self, buffer_size=100000):
self.buffer = []
self.buffer_size = buffer_size
def add(self, experience):
self.buffer.append(experience)
if len(self.buffer) > self.buffer_size:
self.buffer.pop(0)
def sample(self, batch_size):
samples_ep = random.sample(self.buffer, batch_size)
return np.reshape(np.array(samples_ep), [batch_size, 5])
class FrameBuffer():
def __init__(self, buffer_size=4, frame_size=None):
self.buffer_size = buffer_size
self.frame_size = frame_size
        self._frames = [[0] * frame_size for _ in range(buffer_size)]
def add(self, frame):
self._frames.append(frame)
if len(self._frames) > self.buffer_size:
self._frames.pop(0)
def frames(self):
return np.reshape(np.array(self._frames), [1, self.frame_size * self.buffer_size])
def process_frame(f, last_f=None, height=84,width=84):
if last_f is not None:
f = np.amax(np.array([f, last_f]), axis=0)
f = scipy.misc.imresize(f, (height, width))
f = np.dot(f[...,:3], [0.299, 0.587, 0.114])/255.0
return np.reshape(f,[-1])
def clip_reward(r):
if r>0:
return 1.0
elif r<0:
return -1.0
return 0
def clip_reward_tan(r):
return np.arctan(r)
def discounted_reward(rs, gamma):
total = 0
for k in reversed(range(len(rs))):
total = total * gamma + rs[k]
return total
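# Worked example: iterating right-to-left accumulates sum_k rs[k] * gamma**k, so
#   discounted_reward([1, 0, 2], 0.5) == 1 * 0.5**0 + 0 * 0.5**1 + 2 * 0.5**2 == 1.5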
if __name__=="__main__":
game_name = 'SpaceInvaders-v0'
env = gym.make(game_name)
game_name += '-ddqn-cnn'
env.env.frameskip = 3
print("{} has skip-frame: {}".format(game_name, env.env.frameskip))
render = False
batch_size = 32 # num of experience traces
update_target_step = 10000
gamma = 0.99 # discount factor for reward
e_start = 1.0 # prob of random action
e_end = 0.1
    anneal_steps = 1000000 # steps from e_start to e_end
total_episodes = 90000
update_step = 4
tau = 0.001
exp_buffer_size = 100000
pre_train_steps = 10000 # steps of random action before training begins
logdir = "./checkpoints/ddqn-cnn"
h_size = 512
action_size = env.action_space.n
frame_count = 4
img_size = 84
    e_delta = (e_start - e_end) / anneal_steps
exp_buffer = ExperienceBuffer(buffer_size=exp_buffer_size)
scope_main = "main_qn"
scope_target = "target_qn"
# build graph
graph = tf.Graph()
with graph.as_default():
global_step = tf.get_variable("global_step",(),tf.int64,tf.zeros_initializer(), trainable=False)
inc_global_step = tf.assign(global_step, global_step.value()+1)
summ_writer = tf.summary.FileWriter(logdir)
with tf.variable_scope(scope_main):
main_qn = QNetwork(h_size, action_size)
with tf.variable_scope(scope_target):
target_qn = QNetwork(h_size, action_size)
update_qn_op = update_target_graph(scope_main, scope_target, tau)
copy_graph_op = copy_target_graph(scope_main, scope_target)
sv = tf.train.Supervisor(logdir=logdir, graph=graph, summary_op=None)
e = e_start
total_step = 0
with sv.managed_session() as sess:
step_value, _ = sess.run([global_step, copy_graph_op])
for ep in range(total_episodes):
frame_buffer = FrameBuffer(buffer_size=frame_count, frame_size=img_size*img_size)
s = env.reset()
s_frame = process_frame(s)
frame_buffer.add(s_frame)
ep_rewards = []
t_ep_start = time.time()
while True:
if render:
env.render()
begin_frames = frame_buffer.frames()
if np.random.rand() < e or total_step<pre_train_steps:
act = np.random.randint(0, action_size)
else:
act, _ = main_qn.predict_act(begin_frames, session=sess)
act = act[0]
s1, reward, done, _ = env.step(act)
r2 = clip_reward_tan(reward)
s1_frame = process_frame(s1)
frame_buffer.add(s1_frame)
next_frames = frame_buffer.frames()
exp_buffer.add(np.reshape(np.array([begin_frames, act, r2, next_frames, done]), [1,5]))
if total_step > pre_train_steps:
if e > e_end:
e -= e_delta
if total_step % update_step == 0:
# update model
train_batch = exp_buffer.sample(batch_size)
pred_act, _ = main_qn.predict_act(np.vstack(train_batch[:, 3]), batch_size, sess)
_, q_vals = target_qn.predict_act(np.vstack(train_batch[:, 3]), batch_size, sess)
end_multiplier = - (train_batch[:, 4] - 1)
double_q = q_vals[range(batch_size),pred_act]
target_q_val = train_batch[:, 2] + gamma * double_q * end_multiplier
in_frames = np.vstack(train_batch[:, 0])
acts = train_batch[:,1]
main_qn.update_nn(in_frames, target_q_val, acts, batch_size, sess, summ_writer, step_value)
step_value = sess.run(inc_global_step)
# register rand prob
summary = tf.Summary()
summary.value.add(tag='rand_prob', simple_value=e)
summ_writer.add_summary(summary, step_value)
summ_writer.flush()
#sess.run(update_qn_op)
s = s1
s_frame = s1_frame
ep_rewards.append(reward)
total_step += 1
if total_step % update_target_step == 0:
sess.run(copy_graph_op)
if done:
disc_r = discounted_reward(ep_rewards, gamma)
score = discounted_reward(ep_rewards, 1)
print("Episode {} finished in {} seconds with discounted reward {}, score {}, e {}, global step {}".format(ep, time.time()-t_ep_start, disc_r, score,e, step_value))
sendStatElastic({"discount_reward":disc_r, "score":score,"episode":ep,"rand_e_prob":e,'game_name':game_name})
break
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
import six
from six.moves.urllib import parse
from keystone.common import controller as common_controller
from keystone.common import dependency
from keystone.common import utils
from keystone import exception
from keystone.federation import constants as federation_constants
from keystone.i18n import _, _LE
from keystone import token
from keystone.token import provider
LOG = log.getLogger(__name__)
CONF = cfg.CONF
@dependency.requires('catalog_api', 'resource_api', 'assignment_api')
class V2TokenDataHelper(object):
"""Creates V2 token data."""
def v3_to_v2_token(self, v3_token_data):
"""Convert v3 token data into v2.0 token data.
This method expects a dictionary generated from
V3TokenDataHelper.get_token_data() and converts it to look like a v2.0
token dictionary.
:param v3_token_data: dictionary formatted for v3 tokens
:returns: dictionary formatted for v2 tokens
:raises keystone.exception.Unauthorized: If a specific token type is
not supported in v2.
"""
token_data = {}
# Build v2 token
v3_token = v3_token_data['token']
# NOTE(lbragstad): Version 2.0 tokens don't know about any domain other
# than the default domain specified in the configuration.
domain_id = v3_token.get('domain', {}).get('id')
if domain_id and CONF.identity.default_domain_id != domain_id:
msg = ('Unable to validate domain-scoped tokens outside of the '
'default domain')
raise exception.Unauthorized(msg)
token = {}
token['expires'] = v3_token.get('expires_at')
token['issued_at'] = v3_token.get('issued_at')
token['audit_ids'] = v3_token.get('audit_ids')
if 'project' in v3_token:
# v3 token_data does not contain all tenant attributes
tenant = self.resource_api.get_project(
v3_token['project']['id'])
# Drop domain specific fields since v2 calls are not domain-aware.
token['tenant'] = common_controller.V2Controller.v3_to_v2_project(
tenant)
token_data['token'] = token
# Build v2 user
v3_user = v3_token['user']
user = common_controller.V2Controller.v3_to_v2_user(v3_user)
if 'OS-TRUST:trust' in v3_token:
msg = ('Unable to validate trust-scoped tokens using version v2.0 '
'API.')
raise exception.Unauthorized(msg)
if 'OS-OAUTH1' in v3_token:
            msg = ('Unable to validate OAuth tokens using the version v2.0 '
'API.')
raise exception.Unauthorized(msg)
# Set user roles
user['roles'] = []
role_ids = []
for role in v3_token.get('roles', []):
role_ids.append(role.pop('id'))
user['roles'].append(role)
user['roles_links'] = []
token_data['user'] = user
# Get and build v2 service catalog
token_data['serviceCatalog'] = []
if 'tenant' in token:
catalog_ref = self.catalog_api.get_catalog(
user['id'], token['tenant']['id'])
if catalog_ref:
token_data['serviceCatalog'] = self.format_catalog(catalog_ref)
# Build v2 metadata
metadata = {}
metadata['roles'] = role_ids
# Setting is_admin to keep consistency in v2 response
metadata['is_admin'] = 0
token_data['metadata'] = metadata
return {'access': token_data}
@classmethod
def format_token(cls, token_ref, roles_ref=None, catalog_ref=None,
trust_ref=None):
audit_info = None
user_ref = token_ref['user']
metadata_ref = token_ref['metadata']
if roles_ref is None:
roles_ref = []
expires = token_ref.get('expires', provider.default_expire_time())
if expires is not None:
if not isinstance(expires, six.text_type):
expires = utils.isotime(expires)
token_data = token_ref.get('token_data')
if token_data:
token_audit = token_data.get(
'access', token_data).get('token', {}).get('audit_ids')
audit_info = token_audit
if audit_info is None:
audit_info = provider.audit_info(token_ref.get('parent_audit_id'))
o = {'access': {'token': {'id': token_ref['id'],
'expires': expires,
'issued_at': utils.isotime(subsecond=True),
'audit_ids': audit_info
},
'user': {'id': user_ref['id'],
'name': user_ref['name'],
'username': user_ref['name'],
'roles': roles_ref,
'roles_links': metadata_ref.get('roles_links',
[])
}
}
}
if 'bind' in token_ref:
o['access']['token']['bind'] = token_ref['bind']
if 'tenant' in token_ref and token_ref['tenant']:
token_ref['tenant']['enabled'] = True
o['access']['token']['tenant'] = token_ref['tenant']
if catalog_ref is not None:
o['access']['serviceCatalog'] = V2TokenDataHelper.format_catalog(
catalog_ref)
if metadata_ref:
if 'is_admin' in metadata_ref:
o['access']['metadata'] = {'is_admin':
metadata_ref['is_admin']}
else:
o['access']['metadata'] = {'is_admin': 0}
if 'roles' in metadata_ref:
o['access']['metadata']['roles'] = metadata_ref['roles']
if CONF.trust.enabled and trust_ref:
o['access']['trust'] = {'trustee_user_id':
trust_ref['trustee_user_id'],
'id': trust_ref['id'],
'trustor_user_id':
trust_ref['trustor_user_id'],
'impersonation':
trust_ref['impersonation']
}
return o
@classmethod
def format_catalog(cls, catalog_ref):
"""Munge catalogs from internal to output format.
Internal catalogs look like::
          {$REGION: {
              $SERVICE: {
                  $key1: $value1,
                  ...
              }
          }}
The legacy api wants them to look like::
[{'name': $SERVICE[name],
'type': $SERVICE,
'endpoints': [{
'tenantId': $tenant_id,
...
'region': $REGION,
}],
'endpoints_links': [],
}]
"""
if not catalog_ref:
return []
services = {}
for region, region_ref in catalog_ref.items():
for service, service_ref in region_ref.items():
new_service_ref = services.get(service, {})
new_service_ref['name'] = service_ref.pop('name')
new_service_ref['type'] = service
new_service_ref['endpoints_links'] = []
service_ref['region'] = region
endpoints_ref = new_service_ref.get('endpoints', [])
endpoints_ref.append(service_ref)
new_service_ref['endpoints'] = endpoints_ref
services[service] = new_service_ref
return list(services.values())
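    # Illustrative example (not from the original source):
    #   {'RegionOne': {'compute': {'name': 'nova', 'publicURL': url}}}
    # becomes
    #   [{'name': 'nova', 'type': 'compute', 'endpoints_links': [],
    #     'endpoints': [{'publicURL': url, 'region': 'RegionOne'}]}]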
@dependency.requires('assignment_api', 'catalog_api', 'federation_api',
'identity_api', 'resource_api', 'role_api', 'trust_api')
class V3TokenDataHelper(object):
"""Token data helper."""
def __init__(self):
# Keep __init__ around to ensure dependency injection works.
super(V3TokenDataHelper, self).__init__()
def _get_filtered_domain(self, domain_id):
domain_ref = self.resource_api.get_domain(domain_id)
return {'id': domain_ref['id'], 'name': domain_ref['name']}
def _get_filtered_project(self, project_id):
project_ref = self.resource_api.get_project(project_id)
filtered_project = {
'id': project_ref['id'],
'name': project_ref['name']}
if project_ref['domain_id'] is not None:
filtered_project['domain'] = (
self._get_filtered_domain(project_ref['domain_id']))
else:
# Projects acting as a domain do not have a domain_id attribute
filtered_project['domain'] = None
return filtered_project
def _populate_scope(self, token_data, domain_id, project_id):
if 'domain' in token_data or 'project' in token_data:
# scope already exist, no need to populate it again
return
if domain_id:
token_data['domain'] = self._get_filtered_domain(domain_id)
if project_id:
token_data['project'] = self._get_filtered_project(project_id)
def _populate_is_admin_project(self, token_data):
# TODO(ayoung): Support the ability for a project acting as a domain
# to be the admin project once the rest of the code for projects
# acting as domains is merged. Code will likely be:
# (r.admin_project_name == None and project['is_domain'] == True
# and project['name'] == r.admin_project_domain_name)
project = token_data['project']
r = CONF.resource
if (project['name'] == r.admin_project_name and
project['domain']['name'] == r.admin_project_domain_name):
token_data['is_admin_project'] = True
def _get_roles_for_user(self, user_id, domain_id, project_id):
roles = []
if domain_id:
roles = self.assignment_api.get_roles_for_user_and_domain(
user_id, domain_id)
if project_id:
roles = self.assignment_api.get_roles_for_user_and_project(
user_id, project_id)
return [self.role_api.get_role(role_id) for role_id in roles]
def populate_roles_for_groups(self, token_data, group_ids,
project_id=None, domain_id=None,
user_id=None):
"""Populate roles basing on provided groups and project/domain
Used for ephemeral users with dynamically assigned groups.
This method does not return anything, yet it modifies token_data in
place.
:param token_data: a dictionary used for building token response
:param group_ids: list of group IDs a user is a member of
:param project_id: project ID to scope to
:param domain_id: domain ID to scope to
:param user_id: user ID
        :raises keystone.exception.Unauthorized: when no roles were found for
            the (group_ids, project_id) or (group_ids, domain_id) pair.
"""
def check_roles(roles, user_id, project_id, domain_id):
# User was granted roles so simply exit this function.
if roles:
return
if project_id:
msg = _('User %(user_id)s has no access '
'to project %(project_id)s') % {
'user_id': user_id,
'project_id': project_id}
elif domain_id:
msg = _('User %(user_id)s has no access '
'to domain %(domain_id)s') % {
'user_id': user_id,
'domain_id': domain_id}
# Since no roles were found a user is not authorized to
# perform any operations. Raise an exception with
# appropriate error message.
raise exception.Unauthorized(msg)
roles = self.assignment_api.get_roles_for_groups(group_ids,
project_id,
domain_id)
check_roles(roles, user_id, project_id, domain_id)
token_data['roles'] = roles
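    # A hypothetical call for an ephemeral (federated) user, where roles are
    # derived purely from group membership and written into token_data in
    # place (all ids below are made up):
    #
    #   helper = V3TokenDataHelper()
    #   helper.populate_roles_for_groups(token_data, group_ids=['g1', 'g2'],
    #                                    project_id='p1', user_id='u1')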
def _populate_user(self, token_data, user_id, trust):
if 'user' in token_data:
# no need to repopulate user if it already exists
return
user_ref = self.identity_api.get_user(user_id)
if CONF.trust.enabled and trust and 'OS-TRUST:trust' not in token_data:
trustor_user_ref = (self.identity_api.get_user(
trust['trustor_user_id']))
try:
self.identity_api.assert_user_enabled(trust['trustor_user_id'])
except AssertionError:
raise exception.Forbidden(_('Trustor is disabled.'))
if trust['impersonation']:
user_ref = trustor_user_ref
token_data['OS-TRUST:trust'] = (
{
'id': trust['id'],
'trustor_user': {'id': trust['trustor_user_id']},
'trustee_user': {'id': trust['trustee_user_id']},
'impersonation': trust['impersonation']
})
filtered_user = {
'id': user_ref['id'],
'name': user_ref['name'],
'domain': self._get_filtered_domain(user_ref['domain_id'])}
token_data['user'] = filtered_user
def _populate_oauth_section(self, token_data, access_token):
if access_token:
access_token_id = access_token['id']
consumer_id = access_token['consumer_id']
token_data['OS-OAUTH1'] = ({'access_token_id': access_token_id,
'consumer_id': consumer_id})
def _populate_roles(self, token_data, user_id, domain_id, project_id,
trust, access_token):
if 'roles' in token_data:
# no need to repopulate roles
return
if access_token:
filtered_roles = []
authed_role_ids = jsonutils.loads(access_token['role_ids'])
all_roles = self.role_api.list_roles()
for role in all_roles:
for authed_role in authed_role_ids:
if authed_role == role['id']:
filtered_roles.append({'id': role['id'],
'name': role['name']})
token_data['roles'] = filtered_roles
return
if CONF.trust.enabled and trust:
# If redelegated_trust_id is set, then we must traverse the
# trust_chain in order to determine who the original trustor is. We
# need to do this because the user ID of the original trustor helps
# us determine scope in the redelegated context.
if trust.get('redelegated_trust_id'):
trust_chain = self.trust_api.get_trust_pedigree(trust['id'])
token_user_id = trust_chain[-1]['trustor_user_id']
else:
token_user_id = trust['trustor_user_id']
token_project_id = trust['project_id']
# trusts do not support domains yet
token_domain_id = None
else:
token_user_id = user_id
token_project_id = project_id
token_domain_id = domain_id
if token_domain_id or token_project_id:
filtered_roles = []
if CONF.trust.enabled and trust:
# First expand out any roles that were in the trust to include
# any implied roles, whether global or domain specific
refs = [{'role_id': role['id']} for role in trust['roles']]
effective_trust_roles = (
self.assignment_api.add_implied_roles(refs))
# Now get the current role assignments for the trustor,
# including any domain specific roles.
assignment_list = self.assignment_api.list_role_assignments(
user_id=token_user_id,
project_id=token_project_id,
effective=True, strip_domain_roles=False)
current_effective_trustor_roles = (
list(set([x['role_id'] for x in assignment_list])))
                # Go through each of the effective trust roles, making sure
                # the trustor still has them; if any have been removed, we
                # will treat the trust as invalid.
for trust_role in effective_trust_roles:
match_roles = [x for x in current_effective_trustor_roles
if x == trust_role['role_id']]
if match_roles:
role = self.role_api.get_role(match_roles[0])
if role['domain_id'] is None:
filtered_roles.append(role)
else:
raise exception.Forbidden(
_('Trustee has no delegated roles.'))
else:
for role in self._get_roles_for_user(token_user_id,
token_domain_id,
token_project_id):
filtered_roles.append({'id': role['id'],
'name': role['name']})
# user has no project or domain roles, therefore access denied
if not filtered_roles:
if token_project_id:
msg = _('User %(user_id)s has no access '
'to project %(project_id)s') % {
'user_id': user_id,
'project_id': token_project_id}
else:
msg = _('User %(user_id)s has no access '
'to domain %(domain_id)s') % {
'user_id': user_id,
'domain_id': token_domain_id}
LOG.debug(msg)
raise exception.Unauthorized(msg)
token_data['roles'] = filtered_roles
def _populate_service_catalog(self, token_data, user_id,
domain_id, project_id, trust):
if 'catalog' in token_data:
# no need to repopulate service catalog
return
if CONF.trust.enabled and trust:
user_id = trust['trustor_user_id']
if project_id or domain_id:
service_catalog = self.catalog_api.get_v3_catalog(
user_id, project_id)
token_data['catalog'] = service_catalog
def _populate_service_providers(self, token_data):
if 'service_providers' in token_data:
return
service_providers = self.federation_api.get_enabled_service_providers()
if service_providers:
token_data['service_providers'] = service_providers
def _populate_token_dates(self, token_data, expires=None, trust=None,
issued_at=None):
if not expires:
expires = provider.default_expire_time()
if not isinstance(expires, six.string_types):
expires = utils.isotime(expires, subsecond=True)
token_data['expires_at'] = expires
token_data['issued_at'] = (issued_at or
utils.isotime(subsecond=True))
def _populate_audit_info(self, token_data, audit_info=None):
if audit_info is None or isinstance(audit_info, six.string_types):
token_data['audit_ids'] = provider.audit_info(audit_info)
elif isinstance(audit_info, list):
token_data['audit_ids'] = audit_info
else:
msg = (_('Invalid audit info data type: %(data)s (%(type)s)') %
{'data': audit_info, 'type': type(audit_info)})
LOG.error(msg)
raise exception.UnexpectedError(msg)
def get_token_data(self, user_id, method_names, domain_id=None,
project_id=None, expires=None, trust=None, token=None,
include_catalog=True, bind=None, access_token=None,
issued_at=None, audit_info=None):
token_data = {'methods': method_names}
# We've probably already written these to the token
if token:
for x in ('roles', 'user', 'catalog', 'project', 'domain'):
if x in token:
token_data[x] = token[x]
if bind:
token_data['bind'] = bind
self._populate_scope(token_data, domain_id, project_id)
if token_data.get('project'):
self._populate_is_admin_project(token_data)
self._populate_user(token_data, user_id, trust)
self._populate_roles(token_data, user_id, domain_id, project_id, trust,
access_token)
self._populate_audit_info(token_data, audit_info)
if include_catalog:
self._populate_service_catalog(token_data, user_id, domain_id,
project_id, trust)
self._populate_service_providers(token_data)
self._populate_token_dates(token_data, expires=expires, trust=trust,
issued_at=issued_at)
self._populate_oauth_section(token_data, access_token)
return {'token': token_data}
@dependency.requires('catalog_api', 'identity_api', 'oauth_api',
'resource_api', 'role_api', 'trust_api')
class BaseProvider(provider.Provider):
def __init__(self, *args, **kwargs):
super(BaseProvider, self).__init__(*args, **kwargs)
self.v3_token_data_helper = V3TokenDataHelper()
self.v2_token_data_helper = V2TokenDataHelper()
def get_token_version(self, token_data):
if token_data and isinstance(token_data, dict):
if 'token_version' in token_data:
if token_data['token_version'] in token.provider.VERSIONS:
return token_data['token_version']
# FIXME(morganfainberg): deprecate the following logic in future
# revisions. It is better to just specify the token_version in
# the token_data itself. This way we can support future versions
# that might have the same fields.
if 'access' in token_data:
return token.provider.V2
if 'token' in token_data and 'methods' in token_data['token']:
return token.provider.V3
raise exception.UnsupportedTokenVersionException()
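    # For example (minimal, hand-rolled payloads): a v2 body is recognised by
    # its 'access' root key, a v3 body by 'token' plus 'methods':
    #
    #   self.get_token_version({'access': {}})  # -> token.provider.V2
    #   self.get_token_version(
    #       {'token': {'methods': ['password']}})  # -> token.provider.V3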
def issue_v2_token(self, token_ref, roles_ref=None,
catalog_ref=None):
if token_ref.get('bind') and not self._supports_bind_authentication:
msg = _('The configured token provider does not support bind '
'authentication.')
raise exception.NotImplemented(message=msg)
metadata_ref = token_ref['metadata']
trust_ref = None
if CONF.trust.enabled and metadata_ref and 'trust_id' in metadata_ref:
trust_ref = self.trust_api.get_trust(metadata_ref['trust_id'])
token_data = self.v2_token_data_helper.format_token(
token_ref, roles_ref, catalog_ref, trust_ref)
token_id = self._get_token_id(token_data)
token_data['access']['token']['id'] = token_id
return token_id, token_data
def _is_mapped_token(self, auth_context):
return (federation_constants.IDENTITY_PROVIDER in auth_context and
federation_constants.PROTOCOL in auth_context)
def issue_v3_token(self, user_id, method_names, expires_at=None,
project_id=None, domain_id=None, auth_context=None,
trust=None, metadata_ref=None, include_catalog=True,
parent_audit_id=None):
if auth_context and auth_context.get('bind'):
# NOTE(lbragstad): Check if the token provider being used actually
# supports bind authentication methods before proceeding.
if not self._supports_bind_authentication:
raise exception.NotImplemented(_(
'The configured token provider does not support bind '
'authentication.'))
# for V2, trust is stashed in metadata_ref
if (CONF.trust.enabled and not trust and metadata_ref and
'trust_id' in metadata_ref):
trust = self.trust_api.get_trust(metadata_ref['trust_id'])
if CONF.trust.enabled and trust:
if user_id != trust['trustee_user_id']:
raise exception.Forbidden(_('User is not a trustee.'))
token_ref = None
if auth_context and self._is_mapped_token(auth_context):
token_ref = self._handle_mapped_tokens(
auth_context, project_id, domain_id)
access_token = None
if 'oauth1' in method_names:
access_token_id = auth_context['access_token_id']
access_token = self.oauth_api.get_access_token(access_token_id)
token_data = self.v3_token_data_helper.get_token_data(
user_id,
method_names,
domain_id=domain_id,
project_id=project_id,
expires=expires_at,
trust=trust,
bind=auth_context.get('bind') if auth_context else None,
token=token_ref,
include_catalog=include_catalog,
access_token=access_token,
audit_info=parent_audit_id)
token_id = self._get_token_id(token_data)
return token_id, token_data
def _handle_mapped_tokens(self, auth_context, project_id, domain_id):
user_id = auth_context['user_id']
group_ids = auth_context['group_ids']
idp = auth_context[federation_constants.IDENTITY_PROVIDER]
protocol = auth_context[federation_constants.PROTOCOL]
token_data = {
'user': {
'id': user_id,
'name': parse.unquote(user_id),
federation_constants.FEDERATION: {
'groups': [{'id': x} for x in group_ids],
'identity_provider': {'id': idp},
'protocol': {'id': protocol}
},
'domain': {
'id': CONF.federation.federated_domain_name,
'name': CONF.federation.federated_domain_name
}
}
}
if project_id or domain_id:
self.v3_token_data_helper.populate_roles_for_groups(
token_data, group_ids, project_id, domain_id, user_id)
return token_data
def _verify_token_ref(self, token_ref):
"""Verify and return the given token_ref."""
if not token_ref:
raise exception.Unauthorized()
return token_ref
def _assert_is_not_federation_token(self, token_ref):
"""Make sure we aren't using v2 auth on a federation token."""
token_data = token_ref.get('token_data')
if (token_data and self.get_token_version(token_data) ==
token.provider.V3):
if 'OS-FEDERATION' in token_data['token']['user']:
msg = _('Attempting to use OS-FEDERATION token with V2 '
'Identity Service, use V3 Authentication')
raise exception.Unauthorized(msg)
def _assert_default_domain(self, token_ref):
"""Make sure we are operating on default domain only."""
if (token_ref.get('token_data') and
self.get_token_version(token_ref.get('token_data')) ==
token.provider.V3):
# this is a V3 token
msg = _('Non-default domain is not supported')
# domain scoping is prohibited
if token_ref['token_data']['token'].get('domain'):
raise exception.Unauthorized(
_('Domain scoped token is not supported'))
# if token is scoped to trust, both trustor and trustee must
# be in the default domain. Furthermore, the delegated project
# must also be in the default domain
metadata_ref = token_ref['metadata']
if CONF.trust.enabled and 'trust_id' in metadata_ref:
trust_ref = self.trust_api.get_trust(metadata_ref['trust_id'])
trustee_user_ref = self.identity_api.get_user(
trust_ref['trustee_user_id'])
if (trustee_user_ref['domain_id'] !=
CONF.identity.default_domain_id):
raise exception.Unauthorized(msg)
trustor_user_ref = self.identity_api.get_user(
trust_ref['trustor_user_id'])
if (trustor_user_ref['domain_id'] !=
CONF.identity.default_domain_id):
raise exception.Unauthorized(msg)
project_ref = self.resource_api.get_project(
trust_ref['project_id'])
if (project_ref['domain_id'] !=
CONF.identity.default_domain_id):
raise exception.Unauthorized(msg)
def validate_v2_token(self, token_ref):
try:
self._assert_is_not_federation_token(token_ref)
self._assert_default_domain(token_ref)
# FIXME(gyee): performance or correctness? Should we return the
# cached token or reconstruct it? Obviously if we are going with
# the cached token, any role, project, or domain name changes
# will not be reflected. One may argue that with PKI tokens,
# we are essentially doing cached token validation anyway.
            # Let's go with the cached token strategy. Since the token
            # management layer is now pluggable, one can always provide
# their own implementation to suit their needs.
token_data = token_ref.get('token_data')
if (self.get_token_version(token_data) != token.provider.V2):
# Validate the V3 token as V2
token_data = self.v2_token_data_helper.v3_to_v2_token(
token_data)
trust_id = token_data['access'].get('trust', {}).get('id')
if trust_id:
                msg = ('Unable to validate trust-scoped tokens using the '
                       'v2.0 API.')
raise exception.Unauthorized(msg)
return token_data
except exception.ValidationError:
LOG.exception(_LE('Failed to validate token'))
token_id = token_ref['token_data']['access']['token']['id']
raise exception.TokenNotFound(token_id=token_id)
def validate_non_persistent_token(self, token_id):
try:
(user_id, methods, audit_ids, domain_id, project_id, trust_id,
federated_info, access_token_id, created_at, expires_at) = (
self.token_formatter.validate_token(token_id))
except exception.ValidationError as e:
raise exception.TokenNotFound(e)
token_dict = None
trust_ref = None
if federated_info:
# NOTE(lbragstad): We need to rebuild information about the
# federated token as well as the federated token roles. This is
# because when we validate a non-persistent token, we don't have a
# token reference to pull the federated token information out of.
# As a result, we have to extract it from the token itself and
# rebuild the federated context. These private methods currently
# live in the keystone.token.providers.fernet.Provider() class.
token_dict = self._rebuild_federated_info(federated_info, user_id)
if project_id or domain_id:
self._rebuild_federated_token_roles(token_dict, federated_info,
user_id, project_id,
domain_id)
if trust_id:
trust_ref = self.trust_api.get_trust(trust_id)
access_token = None
if access_token_id:
access_token = self.oauth_api.get_access_token(access_token_id)
return self.v3_token_data_helper.get_token_data(
user_id,
method_names=methods,
domain_id=domain_id,
project_id=project_id,
issued_at=created_at,
expires=expires_at,
trust=trust_ref,
token=token_dict,
access_token=access_token,
audit_info=audit_ids)
def validate_v3_token(self, token_ref):
# FIXME(gyee): performance or correctness? Should we return the
# cached token or reconstruct it? Obviously if we are going with
# the cached token, any role, project, or domain name changes
# will not be reflected. One may argue that with PKI tokens,
# we are essentially doing cached token validation anyway.
        # Let's go with the cached token strategy. Since the token
        # management layer is now pluggable, one can always provide
# their own implementation to suit their needs.
trust_id = token_ref.get('trust_id')
if trust_id:
# token trust validation
self.trust_api.get_trust(trust_id)
token_data = token_ref.get('token_data')
if not token_data or 'token' not in token_data:
# token ref is created by V2 API
project_id = None
project_ref = token_ref.get('tenant')
if project_ref:
project_id = project_ref['id']
issued_at = token_ref['token_data']['access']['token']['issued_at']
audit = token_ref['token_data']['access']['token'].get('audit_ids')
token_data = self.v3_token_data_helper.get_token_data(
token_ref['user']['id'],
['password', 'token'],
project_id=project_id,
bind=token_ref.get('bind'),
expires=token_ref['expires'],
issued_at=issued_at,
audit_info=audit)
return token_data
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
ENGINE_TOPIC = 'engine'
LISTENER_TOPIC = 'heat-engine-listener'
PARAM_KEYS = (
PARAM_TIMEOUT, PARAM_DISABLE_ROLLBACK, PARAM_ADOPT_STACK_DATA,
PARAM_SHOW_DELETED, PARAM_SHOW_NESTED, PARAM_EXISTING,
PARAM_CLEAR_PARAMETERS, PARAM_GLOBAL_TENANT, PARAM_LIMIT,
PARAM_NESTED_DEPTH, PARAM_TAGS, PARAM_SHOW_HIDDEN, PARAM_TAGS_ANY,
PARAM_NOT_TAGS, PARAM_NOT_TAGS_ANY, TEMPLATE_TYPE, PARAM_WITH_DETAIL
) = (
'timeout_mins', 'disable_rollback', 'adopt_stack_data',
'show_deleted', 'show_nested', 'existing',
'clear_parameters', 'global_tenant', 'limit',
'nested_depth', 'tags', 'show_hidden', 'tags_any',
'not_tags', 'not_tags_any', 'template_type', 'with_detail',
)
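# The chained-assignment idiom used throughout this module binds each
# symbolic constant to its wire-format string while also collecting them all
# into one tuple; a minimal illustration of the same pattern:
#
#   COLORS = (RED, GREEN) = ('red', 'green')
#   # RED == 'red', GREEN == 'green', COLORS == ('red', 'green')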
STACK_KEYS = (
STACK_NAME, STACK_ID,
STACK_CREATION_TIME, STACK_UPDATED_TIME, STACK_DELETION_TIME,
STACK_NOTIFICATION_TOPICS,
STACK_DESCRIPTION, STACK_TMPL_DESCRIPTION,
STACK_PARAMETERS, STACK_OUTPUTS, STACK_ACTION,
STACK_STATUS, STACK_STATUS_DATA, STACK_CAPABILITIES,
STACK_DISABLE_ROLLBACK, STACK_TIMEOUT, STACK_OWNER,
STACK_PARENT, STACK_USER_PROJECT_ID, STACK_TAGS
) = (
'stack_name', 'stack_identity',
'creation_time', 'updated_time', 'deletion_time',
'notification_topics',
'description', 'template_description',
'parameters', 'outputs', 'stack_action',
'stack_status', 'stack_status_reason', 'capabilities',
'disable_rollback', 'timeout_mins', 'stack_owner',
'parent', 'stack_user_project_id', 'tags'
)
STACK_OUTPUT_KEYS = (
OUTPUT_DESCRIPTION,
OUTPUT_KEY, OUTPUT_VALUE,
OUTPUT_ERROR,
) = (
'description',
'output_key', 'output_value',
'output_error',
)
RES_KEYS = (
RES_DESCRIPTION, RES_CREATION_TIME, RES_UPDATED_TIME,
RES_NAME, RES_PHYSICAL_ID, RES_METADATA,
RES_ACTION, RES_STATUS, RES_STATUS_DATA,
RES_TYPE, RES_ID, RES_STACK_ID, RES_STACK_NAME,
RES_REQUIRED_BY, RES_NESTED_STACK_ID, RES_NESTED_RESOURCES,
RES_PARENT_RESOURCE,
) = (
'description', 'creation_time', 'updated_time',
'resource_name', 'physical_resource_id', 'metadata',
'resource_action', 'resource_status', 'resource_status_reason',
'resource_type', 'resource_identity', STACK_ID, STACK_NAME,
'required_by', 'nested_stack_id', 'nested_resources',
'parent_resource',
)
RES_SCHEMA_KEYS = (
RES_SCHEMA_RES_TYPE, RES_SCHEMA_PROPERTIES, RES_SCHEMA_ATTRIBUTES,
RES_SCHEMA_SUPPORT_STATUS,
) = (
RES_TYPE, 'properties', 'attributes', 'support_status'
)
EVENT_KEYS = (
EVENT_ID,
EVENT_STACK_ID, EVENT_STACK_NAME,
EVENT_TIMESTAMP,
EVENT_RES_NAME, EVENT_RES_PHYSICAL_ID, EVENT_RES_ACTION,
EVENT_RES_STATUS, EVENT_RES_STATUS_DATA, EVENT_RES_TYPE,
EVENT_RES_PROPERTIES,
) = (
'event_identity',
STACK_ID, STACK_NAME,
'event_time',
RES_NAME, RES_PHYSICAL_ID, RES_ACTION,
RES_STATUS, RES_STATUS_DATA, RES_TYPE,
'resource_properties',
)
NOTIFY_KEYS = (
NOTIFY_TENANT_ID,
NOTIFY_USER_ID,
NOTIFY_STACK_ID,
NOTIFY_STACK_NAME,
NOTIFY_STATE,
NOTIFY_STATE_REASON,
NOTIFY_CREATE_AT,
) = (
'tenant_id',
'user_id',
STACK_ID,
STACK_NAME,
'state',
'state_reason',
'create_at',
)
# This is the representation of a watch we expose to the API via RPC
WATCH_KEYS = (
WATCH_ACTIONS_ENABLED, WATCH_ALARM_ACTIONS, WATCH_TOPIC,
WATCH_UPDATED_TIME, WATCH_DESCRIPTION, WATCH_NAME,
WATCH_COMPARISON, WATCH_DIMENSIONS, WATCH_PERIODS,
WATCH_INSUFFICIENT_ACTIONS, WATCH_METRIC_NAME, WATCH_NAMESPACE,
WATCH_OK_ACTIONS, WATCH_PERIOD, WATCH_STATE_REASON,
WATCH_STATE_REASON_DATA, WATCH_STATE_UPDATED_TIME, WATCH_STATE_VALUE,
WATCH_STATISTIC, WATCH_THRESHOLD, WATCH_UNIT, WATCH_STACK_ID,
) = (
'actions_enabled', 'actions', 'topic',
'updated_time', 'description', 'name',
'comparison', 'dimensions', 'periods',
'insufficient_actions', 'metric_name', 'namespace',
'ok_actions', 'period', 'state_reason',
'state_reason_data', 'state_updated_time', 'state_value',
'statistic', 'threshold', 'unit', 'stack_id',
)
# Alternate representation of a watch rule to align with DB format
# FIXME: these align with AWS naming for compatibility with the current
# cfn-push-stats & metadata server. Once we've ported cfn-push-stats to use
# the Cloudwatch server and/or moved metric collection into ceilometer,
# these should just be WATCH_KEYS, or each field should be stored separately
# in the DB watch_data table if we stick to storing watch data in the heat
# DB.
WATCH_RULE_KEYS = (
RULE_ACTIONS_ENABLED, RULE_ALARM_ACTIONS, RULE_TOPIC,
RULE_UPDATED_TIME, RULE_DESCRIPTION, RULE_NAME,
RULE_COMPARISON, RULE_DIMENSIONS, RULE_PERIODS,
RULE_INSUFFICIENT_ACTIONS, RULE_METRIC_NAME, RULE_NAMESPACE,
RULE_OK_ACTIONS, RULE_PERIOD, RULE_STATE_REASON,
RULE_STATE_REASON_DATA, RULE_STATE_UPDATED_TIME, RULE_STATE_VALUE,
RULE_STATISTIC, RULE_THRESHOLD, RULE_UNIT, RULE_STACK_NAME,
) = (
'ActionsEnabled', 'AlarmActions', 'AlarmArn',
'AlarmConfigurationUpdatedTimestamp', 'AlarmDescription', 'AlarmName',
'ComparisonOperator', 'Dimensions', 'EvaluationPeriods',
'InsufficientDataActions', 'MetricName', 'Namespace',
'OKActions', 'Period', 'StateReason',
'StateReasonData', 'StateUpdatedTimestamp', 'StateValue',
'Statistic', 'Threshold', 'Unit', 'StackName',
)
WATCH_STATES = (
WATCH_STATE_OK, WATCH_STATE_ALARM, WATCH_STATE_NODATA,
WATCH_STATE_SUSPENDED, WATCH_STATE_CEILOMETER_CONTROLLED
) = (
'NORMAL', 'ALARM', 'NODATA',
'SUSPENDED', 'CEILOMETER_CONTROLLED'
)
WATCH_DATA_KEYS = (
WATCH_DATA_ALARM, WATCH_DATA_METRIC, WATCH_DATA_TIME,
WATCH_DATA_NAMESPACE, WATCH_DATA
) = (
'watch_name', 'metric_name', 'timestamp',
'namespace', 'data'
)
VALIDATE_PARAM_KEYS = (
PARAM_TYPE, PARAM_DEFAULT, PARAM_NO_ECHO,
PARAM_ALLOWED_VALUES, PARAM_ALLOWED_PATTERN, PARAM_MAX_LENGTH,
PARAM_MIN_LENGTH, PARAM_MAX_VALUE, PARAM_MIN_VALUE,
PARAM_DESCRIPTION, PARAM_CONSTRAINT_DESCRIPTION, PARAM_LABEL,
PARAM_CUSTOM_CONSTRAINT
) = (
'Type', 'Default', 'NoEcho',
'AllowedValues', 'AllowedPattern', 'MaxLength',
'MinLength', 'MaxValue', 'MinValue',
'Description', 'ConstraintDescription', 'Label',
'CustomConstraint'
)
VALIDATE_PARAM_TYPES = (
PARAM_TYPE_STRING, PARAM_TYPE_NUMBER, PARAM_TYPE_COMMA_DELIMITED_LIST,
PARAM_TYPE_JSON, PARAM_TYPE_BOOLEAN
) = (
'String', 'Number', 'CommaDelimitedList',
'Json', 'Boolean'
)
SOFTWARE_CONFIG_KEYS = (
SOFTWARE_CONFIG_ID,
SOFTWARE_CONFIG_NAME,
SOFTWARE_CONFIG_GROUP,
SOFTWARE_CONFIG_CONFIG,
SOFTWARE_CONFIG_INPUTS,
SOFTWARE_CONFIG_OUTPUTS,
SOFTWARE_CONFIG_OPTIONS,
SOFTWARE_CONFIG_CREATION_TIME
) = (
'id',
'name',
'group',
'config',
'inputs',
'outputs',
'options',
'creation_time'
)
SOFTWARE_DEPLOYMENT_KEYS = (
SOFTWARE_DEPLOYMENT_ID,
SOFTWARE_DEPLOYMENT_CONFIG_ID,
SOFTWARE_DEPLOYMENT_SERVER_ID,
SOFTWARE_DEPLOYMENT_INPUT_VALUES,
SOFTWARE_DEPLOYMENT_OUTPUT_VALUES,
SOFTWARE_DEPLOYMENT_ACTION,
SOFTWARE_DEPLOYMENT_STATUS,
SOFTWARE_DEPLOYMENT_STATUS_REASON,
SOFTWARE_DEPLOYMENT_CREATION_TIME,
SOFTWARE_DEPLOYMENT_UPDATED_TIME
) = (
'id',
'config_id',
'server_id',
'input_values',
'output_values',
'action',
'status',
'status_reason',
'creation_time',
'updated_time'
)
SOFTWARE_DEPLOYMENT_STATUSES = (
SOFTWARE_DEPLOYMENT_IN_PROGRESS,
SOFTWARE_DEPLOYMENT_FAILED,
SOFTWARE_DEPLOYMENT_COMPLETE
) = (
'IN_PROGRESS',
'FAILED',
'COMPLETE'
)
SOFTWARE_DEPLOYMENT_OUTPUTS = (
SOFTWARE_DEPLOYMENT_OUTPUT_STDOUT,
SOFTWARE_DEPLOYMENT_OUTPUT_STDERR,
SOFTWARE_DEPLOYMENT_OUTPUT_STATUS_CODE
) = (
'deploy_stdout',
'deploy_stderr',
'deploy_status_code'
)
SNAPSHOT_KEYS = (
SNAPSHOT_ID,
SNAPSHOT_NAME,
SNAPSHOT_STACK_ID,
SNAPSHOT_DATA,
SNAPSHOT_STATUS,
SNAPSHOT_STATUS_REASON,
SNAPSHOT_CREATION_TIME,
) = (
'id',
'name',
'stack_id',
'data',
'status',
'status_reason',
'creation_time'
)
THREAD_MESSAGES = (THREAD_CANCEL,
THREAD_CANCEL_WITH_ROLLBACK
) = ('cancel', 'cancel_with_rollback')
|
|
from exceptions import (
    NodeNotFoundError, NodeTypeNotFoundError, GraphAPIError, APIException,
    MissingNodeTypeError, InvalidPropertyError,
    MalformedUpdateDictionaryError, )
import py2neo
import cypher_utils
from py2neo_additions import CypherTransactionManager
from node_model import Attribute, Relationship, NodeModel
from datetime import datetime
import logging
import inspect
DEFAULT_LIMIT = 100
DEFAULT_SKIP = 0
DEFAULT_CONSTRAINTS = None
class GraphAPI(object):
    def __init__(self, database_url=None, models=None):
        """
        Initializes the graph with the models that make up the schema graph
        and an identifier for a url to a neo4j database.
        """
        self.neograph = py2neo.Graph() if not database_url else py2neo.Graph(database_url)
        self.models_dict = {}
        # Avoid the mutable default argument; treat None as an empty list.
        for model in (models or []):
            self.models_dict[model.__name__] = model
def setup_constraints(self):
"""
Creates constraints for the graph based on the models in the schema
graph.
"""
for node_type in self.models_dict.values():
if hasattr(node_type, "add_constraints_to_graph"):
node_type.add_constraints_to_graph(self.neograph)
def remove_constraints(self):
"""
Removes all of the constraints set by the models in the schema graph.
"""
for node_type in self.models_dict.values():
if hasattr(node_type, "remove_constraints_from_graph"):
node_type.remove_constraints_from_graph(self.neograph)
def query_for_subgraphs(self, actor_id, query_dict, node_type):
"""
        This is used for returning tree-formatted subgraphs of the API, where
        the query dict specifies which nodes should match the query and which
        relationships should be included for each matching node.
"""
skip = query_dict.get('skip', DEFAULT_SKIP)
order_by = query_dict.get('order_by', None)
limit = query_dict.get('limit', DEFAULT_LIMIT)
constraints = query_dict.get('where', DEFAULT_CONSTRAINTS)
include_dict = query_dict.get('include', None)
nodes = self._get_nodes_with_constraints(node_type, constraints, limit, skip, order_by)
results = []
for node in nodes:
results.append(self.request_subgraph_at_node(actor_id, include_dict, node['id'], node_type))
return results
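    # A hypothetical query_dict, assuming a 'Person' model with a 'friends'
    # relationship; 'where', 'order_by', 'skip', 'limit' and 'include' are all
    # optional, and the exact 'where' syntax is defined by cypher_utils:
    #
    #   query_dict = {
    #       'order_by': ('created_at', 'desc'),
    #       'limit': 10,
    #       'include': {'friends': {'limit': 5}},
    #   }
    #   api.query_for_subgraphs(actor_id=-1, query_dict=query_dict,
    #                           node_type='Person')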
def update_subgraphs(self, actor_id, update_list):
"""
Updates/creates a forest of trees specified in the update list.
"""
def request_subgraph_at_node(self, actor_id, include_dict, id, node_type=None, tx=None):
"""
Returns a tree subgraph of the Graph API rooted at the specified node,
which includes attributes as specified in the include_dict.
"""
if tx:
return self._request_subgraph_at_node(tx, actor_id, include_dict, id, node_type)
with CypherTransactionManager(self.neograph.cypher) as tx:
return self._request_subgraph_at_node(tx, actor_id, include_dict, id, node_type)
def update_subgraph_at_node(self, actor_id, update_type, update_dict, id=None, node_type=None, tx=None):
"""
Updates/creates a tree subgraph of the Graph API rooted at the
specified node, by making the modifications specified in the
update_dict. All modifications are atomic/transactional.
"""
change_stack = []
        if tx:
            results = self._update_subgraph_at_node(tx, actor_id, update_type, update_dict, id, node_type, change_stack)
            self.assert_allows_updates(actor_id, change_stack, tx)
        else:
            with CypherTransactionManager(self.neograph.cypher) as tx:
                results = self._update_subgraph_at_node(tx, actor_id, update_type, update_dict, id, node_type, change_stack)
                self.assert_allows_updates(actor_id, change_stack, tx)
# Execute listeners outside the transaction because these listeners
# operate under the assumption that the update has been committed.
for change in change_stack:
if isinstance(change[0], Attribute):
attribute = change[0]
node_id = change[1]
old_value = change[2]
new_value = change[3]
attribute.did_set(self, actor_id, node_id, old_value, new_value)
if isinstance(change[0], Relationship):
relationship = change[0]
node_id = change[1]
related_node_id = change[2]
change_type = change[3]
if change_type == 'remove':
relationship.did_remove_edge(self, actor_id, node_id, related_node_id)
else:
relationship.did_add_edge(self, actor_id, node_id, related_node_id)
return results
def assert_allows_updates(self, actor_id, change_stack, tx):
if actor_id == -1:
return
for change in change_stack:
if inspect.isclass(change[0]) and issubclass(change[0], NodeModel):
node_model = change[0]
change_type = change[1]
if change_type == 'delete':
node_id = change[2]
node_model.assert_allows_delete(actor_id, self, node_id, tx=tx)
else:
node_model.assert_allows_create(actor_id, self, tx=tx)
if isinstance(change[0], Attribute):
attribute = change[0]
node_id = change[1]
old_value = change[2]
new_value = change[3]
attribute.validate_value(self, actor_id, node_id, new_value, tx=tx)
attribute.assert_allows_write(self, actor_id, node_id, new_value, tx=tx)
if isinstance(change[0], Relationship):
relationship = change[0]
node_id = change[1]
related_node_id = change[2]
change_type = change[3]
if change_type == 'remove':
relationship.assert_allows_remove_edge(
self,
actor_id,
node_id,
related_node_id,
tx=tx
)
else:
relationship.assert_allows_add_edge(
self,
actor_id,
node_id,
related_node_id,
tx=tx
)
    def delete_nodes(self, actor_id, ids):
        """
        Deletes the set of nodes which correspond to the ids provided.
        """
        # Minimal sketch, assuming deletion should flow through the
        # transactional update path so permission checks and listeners run.
        for node_id in ids:
            self.update_subgraph_at_node(actor_id, 'delete', {}, id=node_id)
######### Internal methods #########
def _get_node_type_of_node_with_id(self, tx, id):
tx.append('MATCH (n) WHERE n.id = {id} RETURN labels(n)', {'id': id})
node = tx.process()[-1].one
if not node:
raise NodeNotFoundError(id)
return node[0]
def _nodes_are_related_by(self, tx, a_id, b_id, rel_type):
tx.append(
'MATCH (a {{ id:{aid} }})-[r:{rel_type}]-(b {{ id:{bid} }}) '
'RETURN r'.format(
aid=a_id,
rel_type=rel_type,
bid=b_id))
results = tx.process()[-1].one
        return results is not None
def _nodes_are_related_from_a_to_b_by(self, tx, a_id, b_id, rel_type):
tx.append(
'MATCH (a {{ id:{aid} }})-[r:{rel_type}]->(b {{ id:{bid} }}) '
'RETURN r'.format(
aid=a_id,
rel_type=rel_type,
bid=b_id))
results = tx.process()[-1].one
        return results is not None
def _get_node_with_id(self, tx, id, node_type=None):
node = None
if node_type:
node_model = self.models_dict.get(node_type, None)
if not node_model:
# No query injections please.
raise NodeTypeNotFoundError(node_type)
tx.append('MATCH (n:{node_type}) '
'WHERE n.id = {{id}} '
'RETURN n'.format(node_type=node_type),
{'id': id})
else:
tx.append('MATCH (n) WHERE n.id = {id} RETURN n', {'id': id})
node = tx.process()[-1].one
if not node:
raise NodeNotFoundError(id)
return node
def _get_nodes_with_constraints(self, node_type, constraints, limit, skip, order_by):
# TODO: AAAAAHHHH
# CYPHER INJECTION POTENTIAL! WATCHOUT!
constraint_query = ''
# Node type required otherwise you pick up internal type nodes as well.
constraint_query += 'MATCH (n:{})'.format(node_type)
if constraints:
constraint_query += ' WHERE '
constraint_query += cypher_utils.constraints_expression_from_constraints(constraints)
constraint_query += ' RETURN n'
        if order_by:
            # Order by a property of the matched node.
            constraint_query += ' ORDER BY n.`{}`'.format(order_by[0])
            constraint_query += ' {}'.format(order_by[1].upper())
constraint_query += ' SKIP {}'.format(skip)
constraint_query += ' LIMIT {}'.format(limit)
return map(lambda r: r[0], self.neograph.cypher.execute(constraint_query))
def _get_new_global_unique_id(self, tx):
# Create the global unique id node if necessary
tx.append(
'MERGE (id:_GlobalUniqueId) '
'ON CREATE SET id.count = 1 '
'ON MATCH SET id.count = id.count + 1 '
'RETURN id.count AS generated_id')
# Return the id of first result of the last transaction
return tx.process()[-1].one
def _create_node_of_type(self, tx, creator_id, node_type):
new_id = self._get_new_global_unique_id(tx)
created_at = datetime.now()
updated_at = created_at
tx.append(
py2neo.cypher.CreateNode(
node_type,
id=new_id,
created_at=created_at.isoformat(),
updated_at=updated_at.isoformat(),
created_by=creator_id
)
)
return new_id
def _create_node_of_relationship_type(self, tx, actor_id, relationship):
# If there was no node id we must create it.
return self._create_node_of_type(tx, actor_id, relationship.target_model_name)
def _delete_node(self, tx, id):
tx.append('MATCH (n) WHERE n.id = {id} DETACH DELETE n', {'id': id})
def _request_subgraph_at_node(self, tx, actor_id, include_dict, id, node_type=None):
"""
Internal method
This method recursively requests the tree of nodes specified in the
include dict, if permission is granted for the operation.
"""
if not node_type:
node_type = self._get_node_type_of_node_with_id(tx, id)
node_model = self.models_dict.get(node_type, None)
if not node_model:
raise NodeTypeNotFoundError(node_type) # No query injections please.
node = self._get_node_with_id(tx, id, node_type)
results = {}
# Always include the id of the current node.
results['id'] = node['id']
if not include_dict:
return results
for include_key in include_dict:
if include_key in node_model.attributes():
attribute = node_model.attributes()[include_key]
if actor_id != -1:
attribute.assert_allows_read(self, actor_id, node['id'], tx=tx)
results[include_key] = node[include_key]
elif include_key in node_model.relationships():
relationship = node_model.relationships()[include_key]
nested_query_dict = include_dict[relationship.name]
skip = nested_query_dict.get('skip', DEFAULT_SKIP) if nested_query_dict else DEFAULT_SKIP
limit = nested_query_dict.get('limit', DEFAULT_LIMIT) if nested_query_dict else DEFAULT_LIMIT
order_by = nested_query_dict.get('order_by', None) if nested_query_dict else None
constraints = nested_query_dict.get(
'where', DEFAULT_CONSTRAINTS) if nested_query_dict else DEFAULT_CONSTRAINTS
nested_include_dict = nested_query_dict.get('include', None) if nested_query_dict else None
if actor_id != -1:
relationship.assert_allows_read(self, actor_id, node['id'], tx=tx)
related_nodes = relationship.get_related_nodes_with_constraints(
tx, node['id'], constraints, limit, skip, order_by)
if relationship.max_edges == 1:
results[relationship.name] = None
if related_nodes:
related_node = related_nodes[0]
results[relationship.name] = self._request_subgraph_at_node(
tx, actor_id, nested_include_dict, related_node['id'])
else:
results[relationship.name] = []
for related_node in related_nodes:
results[relationship.name].append(self._request_subgraph_at_node(
tx, actor_id, nested_include_dict, related_node['id']))
else:
raise InvalidPropertyError("There is no '{}' property.".format(include_key))
return results
def _update_subgraph_at_node(self, tx, actor_id, update_type, update_dict, id=None, node_type=None, change_stack=None):
"""
Internal method
Recursively updates the tree of nodes specified in the update_dict if
permission is granted for the operation.
"""
return_dict = {}
        if update_type == 'create':
            if not node_type:
                raise MissingNodeTypeError('No node type specified for new node.')
            if id is not None:
                raise APIException('id should not be provided for a new node.')
        elif update_type == 'update':
            if id is None:
                raise APIException('id must be specified to update a node.')
        elif update_type == 'delete':
            if id is None:
                raise APIException('id must be specified to delete a node.')
        else:
            raise APIException('Invalid update type: {}'.format(update_type))
if update_type != 'create':
node_type = self._get_node_type_of_node_with_id(tx, id)
node_model = self.models_dict.get(node_type, None)
if not node_model:
raise NodeTypeNotFoundError(node_type) # No query injections please.
if update_type == 'create':
id = self._create_node_of_type(tx, actor_id, node_type)
            if change_stack is not None:
change_stack.append((node_model, 'create'))
node = self._get_node_with_id(tx, id, node_type)
for update_key in update_dict:
if update_key == 'id':
continue
elif update_key in node_model.attributes():
attribute = node_model.attributes()[update_key]
new_value = update_dict[update_key]
return_dict[update_key] = self._update_attribute(tx, actor_id, node, attribute, new_value, change_stack)
elif update_key in node_model.relationships():
relationship = node_model.relationships()[update_key]
relationship_update_dict = update_dict[update_key]
if not isinstance(relationship_update_dict, dict):
raise MalformedUpdateDictionaryError('Malformed update dictionary.')
relationship_update_types = set(['attach', 'detach', 'delete'])
if relationship.max_edges == 1 and all(map(lambda value: isinstance(value, dict), relationship_update_dict.values())):
                if len(relationship_update_dict) != 1:
                    raise MalformedUpdateDictionaryError('To-one relationships must specify exactly one of attach, detach or delete.')
                # Use a distinct name so the node-level update_type checked
                # below for deletion is not clobbered.
                relationship_update_type = relationship_update_dict.keys()[0]
                if relationship_update_type not in relationship_update_types:
                    raise MalformedUpdateDictionaryError('Relationships must specify only attach, detach or delete.')
return_dict[update_key] = self._update_to_one_relationship(tx, actor_id, node, relationship, relationship_update_dict, change_stack)
elif ((not relationship.max_edges or relationship.max_edges > 1)
and all(map(lambda value: isinstance(value, list), relationship_update_dict.values()))):
                    if any(map(lambda key: key not in relationship_update_types, relationship_update_dict)):
                        raise MalformedUpdateDictionaryError('Relationships must specify only attach, detach or delete.')
return_dict[update_key] = self._update_to_many_relationship(tx, actor_id, node, relationship, relationship_update_dict, change_stack)
else:
# Trying to pass an array for a to one relationship.
raise MalformedUpdateDictionaryError('Malformed update dictionary.')
else:
raise InvalidPropertyError("There is no '{}' property.".format(update_key))
if update_type == 'delete':
self._delete_node(tx, id)
            if change_stack is not None:
change_stack.append((node_model, 'delete', id))
else:
return_dict['id'] = id
return return_dict
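    # A hypothetical update_dict for a 'create', assuming a 'Person' model
    # with a 'name' attribute and a to-many 'friends' relationship; nested
    # relationship updates are keyed by 'attach', 'detach' or 'delete', and
    # attached entries without an 'id' are created on the fly:
    #
    #   update_dict = {
    #       'name': 'Alice',
    #       'friends': {'attach': [{'id': 42}, {'name': 'Bob'}]},
    #   }
    #   api.update_subgraph_at_node(actor_id=-1, update_type='create',
    #                               update_dict=update_dict,
    #                               node_type='Person')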
def _update_attribute(self, tx, actor_id, node, attribute, new_value, change_stack=None):
"""
Updates the attribute on the provided node with the new value.
"""
        if change_stack is not None:
old_value = attribute.get_value(tx, node['id'])
change_stack.append((attribute, node['id'], old_value, new_value))
attribute.set_value(tx, node['id'], new_value)
return new_value
def _update_to_one_relationship(self, tx, actor_id, node, relationship, update_dict, change_stack=None):
"""
Updates the to one relationship in accordance with the provided update
dict if permitted and then recursively calls update on the nested node.
"""
relationship_update_type = update_dict.keys()[0]
update_value = update_dict[relationship_update_type]
rev_relationship = relationship.get_reverse_relationship(self.models_dict)
        # Look up the currently related node, if any.
        current_related_nodes = relationship.get_related_nodes_with_constraints(tx, node['id'])
        current_related_node = current_related_nodes[0] if current_related_nodes else None
        if relationship_update_type == 'delete':
            if not current_related_node or current_related_node['id'] != update_value['id']:
                raise APIException('Cannot delete node {} which is not related.'.format(update_value['id']))
            return self._update_subgraph_at_node(tx, actor_id, 'delete', update_value, update_value['id'], change_stack=change_stack)
elif relationship_update_type == 'detach':
if not current_related_node or current_related_node['id'] != update_value['id']:
raise APIException('Cannot detach node {} which is not related.'.format(update_value['id']))
            relationship.remove(tx, node['id'], current_related_node['id'])
            if change_stack is not None:
                # Record the change as 'remove' so the permission checks and
                # edge-removal listeners recognise it.
                change_stack.append((relationship, node['id'], current_related_node['id'], 'remove'))
                if rev_relationship:
                    change_stack.append((rev_relationship, current_related_node['id'], node['id'], 'remove'))
return self._update_subgraph_at_node(tx, actor_id, 'update', update_value, update_value['id'], change_stack=change_stack)
elif relationship_update_type == 'attach':
if current_related_node:
if 'id' not in update_value or current_related_node['id'] != update_value['id']:
                    relationship.remove(tx, node['id'], current_related_node['id'])
                    if change_stack is not None:
                        change_stack.append((relationship, node['id'], current_related_node['id'], 'remove'))
                        if rev_relationship:
                            change_stack.append((rev_relationship, current_related_node['id'], node['id'], 'remove'))
if 'id' not in update_value:
update_value['id'] = self._create_node_of_relationship_type(tx, actor_id, relationship)
                if change_stack is not None:
node_model = self.models_dict[relationship.target_model_name]
change_stack.append((node_model, 'create'))
related_node = self._get_node_with_id(tx, update_value['id'])
relationship.add(tx, node['id'], update_value['id'])
        if change_stack is not None:
change_stack.append((relationship, node['id'], related_node['id'], 'add'))
if rev_relationship:
change_stack.append((rev_relationship, related_node['id'], node['id'], 'add'))
return self._update_subgraph_at_node(tx, actor_id, 'update', update_value, update_value['id'], change_stack=change_stack)
def _update_to_many_relationship(self, tx, actor_id, node, relationship, update_dict, change_stack=None):
"""
Updates the to many relationship in accordance with the provided update
list if permitted and then recursively calls update on each of the
nested nodes.
"""
return_list = []
for relationship_update_type, update_list in update_dict.iteritems():
rev_relationship = relationship.get_reverse_relationship(self.models_dict)
if relationship_update_type == 'delete':
for update_value in update_list:
related_node = self._get_node_with_id(tx, update_value['id'])
return_list.append(self._update_subgraph_at_node(tx, actor_id, 'delete', update_value, update_value['id'], change_stack=change_stack))
elif relationship_update_type == 'detach':
edges_to_detach = update_list
for update_value in edges_to_detach:
related_node = self._get_node_with_id(tx, update_value['id'])
relationship.remove(tx, node['id'], update_value['id'])
                    if change_stack is not None:
change_stack.append((relationship, node['id'], related_node['id'], 'remove'))
if rev_relationship:
change_stack.append((rev_relationship, related_node['id'], node['id'], 'remove'))
return_list.append(self._update_subgraph_at_node(tx, actor_id, 'update', update_value, update_value['id'], change_stack=change_stack))
elif relationship_update_type == 'attach':
# All the dictionaries without ids.
nodes_to_create = filter(lambda e: 'id' not in e, update_list)
for new_node_dict in nodes_to_create:
new_node_dict['id'] = self._create_node_of_relationship_type(tx, actor_id, relationship)
                    if change_stack is not None:
node_model = self.models_dict[relationship.target_model_name]
change_stack.append((node_model, 'create'))
# We have already added the id's into the objects that
# were missing them so they will be in edges to add.
edges_to_attach = update_list
current_related_node_id_set = map(lambda crn: crn['id'], relationship.get_related_nodes_with_constraints(tx, node['id']))
for update_value in filter(lambda update_value: update_value['id'] not in current_related_node_id_set, edges_to_attach):
related_node = self._get_node_with_id(tx, update_value['id'])
relationship.add(tx, node['id'], update_value['id'])
                    if change_stack is not None:
change_stack.append((relationship, node['id'], related_node['id'], 'add'))
if rev_relationship:
change_stack.append((rev_relationship, related_node['id'], node['id'], 'add'))
return_list.append(self._update_subgraph_at_node(tx, actor_id, 'update', update_value, update_value['id'], change_stack=change_stack))
|
|
from testtools import TestCase
from testtools.matchers import Equals, Is, MatchesListwise, raises
from eliottree.test.tasks import (
action_task, action_task_end, message_task, nested_action_task,
unnamed_message)
from eliottree.tree import Tree, _TaskNode, task_name
def _flattened_tasks(nodes):
"""
Construct a flat iterable of all the tasks for an iterable of nodes.
"""
for n in nodes:
yield n.task
for c in n.children():
yield c.task
class TaskNameTests(TestCase):
"""
Tests for ``eliottree.tree.task_name``.
"""
def test_none(self):
"""
Cannot create a task name for ``None``.
"""
self.assertThat(
lambda: task_name(None),
raises(ValueError))
def test_message_type(self):
"""
If the task includes a ``message_type`` key use it to construct the
name.
"""
self.assertThat(
task_name(message_task),
Equals(u'twisted:log@1'))
def test_no_message_type(self):
"""
If the task does not include a ``message_type`` key use the
``action_type`` and ``action_status`` keys to construct a name.
"""
self.assertThat(
task_name(action_task),
Equals(u'app:action@1/started'))
def test_no_action_type(self):
"""
If the task does not include either a ``message_type`` or an
``action_type`` key, then return None.
"""
self.assertThat(
task_name(unnamed_message),
Is(None))
def test_levels(self):
"""
Include the task level in the task name.
"""
self.assertThat(
task_name(nested_action_task),
Equals(u'app:action:nested@1,1/started'))
class TaskNodeTests(TestCase):
"""
Tests for ``eliottree.tree._TaskNode``.
"""
def test_none(self):
"""
Passing ``None`` as the task value raises ``ValueError``
"""
self.assertThat(
lambda: _TaskNode(task=None),
raises(ValueError))
def test_repr(self):
"""
Representation of a task node.
"""
node = _TaskNode(task=action_task)
self.assertThat(
repr(node),
Equals('<_TaskNode f3a32bb3-ea6b-457c-aa99-08a3d0491ab4 '
'app:action@1/started children=0>'))
def test_repr_nameless(self):
"""
Representation of a task without a name.
"""
node = _TaskNode(task=unnamed_message)
self.assertThat(
repr(node),
Equals('<_TaskNode cdeb220d-7605-4d5f-8341-1a170222e308 '
'{} children=0>'.format(_TaskNode._DEFAULT_TASK_NAME)))
    def test_repr_children(self):
"""
Representation of a task node with children.
"""
node = _TaskNode(task=action_task, name=u'foo')
node.add_child(_TaskNode(task=message_task))
self.assertThat(
repr(node),
Equals('<_TaskNode f3a32bb3-ea6b-457c-aa99-08a3d0491ab4 '
'foo children=1>'))
def test_no_children(self):
"""
``_TaskNode.children`` returns an empty list for a node with no
children.
"""
node = _TaskNode(task=action_task, name=u'foo')
self.assertThat(
node.children(),
Equals([]))
def test_children(self):
"""
        ``_TaskNode.children`` returns a list of child nodes sorted by their
level regardless of the order they were added.
"""
node = _TaskNode(task=action_task, name=u'foo')
child = _TaskNode(task=nested_action_task)
child2 = _TaskNode(task=action_task_end)
node.add_child(child2)
node.add_child(child)
self.assertThat(
node.children(),
Equals([child, child2]))
def test_nested_children(self):
"""
``_TaskNode.children`` does not include grandchildren.
"""
node = _TaskNode(task=action_task, name=u'foo')
child = _TaskNode(task=message_task)
node.add_child(child)
child2 = _TaskNode(task=nested_action_task)
node.add_child(child2)
self.assertThat(
node.children(),
Equals([child]))
self.assertThat(
child.children(),
Equals([child2]))
class TreeTests(TestCase):
"""
Tests for ``eliottree.tree.Tree``.
"""
def test_initial(self):
"""
The initial state of a tree is always empty.
"""
tree = Tree()
self.assertThat(tree.nodes(), Equals([]))
def test_merge_tasks(self):
"""
        Merge tasks into the tree and retrieve a list of key-node
pairs ordered by task timestamp.
"""
tree = Tree()
matches = tree.merge_tasks([message_task, action_task])
self.expectThat(matches, Is(None))
keys, nodes = zip(*tree.nodes())
self.expectThat(
list(keys),
Equals(['cdeb220d-7605-4d5f-8341-1a170222e308',
'f3a32bb3-ea6b-457c-aa99-08a3d0491ab4']))
self.assertThat(
list(_flattened_tasks(nodes)),
MatchesListwise([Equals(message_task),
Equals(action_task)]))
def test_merge_nested_tasks(self):
"""
        Merge nested tasks into the tree and retrieve a list of key-node
pairs ordered by task timestamp.
"""
tree = Tree()
matches = tree.merge_tasks([action_task_end, action_task])
self.expectThat(matches, Is(None))
keys, nodes = zip(*tree.nodes())
self.expectThat(
list(keys),
Equals(['f3a32bb3-ea6b-457c-aa99-08a3d0491ab4']))
self.assertThat(
list(_flattened_tasks(nodes)),
MatchesListwise([Equals(action_task),
Equals(action_task_end)]))
def test_merge_startless_tasks(self):
"""
Merging a task that will never have a start parent raises
``RuntimeError``.
"""
tree = Tree()
self.assertThat(
lambda: tree.merge_tasks([action_task_end]),
raises(RuntimeError))
def test_merge_tasks_filtered(self):
"""
Merge tasks into the tree with a filter function, generating a set of
matches that can be used to prune the tree.
"""
tree = Tree()
filters = [lambda task: task.get(u'action_type') == u'app:action']
matches = tree.merge_tasks([action_task, message_task], filters)
keys, nodes = zip(*tree.nodes(matches))
self.expectThat(
list(keys),
Equals(['f3a32bb3-ea6b-457c-aa99-08a3d0491ab4']))
self.expectThat(
list(keys),
Equals(list(matches)))
self.assertThat(
list(_flattened_tasks(nodes)),
MatchesListwise([Equals(action_task)]))
|
|
import numpy
import six
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import serializer
import chainerx
class DictionarySerializer(serializer.Serializer):
"""Serializer for dictionary.
    This is the standard serializer in Chainer. The hierarchy of objects is
    simply mapped to a flat dictionary with keys representing the paths to
    objects in the hierarchy.
.. note::
        Despite its name, this serializer DOES NOT serialize the
        object into external files. It just builds a flat dictionary of arrays
        that can be fed into :func:`numpy.savez` and
        :func:`numpy.savez_compressed`. If you want to use this serializer
        directly, you have to manually send the resulting dictionary to one of
        these functions.
Args:
target (dict): The dictionary that this serializer saves the objects
to. If target is None, then a new dictionary is created.
path (str): The base path in the hierarchy that this serializer
indicates.
Attributes:
~DictionarySerializer.target (dict): The target dictionary.
Once the serialization completes, this dictionary can be fed into
:func:`numpy.savez` or :func:`numpy.savez_compressed` to serialize
it in the NPZ format.
"""
def __init__(self, target=None, path=''):
self.target = {} if target is None else target
self.path = path
def __getitem__(self, key):
key = key.strip('/')
return DictionarySerializer(self.target, self.path + key + '/')
def __call__(self, key, value):
key = key.lstrip('/')
self.target[self.path + key] = (
_cpu._to_cpu(value) if value is not None
else numpy.asarray(None))
return value
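# A minimal sketch of driving DictionarySerializer by hand, as the class
# docstring describes; ``obj`` is assumed to be any object supporting the
# serialization protocol (e.g. a chainer.Link).
def _manual_npz_example(obj, filename):
    s = DictionarySerializer()
    s.save(obj)  # builds the flat {path: array} dictionary in s.target
    numpy.savez_compressed(filename, **s.target)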
def save_npz(file, obj, compression=True):
"""Saves an object to the file in NPZ format.
This is a short-cut function to save only one object into an NPZ file.
Args:
file (str or file-like): Target file to write to.
obj: Object to be serialized. It must support serialization protocol.
compression (bool): If ``True``, compression in the resulting zip file
is enabled.
.. seealso::
:func:`chainer.serializers.load_npz`
"""
if isinstance(file, six.string_types):
with open(file, 'wb') as f:
save_npz(f, obj, compression)
return
s = DictionarySerializer()
s.save(obj)
if compression:
numpy.savez_compressed(file, **s.target)
else:
numpy.savez(file, **s.target)
class NpzDeserializer(serializer.Deserializer):
"""Deserializer for NPZ format.
    This is the standard deserializer in Chainer. It can be used to read an
    object serialized by :func:`save_npz`.
Args:
npz: `npz` file object.
path: The base path that the deserialization starts from.
strict (bool): If ``True``, the deserializer raises an error when an
expected value is not found in the given NPZ file. Otherwise,
            it ignores the value and skips deserialization.
ignore_names (string, callable or list of them):
            If callable, it is a function that takes the name of a parameter
            or persistent value and returns ``True`` when it needs to be
            skipped. If string, it is the name of a parameter or persistent
            value that is to be skipped.
This can also be a list of callables and strings that behave as
described above.
"""
def __init__(self, npz, path='', strict=True, ignore_names=None):
self.npz = npz
self.path = path
self.strict = strict
if ignore_names is None:
ignore_names = []
self.ignore_names = ignore_names
def __getitem__(self, key):
key = key.strip('/')
return NpzDeserializer(
self.npz, self.path + key + '/', strict=self.strict,
ignore_names=self.ignore_names)
def __call__(self, key, value):
key = self.path + key.lstrip('/')
if not self.strict and key not in self.npz:
return value
if isinstance(self.ignore_names, (tuple, list)):
ignore_names = self.ignore_names
else:
ignore_names = (self.ignore_names,)
for ignore_name in ignore_names:
if isinstance(ignore_name, str):
if key == ignore_name:
return value
elif callable(ignore_name):
if ignore_name(key):
return value
else:
raise ValueError(
'ignore_names needs to be a callable, string or '
'list of them.')
dataset = self.npz[key]
if dataset[()] is None:
return None
if value is None:
return dataset
if isinstance(value, chainerx.ndarray):
value_view = chainerx.to_numpy(value, copy=False)
numpy.copyto(value_view, dataset)
elif isinstance(value, numpy.ndarray):
numpy.copyto(value, dataset)
elif isinstance(value, cuda.ndarray):
value.set(numpy.asarray(dataset, dtype=value.dtype))
elif isinstance(value, intel64.mdarray):
intel64.ideep.basic_copyto(value, numpy.asarray(dataset))
else:
value_type = type(value)
dataset_arr = numpy.asarray(dataset)
if (issubclass(dataset_arr.dtype.type, numpy.number)
and not numpy.can_cast(
dataset_arr.dtype, value_type, casting='safe')):
raise TypeError(
'Cannot safely deserialize from numpy array with dtype={} '
'into a variable of type {}.'.format(
dataset.dtype, type(value)))
value = value_type(dataset_arr)
return value
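# A hypothetical ``ignore_names`` value mixing the two accepted forms: skip
# one entry by its exact key and every bias parameter via a callable (the
# key names below are made up):
#
#   ignore_names = ['predictor/fc/W', lambda key: key.endswith('/b')]
#   load_npz('model.npz', model, ignore_names=ignore_names)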
def load_npz(file, obj, path='', strict=True, ignore_names=None):
"""Loads an object from the file in NPZ format.
This is a short-cut function to load from an `.npz` file that contains only
one object.
Args:
file (str or file-like): File to be loaded.
obj: Object to be deserialized. It must support serialization protocol.
path (str): The path in the hierarchy of the serialized data under
which the data is to be loaded. The default behavior (blank) will
load all data under the root path.
strict (bool): If ``True``, the deserializer raises an error when an
expected value is not found in the given NPZ file. Otherwise,
            it ignores the value and skips deserialization.
ignore_names (string, callable or list of them):
            If callable, it is a function that takes the name of a parameter
            or persistent value and returns ``True`` when it needs to be
            skipped. If string, it is the name of a parameter or persistent
            value that is to be skipped.
This can also be a list of callables and strings that behave as
described above.
.. seealso::
:func:`chainer.serializers.save_npz`
"""
with numpy.load(file) as f:
d = NpzDeserializer(
f, path=path, strict=strict, ignore_names=ignore_names)
d.load(obj)
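# A minimal save/load round trip sketch using only the functions above;
# ``model`` is assumed to be any object supporting the serialization
# protocol (e.g. a chainer.Link).
def _npz_roundtrip_example(model, filename='model.npz'):
    save_npz(filename, model)  # write the parameters in NPZ format
    load_npz(filename, model)  # restore them in place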
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import errno
import functools
import logging
import os
import subprocess
import sys
import unittest
import unittest.mock
from copy import deepcopy
from airflow import jobs, models
from airflow.utils.db import add_default_pool_if_not_exists
from airflow.utils.state import State
from airflow.utils.timezone import datetime
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
TEST_DAG_CORRUPTED_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags_corrupted')
TEST_UTILS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'test_utils')
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_USER = 'airflow_test_user'
logger = logging.getLogger(__name__)
def mock_custom_module_path(path: str):
"""
Decorator factory that appends ``path`` to ``sys.path`` (simulating a run of the current
script with the ``PYTHONPATH`` environment variable set) and also sets ``PYTHONPATH`` so
that child scripts load modules from the same directory.
"""
def wrapper(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
copy_sys_path = deepcopy(sys.path)
sys.path.append(path)
try:
with unittest.mock.patch.dict('os.environ', {'PYTHONPATH': path}):
return func(*args, **kwargs)
finally:
sys.path = copy_sys_path
return decorator
return wrapper
def grant_permissions():
airflow_home = os.environ['AIRFLOW_HOME']
subprocess.check_call(
'find "%s" -exec sudo chmod og+w {} +; sudo chmod og+rx /root' % airflow_home, shell=True)
def revoke_permissions():
airflow_home = os.environ['AIRFLOW_HOME']
subprocess.check_call(
'find "%s" -exec sudo chmod og-w {} +; sudo chmod og-rx /root' % airflow_home, shell=True)
def check_original_docker_image():
if not os.path.isfile('/.dockerenv') or os.environ.get('PYTHON_BASE_IMAGE') is None:
raise unittest.SkipTest("""Adding/removing a user as part of a test is very bad for host os
(especially if the user already existed to begin with on the OS), therefore we check if we run inside a
the official docker container and only allow to run the test there. This is done by checking /.dockerenv
file (always present inside container) and checking for PYTHON_BASE_IMAGE variable.
""")
def create_user():
try:
subprocess.check_output(['sudo', 'useradd', '-m', TEST_USER, '-g',
str(os.getegid())])
except OSError as e:
if e.errno == errno.ENOENT:
raise unittest.SkipTest(
"The 'useradd' command does not exist, so we are unable to test "
"impersonation; skipping test. These tests can only be run on a "
"linux host that supports 'useradd'."
)
else:
raise unittest.SkipTest(
"The 'useradd' command exited non-zero; Skipping tests. Does the "
"current user have permission to run 'useradd' without a password "
"prompt (check sudoers file)?"
)
class TestImpersonation(unittest.TestCase):
def setUp(self):
check_original_docker_image()
grant_permissions()
add_default_pool_if_not_exists()
self.dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
logger.info('Loaded DAGS:')
logger.info(self.dagbag.dagbag_report())
create_user()
def tearDown(self):
subprocess.check_output(['sudo', 'userdel', '-r', TEST_USER])
revoke_permissions()
def run_backfill(self, dag_id, task_id):
dag = self.dagbag.get_dag(dag_id)
dag.clear()
jobs.BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE).run()
ti = models.TaskInstance(
task=dag.get_task(task_id),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_impersonation(self):
"""
Tests that impersonating a unix user works
"""
self.run_backfill(
'test_impersonation',
'test_impersonated_user'
)
def test_no_impersonation(self):
"""
If default_impersonation=None, tests that the job is run
as the current user (which will be a sudoer)
"""
self.run_backfill(
'test_no_impersonation',
'test_superuser',
)
@unittest.mock.patch.dict('os.environ', AIRFLOW__CORE__DEFAULT_IMPERSONATION=TEST_USER)
def test_default_impersonation(self):
"""
If default_impersonation=TEST_USER, tests that the job defaults
to running as TEST_USER for a test without run_as_user set
"""
self.run_backfill(
'test_default_impersonation',
'test_deelevated_user'
)
def test_impersonation_subdag(self):
"""
Tests that impersonation using a subdag correctly passes the right configuration
"""
self.run_backfill(
'impersonation_subdag',
'test_subdag_operation'
)
class TestImpersonationWithCustomPythonPath(unittest.TestCase):
@mock_custom_module_path(TEST_UTILS_FOLDER)
def setUp(self):
check_original_docker_image()
grant_permissions()
add_default_pool_if_not_exists()
self.dagbag = models.DagBag(
dag_folder=TEST_DAG_CORRUPTED_FOLDER,
include_examples=False,
)
logger.info('Loaded DAGS:')
logger.info(self.dagbag.dagbag_report())
create_user()
def tearDown(self):
subprocess.check_output(['sudo', 'userdel', '-r', TEST_USER])
revoke_permissions()
def run_backfill(self, dag_id, task_id):
dag = self.dagbag.get_dag(dag_id)
dag.clear()
jobs.BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE).run()
ti = models.TaskInstance(
task=dag.get_task(task_id),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
@mock_custom_module_path(TEST_UTILS_FOLDER)
def test_impersonation_custom(self):
"""
Tests that impersonation using a unix user works with custom packages in
PYTHONPATH
"""
# PYTHONPATH is already set in script triggering tests
assert 'PYTHONPATH' in os.environ
self.run_backfill(
'impersonation_with_custom_pkg',
'exec_python_fn'
)
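# For reference, a hedged sketch of the kind of DAG these tests exercise: a
# task that runs as another unix user via the ``run_as_user`` operator
# argument (the import path is assumed for this Airflow version; the ids
# mirror the tests above):
#
#     from airflow import DAG
#     from airflow.operators.bash_operator import BashOperator
#
#     dag = DAG('test_impersonation', start_date=DEFAULT_DATE)
#     BashOperator(task_id='test_impersonated_user',
#                  bash_command='whoami | grep %s' % TEST_USER,
#                  run_as_user=TEST_USER,
#                  dag=dag)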
|
|
"""
Extend pandas with custom array types.
"""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
TypeVar,
)
import numpy as np
from pandas._libs.hashtable import object_hash
from pandas._typing import (
DtypeObj,
type_t,
)
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndex,
ABCSeries,
)
if TYPE_CHECKING:
from pandas.core.arrays import ExtensionArray
# To parameterize on same ExtensionDtype
E = TypeVar("E", bound="ExtensionDtype")
class ExtensionDtype:
"""
A custom data type, to be paired with an ExtensionArray.
See Also
--------
extensions.register_extension_dtype: Register an ExtensionType
with pandas as class decorator.
extensions.ExtensionArray: Abstract base class for custom 1-D array types.
Notes
-----
The interface includes the following abstract methods that must
be implemented by subclasses:
* type
* name
* construct_array_type
The following attributes and methods influence the behavior of the dtype in
pandas operations
* _is_numeric
* _is_boolean
* _get_common_dtype
The `na_value` class attribute can be used to set the default NA value
for this type. :attr:`numpy.nan` is used by default.
ExtensionDtypes are required to be hashable. The base class provides
a default implementation, which relies on the ``_metadata`` class
attribute. ``_metadata`` should be a tuple containing the strings
that define your data type. For example, with ``PeriodDtype`` that's
the ``freq`` attribute.
**If you have a parametrized dtype you should set the ``_metadata``
class property**.
Ideally, the attributes in ``_metadata`` will match the
parameters to your ``ExtensionDtype.__init__`` (if any). If any of
the attributes in ``_metadata`` don't implement the standard
``__eq__`` or ``__hash__``, the default implementations here will not
work.
For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method
can be implemented: this method receives a pyarrow Array or ChunkedArray
as only argument and is expected to return the appropriate pandas
ExtensionArray for this dtype and the passed values::
class ExtensionDtype:
def __from_arrow__(
self, array: Union[pyarrow.Array, pyarrow.ChunkedArray]
) -> ExtensionArray:
...
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
provided for registering virtual subclasses.
"""
_metadata: tuple[str, ...] = ()
def __str__(self) -> str:
return self.name
def __eq__(self, other: Any) -> bool:
"""
Check whether 'other' is equal to self.
By default, 'other' is considered equal if either
* it's a string matching 'self.name'.
* it's an instance of this type and all of the attributes
in ``self._metadata`` are equal between `self` and `other`.
Parameters
----------
other : Any
Returns
-------
bool
"""
if isinstance(other, str):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
return all(
getattr(self, attr) == getattr(other, attr) for attr in self._metadata
)
return False
def __hash__(self) -> int:
# for python>=3.10, different nan objects have different hashes
# we need to avoid that and thus use a hash function with the old behavior
return object_hash(tuple(getattr(self, attr) for attr in self._metadata))
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
@property
def na_value(self) -> object:
"""
Default NA value to use for this type.
This is used in e.g. ExtensionArray.take. This should be the
user-facing "boxed" version of the NA value, not the physical NA value
for storage. e.g. for JSONArray, this is an empty dictionary.
"""
return np.nan
@property
def type(self) -> type_t[Any]:
"""
The scalar type for the array, e.g. ``int``
It's expected ``ExtensionArray[item]`` returns an instance
of ``ExtensionDtype.type`` for scalar ``item``, assuming
that value is valid (not NA). NA values do not need to be
instances of `type`.
"""
raise AbstractMethodError(self)
@property
def kind(self) -> str:
"""
A character code (one of 'biufcmMOSUV'), default 'O'
This should match the NumPy dtype used when the array is
converted to an ndarray, which is probably 'O' for object if
the extension type cannot be represented as a built-in NumPy
type.
See Also
--------
numpy.dtype.kind
"""
return "O"
@property
def name(self) -> str:
"""
A string identifying the data type.
Will be used for display in, e.g. ``Series.dtype``
"""
raise AbstractMethodError(self)
@property
def names(self) -> list[str] | None:
"""
Ordered list of field names, or None if there are no fields.
This is for compatibility with NumPy arrays, and may be removed in the
future.
"""
return None
@classmethod
def construct_array_type(cls) -> type_t[ExtensionArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
raise AbstractMethodError(cls)
@classmethod
def construct_from_string(cls, string: str):
r"""
Construct this type from a string.
This is useful mainly for data types that accept parameters.
For example, a period dtype accepts a frequency parameter that
can be set as ``period[H]`` (where H means hourly frequency).
By default, in the abstract class, just the name of the type is
expected. But subclasses can overwrite this method to accept
parameters.
Parameters
----------
string : str
The name of the type, for example ``category``.
Returns
-------
ExtensionDtype
Instance of the dtype.
Raises
------
TypeError
If a class cannot be constructed from this 'string'.
Examples
--------
For extension dtypes with arguments the following may be an
adequate implementation.
>>> @classmethod
... def construct_from_string(cls, string):
... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
... match = pattern.match(string)
... if match:
... return cls(**match.groupdict())
... else:
... raise TypeError(
... f"Cannot construct a '{cls.__name__}' from '{string}'"
... )
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
# error: Non-overlapping equality check (left operand type: "str", right
# operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap]
assert isinstance(cls.name, str), (cls, type(cls.name))
if string != cls.name:
raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
return cls()
@classmethod
def is_dtype(cls, dtype: object) -> bool:
"""
Check if we match 'dtype'.
Parameters
----------
dtype : object
The object to check.
Returns
-------
bool
Notes
-----
The default implementation is True if
1. ``cls.construct_from_string(dtype)`` is an instance
of ``cls``.
2. ``dtype`` is an object and is an instance of ``cls``
3. ``dtype`` has a ``dtype`` attribute, and any of the above
conditions is true for ``dtype.dtype``.
"""
dtype = getattr(dtype, "dtype", dtype)
if isinstance(dtype, (ABCSeries, ABCIndex, ABCDataFrame, np.dtype)):
# https://github.com/pandas-dev/pandas/issues/22960
# avoid passing data to `construct_from_string`. This could
# cause a FutureWarning from numpy about failing elementwise
# comparison from, e.g., comparing DataFrame == 'category'.
return False
elif dtype is None:
return False
elif isinstance(dtype, cls):
return True
if isinstance(dtype, str):
try:
return cls.construct_from_string(dtype) is not None
except TypeError:
return False
return False
@property
def _is_numeric(self) -> bool:
"""
Whether columns with this dtype should be considered numeric.
By default ExtensionDtypes are assumed to be non-numeric.
They'll be excluded from operations that exclude non-numeric
columns, like (groupby) reductions, plotting, etc.
"""
return False
@property
def _is_boolean(self) -> bool:
"""
Whether this dtype should be considered boolean.
By default, ExtensionDtypes are assumed to be non-numeric.
Setting this to True will affect the behavior of several places,
e.g.
* is_bool
* boolean indexing
Returns
-------
bool
"""
return False
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
"""
Return the common dtype, if one exists.
Used in `find_common_type` implementation. This is for example used
to determine the resulting dtype in a concat operation.
If no common dtype exists, return None (which gives the other dtypes
the chance to determine a common dtype). If all dtypes in the list
return None, then the common dtype will be "object" dtype (this means
it is never needed to return "object" dtype from this method itself).
Parameters
----------
dtypes : list of dtypes
The dtypes for which to determine a common dtype. This is a list
of np.dtype or ExtensionDtype instances.
Returns
-------
Common dtype (np.dtype or ExtensionDtype) or None
"""
if len(set(dtypes)) == 1:
# only itself
return self
else:
return None
@property
def _can_hold_na(self) -> bool:
"""
Can arrays of this dtype hold NA values?
"""
return True
def register_extension_dtype(cls: type[E]) -> type[E]:
"""
Register an ExtensionType with pandas as class decorator.
This enables operations like ``.astype(name)`` for the name
of the ExtensionDtype.
Returns
-------
callable
A class decorator.
Examples
--------
>>> from pandas.api.extensions import register_extension_dtype
>>> from pandas.api.extensions import ExtensionDtype
>>> @register_extension_dtype
... class MyExtensionDtype(ExtensionDtype):
... name = "myextension"
"""
_registry.register(cls)
return cls
class Registry:
"""
Registry for dtype inference.
The registry allows one to map a string repr of an extension
dtype to an extension dtype. The string alias can be used in several
places, including
* Series and Index constructors
* :meth:`pandas.array`
* :meth:`pandas.Series.astype`
Multiple extension types can be registered.
These are tried in order.
"""
def __init__(self):
self.dtypes: list[type[ExtensionDtype]] = []
def register(self, dtype: type[ExtensionDtype]) -> None:
"""
Parameters
----------
dtype : ExtensionDtype class
"""
if not issubclass(dtype, ExtensionDtype):
raise ValueError("can only register pandas extension dtypes")
self.dtypes.append(dtype)
def find(self, dtype: type[ExtensionDtype] | str) -> type[ExtensionDtype] | None:
"""
Parameters
----------
dtype : Type[ExtensionDtype] or str
Returns
-------
return the first matching dtype, otherwise return None
"""
if not isinstance(dtype, str):
dtype_type = dtype
if not isinstance(dtype, type):
dtype_type = type(dtype)
if issubclass(dtype_type, ExtensionDtype):
return dtype
return None
for dtype_type in self.dtypes:
try:
return dtype_type.construct_from_string(dtype)
except TypeError:
pass
return None
_registry = Registry()
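# A minimal sketch of the interface described above: a parametrized dtype
# that fills in the three required members (``type``, ``name``,
# ``construct_array_type``) plus ``_metadata`` so the default
# ``__eq__``/``__hash__`` work. ``UnitDtype`` and ``UnitArray`` are
# hypothetical names; ``UnitArray`` would be an ExtensionArray subclass.
#
#     >>> @register_extension_dtype
#     ... class UnitDtype(ExtensionDtype):
#     ...     _metadata = ("unit",)
#     ...     def __init__(self, unit="m"):
#     ...         self.unit = unit
#     ...     @property
#     ...     def type(self):
#     ...         return float
#     ...     @property
#     ...     def name(self):
#     ...         return f"unit[{self.unit}]"
#     ...     @classmethod
#     ...     def construct_array_type(cls):
#     ...         return UnitArray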
|
|
from __future__ import print_function
import glob
import json
import os
import shutil
import subprocess
import sys
import tarfile
import time
import marvin.deployDataCenter
import marvin.marvinInit
import mysql.connector
import nose
import paramiko
import requests
import scp
from jsonpath_ng import parse
from cmds import CMDS as CMDS
from . import Base
class CIException(Exception):
pass
class CI(Base.Base):
"""Initializes CI class with the given ``marvin_config`` file
:param marvin_config: Path to marvin file
:param debug: Output debug information
"""
def __init__(self, marvin_config=None, debug=False):
super(CI, self).__init__(marvin_config=marvin_config, debug=debug)
self.__workspace = None
self.zone = None
self.setup_files = '/data/shared'
self.templatepath = '/template/tmpl/1/3'
self.templateuuid = 'f327eecc-be53-4d80-9d43-adaf45467abd'
self.flywayversion = '6.1.3'
self.flywaycli = ('https://repo1.maven.org/maven2/org/flywaydb/flyway-commandline/'
'{fwv}/flyway-commandline-{fwv}-linux-x64.tar.gz'.format(fwv=self.flywayversion))
self.mariadbversion = '2.3.0'
self.mariadbjar = ('https://beta-nexus.mcc.schubergphilis.com/service/local/artifact/maven/'
'redirect?r=central&g=org.mariadb.jdbc&a=mariadb-java-client&v=%s' % self.mariadbversion)
@property
def workspace(self):
workspace = os.getenv('CUSTOM_WORKSPACE') if not self.__workspace else self.__workspace
if workspace is None:
workspace = "/".join(filter(None, ['/data/git', self.zone, 'cosmic']))
print("==> Using workspace: ", workspace)
return workspace
@workspace.setter
def workspace(self, value):
self.__workspace = value
def prepare(self, timeout=900, cloudstack_deploy_mode=""):
"""Prepare infrastructure for CI pipeline
:param timeout: Timeout to wait for the infra to be built
:param cloudstack_deploy_mode: Deploy cloudstack infra
"""
clusters = parse('zones[*].pods[*].clusters[*]').find(self.config)
primarystorage = parse('zones[*].pods[*].clusters[*].primaryStorages[*]').find(self.config)
secondarystorage = parse('zones[*].secondaryStorages[*]').find(self.config)
for path in map(lambda x: x.value['url'].split(':')[2], primarystorage + secondarystorage):
if not os.path.exists(path):
os.makedirs(path)
# There can only be one Hypervisor type KVM or Xen
hypervisor = clusters[0].value['hypervisor'].lower()
if hypervisor == 'kvm':
print("==> Found hypervisor: %s; changing MTU to 1600" % hypervisor)
elif hypervisor == 'xenserver':
print("==> Found hypervisor: %s; changing MTU to 1500" % hypervisor)
else:
raise CIException("Hypervisor %s is unsupported, aborting" % hypervisor)
for h in os.listdir('/sys/devices/virtual/net/virbr0/brif/'):
for c in CMDS['MTU'][hypervisor]:
subprocess.call(map(lambda x: x.format(dev=h), c.split(' ')))
if cloudstack_deploy_mode:
cloudstack_deploy_mode = "--cloudstack"
if self.debug:
print("==> Executing: ", CMDS['deploy'].format(marvin_config=self.marvin_config,
cloudstack_deploy_mode=cloudstack_deploy_mode))
task = subprocess.Popen(map(lambda x: x.format(marvin_config=self.marvin_config,
cloudstack_deploy_mode=cloudstack_deploy_mode),
CMDS['deploy'].split(' ')))
retries = timeout
# FIXME: In python3 subprocess.call has a timeout, this can then be removed
while task.poll() is None and retries > 0:
time.sleep(1)
retries -= 1
for cmd in CMDS['generic']:
subprocess.call(cmd.split(' '))
def cleanup(self, jsonpath=None, vm=None, name_path=None, result_filter=None, collect_logs=True):
"""Collect all data and cleanup VM's and images
Example:
ci = CI('/data/shared/marvin/marvin.json')
ci.cleanup(config=config, jsonpath='zones[*].pods[*].clusters[*].hosts[*]', namepath='url',
filter=lambda x: x.split('/')[::-1][0])
:param jsonpath: JSONPath to filter out JSON
:param vm: Name of the instance to remove
:param name_path: Optional parameter to filter out json
:param result_filter: Optional lambda to use on filtered result
:param collect_logs: Collect logs and coverage files
"""
for i in parse(jsonpath).find(self.config):
properties = i.value
username = properties.get('username', properties.get('user', 'root'))
password = properties.get('password', properties.get('passwd', 'password'))
if name_path:
vm = parse(name_path).find(properties)[0].value
if result_filter:
vm = result_filter(vm)
if collect_logs:
print("==> Collecting Logs and Code Coverage Report from %s" % vm)
# TODO: Copy logs and coverage reports from HV and SCP them
# collect_files_from_vm ${csip} ${csuser} ${cspass} "/var/log/cosmic/management/*.log*" "cs${i}-management-logs/"
if vm.startswith('cs'):
src = "/var/log/cosmic/management/*.log*"
dstdir = "%s-management-logs" % vm
hostname = properties['mgtSvrIp']
else:
src = "/var/log/cosmic/agent/*.log*"
dstdir = "%s-agent-logs" % vm
hostname = vm
if not os.path.exists(dstdir):
os.makedirs(dstdir)
try:
self.collect_files_from_vm(hostname=hostname, username=username, password=password,
src=src, dst="%s" % dstdir)
except (scp.SCPException, paramiko.SSHException) as e:
print("ERROR: %s" % e)
print("==> Destroying VM %s" % vm)
# FIXME: Create library for this instead of a subprocess
subprocess.call(['/data/shared/deploy/kvm_local_deploy.py', '-x', vm])
def cleanup_storage(self):
"""Cleanup storage"""
primarystorage = parse('zones[*].pods[*].clusters[*].primaryStorages[*]').find(self.config)
secondarystorage = parse('zones[*].secondaryStorages[*]').find(self.config)
for i in map(lambda x: x.value['url'].split(':')[2], primarystorage + secondarystorage):
if os.path.exists(i):
try:
shutil.rmtree("%s" % i)
except OSError as e:
print("ERROR: %s" % e.message)
def collect_files_from_vm(self, hostname='localhost', username=None, password=None, src=None, dst=None):
"""Collect logs and coverage files
:param hostname: Hostname
:param username: Username
:param password: Password
:param src: Source files
:param dst: Destination directory
"""
self._scp_get(hostname=hostname, username=username, password=password, srcfile=src, destfile=dst)
def marvin_tests(self, tests=None):
"""Run Marvin tests
:param tests: marvin tests to run
"""
self.copy_marvin_config()
# Run marvin tests
old_path = os.getcwd()
nose_args = ("nosetests --with-xunit --xunit-file={path}/nosetests.xml "
"--with-marvin --marvin-config={config} "
"-s -a tags=advanced {tests}".format(path=old_path,
config=self.marvin_config,
tests=" ".join(tests)))
os.chdir("cosmic-core/test/integration")
print("==> Running tests")
if self.debug:
print('==> Nose parameters: %s' % nose_args)
ret = nose.run(argv=nose_args.split(" "))
os.chdir(old_path)
if not ret:
sys.exit(1)
sys.exit(0)
def deploy_dc(self):
"""Deploy DC
Use Marvin to deploy DC
"""
print("==> Deploying Data Center")
# TODO: Replace Marvin
mrv = marvin.marvinInit.MarvinInit(self.marvin_config)
mrv.init()
dc = marvin.deployDataCenter.DeployDataCenters(mrv.getTestClient(), mrv.getParsedConfig())
dc.deploy()
def copy_marvin_config(self):
"""Copy Marvin file to current working directory"""
print("==> Making local copy of Marvin Config file")
marvin_filename = self.marvin_config.split('/')[-1]
open(marvin_filename, "w").write(json.dumps(self.config, indent=4))
def install_kvm_packages(self):
"""Prepare KVM hypervisor"""
zones = parse('zones[*]').find(self.config)
for zone in zones:
hosts = parse('pods[*].clusters[*].hosts[*]').find(zone)
for host in hosts:
hostname = host.value['url'].split('/')[-1]
connection = {'hostname': hostname, 'username': host.value['username'],
'password': host.value['password']}
# Do pre-commands
for cmd in CMDS['agent_install']['precommands']:
self._ssh(cmd=cmd, **connection)
# SCP files
for cmd in CMDS['agent_install']['scp']:
self.zone = zone.value['name']
src_file = self.workspace + "/" + cmd[0]
self._scp_put(srcfile=src_file, destfile=cmd[1], **connection)
if self.debug:
print("==> scp %s %s:%s\n" % (src_file, hostname, cmd[1]))
# Do post-commands
for cmd in CMDS['agent_install']['postcommands']:
self._ssh(cmd=cmd, **connection)
def deploy_cosmic_db(self):
"""Prepare Cosmic Database"""
cmd = 'mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO \'root\'@\'%\' WITH GRANT OPTION; FLUSH PRIVILEGES;"'
# TODO: At the moment there is only one DB server specified, so deployment only uses that DB server
db_svr = self.config['dbSvr']['dbSvr']
db_port = self.config['dbSvr']['port']
db_user = self.config['dbSvr']['user']
db_pass = self.config['dbSvr']['passwd']
self.wait_for_port(hostname=db_svr, tcp_port=db_port)
for mgtSvr in self.config['mgtSvr']:
self._ssh(hostname=mgtSvr['mgtSvrIp'], username=mgtSvr['user'],
password=mgtSvr['passwd'], cmd=cmd)
for query in open("%s/ci/setup_files/create-cloud-db.sql" % self.setup_files, "r").readlines():
if query == '\n':
continue
cloud_db = mysql.connector.connect(
host=self.config['dbSvr']['dbSvr'],
username="root"
)
cloud_cursor = cloud_db.cursor()
cloud_cursor.execute(query)
cloud_db.commit()
cloud_db.close()
for f in glob.glob('/tmp/flyway*'):
os.unlink(f) if os.path.isfile(f) else shutil.rmtree(f)
resp = requests.get(self.flywaycli)
open('/tmp/flyway.tar.gz', 'wb').write(resp.content)
tar = tarfile.open('/tmp/flyway.tar.gz')
tar.extractall(path='/tmp/')
tar.close()
for mgtSvr in self.config['mgtSvr']:
subprocess.call(['/tmp/flyway-{fwv}/flyway'.format(fwv=self.flywayversion),
'-url=jdbc:mariadb://%s:%s/cloud' % (mgtSvr['mgtSvrIp'], db_port),
'-user=%s' % db_user,
'-password=%s' % db_pass,
'-encoding=UTF-8',
'-locations=filesystem:cosmic-core/cosmic-flyway/src',
'-baselineOnMigrate=true',
'-table=schema_version',
'migrate'])
print('==> Cosmic DB deployed at %s' % mgtSvr['mgtSvrIp'])
for f in glob.glob('/tmp/flyway*'):
os.unlink(f) if os.path.isfile(f) else shutil.rmtree(f)
def install_systemvm_templates(self, template=None):
"""Install SystemVM template
:param template: File location of template file
"""
tmpltsize = os.stat(template).st_size
template_properties = (
"filename={uuid}.qcow2\n"
"description=SystemVM Template\n"
"checksum=\n"
"hvm=false\n"
"size={tmpltsize}\n"
"qcow2=true\n"
"id=3\n"
"public=true\n"
"qcow2.filename={uuid}.qcow2\n"
"uniquename=routing-3\n"
"qcow2.virtualsize={tmpltsize}\n"
"virtualsize={tmpltsize}\n"
"qcow2.size={tmpltsize}\n".format(uuid=self.templateuuid, tmpltsize=tmpltsize)
)
secondarystorage = parse('zones[*].secondaryStorages[*]').find(self.config)
for path in map(lambda x: x.value['url'].split(':')[2], secondarystorage):
if not os.path.exists(path+self.templatepath):
os.makedirs(path+self.templatepath)
shutil.copyfile(template, "%s%s/%s.qcow2" % (path, self.templatepath, self.templateuuid))
open("%s%s/template.properties" % (path, self.templatepath), 'w').write(template_properties)
print('==> SystemVM templates installed')
def configure_tomcat_to_load_jacoco_agent(self):
"""Deploy jacoco agent on management server"""
open("/tmp/jacoco.conf", "w").write('JAVA_OPTS="$JAVA_OPTS -javaagent:/tmp/jacoco-agent.jar=destfile=/tmp/jacoco-it.exec"\n')
zone = self.config['zones'][0]['name']
for host in self.config['mgtSvr']:
connection = {'hostname': host['mgtSvrIp'], 'username': host['user'], 'password': host['passwd']}
self.zone = zone
src_file = self.workspace + "/target/jacoco-agent.jar"
self._scp_put(srcfile=src_file, destfile="/tmp", **connection)
self._scp_put(srcfile="/tmp/jacoco.conf", destfile="/etc/tomcat/conf.d/jacoco.conf", **connection)
print("==> Tomcat configured")
os.unlink("/tmp/jacoco.conf")
def configure_agent_to_load_jacoco_agent(self):
"""Deploy jacoco agent on hypervisor"""
zones = parse('zones[*]').find(self.config)
for zone in zones:
hosts = parse('pods[*].clusters[*].hosts[*]').find(zone)
for host in hosts:
hostname = host.value['url'].split('/')[-1]
connection = {'hostname': hostname, 'username': host.value['username'],
'password': host.value['password']}
cmd = r"sed -i -e 's|/bin/java -Xms|/bin/java -javaagent:/tmp/jacoco-agent.jar=destfile=/tmp/jacoco-it.exec -Xms|' /usr/lib/systemd/system/cosmic-agent.service"
self.zone = zone.value['name']
src_file = self.workspace + "/target/jacoco-agent.jar"
self._scp_put(srcfile=src_file, destfile="/tmp", **connection)
self._ssh(cmd=cmd, **connection)
self._ssh(cmd="systemctl daemon-reload", **connection)
print("==> Agent configured")
def deploy_cosmic_war(self):
"""Deploy Cosmic WAR file"""
resp = requests.get(self.mariadbjar)
open('/tmp/mariadb-java-client-latest.jar', 'wb').write(resp.content)
self.zone = self.config['zones'][0]['name']
template_vars = {
'setup_files': "%s/ci/setup_files" % self.setup_files,
'mariadbjar': "/tmp/mariadb-java-client-latest.jar",
'war_file': "%s/cosmic-client/target/cloud-client-ui-*.war" % self.workspace
}
for host in self.config['mgtSvr']:
connection = {'hostname': host['mgtSvrIp'], 'username': host['user'], 'password': host['passwd']}
# Do pre-commands
for cmd in CMDS['war_deploy']['precommands']:
self._ssh(cmd=cmd, **connection)
# Do scp-commands
for cmd in CMDS['war_deploy']['scp']:
srcfile = cmd[0].format(**template_vars)
self._scp_put(srcfile=srcfile, destfile=cmd[1], **connection)
# Do post-commands
for cmd in CMDS['war_deploy']['postcommands']:
self._ssh(cmd=cmd, **connection)
os.unlink("/tmp/mariadb-java-client-latest.jar")
def collect_test_coverage_files(self):
self.zone = self.config['zones'][0]['name']
for host in self.config['mgtSvr']:
connection = {'hostname': host['mgtSvrIp'], 'username': host['user'], 'password': host['passwd']}
print("==> Stopping Tomcat on %s" % host['mgtSvrName'])
self._ssh(cmd="systemctl stop tomcat", **connection)
print("==> Collecting Integration Tests Coverage Data (Management Server) from %s" % host['mgtSvrName'])
destfile = ("%s/target/coverage-reports/jacoco-it-%s.exec" %
(self.workspace, host['mgtSvrName']))
try:
self._scp_get(srcfile="/tmp/jacoco-it.exec", destfile=destfile, **connection)
except IOError as e:
print("ERROR: %s" % (e.message or e.strerror))
zones = parse('zones[*]').find(self.config)
for zone in zones:
hosts = parse('pods[*].clusters[*].hosts[*]').find(zone)
for host in hosts:
hostname = host.value['url'].split('/')[-1]
connection = {'hostname': hostname, 'username': host.value['username'],
'password': host.value['password']}
print("==> Stopping Cosmic KVM Agent on host %s" % hostname)
self._ssh(cmd="systemctl stop cosmic-agent", **connection)
self.zone = zone.value['name']
destfile = ("%s/target/coverage-reports/jacoco-it-%s.exec" %
(self.workspace, hostname))
print("==> Collecting Integration Tests Coverage Data (Agent) from %s" % hostname)
try:
self._scp_get(srcfile="/tmp/jacoco-it.exec", destfile=destfile, **connection)
except IOError as e:
print("ERROR: %s" % (e.message or e.strerror))
|
|
#!/usr/bin/env python
"""Sample script of recurrent neural network language model.
This code is ported from the following implementation written in Torch.
https://github.com/tomsercu/lstm
"""
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
# Definition of a recurrent net for language modeling
class RNNForLM(chainer.Chain):
def __init__(self, n_vocab, n_units):
super(RNNForLM, self).__init__()
with self.init_scope():
self.embed = L.EmbedID(n_vocab, n_units)
self.l1 = L.LSTM(n_units, n_units)
self.l2 = L.LSTM(n_units, n_units)
self.l3 = L.Linear(n_units, n_vocab)
for param in self.params():
param.data[...] = np.random.uniform(-0.1, 0.1, param.data.shape)
def reset_state(self):
self.l1.reset_state()
self.l2.reset_state()
def __call__(self, x):
h0 = self.embed(x)
h1 = self.l1(F.dropout(h0))
h2 = self.l2(F.dropout(h1))
y = self.l3(F.dropout(h2))
return y
# Dataset iterator to create a batch of sequences at different positions.
# This iterator returns a pair of current words and the next words. Each
# example is part of a sequence starting at a different offset; the offsets
# are equally spaced within the whole sequence.
class ParallelSequentialIterator(chainer.dataset.Iterator):
def __init__(self, dataset, batch_size, repeat=True):
self.dataset = dataset
self.batch_size = batch_size # batch size
# Number of completed sweeps over the dataset. In this case, it is
# incremented if every word is visited at least once after the last
# increment.
self.epoch = 0
# True if the epoch is incremented at the last iteration.
self.is_new_epoch = False
self.repeat = repeat
length = len(dataset)
# Offsets maintain the position of each sequence in the mini-batch.
self.offsets = [i * length // batch_size for i in range(batch_size)]
# NOTE: this is not a count of parameter updates. It is just a count of
# calls of ``__next__``.
self.iteration = 0
def __next__(self):
# This iterator returns a list representing a mini-batch. Each item
# indicates a different position in the original sequence. Each item is
# represented by a pair of two word IDs. The first word is at the
# "current" position, while the second word at the next position.
# At each iteration, the iteration count is incremented, which pushes
# forward the "current" position.
length = len(self.dataset)
if not self.repeat and self.iteration * self.batch_size >= length:
# If not self.repeat, this iterator stops at the end of the first
# epoch (i.e., when all words are visited once).
raise StopIteration
cur_words = self.get_words()
self.iteration += 1
next_words = self.get_words()
epoch = self.iteration * self.batch_size // length
self.is_new_epoch = self.epoch < epoch
if self.is_new_epoch:
self.epoch = epoch
return list(zip(cur_words, next_words))
@property
def epoch_detail(self):
# Floating point version of epoch.
return self.iteration * self.batch_size / len(self.dataset)
def get_words(self):
# It returns a list of current words.
return [self.dataset[(offset + self.iteration) % len(self.dataset)]
for offset in self.offsets]
def serialize(self, serializer):
# It is important to serialize the state to be recovered on resume.
self.iteration = serializer('iteration', self.iteration)
self.epoch = serializer('epoch', self.epoch)
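# Worked illustration of the iterator above (toy data assumed): with
# dataset = list(range(10)) and batch_size = 2, offsets are [0, 5], so the
# first call to __next__() returns [(0, 1), (5, 6)] -- one (current word,
# next word) pair per offset -- and the second call returns [(1, 2), (6, 7)].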
# Custom updater for truncated BackProp Through Time (BPTT)
class BPTTUpdater(training.StandardUpdater):
def __init__(self, train_iter, optimizer, bprop_len, device):
super(BPTTUpdater, self).__init__(
train_iter, optimizer, device=device)
self.bprop_len = bprop_len
# The core part of the update routine can be customized by overriding.
def update_core(self):
loss = 0
# When we pass one iterator and optimizer to StandardUpdater.__init__,
# they are automatically named 'main'.
train_iter = self.get_iterator('main')
optimizer = self.get_optimizer('main')
# Progress the dataset iterator for bprop_len words at each iteration.
for i in range(self.bprop_len):
# Get the next batch (a list of tuples of two word IDs)
batch = train_iter.__next__()
# Concatenate the word IDs to matrices and send them to the device
# self.converter does this job
# (it is chainer.dataset.concat_examples by default)
x, t = self.converter(batch, self.device)
# Compute the loss at this time step and accumulate it
loss += optimizer.target(chainer.Variable(x), chainer.Variable(t))
optimizer.target.cleargrads() # Clear the parameter gradients
loss.backward() # Backprop
loss.unchain_backward() # Truncate the graph
optimizer.update() # Update the parameters
# Routine to rewrite the result dictionary of LogReport to add perplexity
# values
def compute_perplexity(result):
result['perplexity'] = np.exp(result['main/loss'])
if 'validation/main/loss' in result:
result['val_perplexity'] = np.exp(result['validation/main/loss'])
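# Perplexity is the exponential of the mean cross-entropy loss; for example, a
# loss of 4.6 nats gives a perplexity of exp(4.6) ~= 99.5, i.e. the model is
# roughly as uncertain as a uniform choice over ~100 words.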
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--batchsize', '-b', type=int, default=20,
help='Number of examples in each mini-batch')
parser.add_argument('--bproplen', '-l', type=int, default=35,
help='Number of words in each mini-batch '
'(= length of truncated BPTT)')
parser.add_argument('--epoch', '-e', type=int, default=39,
help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--gradclip', '-c', type=float, default=5,
help='Gradient norm threshold to clip')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--test', action='store_true',
help='Use tiny datasets for quick tests')
parser.set_defaults(test=False)
parser.add_argument('--unit', '-u', type=int, default=650,
help='Number of LSTM units in each layer')
parser.add_argument('--model', '-m', default='model.npz',
help='Model file name to serialize')
args = parser.parse_args()
# Load the Penn Tree Bank long word sequence dataset
train, val, test = chainer.datasets.get_ptb_words()
n_vocab = max(train) + 1 # train is just an array of integers
print('#vocab =', n_vocab)
if args.test:
train = train[:100]
val = val[:100]
test = test[:100]
train_iter = ParallelSequentialIterator(train, args.batchsize)
val_iter = ParallelSequentialIterator(val, 1, repeat=False)
test_iter = ParallelSequentialIterator(test, 1, repeat=False)
# Prepare an RNNLM model
rnn = RNNForLM(n_vocab, args.unit)
model = L.Classifier(rnn)
model.compute_accuracy = False # we only want the perplexity
if args.gpu >= 0:
# Make a specified GPU current
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
# Set up an optimizer
optimizer = chainer.optimizers.SGD(lr=1.0)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.GradientClipping(args.gradclip))
# Set up a trainer
updater = BPTTUpdater(train_iter, optimizer, args.bproplen, args.gpu)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
eval_model = model.copy() # Model with shared params and distinct states
eval_rnn = eval_model.predictor
trainer.extend(extensions.Evaluator(
val_iter, eval_model, device=args.gpu,
# Reset the RNN state at the beginning of each evaluation
eval_hook=lambda _: eval_rnn.reset_state()))
interval = 10 if args.test else 500
trainer.extend(extensions.LogReport(postprocess=compute_perplexity,
trigger=(interval, 'iteration')))
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'perplexity', 'val_perplexity']
), trigger=(interval, 'iteration'))
trainer.extend(extensions.ProgressBar(
update_interval=1 if args.test else 10))
trainer.extend(extensions.snapshot())
trainer.extend(extensions.snapshot_object(
model, 'model_iter_{.updater.iteration}'))
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
# Evaluate the final model
print('test')
eval_rnn.reset_state()
evaluator = extensions.Evaluator(test_iter, eval_model, device=args.gpu)
result = evaluator()
print('test perplexity:', np.exp(float(result['main/loss'])))
# Serialize the final model
chainer.serializers.save_npz(args.model, model)
if __name__ == '__main__':
main()
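# Hedged invocation sketch (the script filename is hypothetical):
#
#     $ python train_ptb.py --test             # quick smoke run on tiny data
#     $ python train_ptb.py --gpu 0 --epoch 39 # full training on GPU 0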
|
|
# Copyright (C) 2015 Henrique Pereira Coutada Miranda, Alejandro Molina-Sanchez
# All rights reserved.
#
# This file is part of yambopy
#
from subprocess import Popen, PIPE
import os
import json
from time import sleep
import re
class YamboIn():
"""
Class to read, write, create and manipulate yambo input files with python.
Examples of use:
Initialize an empty input file:
.. code-block:: python
y = YamboIn(filename='somefile.in')
print y
Call yambo to initialize the input file with variables according to the runlevel,
parse the input file and store the variables:
.. code-block:: python
y = YamboIn('yambo -o c',folder='ip')
print y
If the argument ``args`` was used then the filename should be left as ``yambo.in`` because that's the default input filename that yambo will create.
Call ypp to initialize the input file:
.. code-block:: python
y = YamboIn(args='ypp -e w',filename='ypp.in')
print y
**Arguments:**
``args``: if specified yambopy will run yambo, read the generated input file and initialize the class with those variables.
``folder``: the folder where the SAVE directory is located
``vim``: if yambo is compiled with vim as its editor, set this variable to True so that `yambopy` can close vim after yambo opens it.
In newer versions of yambo the '-Q' argument tells it not to call vim at all
``filename``: the name of the input file to be read
"""
#Regular expressions
_variaexp = '([A-Za-z\_0-9]+(?:\_[A-Za-z]+)?)' #variables names
_numexp = '([+-]?\d+(?:\.\d+)?(?:[eE][-+]?\d+)?)' #number
_spacexp = '(?:[ \t]+)?' #space
_stringexp = '["\']([a-zA-Z0-9_ ]+?)["\']' #string
_arrayexp = '%'+_spacexp+_variaexp+'\s+(?:\#.+)?((?:(?:\s|\.|[+-]?\d)+?\|)+)\s+([a-zA-Z]+)?' #arrays
_complexexp = '\('+_spacexp+_numexp+_spacexp+','+_spacexp+_numexp+_spacexp+'\)' #complex numbers
_runexp = '([a-zA-Z0-9_]+)' #runlevels
# list of available runlevels to be stored in the arguments array
_runlevels = ['rim_cut','chi','em1s','bse','optics','bsk','bss',
'em1d','gw0','HF_and_locXC','setup','ppa','cohsex','life',
'collisions','negf','el_ph_scatt','el_el_scatt','excitons','wavefunction','fixsyms',
'QPDBs', 'QPDB_merge','RealTime','RT_X','RToccDos','RToccBnd','RToccEner',
'RToccTime','RTlifeBnd','amplitude','bzgrids','Random_Grid','gkkp','el_ph_corr','WRbsWF','Select_energy', 'RTDBs','photolum','kpts_map',
'RTtime','RToccupations','RTfitbands']
def __init__(self,args='',folder='.',vim=True,filename='yambo.in'):
"""
Initialize the class
"""
self.folder = folder
self.yamboargs = args
#the type of the variables is determined from the type of variable in this dictionary
self.variables = {} #here we will store the values of the variables
self.arguments = [] #here we will store the arguments
# if we initialize the class with arguments we call yambo to generate the input file
if args != '':
workdir = os.getcwd()
os.chdir(folder)
os.system('rm -f %s'%filename)
yambo = Popen(args, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=True)
# if yambo calls vim we have to close it. We just want the generic input file
# that yambo generates.
if vim: yambo.stdin.write(":q!\n")
yambo.wait()
os.chdir(workdir)
self.read_file(filename="%s/%s"%(folder,filename))
else:
if filename:
self.read_file(filename="%s/%s"%(folder,filename))
def __getitem__(self,key):
""" Get the value of a variable in the input file
"""
return self.variables[key]
def __setitem__(self,key,value):
""" Set the value of a variable in the input file
"""
#if the units are not specified, add them
if type(value) == list and str not in map(type,value):
value = [value,'']
if type(value) in [int,float,complex]:
value = [value,'']
self.variables[key] = value
def __delitem__(self,key):
""" Remove a keyword from the dicitonary
"""
del self.variables[key]
def read_file(self,filename='yambo.in'):
""" Read the variables from a file
"""
try:
yambofile = open(filename,"r")
except IOError:
print('Could not read the file %s'%filename)
print('Either yambo did not create the input file, or the file you are trying to read does not exist')
print('command: %s'%self.yamboargs)
print('folder: %s/'%self.folder)
exit()
self.read_string(yambofile.read())
yambofile.close()
def add_dict(self,variables):
"""
Add a dictionary containing variables to the current inputfile
"""
self.variables.update(variables)
def read_string(self,inputfile):
"""
Read the input variables from a string
"""
var_real = re.findall(self._variaexp + self._spacexp + '='+ self._spacexp +
self._numexp + self._spacexp + '([A-Za-z]+)?',inputfile)
var_string = re.findall(self._variaexp + self._spacexp + '='+ self._spacexp + self._stringexp, inputfile)
var_array = re.findall(self._arrayexp,inputfile)
var_complex = re.findall(self._variaexp + self._spacexp + '='+ self._spacexp + self._complexexp + self._spacexp + '([A-Za-z]+)?', inputfile)
var_runlevel = re.findall(self._runexp + self._spacexp, inputfile)
def clean(a):
"""
clean the variables according to the type of data
"""
a = a.strip()
if a.replace('.','',1).isdigit():
if "." in a: return float(a)
else: return int(a)
return a
# Determination of the arguments
for key in self._runlevels:
if key in var_runlevel:
self.arguments.append(key)
#float variables
for var in var_real:
name, value, unit = var
self[name] = [float(value),unit]
#string variables
for var in var_string:
name, string = var
self[name] = string
#complex variables
for var in var_complex:
name, real, imag, unit = var
self[name] = [complex(float(real),float(imag)),unit]
#array variables
for var in var_array:
name, array, unit = var
array = [clean(val) for val in array.split('|')[:-1]]
self[name] = [array,unit]
return {"arguments": self.arguments, "variables": self.variables}
def optimize(self,conv,variables=('all',),run=lambda x: None,ref_run=True):
""" Function to to make multiple runs of yambo to converge calculation parameters
Input:
A dictionary conv that has all the variables to be optimized
A list fo the name of the variables in the dicitonary that are to be optimized
A function run that takes as input the name of the inputfile (used to run yambo)
A boolean ref_run that can disable the submitting of the reference run (see scripts/analyse_gw.py)
.. code-block:: python
def run(filename):
os.system('yambo -F %s'%filename)
"""
name_files = []
#check which variables to optimize
if 'all' in variables:
variables = conv.keys()
#save all the variables
backup = {}
for var in variables:
backup[var] = self[var]
#add units to all the variables (to be improved!)
for key,value in conv.items():
if type(value[-1]) != str and type(value[0]) == list:
conv[key] = [value,'']
#make a first run with all the first elements
reference = {}
for key,value in conv.items():
values, unit = value
reference[key] = [values[0],unit]
self[key] = [values[0],unit]
#write the file and run
if ref_run==True:
self.write( "%s/reference.in"%(self.folder) )
run('reference.in')
else:
print('Reference run disabled.')
#converge one by one
for key in [var for var in conv.keys() if var in variables]:
values, unit = conv[key]
#put back the original values of the variables
for var in variables:
self[var] = reference[var]
#change the other variables
if type(values[0])==str:
for string in values[1:]:
filename = "%s_%s"%(key,string)
self[key] = string
self.write( "%s/%s.in"%(self.folder,filename) )
run(filename+".in")
continue
if type(values[0])==float:
for val in values[1:]:
filename = "%s_%12.8lf"%(key,val)
self[key] = [val,unit]
self.write( "%s/%s.in"%(self.folder,filename) )
run(filename+".in")
continue
if type(values[0])==int:
for val in values[1:]:
filename = "%s_%05d"%(key,val)
self[key] = [val,unit]
self.write( "%s/%s.in"%(self.folder,filename) )
run(filename+".in")
continue
if type(values[0])==list:
for array in values[1:]:
filename = "%s_%s"%(key,"_".join(map(str,array)))
self[key] = [array,unit]
self.write( "%s/%s.in"%(self.folder,filename) )
run(filename+".in")
continue
if type(values[0])==complex:
for value in values[1:]:
filename = "%s_%lf_%lf"%(key,value.real,value.imag)
self[key] = [value,unit]
self.write( "%s/%s.in"%(self.folder,filename) )
run(filename+".in")
continue
raise ValueError( "unknown type for variable:", key )
#put back the original values of the variables
for var in variables:
self[var] = backup[var]
return name_files
def write(self,filename='yambo.in'):
"""
Write a yambo input file
"""
f = open(filename,"w")
f.write(str(self))
f.close()
def pack(self,filename):
"""
Pack all the data of this structure in a `.json` file
"""
f = open(filename,'w')
json.dump([self.arguments,self.variables],f,indent=5)
f.close()
def __str__(self):
"""
Returns the input file as a string
"""
s = ""
#arguments
s += "\n".join(self.arguments)+'\n'
for key,value in self.variables.items():
if type(value)==str or type(value)==unicode:
s+= "%s = %10s\n"%(key,"'%s'"%value)
continue
if type(value[0])==float:
val, unit = value
if val > 1e-6:
s+="%s = %lf %s\n"%(key,val,unit)
else:
s+="%s = %e %s\n"%(key,val,unit)
continue
if type(value[0])==int:
val, unit = value
s+="%s = %d %s\n"%(key,val,unit)
continue
if type(value[0])==list:
array, unit = value
if type(array[0])==list:
s+='%% %s\n'%key
for l in array:
s+="%s \n"%(" | ".join(map(str,l))+' | ')
s+='%s'%unit
s+='%\n'
else:
s+="%% %s\n %s %s \n%%\n"%(key," | ".join(map(str,array))+' | ',unit)
continue
if type(value[0])==str:
array = value
s+="%% %s\n %s \n%%\n"%(key," | ".join(map(lambda x: "'%s'"%x.replace("'","").replace("\"",""),array))+' | ')
continue
if type(value[0])==complex:
value, unit = value
s+="%s = (%lf,%lf) %s\n"%(key,value.real,value.imag,unit)
continue
raise ValueError( "Unknown type %s for variable: %s" %( type(value), key) )
return s
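# Hedged usage sketch of the optimize() helper above; the runlevel mirrors the
# docstring example and the variable names are common yambo inputs, but the
# values are purely illustrative:
#
#     y = YamboIn('yambo -o c',folder='gw')
#     conv = {'FFTGvecs':[[5,10,15],'Ry'],
#             'BndsRnXp':[[[1,10],[1,20],[1,30]],'']}
#     def run(filename):
#         os.system('cd gw; yambo -F %s'%filename)
#     y.optimize(conv,run=run)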
|
|
from contextlib import contextmanager
from diesel import (Client, call, until_eol, receive,
fire, send, first, fork, sleep)
from diesel.util.queue import Queue, QueueTimeout
import time
import operator as op
import itertools
import uuid
def flatten_arg_pairs(l):
o = []
for i in l:
o.extend(i)
return o
REDIS_PORT = 6379
class RedisError(Exception): pass
class RedisClient(Client):
def __init__(self, host='localhost', port=REDIS_PORT, password=None, **kw):
self.password = password
Client.__init__(self, host, port, **kw)
##################################################
### GENERAL OPERATIONS
@call
def auth(self):
self._send('AUTH', self.password)
resp = self._get_response()
return bool(resp)
@call
def exists(self, k):
self._send('EXISTS', k)
resp = self._get_response()
return bool(resp)
@call
def delete(self, k):
self._send('DEL', k)
resp = self._get_response()
return bool(resp)
@call
def type(self, k):
self._send('TYPE', k)
resp = self._get_response()
return resp
@call
def keys(self, pat):
self._send('KEYS', pat)
resp = self._get_response()
return set(resp)
@call
def randomkey(self):
self._send('RANDOMKEY')
resp = self._get_response()
return resp
@call
def rename(self, old, new):
self._send('RENAME', old, new)
resp = self._get_response()
return resp
@call
def renamenx(self, old, new):
self._send('RENAMENX', old, new)
resp = self._get_response()
return resp
@call
def dbsize(self):
self._send('DBSIZE')
resp = self._get_response()
return resp
@call
def expire(self, key, seconds):
self._send('EXPIRE', key, seconds)
resp = self._get_response()
return resp
@call
def expireat(self, key, when):
unix_time = time.mktime(when.timetuple())
self._send('EXPIREAT', key, unix_time)
resp = self._get_response()
return resp
@call
def ttl(self, key):
self._send('TTL', key)
resp = self._get_response()
resp = None if resp == -1 else int(resp)
return resp
@call
def select(self, idx):
self._send('SELECT', idx)
resp = self._get_response()
return resp
@call
def move(self, key, idx):
self._send('MOVE', key, idx)
@call
def flushdb(self):
self._send('FLUSHDB')
resp = self._get_response()
return resp
@call
def flushall(self):
self._send('FLUSHALL')
resp = self._get_response()
return resp
##################################################
### TRANSACTION OPERATIONS
### http://redis.io/topics/transactions
@call
def multi(self):
"""Starts a transaction."""
self._send('MULTI')
return self._get_response()
@call
def exec_(self):
"""Atomically executes queued commands in a transaction."""
self._send('EXEC')
return self._get_response()
@call
def discard(self):
"""Discards any queued commands and aborts a transaction."""
self._send('DISCARD')
return self._get_response()
@call
def watch(self, keys):
"""Sets up keys to be watched in preparation for a transaction."""
self._send('WATCH', list=keys)
return self._get_response()
def transaction(self, watch=None):
"""Returns a RedisTransaction context manager.
If watch is supplied, it should be a list of keys to be watched for
changes. The transaction will be aborted if the value of any of the
keys is changed outside of the transaction.
A transaction can be invoked with Python's ``with`` statement for
atomically executing a series of commands.
>>> transaction = client.transaction(watch=['dependent_var_1'])
>>> dv1 = client.get('dependent_var_1')
>>> with transaction as t:
... composite_val = compute(dv1)
... t.set('dependent_var_2', composite_val)
>>> print t.value
"""
return RedisTransaction(self, watch or [])
##################################################
### STRING OPERATIONS
@call
def set(self, k, v):
self._send('SET', k, v)
resp = self._get_response()
return resp
@call
def get(self, k):
self._send('GET', k)
resp = self._get_response()
return resp
@call
def getset(self, k, v):
self._send('GETSET', k, v)
resp = self._get_response()
return resp
@call
def mget(self, keylist):
self._send('MGET', list=keylist)
resp = self._get_response()
return resp
@call
def setnx(self, k, v):
self._send('SETNX', k, v)
resp = self._get_response()
return resp
@call
def setex(self, k, tm, v):
self._send('SETEX', k, tm, v)
resp = self._get_response()
return resp
@call
def mset(self, d):
self._send('MSET', list=flatten_arg_pairs(d.iteritems()))
resp = self._get_response()
return resp
@call
def msetnx(self, d):
self._send('MSETNX', list=flatten_arg_pairs(d.iteritems()))
resp = self._get_response()
return resp
@call
def incr(self, k):
self._send('INCR', k)
resp = self._get_response()
return resp
@call
def incrby(self, k, amt):
self._send('INCRBY', k, amt)
resp = self._get_response()
return resp
@call
def decr(self, k):
self._send('DECR', k)
resp = self._get_response()
return resp
@call
def decrby(self, k, amt):
self._send('DECRBY', k, amt)
resp = self._get_response()
return resp
@call
def append(self, k, value):
self._send('APPEND', k, value)
resp = self._get_response()
return resp
@call
def substr(self, k, start, end):
self._send('SUBSTR', k, start, end)
resp = self._get_response()
return resp
@call
def getbit(self, k, offset):
self._send('GETBIT', k, offset)
resp = self._get_response()
return int(resp)
@call
def setbit(self, k, offset, value):
self._send('SETBIT', k, offset, value)
resp = self._get_response()
return resp
@call
def strlen(self, k):
self._send('STRLEN', k)
resp = self._get_response()
return int(resp)
##################################################
### LIST OPERATIONS
@call
def rpush(self, k, v):
self._send('RPUSH', k, v)
resp = self._get_response()
return resp
@call
def lpush(self, k, v):
self._send('LPUSH', k, v)
resp = self._get_response()
return resp
@call
def llen(self, k):
self._send('LLEN', k)
resp = self._get_response()
return resp
@call
def lrange(self, k, start, end):
self._send('LRANGE', k, start, end)
resp = self._get_response()
return resp
@call
def ltrim(self, k, start, end):
self._send('LTRIM', k, start, end)
resp = self._get_response()
return resp
@call
def lindex(self, k, idx):
self._send('LINDEX', k, idx)
resp = self._get_response()
return resp
@call
def lset(self, k, idx, v):
self._send('LSET', k, idx, v)
resp = self._get_response()
return resp
@call
def lrem(self, k, v, count=0):
self._send('LREM', k, count, v)
resp = self._get_response()
return resp
@call
def lpop(self, k):
self._send('LPOP', k)
resp = self._get_response()
return resp
@call
def rpop(self, k):
self._send('RPOP', k)
resp = self._get_response()
return resp
@call
def blpop(self, keylist, timeout=0):
self._send('BLPOP', list=list(keylist) + [timeout])
resp = self._get_response()
if resp:
assert len(resp) == 2
resp = tuple(resp)
else:
resp = None
return resp
@call
def brpop(self, keylist, timeout=0):
self._send('BRPOP', list=list(keylist) + [timeout])
resp = self._get_response()
if resp:
assert len(resp) == 2
resp = tuple(resp)
else:
resp = None
return resp
@call
def rpoplpush(self, src, dest):
self._send('RPOPLPUSH', src, dest)
resp = self._get_response()
return resp
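# Hedged usage sketch of the blocking pops above: blpop/brpop wait up to
# ``timeout`` seconds and return a (key, value) tuple, or None on timeout
# (the key name is illustrative).
#
#     client = RedisClient()
#     item = client.blpop(['jobs'], timeout=5)
#     if item:
#         key, value = item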
##################################################
### SET OPERATIONS
@call
def sadd(self, k, v):
self._send('SADD', k, v)
resp = self._get_response()
return resp
@call
def srem(self, k, v):
self._send('SREM', k, v)
resp = self._get_response()
return bool(resp)
@call
def spop(self, k):
self._send('SPOP', k)
resp = self._get_response()
return resp
@call
def smove(self, src, dst, v):
self._send('SMOVE', src, dst, v)
resp = self._get_response()
return resp
@call
def scard(self, k):
self._send('SCARD', k)
resp = self._get_response()
return resp
@call
def sismember(self, k, v):
self._send('SISMEMBER', k, v)
resp = self._get_response()
return bool(resp)
@call
def sinter(self, keylist):
self._send('SINTER', list=keylist)
resp = self._get_response()
return set(resp)
@call
def sinterstore(self, dst, keylist):
flist = [dst] + list(keylist)
self._send('SINTERSTORE', list=flist)
resp = self._get_response()
return resp
@call
def sunion(self, keylist):
self._send('SUNION', list=keylist)
resp = self._get_response()
return set(resp)
@call
def sunionstore(self, dst, keylist):
flist = [dst] + list(keylist)
self._send('SUNIONSTORE', list=flist)
resp = self._get_response()
return resp
@call
def sdiff(self, keylist):
self._send('SDIFF', list=keylist)
resp = self._get_response()
return set(resp)
@call
def sdiffstore(self, dst, keylist):
flist = [dst] + list(keylist)
self._send('SDIFFSTORE', list=flist)
resp = self._get_response()
return resp
@call
def smembers(self, key):
self._send('SMEMBERS', key)
resp = self._get_response()
return set(resp)
@call
def srandmember(self, key):
self._send('SRANDMEMBER', key)
resp = self._get_response()
return resp
##################################################
### ZSET OPERATIONS
def __pair_with_scores(self, resp):
return [(resp[x], float(resp[x+1]))
for x in xrange(0, len(resp), 2)]
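    # Example: a raw WITHSCORES reply such as ['a', '1', 'b', '2.5'] pairs
    # up as [('a', 1.0), ('b', 2.5)].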
@call
def zadd(self, key, score, member):
self._send('ZADD', key, score, member)
resp = self._get_response()
return resp
@call
def zrem(self, key, member):
self._send('ZREM', key, member)
resp = self._get_response()
return bool(resp)
@call
def zrange(self, key, start, end, with_scores=False):
args = 'ZRANGE', key, start, end
if with_scores:
args += 'WITHSCORES',
self._send(*args)
resp = self._get_response()
if with_scores:
return self.__pair_with_scores(resp)
return resp
@call
def zrevrange(self, key, start, end, with_scores=False):
args = 'ZREVRANGE', key, start, end
if with_scores:
args += 'WITHSCORES',
self._send(*args)
resp = self._get_response()
if with_scores:
return self.__pair_with_scores(resp)
return resp
@call
def zcard(self, key):
self._send('ZCARD', key)
resp = self._get_response()
return int(resp)
@call
def zscore(self, key, member):
self._send('ZSCORE', key, member)
resp = self._get_response()
return float(resp) if resp is not None else None
@call
def zincrby(self, key, increment, member):
self._send('ZINCRBY', key, increment, member)
resp = self._get_response()
return float(resp)
@call
def zrank(self, key, member):
self._send('ZRANK', key, member)
resp = self._get_response()
return resp
@call
def zrevrank(self, key, member):
self._send('ZREVRANK', key, member)
resp = self._get_response()
return resp
@call
def zrangebyscore(self, key, min, max, offset=None, count=None, with_scores=False):
args = 'ZRANGEBYSCORE', key, min, max
if offset:
assert count is not None, "if offset specified, count must be as well"
args += 'LIMIT', offset, count
if with_scores:
args += 'WITHSCORES',
self._send(*args)
resp = self._get_response()
if with_scores:
return self.__pair_with_scores(resp)
return resp
@call
def zcount(self, key, min, max):
self._send('ZCOUNT', key, min, max)
resp = self._get_response()
return resp
@call
def zremrangebyrank(self, key, min, max):
self._send('ZREMRANGEBYRANK', key, min, max)
resp = self._get_response()
return resp
@call
def zremrangebyscore(self, key, min, max):
self._send('ZREMRANGEBYSCORE', key, min, max)
resp = self._get_response()
return resp
##################################################
### HASH OPERATIONS
@call
def hset(self, key, field, value):
self._send('HSET', key, field, value)
resp = self._get_response()
return bool(resp)
@call
def hget(self, key, field):
self._send('HGET', key, field)
resp = self._get_response()
return resp
@call
def hmset(self, key, d):
if not d:
return True
args = [key] + flatten_arg_pairs(d.iteritems())
self._send('HMSET', list=args)
resp = self._get_response()
return bool(resp)
@call
def hmget(self, key, l):
if not l:
return {}
args = [key] + l
self._send('HMGET', list=args)
resp = self._get_response()
return dict(zip(l, resp))
@call
def hincrby(self, key, field, amt):
self._send('HINCRBY', key, field, amt)
resp = self._get_response()
return resp
@call
def hexists(self, key, field):
self._send('HEXISTS', key, field)
resp = self._get_response()
return bool(resp)
@call
def hdel(self, key, field):
self._send('HDEL', key, field)
resp = self._get_response()
return bool(resp)
@call
def hlen(self, key):
self._send('HLEN', key)
resp = self._get_response()
return resp
@call
def hkeys(self, key):
self._send('HKEYS', key)
resp = self._get_response()
return set(resp)
@call
def hvals(self, key):
self._send('HVALS', key)
resp = self._get_response()
return resp
@call
def hgetall(self, key):
self._send('HGETALL', key)
resp = self._get_response()
return dict(resp[x:x+2] for x in xrange(0, len(resp), 2))
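    # Example: a raw HGETALL reply ['f1', 'v1', 'f2', 'v2'] becomes
    # {'f1': 'v1', 'f2': 'v2'}.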
@call
def hsetnx(self, key, field, value):
self._send('HSETNX', key, field, value)
resp = self._get_response()
return bool(resp)
##################################################
### Sorting...
@call
def sort(self, key, pattern=None, limit=None,
get=None, order='ASC', alpha=False, store=None):
args = [key]
if pattern:
args += ['BY', pattern]
if limit:
args += ['LIMIT'] + list(limit)
if get:
args += ['GET', get]
args += [order]
if alpha:
            args += ['ALPHA']  # append as a single token, not characters
if store:
args += ['STORE', store]
self._send('SORT', *args)
resp = self._get_response()
return resp
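    # Example (hypothetical key): sort('mylist', limit=(0, 5), order='DESC',
    # alpha=True) sends "SORT mylist LIMIT 0 5 DESC ALPHA" and returns the
    # first five elements in descending lexicographic order.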
@call
def subscribe(self, *channels):
'''Subscribe to the given channels.
Note: assumes subscriptions succeed
'''
self._send('SUBSCRIBE', *channels)
return None
@call
def unsubscribe(self, *channels):
'''Unsubscribe from the given channels, or all of them if none are given.
        Note: assumes unsubscriptions succeed
'''
self._send('UNSUBSCRIBE', *channels)
return None
@call
def psubscribe(self, *channels):
'''Subscribe to the given glob pattern-matched channels.
Note: assumes subscriptions succeed
'''
self._send('PSUBSCRIBE', *channels)
return None
@call
def punsubscribe(self, *channels):
'''Unsubscribe from the given glob pattern-matched channels, or all of them if none are given.
        Note: assumes unsubscriptions succeed
'''
self._send('PUNSUBSCRIBE', *channels)
return None
@call
def get_from_subscriptions(self, wake_sig=None):
'''Wait for a published message on a subscribed channel.
Returns a tuple consisting of:
* The subscription pattern which matched
(the same as the channel for non-glob subscriptions)
* The channel the message was received from.
* The message itself.
-- OR -- None, if wake_sig was fired
NOTE: The message will always be a string. Handle this as you see fit.
NOTE: subscribe/unsubscribe acks are ignored here
'''
while True:
r = self._get_response(wake_sig)
if r:
if r[0] == 'message':
return [r[1]] + r[1:]
elif r[0] == 'pmessage':
return r[1:]
else:
return None
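    # Example return shapes (channel names hypothetical):
    #   plain SUBSCRIBE:  ['chan', 'chan', 'payload']   (pattern == channel)
    #   glob PSUBSCRIBE:  ['ev.*', 'ev.boot', 'payload']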
@call
def publish(self, channel, message):
'''Publish a message on the given channel.
Returns the number of clients that received the message.
'''
self._send('PUBLISH', channel, message)
resp = self._get_response()
return resp
@call
    def send_raw_command(self, arguments):
        cmd, rest = arguments[0], arguments[1:]
        self._send(cmd, list=rest)
        # Reassemble the raw reply byte-for-byte; the first byte of the
        # first line indicates the reply type.
        line_one = until_eol()
        if line_one[0] in ('+', '-', ':'):
            # status, error, or integer reply: a single line
            return line_one
        if line_one[0] == '$':
            # bulk reply: '$<len>' followed by <len> bytes (-1 means nil)
            amt = int(line_one[1:])
            if amt == -1:
                return line_one
            return line_one + receive(amt) + until_eol()
        if line_one[0] == '*':
            # multi-bulk reply: '*<n>' followed by n items (bulk assumed)
            nargs = int(line_one[1:])
            if nargs == -1:
                return line_one
            out = line_one
            for x in xrange(nargs):
                head = until_eol()
                out += head
                out += receive(int(head[1:])) + until_eol()
            return out
    def _send(self, cmd, *args, **kwargs):
        if 'list' in kwargs:
            args = kwargs['list']
        all = (cmd,) + tuple(str(s) for s in args)
        # unified request protocol: '*<argc>', then '$<len>' + arg for each
        send('*%s\r\n' % len(all))
        for i in all:
            send(('$%s\r\n' % len(i)) + i + '\r\n')
    def _get_response(self, wake_sig=None):
        if wake_sig:
            ev, val = first(until_eol=True, waits=[wake_sig])
            if ev != 'until_eol':
                return None
            fl = val.strip()
        else:
            fl = until_eol().strip()
        # The first byte of the reply selects the reply type.
        c = fl[0]
        if c == '+':
            # status reply: the rest of the line
            return fl[1:]
        elif c == '$':
            # bulk reply: length prefix, then the payload (-1 means nil)
            l = int(fl[1:])
            if l == -1:
                resp = None
            else:
                resp = receive(l)
                until_eol()  # swallow the trailing CRLF
            return resp
        elif c == '*':
            # multi-bulk reply: a count, then that many bulk/integer/status items
            count = int(fl[1:])
            resp = []
            if count == -1:
                return None
            for x in xrange(count):
                hl = until_eol()
                assert hl[0] in ['$', ':', '+']
                if hl[0] == '$':
                    l = int(hl[1:])
                    if l == -1:
                        resp.append(None)
                    else:
                        resp.append(receive(l))
                        until_eol()  # swallow the trailing CRLF
                elif hl[0] == ':':
                    resp.append(int(hl[1:]))
                elif hl[0] == '+':
                    resp.append(hl[1:].strip())
            return resp
        elif c == ':':
            # integer reply
            return int(fl[1:])
        elif c == '-':
            # error reply: raise to the caller
            e_message = fl[1:]
            raise RedisError(e_message)
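# A minimal, self-contained sketch (for illustration only) of the framing
# that _send() writes: the Redis unified request protocol. The helper name
# below is hypothetical and does not use diesel's send()/receive()
# primitives; it just shows what goes on the wire.
def _example_encode_command(*args):
    # _example_encode_command('SET', 'k', 'v') ->
    # '*3\r\n$3\r\nSET\r\n$1\r\nk\r\n$1\r\nv\r\n'
    parts = ['*%d\r\n' % len(args)]
    for a in args:
        a = str(a)
        parts.append('$%d\r\n%s\r\n' % (len(a), a))
    return ''.join(parts)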
class RedisTransaction(object):
"""A context manager for doing transactions with a RedisClient."""
def __init__(self, client, watch_keys):
"""Returns a new RedisTransaction instance.
The client argument should be a RedisClient instance and watch_keys
should be a list of keys to watch.
Handles calling the Redis WATCH, MULTI, EXEC and DISCARD commands to
manage transactions. Calls WATCH to watch keys for changes, MULTI to
start the transaction, EXEC to complete it or DISCARD to abort if there
was an exception.
Instances proxy method calls to the client instance. If the transaction
is successful, the value attribute will contain the results.
See http://redis.io/topics/transactions for more details.
"""
self.client = client
self.value = None
self.watching = watch_keys
self.aborted = False
if watch_keys:
self.client.watch(watch_keys)
def __getattr__(self, name):
return getattr(self.client, name)
def __enter__(self):
# Begin the transaction.
self.client.multi()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if any([exc_type, exc_val, exc_tb]):
# There was an error. Abort the transaction.
self.client.discard()
self.aborted = True
else:
# Try and execute the transaction.
self.value = self.client.exec_()
if self.value is None:
self.aborted = True
msg = 'A watched key changed before the transaction completed.'
raise RedisTransactionError(msg)
# Instruct Python not to swallow exceptions generated in the
# transaction block.
return False
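# Sketch of typical usage (key name hypothetical); RedisClient.transaction()
# is assumed to construct a RedisTransaction around this client, as in
# RedisLock below:
#
#   try:
#       with client.transaction(watch=['balance']) as t:
#           t.incr('balance')
#       results = t.value
#   except RedisTransactionError:
#       pass  # a watched key changed before EXEC; retry as appropriate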
class RedisTransactionError(Exception): pass
class LockNotAcquired(Exception):
pass
class RedisLock(object):
def __init__(self, client, key, timeout=30):
        assert timeout >= 2, 'Timeout must be at least 2 to guarantee the transaction'
self.client = client
self.key = key
self.timeout = timeout
self.me = str(uuid.uuid4())
def __enter__(self):
trans = self.client.transaction(watch=[self.key])
v = self.client.get(self.key)
if v:
raise LockNotAcquired()
else:
try:
with trans as t:
t.setex(self.key, self.timeout, self.me)
def touch():
with RedisClient(self.client.addr, self.client.port) as c:
while self.in_block:
c.expire(self.key, self.timeout)
sleep(self.timeout / 2)
self.in_block = True
fork(touch)
except RedisTransactionError:
raise LockNotAcquired()
def __exit__(self, exc_type, exc_val, exc_tb):
self.in_block = False
val = self.client.get(self.key)
assert val == self.me, 'Someone else took the lock, panic (val=%s, expected=%s, wha=%s)' % (val, self.me, self.client.get(self.key))
self.client.delete(self.key)
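# Sketch of RedisLock usage (key name and work function hypothetical). The
# lock body runs while a forked loop keeps refreshing the key's TTL:
#
#   try:
#       with RedisLock(client, 'lock:reindex', timeout=30):
#           do_exclusive_work()
#   except LockNotAcquired:
#       pass  # someone else holds the lock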
#########################################
## Hub, an abstraction of sub behavior, etc
class RedisSubHub(object):
def __init__(self, host='127.0.0.1', port=REDIS_PORT, password=None):
self.host = host
self.port = port
        self.password = password
self.sub_wake_signal = uuid.uuid4().hex
self.sub_adds = []
self.sub_rms = []
self.subs = {}
def make_client(self):
client = RedisClient(self.host, self.port, self.password)
        if self.password is not None:
client.auth()
return client
    def __isglob(self, glob):
        return '*' in glob or '?' in glob or ('[' in glob and ']' in glob)
def __call__(self):
with self.make_client() as conn:
subs = self.subs
for sub in subs:
if self.__isglob(sub):
conn.psubscribe(sub)
else:
conn.subscribe(sub)
while True:
new = rm = None
if self.sub_adds:
sa = self.sub_adds[:]
self.sub_adds = []
new_subs, new_glob_subs = set(), set()
for k, q in sa:
new = new_glob_subs if self.__isglob(k) else new_subs
if k not in subs:
new.add(k)
subs[k] = set([q])
else:
subs[k].add(q)
if new_subs:
conn.subscribe(*new_subs)
if new_glob_subs:
conn.psubscribe(*new_glob_subs)
if self.sub_rms:
sr = self.sub_rms[:]
self.sub_rms = []
rm_subs, rm_glob_subs = set(), set()
for k, q in sr:
rm = rm_glob_subs if self.__isglob(k) else rm_subs
subs[k].remove(q)
if not subs[k]:
del subs[k]
rm.add(k)
if rm_subs:
conn.unsubscribe(*rm_subs)
if rm_glob_subs:
conn.punsubscribe(*rm_glob_subs)
if not self.sub_rms and not self.sub_adds:
r = conn.get_from_subscriptions(self.sub_wake_signal)
if r:
cls, key, msg = r
if cls in subs:
for q in subs[cls]:
q.put((key, msg))
@contextmanager
def subq(self, classes):
if type(classes) not in (set, list, tuple):
classes = [classes]
q = Queue()
for cls in classes:
self.sub_adds.append((cls, q))
fire(self.sub_wake_signal)
try:
yield q
finally:
for cls in classes:
self.sub_rms.append((cls, q))
@contextmanager
def sub(self, classes):
if type(classes) not in (set, list, tuple):
classes = [classes]
hb = self
q = Queue()
class Poller(object):
def __init__(self):
for cls in classes:
hb.sub_adds.append((cls, q))
fire(hb.sub_wake_signal)
def fetch(self, timeout=None):
try:
qn, msg = q.get(timeout=timeout)
except QueueTimeout:
return (None, None)
else:
return (qn, msg)
def close(self):
for cls in classes:
hb.sub_rms.append((cls, q))
pl = Poller()
try:
yield pl
finally:
pl.close()
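# Sketch of hub usage (channel pattern hypothetical). A RedisSubHub instance
# is itself the long-running subscriber loop: fork it, then pull messages
# through sub()/subq():
#
#   hub = RedisSubHub()
#   fork(hub)  # runs hub.__call__, the subscribe/dispatch loop
#   with hub.sub('events.*') as poller:
#       channel, message = poller.fetch(timeout=5)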
|
|
# Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import eventlet
import fixtures
import mock
import netaddr
from oslo_config import fixture as fixture_config
from oslo_utils import uuidutils
from neutron.agent.common import config
from neutron.agent.common import ovs_lib
from neutron.agent.dhcp import agent
from neutron.agent import dhcp_agent
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.common import utils as common_utils
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import helpers
from neutron.tests.functional import base
class DHCPAgentOVSTestFramework(base.BaseSudoTestCase):
_DHCP_PORT_MAC_ADDRESS = netaddr.EUI("24:77:03:7d:00:4c")
_DHCP_PORT_MAC_ADDRESS.dialect = netaddr.mac_unix
_TENANT_PORT_MAC_ADDRESS = netaddr.EUI("24:77:03:7d:00:3a")
_TENANT_PORT_MAC_ADDRESS.dialect = netaddr.mac_unix
_IP_ADDRS = {
4: {'addr': '192.168.10.11',
'cidr': '192.168.10.0/24',
'gateway': '192.168.10.1'},
6: {'addr': '0:0:0:0:0:ffff:c0a8:a0b',
'cidr': '0:0:0:0:0:ffff:c0a8:a00/120',
'gateway': '0:0:0:0:0:ffff:c0a8:a01'}, }
def setUp(self):
super(DHCPAgentOVSTestFramework, self).setUp()
config.setup_logging()
self.conf_fixture = self.useFixture(fixture_config.Config())
self.conf = self.conf_fixture.conf
dhcp_agent.register_options(self.conf)
# NOTE(cbrandily): TempDir fixture creates a folder with 0o700
# permissions but agent dir must be readable by dnsmasq user (nobody)
agent_config_dir = self.useFixture(fixtures.TempDir()).path
self.useFixture(
helpers.RecursivePermDirFixture(agent_config_dir, 0o555))
self.conf.set_override("dhcp_confs", agent_config_dir)
self.conf.set_override(
'interface_driver',
'neutron.agent.linux.interface.OVSInterfaceDriver')
self.conf.set_override('report_interval', 0, 'AGENT')
br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
self.conf.set_override('ovs_integration_bridge', br_int.br_name)
self.mock_plugin_api = mock.patch(
'neutron.agent.dhcp.agent.DhcpPluginApi').start().return_value
mock.patch('neutron.agent.rpc.PluginReportStateAPI').start()
self.agent = agent.DhcpAgentWithStateReport('localhost')
self.ovs_driver = interface.OVSInterfaceDriver(self.conf)
self.conf.set_override('check_child_processes_interval', 1, 'AGENT')
def network_dict_for_dhcp(self, dhcp_enabled=True, ip_version=4):
net_id = uuidutils.generate_uuid()
subnet_dict = self.create_subnet_dict(
net_id, dhcp_enabled, ip_version)
port_dict = self.create_port_dict(
net_id, subnet_dict.id,
mac_address=str(self._DHCP_PORT_MAC_ADDRESS),
ip_version=ip_version)
port_dict.device_id = common_utils.get_dhcp_agent_device_id(
net_id, self.conf.host)
net_dict = self.create_network_dict(
net_id, [subnet_dict], [port_dict])
return net_dict
def create_subnet_dict(self, net_id, dhcp_enabled=True, ip_version=4):
sn_dict = dhcp.DictModel({
"id": uuidutils.generate_uuid(),
"network_id": net_id,
"ip_version": ip_version,
"cidr": self._IP_ADDRS[ip_version]['cidr'],
"gateway_ip": (self.
_IP_ADDRS[ip_version]['gateway']),
"enable_dhcp": dhcp_enabled,
"dns_nameservers": [],
"host_routes": [],
"ipv6_ra_mode": None,
"ipv6_address_mode": None})
if ip_version == 6:
sn_dict['ipv6_address_mode'] = constants.DHCPV6_STATEFUL
return sn_dict
def create_port_dict(self, network_id, subnet_id, mac_address,
ip_version=4, ip_address=None):
ip_address = (self._IP_ADDRS[ip_version]['addr']
if not ip_address else ip_address)
port_dict = dhcp.DictModel({
"id": uuidutils.generate_uuid(),
"name": "foo",
"mac_address": mac_address,
"network_id": network_id,
"admin_state_up": True,
"device_id": uuidutils.generate_uuid(),
"device_owner": "foo",
"fixed_ips": [{"subnet_id": subnet_id,
"ip_address": ip_address}], })
return port_dict
def create_network_dict(self, net_id, subnets=None, ports=None):
subnets = [] if not subnets else subnets
ports = [] if not ports else ports
net_dict = dhcp.NetModel(d={
"id": net_id,
"subnets": subnets,
"ports": ports,
"admin_state_up": True,
"tenant_id": uuidutils.generate_uuid(), })
return net_dict
def get_interface_name(self, network, port):
device_manager = dhcp.DeviceManager(conf=self.conf, plugin=mock.Mock())
return device_manager.get_interface_name(network, port)
def configure_dhcp_for_network(self, network, dhcp_enabled=True):
self.agent.configure_dhcp_for_network(network)
self.addCleanup(self._cleanup_network, network, dhcp_enabled)
def _cleanup_network(self, network, dhcp_enabled):
self.mock_plugin_api.release_dhcp_port.return_value = None
if dhcp_enabled:
self.agent.call_driver('disable', network)
def assert_dhcp_resources(self, network, dhcp_enabled):
ovs = ovs_lib.BaseOVS()
port = network.ports[0]
iface_name = self.get_interface_name(network, port)
self.assertEqual(dhcp_enabled, ovs.port_exists(iface_name))
self.assert_dhcp_namespace(network.namespace, dhcp_enabled)
self.assert_dhcp_device(network.namespace, iface_name, dhcp_enabled)
def assert_dhcp_namespace(self, namespace, dhcp_enabled):
ip = ip_lib.IPWrapper()
self.assertEqual(dhcp_enabled, ip.netns.exists(namespace))
def assert_dhcp_device(self, namespace, dhcp_iface_name, dhcp_enabled):
dev = ip_lib.IPDevice(dhcp_iface_name, namespace)
self.assertEqual(dhcp_enabled, ip_lib.device_exists(
dhcp_iface_name, namespace))
if dhcp_enabled:
self.assertEqual(self._DHCP_PORT_MAC_ADDRESS, dev.link.address)
def _plug_port_for_dhcp_request(self, network, port):
namespace = network.namespace
vif_name = self.get_interface_name(network.id, port)
self.ovs_driver.plug(network.id, port.id, vif_name, port.mac_address,
self.conf['ovs_integration_bridge'],
namespace=namespace)
def _ip_list_for_vif(self, vif_name, namespace):
ip_device = ip_lib.IPDevice(vif_name, namespace)
return ip_device.addr.list(ip_version=4)
def _get_network_port_for_allocation_test(self):
network = self.network_dict_for_dhcp()
ip_addr = netaddr.IPNetwork(network.subnets[0].cidr)[1]
port = self.create_port_dict(
network.id, network.subnets[0].id,
mac_address=str(self._TENANT_PORT_MAC_ADDRESS),
ip_address=str(ip_addr))
return network, port
def assert_good_allocation_for_port(self, network, port):
vif_name = self.get_interface_name(network.id, port)
self._run_dhclient(vif_name, network)
predicate = lambda: len(
self._ip_list_for_vif(vif_name, network.namespace))
utils.wait_until_true(predicate, 10)
ip_list = self._ip_list_for_vif(vif_name, network.namespace)
cidr = ip_list[0].get('cidr')
ip_addr = str(netaddr.IPNetwork(cidr).ip)
self.assertEqual(port.fixed_ips[0].ip_address, ip_addr)
def assert_bad_allocation_for_port(self, network, port):
vif_name = self.get_interface_name(network.id, port)
self._run_dhclient(vif_name, network)
        # we need to wait some time (10 seconds is enough) and check that
        # dhclient has not configured an ip address for the interface
eventlet.sleep(10)
ip_list = self._ip_list_for_vif(vif_name, network.namespace)
self.assertEqual([], ip_list)
def _run_dhclient(self, vif_name, network):
        # NOTE: Before running dhclient we should create a resolv.conf file
        # in the namespace where dhclient will run while testing address
        # allocation for the port; otherwise dhclient will override the
        # system /etc/resolv.conf.
        # By default the folder for the dhcp-agent's namespace doesn't
        # exist, which is why we use AdminDirFixture to create a directory
        # with admin permissions in /etc/netns/ and touch resolv.conf there.
etc_dir = '/etc/netns/%s' % network.namespace
self.useFixture(helpers.AdminDirFixture(etc_dir))
cmd = ['touch', os.path.join(etc_dir, 'resolv.conf')]
utils.execute(cmd, run_as_root=True)
dhclient_cmd = ['dhclient', '--no-pid', '-d', '-1', vif_name]
proc = net_helpers.RootHelperProcess(
cmd=dhclient_cmd, namespace=network.namespace)
self.addCleanup(proc.wait)
self.addCleanup(proc.kill)
def _get_metadata_proxy_process(self, network):
return external_process.ProcessManager(
self.conf,
network.id,
network.namespace)
class DHCPAgentOVSTestCase(DHCPAgentOVSTestFramework):
def test_create_subnet_with_dhcp(self):
dhcp_enabled = True
for version in [4, 6]:
network = self.network_dict_for_dhcp(
dhcp_enabled, ip_version=version)
self.configure_dhcp_for_network(network=network,
dhcp_enabled=dhcp_enabled)
self.assert_dhcp_resources(network, dhcp_enabled)
def test_agent_mtu_set_on_interface_driver(self):
network = self.network_dict_for_dhcp()
network["mtu"] = 789
self.configure_dhcp_for_network(network=network)
port = network.ports[0]
iface_name = self.get_interface_name(network, port)
dev = ip_lib.IPDevice(iface_name, network.namespace)
self.assertEqual(789, dev.link.mtu)
def test_good_address_allocation(self):
network, port = self._get_network_port_for_allocation_test()
network.ports.append(port)
self.configure_dhcp_for_network(network=network)
self._plug_port_for_dhcp_request(network, port)
self.assert_good_allocation_for_port(network, port)
def test_bad_address_allocation(self):
network, port = self._get_network_port_for_allocation_test()
network.ports.append(port)
self.configure_dhcp_for_network(network=network)
bad_mac_address = netaddr.EUI(self._TENANT_PORT_MAC_ADDRESS.value + 1)
bad_mac_address.dialect = netaddr.mac_unix
port.mac_address = str(bad_mac_address)
self._plug_port_for_dhcp_request(network, port)
self.assert_bad_allocation_for_port(network, port)
def _spawn_network_metadata_proxy(self):
network = self.network_dict_for_dhcp()
self.conf.set_override('enable_isolated_metadata', True)
self.addCleanup(self.agent.disable_isolated_metadata_proxy, network)
self.configure_dhcp_for_network(network=network)
pm = self._get_metadata_proxy_process(network)
utils.wait_until_true(
lambda: pm.active,
timeout=5,
sleep=0.01,
exception=RuntimeError("Metadata proxy didn't spawn"))
return (pm, network)
def test_metadata_proxy_respawned(self):
pm, network = self._spawn_network_metadata_proxy()
old_pid = pm.pid
utils.execute(['kill', '-9', old_pid], run_as_root=True)
utils.wait_until_true(
lambda: pm.active and pm.pid != old_pid,
timeout=5,
sleep=0.1,
exception=RuntimeError("Metadata proxy didn't respawn"))
def test_stale_metadata_proxy_killed(self):
pm, network = self._spawn_network_metadata_proxy()
self.conf.set_override('enable_isolated_metadata', False)
self.configure_dhcp_for_network(network=network)
utils.wait_until_true(
lambda: not pm.active,
timeout=5,
sleep=0.1,
exception=RuntimeError("Stale metadata proxy didn't get killed"))
|
|
"""
Tests for discrete models
Notes
-----
DECIMAL_3 is used because it seems that there is a loss of precision
in the Stata *.dta -> *.csv output, NOT the estimator for the Poisson
tests.
"""
# pylint: disable-msg=E1101
from statsmodels.compat.python import range
import os
import numpy as np
from numpy.testing import (assert_, assert_raises, assert_almost_equal,
assert_equal, assert_array_equal, assert_allclose,
assert_array_less)
from statsmodels.discrete.discrete_model import (Logit, Probit, MNLogit,
Poisson, NegativeBinomial)
from statsmodels.discrete.discrete_margins import _iscount, _isdummy
import statsmodels.api as sm
from nose import SkipTest
from .results.results_discrete import Spector, DiscreteL1, RandHIE, Anes
from statsmodels.tools.sm_exceptions import PerfectSeparationError
try:
import cvxopt
has_cvxopt = True
except ImportError:
has_cvxopt = False
try:
from scipy.optimize import basinhopping
has_basinhopping = True
except ImportError:
has_basinhopping = False
DECIMAL_14 = 14
DECIMAL_10 = 10
DECIMAL_9 = 9
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0
class CheckModelResults(object):
"""
res2 should be the test results from RModelWrap
or the results as defined in model_results_data
"""
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
assert_allclose(self.res1.conf_int(), self.res2.conf_int, rtol=8e-5)
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_4)
    def test_pvalues(self):
        assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
# def test_cov_params(self):
# assert_almost_equal(self.res1.cov_params(), self.res2.cov_params,
# DECIMAL_4)
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)
def test_llnull(self):
assert_almost_equal(self.res1.llnull, self.res2.llnull, DECIMAL_4)
def test_llr(self):
assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_3)
def test_llr_pvalue(self):
assert_almost_equal(self.res1.llr_pvalue, self.res2.llr_pvalue,
DECIMAL_4)
def test_normalized_cov_params(self):
pass
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_dof(self):
assert_equal(self.res1.df_model, self.res2.df_model)
assert_equal(self.res1.df_resid, self.res2.df_resid)
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.model.predict(self.res1.params),
self.res2.phat, DECIMAL_4)
def test_predict_xb(self):
assert_almost_equal(self.res1.model.predict(self.res1.params,
linear=True),
self.res2.yhat, DECIMAL_4)
def test_loglikeobs(self):
#basic cross check
llobssum = self.res1.model.loglikeobs(self.res1.params).sum()
assert_almost_equal(llobssum, self.res1.llf, DECIMAL_14)
def test_jac(self):
#basic cross check
jacsum = self.res1.model.jac(self.res1.params).sum(0)
score = self.res1.model.score(self.res1.params)
assert_almost_equal(jacsum, score, DECIMAL_9) #Poisson has low precision ?
class CheckBinaryResults(CheckModelResults):
def test_pred_table(self):
assert_array_equal(self.res1.pred_table(), self.res2.pred_table)
def test_resid_dev(self):
assert_almost_equal(self.res1.resid_dev, self.res2.resid_dev,
DECIMAL_4)
def test_resid_generalized(self):
assert_almost_equal(self.res1.resid_generalized,
self.res2.resid_generalized, DECIMAL_4)
def smoke_test_resid_response(self):
self.res1.resid_response
class CheckMargEff(object):
"""
Test marginal effects (margeff) and its options
"""
def test_nodummy_dydxoverall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydx_se, DECIMAL_4)
def test_nodummy_dydxmean(self):
me = self.res1.get_margeff(at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxmean_se, DECIMAL_4)
def test_nodummy_dydxmedian(self):
me = self.res1.get_margeff(at='median')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxmedian_se, DECIMAL_4)
def test_nodummy_dydxzero(self):
me = self.res1.get_margeff(at='zero')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxzero, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                self.res2.margeff_nodummy_dydxzero_se, DECIMAL_4)
def test_nodummy_dyexoverall(self):
me = self.res1.get_margeff(method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyex, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyex_se, DECIMAL_4)
def test_nodummy_dyexmean(self):
me = self.res1.get_margeff(at='mean', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexmean_se, DECIMAL_4)
def test_nodummy_dyexmedian(self):
me = self.res1.get_margeff(at='median', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexmedian_se, DECIMAL_4)
def test_nodummy_dyexzero(self):
me = self.res1.get_margeff(at='zero', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexzero_se, DECIMAL_4)
def test_nodummy_eydxoverall(self):
me = self.res1.get_margeff(method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydx_se, DECIMAL_4)
def test_nodummy_eydxmean(self):
me = self.res1.get_margeff(at='mean', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxmean_se, DECIMAL_4)
def test_nodummy_eydxmedian(self):
me = self.res1.get_margeff(at='median', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxmedian_se, DECIMAL_4)
def test_nodummy_eydxzero(self):
me = self.res1.get_margeff(at='zero', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxzero_se, DECIMAL_4)
def test_nodummy_eyexoverall(self):
me = self.res1.get_margeff(method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyex, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyex_se, DECIMAL_4)
def test_nodummy_eyexmean(self):
me = self.res1.get_margeff(at='mean', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexmean_se, DECIMAL_4)
def test_nodummy_eyexmedian(self):
me = self.res1.get_margeff(at='median', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexmedian_se, DECIMAL_4)
def test_nodummy_eyexzero(self):
me = self.res1.get_margeff(at='zero', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexzero_se, DECIMAL_4)
def test_dummy_dydxoverall(self):
me = self.res1.get_margeff(dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_dydx_se, DECIMAL_4)
def test_dummy_dydxmean(self):
me = self.res1.get_margeff(at='mean', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_dydxmean_se, DECIMAL_4)
def test_dummy_eydxoverall(self):
me = self.res1.get_margeff(method='eydx', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_eydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_eydx_se, DECIMAL_4)
def test_dummy_eydxmean(self):
me = self.res1.get_margeff(at='mean', method='eydx', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_eydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_eydxmean_se, DECIMAL_4)
def test_count_dydxoverall(self):
me = self.res1.get_margeff(count=True)
assert_almost_equal(me.margeff,
self.res2.margeff_count_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dydx_se, DECIMAL_4)
def test_count_dydxmean(self):
me = self.res1.get_margeff(count=True, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_count_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dydxmean_se, DECIMAL_4)
def test_count_dummy_dydxoverall(self):
me = self.res1.get_margeff(count=True, dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_count_dummy_dydxoverall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dummy_dydxoverall_se, DECIMAL_4)
def test_count_dummy_dydxmean(self):
me = self.res1.get_margeff(count=True, dummy=True, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_count_dummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dummy_dydxmean_se, DECIMAL_4)
class TestProbitNewton(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Probit(data.endog, data.exog).fit(method="newton", disp=0)
res2 = Spector()
res2.probit()
cls.res2 = res2
#def test_predict(self):
# assert_almost_equal(self.res1.model.predict(self.res1.params),
# self.res2.predict, DECIMAL_4)
class TestProbitBFGS(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Probit(data.endog, data.exog).fit(method="bfgs",
disp=0)
res2 = Spector()
res2.probit()
cls.res2 = res2
class TestProbitNM(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="nm",
disp=0, maxiter=500)
class TestProbitPowell(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="powell",
disp=0, ftol=1e-8)
class TestProbitCG(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
# fmin_cg fails to converge on some machines - reparameterize
from statsmodels.tools.transform_model import StandardizeTransform
transf = StandardizeTransform(data.exog)
exog_st = transf(data.exog)
res1_st = Probit(data.endog, exog_st).fit(method="cg",
disp=0, maxiter=500, gtol=1e-08)
start_params = transf.transform_params(res1_st.params)
assert_allclose(start_params, res2.params, rtol=1e-5, atol=1e-6)
cls.res1 = Probit(data.endog, data.exog).fit(start_params=start_params,
method="cg",
maxiter=500, gtol=1e-08,
disp=0)
assert_array_less(cls.res1.mle_retvals['fcalls'], 100)
class TestProbitNCG(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="ncg",
disp=0, avextol=1e-8)
class TestProbitBasinhopping(CheckBinaryResults):
@classmethod
def setupClass(cls):
if not has_basinhopping:
raise SkipTest("Skipped TestProbitBasinhopping since"
" basinhopping solver is not available")
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
fit = Probit(data.endog, data.exog).fit
cls.res1 = fit(method="basinhopping", disp=0, niter=5,
minimizer={'method' : 'L-BFGS-B', 'tol' : 1e-8})
class CheckLikelihoodModelL1(object):
"""
For testing results generated with L1 regularization
"""
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(
self.res1.conf_int(), self.res2.conf_int, DECIMAL_4)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_nnz_params(self):
assert_almost_equal(
self.res1.nnz_params, self.res2.nnz_params, DECIMAL_4)
def test_aic(self):
assert_almost_equal(
self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(
self.res1.bic, self.res2.bic, DECIMAL_3)
class TestProbitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0.1, 0.2, 0.3, 10]) #/ data.exog.shape[0]
cls.res1 = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='auto',
auto_trim_tol=0.02, acc=1e-10, maxiter=1000)
res2 = DiscreteL1()
res2.probit()
cls.res2 = res2
def test_cov_params(self):
assert_almost_equal(
self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)
class TestMNLogitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
alpha = 10. * np.ones((mlogit_mod.J - 1, mlogit_mod.K)) #/ anes_exog.shape[0]
alpha[-1,:] = 0
cls.res1 = mlogit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,
acc=1e-10, disp=0)
res2 = DiscreteL1()
res2.mnlogit()
cls.res2 = res2
class TestLogitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.alpha = 3 * np.array([0., 1., 1., 1.]) #/ data.exog.shape[0]
cls.res1 = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=cls.alpha, disp=0, trim_mode='size',
size_trim_tol=1e-5, acc=1e-10, maxiter=1000)
res2 = DiscreteL1()
res2.logit()
cls.res2 = res2
def test_cov_params(self):
assert_almost_equal(
self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)
class TestCVXOPT(object):
    @classmethod
    def setupClass(cls):
        cls.data = sm.datasets.spector.load()
        cls.data.exog = sm.add_constant(cls.data.exog, prepend=True)
def test_cvxopt_versus_slsqp(self):
        # Compares results from cvxopt to the standard slsqp
if has_cvxopt:
self.alpha = 3. * np.array([0, 1, 1, 1.]) #/ self.data.endog.shape[0]
res_slsqp = Logit(self.data.endog, self.data.exog).fit_regularized(
method="l1", alpha=self.alpha, disp=0, acc=1e-10, maxiter=1000,
trim_mode='auto')
res_cvxopt = Logit(self.data.endog, self.data.exog).fit_regularized(
method="l1_cvxopt_cp", alpha=self.alpha, disp=0, abstol=1e-10,
trim_mode='auto', auto_trim_tol=0.01, maxiter=1000)
assert_almost_equal(res_slsqp.params, res_cvxopt.params, DECIMAL_4)
else:
raise SkipTest("Skipped test_cvxopt since cvxopt is not available")
class TestSweepAlphaL1(object):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.model = Logit(data.endog, data.exog)
cls.alphas = np.array(
[[0.1, 0.1, 0.1, 0.1],
[0.4, 0.4, 0.5, 0.5],
[0.5, 0.5, 1, 1]]) #/ data.exog.shape[0]
cls.res1 = DiscreteL1()
cls.res1.sweep()
def test_sweep_alpha(self):
for i in range(3):
alpha = self.alphas[i, :]
res2 = self.model.fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-10,
trim_mode='off', maxiter=1000)
assert_almost_equal(res2.params, self.res1.params[i], DECIMAL_4)
class CheckL1Compatability(object):
"""
    Tests compatibility between l1 and unregularized by setting alpha such
    that certain parameters should be effectively unregularized, and others
    should be ignored by the model.
"""
def test_params(self):
m = self.m
assert_almost_equal(
self.res_unreg.params[:m], self.res_reg.params[:m], DECIMAL_4)
# The last entry should be close to zero
# handle extra parameter of NegativeBinomial
kvars = self.res_reg.model.exog.shape[1]
assert_almost_equal(0, self.res_reg.params[m:kvars], DECIMAL_4)
def test_cov_params(self):
m = self.m
# The restricted cov_params should be equal
assert_almost_equal(
self.res_unreg.cov_params()[:m, :m],
self.res_reg.cov_params()[:m, :m],
DECIMAL_1)
def test_df(self):
assert_equal(self.res_unreg.df_model, self.res_reg.df_model)
assert_equal(self.res_unreg.df_resid, self.res_reg.df_resid)
def test_t_test(self):
m = self.m
kvars = self.kvars
# handle extra parameter of NegativeBinomial
extra = getattr(self, 'k_extra', 0)
t_unreg = self.res_unreg.t_test(np.eye(len(self.res_unreg.params)))
t_reg = self.res_reg.t_test(np.eye(kvars + extra))
assert_almost_equal(t_unreg.effect[:m], t_reg.effect[:m], DECIMAL_3)
assert_almost_equal(t_unreg.sd[:m], t_reg.sd[:m], DECIMAL_3)
assert_almost_equal(np.nan, t_reg.sd[m])
assert_allclose(t_unreg.tvalue[:m], t_reg.tvalue[:m], atol=3e-3)
assert_almost_equal(np.nan, t_reg.tvalue[m])
def test_f_test(self):
m = self.m
kvars = self.kvars
# handle extra parameter of NegativeBinomial
extra = getattr(self, 'k_extra', 0)
f_unreg = self.res_unreg.f_test(np.eye(len(self.res_unreg.params))[:m])
f_reg = self.res_reg.f_test(np.eye(kvars + extra)[:m])
assert_allclose(f_unreg.fvalue, f_reg.fvalue, rtol=3e-5, atol=1e-3)
assert_almost_equal(f_unreg.pvalue, f_reg.pvalue, DECIMAL_3)
def test_bad_r_matrix(self):
kvars = self.kvars
assert_raises(ValueError, self.res_reg.f_test, np.eye(kvars) )
class TestPoissonL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog = sm.add_constant(rand_exog, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.Poisson(rand_data.endog, exog_no_PSI)
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
# Do a regularized fit with alpha, effectively dropping the last column
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars)
alpha[:cls.m] = 0
cls.res_reg = sm.Poisson(rand_data.endog, rand_exog).fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
class TestNegativeBinomialL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog_st = (rand_exog - rand_exog.mean(0)) / rand_exog.std(0)
rand_exog = sm.add_constant(rand_exog_st, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI)
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
# Do a regularized fit with alpha, effectively dropping the last column
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars + 1)
alpha[:cls.m] = 0
alpha[-1] = 0 # don't penalize alpha
mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog)
cls.res_reg = mod_reg.fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
cls.k_extra = 1 # 1 extra parameter in nb2
class TestNegativeBinomialGeoL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog = sm.add_constant(rand_exog, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI,
loglike_method='geometric')
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
# Do a regularized fit with alpha, effectively dropping the last columns
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars)
alpha[:cls.m] = 0
mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog,
loglike_method='geometric')
cls.res_reg = mod_reg.fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
assert_equal(mod_reg.loglike_method, 'geometric')
class TestLogitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
# Do a regularized fit with alpha, effectively dropping the last column
alpha = np.array([0, 0, 0, 10])
cls.res_reg = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = Logit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)
class TestMNLogitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0, 0, 0, 10])
cls.res_reg = MNLogit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = MNLogit(data.endog, exog_no_PSI).fit(
disp=0, tol=1e-15)
def test_t_test(self):
m = self.m
kvars = self.kvars
t_unreg = self.res_unreg.t_test(np.eye(m))
t_reg = self.res_reg.t_test(np.eye(kvars))
assert_almost_equal(t_unreg.effect, t_reg.effect[:m], DECIMAL_3)
assert_almost_equal(t_unreg.sd, t_reg.sd[:m], DECIMAL_3)
assert_almost_equal(np.nan, t_reg.sd[m])
assert_almost_equal(t_unreg.tvalue, t_reg.tvalue[:m, :m], DECIMAL_3)
def test_f_test(self):
raise SkipTest("Skipped test_f_test for MNLogit")
class TestProbitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0, 0, 0, 10])
cls.res_reg = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = Probit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)
class CompareL1(object):
"""
For checking results for l1 regularization.
Assumes self.res1 and self.res2 are two legitimate models to be compared.
"""
def test_basic_results(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
assert_almost_equal(self.res1.cov_params(), self.res2.cov_params(), DECIMAL_4)
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int(), DECIMAL_4)
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
assert_almost_equal(self.res1.pred_table(), self.res2.pred_table(), DECIMAL_4)
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_4)
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_4)
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
class CompareL11D(CompareL1):
"""
Check t and f tests. This only works for 1-d results
"""
def test_tests(self):
restrictmat = np.eye(len(self.res1.params.ravel()))
assert_almost_equal(self.res1.t_test(restrictmat).pvalue,
self.res2.t_test(restrictmat).pvalue, DECIMAL_4)
assert_almost_equal(self.res1.f_test(restrictmat).pvalue,
self.res2.f_test(restrictmat).pvalue, DECIMAL_4)
class TestL1AlphaZeroLogit(CompareL11D):
"""
Compares l1 model with alpha = 0 to the unregularized model.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.res1 = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = Logit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestL1AlphaZeroProbit(CompareL11D):
"""
Compares l1 model with alpha = 0 to the unregularized model.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.res1 = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = Probit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestL1AlphaZeroMNLogit(CompareL1):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = MNLogit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = MNLogit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestLogitNewton(CheckBinaryResults, CheckMargEff):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Logit(data.endog, data.exog).fit(method="newton", disp=0)
res2 = Spector()
res2.logit()
cls.res2 = res2
def test_resid_pearson(self):
assert_almost_equal(self.res1.resid_pearson,
self.res2.resid_pearson, 5)
def test_nodummy_exog1(self):
me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.})
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_atexog1, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_atexog1_se, DECIMAL_4)
def test_nodummy_exog2(self):
me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_atexog2, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_atexog2_se, DECIMAL_4)
def test_dummy_exog1(self):
me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.}, dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_atexog1, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_atexog1_se, DECIMAL_4)
def test_dummy_exog2(self):
me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean',
dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_atexog2, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_atexog2_se, DECIMAL_4)
class TestLogitBFGS(CheckBinaryResults, CheckMargEff):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.logit()
cls.res2 = res2
cls.res1 = Logit(data.endog, data.exog).fit(method="bfgs", disp=0)
class TestPoissonNewton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Poisson(data.endog, exog).fit(method='newton', disp=0)
res2 = RandHIE()
res2.poisson()
cls.res2 = res2
def test_margeff_overall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_overall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_overall_se, DECIMAL_4)
def test_margeff_dummy_overall(self):
me = self.res1.get_margeff(dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_overall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_overall_se, DECIMAL_4)
def test_resid(self):
assert_almost_equal(self.res1.resid, self.res2.resid, 2)
def test_predict_prob(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
probs_res = np.loadtxt(os.path.join(cur_dir, "results",
"predict_prob_poisson.csv"), delimiter=",")
# just check the first 100 obs. vs R to save memory
probs = self.res1.predict_prob()[:100]
assert_almost_equal(probs, probs_res, 8)
class TestNegativeBinomialNB2Newton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(method='newton', disp=0)
res2 = RandHIE()
res2.negativebinomial_nb2_bfgs()
cls.res2 = res2
def test_jac(self):
pass
    #NOTE: The bse here is much closer in precision to Stata
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_alpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,
DECIMAL_4)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,
DECIMAL_2)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10],
np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def no_info(self):
pass
test_jac = no_info
class TestNegativeBinomialNB1Newton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(
method="newton",
maxiter=100,
disp=0)
res2 = RandHIE()
res2.negativebinomial_nb1_bfgs()
cls.res2 = res2
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def test_lnalpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
# the bse for alpha is not high precision from the hessian
# approximation
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_2)
def test_jac(self):
pass
def test_predict(self):
pass
def test_predict_xb(self):
pass
class TestNegativeBinomialNB2BFGS(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(
method='bfgs', disp=0)
res2 = RandHIE()
res2.negativebinomial_nb2_bfgs()
cls.res2 = res2
def test_jac(self):
pass
    #NOTE: The bse here is much closer in precision to Stata
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_alpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,
DECIMAL_4)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,
DECIMAL_2)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10],
np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def no_info(self):
pass
test_jac = no_info
class TestNegativeBinomialNB1BFGS(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(method="bfgs",
maxiter=100,
disp=0)
res2 = RandHIE()
res2.negativebinomial_nb1_bfgs()
cls.res2 = res2
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def test_lnalpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
# the bse for alpha is not high precision from the hessian
# approximation
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_2)
def test_jac(self):
pass
def test_predict(self):
pass
def test_predict_xb(self):
pass
class TestNegativeBinomialGeometricBFGS(CheckModelResults):
"""
    Cannot find another implementation of the geometric model to cross-check
    results against, so we only test fitted values. The geometric model has
    fewer parameters than nb1 and nb2, and we want to make sure that
    predict() (np.dot(exog, params)) works.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'geometric').fit(method='bfgs', disp=0)
res2 = RandHIE()
res2.negativebinomial_geometric_bfgs()
cls.res2 = res2
# the following are regression tests, could be inherited instead
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int, DECIMAL_3)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10], self.res2.fittedvalues[:10], DECIMAL_3)
def test_jac(self):
pass
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10], np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10], self.res2.fittedvalues[:10], DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def no_info(self):
pass
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_1)
def test_llr(self):
assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_2)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
test_jac = no_info
class CheckMNLogitBaseZero(CheckModelResults):
def test_margeff_overall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff, self.res2.margeff_dydx_overall, 6)
assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_overall_se, 6)
def test_margeff_mean(self):
me = self.res1.get_margeff(at='mean')
assert_almost_equal(me.margeff, self.res2.margeff_dydx_mean, 7)
assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_mean_se, 7)
def test_margeff_dummy(self):
data = self.data
vote = data.data['vote']
exog = np.column_stack((data.exog, vote))
exog = sm.add_constant(exog, prepend=False)
res = MNLogit(data.endog, exog).fit(method="newton", disp=0)
me = res.get_margeff(dummy=True)
assert_almost_equal(me.margeff, self.res2.margeff_dydx_dummy_overall,
6)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dydx_dummy_overall_se, 6)
me = res.get_margeff(dummy=True, method="eydx")
assert_almost_equal(me.margeff, self.res2.margeff_eydx_dummy_overall,
5)
assert_almost_equal(me.margeff_se,
self.res2.margeff_eydx_dummy_overall_se, 6)
def test_j(self):
assert_equal(self.res1.model.J, self.res2.J)
def test_k(self):
assert_equal(self.res1.model.K, self.res2.K)
def test_endog_names(self):
assert_equal(self.res1._get_endog_name(None,None)[1],
['y=1', 'y=2', 'y=3', 'y=4', 'y=5', 'y=6'])
def test_pred_table(self):
# fitted results taken from gretl
pred = [6, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 6, 0, 1, 6, 0, 0,
1, 1, 6, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 6, 0, 0, 6, 6, 0, 0, 1,
1, 6, 1, 6, 0, 0, 0, 1, 0, 1, 0, 0, 0, 6, 0, 0, 6, 0, 0, 0, 1,
1, 0, 0, 6, 6, 6, 6, 1, 0, 5, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,
6, 0, 6, 6, 1, 0, 1, 1, 6, 5, 1, 0, 0, 0, 5, 0, 0, 6, 0, 1, 0,
0, 0, 0, 0, 1, 1, 0, 6, 6, 6, 6, 5, 0, 1, 1, 0, 1, 0, 6, 6, 0,
0, 0, 6, 0, 0, 0, 6, 6, 0, 5, 1, 0, 0, 0, 0, 6, 0, 5, 6, 6, 0,
0, 0, 0, 6, 1, 0, 0, 1, 0, 1, 6, 1, 1, 1, 1, 1, 0, 0, 0, 6, 0,
5, 1, 0, 6, 6, 6, 0, 0, 0, 0, 1, 6, 6, 0, 0, 0, 1, 1, 5, 6, 0,
6, 1, 0, 0, 1, 6, 0, 0, 1, 0, 6, 6, 0, 5, 6, 6, 0, 0, 6, 1, 0,
6, 0, 1, 0, 1, 6, 0, 1, 1, 1, 6, 0, 5, 0, 0, 6, 1, 0, 6, 5, 5,
0, 6, 1, 1, 1, 0, 0, 6, 0, 0, 5, 0, 0, 6, 6, 6, 6, 6, 0, 1, 0,
0, 6, 6, 0, 0, 1, 6, 0, 0, 6, 1, 6, 1, 1, 1, 0, 1, 6, 5, 0, 0,
1, 5, 0, 1, 6, 6, 1, 0, 0, 1, 6, 1, 5, 6, 1, 0, 0, 1, 1, 0, 6,
1, 6, 0, 1, 1, 5, 6, 6, 5, 1, 1, 1, 0, 6, 1, 6, 1, 0, 1, 0, 0,
1, 5, 0, 1, 1, 0, 5, 6, 0, 5, 1, 1, 6, 5, 0, 6, 0, 0, 0, 0, 0,
0, 1, 6, 1, 0, 5, 1, 0, 0, 1, 6, 0, 0, 6, 6, 6, 0, 2, 1, 6, 5,
6, 1, 1, 0, 5, 1, 1, 1, 6, 1, 6, 6, 5, 6, 0, 1, 0, 1, 6, 0, 6,
1, 6, 0, 0, 6, 1, 0, 6, 1, 0, 0, 0, 0, 6, 6, 6, 6, 5, 6, 6, 0,
0, 6, 1, 1, 6, 0, 0, 6, 6, 0, 6, 6, 0, 0, 6, 0, 0, 6, 6, 6, 1,
0, 6, 0, 0, 0, 6, 1, 1, 0, 1, 5, 0, 0, 5, 0, 0, 0, 1, 1, 6, 1,
0, 0, 0, 6, 6, 1, 1, 6, 5, 5, 0, 6, 6, 0, 1, 1, 0, 6, 6, 0, 6,
5, 5, 6, 5, 1, 0, 6, 0, 6, 1, 0, 1, 6, 6, 6, 1, 0, 6, 0, 5, 6,
6, 5, 0, 5, 1, 0, 6, 0, 6, 1, 5, 5, 0, 1, 5, 5, 2, 6, 6, 6, 5,
0, 0, 1, 6, 1, 0, 1, 6, 1, 0, 0, 1, 5, 6, 6, 0, 0, 0, 5, 6, 6,
6, 1, 5, 6, 1, 0, 0, 6, 5, 0, 1, 1, 1, 6, 6, 0, 1, 0, 0, 0, 5,
0, 0, 6, 1, 6, 0, 6, 1, 5, 5, 6, 5, 0, 0, 0, 0, 1, 1, 0, 5, 5,
0, 0, 0, 0, 1, 0, 6, 6, 1, 1, 6, 6, 0, 5, 5, 0, 0, 0, 6, 6, 1,
6, 0, 0, 5, 0, 1, 6, 5, 6, 6, 5, 5, 6, 6, 1, 0, 1, 6, 6, 1, 6,
0, 6, 0, 6, 5, 0, 6, 6, 0, 5, 6, 0, 6, 6, 5, 0, 1, 6, 6, 1, 0,
1, 0, 6, 6, 1, 0, 6, 6, 6, 0, 1, 6, 0, 1, 5, 1, 1, 5, 6, 6, 0,
1, 6, 6, 1, 5, 0, 5, 0, 6, 0, 1, 6, 1, 0, 6, 1, 6, 0, 6, 1, 0,
0, 0, 6, 6, 0, 1, 1, 6, 6, 6, 1, 6, 0, 5, 6, 0, 5, 6, 6, 5, 5,
5, 6, 0, 6, 0, 0, 0, 5, 0, 6, 1, 2, 6, 6, 6, 5, 1, 6, 0, 6, 0,
0, 0, 0, 6, 5, 0, 5, 1, 6, 5, 1, 6, 5, 1, 1, 0, 0, 6, 1, 1, 5,
6, 6, 0, 5, 2, 5, 5, 0, 5, 5, 5, 6, 5, 6, 6, 5, 2, 6, 5, 6, 0,
0, 6, 5, 0, 6, 0, 0, 6, 6, 6, 0, 5, 1, 1, 6, 6, 5, 2, 1, 6, 5,
6, 0, 6, 6, 1, 1, 5, 1, 6, 6, 6, 0, 0, 6, 1, 0, 5, 5, 1, 5, 6,
1, 6, 0, 1, 6, 5, 0, 0, 6, 1, 5, 1, 0, 6, 0, 6, 6, 5, 5, 6, 6,
6, 6, 2, 6, 6, 6, 5, 5, 5, 0, 1, 0, 0, 0, 6, 6, 1, 0, 6, 6, 6,
6, 6, 1, 0, 6, 1, 5, 5, 6, 6, 6, 6, 6, 5, 6, 1, 6, 2, 5, 5, 6,
5, 6, 6, 5, 6, 6, 5, 5, 6, 1, 5, 1, 6, 0, 2, 5, 0, 5, 0, 2, 1,
6, 0, 0, 6, 6, 1, 6, 0, 5, 5, 6, 6, 1, 6, 6, 6, 5, 6, 6, 1, 6,
5, 6, 1, 1, 0, 6, 6, 5, 1, 0, 0, 6, 6, 5, 6, 0, 1, 6, 0, 5, 6,
5, 2, 5, 2, 0, 0, 1, 6, 6, 1, 5, 6, 6, 0, 6, 6, 6, 6, 6, 5]
assert_array_equal(self.res1.predict().argmax(1), pred)
# the rows should add up for pred table
assert_array_equal(self.res1.pred_table().sum(0), np.bincount(pred))
# note this is just a regression test, gretl doesn't have a prediction
# table
pred = [[ 126., 41., 2., 0., 0., 12., 19.],
[ 77., 73., 3., 0., 0., 15., 12.],
[ 37., 43., 2., 0., 0., 19., 7.],
[ 12., 9., 1., 0., 0., 9., 6.],
[ 19., 10., 2., 0., 0., 20., 43.],
[ 22., 25., 1., 0., 0., 31., 71.],
[ 9., 7., 1., 0., 0., 18., 140.]]
assert_array_equal(self.res1.pred_table(), pred)
def test_resid(self):
assert_array_equal(self.res1.resid_misclassified, self.res2.resid)
class TestMNLogitNewtonBaseZero(CheckMNLogitBaseZero):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
cls.data = data
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
cls.res1 = MNLogit(data.endog, exog).fit(method="newton", disp=0)
res2 = Anes()
res2.mnlogit_basezero()
cls.res2 = res2
class TestMNLogitLBFGSBaseZero(CheckMNLogitBaseZero):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
cls.data = data
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
mymodel = MNLogit(data.endog, exog)
cls.res1 = mymodel.fit(method="lbfgs", disp=0, maxiter=50000,
#m=12, pgtol=1e-7, factr=1e3, # 5 failures
#m=20, pgtol=1e-8, factr=1e2, # 3 failures
#m=30, pgtol=1e-9, factr=1e1, # 1 failure
m=40, pgtol=1e-10, factr=5e0,
loglike_and_score=mymodel.loglike_and_score)
res2 = Anes()
res2.mnlogit_basezero()
cls.res2 = res2
def test_perfect_prediction():
cur_dir = os.path.dirname(os.path.abspath(__file__))
iris_dir = os.path.join(cur_dir, '..', '..', 'genmod', 'tests', 'results')
iris_dir = os.path.abspath(iris_dir)
iris = np.genfromtxt(os.path.join(iris_dir, 'iris.csv'), delimiter=",",
skip_header=1)
y = iris[:,-1]
X = iris[:,:-1]
X = X[y != 2]
y = y[y != 2]
X = sm.add_constant(X, prepend=True)
mod = Logit(y,X)
assert_raises(PerfectSeparationError, mod.fit)
#turn off raise PerfectSeparationError
mod.raise_on_perfect_prediction = False
mod.fit(disp=False) #should not raise
def test_poisson_predict():
    # GH #175: make sure Poisson predict works without offset and exposure
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=True)
res = sm.Poisson(data.endog, exog).fit(method='newton', disp=0)
pred1 = res.predict()
pred2 = res.predict(exog)
assert_almost_equal(pred1, pred2)
    # extra options
pred3 = res.predict(exog, offset=0, exposure=1)
assert_almost_equal(pred1, pred3)
pred3 = res.predict(exog, offset=0, exposure=2)
assert_almost_equal(2*pred1, pred3)
pred3 = res.predict(exog, offset=np.log(2), exposure=1)
assert_almost_equal(2*pred1, pred3)
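# A minimal sketch (not part of the original test module) of the identity the
# asserts above rely on: Poisson predict() returns
#     exp(X @ params + offset) * exposure
# so offset=np.log(2) with exposure=1, and offset=0 with exposure=2, both
# double the baseline prediction exp(X @ params).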
def test_poisson_newton():
    # GH #24: Newton sometimes doesn't work well
nobs = 10000
np.random.seed(987689)
x = np.random.randn(nobs, 3)
x = sm.add_constant(x, prepend=True)
y_count = np.random.poisson(np.exp(x.sum(1)))
mod = sm.Poisson(y_count, x)
res = mod.fit(start_params=-np.ones(4), method='newton', disp=0)
assert_(not res.mle_retvals['converged'])
def test_issue_339():
# make sure MNLogit summary works for J != K.
data = sm.datasets.anes96.load()
exog = data.exog
# leave out last exog column
exog = exog[:,:-1]
exog = sm.add_constant(exog, prepend=True)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
# strip the header from the test
smry = "\n".join(res1.summary().as_text().split('\n')[9:])
cur_dir = os.path.dirname(os.path.abspath(__file__))
test_case_file = os.path.join(cur_dir, 'results', 'mn_logit_summary.txt')
test_case = open(test_case_file, 'r').read()
np.testing.assert_(smry == test_case[:-1])
def test_issue_341():
data = sm.datasets.anes96.load()
exog = data.exog
# leave out last exog column
exog = exog[:,:-1]
exog = sm.add_constant(exog, prepend=True)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
x = exog[0]
np.testing.assert_equal(res1.predict(x).shape, (1,7))
np.testing.assert_equal(res1.predict(x[None]).shape, (1,7))
def test_iscount():
X = np.random.random((50, 10))
X[:,2] = np.random.randint(1, 10, size=50)
X[:,6] = np.random.randint(1, 10, size=50)
X[:,4] = np.random.randint(0, 2, size=50)
X[:,1] = np.random.randint(-10, 10, size=50) # not integers
count_ind = _iscount(X)
assert_equal(count_ind, [2, 6])
def test_isdummy():
X = np.random.random((50, 10))
X[:,2] = np.random.randint(1, 10, size=50)
X[:,6] = np.random.randint(0, 2, size=50)
X[:,4] = np.random.randint(0, 2, size=50)
X[:,1] = np.random.randint(-10, 10, size=50) # not integers
count_ind = _isdummy(X)
assert_equal(count_ind, [4, 6])
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'],
exit=False)
|
|
#! python
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# (C) 2001-2008 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE = \
'N', 'E', 'O', 'M', 'S'
STOPBITS_ONE, STOPBITS_TWO = (1, 2)
FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5, 6, 7, 8)
PARITY_NAMES = {
PARITY_NONE: 'None',
PARITY_EVEN: 'Even',
PARITY_ODD: 'Odd',
PARITY_MARK: 'Mark',
PARITY_SPACE: 'Space',
}
XON = chr(17)
XOFF = chr(19)
class SerialException(Exception):
"""Base class for serial port related exceptions."""
portNotOpenError = SerialException('Port not open')
class SerialTimeoutException(SerialException):
"""Write timeouts give an exception"""
writeTimeoutError = SerialTimeoutException("Write timeout")
class FileLike(object):
"""An abstract file like class.
This class implements readline and readlines based on read and
writelines based on write.
This class is used to provide the above functions for to Serial
port objects.
Note that when the serial port was opened with _NO_ timeout that
readline blocks until it sees a newline (or the specified size is
reached) and that readlines would never return and therefore
refuses to work (it raises an exception in this case)!
"""
def read(self, size):
raise NotImplementedError
def write(self, s):
raise NotImplementedError
def readline(self, size=None, eol='\n'):
"""read a line which is terminated with end-of-line (eol) character
('\n' by default) or until timeout"""
line = ''
while True:
c = self.read(1)
if c:
# not very efficient but lines are usually not that long
line += c
if c == eol:
break
if size is not None and len(line) >= size:
break
else:
break
return line
def readlines(self, sizehint=None, eol='\n'):
"""read a list of lines, until timeout
sizehint is ignored"""
if self.timeout is None:
raise ValueError(
"Serial port MUST have enabled timeout for this function!")
lines = []
while True:
line = self.readline(eol=eol)
if line:
lines.append(line)
if line[-1] != eol: # was the line received with a timeout?
break
else:
break
return lines
def xreadlines(self, sizehint=None):
"""just call readlines - here for compatibility"""
return self.readlines()
def writelines(self, sequence):
for line in sequence:
self.write(line)
def flush(self):
"""flush of file like objects"""
pass
# iterator for e.g. "for line in Serial(0): ..." usage
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def __iter__(self):
return self
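# A minimal sketch (not part of this module) showing how FileLike turns a raw
# read() into line-oriented access; StringSource and its internal buffer are
# hypothetical names used only for illustration.
class StringSource(FileLike):
    def __init__(self, data):
        self.buffer = data
        self.timeout = 1  # readlines() refuses to work without a timeout
    def read(self, size):
        # hand out up to 'size' characters and keep the rest buffered
        data, self.buffer = self.buffer[:size], self.buffer[size:]
        return data
# StringSource('ab\ncd\n').readlines() == ['ab\n', 'cd\n']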
class SerialBase(FileLike):
"""Serial port base class. Provides __init__ function and properties to
get/set port settings."""
    # default values, may be overridden in subclasses that do not support all
    # values
BAUDRATES = (
50,
75,
110,
134,
150,
200,
300,
600,
1200,
1800,
2400,
4800,
9600,
19200,
38400,
57600,
115200,
230400,
460800,
500000,
576000,
921600,
1000000,
1152000,
1500000,
2000000,
2500000,
3000000,
3500000,
4000000)
BYTESIZES = (FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS)
PARITIES = (PARITY_NONE, PARITY_EVEN, PARITY_ODD)
STOPBITS = (STOPBITS_ONE, STOPBITS_TWO)
def __init__(self,
                 port=None,  # number of device; numbering starts at
                             # zero. If everything fails, the user
                             # can specify a device string; note that
                             # this isn't portable any more. The port
                             # will be opened if one is specified
baudrate=9600, # baudrate
bytesize=EIGHTBITS, # number of databits
parity=PARITY_NONE, # enable parity checking
stopbits=STOPBITS_ONE, # number of stopbits
timeout=None, # set a timeout value, None to wait forever
xonxoff=0, # enable software flow control
rtscts=0, # enable RTS/CTS flow control
writeTimeout=None, # set a timeout for writes
dsrdtr=None,
# None: use rtscts setting, dsrdtr override if true or false
# Inter-character timeout, None to disable
interCharTimeout=None
):
"""Initialize comm port object. If a port is given, then the port will be
opened immediately. Otherwise a Serial port object in closed state
is returned."""
        self._isOpen = False
        # correct values are assigned below through the properties
        self._port = None
        self._baudrate = None
        self._bytesize = None
        self._parity = None
        self._stopbits = None
        self._timeout = None
        self._writeTimeout = None
        self._xonxoff = None
        self._rtscts = None
        self._dsrdtr = None
        self._interCharTimeout = None
# assign values using get/set methods using the properties feature
self.port = port
self.baudrate = baudrate
self.bytesize = bytesize
self.parity = parity
self.stopbits = stopbits
self.timeout = timeout
self.writeTimeout = writeTimeout
self.xonxoff = xonxoff
self.rtscts = rtscts
self.dsrdtr = dsrdtr
self.interCharTimeout = interCharTimeout
if port is not None:
self.open()
def isOpen(self):
"""Check if the port is opened."""
return self._isOpen
# - - - - - - - - - - - - - - - - - - - - - - - -
    # TODO: these are not really needed as there are the BAUDRATES etc.
    # attributes... maybe remove them before the final release...
def getSupportedBaudrates(self):
return [(str(b), b) for b in self.BAUDRATES]
def getSupportedByteSizes(self):
return [(str(b), b) for b in self.BYTESIZES]
def getSupportedStopbits(self):
return [(str(b), b) for b in self.STOPBITS]
def getSupportedParities(self):
return [(PARITY_NAMES[b], b) for b in self.PARITIES]
# - - - - - - - - - - - - - - - - - - - - - - - -
def setPort(self, port):
"""Change the port. The attribute portstr is set to a string that
contains the name of the port."""
was_open = self._isOpen
if was_open:
self.close()
        if port is not None:
            if isinstance(port, str):  # strings are taken directly
                self.portstr = port
            else:
                self.portstr = self.makeDeviceName(port)
else:
self.portstr = None
self._port = port
if was_open:
self.open()
def getPort(self):
"""Get the current port setting.
The value that was passed on init or using setPort() is passed back.
See also the attribute portstr which contains
the name of the port as a string."""
return self._port
port = property(getPort, setPort, doc="Port setting")
def setBaudrate(self, baudrate):
"""Change baudrate. It raises a ValueError if the port is open and the
baudrate is not possible. If the port is closed, then tha value is
accepted and the exception is raised when the port is opened."""
# ~ if baudrate not in self.BAUDRATES:
# raise ValueError("Not a valid baudrate: %r" % baudrate)
try:
self._baudrate = int(baudrate)
except TypeError:
raise ValueError("Not a valid baudrate: %r" % (baudrate,))
else:
if self._isOpen:
self._reconfigurePort()
def getBaudrate(self):
"""Get the current baudrate setting."""
return self._baudrate
baudrate = property(getBaudrate, setBaudrate, doc="Baudrate setting")
def setByteSize(self, bytesize):
"""Change byte size."""
if bytesize not in self.BYTESIZES:
raise ValueError("Not a valid byte size: %r" % (bytesize,))
self._bytesize = bytesize
if self._isOpen:
self._reconfigurePort()
def getByteSize(self):
"""Get the current byte size setting."""
return self._bytesize
bytesize = property(getByteSize, setByteSize, doc="Byte size setting")
def setParity(self, parity):
"""Change parity setting."""
if parity not in self.PARITIES:
raise ValueError("Not a valid parity: %r" % (parity,))
self._parity = parity
if self._isOpen:
self._reconfigurePort()
def getParity(self):
"""Get the current parity setting."""
return self._parity
parity = property(getParity, setParity, doc="Parity setting")
def setStopbits(self, stopbits):
"""Change stopbits size."""
if stopbits not in self.STOPBITS:
raise ValueError("Not a valid stopbit size: %r" % (stopbits,))
self._stopbits = stopbits
if self._isOpen:
self._reconfigurePort()
def getStopbits(self):
"""Get the current stopbits setting."""
return self._stopbits
stopbits = property(getStopbits, setStopbits, doc="Stopbits setting")
def setTimeout(self, timeout):
"""Change timeout setting."""
if timeout is not None:
if timeout < 0:
raise ValueError("Not a valid timeout: %r" % (timeout,))
try:
# test if it's a number, will throw a TypeError if not...
timeout + 1
except TypeError:
raise ValueError("Not a valid timeout: %r" % (timeout,))
self._timeout = timeout
if self._isOpen:
self._reconfigurePort()
def getTimeout(self):
"""Get the current timeout setting."""
return self._timeout
timeout = property(
getTimeout,
setTimeout,
doc="Timeout setting for read()")
def setWriteTimeout(self, timeout):
"""Change timeout setting."""
if timeout is not None:
if timeout < 0:
raise ValueError("Not a valid timeout: %r" % (timeout,))
try:
# test if it's a number, will throw a TypeError if not...
timeout + 1
except TypeError:
raise ValueError("Not a valid timeout: %r" % timeout)
self._writeTimeout = timeout
if self._isOpen:
self._reconfigurePort()
def getWriteTimeout(self):
"""Get the current timeout setting."""
return self._writeTimeout
writeTimeout = property(
getWriteTimeout,
setWriteTimeout,
doc="Timeout setting for write()")
def setXonXoff(self, xonxoff):
"""Change XonXoff setting."""
self._xonxoff = xonxoff
if self._isOpen:
self._reconfigurePort()
def getXonXoff(self):
"""Get the current XonXoff setting."""
return self._xonxoff
xonxoff = property(getXonXoff, setXonXoff, doc="Xon/Xoff setting")
def setRtsCts(self, rtscts):
"""Change RtsCts flow control setting."""
self._rtscts = rtscts
if self._isOpen:
self._reconfigurePort()
def getRtsCts(self):
"""Get the current RtsCts flow control setting."""
return self._rtscts
rtscts = property(getRtsCts, setRtsCts, doc="RTS/CTS flow control setting")
def setDsrDtr(self, dsrdtr=None):
"""Change DsrDtr flow control setting."""
if dsrdtr is None:
# if not set, keep backwards compatibility and follow rtscts
# setting
self._dsrdtr = self._rtscts
else:
# if defined independently, follow its value
self._dsrdtr = dsrdtr
if self._isOpen:
self._reconfigurePort()
def getDsrDtr(self):
"""Get the current DsrDtr flow control setting."""
return self._dsrdtr
dsrdtr = property(getDsrDtr, setDsrDtr, "DSR/DTR flow control setting")
def setInterCharTimeout(self, interCharTimeout):
"""Change inter-character timeout setting."""
if interCharTimeout is not None:
if interCharTimeout < 0:
raise ValueError("Not a valid timeout: %r" % interCharTimeout)
try:
# test if it's a number, will throw a TypeError if not...
interCharTimeout + 1
except TypeError:
raise ValueError("Not a valid timeout: %r" % interCharTimeout)
self._interCharTimeout = interCharTimeout
if self._isOpen:
self._reconfigurePort()
def getInterCharTimeout(self):
"""Get the current inter-character timeout setting."""
return self._interCharTimeout
interCharTimeout = property(
getInterCharTimeout,
setInterCharTimeout,
doc="Inter-character timeout setting for read()")
# - - - - - - - - - - - - - - - - - - - - - - - -
def __repr__(self):
"""String representation of the current port settings and its state."""
return "%s<id=0x%x, " \
"open=%s>(port=%r, baudrate=%r, bytesize=%r, parity=%r, " \
"stopbits=%r, timeout=%r, xonxoff=%r, rtscts=%r, dsrdtr=%r)" \
% (self.__class__.__name__,
id(self), self._isOpen, self.portstr,
self.baudrate, self.bytesize, self.parity, self.stopbits,
self.timeout, self.xonxoff, self.rtscts, self.dsrdtr,)
if __name__ == '__main__':
s = SerialBase()
print(s.portstr)
print(s.getSupportedBaudrates())
print(s.getSupportedByteSizes())
print(s.getSupportedParities())
print(s.getSupportedStopbits())
print(s)
|
|
import json
import pytest
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from openslides.mediafiles.models import Mediafile
from tests.count_queries import count_queries
from tests.test_case import TestCase
@pytest.mark.django_db(transaction=False)
def test_mediafiles_db_queries():
"""
Tests that only the following db queries are done:
    * 1 request to get the list of all files
* 1 request to get all lists of speakers.
* 1 request to get all groups
* 1 request to prefetch parents
"""
for index in range(10):
Mediafile.objects.create(
title=f"some_file{index}",
original_filename=f"some_file{index}",
mediafile=SimpleUploadedFile(f"some_file{index}", b"some content."),
)
assert count_queries(Mediafile.get_elements)() == 4
class TestCreation(TestCase):
def setUp(self):
self.client = APIClient()
self.client.login(username="admin", password="admin")
self.file = SimpleUploadedFile("some_file.ext", b"some content.")
def test_simple_file(self):
response = self.client.post(
reverse("mediafile-list"),
{"title": "test_title_ahyo1uifoo9Aiph2av5a", "mediafile": self.file},
format="multipart",
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
mediafile = Mediafile.objects.get()
self.assertEqual(mediafile.title, "test_title_ahyo1uifoo9Aiph2av5a")
self.assertFalse(mediafile.is_directory)
self.assertTrue(mediafile.mediafile.name)
self.assertEqual(mediafile.path, mediafile.original_filename)
def test_simple_directory(self):
response = self.client.post(
reverse("mediafile-list"),
{"title": "test_title_ahyo1uifoo9Aiph2av5a", "is_directory": True},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
mediafile = Mediafile.objects.get()
self.assertEqual(mediafile.title, "test_title_ahyo1uifoo9Aiph2av5a")
self.assertTrue(mediafile.is_directory)
self.assertEqual(mediafile.mediafile.name, "")
self.assertEqual(mediafile.original_filename, "")
self.assertEqual(mediafile.path, mediafile.title + "/")
def test_file_and_directory(self):
response = self.client.post(
reverse("mediafile-list"),
{
"title": "test_title_ahyo1uifoo9Aiph2av5a",
"is_directory": True,
"mediafile": self.file,
},
format="multipart",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(Mediafile.objects.exists())
def test_no_extension(self):
file = SimpleUploadedFile("no_extension", b"some content.")
response = self.client.post(
reverse("mediafile-list"),
{"title": "test_title_vai8oDogohheideedie4", "mediafile": file},
format="multipart",
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
mediafile = Mediafile.objects.get()
self.assertEqual(mediafile.title, "test_title_vai8oDogohheideedie4")
def test_mediafile_twice_different_title(self):
file1 = SimpleUploadedFile("file.ext", b"some content.")
file2 = SimpleUploadedFile("file.ext", b"some content.")
response = self.client.post(
reverse("mediafile-list"),
{"title": "test_title_Zeicheipeequie3ohfid", "mediafile": file1},
format="multipart",
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
mediafile = Mediafile.objects.get()
self.assertEqual(mediafile.title, "test_title_Zeicheipeequie3ohfid")
response = self.client.post(
reverse("mediafile-list"),
{"title": "test_title_aiChaetohs0quicee9eb", "mediafile": file2},
format="multipart",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(Mediafile.objects.count(), 1)
def test_directory_twice(self):
title = "test_title_kFJq83fjmqo2babfqk3f"
Mediafile.objects.create(is_directory=True, title=title)
response = self.client.post(
reverse("mediafile-list"), {"title": title, "is_directory": True}
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(Mediafile.objects.count(), 1)
def test_without_mediafile(self):
response = self.client.post(reverse("mediafile-list"), {})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(Mediafile.objects.exists())
def test_without_title(self):
response = self.client.post(reverse("mediafile-list"), {"is_directory": True})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(Mediafile.objects.exists())
def test_with_empty_title(self):
response = self.client.post(
reverse("mediafile-list"), {"is_directory": True, "title": ""}
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(Mediafile.objects.exists())
def test_directory_with_slash(self):
response = self.client.post(
reverse("mediafile-list"),
{"title": "test_title_with_/", "is_directory": True},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(Mediafile.objects.exists())
def test_with_parent(self):
parent_title = "test_title_3q0cqghZRFewocjwferT"
title = "test_title_gF3if8jmvrbnwdksg4je"
Mediafile.objects.create(is_directory=True, title=parent_title)
response = self.client.post(
reverse("mediafile-list"),
{"title": title, "is_directory": True, "parent_id": 1},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Mediafile.objects.count(), 2)
mediafile = Mediafile.objects.get(title="test_title_gF3if8jmvrbnwdksg4je")
self.assertEqual(mediafile.parent.title, "test_title_3q0cqghZRFewocjwferT")
self.assertEqual(mediafile.path, parent_title + "/" + title + "/")
def test_with_file_as_parent(self):
Mediafile.objects.create(
title="test_title_qejOVM84gw8ghwpKnqeg", mediafile=self.file
)
response = self.client.post(
reverse("mediafile-list"),
{
"title": "test_title_poejvvlmmorsgeroemr9",
"is_directory": True,
"parent_id": 1,
},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(Mediafile.objects.count(), 1)
def test_with_access_groups(self):
response = self.client.post(
reverse("mediafile-list"),
{
"title": "test_title_dggjwevBnUngelkdviom",
"is_directory": True,
"access_groups_id": [2, 4],
},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(Mediafile.objects.exists())
mediafile = Mediafile.objects.get()
self.assertEqual(
sorted([group.id for group in mediafile.access_groups.all()]), [2, 4]
)
self.assertEqual(mediafile.mediafile.name, "")
self.assertEqual(mediafile.original_filename, "")
self.assertEqual(mediafile.path, mediafile.title + "/")
def test_with_access_groups_wrong_json(self):
response = self.client.post(
reverse("mediafile-list"),
{
"title": "test_title_dggjwevBnUngelkdviom",
"is_directory": True,
"access_groups_id": json.dumps({"a": 324}),
},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(Mediafile.objects.exists())
def test_with_access_groups_wrong_json2(self):
response = self.client.post(
reverse("mediafile-list"),
{
"title": "test_title_dggjwevBnUngelkdviom",
"is_directory": True,
"access_groups_id": "_FWEpwwfkwk",
},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(Mediafile.objects.exists())
# TODO: List and retrieve
class TestUpdate(TestCase):
"""
Tree:
-dir
-mediafileA
-mediafileB
"""
def setUp(self):
self.client = APIClient()
self.client.login(username="admin", password="admin")
self.dir = Mediafile.objects.create(title="dir", is_directory=True)
fileA_name = "some_fileA.ext"
self.fileA = SimpleUploadedFile(fileA_name, b"some content.")
self.mediafileA = Mediafile.objects.create(
title="mediafileA",
original_filename=fileA_name,
mediafile=self.fileA,
parent=self.dir,
)
fileB_name = "some_fileB.ext"
self.fileB = SimpleUploadedFile(fileB_name, b"some content.")
self.mediafileB = Mediafile.objects.create(
title="mediafileB", original_filename=fileB_name, mediafile=self.fileB
)
def test_update(self):
response = self.client.put(
reverse("mediafile-detail", args=[self.mediafileA.pk]),
{"title": "test_title_gpasgrmg*miGUM)EAyGO", "access_groups_id": [2, 4]},
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
mediafile = Mediafile.objects.get(pk=self.mediafileA.pk)
self.assertEqual(mediafile.title, "test_title_gpasgrmg*miGUM)EAyGO")
self.assertEqual(mediafile.path, "dir/some_fileA.ext")
self.assertEqual(
sorted([group.id for group in mediafile.access_groups.all()]), [2, 4]
)
def test_update_directory(self):
response = self.client.put(
reverse("mediafile-detail", args=[self.dir.pk]),
{"title": "test_title_seklMOIGGihdjJBNaflkklnlg"},
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
dir = Mediafile.objects.get(pk=self.dir.pk)
self.assertEqual(dir.title, "test_title_seklMOIGGihdjJBNaflkklnlg")
mediafile = Mediafile.objects.get(pk=self.mediafileA.pk)
self.assertEqual(
mediafile.path, "test_title_seklMOIGGihdjJBNaflkklnlg/some_fileA.ext"
)
def test_update_parent_id(self):
""" Assert, that the parent id does not change """
response = self.client.put(
reverse("mediafile-detail", args=[self.mediafileA.pk]),
{"title": self.mediafileA.title, "parent_id": None},
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
mediafile = Mediafile.objects.get(pk=self.mediafileA.pk)
self.assertTrue(mediafile.parent)
self.assertEqual(mediafile.parent.pk, self.dir.pk)
|
|
from Izjave import *
from Sudoku import *
from Hadamard import *
from CNF import *
import time
newPureValFound = True
solutionVals = {}
lockSolutionVals = False
def DPLL(izjava):
    # The method receives a statement and preprocesses it with prepareStatement(izjava).
    # If the statement is not False from the start (because of an empty contradiction), the recursive method rec_DPLL(izjava, varValues) is called.
    # FIXME: prepareStatement should also check whether the statement is False because of some empty OR
    global solutionVals
    solution = False
    startTime = time.time()
    dataForDPLL = prepareStatement(izjava) # prepare the statement; the method returns [izjava, varValues], where the values are guaranteed correct
    if (dataForDPLL != False):
        izjava = dataForDPLL[0]
        varValues = dataForDPLL[1]
        solution = rec_DPLL(izjava, varValues) # call the recursive method
    print('Returned solution: ', solution)
    print('Values: ', solutionVals)
    endTime = time.time()
    timePassed = endTime - startTime
    print('Time: ', timePassed)
def prepareStatement(izjava):
    # The method takes a statement and prepares it as follows:
    # - converts the statement to CNF form
    # - converts the statement to a list of lists of the form [ ... [ ... ] ... ]
    # - removes all unit (single-literal) clauses
    # - checks that the case (X and notX) does not occur
    # - deletes all True sub-statements of the form (X or notX)
    # - finds all pure variables
    # The method returns a list containing the statement and the variable values.
    # If, based on the findings above, the case is unsolvable, the method returns False.
    # reset the previous solution
    global solutionVals
    global lockSolutionVals
    global newPureValFound
    solutionVals = {}
    lockSolutionVals = False
    newPureValFound = True
    izjava = CNF(izjava) # convert the statement to CNF
    izjava = izjava.poenostavi().vrni() # yields [ ... (...) ... ]
    izjava = get_2D_list(izjava) # yields [ ... [...] ... ]
    varValues = {} # initial variable values
    izjava = removeTrueSantences(izjava) # removes sub-statements of the form (X or notX) .... TODO: does this work correctly?
    izjava = sortByLength(izjava)
    # PROCESS UNIT CLAUSES FOR AS LONG AS ANY REMAIN!!
    while (True):
        changes = 0 # counter for the stopping condition
        newVarDict = {}
        newVarDict = removeSingleVars(izjava)
        if (newVarDict == False):
            print('ERROR: getInfo[1] == False ..... returning False!!!')
            return False
        else:
            varValues = concatDicts(varValues, newVarDict)
        if varValues == False: # if a contradiction (X and notX) occurred, there is no solution and we return False
            print('ERROR: varValues == False ..... returning False!!!')
            return False
        izjava = processStatement(izjava, varValues) # removes ORs that are True and variables inside ORs that are False
        if (newVarDict != {}):
            changes = changes + 1
        if (changes == 0):
            break
    # PROCESS PURE VARIABLES
    # The while loop below collects pure variables and simplifies the statement, repeating while any variable is still pure
    while (newPureValFound == True):
        pureVals = getPureVals(izjava) # get the dictionary of pure variables
        varValues = concatDicts(varValues, pureVals) # merge the existing variables with the pure ones
        # check that we have not run into a contradiction:
        if varValues == False:
            return False
        izjava = processStatement(izjava, varValues) # removes ORs that are True and variables inside ORs that are False
    izjava = sortByLength(izjava) # sort the statement by sub-statement length (ascending)
    # return a list containing the statement prepared for recursion and the dictionary of variable values (these no longer appear in the statement)
    return [izjava, varValues]
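# An assumed end-to-end illustration of prepareStatement (comments only):
# for ((X) and (X or Y) and (notY or Z)), the unit clause (X) forces X=True
# and simplification drops (X or Y); in the remaining [[notY, Z]] the
# variables Y and Z are pure (Y=False, Z=True), so the statement empties out
# and the call returns [[], {'X': True, 'Y': False, 'Z': True}].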
def rec_DPLL(izjava, varValues):
    # The method first checks whether a solution has already been found, then whether the current statement is solved or contradictory.
    # The first variable is then taken from the statement and set to True; copy 1 of the statement is processed with that value.
    # The first variable is also set to False; copy 2 of the statement is processed with that value.
    # if we already have a solution, no further checking is needed
    global lockSolutionVals
    if lockSolutionVals == True:
        return True
    # Check whether we have reached solution == True
    if is_AND_empty(izjava):
        global solutionVals
        if lockSolutionVals == False:
            lockSolutionVals = True
            solutionVals = copyVarValues(varValues)
        return True
    # Check whether we have reached solution == False
    if is_OR_empty(izjava):
        return False
    firstVar = getFirstVar(izjava) # get the first variable
    izjava_1 = copyStatement(izjava) # copy the statement
    izjava_2 = copyStatement(izjava) # copy the statement
    vals_1 = copyVarValues(varValues) # copy the values
    vals_2 = copyVarValues(varValues) # copy the values
    vals_1[firstVar] = True # first try setting the first variable to True
    izjava_1 = processStatement(izjava_1, vals_1)
    if (rec_DPLL(izjava_1, vals_1) != False): # recursive call
        return True
    vals_2[firstVar] = False
    izjava_2 = processStatement(izjava_2, vals_2)
    if (rec_DPLL(izjava_2, vals_2) != False): # recursive call
        return True
    return False
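# An assumed micro-trace of the branching above: for izjava = [[X, Y], [notX]]
# with empty varValues, firstVar is X. The True branch turns [notX] into an
# empty OR, so is_OR_empty fails that branch; the False branch drops [notX],
# reduces [[X, Y]] to [[Y]], and the next level sets Y=True, reaching an empty
# AND and returning True with solutionVals == {'X': False, 'Y': True}.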
def getPureVals(izjava):
    # The method walks through the statement and finds the pure variables.
    # It returns a dictionary of pure variables, set to True (x) or False (not x).
    pureVals = {}
    varsInStatement = {}
    global newPureValFound
    newPureValFound = False
    # fill the dictionary of variable occurrences: 1: X .... 2: notX ..... 3: (both X and notX)
    for subIzjava in izjava:
        for var in subIzjava:
            # if the variable is not in the dictionary yet, add it
            if ((var.vrni() in varsInStatement) == False):
                if isinstance(var, Var):
                    varsInStatement[var.vrni()] = 1
                else:
                    varsInStatement[var.vrni()] = 2
            # if the variable is already in the dictionary, check which occurrence code it has
            else:
                vrednost = varsInStatement[var.vrni()]
                if ((vrednost == 2) and (isinstance(var, Var))):
                    varsInStatement[var.vrni()] = 3
                elif ( (vrednost == 1) and (isinstance(var, Not)) ):
                    varsInStatement[var.vrni()] = 3
                else:
                    pass
    # determine which keys need to be removed
    keysToBeRemoved = []
    for key in varsInStatement:
        if (varsInStatement[key] == 3):
            keysToBeRemoved.append(key)
    # remove the keys
    for i in keysToBeRemoved:
        varsInStatement.pop(i)
    # fill the dictionary of pure variables
    for key in varsInStatement:
        newPureValFound = True
        if varsInStatement[key] == 1:
            pureVals[key] = True
        else:
            pureVals[key] = False
    return pureVals
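# An assumed micro-example: in [[X, Y], [X, Not(Y)]] the variable X occurs
# only positively (occurrence code 1), so getPureVals returns {'X': True};
# Y occurs both as Y and notY (code 3) and is therefore not pure.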
def processStatement(izjava, vals):
    # The method removes an OR clause if any of its elements is True.
    # The method removes an element from an OR clause if that element is False.
    # The method returns only the new statement.
    # Step 1A: first collect the indices of all sub-statements in which some element is True
    removeIndex = []
    index = -1
    for subIzjava in izjava:
        index = index + 1
        for e in range(0, len(subIzjava)): # e is the index of an element of the sub-statement
            thisElement = subIzjava[e]
            if (thisElement.vrni() in vals):
                value = vals[thisElement.vrni()] # get the value
                # if element e is an instance of notX
                if isinstance(thisElement, Not):
                    if (value == False):
                        removeIndex.append(index)
                        break
                elif isinstance(thisElement, Var):
                    if (value == True):
                        removeIndex.append(index)
                        break
    # Step 1B: delete those sub-statements from the statement
    removeIndex.reverse() # XXX: check that the indices are(!) sorted in descending order
    for i in range(0, len(removeIndex)):
        delIndexOfStat = removeIndex[i]
        del izjava[delIndexOfStat]
    # Step 2A: collect the indices of elements in the remaining sub-statements that are False
    # (any assigned literal left after step 1 must be False, since clauses with a True literal were removed)
    myRemIz = []
    myRemSub = []
    statIndex = -1
    elemIndex = -1
    for subIzjava in izjava:
        statIndex = statIndex + 1
        elemIndex = 0
        myRemSub = []
        for i in range(0,len(subIzjava)):
            if (subIzjava[i].vrni() in vals):
                myRemSub.append(elemIndex)
            elemIndex = elemIndex + 1
        myRemSub.reverse()
        myRemIz.append(myRemSub)
    # delete the marked indices from the statement
    for i in range(0, len(myRemIz)):
        for j in range(0, len(myRemIz[i])):
            del izjava[i][myRemIz[i][j]]
    return izjava
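# An assumed micro-example: with vals = {'X': True}, the statement
# [[X, Y], [Not(X), Z]] is processed to [[Z]]: the first OR contains the True
# literal X and is dropped in step 1, and the now-False literal notX is
# removed from the second OR in step 2.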
def getFirstVar(izjava):
    # Returns the name of the first variable in the statement
    x = izjava[0][0].vrni()
    return x
def copyStatement(izjava):
    # The method copies the statement and returns the copy
    copy = []
    for subIzjava in izjava:
        subCopy = []
        for i in subIzjava:
            subCopy.append(i)
        copy.append(subCopy)
    return copy
def copyVarValues(varValues):
    # The method copies the variable values and returns the copy
    copy = {}
    for keys in varValues:
        copy[keys] = varValues[keys]
    return copy
def get_2D_list(izjava):
    # The method takes a list of the form [ ... (...) ... ] and returns a list of the form [ ... [...] ... ].
    # Single literals end up in a list of length 1
    allList = []
    subList = []
    # the if handles the case of a single variable inside the AND
    if isinstance(izjava, Var) or isinstance(izjava, Not):
        subList.append(izjava)
        allList.append(subList)
        return allList
    for i in range(0, len(izjava)):
        subList = []
        if isinstance(izjava[i], Not) or isinstance(izjava[i], Var):
            subList.append(izjava[i])
        else:
            for var in izjava[i].vrni():
                subList.append(var)
        allList.append(subList)
    return allList
def is_AND_empty(izjava):
    # The method checks whether izjava == []. In that case we have a solution and return True, otherwise False
    return len(izjava) == 0
def is_OR_empty(izjava):
    # The method checks whether the statement has the form [ ... [] ... ].
    # If any OR is empty, there is no solution and we return True, otherwise we return False
    for subIzjava in izjava:
        if len(subIzjava) == 0:
            return True
    return False
def concatDicts(oldValues, newValues):
    # The method merges old and new variable values.
    # It takes two dictionaries, of old and new variable values, and merges them.
    # It returns the merged dictionary, or False if they cannot be merged (because a contradiction occurs)
    for key in newValues:
        # if the key is already contained
        if key in oldValues:
            # if the old and new values differ
            if oldValues[key] != newValues[key]:
                return False
        # if not contained, add the new value to the old values
        else:
            oldValues[key] = newValues[key]
    return oldValues
def removeSingleVars(izjava):
    # The method collects all sub-statements of length 1 (i.e., unit clauses)
    # and assigns the corresponding variables their forced values.
    # The method returns newVarDict, the newly assigned values (the clauses themselves are removed later by processStatement).
    # The method returns False if (X AND notX) occurs.
    singleVars = [] # unit-clause literals are stored here
    removeSubIndex = [] # indices of sub-statements that need to be removed
    newVarDict = {}
    # collect all unit-clause literals (including duplicates) and the indices of sub-statements to remove
    for i in range(0, len(izjava)):
        if len(izjava[i]) == 1:
            singleVars.append(izjava[i][0])
            removeSubIndex.append(i)
    # check that (X and notX) did not occur; the contradiction only exists
    # when both literals refer to the same variable
    for i in range(0, (len(singleVars)-1)):
        for j in range((i+1), len(singleVars)):
            s1 = singleVars[i]
            s2 = singleVars[j]
            if s1.vrni() == s2.vrni() and ((isinstance(s1, Var) and isinstance(s2, Not)) or (isinstance(s1, Not) and isinstance(s2, Var))):
                return False
    # remove duplicates, even though it should not be necessary
    removeIndex = []
    for i in range(0, (len(singleVars)-1)):
        for j in range((i+1), len(singleVars)):
            if(singleVars[i] == singleVars[j]):
                removeIndex.append(j)
    # deduplicate and delete in descending order so earlier deletions do not
    # shift the indices of later ones
    for idx in sorted(set(removeIndex), reverse=True):
        del singleVars[idx]
    # add the unit variables to the dictionary and assign them the appropriate values
    for i in range(0, len(singleVars)):
        if isinstance(singleVars[i], Var):
            newVarDict[singleVars[i].vrni()] = True
        else:
            newVarDict[singleVars[i].vrni()] = False
    # return only the dictionary of new values; consistency is checked by concatDicts anyway, and the variables themselves are removed by processStatement
    return newVarDict
def removeTrueSantences(izjava):
    # The method removes sub-statements of the form (X or notX)
    toBeRemoved = [] # indices of sub-statements to be removed
    indexCounter = -1
    for subIzjava in izjava:
        indexCounter = indexCounter + 1
        for i in range(0, len(subIzjava)-1):
            for j in range((i+1), len(subIzjava)):
                # check whether (X or notX) occurs
                if ( subIzjava[i].vrni() == subIzjava[j].vrni() ) and ( subIzjava[i].poenostavi() != subIzjava[j].poenostavi() ):
                    toBeRemoved.append(indexCounter)
    # remove the (X or notX) sub-statements; deduplicate the indices so a
    # clause containing several such pairs is only deleted once
    for idx in sorted(set(toBeRemoved), reverse=True):
        del izjava[idx]
    return izjava
def sortByLength(izjava):
    # The method sorts the sub-statements of the statement by their length (ascending). It returns the sorted statement
    for i in range(0, len(izjava)-1):
        for j in range((i+1), len(izjava)):
            if len(izjava[i]) > len(izjava[j]):
                tempIzjava = izjava[j]
                izjava[j] = izjava[i]
                izjava[i] = tempIzjava
    return izjava
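# A shorter equivalent of the swap loop above (a sketch, assuming only clause
# length matters for the ordering): izjava.sort(key=len). Note that list.sort
# is stable while the hand-written swap is not, so equally long clauses may
# end up in a different relative order.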
def getTestIzjava(caseNumber):
x = Var('X')
y = Var('Y')
z = Var('Z')
q = Var('Q')
a = Var('A')
or_1 = Or([x,Not(Not(y)),Not(z)])
or_2 = Or([Not(x),Not(y)])
or_3 = Or([x,z])
or_4 = x
or_5 = Or( [ z , y, Not(q) ] )
or_6 = Or([Not(x)])
if caseNumber == 1:
i = And([])
elif caseNumber == 2:
i = And([x])
elif caseNumber == 3:
i = And([x, Or([])])
elif caseNumber == 4:
i = And([Or([]),Or([x])])
elif caseNumber == 5:
i = or_4
i = And([Or([x,Not(y),Not(z)]) , Or([Not(x),Not(y)]) , Or([x,z]) , Or([Not(x),Not(q)])])
elif caseNumber == 6:
i = And([or_1 , or_2 , or_3, or_4, or_5])
elif caseNumber == 7:
i = And([x, Or([x,y]), Or([Not(y), z]), Or([Not(x)])])
elif caseNumber == 8:
i = And([x, Not(x), Or([x, q])]) #test (x AND notx)
elif caseNumber == 9:
i = And([Or([x, y, Not(x)]), Or([q,z])]) #test (x or notX)
elif caseNumber == 10:
        # test for pureValues: there are 2 pure variables here, everything else drops away
i = And([ Or([y, Not(q)]) , Or([y, Not(z)]) , Or([x, Not(y)]) , Or([y,z,q]) , Or([x,Not(z)]) , Or([x,Not(q)]) ])
elif caseNumber == 666:
i = And([ Not(x), Or([x,z]) ])
elif caseNumber == 777:
i = And([ Not(x), Or([Not(x), z, Not(y)]) , Or([x,z]) , Or([Not(z), y]), Or([q,a]) , Or([Not(q), a]) , Or([q, Not(a)]) , Or([Not(q),Not(a)]) ])
else:
i = or_4
i = And([Or([x,Not(y),Not(z)]) , Or([Not(x),Not(y)]) , Or([x,z]) , Or([Not(x),Not(q)])])
return i
def pozdravnaMetoda():
    print('************************************************************************************************************')
    print('Welcome to the DPLL algorithm')
    print('To run the algorithm, call the function DPLL(izjava) and pass it a statement')
    print('Example statement: ((X or Y) and (Y or notZ)) := And( [ Or([ X, Y ]) , Or([ Y, Not(Z) ]) ] )')
    print('Copy/paste test cases for checking correct behaviour are provided below')
print('izjava=getTestIzjava(0) -----> ((X or notY or notZ) and (notX or notY) and (X or Z) and (notX or notQ))')
print('izjava=getTestIzjava(1) -----> ()')
print('izjava=getTestIzjava(2) -----> ((X))')
print('izjava=getTestIzjava(3) -----> ((X) and ())')
print('izjava=getTestIzjava(4) -----> (() and (X))')
print('izjava=getTestIzjava(5) -----> ((X or notY or notZ) and (notX or notY) and (X or Z) and (notX or notQ))')
print('izjava=getTestIzjava(6) -----> ((X or Y or notZ) and (notX or notY) and (X or Z) and (X) and (Z or Y or notQ))')
print('izjava=getTestIzjava(7) -----> ((X) and (X or Y) and (notY or Z) and (notX))')
print('izjava=getTestIzjava(8) -----> ((X) and (notX) and (X or Q))')
print('izjava=getTestIzjava(9) -----> ((X or Y or notX) and (Q or Z))')
print('izjava=getTestIzjava(10) -----> ((Y or notQ) and (Y or notZ) and (X or notY) and (Y or Z or Q) and (X or notZ) and (X or notQ))')
print('')
    print('Sudoku example: izjava = sudoku([[1,2,0,0],[3,0,1,0],[0,1,0,3],[0,0,2,1]])')
    print('You can also build a statement yourself, but every variable you use must be created first (see Izjave.py)')
print('************************************************************************************************************')
izjava = sudoku([[1,2,0,0],[3,0,1,0],[0,1,0,3],[0,0,2,1]])
#izjava = sudoku([[5,3,0,0,7,0,0,0,0],[6,0,0,1,9,5,0,0,0],[0,9,8,0,0,0,0,6,0],[8,0,0,0,6,0,0,0,3],[4,0,0,8,0,3,0,0,1],[7,0,0,0,2,0,0,0,6],[0,6,0,0,0,0,2,8,0],[0,0,0,4,1,9,0,0,5],[0,0,0,0,8,0,0,7,9]])
#izjava = getTestIzjava(4)
#izjava = hadamard(4)
#izjava = hadamard(2)
DPLL(izjava)
pozdravnaMetoda()
|
|
import operator
from collections import Counter, defaultdict
from functools import partial, reduce
from itertools import chain
from operator import attrgetter
from django.db import IntegrityError, connections, transaction
from django.db.models import query_utils, signals, sql
class ProtectedError(IntegrityError):
def __init__(self, msg, protected_objects):
self.protected_objects = protected_objects
super().__init__(msg, protected_objects)
class RestrictedError(IntegrityError):
def __init__(self, msg, restricted_objects):
self.restricted_objects = restricted_objects
super().__init__(msg, restricted_objects)
def CASCADE(collector, field, sub_objs, using):
collector.collect(
sub_objs, source=field.remote_field.model, source_attr=field.name,
nullable=field.null, fail_on_restricted=False,
)
if field.null and not connections[using].features.can_defer_constraint_checks:
collector.add_field_update(field, None, sub_objs)
def PROTECT(collector, field, sub_objs, using):
raise ProtectedError(
"Cannot delete some instances of model '%s' because they are "
"referenced through a protected foreign key: '%s.%s'" % (
field.remote_field.model.__name__, sub_objs[0].__class__.__name__, field.name
),
sub_objs
)
def RESTRICT(collector, field, sub_objs, using):
collector.add_restricted_objects(field, sub_objs)
collector.add_dependency(field.remote_field.model, field.model)
def SET(value):
if callable(value):
def set_on_delete(collector, field, sub_objs, using):
collector.add_field_update(field, value(), sub_objs)
else:
def set_on_delete(collector, field, sub_objs, using):
collector.add_field_update(field, value, sub_objs)
set_on_delete.deconstruct = lambda: ('django.db.models.SET', (value,), {})
return set_on_delete
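# Typical usage (per Django's documentation; get_sentinel_user is an assumed
# helper):
#     models.ForeignKey(User, on_delete=SET(get_sentinel_user))
# calls the callable at deletion time, while SET(some_value) stores the plain
# value on the referencing rows instead.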
def SET_NULL(collector, field, sub_objs, using):
collector.add_field_update(field, None, sub_objs)
def SET_DEFAULT(collector, field, sub_objs, using):
collector.add_field_update(field, field.get_default(), sub_objs)
def DO_NOTHING(collector, field, sub_objs, using):
pass
def get_candidate_relations_to_delete(opts):
# The candidate relations are the ones that come from N-1 and 1-1 relations.
# N-N (i.e., many-to-many) relations aren't candidates for deletion.
return (
f for f in opts.get_fields(include_hidden=True)
if f.auto_created and not f.concrete and (f.one_to_one or f.one_to_many)
)
class Collector:
def __init__(self, using):
self.using = using
# Initially, {model: {instances}}, later values become lists.
self.data = defaultdict(set)
# {model: {(field, value): {instances}}}
self.field_updates = defaultdict(partial(defaultdict, set))
# {model: {field: {instances}}}
self.restricted_objects = defaultdict(partial(defaultdict, set))
# fast_deletes is a list of queryset-likes that can be deleted without
# fetching the objects into memory.
self.fast_deletes = []
# Tracks deletion-order dependency for databases without transactions
# or ability to defer constraint checks. Only concrete model classes
# should be included, as the dependencies exist only between actual
# database tables; proxy models are represented here by their concrete
# parent.
self.dependencies = defaultdict(set) # {model: {models}}
def add(self, objs, source=None, nullable=False, reverse_dependency=False):
"""
Add 'objs' to the collection of objects to be deleted. If the call is
the result of a cascade, 'source' should be the model that caused it,
and 'nullable' should be set to True if the relation can be null.
Return a list of all objects that were not already collected.
"""
if not objs:
return []
new_objs = []
model = objs[0].__class__
instances = self.data[model]
for obj in objs:
if obj not in instances:
new_objs.append(obj)
instances.update(new_objs)
# Nullable relationships can be ignored -- they are nulled out before
# deleting, and therefore do not affect the order in which objects have
# to be deleted.
if source is not None and not nullable:
self.add_dependency(source, model, reverse_dependency=reverse_dependency)
return new_objs
def add_dependency(self, model, dependency, reverse_dependency=False):
if reverse_dependency:
model, dependency = dependency, model
self.dependencies[model._meta.concrete_model].add(dependency._meta.concrete_model)
self.data.setdefault(dependency, self.data.default_factory())
def add_field_update(self, field, value, objs):
"""
Schedule a field update. 'objs' must be a homogeneous iterable
collection of model instances (e.g. a QuerySet).
"""
if not objs:
return
model = objs[0].__class__
self.field_updates[model][field, value].update(objs)
def add_restricted_objects(self, field, objs):
if objs:
model = objs[0].__class__
self.restricted_objects[model][field].update(objs)
def clear_restricted_objects_from_set(self, model, objs):
if model in self.restricted_objects:
self.restricted_objects[model] = {
field: items - objs
for field, items in self.restricted_objects[model].items()
}
def clear_restricted_objects_from_queryset(self, model, qs):
if model in self.restricted_objects:
objs = set(qs.filter(pk__in=[
obj.pk
for objs in self.restricted_objects[model].values() for obj in objs
]))
self.clear_restricted_objects_from_set(model, objs)
def _has_signal_listeners(self, model):
return (
signals.pre_delete.has_listeners(model) or
signals.post_delete.has_listeners(model)
)
def can_fast_delete(self, objs, from_field=None):
"""
Determine if the objects in the given queryset-like or single object
can be fast-deleted. This can be done if there are no cascades, no
parents and no signal listeners for the object class.
The 'from_field' tells where we are coming from - we need this to
        determine if the objects are in fact to be deleted. Also allow
        skipping a parent -> child -> parent chain that would prevent fast
        delete of the child.
"""
if from_field and from_field.remote_field.on_delete is not CASCADE:
return False
if hasattr(objs, '_meta'):
model = objs._meta.model
elif hasattr(objs, 'model') and hasattr(objs, '_raw_delete'):
model = objs.model
else:
return False
if self._has_signal_listeners(model):
return False
# The use of from_field comes from the need to avoid cascade back to
# parent when parent delete is cascading to child.
opts = model._meta
return (
all(link == from_field for link in opts.concrete_model._meta.parents.values()) and
# Foreign keys pointing to this model.
all(
related.field.remote_field.on_delete is DO_NOTHING
for related in get_candidate_relations_to_delete(opts)
) and (
# Something like generic foreign key.
not any(hasattr(field, 'bulk_related_objects') for field in opts.private_fields)
)
)
def get_del_batches(self, objs, fields):
"""
Return the objs in suitably sized batches for the used connection.
"""
field_names = [field.name for field in fields]
conn_batch_size = max(
connections[self.using].ops.bulk_batch_size(field_names, objs), 1)
if len(objs) > conn_batch_size:
return [objs[i:i + conn_batch_size]
for i in range(0, len(objs), conn_batch_size)]
else:
return [objs]
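    # An assumed numeric example for get_del_batches: with a connection batch
    # size of 1000 and 2500 collected objects, it returns three slices of
    # 1000, 1000, and 500 objects, keeping each DELETE within backend limits.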
def collect(self, objs, source=None, nullable=False, collect_related=True,
source_attr=None, reverse_dependency=False, keep_parents=False,
fail_on_restricted=True):
"""
Add 'objs' to the collection of objects to be deleted as well as all
parent instances. 'objs' must be a homogeneous iterable collection of
model instances (e.g. a QuerySet). If 'collect_related' is True,
related objects will be handled by their respective on_delete handler.
        If the call is the result of a cascade, 'source' should be the model
        that caused it and 'nullable' should be set to True if the relation
        can be null.
If 'reverse_dependency' is True, 'source' will be deleted before the
current model, rather than after. (Needed for cascading to parent
models, the one case in which the cascade follows the forwards
direction of an FK rather than the reverse direction.)
        If 'keep_parents' is True, data of parent models will not be deleted.
        If 'fail_on_restricted' is False, no error will be raised even if
        deleting such objects is prohibited due to RESTRICT; this defers
        restricted object checking to recursive calls, where the top-level
        call may need to collect more objects to determine whether restricted
        ones can be deleted.
"""
if self.can_fast_delete(objs):
self.fast_deletes.append(objs)
return
new_objs = self.add(objs, source, nullable,
reverse_dependency=reverse_dependency)
if not new_objs:
return
model = new_objs[0].__class__
if not keep_parents:
# Recursively collect concrete model's parent models, but not their
# related objects. These will be found by meta.get_fields()
concrete_model = model._meta.concrete_model
for ptr in concrete_model._meta.parents.values():
if ptr:
parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
self.collect(parent_objs, source=model,
source_attr=ptr.remote_field.related_name,
collect_related=False,
reverse_dependency=True,
fail_on_restricted=False)
if not collect_related:
return
if keep_parents:
parents = set(model._meta.get_parent_list())
model_fast_deletes = defaultdict(list)
protected_objects = defaultdict(list)
for related in get_candidate_relations_to_delete(model._meta):
# Preserve parent reverse relationships if keep_parents=True.
if keep_parents and related.model in parents:
continue
field = related.field
if field.remote_field.on_delete == DO_NOTHING:
continue
related_model = related.related_model
if self.can_fast_delete(related_model, from_field=field):
model_fast_deletes[related_model].append(field)
continue
batches = self.get_del_batches(new_objs, [field])
for batch in batches:
sub_objs = self.related_objects(related_model, [field], batch)
# Non-referenced fields can be deferred if no signal receivers
# are connected for the related model as they'll never be
# exposed to the user. Skip field deferring when some
# relationships are select_related as interactions between both
# features are hard to get right. This should only happen in
# the rare cases where .related_objects is overridden anyway.
if not (sub_objs.query.select_related or self._has_signal_listeners(related_model)):
referenced_fields = set(chain.from_iterable(
(rf.attname for rf in rel.field.foreign_related_fields)
for rel in get_candidate_relations_to_delete(related_model._meta)
))
sub_objs = sub_objs.only(*tuple(referenced_fields))
if sub_objs:
try:
field.remote_field.on_delete(self, field, sub_objs, self.using)
except ProtectedError as error:
key = "'%s.%s'" % (field.model.__name__, field.name)
protected_objects[key] += error.protected_objects
if protected_objects:
raise ProtectedError(
'Cannot delete some instances of model %r because they are '
'referenced through protected foreign keys: %s.' % (
model.__name__,
', '.join(protected_objects),
),
set(chain.from_iterable(protected_objects.values())),
)
for related_model, related_fields in model_fast_deletes.items():
batches = self.get_del_batches(new_objs, related_fields)
for batch in batches:
sub_objs = self.related_objects(related_model, related_fields, batch)
self.fast_deletes.append(sub_objs)
for field in model._meta.private_fields:
if hasattr(field, 'bulk_related_objects'):
# It's something like generic foreign key.
sub_objs = field.bulk_related_objects(new_objs, self.using)
self.collect(sub_objs, source=model, nullable=True, fail_on_restricted=False)
if fail_on_restricted:
# Raise an error if collected restricted objects (RESTRICT) aren't
# candidates for deletion also collected via CASCADE.
for related_model, instances in self.data.items():
self.clear_restricted_objects_from_set(related_model, instances)
for qs in self.fast_deletes:
self.clear_restricted_objects_from_queryset(qs.model, qs)
if self.restricted_objects.values():
restricted_objects = defaultdict(list)
for related_model, fields in self.restricted_objects.items():
for field, objs in fields.items():
if objs:
key = "'%s.%s'" % (related_model.__name__, field.name)
restricted_objects[key] += objs
if restricted_objects:
raise RestrictedError(
'Cannot delete some instances of model %r because '
'they are referenced through restricted foreign keys: '
'%s.' % (
model.__name__,
', '.join(restricted_objects),
),
set(chain.from_iterable(restricted_objects.values())),
)
def related_objects(self, related_model, related_fields, objs):
"""
Get a QuerySet of the related model to objs via related fields.
"""
predicate = reduce(operator.or_, (
query_utils.Q(**{'%s__in' % related_field.name: objs})
for related_field in related_fields
))
return related_model._base_manager.using(self.using).filter(predicate)
def instances_with_model(self):
for model, instances in self.data.items():
for obj in instances:
yield model, obj
def sort(self):
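# A model is appended only once all of its dependencies (per
# self.dependencies) are already placed; if no progress can be made
# (a dependency cycle), self.data is left in its original order.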
sorted_models = []
concrete_models = set()
models = list(self.data)
while len(sorted_models) < len(models):
found = False
for model in models:
if model in sorted_models:
continue
dependencies = self.dependencies.get(model._meta.concrete_model)
if not (dependencies and dependencies.difference(concrete_models)):
sorted_models.append(model)
concrete_models.add(model._meta.concrete_model)
found = True
if not found:
return
self.data = {model: self.data[model] for model in sorted_models}
def delete(self):
# sort instance collections
for model, instances in self.data.items():
self.data[model] = sorted(instances, key=attrgetter("pk"))
# if possible, bring the models in an order suitable for databases that
# don't support transactions or cannot defer constraint checks until the
# end of a transaction.
self.sort()
# number of objects deleted for each model label
deleted_counter = Counter()
# Optimize for the case with a single obj and no dependencies
if len(self.data) == 1 and len(instances) == 1:
instance = list(instances)[0]
if self.can_fast_delete(instance):
with transaction.mark_for_rollback_on_error(self.using):
count = sql.DeleteQuery(model).delete_batch([instance.pk], self.using)
setattr(instance, model._meta.pk.attname, None)
return count, {model._meta.label: count}
with transaction.atomic(using=self.using, savepoint=False):
# send pre_delete signals
for model, obj in self.instances_with_model():
if not model._meta.auto_created:
signals.pre_delete.send(
sender=model, instance=obj, using=self.using
)
# fast deletes
for qs in self.fast_deletes:
count = qs._raw_delete(using=self.using)
if count:
deleted_counter[qs.model._meta.label] += count
# update fields
for model, instances_for_fieldvalues in self.field_updates.items():
for (field, value), instances in instances_for_fieldvalues.items():
query = sql.UpdateQuery(model)
query.update_batch([obj.pk for obj in instances],
{field.name: value}, self.using)
# reverse instance collections
for instances in self.data.values():
instances.reverse()
# delete instances
for model, instances in self.data.items():
query = sql.DeleteQuery(model)
pk_list = [obj.pk for obj in instances]
count = query.delete_batch(pk_list, self.using)
if count:
deleted_counter[model._meta.label] += count
if not model._meta.auto_created:
for obj in instances:
signals.post_delete.send(
sender=model, instance=obj, using=self.using
)
# update collected instances
for instances_for_fieldvalues in self.field_updates.values():
for (field, value), instances in instances_for_fieldvalues.items():
for obj in instances:
setattr(obj, field.attname, value)
for model, instances in self.data.items():
for instance in instances:
setattr(instance, model._meta.pk.attname, None)
return sum(deleted_counter.values()), dict(deleted_counter)
|
|
# Copyright (c) 2015 Ericsson AB
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import objects
from nova.scheduler import weights
from nova.scheduler.weights import affinity
from nova import test
from nova.tests.unit.scheduler import fakes
class SoftWeigherTestBase(test.NoDBTestCase):
def setUp(self):
super(SoftWeigherTestBase, self).setUp()
self.weight_handler = weights.HostWeightHandler()
self.weighers = []
def _get_weighed_host(self, hosts, policy, group='default'):
if group == 'default':
members = ['member1', 'member2', 'member3', 'member4', 'member5',
'member6', 'member7']
else:
members = ['othermember1', 'othermember2']
request_spec = objects.RequestSpec(
instance_group=objects.InstanceGroup(
policy=policy,
members=members))
return self.weight_handler.get_weighed_objects(self.weighers,
hosts,
request_spec)[0]
def _get_all_hosts(self):
host_values = [
('host1', 'node1', {'instances': {
'member1': mock.sentinel,
'instance13': mock.sentinel
}}),
('host2', 'node2', {'instances': {
'member2': mock.sentinel,
'member3': mock.sentinel,
'member4': mock.sentinel,
'member5': mock.sentinel,
'othermember1': mock.sentinel,
'othermember2': mock.sentinel,
'instance14': mock.sentinel
}}),
('host3', 'node3', {'instances': {
'instance15': mock.sentinel
}}),
('host4', 'node4', {'instances': {
'member6': mock.sentinel,
'member7': mock.sentinel,
'instance16': mock.sentinel
}})]
return [fakes.FakeHostState(host, node, values)
for host, node, values in host_values]
def _do_test(self, policy, expected_weight, expected_host,
group='default'):
hostinfo_list = self._get_all_hosts()
weighed_host = self._get_weighed_host(hostinfo_list,
policy, group)
self.assertEqual(expected_weight, weighed_host.weight)
if expected_host:
self.assertEqual(expected_host, weighed_host.obj.host)
class SoftAffinityWeigherTestCase(SoftWeigherTestBase):
def setUp(self):
super(SoftAffinityWeigherTestCase, self).setUp()
self.weighers = [affinity.ServerGroupSoftAffinityWeigher()]
self.softaffin_weigher = affinity.ServerGroupSoftAffinityWeigher()
def test_soft_affinity_weight_multiplier_by_default(self):
self._do_test(policy='soft-affinity',
expected_weight=1.0,
expected_host='host2')
def test_soft_affinity_weight_multiplier_zero_value(self):
# We cannot predict the host; all hosts have the same weight.
self.flags(soft_affinity_weight_multiplier=0.0,
group='filter_scheduler')
self._do_test(policy='soft-affinity',
expected_weight=0.0,
expected_host=None)
def test_soft_affinity_weight_multiplier_positive_value(self):
self.flags(soft_affinity_weight_multiplier=2.0,
group='filter_scheduler')
self._do_test(policy='soft-affinity',
expected_weight=2.0,
expected_host='host2')
def test_soft_affinity_weight_multiplier(self):
self.flags(soft_affinity_weight_multiplier=0.0,
group='filter_scheduler')
host_attr = {'instances': {'instance1': mock.sentinel}}
host1 = fakes.FakeHostState('fake-host', 'node', host_attr)
# By default, return the weight_multiplier configuration directly
self.assertEqual(0.0, self.softaffin_weigher.weight_multiplier(host1))
host1.aggregates = [
objects.Aggregate(
id=1,
name='foo',
hosts=['fake-host'],
metadata={'soft_affinity_weight_multiplier': '2'},
)]
# read the weight multiplier from metadata to override the config
self.assertEqual(2.0, self.softaffin_weigher.weight_multiplier(host1))
host1.aggregates = [
objects.Aggregate(
id=1,
name='foo',
hosts=['fake-host'],
metadata={'soft_affinity_weight_multiplier': '2'},
),
objects.Aggregate(
id=2,
name='foo',
hosts=['fake-host'],
metadata={'soft_affinity_weight_multiplier': '1.5'},
)]
# If the host is in multiple aggregates with conflicting weight values
# in their metadata, the minimum value among them is used.
self.assertEqual(1.5, self.softaffin_weigher.weight_multiplier(host1))
def test_host_with_agg(self):
self.flags(soft_affinity_weight_multiplier=0.0,
group='filter_scheduler')
hostinfo_list = self._get_all_hosts()
aggs = [
objects.Aggregate(
id=1,
name='foo',
hosts=['fake-host'],
metadata={'soft_affinity_weight_multiplier': '1.5'},
)]
for h in hostinfo_list:
h.aggregates = aggs
weighed_host = self._get_weighed_host(hostinfo_list,
'soft-affinity')
self.assertEqual(1.5, weighed_host.weight)
self.assertEqual('host2', weighed_host.obj.host)
def test_running_twice(self):
"""Run the weighing twice for different groups each run
The first run has a group with more members on the same host than the
second both. In both cases, most members of their groups are on the
same host => weight should be maximum (1 with default multiplier).
"""
self._do_test(policy='soft-affinity',
expected_weight=1.0,
expected_host='host2')
self._do_test(policy='soft-affinity',
expected_weight=1.0,
expected_host='host2',
group='other')
class SoftAntiAffinityWeigherTestCase(SoftWeigherTestBase):
def setUp(self):
super(SoftAntiAffinityWeigherTestCase, self).setUp()
self.weighers = [affinity.ServerGroupSoftAntiAffinityWeigher()]
self.antiaffin_weigher = affinity.ServerGroupSoftAntiAffinityWeigher()
def test_soft_anti_affinity_weight_multiplier_by_default(self):
self._do_test(policy='soft-anti-affinity',
expected_weight=1.0,
expected_host='host3')
def test_soft_anti_affinity_weight_multiplier_zero_value(self):
# We cannot predict the host; all hosts have the same weight.
self.flags(soft_anti_affinity_weight_multiplier=0.0,
group='filter_scheduler')
self._do_test(policy='soft-anti-affinity',
expected_weight=0.0,
expected_host=None)
def test_soft_anti_affinity_weight_multiplier_positive_value(self):
self.flags(soft_anti_affinity_weight_multiplier=2.0,
group='filter_scheduler')
self._do_test(policy='soft-anti-affinity',
expected_weight=2.0,
expected_host='host3')
def test_soft_anti_affinity_weight_multiplier(self):
self.flags(soft_anti_affinity_weight_multiplier=0.0,
group='filter_scheduler')
host_attr = {'instances': {'instance1': mock.sentinel}}
host1 = fakes.FakeHostState('fake-host', 'node', host_attr)
# By default, return the weight_multiplier configuration directly
self.assertEqual(0.0, self.antiaffin_weigher.weight_multiplier(host1))
host1.aggregates = [
objects.Aggregate(
id=1,
name='foo',
hosts=['fake-host'],
metadata={'soft_anti_affinity_weight_multiplier': '2'},
)]
# read the weight multiplier from metadata to override the config
self.assertEqual(2.0, self.antiaffin_weigher.weight_multiplier(host1))
host1.aggregates = [
objects.Aggregate(
id=1,
name='foo',
hosts=['fake-host'],
metadata={'soft_anti_affinity_weight_multiplier': '2'},
),
objects.Aggregate(
id=2,
name='foo',
hosts=['fake-host'],
metadata={'soft_anti_affinity_weight_multiplier': '1.5'},
)]
# If the host is in multiple aggregates with conflicting weight values
# in their metadata, the minimum value among them is used.
self.assertEqual(1.5, self.antiaffin_weigher.weight_multiplier(host1))
def test_host_with_agg(self):
self.flags(soft_anti_affinity_weight_multiplier=0.0,
group='filter_scheduler')
hostinfo_list = self._get_all_hosts()
aggs = [
objects.Aggregate(
id=1,
name='foo',
hosts=['host1', 'host2', 'host3', 'host4'],
metadata={'soft_anti_affinity_weight_multiplier': '1.5'},
)]
for h in hostinfo_list:
h.aggregates = aggs
weighed_host = self._get_weighed_host(hostinfo_list,
'soft-anti-affinity')
self.assertEqual(1.5, weighed_host.weight)
self.assertEqual('host3', weighed_host.obj.host)
|
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_serialization import jsonutils
import six
from nova import context
from nova import exception
from nova import objects
from nova.objects import base as base_obj
from nova.pci import stats
from nova import test
from nova.virt import hardware as hw
class InstanceInfoTests(test.NoDBTestCase):
def test_instance_info_default(self):
ii = hw.InstanceInfo()
self.assertIsNone(ii.state)
self.assertIsNone(ii.id)
self.assertEqual(0, ii.max_mem_kb)
self.assertEqual(0, ii.mem_kb)
self.assertEqual(0, ii.num_cpu)
self.assertEqual(0, ii.cpu_time_ns)
def test_instance_info(self):
ii = hw.InstanceInfo(state='fake-state',
max_mem_kb=1,
mem_kb=2,
num_cpu=3,
cpu_time_ns=4,
id='fake-id')
self.assertEqual('fake-state', ii.state)
self.assertEqual('fake-id', ii.id)
self.assertEqual(1, ii.max_mem_kb)
self.assertEqual(2, ii.mem_kb)
self.assertEqual(3, ii.num_cpu)
self.assertEqual(4, ii.cpu_time_ns)
def test_instance_info_equals(self):
ii1 = hw.InstanceInfo(state='fake-state',
max_mem_kb=1,
mem_kb=2,
num_cpu=3,
cpu_time_ns=4,
id='fake-id')
ii2 = hw.InstanceInfo(state='fake-state',
max_mem_kb=1,
mem_kb=2,
num_cpu=3,
cpu_time_ns=4,
id='fake-id')
ii3 = hw.InstanceInfo(state='fake-estat',
max_mem_kb=11,
mem_kb=22,
num_cpu=33,
cpu_time_ns=44,
id='fake-di')
self.assertEqual(ii1, ii2)
self.assertNotEqual(ii1, ii3)
class CpuSetTestCase(test.NoDBTestCase):
def test_get_vcpu_pin_set(self):
self.flags(vcpu_pin_set="1-3,5,^2")
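# "1-3,5,^2" selects the range 1-3 plus CPU 5 and then excludes CPU 2,
# leaving {1, 3, 5}.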
cpuset_ids = hw.get_vcpu_pin_set()
self.assertEqual(set([1, 3, 5]), cpuset_ids)
def test_parse_cpu_spec_none_returns_none(self):
self.flags(vcpu_pin_set=None)
cpuset_ids = hw.get_vcpu_pin_set()
self.assertIsNone(cpuset_ids)
def test_parse_cpu_spec_valid_syntax_works(self):
cpuset_ids = hw.parse_cpu_spec("1")
self.assertEqual(set([1]), cpuset_ids)
cpuset_ids = hw.parse_cpu_spec("1,2")
self.assertEqual(set([1, 2]), cpuset_ids)
cpuset_ids = hw.parse_cpu_spec(", , 1 , ,, 2, ,")
self.assertEqual(set([1, 2]), cpuset_ids)
cpuset_ids = hw.parse_cpu_spec("1-1")
self.assertEqual(set([1]), cpuset_ids)
cpuset_ids = hw.parse_cpu_spec(" 1 - 1, 1 - 2 , 1 -3")
self.assertEqual(set([1, 2, 3]), cpuset_ids)
cpuset_ids = hw.parse_cpu_spec("1,^2")
self.assertEqual(set([1]), cpuset_ids)
cpuset_ids = hw.parse_cpu_spec("1-2, ^1")
self.assertEqual(set([2]), cpuset_ids)
cpuset_ids = hw.parse_cpu_spec("1-3,5,^2")
self.assertEqual(set([1, 3, 5]), cpuset_ids)
cpuset_ids = hw.parse_cpu_spec(" 1 - 3 , ^2, 5")
self.assertEqual(set([1, 3, 5]), cpuset_ids)
cpuset_ids = hw.parse_cpu_spec(" 1,1, ^1")
self.assertEqual(set([]), cpuset_ids)
def test_parse_cpu_spec_invalid_syntax_raises(self):
self.assertRaises(exception.Invalid,
hw.parse_cpu_spec,
" -1-3,5,^2")
self.assertRaises(exception.Invalid,
hw.parse_cpu_spec,
"1-3-,5,^2")
self.assertRaises(exception.Invalid,
hw.parse_cpu_spec,
"-3,5,^2")
self.assertRaises(exception.Invalid,
hw.parse_cpu_spec,
"1-,5,^2")
self.assertRaises(exception.Invalid,
hw.parse_cpu_spec,
"1-3,5,^2^")
self.assertRaises(exception.Invalid,
hw.parse_cpu_spec,
"1-3,5,^2-")
self.assertRaises(exception.Invalid,
hw.parse_cpu_spec,
"--13,^^5,^2")
self.assertRaises(exception.Invalid,
hw.parse_cpu_spec,
"a-3,5,^2")
self.assertRaises(exception.Invalid,
hw.parse_cpu_spec,
"1-a,5,^2")
self.assertRaises(exception.Invalid,
hw.parse_cpu_spec,
"1-3,b,^2")
self.assertRaises(exception.Invalid,
hw.parse_cpu_spec,
"1-3,5,^c")
self.assertRaises(exception.Invalid,
hw.parse_cpu_spec,
"3 - 1, 5 , ^ 2 ")
def test_format_cpu_spec(self):
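# format_cpu_spec collapses contiguous IDs into "a-b" ranges unless
# allow_ranges=False, in which case every ID is listed individually.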
cpus = set([])
spec = hw.format_cpu_spec(cpus)
self.assertEqual("", spec)
cpus = []
spec = hw.format_cpu_spec(cpus)
self.assertEqual("", spec)
cpus = set([1, 3])
spec = hw.format_cpu_spec(cpus)
self.assertEqual("1,3", spec)
cpus = [1, 3]
spec = hw.format_cpu_spec(cpus)
self.assertEqual("1,3", spec)
cpus = set([1, 2, 4, 6])
spec = hw.format_cpu_spec(cpus)
self.assertEqual("1-2,4,6", spec)
cpus = [1, 2, 4, 6]
spec = hw.format_cpu_spec(cpus)
self.assertEqual("1-2,4,6", spec)
cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48])
spec = hw.format_cpu_spec(cpus)
self.assertEqual("10-11,13-16,19-20,40,42,48", spec)
cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]
spec = hw.format_cpu_spec(cpus)
self.assertEqual("10-11,13-16,19-20,40,42,48", spec)
cpus = set([1, 2, 4, 6])
spec = hw.format_cpu_spec(cpus, allow_ranges=False)
self.assertEqual("1,2,4,6", spec)
cpus = [1, 2, 4, 6]
spec = hw.format_cpu_spec(cpus, allow_ranges=False)
self.assertEqual("1,2,4,6", spec)
cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48])
spec = hw.format_cpu_spec(cpus, allow_ranges=False)
self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec)
cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]
spec = hw.format_cpu_spec(cpus, allow_ranges=False)
self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec)
class VCPUTopologyTest(test.NoDBTestCase):
def test_validate_config(self):
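# Each "expect" tuple is (preferred sockets, cores, threads,
# max sockets, max cores, max threads); 65536 appears to act as the
# unconstrained default for limits that are not specified.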
testdata = [
{ # Flavor sets preferred topology only
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1",
}),
"image": {
"properties": {}
},
"expect": (
8, 2, 1, 65536, 65536, 65536
)
},
{ # Image topology overrides flavor
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1",
"hw:cpu_max_threads": "2",
}),
"image": {
"properties": {
"hw_cpu_sockets": "4",
"hw_cpu_cores": "2",
"hw_cpu_threads": "2",
}
},
"expect": (
4, 2, 2, 65536, 65536, 2,
)
},
{ # Partial image topology overrides flavor
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1",
}),
"image": {
"properties": {
"hw_cpu_sockets": "2",
}
},
"expect": (
2, -1, -1, 65536, 65536, 65536,
)
},
{ # Restrict use of threads
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_max_threads": "2",
}),
"image": {
"properties": {
"hw_cpu_max_threads": "1",
}
},
"expect": (
-1, -1, -1, 65536, 65536, 1,
)
},
{ # Force use of at least two sockets
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
"image": {
"properties": {}
},
"expect": (
-1, -1, -1, 65536, 8, 1
)
},
{ # Image limits reduce flavor
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
"image": {
"properties": {
"hw_cpu_max_cores": "4",
}
},
"expect": (
-1, -1, -1, 65536, 4, 1
)
},
{ # Image limits kill flavor preferred
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_sockets": "2",
"hw:cpu_cores": "8",
"hw:cpu_threads": "1",
}),
"image": {
"properties": {
"hw_cpu_max_cores": "4",
}
},
"expect": (
-1, -1, -1, 65536, 4, 65536
)
},
{ # Image limits cannot exceed flavor
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
"image": {
"properties": {
"hw_cpu_max_cores": "16",
}
},
"expect": exception.ImageVCPULimitsRangeExceeded,
},
{ # Image preferred cannot exceed flavor
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
"image": {
"properties": {
"hw_cpu_cores": "16",
}
},
"expect": exception.ImageVCPUTopologyRangeExceeded,
},
]
for topo_test in testdata:
if type(topo_test["expect"]) == tuple:
(preferred,
maximum) = hw._get_cpu_topology_constraints(
topo_test["flavor"],
topo_test["image"])
self.assertEqual(topo_test["expect"][0], preferred.sockets)
self.assertEqual(topo_test["expect"][1], preferred.cores)
self.assertEqual(topo_test["expect"][2], preferred.threads)
self.assertEqual(topo_test["expect"][3], maximum.sockets)
self.assertEqual(topo_test["expect"][4], maximum.cores)
self.assertEqual(topo_test["expect"][5], maximum.threads)
else:
self.assertRaises(topo_test["expect"],
hw._get_cpu_topology_constraints,
topo_test["flavor"],
topo_test["image"])
def test_possible_topologies(self):
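# Each "expect" entry is a [sockets, cores, threads] triple whose
# product equals the requested vcpus, bounded by the max* limits.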
testdata = [
{
"allow_threads": True,
"vcpus": 8,
"maxsockets": 8,
"maxcores": 8,
"maxthreads": 2,
"expect": [
[8, 1, 1],
[4, 2, 1],
[2, 4, 1],
[1, 8, 1],
[4, 1, 2],
[2, 2, 2],
[1, 4, 2],
]
},
{
"allow_threads": False,
"vcpus": 8,
"maxsockets": 8,
"maxcores": 8,
"maxthreads": 2,
"expect": [
[8, 1, 1],
[4, 2, 1],
[2, 4, 1],
[1, 8, 1],
]
},
{
"allow_threads": True,
"vcpus": 8,
"maxsockets": 1024,
"maxcores": 1024,
"maxthreads": 2,
"expect": [
[8, 1, 1],
[4, 2, 1],
[2, 4, 1],
[1, 8, 1],
[4, 1, 2],
[2, 2, 2],
[1, 4, 2],
]
},
{
"allow_threads": True,
"vcpus": 8,
"maxsockets": 1024,
"maxcores": 1,
"maxthreads": 2,
"expect": [
[8, 1, 1],
[4, 1, 2],
]
},
{
"allow_threads": True,
"vcpus": 7,
"maxsockets": 8,
"maxcores": 8,
"maxthreads": 2,
"expect": [
[7, 1, 1],
[1, 7, 1],
]
},
{
"allow_threads": True,
"vcpus": 8,
"maxsockets": 2,
"maxcores": 1,
"maxthreads": 1,
"expect": exception.ImageVCPULimitsRangeImpossible,
},
{
"allow_threads": False,
"vcpus": 8,
"maxsockets": 2,
"maxcores": 1,
"maxthreads": 4,
"expect": exception.ImageVCPULimitsRangeImpossible,
},
{
"allow_threads": True,
"specified_threads": 2,
"vcpus": 8,
"maxsockets": 4,
"maxcores": 2,
"maxthreads": 4,
"expect": [
[4, 1, 2],
[2, 2, 2],
]
},
{
"allow_threads": False,
"specified_threads": 2,
"vcpus": 8,
"maxsockets": 8,
"maxcores": 8,
"maxthreads": 2,
"expect": [
[8, 1, 1],
[4, 2, 1],
[2, 4, 1],
[1, 8, 1],
]
},
]
for topo_test in testdata:
if type(topo_test["expect"]) == list:
actual = []
for topology in hw._get_possible_cpu_topologies(
topo_test["vcpus"],
objects.VirtCPUTopology(
sockets=topo_test["maxsockets"],
cores=topo_test["maxcores"],
threads=topo_test["maxthreads"]),
topo_test["allow_threads"],
topo_test.get("specified_threads")):
actual.append([topology.sockets,
topology.cores,
topology.threads])
self.assertEqual(topo_test["expect"], actual)
else:
self.assertRaises(topo_test["expect"],
hw._get_possible_cpu_topologies,
topo_test["vcpus"],
objects.VirtCPUTopology(
sockets=topo_test["maxsockets"],
cores=topo_test["maxcores"],
threads=topo_test["maxthreads"]),
topo_test["allow_threads"],
topo_test.get("specified_threads"))
def test_sorting_topologies(self):
testdata = [
{
"allow_threads": True,
"vcpus": 8,
"maxsockets": 8,
"maxcores": 8,
"maxthreads": 2,
"sockets": 4,
"cores": 2,
"threads": 1,
"expect": [
[4, 2, 1], # score = 2
[8, 1, 1], # score = 1
[2, 4, 1], # score = 1
[1, 8, 1], # score = 1
[4, 1, 2], # score = 1
[2, 2, 2], # score = 1
[1, 4, 2], # score = 1
]
},
{
"allow_threads": True,
"vcpus": 8,
"maxsockets": 1024,
"maxcores": 1024,
"maxthreads": 2,
"sockets": -1,
"cores": 4,
"threads": -1,
"expect": [
[2, 4, 1], # score = 1
[1, 4, 2], # score = 1
[8, 1, 1], # score = 0
[4, 2, 1], # score = 0
[1, 8, 1], # score = 0
[4, 1, 2], # score = 0
[2, 2, 2], # score = 0
]
},
{
"allow_threads": True,
"vcpus": 8,
"maxsockets": 1024,
"maxcores": 1,
"maxthreads": 2,
"sockets": -1,
"cores": -1,
"threads": 2,
"expect": [
[4, 1, 2], # score = 1
[8, 1, 1], # score = 0
]
},
{
"allow_threads": False,
"vcpus": 8,
"maxsockets": 1024,
"maxcores": 1,
"maxthreads": 2,
"sockets": -1,
"cores": -1,
"threads": 2,
"expect": [
[8, 1, 1], # score = 0
]
},
]
for topo_test in testdata:
actual = []
possible = hw._get_possible_cpu_topologies(
topo_test["vcpus"],
objects.VirtCPUTopology(sockets=topo_test["maxsockets"],
cores=topo_test["maxcores"],
threads=topo_test["maxthreads"]),
topo_test["allow_threads"],
None)
tops = hw._sort_possible_cpu_topologies(
possible,
objects.VirtCPUTopology(sockets=topo_test["sockets"],
cores=topo_test["cores"],
threads=topo_test["threads"]))
for topology in tops:
actual.append([topology.sockets,
topology.cores,
topology.threads])
self.assertEqual(topo_test["expect"], actual)
def test_best_config(self):
testdata = [
{ # Flavor sets preferred topology only
"allow_threads": True,
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1"
}),
"image": {
"properties": {}
},
"expect": [8, 2, 1],
},
{ # Image topology overrides flavor
"allow_threads": True,
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1",
"hw:cpu_maxthreads": "2",
}),
"image": {
"properties": {
"hw_cpu_sockets": "4",
"hw_cpu_cores": "2",
"hw_cpu_threads": "2",
}
},
"expect": [4, 2, 2],
},
{ # Image topology overrides flavor
"allow_threads": False,
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1",
"hw:cpu_maxthreads": "2",
}),
"image": {
"properties": {
"hw_cpu_sockets": "4",
"hw_cpu_cores": "2",
"hw_cpu_threads": "2",
}
},
"expect": [8, 2, 1],
},
{ # Partial image topology overrides flavor
"allow_threads": True,
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1"
}),
"image": {
"properties": {
"hw_cpu_sockets": "2"
}
},
"expect": [2, 8, 1],
},
{ # Restrict use of threads
"allow_threads": True,
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_max_threads": "1"
}),
"image": {
"properties": {}
},
"expect": [16, 1, 1]
},
{ # Force use of at least two sockets
"allow_threads": True,
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
"image": {
"properties": {}
},
"expect": [16, 1, 1]
},
{ # Image limits reduce flavor
"allow_threads": True,
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_max_sockets": "8",
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
"image": {
"properties": {
"hw_cpu_max_sockets": 4,
}
},
"expect": [4, 4, 1]
},
{ # Image limits kill flavor preferred
"allow_threads": True,
"flavor": objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs={
"hw:cpu_sockets": "2",
"hw:cpu_cores": "8",
"hw:cpu_threads": "1",
}),
"image": {
"properties": {
"hw_cpu_max_cores": 4,
}
},
"expect": [16, 1, 1]
},
{ # NUMA needs threads, only cores requested by flavor
"allow_threads": True,
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={
"hw:cpu_cores": "2",
}),
"image": {
"properties": {
"hw_cpu_max_cores": 2,
}
},
"numa_topology": objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]), memory=1024,
cpu_topology=objects.VirtCPUTopology(
sockets=1, cores=1, threads=2)),
objects.InstanceNUMACell(
id=1, cpuset=set([2, 3]), memory=1024)]),
"expect": [1, 2, 2]
},
{ # NUMA needs threads, but more than requested by the flavor - the
# smallest number of threads wins
"allow_threads": True,
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={
"hw:cpu_threads": "2",
}),
"image": {
"properties": {}
},
"numa_topology": objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
cpu_topology=objects.VirtCPUTopology(
sockets=1, cores=1, threads=4))]),
"expect": [2, 1, 2]
},
{ # NUMA needs a different number of threads per cell - the smallest
# number of threads wins
"allow_threads": True,
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={}),
"image": {
"properties": {}
},
"numa_topology": objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
cpu_topology=objects.VirtCPUTopology(
sockets=1, cores=2, threads=2)),
objects.InstanceNUMACell(
id=1, cpuset=set([4, 5, 6, 7]), memory=1024,
cpu_topology=objects.VirtCPUTopology(
sockets=1, cores=1, threads=4))]),
"expect": [4, 1, 2]
},
]
for topo_test in testdata:
topology = hw._get_desirable_cpu_topologies(
topo_test["flavor"],
topo_test["image"],
topo_test["allow_threads"],
topo_test.get("numa_topology"))[0]
self.assertEqual(topo_test["expect"][0], topology.sockets)
self.assertEqual(topo_test["expect"][1], topology.cores)
self.assertEqual(topo_test["expect"][2], topology.threads)
class NUMATopologyTest(test.NoDBTestCase):
def test_topology_constraints(self):
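# "expect" is interpreted below as: None -> no NUMA topology requested,
# an exception class -> numa_get_constraints should raise it, otherwise
# an InstanceNUMATopology that is compared cell by cell.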
testdata = [
{
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
}),
"image": {
},
"expect": None,
},
{
"flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={
"hw:numa_nodes": 2
}),
"image": {
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), memory=1024),
objects.InstanceNUMACell(
id=1, cpuset=set([4, 5, 6, 7]), memory=1024),
]),
},
{
"flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={
"hw:mem_page_size": 2048
}),
"image": {
},
"expect": objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
memory=2048, pagesize=2048)
]),
},
{
# vcpus is not a multiple of nodes, so it
# is an error to not provide cpu/mem mapping
"flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={
"hw:numa_nodes": 3
}),
"image": {
},
"expect": exception.ImageNUMATopologyAsymmetric,
},
{
"flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={
"hw:numa_nodes": 3,
"hw:numa_cpus.0": "0-3",
"hw:numa_mem.0": "1024",
"hw:numa_cpus.1": "4,6",
"hw:numa_mem.1": "512",
"hw:numa_cpus.2": "5,7",
"hw:numa_mem.2": "512",
}),
"image": {
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), memory=1024),
objects.InstanceNUMACell(
id=1, cpuset=set([4, 6]), memory=512),
objects.InstanceNUMACell(
id=2, cpuset=set([5, 7]), memory=512)
]),
},
{
# Request a CPU that is out of range
# wrt vCPU count
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 1,
"hw:numa_cpus.0": "0-16",
"hw:numa_mem.0": "2048",
}),
"image": {
},
"expect": exception.ImageNUMATopologyCPUOutOfRange,
},
{
# Request the same CPU in two nodes
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2,
"hw:numa_cpus.0": "0-7",
"hw:numa_mem.0": "1024",
"hw:numa_cpus.1": "0-7",
"hw:numa_mem.1": "1024",
}),
"image": {
},
"expect": exception.ImageNUMATopologyCPUDuplicates,
},
{
# Request with some CPUs not assigned
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2,
"hw:numa_cpus.0": "0-2",
"hw:numa_mem.0": "1024",
"hw:numa_cpus.1": "3-4",
"hw:numa_mem.1": "1024",
}),
"image": {
},
"expect": exception.ImageNUMATopologyCPUsUnassigned,
},
{
# Request too little memory vs flavor total
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2,
"hw:numa_cpus.0": "0-3",
"hw:numa_mem.0": "512",
"hw:numa_cpus.1": "4-7",
"hw:numa_mem.1": "512",
}),
"image": {
},
"expect": exception.ImageNUMATopologyMemoryOutOfRange,
},
{
# Request too much memory vs flavor total
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2,
"hw:numa_cpus.0": "0-3",
"hw:numa_mem.0": "1576",
"hw:numa_cpus.1": "4-7",
"hw:numa_mem.1": "1576",
}),
"image": {
},
"expect": exception.ImageNUMATopologyMemoryOutOfRange,
},
{
# Request missing mem.0
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2,
"hw:numa_cpus.0": "0-3",
"hw:numa_mem.1": "1576",
}),
"image": {
},
"expect": exception.ImageNUMATopologyIncomplete,
},
{
# Request missing cpu.0
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2,
"hw:numa_mem.0": "1576",
"hw:numa_cpus.1": "4-7",
}),
"image": {
},
"expect": exception.ImageNUMATopologyIncomplete,
},
{
# Image attempts to override flavor
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2,
}),
"image": {
"properties": {
"hw_numa_nodes": 4}
},
"expect": exception.ImageNUMATopologyForbidden,
},
{
# NUMA + CPU pinning requested in the flavor
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2, "hw:cpu_policy": "dedicated"
}),
"image": {
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]), memory=1024,
cpu_pinning={}),
objects.InstanceNUMACell(
id=1, cpuset=set([2, 3]), memory=1024,
cpu_pinning={})])
},
{
# no NUMA + CPU pinning requested in the flavor
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={
"hw:cpu_policy": "dedicated"
}),
"image": {
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
cpu_pinning={})])
},
{
# NUMA + CPU pinning requested in the image
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2
}),
"image": {
"properties": {
"hw_cpu_policy": "dedicated"}
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]), memory=1024,
cpu_pinning={}),
objects.InstanceNUMACell(
id=1, cpuset=set([2, 3]), memory=1024,
cpu_pinning={})])
},
{
# no NUMA + CPU pinning requested in the image
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={}),
"image": {
"properties": {
"hw_cpu_policy": "dedicated"}
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
cpu_pinning={})])
},
{
# Invalid CPU pinning override
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2, "hw:cpu_policy": "shared"
}),
"image": {
"properties": {
"hw_cpu_policy": "dedicated"}
},
"expect": exception.ImageCPUPinningForbidden,
},
]
for testitem in testdata:
if testitem["expect"] is None:
topology = hw.numa_get_constraints(
testitem["flavor"], testitem["image"])
self.assertIsNone(topology)
elif type(testitem["expect"]) == type:
self.assertRaises(testitem["expect"],
hw.numa_get_constraints,
testitem["flavor"],
testitem["image"])
else:
topology = hw.numa_get_constraints(
testitem["flavor"], testitem["image"])
self.assertEqual(len(testitem["expect"].cells),
len(topology.cells))
for i in range(len(topology.cells)):
self.assertEqual(testitem["expect"].cells[i].id,
topology.cells[i].id)
self.assertEqual(testitem["expect"].cells[i].cpuset,
topology.cells[i].cpuset)
self.assertEqual(testitem["expect"].cells[i].memory,
topology.cells[i].memory)
self.assertEqual(testitem["expect"].cells[i].pagesize,
topology.cells[i].pagesize)
self.assertEqual(testitem["expect"].cells[i].cpu_pinning,
topology.cells[i].cpu_pinning)
def test_host_usage_contiguous(self):
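# Usage from both instances is summed per host cell, e.g. cell 0 gets
# 3 + 2 = 5 vCPUs and 256 + 256 = 512 MB of memory used.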
hpages0_4K = objects.NUMAPagesTopology(size_kb=4, total=256, used=0)
hpages0_2M = objects.NUMAPagesTopology(size_kb=2048, total=0, used=1)
hpages1_4K = objects.NUMAPagesTopology(size_kb=4, total=128, used=2)
hpages1_2M = objects.NUMAPagesTopology(size_kb=2048, total=0, used=3)
hosttopo = objects.NUMATopology(cells=[
objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
cpu_usage=0, memory_usage=0, mempages=[
hpages0_4K, hpages0_2M],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([4, 6]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[
hpages1_4K, hpages1_2M],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=2, cpuset=set([5, 7]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
])
instance1 = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=256),
objects.InstanceNUMACell(id=1, cpuset=set([4]), memory=256),
])
instance2 = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256),
objects.InstanceNUMACell(id=1, cpuset=set([5, 7]), memory=256),
])
hostusage = hw.numa_usage_from_instances(
hosttopo, [instance1, instance2])
self.assertEqual(len(hosttopo), len(hostusage))
self.assertIsInstance(hostusage.cells[0], objects.NUMACell)
self.assertEqual(hosttopo.cells[0].cpuset,
hostusage.cells[0].cpuset)
self.assertEqual(hosttopo.cells[0].memory,
hostusage.cells[0].memory)
self.assertEqual(hostusage.cells[0].cpu_usage, 5)
self.assertEqual(hostusage.cells[0].memory_usage, 512)
self.assertEqual(hostusage.cells[0].mempages, [
hpages0_4K, hpages0_2M])
self.assertIsInstance(hostusage.cells[1], objects.NUMACell)
self.assertEqual(hosttopo.cells[1].cpuset,
hostusage.cells[1].cpuset)
self.assertEqual(hosttopo.cells[1].memory,
hostusage.cells[1].memory)
self.assertEqual(hostusage.cells[1].cpu_usage, 3)
self.assertEqual(hostusage.cells[1].memory_usage, 512)
self.assertEqual(hostusage.cells[1].mempages, [
hpages1_4K, hpages1_2M])
self.assertEqual(256, hpages0_4K.total)
self.assertEqual(0, hpages0_4K.used)
self.assertEqual(0, hpages0_2M.total)
self.assertEqual(1, hpages0_2M.used)
self.assertIsInstance(hostusage.cells[2], objects.NUMACell)
self.assertEqual(hosttopo.cells[2].cpuset,
hostusage.cells[2].cpuset)
self.assertEqual(hosttopo.cells[2].memory,
hostusage.cells[2].memory)
self.assertEqual(hostusage.cells[2].cpu_usage, 0)
self.assertEqual(hostusage.cells[2].memory_usage, 0)
self.assertEqual(128, hpages1_4K.total)
self.assertEqual(2, hpages1_4K.used)
self.assertEqual(0, hpages1_2M.total)
self.assertEqual(3, hpages1_2M.used)
def test_host_usage_sparse(self):
hosttopo = objects.NUMATopology(cells=[
objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=5, cpuset=set([4, 6]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=6, cpuset=set([5, 7]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
])
instance1 = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=256),
objects.InstanceNUMACell(id=6, cpuset=set([4]), memory=256),
])
instance2 = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256,
cpu_usage=0, memory_usage=0, mempages=[]),
objects.InstanceNUMACell(id=5, cpuset=set([5, 7]), memory=256,
cpu_usage=0, memory_usage=0, mempages=[]),
])
hostusage = hw.numa_usage_from_instances(
hosttopo, [instance1, instance2])
self.assertEqual(len(hosttopo), len(hostusage))
self.assertIsInstance(hostusage.cells[0], objects.NUMACell)
self.assertEqual(hosttopo.cells[0].id,
hostusage.cells[0].id)
self.assertEqual(hosttopo.cells[0].cpuset,
hostusage.cells[0].cpuset)
self.assertEqual(hosttopo.cells[0].memory,
hostusage.cells[0].memory)
self.assertEqual(hostusage.cells[0].cpu_usage, 5)
self.assertEqual(hostusage.cells[0].memory_usage, 512)
self.assertIsInstance(hostusage.cells[1], objects.NUMACell)
self.assertEqual(hosttopo.cells[1].id,
hostusage.cells[1].id)
self.assertEqual(hosttopo.cells[1].cpuset,
hostusage.cells[1].cpuset)
self.assertEqual(hosttopo.cells[1].memory,
hostusage.cells[1].memory)
self.assertEqual(hostusage.cells[1].cpu_usage, 2)
self.assertEqual(hostusage.cells[1].memory_usage, 256)
self.assertIsInstance(hostusage.cells[2], objects.NUMACell)
self.assertEqual(hosttopo.cells[2].cpuset,
hostusage.cells[2].cpuset)
self.assertEqual(hosttopo.cells[2].memory,
hostusage.cells[2].memory)
self.assertEqual(hostusage.cells[2].cpu_usage, 1)
self.assertEqual(hostusage.cells[2].memory_usage, 256)
def test_host_usage_cumulative_with_free(self):
hosttopo = objects.NUMATopology(cells=[
objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
cpu_usage=2, memory_usage=512, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([4, 6]), memory=512,
cpu_usage=1, memory_usage=512, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=2, cpuset=set([5, 7]), memory=256,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
])
instance1 = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=256),
objects.InstanceNUMACell(id=2, cpuset=set([4]), memory=256)])
hostusage = hw.numa_usage_from_instances(
hosttopo, [instance1])
self.assertIsInstance(hostusage.cells[0], objects.NUMACell)
self.assertEqual(hostusage.cells[0].cpu_usage, 5)
self.assertEqual(hostusage.cells[0].memory_usage, 1024)
self.assertIsInstance(hostusage.cells[1], objects.NUMACell)
self.assertEqual(hostusage.cells[1].cpu_usage, 2)
self.assertEqual(hostusage.cells[1].memory_usage, 768)
self.assertIsInstance(hostusage.cells[2], objects.NUMACell)
self.assertEqual(hostusage.cells[2].cpu_usage, 1)
self.assertEqual(hostusage.cells[2].memory_usage, 256)
# Test freeing of resources
hostusage = hw.numa_usage_from_instances(
hostusage, [instance1], free=True)
self.assertEqual(hostusage.cells[0].cpu_usage, 2)
self.assertEqual(hostusage.cells[0].memory_usage, 512)
self.assertEqual(hostusage.cells[1].cpu_usage, 1)
self.assertEqual(hostusage.cells[1].memory_usage, 512)
self.assertEqual(hostusage.cells[2].cpu_usage, 0)
self.assertEqual(hostusage.cells[2].memory_usage, 0)
def test_topo_usage_none(self):
hosttopo = objects.NUMATopology(cells=[
objects.NUMACell(id=0, cpuset=set([0, 1]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([2, 3]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
])
instance1 = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256),
objects.InstanceNUMACell(id=2, cpuset=set([2]), memory=256),
])
hostusage = hw.numa_usage_from_instances(
None, [instance1])
self.assertIsNone(hostusage)
hostusage = hw.numa_usage_from_instances(
hosttopo, [])
self.assertEqual(hostusage.cells[0].cpu_usage, 0)
self.assertEqual(hostusage.cells[0].memory_usage, 0)
self.assertEqual(hostusage.cells[1].cpu_usage, 0)
self.assertEqual(hostusage.cells[1].memory_usage, 0)
hostusage = hw.numa_usage_from_instances(
hosttopo, None)
self.assertEqual(hostusage.cells[0].cpu_usage, 0)
self.assertEqual(hostusage.cells[0].memory_usage, 0)
self.assertEqual(hostusage.cells[1].cpu_usage, 0)
self.assertEqual(hostusage.cells[1].memory_usage, 0)
def assertNUMACellMatches(self, expected_cell, got_cell):
attrs = ('cpuset', 'memory', 'id')
if isinstance(expected_cell, objects.NUMATopology):
attrs += ('cpu_usage', 'memory_usage')
for attr in attrs:
self.assertEqual(getattr(expected_cell, attr),
getattr(got_cell, attr))
def test_json(self):
expected = objects.NUMATopology(
cells=[
objects.NUMACell(id=1, cpuset=set([1, 2]), memory=1024,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=2, cpuset=set([3, 4]), memory=1024,
cpu_usage=0, memory_usage=0,
mempages=[], siblings=[],
pinned_cpus=set([]))])
got = objects.NUMATopology.obj_from_db_obj(expected._to_json())
for exp_cell, got_cell in zip(expected.cells, got.cells):
self.assertNUMACellMatches(exp_cell, got_cell)
class VirtNUMATopologyCellUsageTestCase(test.NoDBTestCase):
def test_fit_instance_cell_success_no_limit(self):
host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))
instance_cell = objects.InstanceNUMACell(
id=0, cpuset=set([1, 2]), memory=1024)
fitted_cell = hw._numa_fit_instance_cell(host_cell, instance_cell)
self.assertIsInstance(fitted_cell, objects.InstanceNUMACell)
self.assertEqual(host_cell.id, fitted_cell.id)
def test_fit_instance_cell_success_w_limit(self):
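# With allocation ratios of 2, the 2-CPU / 1024 MB host cell can be
# oversubscribed up to 4 vCPUs / 2048 MB, so despite the existing usage
# the 2-vCPU / 1024 MB instance cell below should still fit.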
host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
cpu_usage=2,
memory_usage=1024,
mempages=[], siblings=[],
pinned_cpus=set([]))
limit_cell = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
instance_cell = objects.InstanceNUMACell(
id=0, cpuset=set([1, 2]), memory=1024)
fitted_cell = hw._numa_fit_instance_cell(
host_cell, instance_cell, limit_cell=limit_cell)
self.assertIsInstance(fitted_cell, objects.InstanceNUMACell)
self.assertEqual(host_cell.id, fitted_cell.id)
def test_fit_instance_cell_self_overcommit(self):
host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))
limit_cell = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
instance_cell = objects.InstanceNUMACell(
id=0, cpuset=set([1, 2, 3]), memory=4096)
fitted_cell = hw._numa_fit_instance_cell(
host_cell, instance_cell, limit_cell=limit_cell)
self.assertIsNone(fitted_cell)
def test_fit_instance_cell_fail_w_limit(self):
host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
cpu_usage=2,
memory_usage=1024,
mempages=[], siblings=[],
pinned_cpus=set([]))
instance_cell = objects.InstanceNUMACell(
id=0, cpuset=set([1, 2]), memory=4096)
limit_cell = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
fitted_cell = hw._numa_fit_instance_cell(
host_cell, instance_cell, limit_cell=limit_cell)
self.assertIsNone(fitted_cell)
instance_cell = objects.InstanceNUMACell(
id=0, cpuset=set([1, 2, 3, 4, 5]), memory=1024)
fitted_cell = hw._numa_fit_instance_cell(
host_cell, instance_cell, limit_cell=limit_cell)
self.assertIsNone(fitted_cell)
class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
def setUp(self):
super(VirtNUMAHostTopologyTestCase, self).setUp()
self.host = objects.NUMATopology(
cells=[
objects.NUMACell(id=1, cpuset=set([1, 2]), memory=2048,
cpu_usage=2, memory_usage=2048,
mempages=[], siblings=[],
pinned_cpus=set([])),
objects.NUMACell(id=2, cpuset=set([3, 4]), memory=2048,
cpu_usage=2, memory_usage=2048,
mempages=[], siblings=[],
pinned_cpus=set([]))])
self.limits = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
self.instance1 = objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([1, 2]), memory=2048)])
self.instance2 = objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([1, 2, 3, 4]), memory=1024)])
self.instance3 = objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([1, 2]), memory=1024)])
def test_get_fitting_success_no_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
self.host, self.instance1)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.host = hw.numa_usage_from_instances(self.host,
[fitted_instance1])
fitted_instance2 = hw.numa_fit_instance_to_host(
self.host, self.instance3)
self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology)
def test_get_fitting_success_limits(self):
fitted_instance = hw.numa_fit_instance_to_host(
self.host, self.instance3, self.limits)
self.assertIsInstance(fitted_instance, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance.cells[0].id)
def test_get_fitting_fails_no_limits(self):
fitted_instance = hw.numa_fit_instance_to_host(
self.host, self.instance2, self.limits)
self.assertIsNone(fitted_instance)
def test_get_fitting_cumulative_fails_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
self.host, self.instance1, self.limits)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance1.cells[0].id)
self.host = hw.numa_usage_from_instances(self.host,
[fitted_instance1])
fitted_instance2 = hw.numa_fit_instance_to_host(
self.host, self.instance2, self.limits)
self.assertIsNone(fitted_instance2)
def test_get_fitting_cumulative_success_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
self.host, self.instance1, self.limits)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance1.cells[0].id)
self.host = hw.numa_usage_from_instances(self.host,
[fitted_instance1])
fitted_instance2 = hw.numa_fit_instance_to_host(
self.host, self.instance3, self.limits)
self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology)
self.assertEqual(2, fitted_instance2.cells[0].id)
def test_get_fitting_pci_success(self):
pci_request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': '8086'}])
pci_reqs = [pci_request]
pci_stats = stats.PciDeviceStats()
with mock.patch.object(stats.PciDeviceStats,
'support_requests', return_value=True):
fitted_instance1 = hw.numa_fit_instance_to_host(self.host,
self.instance1,
pci_requests=pci_reqs,
pci_stats=pci_stats)
self.assertIsInstance(fitted_instance1,
objects.InstanceNUMATopology)
def test_get_fitting_pci_fail(self):
pci_request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': '8086'}])
pci_reqs = [pci_request]
pci_stats = stats.PciDeviceStats()
with mock.patch.object(stats.PciDeviceStats,
'support_requests', return_value=False):
fitted_instance1 = hw.numa_fit_instance_to_host(
self.host,
self.instance1,
pci_requests=pci_reqs,
pci_stats=pci_stats)
self.assertIsNone(fitted_instance1)
class NumberOfSerialPortsTest(test.NoDBTestCase):
def test_flavor(self):
flavor = objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={"hw:serial_port_count": 3})
num_ports = hw.get_number_of_serial_ports(flavor, None)
self.assertEqual(3, num_ports)
def test_image_meta(self):
flavor = objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={})
image_meta = {"properties": {"hw_serial_port_count": 2}}
num_ports = hw.get_number_of_serial_ports(flavor, image_meta)
self.assertEqual(2, num_ports)
def test_flavor_invalid_value(self):
flavor = objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={"hw:serial_port_count": 'foo'})
image_meta = {"properties": {}}
self.assertRaises(exception.ImageSerialPortNumberInvalid,
hw.get_number_of_serial_ports,
flavor, image_meta)
def test_image_meta_invalid_value(self):
flavor = objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={})
image_meta = {"properties": {"hw_serial_port_count": 'bar'}}
self.assertRaises(exception.ImageSerialPortNumberInvalid,
hw.get_number_of_serial_ports,
flavor, image_meta)
def test_image_meta_smaller_than_flavor(self):
flavor = objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={"hw:serial_port_count": 3})
image_meta = {"properties": {"hw_serial_port_count": 2}}
num_ports = hw.get_number_of_serial_ports(flavor, image_meta)
self.assertEqual(2, num_ports)
def test_flavor_smaller_than_image_meta(self):
flavor = objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={"hw:serial_port_count": 3})
image_meta = {"properties": {"hw_serial_port_count": 4}}
self.assertRaises(exception.ImageSerialPortNumberExceedFlavorValue,
hw.get_number_of_serial_ports,
flavor, image_meta)
class HelperMethodsTestCase(test.NoDBTestCase):
def setUp(self):
super(HelperMethodsTestCase, self).setUp()
self.hosttopo = objects.NUMATopology(cells=[
objects.NUMACell(id=0, cpuset=set([0, 1]), memory=512,
memory_usage=0, cpu_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([2, 3]), memory=512,
memory_usage=0, cpu_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
])
self.instancetopo = objects.InstanceNUMATopology(
instance_uuid='fake-uuid',
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]), memory=256, pagesize=2048,
cpu_pinning={1: 3, 0: 4}),
objects.InstanceNUMACell(
id=1, cpuset=set([2]), memory=256, pagesize=2048,
cpu_pinning={2: 5}),
])
self.context = context.RequestContext('fake-user',
'fake-project')
def _check_usage(self, host_usage):
self.assertEqual(2, host_usage.cells[0].cpu_usage)
self.assertEqual(256, host_usage.cells[0].memory_usage)
self.assertEqual(1, host_usage.cells[1].cpu_usage)
self.assertEqual(256, host_usage.cells[1].memory_usage)
def test_dicts_json(self):
host = {'numa_topology': self.hosttopo._to_json()}
instance = {'numa_topology': self.instancetopo._to_json()}
res = hw.get_host_numa_usage_from_instance(host, instance)
self.assertIsInstance(res, six.string_types)
self._check_usage(objects.NUMATopology.obj_from_db_obj(res))
def test_dicts_instance_json(self):
host = {'numa_topology': self.hosttopo}
instance = {'numa_topology': self.instancetopo._to_json()}
res = hw.get_host_numa_usage_from_instance(host, instance)
self.assertIsInstance(res, objects.NUMATopology)
self._check_usage(res)
def test_dicts_instance_json_old(self):
host = {'numa_topology': self.hosttopo}
instance = {'numa_topology':
jsonutils.dumps(self.instancetopo._to_dict())}
res = hw.get_host_numa_usage_from_instance(host, instance)
self.assertIsInstance(res, objects.NUMATopology)
self._check_usage(res)
def test_dicts_host_json(self):
host = {'numa_topology': self.hosttopo._to_json()}
instance = {'numa_topology': self.instancetopo}
res = hw.get_host_numa_usage_from_instance(host, instance)
self.assertIsInstance(res, six.string_types)
self._check_usage(objects.NUMATopology.obj_from_db_obj(res))
def test_dicts_host_json_old(self):
host = {'numa_topology': jsonutils.dumps(
self.hosttopo._to_dict())}
instance = {'numa_topology': self.instancetopo}
res = hw.get_host_numa_usage_from_instance(host, instance)
self.assertIsInstance(res, six.string_types)
self._check_usage(objects.NUMATopology.obj_from_db_obj(res))
def test_object_host_instance_json(self):
host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
instance = {'numa_topology': self.instancetopo._to_json()}
res = hw.get_host_numa_usage_from_instance(host, instance)
self.assertIsInstance(res, six.string_types)
self._check_usage(objects.NUMATopology.obj_from_db_obj(res))
def test_object_host_instance(self):
host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
instance = {'numa_topology': self.instancetopo}
res = hw.get_host_numa_usage_from_instance(host, instance)
self.assertIsInstance(res, six.string_types)
self._check_usage(objects.NUMATopology.obj_from_db_obj(res))
def test_instance_with_fetch(self):
host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
fake_uuid = str(uuid.uuid4())
instance = {'uuid': fake_uuid}
with mock.patch.object(objects.InstanceNUMATopology,
'get_by_instance_uuid', return_value=None) as get_mock:
res = hw.get_host_numa_usage_from_instance(host, instance)
self.assertIsInstance(res, six.string_types)
self.assertTrue(get_mock.called)
def test_object_instance_with_load(self):
host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
fake_uuid = str(uuid.uuid4())
instance = objects.Instance(context=self.context, uuid=fake_uuid)
with mock.patch.object(objects.InstanceNUMATopology,
'get_by_instance_uuid', return_value=None) as get_mock:
res = hw.get_host_numa_usage_from_instance(host, instance)
self.assertIsInstance(res, six.string_types)
self.assertTrue(get_mock.called)
def test_instance_serialized_by_build_request_spec(self):
host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
fake_uuid = str(uuid.uuid4())
instance = objects.Instance(context=self.context, id=1, uuid=fake_uuid,
numa_topology=self.instancetopo)
# NOTE (ndipanov): This emulates scheduler.utils.build_request_spec
# We can remove this test once we no longer use that method.
instance_raw = jsonutils.to_primitive(
base_obj.obj_to_primitive(instance))
res = hw.get_host_numa_usage_from_instance(host, instance_raw)
self.assertIsInstance(res, six.string_types)
self._check_usage(objects.NUMATopology.obj_from_db_obj(res))
def test_attr_host(self):
class Host(object):
def __init__(obj):
obj.numa_topology = self.hosttopo._to_json()
host = Host()
instance = {'numa_topology': self.instancetopo._to_json()}
res = hw.get_host_numa_usage_from_instance(host, instance)
self.assertIsInstance(res, six.string_types)
self._check_usage(objects.NUMATopology.obj_from_db_obj(res))
def test_never_serialize_result(self):
host = {'numa_topology': self.hosttopo._to_json()}
instance = {'numa_topology': self.instancetopo}
res = hw.get_host_numa_usage_from_instance(host, instance,
never_serialize_result=True)
self.assertIsInstance(res, objects.NUMATopology)
self._check_usage(res)
def test_dict_numa_topology_to_obj(self):
fake_uuid = str(uuid.uuid4())
instance = objects.Instance(context=self.context, id=1, uuid=fake_uuid,
numa_topology=self.instancetopo)
instance_dict = base_obj.obj_to_primitive(instance)
instance_numa_topo = hw.instance_topology_from_instance(instance_dict)
for expected_cell, actual_cell in zip(self.instancetopo.cells,
instance_numa_topo.cells):
for k in expected_cell.fields:
self.assertEqual(getattr(expected_cell, k),
getattr(actual_cell, k))
class VirtMemoryPagesTestCase(test.NoDBTestCase):
def test_cell_instance_pagesize(self):
cell = objects.InstanceNUMACell(
id=0, cpuset=set([0]), memory=1024, pagesize=2048)
self.assertEqual(0, cell.id)
self.assertEqual(set([0]), cell.cpuset)
self.assertEqual(1024, cell.memory)
self.assertEqual(2048, cell.pagesize)
def test_numa_pagesize_usage_from_cell(self):
instcell = objects.InstanceNUMACell(
id=0, cpuset=set([0]), memory=512, pagesize=2048)
hostcell = objects.NUMACell(
id=0, cpuset=set([0]), memory=1024,
cpu_usage=0, memory_usage=0,
mempages=[objects.NUMAPagesTopology(
size_kb=2048,
total=512,
used=0)],
siblings=[], pinned_cpus=set([]))
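        # 512 MB of instance memory backed by 2048 KB pages:
        # 512 * 1024 / 2048 = 256 pages consumed out of the host's 512.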
topo = hw._numa_pagesize_usage_from_cell(hostcell, instcell, 1)
self.assertEqual(2048, topo[0].size_kb)
self.assertEqual(512, topo[0].total)
self.assertEqual(256, topo[0].used)
def _test_get_requested_mempages_pagesize(self, spec=None, props=None):
flavor = objects.Flavor(vcpus=16, memory_mb=2048,
extra_specs=spec or {})
image_meta = {"properties": props or {}}
return hw._numa_get_pagesize_constraints(flavor, image_meta)
def test_get_requested_mempages_pagesize_from_flavor_swipe(self):
self.assertEqual(
hw.MEMPAGES_SMALL, self._test_get_requested_mempages_pagesize(
spec={"hw:mem_page_size": "small"}))
self.assertEqual(
hw.MEMPAGES_LARGE, self._test_get_requested_mempages_pagesize(
spec={"hw:mem_page_size": "large"}))
self.assertEqual(
hw.MEMPAGES_ANY, self._test_get_requested_mempages_pagesize(
spec={"hw:mem_page_size": "any"}))
def test_get_requested_mempages_pagesize_from_flavor_specific(self):
self.assertEqual(
2048,
self._test_get_requested_mempages_pagesize(
spec={"hw:mem_page_size": "2048"}))
def test_get_requested_mempages_pagesize_from_flavor_invalid(self):
self.assertRaises(
exception.MemoryPageSizeInvalid,
self._test_get_requested_mempages_pagesize,
{"hw:mem_page_size": "foo"})
def test_get_requested_mempages_pagesize_from_image_flavor_any(self):
self.assertEqual(
2048,
self._test_get_requested_mempages_pagesize(
spec={"hw:mem_page_size": "any"},
props={"hw_mem_page_size": "2048"}))
def test_get_requested_mempages_pagesize_from_image_flavor_large(self):
self.assertEqual(
2048,
self._test_get_requested_mempages_pagesize(
spec={"hw:mem_page_size": "large"},
props={"hw_mem_page_size": "2048"}))
def test_get_requested_mempages_pagesize_from_image_forbidden(self):
self.assertRaises(
exception.MemoryPageSizeForbidden,
self._test_get_requested_mempages_pagesize,
{"hw:mem_page_size": "small"},
{"hw_mem_page_size": "2048"})
def test_get_requested_mempages_pagesize_from_image_forbidden2(self):
self.assertRaises(
exception.MemoryPageSizeForbidden,
self._test_get_requested_mempages_pagesize,
{}, {"hw_mem_page_size": "2048"})
def test_cell_accepts_request_wipe(self):
host_cell = objects.NUMACell(
id=0, cpuset=set([0]), memory=1024, mempages=[
objects.NUMAPagesTopology(size_kb=4, total=262144, used=0),
],
siblings=[], pinned_cpus=set([]))
inst_cell = objects.InstanceNUMACell(
id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_SMALL)
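        # A MEMPAGES_SMALL request is satisfied by the host's default 4 KB page pool.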
self.assertEqual(
4,
hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
inst_cell = objects.InstanceNUMACell(
id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_ANY)
self.assertEqual(
4,
hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
inst_cell = objects.InstanceNUMACell(
id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_LARGE)
self.assertIsNone(hw._numa_cell_supports_pagesize_request(
host_cell, inst_cell))
def test_cell_accepts_request_large_pass(self):
inst_cell = objects.InstanceNUMACell(
id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_LARGE)
host_cell = objects.NUMACell(
id=0, cpuset=set([0]), memory=1024, mempages=[
objects.NUMAPagesTopology(size_kb=4, total=256, used=0),
objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)
],
siblings=[], pinned_cpus=set([]))
self.assertEqual(
2048,
hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
def test_cell_accepts_request_custom_pass(self):
inst_cell = objects.InstanceNUMACell(
id=0, cpuset=set([0]), memory=1024, pagesize=2048)
host_cell = objects.NUMACell(
id=0, cpuset=set([0]), memory=1024, mempages=[
objects.NUMAPagesTopology(size_kb=4, total=256, used=0),
objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)
],
siblings=[], pinned_cpus=set([]))
self.assertEqual(
2048,
hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
class _CPUPinningTestCaseBase(object):
def assertEqualTopology(self, expected, got):
for attr in ('sockets', 'cores', 'threads'):
self.assertEqual(getattr(expected, attr), getattr(got, attr),
"Mismatch on %s" % attr)
def assertInstanceCellPinned(self, instance_cell, cell_ids=None):
default_cell_id = 0
self.assertIsNotNone(instance_cell)
if cell_ids is None:
self.assertEqual(default_cell_id, instance_cell.id)
else:
self.assertIn(instance_cell.id, cell_ids)
self.assertEqual(len(instance_cell.cpuset),
len(instance_cell.cpu_pinning))
class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
def test_get_pinning_inst_too_large_cpu(self):
host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]),
memory=2048, memory_usage=0, siblings=[],
mempages=[], pinned_cpus=set([]))
inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
memory=2048)
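        # Four instance vCPUs cannot be pinned onto a three-CPU host cell, so no
        # pinning result is expected.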
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
self.assertIsNone(inst_pin)
def test_get_pinning_inst_too_large_mem(self):
host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]),
memory=2048, memory_usage=1024,
siblings=[], mempages=[],
pinned_cpus=set([]))
inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]),
memory=2048)
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
self.assertIsNone(inst_pin)
def test_get_pinning_inst_not_avail(self):
host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
memory=2048, memory_usage=0,
pinned_cpus=set([0]),
siblings=[], mempages=[])
inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
memory=2048)
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
self.assertIsNone(inst_pin)
def test_get_pinning_no_sibling_fits_empty(self):
host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]),
memory=2048, memory_usage=0, siblings=[],
mempages=[], pinned_cpus=set([]))
inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=2048)
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
self.assertInstanceCellPinned(inst_pin)
def test_get_pinning_no_sibling_fits_w_usage(self):
host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
memory=2048, memory_usage=0,
pinned_cpus=set([1]), mempages=[],
siblings=[])
inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=1024)
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
self.assertInstanceCellPinned(inst_pin)
def test_get_pinning_instance_siblings_fits(self):
host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
memory=2048, memory_usage=0, siblings=[],
mempages=[], pinned_cpus=set([]))
topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
inst_pin = objects.InstanceNUMACell(
cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
self.assertInstanceCellPinned(inst_pin)
self.assertEqualTopology(topo, inst_pin.cpu_topology)
def test_get_pinning_instance_siblings_host_siblings_fits_empty(self):
host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
memory=2048, memory_usage=0,
siblings=[set([0, 1]), set([2, 3])],
mempages=[], pinned_cpus=set([]))
topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
inst_pin = objects.InstanceNUMACell(
cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
self.assertInstanceCellPinned(inst_pin)
self.assertEqualTopology(topo, inst_pin.cpu_topology)
def test_get_pinning_instance_siblings_host_siblings_fits_w_usage(self):
host_pin = objects.NUMACell(
id=0,
cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
memory=4096, memory_usage=0,
pinned_cpus=set([1, 2, 5, 6]),
siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])],
mempages=[])
topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
inst_pin = objects.InstanceNUMACell(
cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
self.assertInstanceCellPinned(inst_pin)
self.assertEqualTopology(topo, inst_pin.cpu_topology)
def test_get_pinning_instance_siblings_host_siblings_fails(self):
host_pin = objects.NUMACell(
id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
memory=4096, memory_usage=0,
siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])],
mempages=[], pinned_cpus=set([]))
topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4)
inst_pin = objects.InstanceNUMACell(
cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048,
cpu_topology=topo)
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
self.assertIsNone(inst_pin)
def test_get_pinning_host_siblings_fit_single_core(self):
host_pin = objects.NUMACell(
id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
memory=4096, memory_usage=0,
siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])],
mempages=[], pinned_cpus=set([]))
inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
memory=2048)
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
def test_get_pinning_host_siblings_fit(self):
host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
memory=4096, memory_usage=0,
siblings=[set([0, 1]), set([2, 3])],
mempages=[], pinned_cpus=set([]))
inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
memory=2048)
inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
self.assertInstanceCellPinned(inst_pin)
got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
def test_host_numa_fit_instance_to_host_single_cell(self):
host_topo = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([0, 1]), memory=2048,
memory_usage=0, siblings=[],
mempages=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048,
memory_usage=0, siblings=[],
mempages=[], pinned_cpus=set([]))]
)
inst_topo = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
cpuset=set([0, 1]), memory=2048, cpu_pinning={})])
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
def test_host_numa_fit_instance_to_host_single_cell_w_usage(self):
host_topo = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([0, 1]),
pinned_cpus=set([0]), memory=2048,
memory_usage=0, siblings=[],
mempages=[]),
objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048,
memory_usage=0, siblings=[],
mempages=[], pinned_cpus=set([]))])
inst_topo = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
cpuset=set([0, 1]), memory=2048, cpu_pinning={})])
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(1,))
def test_host_numa_fit_instance_to_host_single_cell_fail(self):
host_topo = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([0, 1]), memory=2048,
pinned_cpus=set([0]), memory_usage=0,
siblings=[], mempages=[]),
objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048,
pinned_cpus=set([2]), memory_usage=0,
siblings=[], mempages=[])])
inst_topo = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(cpuset=set([0, 1]),
memory=2048,
cpu_pinning={})])
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
self.assertIsNone(inst_topo)
def test_host_numa_fit_instance_to_host_fit(self):
host_topo = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
memory=2048, memory_usage=0,
siblings=[], mempages=[],
pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
memory=2048, memory_usage=0,
siblings=[], mempages=[],
pinned_cpus=set([]))])
inst_topo = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(cpuset=set([0, 1]),
memory=2048, cpu_pinning={}),
objects.InstanceNUMACell(cpuset=set([2, 3]),
memory=2048, cpu_pinning={})])
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
def test_host_numa_fit_instance_to_host_barely_fit(self):
host_topo = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
memory=2048, pinned_cpus=set([0]),
siblings=[], mempages=[],
memory_usage=0),
objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
memory=2048, memory_usage=0,
siblings=[], mempages=[],
pinned_cpus=set([4, 5, 6])),
objects.NUMACell(id=2, cpuset=set([8, 9, 10, 11]),
memory=2048, memory_usage=0,
siblings=[], mempages=[],
pinned_cpus=set([10, 11]))])
inst_topo = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(cpuset=set([0, 1]),
memory=2048, cpu_pinning={}),
objects.InstanceNUMACell(cpuset=set([2, 3]),
memory=2048, cpu_pinning={})])
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 2))
def test_host_numa_fit_instance_to_host_fail_capacity(self):
host_topo = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
memory=4096, memory_usage=0,
mempages=[], siblings=[],
pinned_cpus=set([0])),
objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
memory=4096, memory_usage=0,
siblings=[], mempages=[],
pinned_cpus=set([4, 5, 6]))])
inst_topo = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(cpuset=set([0, 1]),
memory=2048, cpu_pinning={}),
objects.InstanceNUMACell(cpuset=set([2, 3]),
memory=2048, cpu_pinning={})])
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
self.assertIsNone(inst_topo)
def test_host_numa_fit_instance_to_host_fail_topology(self):
host_topo = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
memory=4096, memory_usage=0,
siblings=[], mempages=[],
pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
memory=4096, memory_usage=0,
siblings=[], mempages=[],
pinned_cpus=set([]))])
inst_topo = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(cpuset=set([0, 1]),
memory=1024, cpu_pinning={}),
objects.InstanceNUMACell(cpuset=set([2, 3]),
memory=1024, cpu_pinning={}),
objects.InstanceNUMACell(cpuset=set([4, 5]),
memory=1024, cpu_pinning={})])
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
self.assertIsNone(inst_topo)
def test_cpu_pinning_usage_from_instances(self):
host_pin = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
memory=4096, cpu_usage=0,
memory_usage=0, siblings=[],
mempages=[], pinned_cpus=set([]))])
inst_pin_1 = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
cpuset=set([0, 1]), id=0, cpu_pinning={0: 0, 1: 3},
memory=2048)])
inst_pin_2 = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
cpuset=set([0, 1]), id=0, cpu_pinning={0: 1, 1: 2},
memory=2048)])
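        # Together the two instances pin host CPUs {0, 3} and {1, 2}, leaving no
        # free CPUs in the cell.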
host_pin = hw.numa_usage_from_instances(
host_pin, [inst_pin_1, inst_pin_2])
self.assertEqual(set([0, 1, 2, 3]),
host_pin.cells[0].pinned_cpus)
def test_cpu_pinning_usage_from_instances_free(self):
host_pin = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
memory=4096, cpu_usage=0, memory_usage=0,
siblings=[], mempages=[],
pinned_cpus=set([0, 1, 3]))])
inst_pin_1 = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
cpuset=set([0]), memory=1024, cpu_pinning={0: 1}, id=0)])
inst_pin_2 = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
cpuset=set([0, 1]), memory=1024, id=0,
cpu_pinning={0: 0, 1: 3})])
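        # With free=True the instances' pinned CPUs are released from the host
        # cell instead of being accumulated.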
host_pin = hw.numa_usage_from_instances(
host_pin, [inst_pin_1, inst_pin_2], free=True)
self.assertEqual(set(), host_pin.cells[0].pinned_cpus)
def test_host_usage_from_instances_fail(self):
host_pin = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
memory=4096, cpu_usage=0,
memory_usage=0, siblings=[],
mempages=[], pinned_cpus=set([]))])
inst_pin_1 = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
cpuset=set([0, 1]), memory=2048, id=0,
cpu_pinning={0: 0, 1: 3})])
inst_pin_2 = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
cpuset=set([0, 1]), id=0, memory=2048,
cpu_pinning={0: 0, 1: 2})])
self.assertRaises(exception.CPUPinningInvalid,
hw.numa_usage_from_instances, host_pin,
[inst_pin_1, inst_pin_2])
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import uuid
from keystone.common.sql import migration
from keystone import contrib
from keystone.openstack.common import importutils
from keystone import tests
from keystone.tests import test_v3
class TestExtensionCase(test_v3.RestfulTestCase):
EXTENSION_NAME = 'endpoint_filter'
EXTENSION_TO_ADD = 'endpoint_filter_extension'
def setup_database(self):
self.conf_files = super(TestExtensionCase, self).config_files()
self.conf_files.append(
tests.testsdir('test_associate_project_endpoint_extension.conf'))
super(TestExtensionCase, self).setup_database()
package_name = "%s.%s.migrate_repo" % (contrib.__name__,
self.EXTENSION_NAME)
package = importutils.import_module(package_name)
self.repo_path = os.path.abspath(
os.path.dirname(package.__file__))
migration.db_version_control(version=None, repo_path=self.repo_path)
migration.db_sync(version=None, repo_path=self.repo_path)
def setUp(self):
super(TestExtensionCase, self).setUp()
self.default_request_url = (
'/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': self.endpoint_id})
def tearDown(self):
super(TestExtensionCase, self).tearDown()
self.conf_files.pop()
class AssociateEndpointProjectFilterCRUDTestCase(TestExtensionCase):
"""Test OS-EP-FILTER endpoint to project associations extension."""
# endpoint-project associations crud tests
# PUT
def test_create_endpoint_project_assoc(self):
"""PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid endpoint and project id test case.
"""
self.put(self.default_request_url,
body='',
expected_status=204)
def test_create_endpoint_project_assoc_noproj(self):
"""PUT OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case.
"""
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': uuid.uuid4().hex,
'endpoint_id': self.endpoint_id},
body='',
expected_status=404)
def test_create_endpoint_project_assoc_noendp(self):
"""PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case.
"""
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': uuid.uuid4().hex},
body='',
expected_status=404)
def test_create_endpoint_project_assoc_unexpected_body(self):
"""PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Unexpected body in request. The body should be ignored.
"""
self.put(self.default_request_url,
body={'project_id': self.default_domain_project_id},
expected_status=204)
# HEAD
def test_check_endpoint_project_assoc(self):
"""HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid project and endpoint id test case.
"""
self.put(self.default_request_url,
body='',
expected_status=204)
self.head('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': self.endpoint_id},
expected_status=204)
def test_check_endpoint_project_assoc_noproj(self):
"""HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case.
"""
self.put(self.default_request_url)
self.head('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': uuid.uuid4().hex,
'endpoint_id': self.endpoint_id},
body='',
expected_status=404)
def test_check_endpoint_project_assoc_noendp(self):
"""HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case.
"""
self.put(self.default_request_url)
self.head('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': uuid.uuid4().hex},
body='',
expected_status=404)
# GET
def test_get_endpoint_project_assoc(self):
"""GET /OS-EP-FILTER/projects/{project_id}/endpoints success."""
self.put(self.default_request_url)
r = self.get('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
'project_id': self.default_domain_project_id})
self.assertValidEndpointListResponse(r, self.endpoint)
def test_get_endpoint_project_assoc_noproj(self):
"""GET /OS-EP-FILTER/projects/{project_id}/endpoints no project."""
self.put(self.default_request_url)
self.get('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
'project_id': uuid.uuid4().hex},
body='',
expected_status=404)
def test_list_projects_for_endpoint_default(self):
"""GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects success
Don't associate project and endpoint, then get empty list.
"""
r = self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
{'endpoint_id': self.endpoint_id},
expected_status=200)
self.assertValidProjectListResponse(r, expected_length=0)
def test_list_projects_for_endpoint_noendpoint(self):
"""GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
Invalid endpoint id test case.
"""
self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
{'endpoint_id': uuid.uuid4().hex},
expected_status=404)
def test_list_projects_for_endpoint_assoc(self):
"""GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects success
Associate default project and endpoint, then get it.
"""
self.put(self.default_request_url)
r = self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
{'endpoint_id': self.endpoint_id},
expected_status=200)
self.assertValidProjectListResponse(r, self.default_domain_project)
# DELETE
def test_remove_endpoint_project_assoc(self):
"""DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid project id and endpoint id test case.
"""
self.put(self.default_request_url)
self.delete('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': self.endpoint_id},
expected_status=204)
def test_remove_endpoint_project_assoc_noproj(self):
"""DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case.
"""
self.put(self.default_request_url)
self.delete('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': uuid.uuid4().hex,
'endpoint_id': self.endpoint_id},
body='',
expected_status=404)
def test_remove_endpoint_project_assoc_noendp(self):
"""DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case.
"""
self.put(self.default_request_url)
self.delete('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
'endpoint_id': uuid.uuid4().hex},
body='',
expected_status=404)
class AssociateProjectEndpointFilterTokenRequestTestCase(TestExtensionCase):
"""Test OS-EP-FILTER catalog filtering extension."""
def test_default_project_id_scoped_token_with_user_id_ep_filter(self):
# create a second project to work with
ref = self.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
# grant the user a role on the project
self.put(
'/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
'user_id': self.user['id'],
'project_id': project['id'],
'role_id': self.role['id']})
# set the user's preferred project
body = {'user': {'default_project_id': project['id']}}
r = self.patch('/users/%(user_id)s' % {
'user_id': self.user['id']},
body=body)
self.assertValidUserResponse(r)
# add one endpoint to the project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': project['id'],
'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
# attempt to authenticate without requesting a project
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(r.result['token']['project']['id'], project['id'])
def test_implicit_project_id_scoped_token_with_user_id_ep_filter(self):
        # add one endpoint to the default project, then authenticate with an
        # explicit project id
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
def test_default_project_id_scoped_token_ep_filter_no_catalog(self):
# create a second project to work with
ref = self.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
# grant the user a role on the project
self.put(
'/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
'user_id': self.user['id'],
'project_id': project['id'],
'role_id': self.role['id']})
# set the user's preferred project
body = {'user': {'default_project_id': project['id']}}
r = self.patch('/users/%(user_id)s' % {
'user_id': self.user['id']},
body=body)
self.assertValidUserResponse(r)
# add one endpoint to the project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': project['id'],
'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
# attempt to authenticate without requesting a project
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=False,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(r.result['token']['project']['id'], project['id'])
def test_implicit_project_id_scoped_token_ep_filter_no_catalog(self):
        # add one endpoint to the default project, then authenticate with an
        # explicit project id
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=False,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
def test_default_project_id_scoped_token_ep_filter_full_catalog(self):
# create a second project to work with
ref = self.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
# grant the user a role on the project
self.put(
'/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
'user_id': self.user['id'],
'project_id': project['id'],
'role_id': self.role['id']})
# set the user's preferred project
body = {'user': {'default_project_id': project['id']}}
r = self.patch('/users/%(user_id)s' % {
'user_id': self.user['id']},
body=body)
self.assertValidUserResponse(r)
# attempt to authenticate without requesting a project
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=False,
endpoint_filter=True)
self.assertEqual(r.result['token']['project']['id'], project['id'])
def test_implicit_project_id_scoped_token_ep_filter_full_catalog(self):
# attempt to authenticate without requesting a project
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=False,
endpoint_filter=True,)
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
def test_implicit_project_id_scoped_token_handling_bad_reference(self):
        # handle the case where an association references an endpoint that no
        # longer exists
# add first endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
# create a second temporary endpoint
self.endpoint_id2 = uuid.uuid4().hex
self.endpoint2 = self.new_endpoint_ref(service_id=self.service_id)
self.endpoint2['id'] = self.endpoint_id2
self.catalog_api.create_endpoint(
self.endpoint_id2,
self.endpoint2.copy())
# add second endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id2},
body='',
expected_status=204)
# remove the temporary reference
# this will create inconsistency in the endpoint filter table
# which is fixed during the catalog creation for token request
self.catalog_api.delete_endpoint(self.endpoint_id2)
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
|
|
from collections import Counter, namedtuple
from contextlib import contextmanager
import json
import logging
import pprint
from types import MethodType
from tornado import gen, ioloop, web, websocket, template
from tornado_cors import CorsMixin
import toro
from ..app import ChatNode
max_waiting_partners = 100
custom_message_type = 'INSTANT_DISCOURSE'
class HashEntry(namedtuple('HashEntry', 'reporter_cid speaker_cid hash')):
__slots__ = ()
def get_symmetric(self, partner_cid):
"""Return a HashEntry that must be seen to verify that the instance has been said.
This means swapping reporter_cid for partner_cid."""
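        # Illustration (hypothetical cids): client "a" reporting a hash h spoken by
        # its partner "b" produces HashEntry('a', 'b', h); its symmetric entry,
        # get_symmetric('b') -> HashEntry('b', 'b', h), is what "b" reports as a me-hash.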
return self._replace(reporter_cid=partner_cid)
@contextmanager
def log_exceptions():
try:
yield
except:
logging.getLogger(__name__).exception("log_exception")
raise
class TestWebSocketHandlerMixin(object):
"""Base class for testing handlers that exposes the on_close event.
This allows for deterministic cleanup of the associated socket.
"""
# source:
# https://github.com/tornadoweb/tornado/blob/26cb9b3fa67ef3282414a86743ee2e16c81913c3/tornado/test/websocket_test.py#L33
# it'd be cool if we could mix this in dynamically only in tests
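    # A minimal close_future_gen sketch (hypothetical test-side helper, not part
    # of this module):
    #
    #     from tornado.concurrent import Future
    #
    #     def close_futures():
    #         while True:
    #             yield Future()
    #
    #     get_app(..., close_future_gen=close_futures())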
def initialize(self, *args, **kwargs):
super(TestWebSocketHandlerMixin, self).initialize(*args, **kwargs)
self.__close_future = None
future_gen = kwargs.pop('close_future_gen', None)
if future_gen is not None:
self.__close_future = future_gen.next()
def on_close(self, *args, **kwargs):
if self.__close_future is not None:
self.__close_future.set_result((self.close_code, self.close_reason))
return super(TestWebSocketHandlerMixin, self).on_close(*args, **kwargs)
class PeerJSHandler(websocket.WebSocketHandler):
"""Handles basic Peerjs signaling features."""
cid_handlers = {}
# should define peerjs message types here
def initialize(self, node, **kwargs):
self.node = node
# this doesn't actually do what you want -- it'll print the bound method's MRO class
self.log = logging.getLogger("%s:%s" % (__name__, self.__class__.__name__))
def check_origin(self, origin):
return True
# parsed_origin = urllib.parse.urlparse(origin)
# return parsed_origin.netloc.endswith(".mydomain.com")
def open(self):
"""
qstring: id
qstring: token
qstring: key
"""
self.log.info("open: %r", self.request.query_arguments)
if 'id' not in self.request.query_arguments:
self.write_message(json.dumps({
'type': 'ERROR',
'payload': {
'msg': 'Expected an id in the querystring.'
}
}))
self.close()
return
cid = self.get_query_argument('id')
assert cid in self.node.cids_allocated
self.node.set_cid_in_use(cid)
self.cid = cid
self.cid_handlers[cid] = self
self.write_message(json.dumps({
'type': 'OPEN'
}))
def on_message(self, message):
"""
Peerjs messages always have keys type, payload, dst.
Return True if we handled a message, False otherwise.
"""
message = json.loads(message)
self.log.info("on_message for %s, type %s", self.cid, message['type'])
self.log.debug("message: %s", pprint.pformat(message))
if message['type'] == custom_message_type:
return False
to_cid = message['dst']
forward_message = {
'type': message['type'],
'src': self.cid,
'dst': to_cid,
'payload': message['payload'],
}
# I'm pretty sure you can get out of order messages somehow?
assert to_cid in self.node.cids_in_use
assert to_cid in self.cid_handlers
self.cid_handlers[to_cid].write_message(json.dumps(forward_message))
return True
def on_close(self):
self.log.info("on_close for %s", self.cid)
del self.cid_handlers[self.cid]
self.node.remove_cid_in_use(self.cid)
class ChatHandler(TestWebSocketHandlerMixin, PeerJSHandler):
"""
This handler injects our custom signaling into the PeerJS stream.
"""
partner_queue = toro.Queue(max_waiting_partners)
def initialize(self, *args, **kwargs):
super(ChatHandler, self).initialize(*args, **kwargs)
self.partner_cid = None
self.hash_counter = None
def on_close(self):
super(ChatHandler, self).on_close()
# if our client was on the queue, remove them.
try:
self.partner_queue.queue.remove(self.cid)
self.log.info("removed own client from queue")
except ValueError:
pass # they weren't on the queue
@gen.coroutine
def on_message(self, message):
with log_exceptions():
if super(ChatHandler, self).on_message(message):
# a PeerJS message was handled
return
message = json.loads(message)
if message['subtype'] == 'get-partner':
assert self.cid
# self.partner_cid doesn't have to be None here
# (at least in the current protocol) because there's no
# explicit way to report a disconnect.
self.log.info("about to call _wait_for_partner %s", self.cid)
                # to add a timeout here, _wait_for_partner() could be wrapped
                # with gen.with_timeout
yield self._wait_for_partner()
elif message['subtype'] == 'me-hash':
entry = HashEntry(self.cid, self.cid, message['hash'])
self._handle_new_entry(entry)
elif message['subtype'] == 'them-hash':
entry = HashEntry(self.cid, self.partner_cid, message['hash'])
self._handle_new_entry(entry)
elif message['subtype'] == 'get-num-clients':
self._send(self.cid, {'subtype': 'num-clients'})
else:
self.log.warning("unrecognized id message subtype")
def _match_hash(self, hash_entry):
"""Return True if the entry has been reported by both clients, False otherwise."""
self.log.info("_match_hash(%s) with %s",
hash_entry, self.hash_counter)
symmetric = hash_entry.get_symmetric(self.partner_cid)
if symmetric in self.hash_counter:
self.hash_counter[symmetric] -= 1
assert self.hash_counter[symmetric] >= 0
if self.hash_counter[symmetric] == 0:
del self.hash_counter[symmetric]
return True
self.hash_counter[hash_entry] += 1
return False
def _handle_new_entry(self, hash_entry):
"""Return True if this entry was matched.
In this case, the entry will also be checked for a penalty and recorded.
Return False otherwise.
"""
if self._match_hash(hash_entry):
if self.node.has_been_said(hash_entry.hash):
self._send_penalty(hash_entry)
else:
                    # Only record hashes we have not seen before; a hash that has
                    # already been said is already recorded.
self.node.record_as_said(hash_entry.hash)
return True
return False
def _send_penalty(self, hash_entry):
# If the hash entry is built from a me-hash, the reporter and speaker
# will be the same.
receiver_cid = hash_entry.speaker_cid
if hash_entry.speaker_cid == self.cid:
partner_cid = self.partner_cid
else:
partner_cid = self.cid
self.log.info("penalty for %r after saying %r",
receiver_cid, hash_entry.hash)
self._send(receiver_cid, {
'subtype': 'you-penalty',
'duration_secs': 2,
})
self._send(partner_cid, {
'subtype': 'them-penalty',
'duration_secs': 2,
})
def _send(self, cid, message, attach_clients=True):
# Always mutates message['type'].
# Mutates message['num_clients'] if attach_clients.
message['type'] = custom_message_type
if attach_clients:
message['num_clients'] = len(self.cid_handlers)
self.cid_handlers[cid].write_message(json.dumps(message))
@gen.coroutine
def _wait_for_partner(self):
with log_exceptions():
self.log.info("_wait_for_partner: %s", self.cid)
yield self.partner_queue.put(self.cid)
# If there is a match on the queue, immediately notify
# the waiting clients.
# Tornado is single-threaded, so Toro queues have no
# race on queue size.
# This will probably match the same people over and over, though.
if self.partner_queue.qsize() >= 2:
# peerjs doesn't need both peers to call connect -- that'll
# end with two separate connections.
# So, we arbitrarily pick one peer to connect to the other.
leader = self.partner_queue.get_nowait()
follower = self.partner_queue.get_nowait()
self.log.info("matched: (%s, %s)", leader, follower)
self._send(leader, {
'subtype': 'match',
'match_cid': follower,
})
self.cid_handlers[leader].partner_cid = follower
self.cid_handlers[follower].partner_cid = leader
#TODO are they both supposed to have the same instance!?
hash_counter = Counter()
self.cid_handlers[leader].hash_counter = hash_counter
self.cid_handlers[follower].hash_counter = hash_counter
else:
self.log.info("unable to match right now")
class GetCidHandler(CorsMixin, web.RequestHandler):
CORS_HEADERS = ', '.join(['Accept', 'Accept-Version', 'Content-Type', 'Api-Version'])
CORS_ORIGIN = '*'
CORS_CREDENTIALS = True # needed?
def initialize(self, node):
self.node = node
def get(self):
self.write(self.node.get_unused_cid())
class TestIndexHandler(web.RequestHandler):
# Used during selenium tests to template the test server into the index.
loader = None
def initialize(self, template_path, **index_template_kwargs):
if self.__class__.loader is None:
self.__class__.loader = template.Loader(template_path)
self.index_template_kwargs = index_template_kwargs
def get(self):
index = self.loader.load('index.html').generate(
**self.index_template_kwargs)
self.write(index)
class FaviconHandler(web.RequestHandler):
def get(self):
self.write('')
def get_app(tornado_port,
redis, capacity, error_rate, filter_sync_secs,
serve_index=False, template_path=None, index_template_kwargs=None,
close_future_gen=None, **app_kwargs):
"""
Return an Application with _id_node set to a new Node.
filter_sync_secs: in seconds, how often to sync this node's filter from redis.
close_future_gen: a generator that yields Futures forever. When closing a websocket,
the server will set a Future's result. See TestWebSocketHandlerMixin.
**app_kwargs: passed through to Application.
"""
logging.getLogger("%s.get_app" % __name__).info("building app:\n%s", pprint.pformat(locals()))
node = ChatNode(redis,
capacity,
error_rate)
handlers = [
(r'/favicon.ico', FaviconHandler),
(r'/peerjs', ChatHandler, {
'close_future_gen': close_future_gen,
'node': node,
}),
(r'/peerjs/id', GetCidHandler, {'node': node}),
]
if serve_index:
index_handler_kwargs = {}
index_handler_kwargs['template_path'] = template_path
index_handler_kwargs.update(index_template_kwargs)
handlers.append((r'/', TestIndexHandler, index_handler_kwargs))
app = web.Application(handlers, **app_kwargs)
sync_callback = ioloop.PeriodicCallback(
node.sync_filter_from_redis, filter_sync_secs * 1000,
io_loop=ioloop.IOLoop.instance()
)
    for fieldname in ['_id_node', '_sync_periodic_callback', 'start']:
# Make sure we're not accidentally overriding anything.
assert not hasattr(app, fieldname), "%r has field %s" % (app, fieldname)
app._id_node = node
app._sync_periodic_callback = sync_callback
def start(self):
self._sync_periodic_callback.start()
ioloop.IOLoop.instance().start()
app.start = MethodType(start, app)
return app
|
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
import os
import botocore.session
from botocore.client import Config
import boto3
import boto3.utils
from .resources.factory import ResourceFactory
class Session(object):
"""
A session stores configuration state and allows you to create service
clients and resources.
:type aws_access_key_id: string
:param aws_access_key_id: AWS access key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: AWS secret access key
:type aws_session_token: string
:param aws_session_token: AWS temporary session token
:type region_name: string
:param region_name: Default region when creating new connections
:type botocore_session: botocore.session.Session
:param botocore_session: Use this Botocore session instead of creating
a new default one.
:type profile_name: string
:param profile_name: The name of a profile to use. If not given, then
the default profile is used.
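    A minimal illustrative example (the region name is only a placeholder)::
        from boto3.session import Session
        session = Session(region_name='us-east-1')
        s3 = session.resource('s3')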
"""
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
aws_session_token=None, region_name=None,
botocore_session=None, profile_name=None):
if botocore_session is not None:
self._session = botocore_session
else:
# Create a new default session
self._session = botocore.session.get_session()
        # Set up a custom user-agent string if it isn't already customized
if self._session.user_agent_name == 'Botocore':
botocore_info = 'Botocore/{0}'.format(
self._session.user_agent_version)
if self._session.user_agent_extra:
self._session.user_agent_extra += ' ' + botocore_info
else:
self._session.user_agent_extra = botocore_info
self._session.user_agent_name = 'Boto3'
self._session.user_agent_version = boto3.__version__
if profile_name is not None:
self._session.set_config_variable('profile', profile_name)
if aws_access_key_id or aws_secret_access_key or aws_session_token:
self._session.set_credentials(aws_access_key_id,
aws_secret_access_key, aws_session_token)
if region_name is not None:
self._session.set_config_variable('region', region_name)
self.resource_factory = ResourceFactory(
self._session.get_component('event_emitter'))
self._setup_loader()
self._register_default_handlers()
def __repr__(self):
return 'Session(region={0})'.format(
repr(self._session.get_config_variable('region')))
@property
def profile_name(self):
"""
The **read-only** profile name.
"""
return self._session.profile or 'default'
@property
def region_name(self):
"""
The **read-only** region name.
"""
return self._session.get_config_variable('region')
@property
def events(self):
"""
The event emitter for a session
"""
return self._session.get_component('event_emitter')
def _setup_loader(self):
"""
        Set up loader paths so that we can load resources.
"""
self._loader = self._session.get_component('data_loader')
self._loader.search_paths.append(
os.path.join(os.path.dirname(__file__), 'data'))
def get_available_services(self):
"""
Get a list of available services that can be loaded as low-level
clients via :py:meth:`Session.client`.
:rtype: list
:return: List of service names
"""
return self._session.get_available_services()
def get_available_resources(self):
"""
Get a list of available services that can be loaded as resource
clients via :py:meth:`Session.resource`.
:rtype: list
:return: List of service names
"""
return self._loader.list_available_services(type_name='resources-1')
def client(self, service_name, region_name=None, api_version=None,
use_ssl=True, verify=None, endpoint_url=None,
aws_access_key_id=None, aws_secret_access_key=None,
aws_session_token=None, config=None):
"""
Create a low-level service client by name.
:type service_name: string
:param service_name: The name of a service, e.g. 's3' or 'ec2'. You
can get a list of available services via
:py:meth:`get_available_services`.
:type region_name: string
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:type api_version: string
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:type use_ssl: boolean
:param use_ssl: Whether or not to use SSL. By default, SSL is used. Note that
not all services support non-ssl connections.
:type verify: boolean/string
:param verify: Whether or not to verify SSL certificates. By default SSL certificates
are verified. You can provide the following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
              use. You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:type endpoint_url: string
:param endpoint_url: The complete URL to use for the constructed client.
Normally, botocore will automatically construct the appropriate URL
to use when communicating with a service. You can specify a
complete URL (including the "http/https" scheme) to override this
behavior. If this value is provided, then ``use_ssl`` is ignored.
:type aws_access_key_id: string
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:type aws_secret_access_key: string
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:type aws_session_token: string
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:type config: botocore.client.Config
:param config: Advanced client configuration options. If region_name
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a region_name value passed explicitly to the method.
:return: Service client instance
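        A short illustrative example (the region name is only a placeholder)::
            session = Session()
            sqs = session.client('sqs', region_name='us-west-2')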
"""
return self._session.create_client(
service_name, region_name=region_name, api_version=api_version,
use_ssl=use_ssl, verify=verify, endpoint_url=endpoint_url,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token, config=config)
def resource(self, service_name, region_name=None, api_version=None,
use_ssl=True, verify=None, endpoint_url=None,
aws_access_key_id=None, aws_secret_access_key=None,
aws_session_token=None, config=None):
"""
Create a resource service client by name.
:type service_name: string
:param service_name: The name of a service, e.g. 's3' or 'ec2'. You
can get a list of available services via
:py:meth:`get_available_resources`.
:type region_name: string
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:type api_version: string
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:type use_ssl: boolean
:param use_ssl: Whether or not to use SSL. By default, SSL is used. Note that
not all services support non-ssl connections.
:type verify: boolean/string
:param verify: Whether or not to verify SSL certificates. By default SSL certificates
are verified. You can provide the following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
              use. You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:type endpoint_url: string
:param endpoint_url: The complete URL to use for the constructed client.
Normally, botocore will automatically construct the appropriate URL
to use when communicating with a service. You can specify a
complete URL (including the "http/https" scheme) to override this
behavior. If this value is provided, then ``use_ssl`` is ignored.
:type aws_access_key_id: string
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:type aws_secret_access_key: string
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:type aws_session_token: string
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:type config: botocore.client.Config
:param config: Advanced client configuration options. If region_name
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a region_name value passed explicitly to the method. If
user_agent_extra is specified in the client config, it overrides
the default user_agent_extra provided by the resource API.
:return: Subclass of :py:class:`~boto3.resources.base.ServiceResource`
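        A short illustrative example (the bucket name is a placeholder)::
            session = Session()
            s3 = session.resource('s3')
            bucket = s3.Bucket('my-bucket')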
"""
if api_version is None:
api_version = self._loader.determine_latest_version(
service_name, 'resources-1')
resource_model = self._loader.load_service_model(
service_name, 'resources-1', api_version)
# Creating a new resource instance requires the low-level client
# and service model, the resource version and resource JSON data.
# We pass these to the factory and get back a class, which is
# instantiated on top of the low-level client.
if config is not None:
if config.user_agent_extra is None:
config = copy.deepcopy(config)
config.user_agent_extra = 'Resource'
else:
config = Config(user_agent_extra='Resource')
client = self.client(
service_name, region_name=region_name, api_version=api_version,
use_ssl=use_ssl, verify=verify, endpoint_url=endpoint_url,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token, config=config)
service_model = client.meta.service_model
# Create a ServiceContext object to serve as a reference to
# important read-only information about the general service.
service_context = boto3.utils.ServiceContext(
service_name=service_name, service_model=service_model,
resource_json_definitions=resource_model['resources'],
service_waiter_model=boto3.utils.LazyLoadedWaiterModel(
self._session, service_name, api_version)
)
# Create the service resource class.
cls = self.resource_factory.load_from_definition(
resource_name=service_name,
single_resource_json_definition=resource_model['service'],
service_context=service_context
)
return cls(client=client)
def _register_default_handlers(self):
# S3 customizations
self._session.register(
'creating-client-class.s3',
boto3.utils.lazy_call(
'boto3.s3.inject.inject_s3_transfer_methods'))
self._session.register(
'creating-resource-class.s3.Bucket',
boto3.utils.lazy_call(
'boto3.s3.inject.inject_bucket_methods'))
self._session.register(
'creating-resource-class.s3.Object',
boto3.utils.lazy_call(
'boto3.s3.inject.inject_object_methods'))
# DynamoDb customizations
self._session.register(
'creating-resource-class.dynamodb',
boto3.utils.lazy_call(
'boto3.dynamodb.transform.register_high_level_interface'),
unique_id='high-level-dynamodb')
self._session.register(
'creating-resource-class.dynamodb.Table',
boto3.utils.lazy_call(
'boto3.dynamodb.table.register_table_methods'),
unique_id='high-level-dynamodb-table')
# EC2 Customizations
self._session.register(
'creating-resource-class.ec2.ServiceResource',
boto3.utils.lazy_call(
'boto3.ec2.createtags.inject_create_tags'))
self._session.register(
'creating-resource-class.ec2.Instance',
boto3.utils.lazy_call(
'boto3.ec2.deletetags.inject_delete_tags',
event_emitter=self.events))
|
|
import pytest
from pycket.expand import expand
from pycket.interpreter import *
from pycket.values import *
from pycket.impersonators import *
from pycket.vector import *
from pycket.prims import *
from pycket.test.testhelper import run_fix, run, run_mod, execute, check_equal
import sys
def test_vec():
assert isinstance(run('(vector 1)'), W_Vector)
#run('(vector? (quote #(0 (2 2 2 2)) "Anna"))', w_true)
#run("(vector? (quote #())", w_true)
run_fix("(let-values ([(v) (vector 1 2 3)]) (vector-length v))", 3)
run("(let-values ([(v) (vector 1 2 3)]) (vector-set! v 0 0))", w_void)
run_fix("(let-values ([(v) (vector 1 2 3)]) (vector-set! v 0 0) (vector-length v))", 3)
run_fix("(let-values ([(v) (vector 1 2 3)]) (vector-set! v 0 0) (vector-ref v 0))", 0)
def test_vec_equal():
run("(equal? (vector 1 2 3) (vector 1 2 3))", w_true)
run("(equal? (vector 1 2 3) (vector 1 2))", w_false)
run("(equal? (vector 1 2 3) (vector 1 2 5))", w_false)
def test_make_vector():
run_fix("(let-values ([(v) (vector)]) (vector-length v))", 0)
run_fix("(let-values ([(v) (make-vector 5)]) (vector-length v))", 5)
vec = run('(make-vector 5)')
for i in range(vec.length()):
assert vec.ref(i).value == 0
def test_vec_strategies_empty():
vec = run("(vector)")
print "First size: %s" % vec.length()
assert isinstance(vec.strategy, ObjectVectorStrategy)
vec = run("(make-vector 0)")
print "Second size: %s" % vec.length()
assert isinstance(vec.strategy, ObjectVectorStrategy)
vec = run("(vector-immutable)")
assert isinstance(vec.strategy, ObjectImmutableVectorStrategy)
vec = run("(vector-immutable (quote hello))")
assert isinstance(vec.strategy, ObjectImmutableVectorStrategy)
def test_vec_strategies_fixnum():
vec = run("(vector 1 2 3)")
assert isinstance(vec.strategy, FixnumVectorStrategy)
vec = run("(make-vector 2)")
assert isinstance(vec.strategy, ConstantVectorStrategy)
def test_vec_strategies_flonum():
vec = run("(vector 1.0 2.1 3.2)")
assert isinstance(vec.strategy, FlonumVectorStrategy)
vec = run("(make-vector 2 1.2)")
assert isinstance(vec.strategy, ConstantVectorStrategy)
def test_vec_strategies_fixnum_singleton():
vec1 = run("(vector 1 2 3)")
vec2 = run("(vector 3 2 1)")
assert vec1.strategy is vec2.strategy
def test_vec_strategies_object():
vec = run("(vector (cons 1 2) 2 3)")
assert isinstance(vec.strategy, ObjectVectorStrategy)
vec = run("(vector-immutable (cons 1 2) 2 3)")
assert isinstance(vec.strategy, ObjectImmutableVectorStrategy)
def test_vec_strategies_stays_fixnum():
vec = run("(let-values ([(vec) (vector 0 0 0)]) (vector-set! vec 1 5) vec)")
assert isinstance(vec.strategy, FixnumVectorStrategy)
def test_vec_strategies_stays_flonum():
vec = run("(let-values ([(vec) (vector 1.2 1.2 1.2)]) (vector-set! vec 1 5.5) vec)")
assert isinstance(vec.strategy, FlonumVectorStrategy)
vec = run("(let-values ([(vec) (vector 1.2 1.2 1.2)]) (vector-set! vec 1 0) vec)")
# Test that we can encode the full range of signed 32-bit values in the tagged
# flonum strategy
assert isinstance(vec.strategy, FlonumTaggedVectorStrategy)
vec = run("(let-values ([(vec) (vector 1.2 1.2 1.2)]) (vector-set! vec 1 %d) vec)" % (2 ** 31 - 1))
assert isinstance(vec.strategy, FlonumTaggedVectorStrategy)
vec = run("(let-values ([(vec) (vector 1.2 1.2 1.2)]) (vector-set! vec 1 %d) vec)" % (-(2 ** 31)))
assert isinstance(vec.strategy, FlonumTaggedVectorStrategy)
# Test transitions from the constant vector strategy to the tagged flonum strategy
vec = run("(let-values ([(vec) (make-vector 10 0)]) (vector-set! vec 1 1.1) vec)")
assert isinstance(vec.strategy, FlonumTaggedVectorStrategy)
vec = run("(let-values ([(vec) (make-vector 10 %d)]) (vector-set! vec 1 1.1) vec)" % (2 ** 31 - 1))
assert isinstance(vec.strategy, FlonumTaggedVectorStrategy)
vec = run("(let-values ([(vec) (make-vector 10 %d)]) (vector-set! vec 1 1.1) vec)" % (-(2 ** 31)))
assert isinstance(vec.strategy, FlonumTaggedVectorStrategy)
def test_vec_strategies_dehomogenize():
vec = run('(let-values ([(vec) (vector 1 2 3)]) (vector-set! vec 1 "Anna") vec)')
assert isinstance(vec.strategy, ObjectVectorStrategy)
vec = run('(let-values ([(vec) (make-vector 3 1)]) (vector-set! vec 1 "Anna") vec)')
assert isinstance(vec.strategy, ObjectVectorStrategy)
vec = run('(let-values ([(vec) (make-vector 3 1)]) (vector-set! vec 1 2) vec)')
assert isinstance(vec.strategy, FixnumVectorStrategy)
def test_vec_strategies_character():
vec1 = run(r"(vector #\A #\B #\C)")
assert isinstance(vec1.strategy, CharacterVectorStrategy)
vec2 = run(r"(vector #\a)")
assert isinstance(vec2.strategy, CharacterVectorStrategy)
def test_vec_strategies_stays_character():
vec = run(r"(let-values ([(vec) (vector #\A #\A #\A)]) (vector-set! vec 1 #\D) vec)")
assert isinstance(vec.strategy, CharacterVectorStrategy)
def test_vec_strategies_character_singleton():
vec1 = run(r"(vector #\A #\A #\A)")
vec2 = run(r"(vector #\B #\B)")
assert vec1.strategy is vec2.strategy
def test_vec_strategies_character_ref(doctest):
r"""
> (define v (vector #\a #\b #\c))
> (vector-ref v 0)
#\a
> (vector-ref v 1)
#\b
> (vector-ref v 2)
#\c
"""
def run_unsafe_expander(e, v):
    run("(begin (#%%require (quote #%%unsafe)) %s)" % e, v, extra="")
def run_fix_unsafe_expander(e, v):
    run_fix("(begin (#%%require (quote #%%unsafe)) %s)" % e, v, extra="")
def run_unsafe(e, v):
    run(e, v, extra="")
def run_fix_unsafe(e, v):
    run_fix(e, v, extra="")
def test_unsafe_impersonators():
ru = run_unsafe
ru_fix = run_fix_unsafe
if pytest.config.load_expander:
ru = run_unsafe_expander
ru_fix = run_fix_unsafe_expander
ru("(equal? 3 (unsafe-vector-length (impersonate-vector (vector 1 2 3) (lambda (x y z) z) (lambda (x y z) z))))", w_true)
ru("(equal? 3 (unsafe-vector-ref (impersonate-vector (vector 1 2 3) (lambda (x y z) z) (lambda (x y z) z)) 2))", w_true)
ru_fix("(let-values ([(v) (impersonate-vector (vector 1 2 3) (lambda (x y z) z) (lambda (x y z) z))]) (unsafe-vector-set! v 0 0) (unsafe-vector-ref v 0))", 0)
def test_unsafe():
ru = run_unsafe
ru_fix = run_fix_unsafe
if pytest.config.load_expander:
ru = run_unsafe_expander
ru_fix = run_fix_unsafe_expander
ru("(equal? 3 (unsafe-vector-length (vector 1 2 3)))", w_true)
ru("(equal? 3 (unsafe-vector*-length (vector 1 2 3)))", w_true)
ru("(equal? 3 (unsafe-vector-ref (vector 1 2 3) 2))", w_true)
ru("(equal? 3 (unsafe-vector*-ref (vector 1 2 3) 2))", w_true)
ru_fix("(let-values ([(v) (vector 1 2 3)]) (unsafe-vector-set! v 0 0) (unsafe-vector-ref v 0))", 0)
ru_fix("(let-values ([(v) (vector 1 2 3)]) (unsafe-vector*-set! v 0 0) (unsafe-vector*-ref v 0))", 0)
def test_vec_imp():
assert isinstance(run('(impersonate-vector (vector 1) values values)'), W_ImpVector)
#run('(vector? (chaperone-vector \'#(0 (2 2 2 2) "Anna") values values))', w_true)
run_fix("(let-values ([(v) (impersonate-vector (vector 1 2 3) values values)]) (vector-length v))", 3)
run("(let-values ([(v) (impersonate-vector (vector 1 2 3) (lambda (x y z) z) (lambda (x y z) z))]) (vector-set! v 0 0))", w_void)
run_fix("(let-values ([(v) (impersonate-vector (vector 1 2 3) (lambda (x y z) z) (lambda (x y z) z))]) (vector-set! v 0 0) (vector-length v))", 3)
run_fix("(let-values ([(v) (impersonate-vector (vector 1 2 3) (lambda (x y z) z) (lambda (x y z) z))]) (vector-set! v 0 0) (vector-ref v 0))", 0)
def test_vec_equal_imp():
run("(equal? (impersonate-vector (vector 1 2 3) (lambda (x y z) z) (lambda (x y z) z)) (vector 1 2 3))", w_true)
run("(equal? (impersonate-vector (vector 1 2 3) (lambda (x y z) z) (lambda (x y z) z)) (vector 1 2))", w_false)
run("(equal? (impersonate-vector (vector 1 2 3) (lambda (x y z) z) (lambda (x y z) z)) (vector 1 2 5))", w_false)
def test_make_vector_imp():
run_fix("(let-values ([(v) (impersonate-vector (vector) (lambda (x y z) z) (lambda (x y z) z))]) (vector-length v))", 0)
run_fix("(let-values ([(v) (impersonate-vector (make-vector 5) (lambda (x y z) z) (lambda (x y z) z))]) (vector-length v))", 5)
@pytest.mark.skipif(pytest.config.new_pycket and not pytest.config.load_expander, reason="-and- is not in kernel")
def test_bug_symbol_in_vector():
# FIXME somebody who knows expand
run("#('a)")
def test_vec_values():
run_fix("(let-values ([(a b c) (vector->values (vector 1 2 3))]) (+ a b c))", 6)
run_fix("(let-values ([(b c) (vector->values (vector 1 2 3) 1)]) (+ b c))", 5)
run_fix("(let-values ([(b) (vector->values (vector 1 2 3) 1 2)]) (+ b))", 2)
def test_flvector(doctest):
"""
! (require '#%flfxnum '#%unsafe)
> (flvector-ref (flvector 0.0) 0)
0.0
> (define v (flvector 0.0 1.0))
> (flvector-ref v 0)
0.0
> (flvector-ref v 1)
1.0
> (flvector-set! v 0 2.0)
(void)
> (flvector-ref v 0)
2.0
> (unsafe-flvector-ref v 0)
2.0
> (unsafe-flvector-set! v 0 3.0)
(void)
> (flvector-ref v 0)
3.0
> (define v2 (make-flvector 5))
> (flvector-ref v2 4)
0.0
> (define v3 (make-flvector 5 3.0))
> (flvector-ref v3 4)
3.0
"""
assert doctest
def test_flvector_set_wrong_type():
with pytest.raises(SchemeException):
run_mod("""
#lang pycket
(require '#%flfxnum '#%unsafe)
(let [(a (flvector 1.2 1.3))] (flvector-set! a 1 'a))
""")
def test_vector_copy_bang(doctest):
"""
> (define v (vector 'A 'p 'p 'l 'e))
> (define src (vector))
> (define dest (vector 1))
> (vector-copy! v 4 #(y))
> (vector-copy! v 0 v 3 4)
> v
'#(l p p l y)
> (vector-copy! v 0 #() 0 0)
> (vector-copy! dest 1 src 0)
> dest
'#(1)
"""
def test_list_vector_conversion():
check_equal(
"(vector->list #(1 2 3 4))", "(list 1 2 3 4)",
"(vector->list #())", "'()",
"(vector->list #(1.1 a))", "(list 1.1 'a)",
"#(1 2 3 4)", "(list->vector (list 1 2 3 4))",
"#()", "(list->vector '())",
"#(1.1 a)", "(list->vector (list 1.1 'a))",
)
def test_eq_and_vectors_agree(doctest):
"""
> (eq? 3.5 (vector-ref (vector 3.5) 0))
#t
"""
def test_vector_to_immutable_vector(doctest):
r"""
! (define fl-vector^ (vector-immutable 1.0 2.0 3.0))
! (define fl-vector (vector 1.0 2.0 3.0))
! (define fx-vector (vector 1 2 3))
! (define ch-vector (vector #\a #\b #\c))
> (eq? fl-vector^ (vector->immutable-vector fl-vector^))
#t
> (eq? fl-vector (vector->immutable-vector fl-vector))
#f
> (eq? fx-vector (vector->immutable-vector fx-vector))
#f
> (eq? ch-vector (vector->immutable-vector ch-vector))
#f
"""
def test_copy_vector_strategy_preserve():
vec = run("(vector->immutable-vector (vector 1.0 2.0 3.0))")
assert vec.strategy is FlonumImmutableVectorStrategy.singleton
vec = run("(vector->immutable-vector (vector 1 2 3))")
assert vec.strategy is FixnumImmutableVectorStrategy.singleton
vec = run(r"(vector->immutable-vector (vector #\a #\b #\c))")
assert vec.strategy is CharacterImmutableVectorStrategy.singleton
vec = run(r"(vector->immutable-vector (vector #\a #\b #\c 1 1.0))")
assert vec.strategy is ObjectImmutableVectorStrategy.singleton
def test_constant_strategy():
vec = run("(make-vector 10 #f)")
assert vec.strategy is ConstantVectorStrategy.singleton
vec = run("(vector->immutable-vector (make-vector 10 #t))")
assert vec.strategy is ConstantImmutableVectorStrategy.singleton
|
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import time
import threading
import logging
try:
import queue
except ImportError:
import Queue as queue
log = logging.getLogger(__name__)
class Task(object):
"""
A scheduled task that will be executed by the scheduler
after a given time interval has passed.
Attributes:
name -- The name of the task.
seconds -- The number of seconds to wait before executing.
callback -- The function to execute.
args -- The arguments to pass to the callback.
kwargs -- The keyword arguments to pass to the callback.
repeat -- Indicates if the task should repeat.
Defaults to False.
qpointer -- A pointer to an event queue for queuing callback
execution instead of executing immediately.
Methods:
run -- Either queue or execute the callback.
reset -- Reset the task's timer.
"""
def __init__(self, name, seconds, callback, args=None,
kwargs=None, repeat=False, qpointer=None):
"""
Create a new task.
Arguments:
name -- The name of the task.
seconds -- The number of seconds to wait before executing.
callback -- The function to execute.
args -- The arguments to pass to the callback.
kwargs -- The keyword arguments to pass to the callback.
repeat -- Indicates if the task should repeat.
Defaults to False.
qpointer -- A pointer to an event queue for queuing callback
execution instead of executing immediately.
"""
self.name = name
self.seconds = seconds
self.callback = callback
self.args = args or tuple()
self.kwargs = kwargs or {}
self.repeat = repeat
self.next = time.time() + self.seconds
self.qpointer = qpointer
def run(self):
"""
Execute the task's callback.
If an event queue was supplied, place the callback in the queue;
otherwise, execute the callback immediately.
"""
if self.qpointer is not None:
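            # Note: only positional args travel through the event queue;
            # kwargs are not included in the ('schedule', ...) tuple.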
self.qpointer.put(('schedule', self.callback, self.args))
else:
self.callback(*self.args, **self.kwargs)
self.reset()
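        # Returning self.repeat tells Scheduler._process whether to keep
        # this task on the schedule.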
return self.repeat
def reset(self):
"""
Reset the task's timer so that it will repeat.
"""
self.next = time.time() + self.seconds
class Scheduler(object):
"""
    A threaded scheduler that allows for updates mid-execution, unlike the
    scheduler in the standard library:
http://docs.python.org/library/sched.html#module-sched
Attributes:
addq -- A queue storing added tasks.
schedule -- A list of tasks in order of execution times.
thread -- If threaded, the thread processing the schedule.
run -- Indicates if the scheduler is running.
parentqueue -- A parent event queue in control of this scheduler.
Methods:
add -- Add a new task to the schedule.
process -- Process and schedule tasks.
quit -- Stop the scheduler.
"""
def __init__(self, parentqueue=None, parentstop=None):
"""
Create a new scheduler.
Arguments:
parentqueue -- A separate event queue controlling this scheduler.
"""
self.addq = queue.Queue()
self.schedule = []
self.thread = None
self.run = False
self.parentqueue = parentqueue
self.parentstop = parentstop
def process(self, threaded=True):
"""
Begin accepting and processing scheduled tasks.
Arguments:
threaded -- Indicates if the scheduler should execute in its own
thread. Defaults to True.
"""
if threaded:
            self.thread = threading.Thread(name='scheduler_process',
                                           target=self._process)
self.thread.start()
else:
self._process()
def _process(self):
"""Process scheduled tasks."""
self.run = True
try:
while self.run and (self.parentstop is None or not self.parentstop.isSet()):
wait = 1
updated = False
if self.schedule:
wait = self.schedule[0].next - time.time()
try:
if wait <= 0.0:
newtask = self.addq.get(False)
else:
if wait >= 3.0:
wait = 3.0
newtask = self.addq.get(True, wait)
except queue.Empty:
cleanup = []
for task in self.schedule:
if time.time() >= task.next:
updated = True
if not task.run():
cleanup.append(task)
else:
break
for task in cleanup:
                        self.schedule.remove(task)
else:
updated = True
self.schedule.append(newtask)
finally:
if updated:
self.schedule = sorted(self.schedule,
key=lambda task: task.next)
except KeyboardInterrupt:
self.run = False
if self.parentstop is not None:
log.debug("stopping parent")
self.parentstop.set()
except SystemExit:
self.run = False
if self.parentstop is not None:
self.parentstop.set()
log.debug("Quitting Scheduler thread")
if self.parentqueue is not None:
self.parentqueue.put(('quit', None, None))
def add(self, name, seconds, callback, args=None,
kwargs=None, repeat=False, qpointer=None):
"""
Schedule a new task.
Arguments:
name -- The name of the task.
seconds -- The number of seconds to wait before executing.
callback -- The function to execute.
args -- The arguments to pass to the callback.
kwargs -- The keyword arguments to pass to the callback.
repeat -- Indicates if the task should repeat.
Defaults to False.
qpointer -- A pointer to an event queue for queuing callback
execution instead of executing immediately.
"""
self.addq.put(Task(name, seconds, callback, args,
kwargs, repeat, qpointer))
def quit(self):
"""Shutdown the scheduler."""
self.run = False
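
# A minimal usage sketch (illustrative, not part of SleekXMPP): start the
# scheduler in its own thread, register a repeating task, and stop it after a
# few firings. The task name and intervals are arbitrary assumptions.
if __name__ == '__main__':
    def heartbeat():
        print('heartbeat')

    scheduler = Scheduler()
    scheduler.process(threaded=True)
    scheduler.add('heartbeat', 1.0, heartbeat, repeat=True)
    time.sleep(3.5)   # let the task fire a few times
    scheduler.quit()  # _process exits once its current queue wait times out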
|
|
"""
An alternate implementation of bound inner classes.
http://code.activestate.com/recipes/577623-bound-inner-classes-using-an-alternate-approach/
See also, the original approach:
http://code.activestate.com/recipes/577070-bound-inner-classes/
Copyright (C) 2011 by Larry Hastings
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__all__ = ["BindingOuterClass", "BoundInnerClass", "UnboundInnerClass"]
import weakref
class BindingOuterClass(object):
def __getattribute__(self, name):
attr = super(BindingOuterClass, self).__getattribute__(name)
bind_request = getattr(attr, '_binding_class', False)
suitable = isinstance(attr, type) and bind_request
if not suitable:
return attr
wrapper_bases = [attr]
        # iterate over attr's bases and look in self to see if any of them
        # have bound inner versions. if so, multiply inherit from the bound
        # inner version(s).
multiply_inherit = False
for base in attr.__bases__:
# if we can't find a bound inner base,
# add the original unbound base instead.
# this is harmless but helps preserve the original MRO.
inherit_from = base
# if we inherit from a boundinnerclass from another outer class,
# we might have the same name as a legitimate base class.
            # but if we look it up, we'll call __getattribute__ recursively... forever.
# if the name is the same as our own name, there's no way it's a
# bound inner class we need to inherit from, so just skip it.
if base.__name__ != name:
bound_inner_base = getattr(self, base.__name__, None)
if bound_inner_base:
bases = getattr(bound_inner_base, "__bases__", (None,))
# the unbound class is always the first base of
# the bound inner class.
if bases[0] == base:
inherit_from = bound_inner_base
multiply_inherit = True
wrapper_bases.append(inherit_from)
Wrapper = attr._binding_class(attr, self, wrapper_bases[0])
Wrapper.__name__ = name
# Assigning to __bases__ is startling, but it's the only way to get
# this code working simultaneously in both Python 2 and Python 3.
if multiply_inherit:
Wrapper.__bases__ = tuple(wrapper_bases)
# cache in self
setattr(self, name, Wrapper)
return Wrapper
class BoundInnerClass(object):
@staticmethod
def _binding_class(attr, outer, base):
assert outer
outer_weakref = weakref.ref(outer)
class Wrapper(base):
# occlude the original _binding_class!
# otherwise we'll recurse forever.
_binding_class = None
def __init__(self, *args, **kwargs):
attr.__init__(self, outer_weakref(), *args, **kwargs)
# give the bound inner class a nice repr
# (but only if it doesn't already have a custom repr)
if attr.__repr__ is object.__repr__:
def __repr__(self):
return "".join([
"<",
self.__module__,
".",
self.__class__.__name__,
" object bound to ",
repr(outer_weakref()),
" at ",
hex(id(self)),
">"])
return Wrapper
class UnboundInnerClass(object):
@staticmethod
def _binding_class(attr, outer, base):
class Wrapper(base):
# occlude the original _binding_class!
# otherwise we'll recurse forever.
_binding_class = None
return Wrapper
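# Unlike BoundInnerClass, UnboundInnerClass participates in the binding
# machinery (so a subclass of a bound inner class can opt back out of
# binding), but its wrapper never injects the outer instance into __init__.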
# The code in this "if" statement will only execute if you run the module
# directly; it won't run if you "import" this code into your own programs.
if __name__ == "__main__":
class Outer(BindingOuterClass):
class Inner(BoundInnerClass):
def __init__(self, outer):
self.outer = outer
class SubclassOfInner(Inner):
def __init__(self, outer):
super(Outer.SubclassOfInner, self).__init__()
assert self.outer == outer
class SubsubclassOfInner(SubclassOfInner):
def __init__(self, outer):
super(Outer.SubsubclassOfInner, self).__init__()
assert self.outer == outer
class Subclass2OfInner(Inner):
def __init__(self, outer):
super(Outer.Subclass2OfInner, self).__init__()
assert self.outer == outer
class RandomUnboundInner(object):
def __init__(self):
super(Outer.RandomUnboundInner, self).__init__()
pass
class MultipleInheritanceTest(SubclassOfInner,
RandomUnboundInner,
Subclass2OfInner):
def __init__(self, outer):
super(Outer.MultipleInheritanceTest, self).__init__()
assert self.outer == outer
class UnboundSubclassOfInner(UnboundInnerClass, Inner):
pass
def tests():
assert outer.Inner == outer.Inner
assert isinstance(inner, outer.Inner)
assert isinstance(inner, Outer.Inner)
assert isinstance(subclass, Outer.SubclassOfInner)
assert isinstance(subclass, outer.SubclassOfInner)
assert isinstance(subclass, Outer.Inner)
assert isinstance(subclass, outer.Inner)
assert isinstance(subsubclass, Outer.SubsubclassOfInner)
assert isinstance(subsubclass, outer.SubsubclassOfInner)
assert isinstance(subsubclass, Outer.SubclassOfInner)
assert isinstance(subsubclass, outer.SubclassOfInner)
assert isinstance(subsubclass, Outer.Inner)
assert isinstance(subsubclass, outer.Inner)
import itertools
for order in itertools.permutations([1, 2, 3]):
outer = Outer()
# This strange "for" statement lets us test every possible order of
# initialization for the "inner" / "subclass" / "subsubclass" objects.
for which in order:
if which == 1: inner = outer.Inner()
elif which == 2: subclass = outer.SubclassOfInner()
elif which == 3: subsubclass = outer.SubsubclassOfInner()
tests()
multiple_inheritance_test = outer.MultipleInheritanceTest()
assert outer.MultipleInheritanceTest.mro() == [
# bound inner class, notice lowercase-o "outer"
outer.MultipleInheritanceTest,
# unbound inner class, notice uppercase-o "Outer"
Outer.MultipleInheritanceTest,
outer.SubclassOfInner, # bound
Outer.SubclassOfInner, # unbound
Outer.RandomUnboundInner, # etc.
outer.Subclass2OfInner,
Outer.Subclass2OfInner,
outer.Inner,
Outer.Inner,
BoundInnerClass,
object
]
unbound = outer.UnboundSubclassOfInner()
assert outer.UnboundSubclassOfInner.mro() == [
outer.UnboundSubclassOfInner,
Outer.UnboundSubclassOfInner,
UnboundInnerClass,
outer.Inner,
Outer.Inner,
BoundInnerClass,
object
]
class InnerChild(outer.Inner):
pass
inner_child = InnerChild()
        assert isinstance(inner_child, Outer.Inner)
        assert isinstance(inner_child, InnerChild)
        assert isinstance(inner_child, outer.Inner)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2021 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import sys
from typing import List, TypeVar

# typing.T was never a public name and is gone in newer Pythons; define the
# type variable explicitly instead.
T = TypeVar('T')
from . import coap
from .coap.packet import ANY
from .coap.utils import hexlify_nonprintable, hexlify
from .path import CoapPath, Lwm2mPath, Lwm2mNonemptyPath, Lwm2mObjectPath, Lwm2mResourcePath
from .tlv import TLV
class EscapedBytes:
"""
A pseudo-type that allows parsing a hex-escaped string into a standard
bytes object when used as an argument type hint for the powercmd command
handler.
Example:
'\x61\x62\x63' - parsed as b'abc'
"""
@staticmethod
def powercmd_parse(text):
import ast
result = ast.literal_eval('b"%s"' % (text,))
return result
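# Illustrative check of the parsing above:
# EscapedBytes.powercmd_parse(r'\x61\x62\x63') == b'abc'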
def concat_if_not_any(*lists: List[T]):
    if all(lst is ANY for lst in lists):
        return ANY
    return sum((([] if lst is ANY else lst) for lst in lists), [])
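# For example (derived from the logic above): concat_if_not_any(ANY, ANY)
# is ANY, while concat_if_not_any([1], ANY, [2]) == [1, 2] -- once any
# concrete list is present, ANY entries are treated as empty.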
class Lwm2mMsg(coap.Packet):
"""
Base class of all LWM2M messages.
"""
@classmethod
def from_packet(cls, pkt: coap.Packet):
if not cls._pkt_matches(pkt):
raise TypeError('packet does not match %s' % (cls.__name__,))
msg = copy.copy(pkt)
# It's an awful hack that may explode if any Lwm2mMsg subclass
# introduces any fields not present in the coap.Packet class.
# Since all Lwm2mMsg subclasses are meant to be thin wrappers
# facilitating message creation/recognition, it should never be
# a problem.
msg.__class__ = cls
return msg
@staticmethod
def _pkt_matches(_pkt: coap.Packet):
return True
def summary(self):
def block_summary(pkt):
blk1 = pkt.get_options(coap.Option.BLOCK1)
blk2 = pkt.get_options(coap.Option.BLOCK2)
return ', '.join(['']
+ ([repr(blk1[0])] if blk1 else [])
+ ([repr(blk2[0])] if blk2 else []))
return ('%s, %s, id=%s, token=%s%s'
% (self.code,
self.type,
self.msg_id,
self.token,
block_summary(self)))
def content_summary(self):
if self.content is ANY:
return 'ANY'
else:
return shorten(hexlify_nonprintable(self.content))
@staticmethod
def _decode_text_content(content):
return 'ascii-ish:\n' + hexlify_nonprintable(content) + '\n'
@staticmethod
def _decode_binary_content(content):
return 'binary:\n' + hexlify(content) + '\n'
@staticmethod
def _decode_tlv_content(content):
try:
return str(TLV.parse(content))
except Exception as exc:
return ('(malformed TLV: %s)\n' % (exc,)
+ Lwm2mMsg._decode_binary_content(content))
@staticmethod
def _decode_json_content(content):
import json
import pprint
return str(pprint.pformat(json.loads(content)))
def _decode_content(self):
if self.content is ANY:
return ''
decoders = {
coap.ContentFormat.TEXT_PLAIN: Lwm2mMsg._decode_text_content,
coap.ContentFormat.APPLICATION_LINK: Lwm2mMsg._decode_text_content,
coap.ContentFormat.APPLICATION_LWM2M_TLV: Lwm2mMsg._decode_tlv_content,
coap.ContentFormat.APPLICATION_OCTET_STREAM: Lwm2mMsg._decode_binary_content,
coap.ContentFormat.APPLICATION_LWM2M_JSON: Lwm2mMsg._decode_json_content,
coap.ContentFormat.APPLICATION_LWM2M_SENML_JSON: Lwm2mMsg._decode_json_content,
}
desired_decoders = set()
for opt in self.get_options(coap.Option.CONTENT_FORMAT):
decoder = decoders.get(opt.content_to_int(),
Lwm2mMsg._decode_binary_content)
desired_decoders.add(decoder)
if not desired_decoders:
desired_decoders.add(decoders[coap.ContentFormat.TEXT_PLAIN])
decoded_content = ''
for decoder in desired_decoders:
decoded_content += decoder(self.content) + '\n'
return decoded_content
def details(self):
return str(self)
def __str__(self):
return '%s\n\n%s\n%s' % (self.summary(),
super().__str__(),
self._decode_content())
class Lwm2mResponse(Lwm2mMsg):
"""
Base class for all LWM2M responses.
"""
@staticmethod
def _pkt_matches(_pkt: coap.Packet):
return False
@classmethod
def matching(cls, request):
if issubclass(cls, Lwm2mEmpty):
return functools.partial(cls, msg_id=request.msg_id)
else:
return functools.partial(cls,
msg_id=request.msg_id,
token=request.token)
def is_lwm2m_nonempty_path(path):
try:
return Lwm2mNonemptyPath(path) is not None
except ValueError:
return False
def is_lwm2m_path(path):
try:
return Lwm2mPath(path) is not None
except ValueError:
return False
def is_link_format(pkt):
fmt = pkt.get_options(coap.Option.CONTENT_FORMAT)
return (fmt == [coap.Option.CONTENT_FORMAT.APPLICATION_LINK]
or (fmt == [] and pkt.content == b''))
def shorten(text):
if len(text) > 30:
return text[:27] + '...'
return text
class Lwm2mRequestBootstrap(Lwm2mMsg):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
"""Checks if the PKT is a LWM2M Request Bootstrap message."""
return (pkt.type in (None, coap.Type.CONFIRMABLE)
and pkt.code == coap.Code.REQ_POST
and '/bs?ep=' in pkt.get_full_uri())
def __init__(self,
endpoint_name: str,
preferred_content_format: int = None,
msg_id: int = ANY,
token: EscapedBytes = ANY,
uri_path: str = '',
uri_query: List[str] = None,
options: List[coap.Option] = ANY,
content: EscapedBytes = ANY):
if not uri_query:
uri_query = []
uri_query = uri_query + ['ep=' + endpoint_name]
if preferred_content_format is not None:
uri_query = uri_query + ['pct=%d' % (preferred_content_format,)]
super().__init__(type=coap.Type.CONFIRMABLE,
code=coap.Code.REQ_POST,
msg_id=msg_id,
token=token,
options=concat_if_not_any(
CoapPath(uri_path + '/bs').to_uri_options(),
[coap.Option.URI_QUERY(query)
for query in uri_query],
options),
content=content)
def summary(self):
return ('Request Bootstrap %s: %s' % (self.get_full_uri(),
self.content_summary()))
class Lwm2mBootstrapFinish(Lwm2mMsg):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
"""Checks if the PKT is a LWM2M Bootstrap Finish message."""
return (pkt.type in (None, coap.Type.CONFIRMABLE)
and pkt.code == coap.Code.REQ_POST
and pkt.get_full_uri().endswith('/bs'))
def __init__(self,
msg_id: int = ANY,
token: EscapedBytes = ANY,
options: List[coap.Option] = ANY,
content: EscapedBytes = ANY):
super().__init__(type=coap.Type.CONFIRMABLE,
code=coap.Code.REQ_POST,
msg_id=msg_id,
token=token,
options=concat_if_not_any(
CoapPath('/bs').to_uri_options(),
options),
content=content)
def summary(self):
return ('Bootstrap Finish %s: %s' % (self.get_full_uri(),
self.content_summary()))
def _split_string_path(path: str,
query: List[str] = None):
"""
Splits a CoAP PATH given as string into a path component and a list of
query strings ("foo=bar").
Returns (CoapPath, List[str]) tuple with a parsed CoapPath and a list of
query strings from the PATH concatenated with QUERY contents (if any).
"""
path_query = []
if isinstance(path, str):
if query is None:
if '?' in path:
path, query_string = path.split('?', maxsplit=1)
path_query = query_string.split('&')
path = CoapPath(path)
return path, path_query + (query or [])
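# Illustrative examples (derived from the logic above):
#   _split_string_path('/rd/5?lt=3&gt=5') -> (CoapPath('/rd/5'), ['lt=3', 'gt=5'])
#   _split_string_path('/rd/5', ['ep=dev']) -> (CoapPath('/rd/5'), ['ep=dev'])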
class Lwm2mRegister(Lwm2mMsg):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
"""Checks if the PKT is a LWM2M Register message."""
return (pkt.type in (None, coap.Type.CONFIRMABLE)
and pkt.code == coap.Code.REQ_POST
and is_link_format(pkt)
and pkt.get_uri_path().endswith('/rd'))
def __init__(self,
path: str or CoapPath,
query: List[str] = None,
msg_id: int = ANY,
token: EscapedBytes = ANY,
options: List[coap.Option] = ANY,
content: EscapedBytes = ANY):
path, query = _split_string_path(path, query)
super().__init__(type=coap.Type.CONFIRMABLE,
code=coap.Code.REQ_POST,
msg_id=msg_id,
token=token,
options=concat_if_not_any(
path.to_uri_options(),
[coap.Option.URI_QUERY(q) for q in query],
[coap.Option.CONTENT_FORMAT.APPLICATION_LINK],
options),
content=content)
def summary(self):
return ('Register %s: %s' % (self.get_full_uri(),
self.content_summary()))
class Lwm2mUpdate(Lwm2mMsg):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
"""Checks if the PKT is a LWM2M Update message."""
# Update is very similar to Execute
# assumption: Update will never be called on a path that resembles
# /OID or /OID/IID or /OID/IID/RID
return (pkt.type in (None, coap.Type.CONFIRMABLE)
and pkt.code == coap.Code.REQ_POST
and '/rd/' in pkt.get_uri_path()
and (is_link_format(pkt)
or not is_lwm2m_nonempty_path(pkt.get_full_uri())))
def __init__(self,
path: str or CoapPath,
query: List[str] = None,
msg_id: int = ANY,
token: EscapedBytes = ANY,
options: List[coap.Option] = ANY,
content: EscapedBytes = ANY):
path, query = _split_string_path(path, query)
super().__init__(type=coap.Type.CONFIRMABLE,
code=coap.Code.REQ_POST,
msg_id=msg_id,
token=token,
options=concat_if_not_any(
path.to_uri_options(),
[coap.Option.URI_QUERY(q) for q in query],
([coap.Option.CONTENT_FORMAT.APPLICATION_LINK]
if content else []),
options),
content=content)
def summary(self):
return ('Update %s: %s' % (self.get_full_uri(),
self.content_summary()))
class Lwm2mDeregister(Lwm2mMsg):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return (pkt.type in (None, coap.Type.CONFIRMABLE)
and pkt.code == coap.Code.REQ_DELETE
and not is_lwm2m_nonempty_path(pkt.get_uri_path()))
def __init__(self,
path: str or CoapPath,
msg_id: int = ANY,
token: EscapedBytes = ANY,
options: List[coap.Option] = ANY):
if isinstance(path, str):
path = CoapPath(path)
super().__init__(type=coap.Type.CONFIRMABLE,
code=coap.Code.REQ_DELETE,
msg_id=msg_id,
token=token,
options=concat_if_not_any(
path.to_uri_options(),
options))
def summary(self):
return 'De-register ' + self.get_full_uri()
class CoapGet(Lwm2mMsg):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return (pkt.type in (None, coap.Type.CONFIRMABLE)
and pkt.code == coap.Code.REQ_GET)
def __init__(self,
path: str or CoapPath,
accept: coap.AcceptOption = None,
msg_id: int = ANY,
token: EscapedBytes = ANY,
options: List[coap.Option] = ANY):
if isinstance(path, str):
path = CoapPath(path)
if isinstance(accept, int):
accept = coap.Option.ACCEPT(accept)
super().__init__(type=coap.Type.CONFIRMABLE,
code=coap.Code.REQ_GET,
msg_id=msg_id,
token=token,
options=concat_if_not_any(
path.to_uri_options(),
([accept] if accept is not None else []),
options))
def summary(self):
text = 'GET ' + self.get_full_uri()
accept = self.get_options(coap.Option.ACCEPT)
if accept:
accept_vals = [x.content_to_int() for x in accept]
text += ': accept ' + \
', '.join(map(coap.ContentFormat.to_str, accept_vals))
return text
class Lwm2mRead(CoapGet):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return (CoapGet._pkt_matches(pkt)
and is_lwm2m_nonempty_path(pkt.get_uri_path())
and not is_link_format(pkt))
def __init__(self,
path: str or Lwm2mNonemptyPath,
accept: coap.AcceptOption = None,
msg_id: int = ANY,
token: EscapedBytes = ANY,
options: List[coap.Option] = ANY):
if isinstance(path, str):
path = Lwm2mNonemptyPath(path)
super().__init__(path=path, accept=accept, msg_id=msg_id,
token=token, options=options)
def summary(self):
text = 'Read ' + self.get_full_uri()
accept = self.get_options(coap.Option.ACCEPT)
if accept:
accept_vals = [x.content_to_int() for x in accept]
text += ': accept ' + \
', '.join(map(coap.ContentFormat.to_str, accept_vals))
return text
class Lwm2mObserve(Lwm2mRead):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return (Lwm2mRead._pkt_matches(pkt)
and len(pkt.get_options(coap.Option.OBSERVE)) > 0)
def __init__(self,
path: str or Lwm2mNonemptyPath,
observe: int = 0,
accept: coap.AcceptOption = None,
msg_id: int = ANY,
token: EscapedBytes = ANY,
options: List[coap.Option] = ANY):
if isinstance(path, str):
path = Lwm2mNonemptyPath(path)
if isinstance(accept, int):
accept = coap.Option.ACCEPT(accept)
super().__init__(path=path,
accept=accept,
msg_id=msg_id,
token=token,
options=concat_if_not_any(
[coap.Option.OBSERVE(observe)],
options))
def summary(self):
opt = self.get_options(coap.Option.OBSERVE)
if len(opt) > 1:
text = 'Observe %s (multiple Observe options)' % (
self.get_full_uri(),)
else:
opt = opt[0]
if opt.content_to_int() == 0:
text = 'Observe ' + self.get_full_uri()
elif opt.content_to_int() == 1:
text = 'Cancel Observation ' + self.get_full_uri()
else:
text = 'Observe %s (invalid Observe value: %d)' % (
self.get_full_uri(), opt.content_to_int())
accept = self.get_options(coap.Option.ACCEPT)
if accept:
accept_vals = [x.content_to_int() for x in accept]
text += ': accept ' + \
', '.join(map(coap.ContentFormat.to_str, accept_vals))
return text
class Lwm2mDiscover(CoapGet):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return (CoapGet._pkt_matches(pkt)
and is_lwm2m_nonempty_path(pkt.get_uri_path())
and is_link_format(pkt))
def __init__(self,
path: str or Lwm2mPath,
msg_id: int = ANY,
token: EscapedBytes = ANY,
options: List[coap.Option] = ANY):
super().__init__(path=path,
msg_id=msg_id,
token=token,
accept=coap.Option.ACCEPT.APPLICATION_LINK,
options=options)
def summary(self):
return 'Discover ' + self.get_full_uri()
class Lwm2mWrite(Lwm2mMsg):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return (pkt.type in (None, coap.Type.CONFIRMABLE)
and pkt.code in (coap.Code.REQ_PUT, coap.Code.REQ_POST)
and is_lwm2m_nonempty_path(pkt.get_uri_path())
and not is_link_format(pkt))
def __init__(self,
path: str or Lwm2mNonemptyPath,
content: EscapedBytes,
format: coap.ContentFormatOption = coap.ContentFormat.TEXT_PLAIN,
update: bool = False,
msg_id: int = ANY,
token: EscapedBytes = ANY,
options: List[coap.Option] = ANY):
if isinstance(path, str):
path = Lwm2mNonemptyPath(path)
if isinstance(content, str):
content = bytes(content, 'ascii')
if isinstance(format, int):
format = coap.Option.CONTENT_FORMAT(format)
super().__init__(type=coap.Type.CONFIRMABLE,
code=(coap.Code.REQ_POST if update else coap.Code.REQ_PUT),
msg_id=msg_id,
token=token,
options=concat_if_not_any(
path.to_uri_options(),
[format],
options),
content=content)
def summary(self):
fmt_vals = [x.content_to_int()
for x in self.get_options(coap.Option.CONTENT_FORMAT)]
fmt = ', '.join(map(coap.ContentFormat.to_str, fmt_vals))
return ('Write%s %s: %s, %d bytes'
% (' (update)' if self.code == coap.Code.REQ_POST else '',
self.get_full_uri(), fmt, len(self.content)))
class Lwm2mWriteAttributes(Lwm2mMsg):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return (pkt.type in (None, coap.Type.CONFIRMABLE)
and pkt.code == coap.Code.REQ_PUT
and is_lwm2m_nonempty_path(pkt.get_uri_path())
and pkt.get_content_format() is None)
def __init__(self,
path: str or Lwm2mNonemptyPath,
lt: float = None,
gt: float = None,
st: float = None,
pmin: int = None,
pmax: int = None,
epmin: int = None,
epmax: int = None,
query: List[str] = None,
msg_id: int = ANY,
token: EscapedBytes = ANY,
options: List[coap.Option] = ANY):
path, query = _split_string_path(path, query)
if lt is not None:
query.append('lt=%f' % (lt,))
if gt is not None:
query.append('gt=%f' % (gt,))
if st is not None:
query.append('st=%f' % (st,))
if pmin is not None:
query.append('pmin=%d' % (pmin,))
if pmax is not None:
query.append('pmax=%d' % (pmax,))
if epmin is not None:
query.append('epmin=%d' % (epmin,))
if epmax is not None:
query.append('epmax=%d' % (epmax,))
super().__init__(type=coap.Type.CONFIRMABLE,
code=coap.Code.REQ_PUT,
msg_id=msg_id,
token=token,
options=concat_if_not_any(
path.to_uri_options(),
[coap.Option.URI_QUERY(x) for x in query],
options),
content=b'')
def summary(self):
attrs = ', '.join(x.content_to_str()
for x in self.get_options(coap.Option.URI_QUERY))
return 'Write Attributes %s: %s' % (self.get_full_uri(), attrs)
class Lwm2mExecute(Lwm2mMsg):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return (pkt.type in (None, coap.Type.CONFIRMABLE)
and pkt.code == coap.Code.REQ_POST
and is_lwm2m_nonempty_path(pkt.get_full_uri())
and pkt.get_content_format() is None)
def __init__(self,
path: str or Lwm2mResourcePath,
content: EscapedBytes = b'',
msg_id: int = ANY,
token: EscapedBytes = ANY,
options: List[coap.Option] = ANY):
if isinstance(path, str):
path = Lwm2mResourcePath(path)
super().__init__(type=coap.Type.CONFIRMABLE,
code=coap.Code.REQ_POST,
msg_id=msg_id,
token=token,
options=concat_if_not_any(path.to_uri_options(),
options),
content=content)
def summary(self):
return ('Execute %s: %s' % (self.get_full_uri(),
self.content_summary()))
class Lwm2mCreate(Lwm2mMsg):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return (pkt.type in (None, coap.Type.CONFIRMABLE)
and pkt.code == coap.Code.REQ_POST
and is_lwm2m_nonempty_path(pkt.get_uri_path())
and pkt.get_content_format() == coap.ContentFormat.APPLICATION_LWM2M_TLV)
def __init__(self,
path: str or Lwm2mObjectPath,
content: EscapedBytes = b'',
msg_id: int = ANY,
token: EscapedBytes = ANY,
options: List[coap.Option] = ANY,
format: coap.ContentFormatOption = coap.Option.CONTENT_FORMAT.APPLICATION_LWM2M_TLV):
if isinstance(path, str):
path = Lwm2mObjectPath(path)
if isinstance(format, int):
format = coap.Option.CONTENT_FORMAT(format)
super().__init__(type=coap.Type.CONFIRMABLE,
code=coap.Code.REQ_POST,
msg_id=msg_id,
token=token,
options=concat_if_not_any(
path.to_uri_options(),
[format],
options),
content=content)
def summary(self):
return 'Create %s: %s' % (self.get_full_uri(), self.content_summary())
class Lwm2mDelete(Lwm2mMsg):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
        # TODO: this should be done by checking the packet source/target:
        # if REQ_DELETE is sent by the server, it's Delete; otherwise it's
        # De-Register.
# assumption: De-Register will never be called on a path
# that resembles /OID/IID
return (pkt.type in (None, coap.Type.CONFIRMABLE)
and pkt.code == coap.Code.REQ_DELETE
and is_lwm2m_path(pkt.get_uri_path()))
def __init__(self,
path: str or Lwm2mPath,
msg_id: int = ANY,
token: EscapedBytes = ANY,
options: List[coap.Option] = ANY):
if isinstance(path, str):
path = Lwm2mPath(path)
if path.resource_id is not None:
raise ValueError(
'LWM2M Resource path is not applicable to a Delete: %s' % (path,))
super().__init__(type=coap.Type.CONFIRMABLE,
code=coap.Code.REQ_DELETE,
msg_id=msg_id,
token=token,
options=concat_if_not_any(path.to_uri_options(),
options))
def summary(self):
return 'Delete ' + self.get_full_uri()
# Classes defined below are responses that should be matched to some request.
# Therefore, msg_id and token in the constructor are mandatory.
class Lwm2mContent(Lwm2mResponse):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return pkt.code == coap.Code.RES_CONTENT
def __init__(self,
msg_id: int,
token: EscapedBytes,
content: EscapedBytes = ANY,
format: coap.ContentFormatOption = ANY,
type: coap.Type = coap.Type.ACKNOWLEDGEMENT,
options: List[coap.Option] = ANY):
if isinstance(format, int):
format = coap.Option.CONTENT_FORMAT(format)
all_opts = [format] if format is not ANY else ANY
if options is not ANY:
all_opts = (all_opts + options) if all_opts is not ANY else options
super().__init__(type=type,
code=coap.Code.RES_CONTENT,
msg_id=msg_id,
token=token,
options=all_opts,
content=content)
def make_content_summary(self):
format_opts = self.get_options(coap.Option.CONTENT_FORMAT)
if format_opts:
fmt = format_opts[0]
else:
fmt = coap.Option.CONTENT_FORMAT.TEXT_PLAIN
if fmt == coap.Option.CONTENT_FORMAT.TEXT_PLAIN:
return self.content_summary()
else:
return '(%s; %d bytes)' % (fmt.content_to_str(), len(self.content))
def summary(self):
return 'Content ' + self.make_content_summary()
class Lwm2mNotify(Lwm2mContent):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return (Lwm2mContent._pkt_matches(pkt)
and pkt.get_options(coap.Option.OBSERVE))
def __init__(self,
token: EscapedBytes,
content: EscapedBytes = ANY,
format: coap.ContentFormatOption = ANY,
confirmable: bool = False,
options: List[coap.Option] = ANY):
if isinstance(format, int):
format = coap.Option.CONTENT_FORMAT(format)
super().__init__(type=coap.Type.CONFIRMABLE if confirmable else coap.Type.NON_CONFIRMABLE,
msg_id=ANY,
token=token,
content=content,
format=format,
options=options)
def summary(self):
observe_opts = self.get_options(coap.Option.OBSERVE)
seq = '/'.join(str(opt.content_to_int()) for opt in observe_opts)
return 'Notify (%s, seq %s, token %s) %s' % (
str(self.type), seq, hexlify(self.token), self.make_content_summary())
class Lwm2mCreated(Lwm2mResponse):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return pkt.code == coap.Code.RES_CREATED
def __init__(self,
msg_id: int,
token: EscapedBytes,
location: str or CoapPath = ANY,
options: List[coap.Option] = ANY):
if isinstance(location, str):
location = CoapPath(location)
super().__init__(type=coap.Type.ACKNOWLEDGEMENT,
code=coap.Code.RES_CREATED,
msg_id=msg_id,
token=token,
options=concat_if_not_any(
location.to_uri_options(
opt=coap.Option.LOCATION_PATH),
options))
def summary(self):
location = self.get_location_path()
return 'Created ' + (location or '(no location-path)')
class Lwm2mDeleted(Lwm2mResponse):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return pkt.code == coap.Code.RES_DELETED
def __init__(self,
msg_id: int,
token: EscapedBytes,
location: str or CoapPath = ANY,
options: List[coap.Option] = ANY):
if location is not ANY and isinstance(location, str):
location = CoapPath(location)
super().__init__(type=coap.Type.ACKNOWLEDGEMENT,
code=coap.Code.RES_DELETED,
msg_id=msg_id,
token=token,
options=concat_if_not_any(
location.to_uri_options(
coap.Option.LOCATION_PATH),
options))
def summary(self):
location = self.get_location_path()
return 'Deleted ' + (location or '(no location-path)')
class Lwm2mChanged(Lwm2mResponse):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return pkt.code == coap.Code.RES_CHANGED
def __init__(self,
msg_id: int,
token: EscapedBytes,
location: str or CoapPath = ANY,
options: List[coap.Option] = ANY,
content: EscapedBytes = ANY):
if location is not ANY and isinstance(location, str):
location = CoapPath(location)
super().__init__(type=coap.Type.ACKNOWLEDGEMENT,
code=coap.Code.RES_CHANGED,
msg_id=msg_id,
token=token,
options=concat_if_not_any(
location.to_uri_options(
coap.Option.LOCATION_PATH),
options),
content=content)
def summary(self):
location = self.get_location_path()
        return 'Changed ' + (location or '(no location-path)')
class Lwm2mErrorResponse(Lwm2mResponse):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return (pkt.type in (None, coap.Type.ACKNOWLEDGEMENT)
and pkt.code.cls in (4, 5))
def __init__(self,
code: coap.Code,
msg_id: int,
token: EscapedBytes,
options: List[coap.Option] = ANY):
if code.cls not in (4, 5):
raise ValueError('Error responses must have code class 4 or 5')
super().__init__(type=coap.Type.ACKNOWLEDGEMENT,
code=code,
msg_id=msg_id,
token=token,
options=options)
def summary(self):
content_str = shorten(
hexlify_nonprintable(self.content)) if self.content else '(no details available)'
return '%s: %s' % (str(self.code), content_str)
class Lwm2mEmpty(Lwm2mResponse):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return (pkt.code == coap.Code.EMPTY
and pkt.token == b''
and pkt.options == []
and pkt.content == b'')
def __init__(self,
type: coap.Type = coap.Type.ACKNOWLEDGEMENT,
msg_id: int = ANY):
super().__init__(type=type,
code=coap.Code.EMPTY,
msg_id=msg_id,
token=b'',
content=b'')
def summary(self):
return 'Empty %s, msg_id = %d' % (str(self.type), self.msg_id)
class Lwm2mReset(Lwm2mEmpty):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return (Lwm2mEmpty._pkt_matches(pkt)
and pkt.type == coap.Type.RESET)
def __init__(self, msg_id: int = ANY):
super().__init__(msg_id=msg_id, type=coap.Type.RESET)
def summary(self):
return 'Reset, msg_id = %d' % (self.msg_id,)
class Lwm2mContinue(Lwm2mResponse):
@staticmethod
def _pkt_matches(pkt: coap.Packet):
return pkt.code == coap.Code.RES_CONTINUE
def __init__(self,
msg_id: int,
token: EscapedBytes,
type: coap.Type = coap.Type.ACKNOWLEDGEMENT,
options: List[coap.Option] = ANY):
super().__init__(type=type,
code=coap.Code.RES_CONTINUE,
msg_id=msg_id,
token=token,
options=options)
def summary(self):
        return 'Continue, msg_id = %s, token = %s' \
               % (self.msg_id if self.msg_id is not None else 'None', self.token)
def _get_ordered_types_list():
def _sequence_preserving_uniq(seq):
seen = set()
return [x for x in seq if not (x in seen or seen.add(x))]
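    # e.g. _sequence_preserving_uniq([1, 2, 1, 3, 2]) == [1, 2, 3]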
msg_subclasses = [v for v in sys.modules[__name__].__dict__.values()
if isinstance(v, type) and issubclass(v, Lwm2mMsg)]
# for each Lwm2mMsg subclass, list the subclass and all its base classes
# up to and including Lwm2mMsg
# make sure that bases are before subclasses
types = []
for cls in msg_subclasses:
types += [base for base in reversed(cls.mro())
if issubclass(base, Lwm2mMsg)]
# leave only the first occurrence of every class
# reverse the result so that subclasses are always first
ordered_types = list(reversed(_sequence_preserving_uniq(types)))
# sanity check: for any class in ORDERED_TYPES all its subclasses are
# BEFORE it on the list
for left_idx, left in enumerate(ordered_types):
for right in ordered_types[left_idx + 1:]:
assert not issubclass(right, left)
return ordered_types
TYPES = _get_ordered_types_list()
def get_lwm2m_msg(pkt: coap.Packet):
for t in TYPES:
try:
return t.from_packet(pkt)
except TypeError:
pass
raise ValueError('should never happen')
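# Usage sketch (illustrative): get_lwm2m_msg() upgrades a raw coap.Packet to
# the most specific Lwm2mMsg subclass whose _pkt_matches() accepts it, e.g. a
# CONFIRMABLE POST to /bs?ep=... comes back as Lwm2mRequestBootstrap;
# anything unrecognized falls through to plain Lwm2mMsg.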
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version >= '3':
basestring = unicode = str
from py4j.java_gateway import JavaClass
from pyspark import RDD, since, keyword_only
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.column import _to_seq
from pyspark.sql.types import *
from pyspark.sql import utils
__all__ = ["DataFrameReader", "DataFrameWriter"]
def to_str(value):
"""
A wrapper over str(), but converts bool values to lower case strings.
If None is given, just returns None, instead of converting it to string "None".
"""
if isinstance(value, bool):
return str(value).lower()
elif value is None:
return value
else:
return str(value)
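# For example: to_str(True) == 'true', to_str(None) is None,
# and to_str(1) == '1'.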
class OptionUtils(object):
def _set_opts(self, schema=None, **options):
"""
        Set named options (filtering out those whose value is None).
"""
if schema is not None:
self.schema(schema)
for k, v in options.items():
if v is not None:
self.option(k, v)
class DataFrameReader(OptionUtils):
"""
Interface used to load a :class:`DataFrame` from external storage systems
    (e.g. file systems, key-value stores, etc.). Use :func:`spark.read`
to access this.
.. versionadded:: 1.4
"""
def __init__(self, spark):
self._jreader = spark._ssql_ctx.read()
self._spark = spark
def _df(self, jdf):
from pyspark.sql.dataframe import DataFrame
return DataFrame(jdf, self._spark)
@since(1.4)
def format(self, source):
"""Specifies the input data source format.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df = spark.read.format('json').load('python/test_support/sql/people.json')
>>> df.dtypes
[('age', 'bigint'), ('name', 'string')]
"""
self._jreader = self._jreader.format(source)
return self
@since(1.4)
def schema(self, schema):
"""Specifies the input schema.
Some data sources (e.g. JSON) can infer the input schema automatically from data.
By specifying the schema here, the underlying data source can skip the schema
inference step, and thus speed up data loading.
:param schema: a :class:`pyspark.sql.types.StructType` object
"""
from pyspark.sql import SparkSession
if not isinstance(schema, StructType):
raise TypeError("schema should be StructType")
spark = SparkSession.builder.getOrCreate()
jschema = spark._jsparkSession.parseDataType(schema.json())
self._jreader = self._jreader.schema(jschema)
return self
@since(1.5)
def option(self, key, value):
"""Adds an input option for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
self._jreader = self._jreader.option(key, to_str(value))
return self
@since(1.4)
def options(self, **options):
"""Adds input options for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jreader = self._jreader.option(k, to_str(options[k]))
return self
@since(1.4)
def load(self, path=None, format=None, schema=None, **options):
"""Loads data from a data source and returns it as a :class`DataFrame`.
:param path: optional string or a list of string for file-system backed data sources.
:param format: optional string for format of the data source. Default to 'parquet'.
:param schema: optional :class:`pyspark.sql.types.StructType` for the input schema.
:param options: all other string options
>>> df = spark.read.load('python/test_support/sql/parquet_partitioned', opt1=True,
... opt2=1, opt3='str')
>>> df.dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
>>> df = spark.read.format('json').load(['python/test_support/sql/people.json',
... 'python/test_support/sql/people1.json'])
>>> df.dtypes
[('age', 'bigint'), ('aka', 'string'), ('name', 'string')]
"""
if format is not None:
self.format(format)
if schema is not None:
self.schema(schema)
self.options(**options)
if isinstance(path, basestring):
return self._df(self._jreader.load(path))
elif path is not None:
if type(path) != list:
path = [path]
return self._df(self._jreader.load(self._spark._sc._jvm.PythonUtils.toSeq(path)))
else:
return self._df(self._jreader.load())
@since(1.4)
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None,
allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None,
mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None,
wholeFile=None):
"""
Loads JSON files and returns the results as a :class:`DataFrame`.
`JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default.
For JSON (one record per file), set the ``wholeFile`` parameter to ``true``.
If the ``schema`` parameter is not specified, this function goes
through the input once to determine the input schema.
:param path: string represents path to the JSON dataset, or a list of paths,
or RDD of Strings storing JSON objects.
:param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema.
:param primitivesAsString: infers all primitive values as a string type. If None is set,
it uses the default value, ``false``.
:param prefersDecimal: infers all floating-point values as a decimal type. If the values
do not fit in decimal, then it infers them as doubles. If None is
set, it uses the default value, ``false``.
:param allowComments: ignores Java/C++ style comment in JSON records. If None is set,
it uses the default value, ``false``.
:param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set,
it uses the default value, ``false``.
:param allowSingleQuotes: allows single quotes in addition to double quotes. If None is
set, it uses the default value, ``true``.
:param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is
set, it uses the default value, ``false``.
:param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character
using backslash quoting mechanism. If None is
set, it uses the default value, ``false``.
:param mode: allows a mode for dealing with corrupt records during parsing. If None is
set, it uses the default value, ``PERMISSIVE``.
* ``PERMISSIVE`` : sets other fields to ``null`` when it meets a corrupted \
record, and puts the malformed string into a field configured by \
                  ``columnNameOfCorruptRecord``. To keep corrupt records, a user can set \
                  a string type field named ``columnNameOfCorruptRecord`` in a user-defined \
schema. If a schema does not have the field, it drops corrupt records during \
parsing. When inferring a schema, it implicitly adds a \
``columnNameOfCorruptRecord`` field in an output schema.
                * ``DROPMALFORMED`` : ignores corrupted records entirely.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
:param columnNameOfCorruptRecord: allows renaming the new field having malformed string
created by ``PERMISSIVE`` mode. This overrides
``spark.sql.columnNameOfCorruptRecord``. If None is set,
it uses the value specified in
``spark.sql.columnNameOfCorruptRecord``.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param wholeFile: parse one record, which may span multiple lines, per file. If None is
set, it uses the default value, ``false``.
>>> df1 = spark.read.json('python/test_support/sql/people.json')
>>> df1.dtypes
[('age', 'bigint'), ('name', 'string')]
>>> rdd = sc.textFile('python/test_support/sql/people.json')
>>> df2 = spark.read.json(rdd)
>>> df2.dtypes
[('age', 'bigint'), ('name', 'string')]
"""
self._set_opts(
schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal,
allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames,
allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero,
allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter,
mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat,
timestampFormat=timestampFormat, wholeFile=wholeFile)
if isinstance(path, basestring):
path = [path]
if type(path) == list:
return self._df(self._jreader.json(self._spark._sc._jvm.PythonUtils.toSeq(path)))
elif isinstance(path, RDD):
def func(iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = path.mapPartitions(func)
keyed._bypass_serializer = True
jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString())
return self._df(self._jreader.json(jrdd))
else:
raise TypeError("path can be only string, list or RDD")
@since(1.4)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:param tableName: string, name of the table.
>>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned')
>>> df.createOrReplaceTempView('tmpTable')
>>> spark.read.table('tmpTable').dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
"""
return self._df(self._jreader.table(tableName))
@since(1.4)
def parquet(self, *paths):
"""Loads Parquet files, returning the result as a :class:`DataFrame`.
You can set the following Parquet-specific option(s) for reading Parquet files:
* ``mergeSchema``: sets whether we should merge schemas collected from all \
Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \
The default value is specified in ``spark.sql.parquet.mergeSchema``.
>>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned')
>>> df.dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
"""
return self._df(self._jreader.parquet(_to_seq(self._spark._sc, paths)))
@ignore_unicode_prefix
@since(1.6)
def text(self, paths):
"""
Loads text files and returns a :class:`DataFrame` whose schema starts with a
string column named "value", and followed by partitioned columns if there
are any.
Each line in the text file is a new row in the resulting DataFrame.
:param paths: string, or list of strings, for input path(s).
>>> df = spark.read.text('python/test_support/sql/text-test.txt')
>>> df.collect()
[Row(value=u'hello'), Row(value=u'this')]
"""
if isinstance(paths, basestring):
paths = [paths]
return self._df(self._jreader.text(self._spark._sc._jvm.PythonUtils.toSeq(paths)))
@since(2.0)
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None,
comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None,
ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None,
negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None,
maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None,
columnNameOfCorruptRecord=None, wholeFile=None):
"""Loads a CSV file and returns the result as a :class:`DataFrame`.
        This function will go through the input once to determine the input schema if
        ``inferSchema`` is enabled. To avoid this extra pass over the data, disable the
        ``inferSchema`` option or specify the schema explicitly using ``schema``.
:param path: string, or list of strings, for input path(s).
:param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema.
:param sep: sets the single character as a separator for each field and value.
If None is set, it uses the default value, ``,``.
:param encoding: decodes the CSV files by the given encoding type. If None is set,
it uses the default value, ``UTF-8``.
        :param quote: sets the single character used for escaping quoted values where the
                      separator can be part of the value. If None is set, it uses the default
                      value, ``"``. If you would like to turn off quotations, you need to set
                      it to an empty string.
:param escape: sets the single character used for escaping quotes inside an already
quoted value. If None is set, it uses the default value, ``\``.
:param comment: sets the single character used for skipping lines beginning with this
character. By default (None), it is disabled.
:param header: uses the first line as names of columns. If None is set, it uses the
default value, ``false``.
:param inferSchema: infers the input schema automatically from data. It requires one extra
pass over the data. If None is set, it uses the default value, ``false``.
:param ignoreLeadingWhiteSpace: A flag indicating whether or not leading whitespaces from
values being read should be skipped. If None is set, it
uses the default value, ``false``.
:param ignoreTrailingWhiteSpace: A flag indicating whether or not trailing whitespaces from
values being read should be skipped. If None is set, it
uses the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
the default value, empty string. Since 2.0.1, this ``nullValue`` param
applies to all supported types including the string type.
:param nanValue: sets the string representation of a non-number value. If None is set, it
uses the default value, ``NaN``.
:param positiveInf: sets the string representation of a positive infinity value. If None
is set, it uses the default value, ``Inf``.
        :param negativeInf: sets the string representation of a negative infinity value. If None
                            is set, it uses the default value, ``-Inf``.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param maxColumns: defines a hard limit of how many columns a record can have. If None is
set, it uses the default value, ``20480``.
:param maxCharsPerColumn: defines the maximum number of characters allowed for any given
value being read. If None is set, it uses the default value,
``-1`` meaning unlimited length.
:param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0.
If specified, it is ignored.
:param mode: allows a mode for dealing with corrupt records during parsing. If None is
set, it uses the default value, ``PERMISSIVE``.
             * ``PERMISSIVE`` : sets other fields to ``null`` when it meets a corrupted \
               record, and puts the malformed string into a field configured by \
               ``columnNameOfCorruptRecord``. To keep corrupt records, a user can set \
               a string type field named ``columnNameOfCorruptRecord`` in a \
               user-defined schema. If the schema does not have such a field, corrupt \
               records are dropped during parsing. When the length of parsed CSV tokens \
               is shorter than the expected length of the schema, it sets ``null`` for \
               the extra fields.
* ``DROPMALFORMED`` : ignores the whole corrupted records.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
:param columnNameOfCorruptRecord: allows renaming the new field having malformed string
created by ``PERMISSIVE`` mode. This overrides
``spark.sql.columnNameOfCorruptRecord``. If None is set,
it uses the value specified in
``spark.sql.columnNameOfCorruptRecord``.
:param wholeFile: parse records, which may span multiple lines. If None is
set, it uses the default value, ``false``.
>>> df = spark.read.csv('python/test_support/sql/ages.csv')
>>> df.dtypes
[('_c0', 'string'), ('_c1', 'string')]
"""
self._set_opts(
schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment,
header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue,
nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf,
dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns,
maxCharsPerColumn=maxCharsPerColumn,
maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode,
columnNameOfCorruptRecord=columnNameOfCorruptRecord, wholeFile=wholeFile)
if isinstance(path, basestring):
path = [path]
return self._df(self._jreader.csv(self._spark._sc._jvm.PythonUtils.toSeq(path)))
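    # Hedged sketch: a permissive CSV read that keeps malformed rows, as the
    # ``PERMISSIVE`` mode above describes. The schema must contain a string
    # field matching ``columnNameOfCorruptRecord``; the path and names below
    # are hypothetical.
    def _example_read_csv_permissive(self):
        from pyspark.sql.types import StructType, StructField, StringType
        schema = StructType([
            StructField("name", StringType(), True),
            StructField("age", StringType(), True),
            StructField("_corrupt", StringType(), True),
        ])
        return self.csv("/tmp/ages.csv", schema=schema, header=True,
                        mode="PERMISSIVE", columnNameOfCorruptRecord="_corrupt")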
@since(1.5)
def orc(self, path):
"""Loads ORC files, returning the result as a :class:`DataFrame`.
.. note:: Currently ORC support is only available together with Hive support.
>>> df = spark.read.orc('python/test_support/sql/orc_partitioned')
>>> df.dtypes
[('a', 'bigint'), ('b', 'int'), ('c', 'int')]
"""
if isinstance(path, basestring):
path = [path]
return self._df(self._jreader.orc(_to_seq(self._spark._sc, path)))
@since(1.4)
def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None,
predicates=None, properties=None):
"""
Construct a :class:`DataFrame` representing the database table named ``table``
accessible via JDBC URL ``url`` and connection ``properties``.
        Partitions of the table will be retrieved in parallel if either ``column`` or
        ``predicates`` is specified. ``lowerBound``, ``upperBound`` and ``numPartitions``
        are needed when ``column`` is specified.
If both ``column`` and ``predicates`` are specified, ``column`` will be used.
.. note:: Don't create too many partitions in parallel on a large cluster; \
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: the name of the table
:param column: the name of an integer column that will be used for partitioning;
if this parameter is specified, then ``numPartitions``, ``lowerBound``
(inclusive), and ``upperBound`` (exclusive) will form partition strides
for generated WHERE clause expressions used to split the column
``column`` evenly
:param lowerBound: the minimum value of ``column`` used to decide partition stride
:param upperBound: the maximum value of ``column`` used to decide partition stride
:param numPartitions: the number of partitions
:param predicates: a list of expressions suitable for inclusion in WHERE clauses;
each one defines one partition of the :class:`DataFrame`
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
:return: a DataFrame
"""
if properties is None:
properties = dict()
jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
for k in properties:
jprop.setProperty(k, properties[k])
if column is not None:
assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified"
assert upperBound is not None, "upperBound can not be None when ``column`` is specified"
assert numPartitions is not None, \
"numPartitions can not be None when ``column`` is specified"
return self._df(self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound),
int(numPartitions), jprop))
if predicates is not None:
gateway = self._spark._sc._gateway
jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates)
return self._df(self._jreader.jdbc(url, table, jpredicates, jprop))
return self._df(self._jreader.jdbc(url, table, jprop))
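    # Hedged sketch: the two parallel JDBC read strategies documented above.
    # All connection details are hypothetical placeholders.
    def _example_partitioned_jdbc_reads(self):
        props = {'user': 'SYSTEM', 'password': 'mypassword'}
        # Stride-based partitioning: 8 tasks, each reading one slice of
        # [lowerBound, upperBound) on the integer column ``id``.
        by_column = self.jdbc(
            "jdbc:postgresql://localhost/testdb", "events",
            column="id", lowerBound=0, upperBound=1000000, numPartitions=8,
            properties=props)
        # Predicate-based partitioning: one partition per WHERE clause.
        by_predicates = self.jdbc(
            "jdbc:postgresql://localhost/testdb", "events",
            predicates=["region = 'EU'", "region = 'US'"],
            properties=props)
        return by_column, by_predicates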
class DataFrameWriter(OptionUtils):
"""
Interface used to write a :class:`DataFrame` to external storage systems
(e.g. file systems, key-value stores, etc). Use :func:`DataFrame.write`
to access this.
.. versionadded:: 1.4
"""
def __init__(self, df):
self._df = df
self._spark = df.sql_ctx
self._jwrite = df._jdf.write()
def _sq(self, jsq):
from pyspark.sql.streaming import StreamingQuery
return StreamingQuery(jsq)
@since(1.4)
def mode(self, saveMode):
"""Specifies the behavior when data or table already exists.
Options include:
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
# At the JVM side, the default value of mode is already set to "error".
# So, if the given saveMode is None, we will not call JVM-side's mode method.
if saveMode is not None:
self._jwrite = self._jwrite.mode(saveMode)
return self
@since(1.4)
def format(self, source):
"""Specifies the underlying output data source.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self._jwrite = self._jwrite.format(source)
return self
@since(1.5)
def option(self, key, value):
"""Adds an output option for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
self._jwrite = self._jwrite.option(key, to_str(value))
return self
@since(1.4)
def options(self, **options):
"""Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self
@since(1.4)
def partitionBy(self, *cols):
"""Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
:param cols: name of columns
>>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
return self
@since(2.3)
def bucketBy(self, numBuckets, col, *cols):
"""Buckets the output by the given columns.If specified,
the output is laid out on the file system similar to Hive's bucketing scheme.
:param numBuckets: the number of buckets to save
:param col: a name of a column, or a list of names.
:param cols: additional names (optional). If `col` is a list it should be empty.
.. note:: Applicable for file-based data sources in combination with
:py:meth:`DataFrameWriter.saveAsTable`.
>>> (df.write.format('parquet')
... .bucketBy(100, 'year', 'month')
... .mode("overwrite")
... .saveAsTable('bucketed_table'))
"""
if not isinstance(numBuckets, int):
raise TypeError("numBuckets should be an int, got {0}.".format(type(numBuckets)))
if isinstance(col, (list, tuple)):
if cols:
raise ValueError("col is a {0} but cols are not empty".format(type(col)))
col, cols = col[0], col[1:]
if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)):
raise TypeError("all names should be `str`")
self._jwrite = self._jwrite.bucketBy(numBuckets, col, _to_seq(self._spark._sc, cols))
return self
@since(2.3)
def sortBy(self, col, *cols):
"""Sorts the output in each bucket by the given columns on the file system.
:param col: a name of a column, or a list of names.
:param cols: additional names (optional). If `col` is a list it should be empty.
>>> (df.write.format('parquet')
... .bucketBy(100, 'year', 'month')
... .sortBy('day')
... .mode("overwrite")
... .saveAsTable('sorted_bucketed_table'))
"""
if isinstance(col, (list, tuple)):
if cols:
raise ValueError("col is a {0} but cols are not empty".format(type(col)))
col, cols = col[0], col[1:]
if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)):
raise TypeError("all names should be `str`")
self._jwrite = self._jwrite.sortBy(col, _to_seq(self._spark._sc, cols))
return self
@since(1.4)
def save(self, path=None, format=None, mode=None, partitionBy=None, **options):
"""Saves the contents of the :class:`DataFrame` to a data source.
The data source is specified by the ``format`` and a set of ``options``.
If ``format`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
:param path: the path in a Hadoop supported file system
:param format: the format used to save
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
:param partitionBy: names of partitioning columns
:param options: all other string options
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
if path is None:
self._jwrite.save()
else:
self._jwrite.save(path)
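    # Hedged sketch: the generic ``save`` path with an explicit format, mode,
    # partition column and a pass-through data source option; the output path
    # is hypothetical.
    def _example_save(self):
        self.save(path="/tmp/events_out", format="parquet", mode="overwrite",
                  partitionBy="year", compression="snappy")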
@since(1.4)
def insertInto(self, tableName, overwrite=False):
"""Inserts the content of the :class:`DataFrame` to the specified table.
It requires that the schema of the class:`DataFrame` is the same as the
schema of the table.
Optionally overwriting any existing data.
"""
self._jwrite.mode("overwrite" if overwrite else "append").insertInto(tableName)
@since(1.4)
def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options):
"""Saves the content of the :class:`DataFrame` as the specified table.
        If the table already exists, the behavior of this function depends on the
        save mode, specified by the `mode` function (which defaults to throwing an exception).
        When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be
        the same as that of the existing table.
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
:param name: the table name
:param format: the format used to save
:param mode: one of `append`, `overwrite`, `error`, `ignore` (default: error)
:param partitionBy: names of partitioning columns
:param options: all other string options
"""
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
self._jwrite.saveAsTable(name)
@since(1.4)
def json(self, path, mode=None, compression=None, dateFormat=None, timestampFormat=None):
"""Saves the content of the :class:`DataFrame` in JSON format
(`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the
specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, bzip2, gzip, lz4,
snappy and deflate).
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
>>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
self._set_opts(
compression=compression, dateFormat=dateFormat, timestampFormat=timestampFormat)
self._jwrite.json(path)
@since(1.4)
def parquet(self, path, mode=None, partitionBy=None, compression=None):
"""Saves the content of the :class:`DataFrame` in Parquet format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
:param partitionBy: names of partitioning columns
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, snappy, gzip, and lzo).
This will override ``spark.sql.parquet.compression.codec``. If None
is set, it uses the value specified in
``spark.sql.parquet.compression.codec``.
>>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
if partitionBy is not None:
self.partitionBy(partitionBy)
self._set_opts(compression=compression)
self._jwrite.parquet(path)
@since(1.6)
def text(self, path, compression=None):
"""Saves the content of the DataFrame in a text file at the specified path.
:param path: the path in any Hadoop supported file system
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, bzip2, gzip, lz4,
snappy and deflate).
The DataFrame must have only one column that is of string type.
Each row becomes a new line in the output file.
"""
self._set_opts(compression=compression)
self._jwrite.text(path)
@since(2.0)
def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None,
header=None, nullValue=None, escapeQuotes=None, quoteAll=None, dateFormat=None,
timestampFormat=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None):
"""Saves the content of the :class:`DataFrame` in CSV format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, bzip2, gzip, lz4,
snappy and deflate).
:param sep: sets the single character as a separator for each field and value. If None is
set, it uses the default value, ``,``.
        :param quote: sets the single character used for escaping quoted values where the
                      separator can be part of the value. If None is set, it uses the default
                      value, ``"``. If you would like to turn off quotations, you need to set
                      it to an empty string.
        :param escape: sets the single character used for escaping quotes inside an already
                       quoted value. If None is set, it uses the default value, ``\``.
:param escapeQuotes: a flag indicating whether values containing quotes should always
be enclosed in quotes. If None is set, it uses the default value
``true``, escaping all values containing a quote character.
:param quoteAll: a flag indicating whether all values should always be enclosed in
quotes. If None is set, it uses the default value ``false``,
only escaping values containing a quote character.
:param header: writes the names of columns as the first line. If None is set, it uses
the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
the default value, empty string.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
:param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
>>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
self._set_opts(compression=compression, sep=sep, quote=quote, escape=escape, header=header,
nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll,
dateFormat=dateFormat, timestampFormat=timestampFormat,
ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace)
self._jwrite.csv(path)
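    # Hedged sketch: a CSV write exercising the quoting and null controls
    # documented above; the output path is hypothetical.
    def _example_write_csv(self):
        self.mode("overwrite").csv("/tmp/csv_out", header=True, sep="|",
                                   quoteAll=True, nullValue="NULL",
                                   compression="gzip")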
@since(1.5)
def orc(self, path, mode=None, partitionBy=None, compression=None):
"""Saves the content of the :class:`DataFrame` in ORC format at the specified path.
.. note:: Currently ORC support is only available together with Hive support.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
:param partitionBy: names of partitioning columns
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, snappy, zlib, and lzo).
This will override ``orc.compress``. If None is set, it uses the
default value, ``snappy``.
>>> orc_df = spark.read.orc('python/test_support/sql/orc_partitioned')
>>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
if partitionBy is not None:
self.partitionBy(partitionBy)
self._set_opts(compression=compression)
self._jwrite.orc(path)
@since(1.4)
def jdbc(self, url, table, mode=None, properties=None):
"""Saves the content of the :class:`DataFrame` to an external database table via JDBC.
.. note:: Don't create too many partitions in parallel on a large cluster; \
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: Name of the table in the external database.
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
"""
if properties is None:
properties = dict()
jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
for k in properties:
jprop.setProperty(k, properties[k])
self._jwrite.mode(mode).jdbc(url, table, jprop)
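    # Hedged sketch: appending to an external database table via JDBC;
    # connection details are hypothetical placeholders.
    def _example_write_jdbc(self):
        self.jdbc("jdbc:postgresql://localhost/testdb", "events_copy",
                  mode="append",
                  properties={'user': 'SYSTEM', 'password': 'mypassword'})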
def _test():
import doctest
import os
import tempfile
import py4j
from pyspark.context import SparkContext
from pyspark.sql import SparkSession, Row
import pyspark.sql.readwriter
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.readwriter.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
try:
spark = SparkSession.builder.enableHiveSupport().getOrCreate()
except py4j.protocol.Py4JError:
spark = SparkSession(sc)
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = spark.read.parquet('python/test_support/sql/parquet_partitioned')
(failure_count, test_count) = doctest.testmod(
pyspark.sql.readwriter, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
sc.stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
|
# -*- coding: utf-8 -*-
import sys
sys.path[0:0] = [""]
import copy
import os
import unittest
import tempfile
import gridfs
from nose.plugins.skip import SkipTest
from mongoengine import *
from mongoengine.connection import get_db
from mongoengine.python_support import PY3, b, StringIO
try:
from PIL import Image
HAS_PIL = True
except ImportError:
HAS_PIL = False
TEST_IMAGE_PATH = os.path.join(os.path.dirname(__file__), 'mongoengine.png')
TEST_IMAGE2_PATH = os.path.join(os.path.dirname(__file__), 'mongodb_leaf.png')
class FileTest(unittest.TestCase):
def setUp(self):
connect(db='mongoenginetest')
self.db = get_db()
def tearDown(self):
self.db.drop_collection('fs.files')
self.db.drop_collection('fs.chunks')
def test_file_field_optional(self):
# Make sure FileField is optional and not required
class DemoFile(Document):
the_file = FileField()
DemoFile.objects.create()
def test_file_fields(self):
"""Ensure that file fields can be written to and their data retrieved
"""
class PutFile(Document):
the_file = FileField()
PutFile.drop_collection()
text = b('Hello, World!')
content_type = 'text/plain'
putfile = PutFile()
putfile.the_file.put(text, content_type=content_type, filename="hello")
putfile.save()
result = PutFile.objects.first()
        self.assertEqual(putfile, result)
self.assertEqual("%s" % result.the_file, "<GridFSProxy: hello>")
self.assertEqual(result.the_file.read(), text)
self.assertEqual(result.the_file.content_type, content_type)
result.the_file.delete() # Remove file from GridFS
PutFile.objects.delete()
# Ensure file-like objects are stored
PutFile.drop_collection()
putfile = PutFile()
putstring = StringIO()
putstring.write(text)
putstring.seek(0)
putfile.the_file.put(putstring, content_type=content_type)
putfile.save()
result = PutFile.objects.first()
        self.assertEqual(putfile, result)
self.assertEqual(result.the_file.read(), text)
self.assertEqual(result.the_file.content_type, content_type)
result.the_file.delete()
def test_file_fields_stream(self):
"""Ensure that file fields can be written to and their data retrieved
"""
class StreamFile(Document):
the_file = FileField()
StreamFile.drop_collection()
text = b('Hello, World!')
more_text = b('Foo Bar')
content_type = 'text/plain'
streamfile = StreamFile()
streamfile.the_file.new_file(content_type=content_type)
streamfile.the_file.write(text)
streamfile.the_file.write(more_text)
streamfile.the_file.close()
streamfile.save()
result = StreamFile.objects.first()
        self.assertEqual(streamfile, result)
self.assertEqual(result.the_file.read(), text + more_text)
self.assertEqual(result.the_file.content_type, content_type)
result.the_file.seek(0)
self.assertEqual(result.the_file.tell(), 0)
self.assertEqual(result.the_file.read(len(text)), text)
self.assertEqual(result.the_file.tell(), len(text))
self.assertEqual(result.the_file.read(len(more_text)), more_text)
self.assertEqual(result.the_file.tell(), len(text + more_text))
result.the_file.delete()
        # Ensure deleted file returns None
        self.assertTrue(result.the_file.read() is None)
def test_file_fields_stream_after_none(self):
"""Ensure that a file field can be written to after it has been saved as
None
"""
class StreamFile(Document):
the_file = FileField()
StreamFile.drop_collection()
text = b('Hello, World!')
more_text = b('Foo Bar')
content_type = 'text/plain'
streamfile = StreamFile()
streamfile.save()
streamfile.the_file.new_file()
streamfile.the_file.write(text)
streamfile.the_file.write(more_text)
streamfile.the_file.close()
streamfile.save()
result = StreamFile.objects.first()
        self.assertEqual(streamfile, result)
self.assertEqual(result.the_file.read(), text + more_text)
#self.assertEqual(result.the_file.content_type, content_type)
result.the_file.seek(0)
self.assertEqual(result.the_file.tell(), 0)
self.assertEqual(result.the_file.read(len(text)), text)
self.assertEqual(result.the_file.tell(), len(text))
self.assertEqual(result.the_file.read(len(more_text)), more_text)
self.assertEqual(result.the_file.tell(), len(text + more_text))
result.the_file.delete()
        # Ensure deleted file returns None
        self.assertTrue(result.the_file.read() is None)
def test_file_fields_set(self):
class SetFile(Document):
the_file = FileField()
text = b('Hello, World!')
more_text = b('Foo Bar')
SetFile.drop_collection()
setfile = SetFile()
setfile.the_file = text
setfile.save()
result = SetFile.objects.first()
        self.assertEqual(setfile, result)
self.assertEqual(result.the_file.read(), text)
# Try replacing file with new one
result.the_file.replace(more_text)
result.save()
result = SetFile.objects.first()
        self.assertEqual(setfile, result)
self.assertEqual(result.the_file.read(), more_text)
result.the_file.delete()
def test_file_field_no_default(self):
class GridDocument(Document):
the_file = FileField()
GridDocument.drop_collection()
with tempfile.TemporaryFile() as f:
f.write(b("Hello World!"))
f.flush()
# Test without default
doc_a = GridDocument()
doc_a.save()
doc_b = GridDocument.objects.with_id(doc_a.id)
doc_b.the_file.replace(f, filename='doc_b')
doc_b.save()
            self.assertIsNotNone(doc_b.the_file.grid_id)
# Test it matches
doc_c = GridDocument.objects.with_id(doc_b.id)
self.assertEqual(doc_b.the_file.grid_id, doc_c.the_file.grid_id)
# Test with default
doc_d = GridDocument(the_file=b(''))
doc_d.save()
doc_e = GridDocument.objects.with_id(doc_d.id)
self.assertEqual(doc_d.the_file.grid_id, doc_e.the_file.grid_id)
doc_e.the_file.replace(f, filename='doc_e')
doc_e.save()
doc_f = GridDocument.objects.with_id(doc_e.id)
self.assertEqual(doc_e.the_file.grid_id, doc_f.the_file.grid_id)
db = GridDocument._get_db()
grid_fs = gridfs.GridFS(db)
self.assertEqual(['doc_b', 'doc_e'], grid_fs.list())
def test_file_uniqueness(self):
"""Ensure that each instance of a FileField is unique
"""
class TestFile(Document):
name = StringField()
the_file = FileField()
# First instance
test_file = TestFile()
test_file.name = "Hello, World!"
test_file.the_file.put(b('Hello, World!'))
test_file.save()
# Second instance
test_file_dupe = TestFile()
data = test_file_dupe.the_file.read() # Should be None
        self.assertNotEqual(test_file.name, test_file_dupe.name)
        self.assertNotEqual(test_file.the_file.read(), data)
TestFile.drop_collection()
def test_file_saving(self):
"""Ensure you can add meta data to file"""
class Animal(Document):
genus = StringField()
family = StringField()
photo = FileField()
Animal.drop_collection()
marmot = Animal(genus='Marmota', family='Sciuridae')
marmot_photo = open(TEST_IMAGE_PATH, 'rb') # Retrieve a photo from disk
marmot.photo.put(marmot_photo, content_type='image/jpeg', foo='bar')
marmot.photo.close()
marmot.save()
marmot = Animal.objects.get()
self.assertEqual(marmot.photo.content_type, 'image/jpeg')
self.assertEqual(marmot.photo.foo, 'bar')
def test_file_reassigning(self):
class TestFile(Document):
the_file = FileField()
TestFile.drop_collection()
test_file = TestFile(the_file=open(TEST_IMAGE_PATH, 'rb')).save()
self.assertEqual(test_file.the_file.get().length, 8313)
test_file = TestFile.objects.first()
test_file.the_file = open(TEST_IMAGE2_PATH, 'rb')
test_file.save()
self.assertEqual(test_file.the_file.get().length, 4971)
def test_file_boolean(self):
"""Ensure that a boolean test of a FileField indicates its presence
"""
class TestFile(Document):
the_file = FileField()
TestFile.drop_collection()
test_file = TestFile()
self.assertFalse(bool(test_file.the_file))
test_file.the_file.put(b('Hello, World!'), content_type='text/plain')
test_file.save()
self.assertTrue(bool(test_file.the_file))
test_file = TestFile.objects.first()
self.assertEqual(test_file.the_file.content_type, "text/plain")
def test_file_cmp(self):
"""Test comparing against other types"""
class TestFile(Document):
the_file = FileField()
test_file = TestFile()
self.assertFalse(test_file.the_file in [{"test": 1}])
def test_file_disk_space(self):
""" Test disk space usage when we delete/replace a file """
class TestFile(Document):
the_file = FileField()
text = b('Hello, World!')
content_type = 'text/plain'
testfile = TestFile()
testfile.the_file.put(text, content_type=content_type, filename="hello")
testfile.save()
# Now check fs.files and fs.chunks
db = TestFile._get_db()
files = db.fs.files.find()
chunks = db.fs.chunks.find()
        self.assertEqual(len(list(files)), 1)
        self.assertEqual(len(list(chunks)), 1)
        # Deleting the document should delete the files
        testfile.delete()
        files = db.fs.files.find()
        chunks = db.fs.chunks.find()
        self.assertEqual(len(list(files)), 0)
        self.assertEqual(len(list(chunks)), 0)
# Test case where we don't store a file in the first place
testfile = TestFile()
testfile.save()
files = db.fs.files.find()
chunks = db.fs.chunks.find()
        self.assertEqual(len(list(files)), 0)
        self.assertEqual(len(list(chunks)), 0)
testfile.delete()
files = db.fs.files.find()
chunks = db.fs.chunks.find()
        self.assertEqual(len(list(files)), 0)
        self.assertEqual(len(list(chunks)), 0)
# Test case where we overwrite the file
testfile = TestFile()
testfile.the_file.put(text, content_type=content_type, filename="hello")
testfile.save()
text = b('Bonjour, World!')
testfile.the_file.replace(text, content_type=content_type, filename="hello")
testfile.save()
files = db.fs.files.find()
chunks = db.fs.chunks.find()
        self.assertEqual(len(list(files)), 1)
        self.assertEqual(len(list(chunks)), 1)
testfile.delete()
files = db.fs.files.find()
chunks = db.fs.chunks.find()
        self.assertEqual(len(list(files)), 0)
        self.assertEqual(len(list(chunks)), 0)
def test_image_field(self):
if not HAS_PIL:
raise SkipTest('PIL not installed')
class TestImage(Document):
image = ImageField()
TestImage.drop_collection()
with tempfile.TemporaryFile() as f:
f.write(b("Hello World!"))
f.flush()
t = TestImage()
try:
t.image.put(f)
self.fail("Should have raised an invalidation error")
except ValidationError, e:
self.assertEqual("%s" % e, "Invalid image: cannot identify image file %s" % f)
t = TestImage()
t.image.put(open(TEST_IMAGE_PATH, 'rb'))
t.save()
t = TestImage.objects.first()
self.assertEqual(t.image.format, 'PNG')
w, h = t.image.size
self.assertEqual(w, 371)
self.assertEqual(h, 76)
t.image.delete()
def test_image_field_reassigning(self):
if not HAS_PIL:
raise SkipTest('PIL not installed')
class TestFile(Document):
the_file = ImageField()
TestFile.drop_collection()
test_file = TestFile(the_file=open(TEST_IMAGE_PATH, 'rb')).save()
self.assertEqual(test_file.the_file.size, (371, 76))
test_file = TestFile.objects.first()
test_file.the_file = open(TEST_IMAGE2_PATH, 'rb')
test_file.save()
self.assertEqual(test_file.the_file.size, (45, 101))
def test_image_field_resize(self):
if not HAS_PIL:
raise SkipTest('PIL not installed')
class TestImage(Document):
image = ImageField(size=(185, 37))
TestImage.drop_collection()
t = TestImage()
t.image.put(open(TEST_IMAGE_PATH, 'rb'))
t.save()
t = TestImage.objects.first()
self.assertEqual(t.image.format, 'PNG')
w, h = t.image.size
self.assertEqual(w, 185)
self.assertEqual(h, 37)
t.image.delete()
def test_image_field_resize_force(self):
if not HAS_PIL:
raise SkipTest('PIL not installed')
class TestImage(Document):
image = ImageField(size=(185, 37, True))
TestImage.drop_collection()
t = TestImage()
t.image.put(open(TEST_IMAGE_PATH, 'rb'))
t.save()
t = TestImage.objects.first()
self.assertEqual(t.image.format, 'PNG')
w, h = t.image.size
self.assertEqual(w, 185)
self.assertEqual(h, 37)
t.image.delete()
def test_image_field_thumbnail(self):
if not HAS_PIL:
raise SkipTest('PIL not installed')
class TestImage(Document):
image = ImageField(thumbnail_size=(92, 18))
TestImage.drop_collection()
t = TestImage()
t.image.put(open(TEST_IMAGE_PATH, 'rb'))
t.save()
t = TestImage.objects.first()
self.assertEqual(t.image.thumbnail.format, 'PNG')
self.assertEqual(t.image.thumbnail.width, 92)
self.assertEqual(t.image.thumbnail.height, 18)
t.image.delete()
def test_file_multidb(self):
register_connection('test_files', 'test_files')
class TestFile(Document):
name = StringField()
the_file = FileField(db_alias="test_files",
collection_name="macumba")
TestFile.drop_collection()
# delete old filesystem
get_db("test_files").macumba.files.drop()
get_db("test_files").macumba.chunks.drop()
# First instance
test_file = TestFile()
test_file.name = "Hello, World!"
test_file.the_file.put(b('Hello, World!'),
name="hello.txt")
test_file.save()
data = get_db("test_files").macumba.files.find_one()
self.assertEqual(data.get('name'), 'hello.txt')
test_file = TestFile.objects.first()
self.assertEqual(test_file.the_file.read(),
b('Hello, World!'))
test_file = TestFile.objects.first()
test_file.the_file = b('HELLO, WORLD!')
test_file.save()
test_file = TestFile.objects.first()
self.assertEqual(test_file.the_file.read(),
b('HELLO, WORLD!'))
def test_copyable(self):
class PutFile(Document):
the_file = FileField()
PutFile.drop_collection()
text = b('Hello, World!')
content_type = 'text/plain'
putfile = PutFile()
putfile.the_file.put(text, content_type=content_type)
putfile.save()
class TestFile(Document):
name = StringField()
self.assertEqual(putfile, copy.copy(putfile))
self.assertEqual(putfile, copy.deepcopy(putfile))
def test_get_image_by_grid_id(self):
if not HAS_PIL:
raise SkipTest('PIL not installed')
class TestImage(Document):
image1 = ImageField()
image2 = ImageField()
TestImage.drop_collection()
t = TestImage()
t.image1.put(open(TEST_IMAGE_PATH, 'rb'))
t.image2.put(open(TEST_IMAGE2_PATH, 'rb'))
t.save()
test = TestImage.objects.first()
grid_id = test.image1.grid_id
        self.assertEqual(1, TestImage.objects(Q(image1=grid_id) |
                                              Q(image2=grid_id)).count())
def test_complex_field_filefield(self):
"""Ensure you can add meta data to file"""
class Animal(Document):
genus = StringField()
family = StringField()
photos = ListField(FileField())
Animal.drop_collection()
marmot = Animal(genus='Marmota', family='Sciuridae')
marmot_photo = open(TEST_IMAGE_PATH, 'rb') # Retrieve a photo from disk
photos_field = marmot._fields['photos'].field
new_proxy = photos_field.get_proxy_obj('photos', marmot)
new_proxy.put(marmot_photo, content_type='image/jpeg', foo='bar')
marmot_photo.close()
marmot.photos.append(new_proxy)
marmot.save()
marmot = Animal.objects.get()
self.assertEqual(marmot.photos[0].content_type, 'image/jpeg')
self.assertEqual(marmot.photos[0].foo, 'bar')
self.assertEqual(marmot.photos[0].get().length, 8313)
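# Hedged sketch: the minimal FileField round trip that the tests above
# exercise, condensed for reference. Assumes a reachable local MongoDB and
# the same module-level imports; the document class below is hypothetical.
def _example_filefield_roundtrip():
    class Note(Document):
        attachment = FileField()
    connect(db='mongoenginetest')
    note = Note()
    note.attachment.put(b('hello'), content_type='text/plain', filename='hi')
    note.save()
    fetched = Note.objects.first()
    assert fetched.attachment.read() == b('hello')
    fetched.attachment.delete()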
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2016 Clinton Knight
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Performance metrics functions and cache for NetApp systems.
"""
import copy
from oslo_log import log as logging
from manila import exception
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
LOG = logging.getLogger(__name__)
DEFAULT_UTILIZATION = 50
class PerformanceLibrary(object):
def __init__(self, zapi_client):
self.zapi_client = zapi_client
self.performance_counters = {}
self.pool_utilization = {}
self._init_counter_info()
def _init_counter_info(self):
"""Set a few counter names based on Data ONTAP version."""
self.system_object_name = None
self.avg_processor_busy_base_counter_name = None
try:
if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
self.system_object_name = 'system:constituent'
self.avg_processor_busy_base_counter_name = (
self._get_base_counter_name('system:constituent',
'avg_processor_busy'))
elif self.zapi_client.features.SYSTEM_METRICS:
self.system_object_name = 'system'
self.avg_processor_busy_base_counter_name = (
self._get_base_counter_name('system',
'avg_processor_busy'))
except netapp_api.NaApiError:
if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time'
else:
self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1'
LOG.exception('Could not get performance base counter '
'name. Performance-based scheduler '
'functions may not be available.')
def update_performance_cache(self, flexvol_pools, aggregate_pools):
"""Called periodically to update per-pool node utilization metrics."""
# Nothing to do on older systems
if not (self.zapi_client.features.SYSTEM_METRICS or
self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS):
return
# Get aggregates and nodes for all known pools
aggr_names = self._get_aggregates_for_pools(flexvol_pools,
aggregate_pools)
node_names, aggr_node_map = self._get_nodes_for_aggregates(aggr_names)
# Update performance counter cache for each node
node_utilization = {}
for node_name in node_names:
if node_name not in self.performance_counters:
self.performance_counters[node_name] = []
# Get new performance counters and save only the last 10
counters = self._get_node_utilization_counters(node_name)
if not counters:
continue
self.performance_counters[node_name].append(counters)
self.performance_counters[node_name] = (
self.performance_counters[node_name][-10:])
# Update utilization for each node using newest & oldest sample
counters = self.performance_counters[node_name]
if len(counters) < 2:
node_utilization[node_name] = DEFAULT_UTILIZATION
else:
node_utilization[node_name] = self._get_node_utilization(
counters[0], counters[-1], node_name)
# Update pool utilization map atomically
pool_utilization = {}
all_pools = copy.deepcopy(flexvol_pools)
all_pools.update(aggregate_pools)
for pool_name, pool_info in all_pools.items():
aggr_name = pool_info.get('netapp_aggregate', 'unknown')
node_name = aggr_node_map.get(aggr_name)
if node_name:
pool_utilization[pool_name] = node_utilization.get(
node_name, DEFAULT_UTILIZATION)
else:
pool_utilization[pool_name] = DEFAULT_UTILIZATION
self.pool_utilization = pool_utilization
def get_node_utilization_for_pool(self, pool_name):
"""Get the node utilization for the specified pool, if available."""
return self.pool_utilization.get(pool_name, DEFAULT_UTILIZATION)
def update_for_failover(self, zapi_client, flexvol_pools, aggregate_pools):
"""Change API client after a whole-backend failover event."""
self.zapi_client = zapi_client
self.update_performance_cache(flexvol_pools, aggregate_pools)
def _get_aggregates_for_pools(self, flexvol_pools, aggregate_pools):
"""Get the set of aggregates that contain the specified pools."""
aggr_names = set()
for pool_name, pool_info in aggregate_pools.items():
aggr_names.add(pool_info.get('netapp_aggregate'))
for pool_name, pool_info in flexvol_pools.items():
aggr_names.add(pool_info.get('netapp_aggregate'))
return list(aggr_names)
def _get_nodes_for_aggregates(self, aggr_names):
"""Get the cluster nodes that own the specified aggregates."""
node_names = set()
aggr_node_map = {}
for aggr_name in aggr_names:
node_name = self.zapi_client.get_node_for_aggregate(aggr_name)
if node_name:
node_names.add(node_name)
aggr_node_map[aggr_name] = node_name
return list(node_names), aggr_node_map
def _get_node_utilization(self, counters_t1, counters_t2, node_name):
"""Get node utilization from two sets of performance counters."""
try:
# Time spent in the single-threaded Kahuna domain
kahuna_percent = self._get_kahuna_utilization(counters_t1,
counters_t2)
# If Kahuna is using >60% of the CPU, the controller is fully busy
if kahuna_percent > 60:
return 100.0
# Average CPU busyness across all processors
avg_cpu_percent = 100.0 * self._get_average_cpu_utilization(
counters_t1, counters_t2)
# Total Consistency Point (CP) time
total_cp_time_msec = self._get_total_consistency_point_time(
counters_t1, counters_t2)
# Time spent in CP Phase 2 (buffer flush)
p2_flush_time_msec = self._get_consistency_point_p2_flush_time(
counters_t1, counters_t2)
# Wall-clock time between the two counter sets
poll_time_msec = self._get_total_time(counters_t1,
counters_t2,
'total_cp_msecs')
# If two polls happened in quick succession, use CPU utilization
if total_cp_time_msec == 0 or poll_time_msec == 0:
return max(min(100.0, avg_cpu_percent), 0)
# Adjusted Consistency Point time
adjusted_cp_time_msec = self._get_adjusted_consistency_point_time(
total_cp_time_msec, p2_flush_time_msec)
adjusted_cp_percent = (100.0 *
adjusted_cp_time_msec / poll_time_msec)
# Utilization is the greater of CPU busyness & CP time
node_utilization = max(avg_cpu_percent, adjusted_cp_percent)
return max(min(100.0, node_utilization), 0)
except Exception:
LOG.exception('Could not calculate node utilization for '
'node %s.', node_name)
return DEFAULT_UTILIZATION
def _get_kahuna_utilization(self, counters_t1, counters_t2):
"""Get time spent in the single-threaded Kahuna domain."""
# Note(cknight): Because Kahuna is single-threaded, running only on
# one CPU at a time, we can safely sum the Kahuna CPU usage
# percentages across all processors in a node.
return sum(self._get_performance_counter_average_multi_instance(
counters_t1, counters_t2, 'domain_busy:kahuna',
'processor_elapsed_time')) * 100.0
def _get_average_cpu_utilization(self, counters_t1, counters_t2):
"""Get average CPU busyness across all processors."""
return self._get_performance_counter_average(
counters_t1, counters_t2, 'avg_processor_busy',
self.avg_processor_busy_base_counter_name)
def _get_total_consistency_point_time(self, counters_t1, counters_t2):
"""Get time spent in Consistency Points in msecs."""
return float(self._get_performance_counter_delta(
counters_t1, counters_t2, 'total_cp_msecs'))
def _get_consistency_point_p2_flush_time(self, counters_t1, counters_t2):
"""Get time spent in CP Phase 2 (buffer flush) in msecs."""
return float(self._get_performance_counter_delta(
counters_t1, counters_t2, 'cp_phase_times:p2_flush'))
def _get_total_time(self, counters_t1, counters_t2, counter_name):
"""Get wall clock time between two successive counters in msecs."""
timestamp_t1 = float(self._find_performance_counter_timestamp(
counters_t1, counter_name))
timestamp_t2 = float(self._find_performance_counter_timestamp(
counters_t2, counter_name))
return (timestamp_t2 - timestamp_t1) * 1000.0
def _get_adjusted_consistency_point_time(self, total_cp_time,
p2_flush_time):
"""Get adjusted CP time by limiting CP phase 2 flush time to 20%."""
return (total_cp_time - p2_flush_time) * 1.20
def _get_performance_counter_delta(self, counters_t1, counters_t2,
counter_name):
"""Calculate a delta value from two performance counters."""
counter_t1 = int(
self._find_performance_counter_value(counters_t1, counter_name))
counter_t2 = int(
self._find_performance_counter_value(counters_t2, counter_name))
return counter_t2 - counter_t1
def _get_performance_counter_average(self, counters_t1, counters_t2,
counter_name, base_counter_name,
instance_name=None):
"""Calculate an average value from two performance counters."""
counter_t1 = float(self._find_performance_counter_value(
counters_t1, counter_name, instance_name))
counter_t2 = float(self._find_performance_counter_value(
counters_t2, counter_name, instance_name))
base_counter_t1 = float(self._find_performance_counter_value(
counters_t1, base_counter_name, instance_name))
base_counter_t2 = float(self._find_performance_counter_value(
counters_t2, base_counter_name, instance_name))
return (counter_t2 - counter_t1) / (base_counter_t2 - base_counter_t1)
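    # Hedged worked example: if ``avg_processor_busy`` grows by 150 ticks while
    # its base counter grows by 300 over the same window, the method above
    # returns 150.0 / 300.0 = 0.5, i.e. the node's CPUs were ~50% busy.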
def _get_performance_counter_average_multi_instance(self, counters_t1,
counters_t2,
counter_name,
base_counter_name):
"""Calculate an average value from multiple counter instances."""
averages = []
instance_names = []
for counter in counters_t1:
if counter_name in counter:
instance_names.append(counter['instance-name'])
for instance_name in instance_names:
average = self._get_performance_counter_average(
counters_t1, counters_t2, counter_name, base_counter_name,
instance_name)
averages.append(average)
return averages
def _find_performance_counter_value(self, counters, counter_name,
instance_name=None):
"""Given a counter set, return the value of a named instance."""
for counter in counters:
if counter_name in counter:
if (instance_name is None
or counter['instance-name'] == instance_name):
return counter[counter_name]
else:
raise exception.NotFound(_('Counter %s not found') % counter_name)
def _find_performance_counter_timestamp(self, counters, counter_name,
instance_name=None):
"""Given a counter set, return the timestamp of a named instance."""
for counter in counters:
if counter_name in counter:
if (instance_name is None
or counter['instance-name'] == instance_name):
return counter['timestamp']
else:
raise exception.NotFound(_('Counter %s not found') % counter_name)
def _expand_performance_array(self, object_name, counter_name, counter):
"""Get array labels and expand counter data array."""
# Get array labels for counter value
counter_info = self.zapi_client.get_performance_counter_info(
object_name, counter_name)
array_labels = [counter_name + ':' + label.lower()
for label in counter_info['labels']]
array_values = counter[counter_name].split(',')
# Combine labels and values, and then mix into existing counter
array_data = dict(zip(array_labels, array_values))
counter.update(array_data)
def _get_base_counter_name(self, object_name, counter_name):
"""Get the name of the base counter for the specified counter."""
counter_info = self.zapi_client.get_performance_counter_info(
object_name, counter_name)
return counter_info['base-counter']
def _get_node_utilization_counters(self, node_name):
"""Get all performance counters for calculating node utilization."""
try:
return (self._get_node_utilization_system_counters(node_name) +
self._get_node_utilization_wafl_counters(node_name) +
self._get_node_utilization_processor_counters(node_name))
except netapp_api.NaApiError:
LOG.exception('Could not get utilization counters from node '
'%s', node_name)
return None
def _get_node_utilization_system_counters(self, node_name):
"""Get the system counters for calculating node utilization."""
system_instance_uuids = (
self.zapi_client.get_performance_instance_uuids(
self.system_object_name, node_name))
system_counter_names = [
'avg_processor_busy',
self.avg_processor_busy_base_counter_name,
]
if 'cpu_elapsed_time1' in system_counter_names:
system_counter_names.append('cpu_elapsed_time')
system_counters = self.zapi_client.get_performance_counters(
self.system_object_name, system_instance_uuids,
system_counter_names)
return system_counters
def _get_node_utilization_wafl_counters(self, node_name):
"""Get the WAFL counters for calculating node utilization."""
wafl_instance_uuids = self.zapi_client.get_performance_instance_uuids(
'wafl', node_name)
wafl_counter_names = ['total_cp_msecs', 'cp_phase_times']
wafl_counters = self.zapi_client.get_performance_counters(
'wafl', wafl_instance_uuids, wafl_counter_names)
# Expand array data so we can use wafl:cp_phase_times[P2_FLUSH]
for counter in wafl_counters:
if 'cp_phase_times' in counter:
self._expand_performance_array(
'wafl', 'cp_phase_times', counter)
return wafl_counters
def _get_node_utilization_processor_counters(self, node_name):
"""Get the processor counters for calculating node utilization."""
processor_instance_uuids = (
self.zapi_client.get_performance_instance_uuids('processor',
node_name))
processor_counter_names = ['domain_busy', 'processor_elapsed_time']
processor_counters = self.zapi_client.get_performance_counters(
'processor', processor_instance_uuids, processor_counter_names)
# Expand array data so we can use processor:domain_busy[kahuna]
for counter in processor_counters:
if 'domain_busy' in counter:
self._expand_performance_array(
'processor', 'domain_busy', counter)
return processor_counters
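# Hedged sketch of how the pieces above combine: utilization is the larger of
# average CPU busyness and adjusted Consistency Point time, clamped to
# [0, 100]. The literal counter deltas below are hypothetical.
def _example_utilization_math():
    avg_cpu_percent = 100.0 * (150.0 / 300.0)        # 50% busy CPUs
    total_cp_msec, p2_flush_msec, poll_msec = 800.0, 200.0, 2000.0
    adjusted_cp_msec = (total_cp_msec - p2_flush_msec) * 1.20
    adjusted_cp_percent = 100.0 * adjusted_cp_msec / poll_msec   # 36%
    return max(min(100.0, max(avg_cpu_percent, adjusted_cp_percent)), 0)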
|
|
import unittest
from pqhelper.versus import Advisor
from pqhelper.base import Board, Actor
class Test_Advisor(unittest.TestCase):
# Common test data
board_string_3_valid_swaps = '........\n' \
'........\n' \
'........\n' \
'........\n' \
'........\n' \
'8..*..g.\n' \
'8..s..g.\n' \
'xr.xs.xg'
# Attributes
def test_current_completed_turn_is_get_only(self):
advisor = Advisor()
self.assertRaises(AttributeError,
setattr, *(advisor, 'current_completed_turn', 10))
def test_current_completed_turn_is_zero_by_default(self):
advisor = Advisor()
self.assertEqual(advisor.current_completed_turn, 0)
# Setup and Maintenance of the simulation
def test_reset_sets_a_new_internal_root(self):
advisor = Advisor()
# patch the root to some value
original_root = advisor._root = 'fake root'
# confirm reset changes the root and it's not None
advisor.reset(Board(),
generic_actor('player'), generic_actor('opponent'), 0)
self.assertIsNot(advisor._root, original_root,
'Unexpectedly found the same root after resetting:'
'\n{}'.format(original_root))
self.assertIsNotNone(advisor._root,
'Unexpectedly found "None" as the root after'
' resetting')
def test_reset_sets_the_current_completed_turn_to_zero(self):
advisor = Advisor()
# set a fake original value
advisor._current_completed_turn = 2
advisor.reset(Board(),
generic_actor('player'), generic_actor('opponent'), 0)
# confirm reset of the current turn indicator
completed_turn = advisor.current_completed_turn
        self.assertEqual(completed_turn, 0,
                         'Unexpectedly found {} instead of zero for the '
                         'current completed turn value after resetting '
                         'the advisor'.format(completed_turn))
def test_reset_sets_root_state_actions_remaining_to_1_plus_extra(self):
advisor = Advisor()
extra_actions = 2
advisor.reset(Board(),
generic_actor('player'), generic_actor('opponent'),
extra_actions)
        actions_remaining_spec = 1 + extra_actions
        self.assertEqual(advisor._root.actions_remaining, actions_remaining_spec)
# Run the versus simulation one turn at a time
def test_simulate_next_turn_produces_correct_tree_internally(self):
advisor = generic_preset_advisor(self.board_string_3_valid_swaps)
# turn 1: 3 leaves
advisor.simulate_next_turn()
leaf_count = len(list(advisor._root.leaves()))
self.assertEqual(leaf_count, 3)
# turn 2: 6 leaves
advisor.simulate_next_turn()
leaf_count = len(list(advisor._root.leaves()))
self.assertEqual(leaf_count, 6)
def test_simulate_next_turn_increments_current_turn_if_simulated(self):
board_string_two_moves = '........\n' \
'........\n' \
'........\n' \
'........\n' \
'........\n' \
'r.....*.\n' \
'3.....s.\n' \
'xr....xs'
advisor = generic_preset_advisor(board_string_two_moves)
# turn 0
self.assertEqual(advisor.current_completed_turn, 0)
# turn 1, 2: increments with simulation
advisor.simulate_next_turn()
self.assertEqual(advisor.current_completed_turn, 1)
advisor.simulate_next_turn()
self.assertEqual(advisor.current_completed_turn, 2)
# turn 3: nothing to simulate so does not increment
advisor.simulate_next_turn()
self.assertEqual(advisor.current_completed_turn, 2)
# Summaries: general
def test_current_summaries_generates_empty_sequence_for_None_root(self):
advisor = Advisor()
advisor._root = None # just to be sure
summaries_for_None = list(advisor.sorted_current_summaries())
empty_sequence = tuple()
self.assertSequenceEqual(summaries_for_None, empty_sequence)
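    # Hedged sketch of the client loop these tests imply: reset, simulate a
    # few turns, then rank the root actions. ``generic_actor`` is a module
    # helper assumed to be defined elsewhere in this file.
    def _example_advisor_loop(self):
        advisor = Advisor()
        advisor.reset(Board(), generic_actor('player'),
                      generic_actor('opponent'), 0)
        for _ in range(2):
            advisor.simulate_next_turn()
        return advisor.sorted_current_summaries()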
# Summaries: action details
def test_current_summaries_generates_correct_swap_choices(self):
advisor = generic_preset_advisor(self.board_string_3_valid_swaps)
        # sim and confirm choices for turn 1
advisor.simulate_next_turn()
summaries_turn_1 = advisor.sorted_current_summaries()
swaps_turn_1 = [summary.action for summary in summaries_turn_1]
swaps_spec = (((7, 0), (7, 1)),
((7, 3), (7, 4)),
((7, 6), (7, 7)))
self.assertItemsEqual(swaps_turn_1, swaps_spec,
'Unexpectedly received these swaps:\n{}\n'
'instead of the expected swaps:\n{}'
''.format(swaps_turn_1, swaps_spec))
def test_current_summaries_generates_same_choices_each_turn(self):
advisor = generic_preset_advisor(self.board_string_3_valid_swaps)
# sim and store choices for turn 1
advisor.simulate_next_turn()
summaries_turn_1 = advisor.sorted_current_summaries()
swaps_turn_1 = [summary.action for summary in summaries_turn_1]
# sim and store choices for turn 2
advisor.simulate_next_turn()
summaries_turn_2 = advisor.sorted_current_summaries()
swaps_turn_2 = [summary.action for summary in summaries_turn_2]
# confirm the choices are the same between turn 1 and turn 2
        self.assertItemsEqual(swaps_turn_1, swaps_turn_2,
                              'Unexpectedly found different root choices'
                              ' on turn 1:\n{}\nand turn 2:\n{}'
                              ''.format(swaps_turn_1, swaps_turn_2))
# Summaries: scoring
def test_current_summaries_generates_correct_scoring(self):
surprise_board_string = '........\n' \
'........\n' \
'........\n' \
'........\n' \
'........\n' \
'r.....r.\n' \
'2.....r.\n' \
'sr.**.xr'
flexible_value = (500, 1000)
flexible_player = generic_actor(name='player',
r=flexible_value,
g=flexible_value,
health=flexible_value)
flexible_opponent = generic_actor(name='opponent',
r=flexible_value,
g=flexible_value,
health=flexible_value)
advisor = generic_preset_advisor(board_string=surprise_board_string,
player=flexible_player,
opponent=flexible_opponent)
left_swap = ((7, 0), (7, 1))
right_swap = ((7, 6), (7, 7))
# Simulate 1 turn and confirm relative scoring
advisor.simulate_next_turn()
# with only one turn simulated, 4 reds seems better than 3
ordered_swaps_turn_1_spec = (left_swap, # 4 reds 2 * (1 + 1)
right_swap) # 3 reds
summaries_turn_1 = advisor.sorted_current_summaries()
ordered_swaps_turn_1 = [summary.action
for summary in summaries_turn_1]
self.assertSequenceEqual(ordered_swaps_turn_1,
ordered_swaps_turn_1_spec,
'After turn 1, expected to get this order'
' of swaps based on the scoring:\n{}'
'\nbut got this order:\n{}'
''.format(ordered_swaps_turn_1_spec,
ordered_swaps_turn_1))
# with two turns simulated, 4 reds actually opened up a great move
# for the opponent. so the 3 red move is scored higher
advisor.simulate_next_turn()
ordered_swaps_turn_2_spec = (right_swap, left_swap)
summaries_turn_2 = advisor.sorted_current_summaries()
ordered_swaps_turn_2 = [summary.action
for summary in summaries_turn_2]
self.assertSequenceEqual(ordered_swaps_turn_2,
ordered_swaps_turn_2_spec,
'After turn 2, expected to get this order'
' of swaps based on the scoring:\n{}'
'\nbut got this order:\n{}'
''.format(ordered_swaps_turn_2_spec,
ordered_swaps_turn_2))
def test_current_summaries_generates_different_scoring_each_turn(self):
advisor = generic_preset_advisor(self.board_string_3_valid_swaps)
# sim and store scoring for turn 1
advisor.simulate_next_turn()
summaries_turn_1 = advisor.sorted_current_summaries()
overalls_turn_1 = [summary.score
for summary in summaries_turn_1]
# sim and store scoring for turn 2
advisor.simulate_next_turn()
summaries_turn_2 = advisor.sorted_current_summaries()
overalls_turn_2 = [summary.score
for summary in summaries_turn_2]
# confirm the scoring is different between turn 1 and turn 2
items_were_equal = True
try:
self.assertItemsEqual(overalls_turn_1, overalls_turn_2)
except AssertionError:
items_were_equal = False # ok! the items shouldn't be equal
if items_were_equal:
            self.fail('Expected the scoring for different turns to be'
                      ' different but got the same:\n{}'
                      ''.format(overalls_turn_1))
def generic_preset_advisor(board_string, player=None, opponent=None,
                           random_fill=False):
    """Build an Advisor reset onto the given board with generic actors."""
    advisor = Advisor()
player = player or generic_actor('player')
opponent = opponent or generic_actor('opponent')
advisor.reset(Board(board_string), player, opponent, 0)
# patch the game not to do random fills
advisor._game.random_fill = random_fill
return advisor
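# Note: disabling random_fill above makes the simulated game deterministic,
# which is what lets the tests assert exact leaf counts and stable scores.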
def generic_actor(name=None, health=None,
r=None, g=None, b=None, y=None,
x=None, m=None, h=None, c=None):
"""Simple factory to keep tests focused."""
base = (50, 100)
health = health or base
r = r or base
g = g or base
b = b or base
y = y or base
x = x or base
m = m or base
h = h or base
c = c or base
return Actor(name, health, r, g, b, y, x, m, h, c)
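# Sketch of how the factory is used by the tests (the two-tuples are
# presumably (current, maximum) pairs, mirroring base = (50, 100)):
#
#     player = generic_actor('player', health=(40, 100))
#     # any unspecified resource falls back to base = (50, 100)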
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import socket
import ssl
import sys
import threading
import time
import traceback
import requests
ca_path = requests.certs.where()
import util
import x509
import pem
def Connection(server, queue, config_path):
"""Makes asynchronous connections to a remote electrum server.
Returns the running thread that is making the connection.
Once the thread has connected, it finishes, placing a tuple on the
queue of the form (server, socket), where socket is None if
connection failed.
"""
host, port, protocol = server.split(':')
    if protocol not in 'st':
raise Exception('Unknown protocol: %s' % protocol)
c = TcpConnection(server, queue, config_path)
c.start()
return c
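# A minimal usage sketch for Connection (hypothetical server string and
# config path; Queue is the py2 stdlib class, an assumption about the
# caller rather than part of this module):
#
#     from Queue import Queue
#     q = Queue()
#     Connection('electrum.example.com:50002:s', q, '/tmp/electrum')
#     server, sock = q.get()  # blocks; sock is None if the connect failed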
class TcpConnection(threading.Thread, util.PrintError):
def __init__(self, server, queue, config_path):
threading.Thread.__init__(self)
self.config_path = config_path
self.queue = queue
self.server = server
self.host, self.port, self.protocol = self.server.split(':')
self.host = str(self.host)
self.port = int(self.port)
self.use_ssl = (self.protocol == 's')
self.daemon = True
def diagnostic_name(self):
return self.host
def check_host_name(self, peercert, name):
"""Simple certificate/host name checker. Returns True if the
certificate matches, False otherwise. Does not support
wildcards."""
# Check that the peer has supplied a certificate.
# None/{} is not acceptable.
if not peercert:
return False
        if "subjectAltName" in peercert:
for typ, val in peercert["subjectAltName"]:
if typ == "DNS" and val == name:
return True
else:
# Only check the subject DN if there is no subject alternative
# name.
cn = None
for attr, val in peercert["subject"]:
# Use most-specific (last) commonName attribute.
if attr == "commonName":
cn = val
if cn is not None:
return cn == name
return False
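    # Illustrative sketch of the matching rules above, using a hypothetical
    # cert dict shaped like ssl.getpeercert() output:
    #
    #     cert = {'subjectAltName': (('DNS', 'electrum.example.com'),)}
    #     conn.check_host_name(cert, 'electrum.example.com')  # -> True
    #     conn.check_host_name(cert, 'other.example.com')     # -> False
    #     conn.check_host_name({}, 'electrum.example.com')    # -> False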
def get_simple_socket(self):
        try:
            addr_info = socket.getaddrinfo(self.host, self.port,
                                           socket.AF_UNSPEC, socket.SOCK_STREAM)
        except socket.gaierror:
            self.print_error("cannot resolve hostname")
            return
        last_error = None
        for res in addr_info:
            try:
                s = socket.socket(res[0], socket.SOCK_STREAM)
                s.settimeout(10)
                s.connect(res[4])
                s.settimeout(2)
                s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                return s
            except BaseException as e:
                last_error = e
                continue
        # every address failed (or none resolved); report the last error
        self.print_error("failed to connect", str(last_error))
def get_socket(self):
if self.use_ssl:
cert_path = os.path.join(self.config_path, 'certs', self.host)
if not os.path.exists(cert_path):
is_new = True
s = self.get_simple_socket()
if s is None:
return
# try with CA first
try:
s = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_SSLv23, cert_reqs=ssl.CERT_REQUIRED, ca_certs=ca_path, do_handshake_on_connect=True)
                except ssl.SSLError:
                    s = None
if s and self.check_host_name(s.getpeercert(), self.host):
self.print_error("SSL certificate signed by CA")
return s
# get server certificate.
# Do not use ssl.get_server_certificate because it does not work with proxy
s = self.get_simple_socket()
if s is None:
return
try:
s = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_SSLv23, cert_reqs=ssl.CERT_NONE, ca_certs=None)
                except ssl.SSLError as e:
self.print_error("SSL error retrieving SSL certificate:", e)
return
dercert = s.getpeercert(True)
s.close()
cert = ssl.DER_cert_to_PEM_cert(dercert)
# workaround android bug
cert = re.sub("([^\n])-----END CERTIFICATE-----","\\1\n-----END CERTIFICATE-----",cert)
temporary_path = cert_path + '.temp'
with open(temporary_path,"w") as f:
f.write(cert)
else:
is_new = False
s = self.get_simple_socket()
if s is None:
return
if self.use_ssl:
try:
s = ssl.wrap_socket(s,
ssl_version=ssl.PROTOCOL_SSLv23,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs= (temporary_path if is_new else cert_path),
do_handshake_on_connect=True)
            except ssl.SSLError as e:
                self.print_error("SSL error:", e)
                # errno 1 (ssl.SSL_ERROR_SSL) is the generic handshake
                # failure; anything else is fatal for this attempt
                if e.errno != 1:
                    return
return
if is_new:
rej = cert_path + '.rej'
if os.path.exists(rej):
os.unlink(rej)
os.rename(temporary_path, rej)
else:
with open(cert_path) as f:
cert = f.read()
try:
b = pem.dePem(cert, 'CERTIFICATE')
x = x509.X509(b)
except:
traceback.print_exc(file=sys.stderr)
self.print_error("wrong certificate")
return
try:
x.check_date()
except:
self.print_error("certificate has expired:", cert_path)
os.unlink(cert_path)
return
self.print_error("wrong certificate")
return
            except BaseException as e:
                self.print_error(e)
                # errno 104 (ECONNRESET): the peer reset the connection;
                # not every exception carries errno, hence getattr
                if getattr(e, 'errno', None) == 104:
                    return
traceback.print_exc(file=sys.stderr)
return
if is_new:
self.print_error("saving certificate")
os.rename(temporary_path, cert_path)
return s
    def run(self):
        # avoid shadowing the socket module with a local name
        s = self.get_socket()
        if s:
            self.print_error("connected")
        # always put a tuple; s is None on failure, matching the
        # Connection docstring
        self.queue.put((self.server, s))
class Interface(util.PrintError):
"""The Interface class handles a socket connected to a single remote
    electrum server. Its exposed API is:
- Member functions close(), fileno(), get_responses(), has_timed_out(),
ping_required(), queue_request(), send_requests()
- Member variable server.
"""
def __init__(self, server, socket):
self.server = server
self.host, _, _ = server.split(':')
self.socket = socket
self.pipe = util.SocketPipe(socket)
self.pipe.set_timeout(0.0) # Don't wait for data
# Dump network messages. Set at runtime from the console.
self.debug = False
self.unsent_requests = []
self.unanswered_requests = {}
# Set last ping to zero to ensure immediate ping
self.last_request = time.time()
self.last_ping = 0
self.closed_remotely = False
def diagnostic_name(self):
return self.host
def fileno(self):
# Needed for select
return self.socket.fileno()
def close(self):
if not self.closed_remotely:
try:
self.socket.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
self.socket.close()
def queue_request(self, *args): # method, params, _id
        '''Queue a request, later to be sent with send_requests when the
socket is available for writing.
'''
self.request_time = time.time()
self.unsent_requests.append(args)
def send_requests(self):
'''Sends all queued requests. Returns False on failure.'''
        wire_requests = [{'method': method, 'params': params, 'id': _id}
                         for method, params, _id in self.unsent_requests]
try:
self.pipe.send_all(wire_requests)
        except socket.error as e:
self.print_error("socket error:", e)
return False
for request in self.unsent_requests:
if self.debug:
self.print_error("-->", request)
self.unanswered_requests[request[2]] = request
self.unsent_requests = []
return True
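    # Sketch of the queue-then-flush flow above ('server.version' is a
    # standard electrum RPC method; the id 42 is illustrative):
    #
    #     interface.queue_request('server.version', ['client', '0.1'], 42)
    #     if interface.send_requests():
    #         pass  # now tracked in unanswered_requests under id 42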
def ping_required(self):
'''Maintains time since last ping. Returns True if a ping should
be sent.
'''
now = time.time()
if now - self.last_ping > 60:
self.last_ping = now
return True
return False
def has_timed_out(self):
'''Returns True if the interface has timed out.'''
if (self.unanswered_requests and time.time() - self.request_time > 10
and self.pipe.idle_time() > 10):
self.print_error("timeout", len(self.unanswered_requests))
return True
return False
def get_responses(self):
        '''Call if there is data available on the socket. Returns a list of
        (request, response) pairs. Notifications are standalone,
        unsolicited responses, presumably the result of prior
        subscriptions; for those, request is None and the response has
        no 'id' member. Otherwise the response has an 'id' member and a
        corresponding request. If the connection was closed remotely
        or the remote server is misbehaving, a (None, None) pair will
        appear.
        '''
responses = []
while True:
try:
response = self.pipe.get()
except util.timeout:
break
if response is None:
responses.append((None, None))
self.closed_remotely = True
self.print_error("connection closed remotely")
break
if self.debug:
self.print_error("<--", response)
wire_id = response.get('id', None)
if wire_id is None: # Notification
responses.append((None, response))
else:
request = self.unanswered_requests.pop(wire_id, None)
if request:
responses.append((request, response))
else:
self.print_error("unknown wire ID", wire_id)
responses.append((None, None)) # Signal
break
return responses
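    # Sketch of how a caller might dispatch the pairs returned above
    # (the interface name is an assumption for illustration):
    #
    #     for request, response in interface.get_responses():
    #         if request is None and response is None:
    #             pass  # remote close or misbehaving server
    #         elif request is None:
    #             pass  # unsolicited notification from a subscription
    #         else:
    #             pass  # response matched to its queued request by id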
def check_cert(host, cert):
try:
b = pem.dePem(cert, 'CERTIFICATE')
x = x509.X509(b)
except:
traceback.print_exc(file=sys.stdout)
return
try:
x.check_date()
expired = False
except:
expired = True
m = "host: %s\n"%host
m += "has_expired: %s\n"% expired
util.print_msg(m)
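# Sketch of calling check_cert with a PEM string (hypothetical values):
#
#     check_cert('electrum.example.com', pem_text)
#     # prints, e.g.:
#     #     host: electrum.example.com
#     #     has_expired: False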
# Used by tests
def _match_hostname(name, val):
if val == name:
return True
return val.startswith('*.') and name.endswith(val[1:])
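# Examples of the wildcard rule implemented above:
#
#     _match_hostname('foo.example.com', '*.example.com')  # -> True
#     _match_hostname('example.com', '*.example.com')      # -> False
#     _match_hostname('example.com', 'example.com')        # -> True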
def test_certificates():
from simple_config import SimpleConfig
config = SimpleConfig()
mydir = os.path.join(config.path, "certs")
certs = os.listdir(mydir)
for c in certs:
        print(c)
        p = os.path.join(mydir, c)
with open(p) as f:
cert = f.read()
check_cert(c, cert)
if __name__ == "__main__":
test_certificates()
|